8301072: Replace NULL with nullptr in share/oops/

Reviewed-by: stefank, coleenp, dholmes
Johan Sjölen 2023-02-10 09:57:59 +00:00
parent 1c7b09bc23
commit c8ace482ed
56 changed files with 1080 additions and 1080 deletions
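Background on the change itself (a reviewer note, not part of the diff): NULL in C++ expands to an integer constant, so it participates in integer overload resolution and template deduction, whereas nullptr has its own type (std::nullptr_t) that converts only to pointer types. A minimal standalone illustration, with hypothetical names:

#include <iostream>

static void describe(int)         { std::cout << "matched int\n"; }
static void describe(const char*) { std::cout << "matched pointer\n"; }

int main() {
  describe(0);        // an integer literal binds to the int overload
  describe(nullptr);  // nullptr can only bind to the pointer overload
  // describe(NULL) would pick the int overload or fail as ambiguous,
  // depending on whether NULL expands to 0 or 0L -- never reliably the
  // pointer overload. That is the type-safety gap nullptr closes.
}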

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -309,7 +309,7 @@ public:
T* dst,
size_t length) {
AccessT::arraycopy(src_obj, src_offset_in_bytes, static_cast<const T*>(nullptr),
NULL, 0, dst,
nullptr, 0, dst,
length);
}
@ -317,7 +317,7 @@ public:
static inline void arraycopy_from_native(const T* src,
arrayOop dst_obj, size_t dst_offset_in_bytes,
size_t length) {
AccessT::arraycopy(NULL, 0, src,
AccessT::arraycopy(nullptr, 0, src,
dst_obj, dst_offset_in_bytes, static_cast<T*>(nullptr),
length);
}
@ -332,8 +332,8 @@ public:
template <typename T>
static inline bool oop_arraycopy_raw(T* src, T* dst, size_t length) {
return AccessT::oop_arraycopy(NULL, 0, src,
NULL, 0, dst,
return AccessT::oop_arraycopy(nullptr, 0, src,
nullptr, 0, dst,
length);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -211,7 +211,7 @@ namespace AccessInternal {
FunctionPointerT>::type
resolve_barrier_gc() {
BarrierSet* bs = BarrierSet::barrier_set();
assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
assert(bs != nullptr, "GC barriers invoked before BarrierSet is set");
switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name) \
case BarrierSet::bs_name: { \
@ -224,7 +224,7 @@ namespace AccessInternal {
default:
fatal("BarrierSet AccessBarrier resolving not implemented");
return NULL;
return nullptr;
};
}
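The resolve_barrier_gc overloads above share one pattern: select a function pointer once, at resolution time, keyed on the runtime BarrierSet kind, so the per-access fast path pays no dispatch cost. A minimal sketch of that pattern (barrier kinds and names are illustrative, not HotSpot's API):

#include <cstdio>

enum class BarrierKind { None, CardTable };

static void store_no_barrier(int* p, int v) { *p = v; }
static void store_card_table(int* p, int v) { *p = v; /* plus card mark */ }

using StoreFn = void (*)(int*, int);

static StoreFn resolve_store(BarrierKind kind) {
  switch (kind) {
    case BarrierKind::None:      return store_no_barrier;
    case BarrierKind::CardTable: return store_card_table;
  }
  return nullptr; // mirrors the fatal() + return nullptr fallback above
}

int main() {
  int slot = 0;
  StoreFn store = resolve_store(BarrierKind::CardTable); // resolve once
  store(&slot, 42);                                      // cheap thereafter
  std::printf("%d\n", slot);
}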
@ -234,7 +234,7 @@ namespace AccessInternal {
FunctionPointerT>::type
resolve_barrier_gc() {
BarrierSet* bs = BarrierSet::barrier_set();
assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
assert(bs != nullptr, "GC barriers invoked before BarrierSet is set");
switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name) \
case BarrierSet::bs_name: { \
@ -247,7 +247,7 @@ namespace AccessInternal {
default:
fatal("BarrierSet AccessBarrier resolving not implemented");
return NULL;
return nullptr;
};
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) {
// helper
void Annotations::free_contents(ClassLoaderData* loader_data, Array<AnnotationArray*>* p) {
if (p != NULL) {
if (p != nullptr) {
for (int i = 0; i < p->length(); i++) {
MetadataFactory::free_array<u1>(loader_data, p->at(i));
}
@ -49,12 +49,12 @@ void Annotations::free_contents(ClassLoaderData* loader_data, Array<AnnotationAr
}
void Annotations::deallocate_contents(ClassLoaderData* loader_data) {
if (class_annotations() != NULL) {
if (class_annotations() != nullptr) {
MetadataFactory::free_array<u1>(loader_data, class_annotations());
}
free_contents(loader_data, fields_annotations());
if (class_type_annotations() != NULL) {
if (class_type_annotations() != nullptr) {
MetadataFactory::free_array<u1>(loader_data, class_type_annotations());
}
free_contents(loader_data, fields_type_annotations());
@ -64,7 +64,7 @@ void Annotations::deallocate_contents(ClassLoaderData* loader_data) {
// The alternative to creating this array and adding to Java heap pressure
// is to have a hashtable of the already created typeArrayOops
typeArrayOop Annotations::make_java_array(AnnotationArray* annotations, TRAPS) {
if (annotations != NULL) {
if (annotations != nullptr) {
int length = annotations->length();
typeArrayOop copy = oopFactory::new_byteArray(length, CHECK_NULL);
for (int i = 0; i< length; i++) {
@ -72,7 +72,7 @@ typeArrayOop Annotations::make_java_array(AnnotationArray* annotations, TRAPS) {
}
return copy;
} else {
return NULL;
return nullptr;
}
}
@ -81,7 +81,7 @@ void Annotations::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_class_annotations);
it->push(&_fields_annotations);
it->push(&_class_type_annotations);
it->push(&_fields_type_annotations); // FIXME: need a test case where _fields_type_annotations != NULL
it->push(&_fields_type_annotations); // FIXME: need a test case where _fields_type_annotations != nullptr
}
void Annotations::print_value_on(outputStream* st) const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,10 +70,10 @@ class Annotations: public MetaspaceObj {
static bool is_read_only_by_default() { return true; }
// Constructor to initialize to null
Annotations() : _class_annotations(NULL),
_fields_annotations(NULL),
_class_type_annotations(NULL),
_fields_type_annotations(NULL) {}
Annotations() : _class_annotations(nullptr),
_fields_annotations(nullptr),
_class_type_annotations(nullptr),
_fields_type_annotations(nullptr) {}
AnnotationArray* class_annotations() const { return _class_annotations; }
Array<AnnotationArray*>* fields_annotations() const { return _fields_annotations; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,7 @@ int ArrayKlass::static_size(int header_size) {
InstanceKlass* ArrayKlass::java_super() const {
if (super() == NULL) return NULL; // bootstrap case
if (super() == nullptr) return nullptr; // bootstrap case
// Array klasses have primary supertypes which are not reported to Java.
// Example super chain: String[][] -> Object[][] -> Object[] -> Object
return vmClasses::Object_klass();
@ -61,7 +61,7 @@ InstanceKlass* ArrayKlass::java_super() const {
oop ArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
ShouldNotReachHere();
return NULL;
return nullptr;
}
// find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined
@ -86,13 +86,13 @@ Method* ArrayKlass::uncached_lookup_method(const Symbol* name,
ArrayKlass::ArrayKlass(Symbol* name, KlassKind kind) :
Klass(kind),
_dimension(1),
_higher_dimension(NULL),
_lower_dimension(NULL) {
_higher_dimension(nullptr),
_lower_dimension(nullptr) {
// Arrays don't add any new methods, so their vtable is the same size as
// the vtable of klass Object.
set_vtable_length(Universe::base_vtable_size());
set_name(name);
set_super(Universe::is_bootstrapping() ? NULL : vmClasses::Object_klass());
set_super(Universe::is_bootstrapping() ? nullptr : vmClasses::Object_klass());
set_layout_helper(Klass::_lh_neutral_value);
set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5)
JFR_ONLY(INIT_ID(this);)
@ -102,15 +102,15 @@ ArrayKlass::ArrayKlass(Symbol* name, KlassKind kind) :
// Initialization of vtables and mirror object is done separately from base_create_array_klass,
// since a GC can happen. At this point all instance variables of the ArrayKlass must be setup.
void ArrayKlass::complete_create_array_klass(ArrayKlass* k, Klass* super_klass, ModuleEntry* module_entry, TRAPS) {
k->initialize_supers(super_klass, NULL, CHECK);
k->initialize_supers(super_klass, nullptr, CHECK);
k->vtable().initialize_vtable();
// During bootstrapping, before java.base is defined, the module_entry may not be present yet.
// These classes will be put on a fixup list and their module fields will be patched once
// java.base is defined.
assert((module_entry != NULL) || ((module_entry == NULL) && !ModuleEntryTable::javabase_defined()),
assert((module_entry != nullptr) || ((module_entry == nullptr) && !ModuleEntryTable::javabase_defined()),
"module entry not available post " JAVA_BASE_NAME " definition");
oop module = (module_entry != NULL) ? module_entry->module() : (oop)NULL;
oop module = (module_entry != nullptr) ? module_entry->module() : (oop)nullptr;
java_lang_Class::create_mirror(k, Handle(THREAD, k->class_loader()), Handle(THREAD, module), Handle(), Handle(), CHECK);
}
@ -118,10 +118,10 @@ GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots,
Array<InstanceKlass*>* transitive_interfaces) {
// interfaces = { cloneable_klass, serializable_klass };
assert(num_extra_slots == 0, "sanity of primitive array type");
assert(transitive_interfaces == NULL, "sanity");
assert(transitive_interfaces == nullptr, "sanity");
// Must share this for correct bootstrapping!
set_secondary_supers(Universe::the_array_interfaces_array());
return NULL;
return nullptr;
}
objArrayOop ArrayKlass::allocate_arrayArray(int n, int length, TRAPS) {
@ -131,7 +131,7 @@ objArrayOop ArrayKlass::allocate_arrayArray(int n, int length, TRAPS) {
ArrayKlass* ak = ArrayKlass::cast(k);
objArrayOop o = (objArrayOop)Universe::heap()->array_allocate(ak, size, length,
/* do_zero */ true, CHECK_NULL);
// initialization to NULL not necessary, area already cleared
// initialization to null not necessary, area already cleared
return o;
}
@ -159,7 +159,7 @@ void ArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) {
#if INCLUDE_CDS
void ArrayKlass::remove_unshareable_info() {
Klass::remove_unshareable_info();
if (_higher_dimension != NULL) {
if (_higher_dimension != nullptr) {
ArrayKlass *ak = ArrayKlass::cast(higher_dimension());
ak->remove_unshareable_info();
}
@ -167,7 +167,7 @@ void ArrayKlass::remove_unshareable_info() {
void ArrayKlass::remove_java_mirror() {
Klass::remove_java_mirror();
if (_higher_dimension != NULL) {
if (_higher_dimension != nullptr) {
ArrayKlass *ak = ArrayKlass::cast(higher_dimension());
ak->remove_java_mirror();
}
@ -178,7 +178,7 @@ void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle p
Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
// Klass recreates the component mirror also
if (_higher_dimension != NULL) {
if (_higher_dimension != nullptr) {
ArrayKlass *ak = ArrayKlass::cast(higher_dimension());
ak->restore_unshareable_info(loader_data, protection_domain, CHECK);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,12 +96,12 @@ class arrayOopDesc : public oopDesc {
template <typename T>
static T* obj_offset_to_raw(arrayOop obj, size_t offset_in_bytes, T* raw) {
if (obj != NULL) {
assert(raw == NULL, "either raw or in-heap");
if (obj != nullptr) {
assert(raw == nullptr, "either raw or in-heap");
char* base = reinterpret_cast<char*>((void*) obj);
raw = reinterpret_cast<T*>(base + offset_in_bytes);
} else {
assert(raw != NULL, "either raw or in-heap");
assert(raw != nullptr, "either raw or in-heap");
}
return raw;
}
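obj_offset_to_raw encodes the convention the arraycopy overloads in access.hpp above rely on: a caller supplies exactly one addressing form, either an in-heap (obj, offset) pair with a null raw pointer, or a raw pointer with a null obj. A simplified sketch of that either/or resolution (types are illustrative, not HotSpot's):

#include <cassert>
#include <cstddef>

template <typename T>
static T* resolve_address(char* obj, std::size_t offset_in_bytes, T* raw) {
  if (obj != nullptr) {
    assert(raw == nullptr && "either raw or in-heap");
    return reinterpret_cast<T*>(obj + offset_in_bytes);
  }
  assert(raw != nullptr && "either raw or in-heap");
  return raw;
}

int main() {
  alignas(8) char heap_obj[32] = {};
  int native = 7;
  int* in_heap   = resolve_address<int>(heap_obj, 16, nullptr); // heap form
  int* in_native = resolve_address<int>(nullptr, 0, &native);   // raw form
  *in_heap = *in_native;
  return (*in_heap == 7) ? 0 : 1;
}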

View File

@ -35,7 +35,7 @@
#include "runtime/globals.hpp"
// For UseCompressedOops.
NarrowPtrStruct CompressedOops::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct CompressedOops::_narrow_oop = { nullptr, 0, true };
MemRegion CompressedOops::_heap_address_range;
// Choose the heap base address and oop encoding mode
@ -80,7 +80,7 @@ void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
// base() is one page below the heap.
assert((intptr_t)base() <= ((intptr_t)_heap_address_range.start() - (intptr_t)os::vm_page_size()) ||
base() == NULL, "invalid value");
base() == nullptr, "invalid value");
assert(shift() == LogMinObjAlignmentInBytes ||
shift() == 0, "invalid value");
#endif
@ -148,14 +148,14 @@ bool CompressedOops::is_disjoint_heap_base_address(address addr) {
// Check for disjoint base compressed oops.
bool CompressedOops::base_disjoint() {
return _narrow_oop._base != NULL && is_disjoint_heap_base_address(_narrow_oop._base);
return _narrow_oop._base != nullptr && is_disjoint_heap_base_address(_narrow_oop._base);
}
// Check for real heapbased compressed oops.
// We must subtract the base as the bits overlap.
// If we negate above function, we also get unscaled and zerobased.
bool CompressedOops::base_overlaps() {
return _narrow_oop._base != NULL && !is_disjoint_heap_base_address(_narrow_oop._base);
return _narrow_oop._base != nullptr && !is_disjoint_heap_base_address(_narrow_oop._base);
}
void CompressedOops::print_mode(outputStream* st) {
@ -179,7 +179,7 @@ void CompressedOops::print_mode(outputStream* st) {
}
// For UseCompressedClassPointers.
NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { NULL, 0, true };
NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { nullptr, 0, true };
// CompressedClassSpaceSize set to 1GB, but appears 3GB away from _narrow_ptrs_base during CDS dump.
// (Todo: we should #ifdef out CompressedKlassPointers for 32bit completely and fix all call sites which

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,7 @@ class ReservedHeapSpace;
struct NarrowPtrStruct {
// Base address for oop-within-java-object materialization.
// NULL if using wide oops or zero based narrow oops.
// null if using wide oops or zero based narrow oops.
address _base;
// Number of shift bits for encoding/decoding narrow ptrs.
// 0 if using wide ptrs or zero based unscaled narrow ptrs,
@ -116,7 +116,7 @@ public:
static void print_mode(outputStream* st);
static bool is_null(oop v) { return v == NULL; }
static bool is_null(oop v) { return v == nullptr; }
static bool is_null(narrowOop v) { return v == narrowOop::null; }
static inline oop decode_raw_not_null(narrowOop v);
@ -179,7 +179,7 @@ public:
static size_t range() { return _range; }
static int shift() { return _narrow_klass._shift; }
static bool is_null(Klass* v) { return v == NULL; }
static bool is_null(Klass* v) { return v == nullptr; }
static bool is_null(narrowKlass v) { return v == 0; }
static inline Klass* decode_raw(narrowKlass v, address base);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,7 +60,7 @@ inline oop CompressedOops::decode_not_null(narrowOop v) {
}
inline oop CompressedOops::decode(narrowOop v) {
return is_null(v) ? (oop)NULL : decode_not_null(v);
return is_null(v) ? nullptr : decode_not_null(v);
}
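decode and decode_not_null implement the usual narrow-pointer arithmetic: a 32-bit value is widened via base + (narrow << shift), with a null base in zero-based mode, shift 0 in unscaled mode, and the value 0 reserved for null. A simplified model (64-bit host assumed; types are illustrative):

#include <cassert>
#include <cstdint>

struct NarrowModel {
  uintptr_t base;  // 0 in zero-based mode (the "null base" case above)
  int shift;       // log2 of object alignment, or 0 when unscaled

  uint32_t encode(uintptr_t addr) const {
    if (addr == 0) return 0;             // null encodes to 0
    assert(addr >= base);
    return static_cast<uint32_t>((addr - base) >> shift);
  }
  uintptr_t decode(uint32_t narrow) const {
    if (narrow == 0) return 0;           // matches is_null(v) ? nullptr : ...
    return base + (static_cast<uintptr_t>(narrow) << shift);
  }
};

int main() {
  NarrowModel m{uintptr_t(1) << 32, 3};            // heap-based, 8-byte aligned
  uintptr_t addr = (uintptr_t(1) << 32) + 0x40;
  return m.decode(m.encode(addr)) == addr ? 0 : 1; // round-trips exactly
}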
inline narrowOop CompressedOops::encode_not_null(oop v) {
@ -142,7 +142,7 @@ inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v, address na
}
inline Klass* CompressedKlassPointers::decode(narrowKlass v) {
return is_null(v) ? (Klass*)NULL : decode_not_null(v);
return is_null(v) ? nullptr : decode_not_null(v);
}
inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,8 +53,8 @@ ConstMethod::ConstMethod(int byte_code_size,
NoSafepointVerifier no_safepoint;
init_fingerprint();
set_constants(NULL);
set_stackmap_data(NULL);
set_constants(nullptr);
set_stackmap_data(nullptr);
set_code_size(byte_code_size);
set_constMethod_size(size);
set_inlined_tables_length(sizes); // sets _flags
@ -62,7 +62,7 @@ ConstMethod::ConstMethod(int byte_code_size,
assert(this->size() == size, "wrong size for object");
set_name_index(0);
set_signature_index(0);
set_constants(NULL);
set_constants(nullptr);
set_max_stack(0);
set_max_locals(0);
set_method_idnum(0);
@ -79,10 +79,10 @@ void ConstMethod::copy_stackmap_data(ClassLoaderData* loader_data,
// Deallocate metadata fields associated with ConstMethod*
void ConstMethod::deallocate_contents(ClassLoaderData* loader_data) {
if (stackmap_data() != NULL) {
if (stackmap_data() != nullptr) {
MetadataFactory::free_array<u1>(loader_data, stackmap_data());
}
set_stackmap_data(NULL);
set_stackmap_data(nullptr);
// deallocate annotation arrays
if (has_method_annotations())
@ -432,7 +432,7 @@ void ConstMethod::print_on(outputStream* st) const {
st->print_cr("%s", internal_name());
Method* m = method();
st->print(" - method: " PTR_FORMAT " ", p2i(m));
if (m != NULL) {
if (m != nullptr) {
m->print_value_on(st);
}
st->cr();
@ -448,10 +448,10 @@ void ConstMethod::print_on(outputStream* st) const {
void ConstMethod::print_value_on(outputStream* st) const {
st->print(" const part of method " );
Method* m = method();
if (m != NULL) {
if (m != nullptr) {
m->print_value_on(st);
} else {
st->print("NULL");
st->print("null");
}
}
@ -460,7 +460,7 @@ void ConstMethod::print_value_on(outputStream* st) const {
void ConstMethod::verify_on(outputStream* st) {
// Verification can occur during oop construction before the method or
// other fields have been initialized.
guarantee(method() != NULL && method()->is_method(), "should be method");
guarantee(method() != nullptr && method()->is_method(), "should be method");
address m_end = (address)((intptr_t) this + size());
address compressed_table_start = code_end();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -276,7 +276,7 @@ public:
Array<u1>* stackmap_data() const { return _stackmap_data; }
void set_stackmap_data(Array<u1>* sd) { _stackmap_data = sd; }
void copy_stackmap_data(ClassLoaderData* loader_data, u1* sd, int length, TRAPS);
bool has_stackmap_table() const { return _stackmap_data != NULL; }
bool has_stackmap_table() const { return _stackmap_data != nullptr; }
void init_fingerprint() {
const uint64_t initval = UCONST64(0x8000000000000000);
@ -398,7 +398,7 @@ public:
AnnotationArray** method_annotations_addr() const;
AnnotationArray* method_annotations() const {
return has_method_annotations() ? *(method_annotations_addr()) : NULL;
return has_method_annotations() ? *(method_annotations_addr()) : nullptr;
}
void set_method_annotations(AnnotationArray* anno) {
*(method_annotations_addr()) = anno;
@ -406,7 +406,7 @@ public:
AnnotationArray** parameter_annotations_addr() const;
AnnotationArray* parameter_annotations() const {
return has_parameter_annotations() ? *(parameter_annotations_addr()) : NULL;
return has_parameter_annotations() ? *(parameter_annotations_addr()) : nullptr;
}
void set_parameter_annotations(AnnotationArray* anno) {
*(parameter_annotations_addr()) = anno;
@ -414,7 +414,7 @@ public:
AnnotationArray** type_annotations_addr() const;
AnnotationArray* type_annotations() const {
return has_type_annotations() ? *(type_annotations_addr()) : NULL;
return has_type_annotations() ? *(type_annotations_addr()) : nullptr;
}
void set_type_annotations(AnnotationArray* anno) {
*(type_annotations_addr()) = anno;
@ -422,7 +422,7 @@ public:
AnnotationArray** default_annotations_addr() const;
AnnotationArray* default_annotations() const {
return has_default_annotations() ? *(default_annotations_addr()) : NULL;
return has_default_annotations() ? *(default_annotations_addr()) : nullptr;
}
void set_default_annotations(AnnotationArray* anno) {
*(default_annotations_addr()) = anno;

View File

@ -89,7 +89,7 @@ void ConstantPool::copy_fields(const ConstantPool* orig) {
// MetaspaceObj allocation invariant is calloc equivalent memory
// simple verification of this here (JVM_CONSTANT_Invalid == 0 )
static bool tag_array_is_zero_initialized(Array<u1>* tags) {
assert(tags != NULL, "invariant");
assert(tags != nullptr, "invariant");
const int length = tags->length();
for (int index = 0; index < length; ++index) {
if (JVM_CONSTANT_Invalid != tags->at(index)) {
@ -105,31 +105,31 @@ ConstantPool::ConstantPool(Array<u1>* tags) :
_tags(tags),
_length(tags->length()) {
assert(_tags != NULL, "invariant");
assert(_tags != nullptr, "invariant");
assert(tags->length() == _length, "invariant");
assert(tag_array_is_zero_initialized(tags), "invariant");
assert(0 == flags(), "invariant");
assert(0 == version(), "invariant");
assert(NULL == _pool_holder, "invariant");
assert(nullptr == _pool_holder, "invariant");
}
void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
if (cache() != NULL) {
if (cache() != nullptr) {
MetadataFactory::free_metadata(loader_data, cache());
set_cache(NULL);
set_cache(nullptr);
}
MetadataFactory::free_array<Klass*>(loader_data, resolved_klasses());
set_resolved_klasses(NULL);
set_resolved_klasses(nullptr);
MetadataFactory::free_array<jushort>(loader_data, operands());
set_operands(NULL);
set_operands(nullptr);
release_C_heap_structures();
// free tag array
MetadataFactory::free_array<u1>(loader_data, tags());
set_tags(NULL);
set_tags(nullptr);
}
void ConstantPool::release_C_heap_structures() {
@ -164,8 +164,8 @@ objArrayOop ConstantPool::resolved_references() const {
// Called from outside constant pool resolution where a resolved_reference array
// may not be present.
objArrayOop ConstantPool::resolved_references_or_null() const {
if (_cache == NULL) {
return NULL;
if (_cache == nullptr) {
return nullptr;
} else {
return _cache->resolved_references();
}
@ -224,7 +224,7 @@ void ConstantPool::allocate_resolved_klasses(ClassLoaderData* loader_data, int n
// This allows us to use 0xffff (ConstantPool::_temp_resolved_klass_index) to indicate
// UnresolvedKlass entries that are temporarily created during class redefinition.
assert(num_klasses < CPKlassSlot::_temp_resolved_klass_index, "sanity");
assert(resolved_klasses() == NULL, "sanity");
assert(resolved_klasses() == nullptr, "sanity");
Array<Klass*>* rk = MetadataFactory::new_array<Klass*>(loader_data, num_klasses, CHECK);
set_resolved_klasses(rk);
}
@ -255,22 +255,22 @@ void ConstantPool::initialize_unresolved_klasses(ClassLoaderData* loader_data, T
// Hidden class support:
void ConstantPool::klass_at_put(int class_index, Klass* k) {
assert(k != NULL, "must be valid klass");
assert(k != nullptr, "must be valid klass");
CPKlassSlot kslot = klass_slot_at(class_index);
int resolved_klass_index = kslot.resolved_klass_index();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
Atomic::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* non-NULL, so we need hardware store ordering here.
// and the Klass* non-null, so we need hardware store ordering here.
release_tag_at_put(class_index, JVM_CONSTANT_Class);
}
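The store-ordering comment above describes the release/acquire publish idiom: write the payload (the Klass* slot), then release-store the flag (the tag); a reader that acquires the tag and sees it set is guaranteed to also see the payload. A compressed sketch using std::atomic rather than HotSpot's Atomic:: wrappers (the real code release-stores both the klass slot and the tag):

#include <atomic>
#include <cassert>

struct Slot {
  void* payload = nullptr;    // stands in for the Klass* slot
  std::atomic<int> tag{0};    // stands in for the constant-pool tag
};

static void publish(Slot& s, void* k) {
  s.payload = k;                             // write payload first
  s.tag.store(1, std::memory_order_release); // then release-store the tag
}

static void* try_consume(Slot& s) {
  if (s.tag.load(std::memory_order_acquire) == 1) { // pairs with the release
    assert(s.payload != nullptr); // tag set implies payload visible, non-null
    return s.payload;
  }
  return nullptr; // not resolved yet
}

int main() {
  Slot s;
  int k = 0;
  publish(s, &k);
  return try_consume(s) == &k ? 0 : 1;
}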
#if INCLUDE_CDS_JAVA_HEAP
// Returns the _resolved_reference array after removing unarchivable items from it.
// Returns nullptr if this class is not supported, or _resolved_reference doesn't exist.
// Returns null if this class is not supported, or _resolved_reference doesn't exist.
objArrayOop ConstantPool::prepare_resolved_references_for_archiving() {
if (_cache == NULL) {
if (_cache == nullptr) {
return nullptr; // nothing to do
}
@ -285,12 +285,12 @@ objArrayOop ConstantPool::prepare_resolved_references_for_archiving() {
objArrayOop rr = resolved_references();
if (rr != nullptr) {
Array<u2>* ref_map = reference_map();
int ref_map_len = ref_map == NULL ? 0 : ref_map->length();
int ref_map_len = ref_map == nullptr ? 0 : ref_map->length();
int rr_len = rr->length();
for (int i = 0; i < rr_len; i++) {
oop obj = rr->obj_at(i);
rr->obj_at_put(i, nullptr);
if (obj != NULL && i < ref_map_len) {
if (obj != nullptr && i < ref_map_len) {
int index = object_to_cp_index(i);
if (tag_at(index).is_string()) {
assert(java_lang_String::is_instance(obj), "must be");
@ -307,7 +307,7 @@ objArrayOop ConstantPool::prepare_resolved_references_for_archiving() {
void ConstantPool::add_dumped_interned_strings() {
objArrayOop rr = resolved_references();
if (rr != NULL) {
if (rr != nullptr) {
int rr_len = rr->length();
for (int i = 0; i < rr_len; i++) {
oop p = rr->obj_at(i);
@ -328,16 +328,16 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
assert(is_constantPool(), "ensure C++ vtable is restored");
assert(on_stack(), "should always be set for shared constant pools");
assert(is_shared(), "should always be set for shared constant pools");
assert(_cache != NULL, "constant pool _cache should not be NULL");
assert(_cache != nullptr, "constant pool _cache should not be null");
// Only create the new resolved references array if it hasn't been attempted before
if (resolved_references() != NULL) return;
if (resolved_references() != nullptr) return;
if (vmClasses::Object_klass_loaded()) {
ClassLoaderData* loader_data = pool_holder()->class_loader_data();
#if INCLUDE_CDS_JAVA_HEAP
if (ArchiveHeapLoader::is_fully_available() &&
_cache->archived_references() != NULL) {
_cache->archived_references() != nullptr) {
oop archived = _cache->archived_references();
// Create handle for the archived resolved reference array object
Handle refs_handle(THREAD, archived);
@ -374,7 +374,7 @@ void ConstantPool::remove_unshareable_info() {
// re-creating the resolved reference array if archived heap data cannot be mapped
// at runtime.
set_resolved_reference_length(
resolved_references() != NULL ? resolved_references()->length() : 0);
resolved_references() != nullptr ? resolved_references()->length() : 0);
set_resolved_references(OopHandle());
bool archived = false;
@ -399,8 +399,8 @@ void ConstantPool::remove_unshareable_info() {
}
}
if (cache() != NULL) {
// cache() is NULL if this class is not yet linked.
if (cache() != nullptr) {
// cache() is null if this class is not yet linked.
cache()->remove_unshareable_info();
}
}
@ -419,10 +419,10 @@ bool ConstantPool::maybe_archive_resolved_klass_at(int cp_index) {
CPKlassSlot kslot = klass_slot_at(cp_index);
int resolved_klass_index = kslot.resolved_klass_index();
Klass* k = resolved_klasses()->at(resolved_klass_index);
// k could be NULL if the referenced class has been excluded via
// k could be null if the referenced class has been excluded via
// SystemDictionaryShared::is_excluded_class().
if (k != NULL) {
if (k != nullptr) {
ConstantPool* src_cp = ArchiveBuilder::current()->get_source_addr(this);
if (ClassPrelinker::can_archive_resolved_klass(src_cp, cp_index)) {
if (log_is_enabled(Debug, cds, resolve)) {
@ -436,7 +436,7 @@ bool ConstantPool::maybe_archive_resolved_klass_at(int cp_index) {
// This referenced class cannot be archived. Revert the tag to UnresolvedClass,
// so that the proper class loading and initialization can happen at runtime.
resolved_klasses()->at_put(resolved_klass_index, NULL);
resolved_klasses()->at_put(resolved_klass_index, nullptr);
tag_at_put(cp_index, JVM_CONSTANT_UnresolvedClass);
return false;
}
@ -457,21 +457,21 @@ void ConstantPool::string_at_put(int which, int obj_index, oop str) {
void ConstantPool::trace_class_resolution(const constantPoolHandle& this_cp, Klass* k) {
ResourceMark rm;
int line_number = -1;
const char * source_file = NULL;
const char * source_file = nullptr;
if (JavaThread::current()->has_last_Java_frame()) {
// try to identify the method which called this function.
vframeStream vfst(JavaThread::current());
if (!vfst.at_end()) {
line_number = vfst.method()->line_number_from_bci(vfst.bci());
Symbol* s = vfst.method()->method_holder()->source_file_name();
if (s != NULL) {
if (s != nullptr) {
source_file = s->as_C_string();
}
}
}
if (k != this_cp->pool_holder()) {
// only print something if the classes are different
if (source_file != NULL) {
if (source_file != nullptr) {
log_debug(class, resolve)("%s %s %s:%d",
this_cp->pool_holder()->external_name(),
k->external_name(), source_file, line_number);
@ -499,7 +499,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
// the unresolved_klasses() array.
if (this_cp->tag_at(which).is_klass()) {
Klass* klass = this_cp->resolved_klasses()->at(resolved_klass_index);
if (klass != NULL) {
if (klass != nullptr) {
return klass;
}
}
@ -545,7 +545,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
// some other thread has beaten us and has resolved the class.
// To preserve old behavior, we return the resolved class.
Klass* klass = this_cp->resolved_klasses()->at(resolved_klass_index);
assert(klass != NULL, "must be resolved if exception was cleared");
assert(klass != nullptr, "must be resolved if exception was cleared");
return klass;
}
@ -557,7 +557,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index);
Atomic::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* stored in _resolved_klasses is non-NULL, so we need
// and the Klass* stored in _resolved_klasses is non-null, so we need
// hardware store ordering here.
// We also need to CAS to not overwrite an error from a racing thread.
@ -568,7 +568,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
// We need to recheck exceptions from racing thread and return the same.
if (old_tag == JVM_CONSTANT_UnresolvedClassInError) {
// Remove klass.
this_cp->resolved_klasses()->at_put(resolved_klass_index, NULL);
this_cp->resolved_klasses()->at_put(resolved_klass_index, nullptr);
throw_resolution_error(this_cp, which, CHECK_NULL);
}
@ -578,7 +578,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which,
// Does not update ConstantPool* - to avoid any exception throwing. Used
// by compiler and exception handling. Also used to avoid classloads for
// instanceof operations. Returns NULL if the class has not been loaded or
// instanceof operations. Returns null if the class has not been loaded or
// if the verification of constant pool failed
Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int which) {
CPKlassSlot kslot = this_cp->klass_slot_at(which);
@ -588,10 +588,10 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w
if (this_cp->tag_at(which).is_klass()) {
Klass* k = this_cp->resolved_klasses()->at(resolved_klass_index);
assert(k != NULL, "should be resolved");
assert(k != nullptr, "should be resolved");
return k;
} else if (this_cp->tag_at(which).is_unresolved_klass_in_error()) {
return NULL;
return nullptr;
} else {
Thread* current = Thread::current();
Symbol* name = this_cp->symbol_at(name_index);
@ -602,15 +602,15 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w
Klass* k = SystemDictionary::find_instance_klass(current, name, h_loader, h_prot);
// Avoid constant pool verification at a safepoint, as it takes the Module_lock.
if (k != NULL && current->is_Java_thread()) {
if (k != nullptr && current->is_Java_thread()) {
// Make sure that resolving is legal
JavaThread* THREAD = JavaThread::cast(current); // For exception macros.
ExceptionMark em(THREAD);
// return NULL if verification fails
// return null if verification fails
verify_constant_pool_resolve(this_cp, k, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
return NULL;
return nullptr;
}
return k;
} else {
@ -621,12 +621,12 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w
Method* ConstantPool::method_at_if_loaded(const constantPoolHandle& cpool,
int which) {
if (cpool->cache() == NULL) return NULL; // nothing to load yet
if (cpool->cache() == nullptr) return nullptr; // nothing to load yet
int cache_index = decode_cpcache_index(which, true);
if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) {
// FIXME: should be an assert
log_debug(class, resolve)("bad operand %d in:", which); cpool->print();
return NULL;
return nullptr;
}
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
return e->method_if_resolved(cpool);
@ -634,14 +634,14 @@ Method* ConstantPool::method_at_if_loaded(const constantPoolHandle& cpool,
bool ConstantPool::has_appendix_at_if_loaded(const constantPoolHandle& cpool, int which) {
if (cpool->cache() == NULL) return false; // nothing to load yet
if (cpool->cache() == nullptr) return false; // nothing to load yet
int cache_index = decode_cpcache_index(which, true);
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
return e->has_appendix();
}
oop ConstantPool::appendix_at_if_loaded(const constantPoolHandle& cpool, int which) {
if (cpool->cache() == NULL) return NULL; // nothing to load yet
if (cpool->cache() == nullptr) return nullptr; // nothing to load yet
int cache_index = decode_cpcache_index(which, true);
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
return e->appendix_if_resolved(cpool);
@ -649,7 +649,7 @@ oop ConstantPool::appendix_at_if_loaded(const constantPoolHandle& cpool, int whi
bool ConstantPool::has_local_signature_at_if_loaded(const constantPoolHandle& cpool, int which) {
if (cpool->cache() == NULL) return false; // nothing to load yet
if (cpool->cache() == nullptr) return false; // nothing to load yet
int cache_index = decode_cpcache_index(which, true);
ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
return e->has_local_signature();
@ -668,7 +668,7 @@ Symbol* ConstantPool::impl_signature_ref_at(int which, bool uncached) {
int ConstantPool::impl_name_and_type_ref_index_at(int which, bool uncached) {
int i = which;
if (!uncached && cache() != NULL) {
if (!uncached && cache() != nullptr) {
if (ConstantPool::is_invokedynamic_index(which)) {
// Invokedynamic index is index into the constant pool cache
int pool_index = invokedynamic_bootstrap_ref_index_at(which);
@ -693,7 +693,7 @@ int ConstantPool::impl_name_and_type_ref_index_at(int which, bool uncached) {
constantTag ConstantPool::impl_tag_ref_at(int which, bool uncached) {
int pool_index = which;
if (!uncached && cache() != NULL) {
if (!uncached && cache() != nullptr) {
if (ConstantPool::is_invokedynamic_index(which)) {
// Invokedynamic index is index into resolved_references
pool_index = invokedynamic_bootstrap_ref_index_at(which);
@ -709,7 +709,7 @@ int ConstantPool::impl_klass_ref_index_at(int which, bool uncached) {
guarantee(!ConstantPool::is_invokedynamic_index(which),
"an invokedynamic instruction does not have a klass");
int i = which;
if (!uncached && cache() != NULL) {
if (!uncached && cache() != nullptr) {
// change byte-ordering and go via cache
i = remap_instruction_operand_from_cache(which);
}
@ -788,7 +788,7 @@ void ConstantPool::resolve_string_constants_impl(const constantPoolHandle& this_
static Symbol* exception_message(const constantPoolHandle& this_cp, int which, constantTag tag, oop pending_exception) {
// Dig out the detailed message to reuse if possible
Symbol* message = java_lang_Throwable::detail_message(pending_exception);
if (message != NULL) {
if (message != nullptr) {
return message;
}
@ -824,9 +824,9 @@ static void add_resolution_error(const constantPoolHandle& this_cp, int which,
oop cause = java_lang_Throwable::cause(pending_exception);
// Also dig out the exception cause, if present.
Symbol* cause_sym = NULL;
Symbol* cause_msg = NULL;
if (cause != NULL && cause != pending_exception) {
Symbol* cause_sym = nullptr;
Symbol* cause_msg = nullptr;
if (cause != nullptr && cause != pending_exception) {
cause_sym = cause->klass()->name();
cause_msg = java_lang_Throwable::detail_message(cause);
}
@ -838,24 +838,24 @@ static void add_resolution_error(const constantPoolHandle& this_cp, int which,
void ConstantPool::throw_resolution_error(const constantPoolHandle& this_cp, int which, TRAPS) {
ResourceMark rm(THREAD);
Symbol* message = NULL;
Symbol* cause = NULL;
Symbol* cause_msg = NULL;
Symbol* message = nullptr;
Symbol* cause = nullptr;
Symbol* cause_msg = nullptr;
Symbol* error = SystemDictionary::find_resolution_error(this_cp, which, &message, &cause, &cause_msg);
assert(error != NULL, "checking");
const char* cause_str = cause_msg != NULL ? cause_msg->as_C_string() : NULL;
assert(error != nullptr, "checking");
const char* cause_str = cause_msg != nullptr ? cause_msg->as_C_string() : nullptr;
CLEAR_PENDING_EXCEPTION;
if (message != NULL) {
if (message != nullptr) {
char* msg = message->as_C_string();
if (cause != NULL) {
if (cause != nullptr) {
Handle h_cause = Exceptions::new_exception(THREAD, cause, cause_str);
THROW_MSG_CAUSE(error, msg, h_cause);
} else {
THROW_MSG(error, msg);
}
} else {
if (cause != NULL) {
if (cause != nullptr) {
Handle h_cause = Exceptions::new_exception(THREAD, cause, cause_str);
THROW_CAUSE(error, h_cause);
} else {
@ -926,7 +926,7 @@ BasicType ConstantPool::basic_type_for_constant_at(int which) {
oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
int index, int cache_index,
bool* status_return, TRAPS) {
oop result_oop = NULL;
oop result_oop = nullptr;
Handle throw_exception;
if (cache_index == _possible_index_sentinel) {
@ -942,13 +942,13 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
if (cache_index >= 0) {
result_oop = this_cp->resolved_reference_at(cache_index);
if (result_oop != NULL) {
if (result_oop != nullptr) {
if (result_oop == Universe::the_null_sentinel()) {
DEBUG_ONLY(int temp_index = (index >= 0 ? index : this_cp->object_to_cp_index(cache_index)));
assert(this_cp->tag_at(temp_index).is_dynamic_constant(), "only condy uses the null sentinel");
result_oop = NULL;
result_oop = nullptr;
}
if (status_return != NULL) (*status_return) = true;
if (status_return != nullptr) (*status_return) = true;
return result_oop;
// That was easy...
}
@ -959,16 +959,16 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
constantTag tag = this_cp->tag_at(index);
if (status_return != NULL) {
if (status_return != nullptr) {
// don't trigger resolution if the constant might need it
switch (tag.value()) {
case JVM_CONSTANT_Class:
{
CPKlassSlot kslot = this_cp->klass_slot_at(index);
int resolved_klass_index = kslot.resolved_klass_index();
if (this_cp->resolved_klasses()->at(resolved_klass_index) == NULL) {
if (this_cp->resolved_klasses()->at(resolved_klass_index) == nullptr) {
(*status_return) = false;
return NULL;
return nullptr;
}
// the klass is waiting in the CP; go get it
break;
@ -982,7 +982,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
break;
default:
(*status_return) = false;
return NULL;
return nullptr;
}
// from now on there is either success or an OOME
(*status_return) = true;
@ -1028,8 +1028,8 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
if (!is_reference_type(type)) {
// Make sure the primitive value is properly boxed.
// This is a JDK responsibility.
const char* fail = NULL;
if (result_oop == NULL) {
const char* fail = nullptr;
if (result_oop == nullptr) {
fail = "null result instead of box";
} else if (!is_java_primitive(type)) {
// FIXME: support value types via unboxing
@ -1037,7 +1037,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
} else if (!java_lang_boxing_object::is_instance(result_oop, type)) {
fail = "primitive is not properly boxed";
}
if (fail != NULL) {
if (fail != nullptr) {
// Since this exception is not a LinkageError, throw exception
// but do not save a DynamicInError resolution result.
// See section 5.4.3 of the VM spec.
@ -1163,7 +1163,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
// The important thing here is that all threads pick up the same result.
// It doesn't matter which racing thread wins, as long as only one
// result is used by all threads, and all future queries.
oop new_result = (result_oop == NULL ? Universe::the_null_sentinel() : result_oop);
oop new_result = (result_oop == nullptr ? Universe::the_null_sentinel() : result_oop);
oop old_result = this_cp->set_resolved_reference_at(cache_index, new_result);
if (old_result == nullptr) {
return result_oop; // was installed
@ -1171,7 +1171,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
// Return the winning thread's result. This can be different than
// the result here for MethodHandles.
if (old_result == Universe::the_null_sentinel())
old_result = NULL;
old_result = nullptr;
return old_result;
}
} else {
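The block above combines two ideas: a constant that resolves to null is stored as a distinguished non-null sentinel, so an empty slot ("not yet resolved") stays distinguishable from a resolved null, and the first thread to install a result wins, with every racer returning the winner's value. A sketch of both ideas together (std::atomic stands in for the resolved-references slot; the sentinel object and names are illustrative):

#include <atomic>

static int sentinel_storage;
static void* const THE_NULL_SENTINEL = &sentinel_storage; // illustrative

static void* install_result(std::atomic<void*>& slot, void* result) {
  void* to_store = (result == nullptr) ? THE_NULL_SENTINEL : result;
  void* expected = nullptr;
  if (slot.compare_exchange_strong(expected, to_store)) {
    return result;  // we won the race; our value is now the canonical one
  }
  // A racing thread installed first; all callers must agree on its value.
  return (expected == THE_NULL_SENTINEL) ? nullptr : expected;
}

int main() {
  std::atomic<void*> slot{nullptr};
  install_result(slot, nullptr);             // a resolved-to-null constant
  return slot.load() == THE_NULL_SENTINEL ? 0 : 1; // slot no longer "empty"
}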
@ -1182,7 +1182,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp,
oop ConstantPool::uncached_string_at(int which, TRAPS) {
Symbol* sym = unresolved_string_at(which);
oop str = StringTable::intern(sym, CHECK_(NULL));
oop str = StringTable::intern(sym, CHECK_(nullptr));
assert(java_lang_String::is_instance(str), "must be string");
return str;
}
@ -1228,9 +1228,9 @@ oop ConstantPool::string_at_impl(const constantPoolHandle& this_cp, int which, i
// If the string has already been interned, this entry will be non-null
oop str = this_cp->resolved_reference_at(obj_index);
assert(str != Universe::the_null_sentinel(), "");
if (str != NULL) return str;
if (str != nullptr) return str;
Symbol* sym = this_cp->unresolved_string_at(which);
str = StringTable::intern(sym, CHECK_(NULL));
str = StringTable::intern(sym, CHECK_(nullptr));
this_cp->string_at_put(which, obj_index, str);
assert(java_lang_String::is_instance(str), "must be string");
return str;
@ -1480,7 +1480,7 @@ void ConstantPool::resize_operands(int delta_len, int delta_size, TRAPS) {
(min_size - 2*min_len) * sizeof(u2));
// Explicitly deallocate old operands array.
// Note, it is not needed for 7u backport.
if ( operands() != NULL) { // the safety check
if ( operands() != nullptr) { // the safety check
MetadataFactory::free_array<u2>(loader_data, operands());
}
set_operands(new_ops);
@ -2281,7 +2281,7 @@ void ConstantPool::print_on(outputStream* st) const {
if (on_stack()) st->print(" on_stack");
st->cr();
}
if (pool_holder() != NULL) {
if (pool_holder() != nullptr) {
st->print_cr(" - holder: " PTR_FORMAT, p2i(pool_holder()));
}
st->print_cr(" - cache: " PTR_FORMAT, p2i(cache()));
@ -2311,7 +2311,7 @@ void ConstantPool::print_entry_on(const int index, outputStream* st) {
switch (tag_at(index).value()) {
case JVM_CONSTANT_Class :
{ Klass* k = klass_at(index, CATCH);
guarantee(k != NULL, "need klass");
guarantee(k != nullptr, "need klass");
k->print_value_on(st);
st->print(" {" PTR_FORMAT "}", p2i(k));
}
@ -2408,15 +2408,15 @@ void ConstantPool::print_value_on(outputStream* st) const {
assert(is_constantPool(), "must be constantPool");
st->print("constant pool [%d]", length());
if (has_preresolution()) st->print("/preresolution");
if (operands() != NULL) st->print("/operands[%d]", operands()->length());
if (operands() != nullptr) st->print("/operands[%d]", operands()->length());
print_address_on(st);
if (pool_holder() != NULL) {
if (pool_holder() != nullptr) {
st->print(" for ");
pool_holder()->print_value_on(st);
bool extra = (pool_holder()->constants() != this);
if (extra) st->print(" (extra)");
}
if (cache() != NULL) {
if (cache() != nullptr) {
st->print(" cache=" PTR_FORMAT, p2i(cache()));
}
}
@ -2437,8 +2437,8 @@ void ConstantPool::verify_on(outputStream* st) {
guarantee(entry->refcount() != 0, "should have nonzero reference count");
}
}
if (pool_holder() != NULL) {
// Note: pool_holder() can be NULL in temporary constant pools
if (pool_holder() != nullptr) {
// Note: pool_holder() can be null in temporary constant pools
// used during constant pool merging
guarantee(pool_holder()->is_klass(), "should be klass");
}

View File

@ -186,7 +186,7 @@ class ConstantPool : public Metadata {
// generics support
Symbol* generic_signature() const {
return (_generic_signature_index == 0) ?
(Symbol*)NULL : symbol_at(_generic_signature_index);
nullptr : symbol_at(_generic_signature_index);
}
u2 generic_signature_index() const { return _generic_signature_index; }
void set_generic_signature_index(u2 sig_index) { _generic_signature_index = sig_index; }
@ -194,7 +194,7 @@ class ConstantPool : public Metadata {
// source file name
Symbol* source_file_name() const {
return (_source_file_name_index == 0) ?
(Symbol*)NULL : symbol_at(_source_file_name_index);
nullptr : symbol_at(_source_file_name_index);
}
u2 source_file_name_index() const { return _source_file_name_index; }
void set_source_file_name_index(u2 sourcefile_index) { _source_file_name_index = sourcefile_index; }
@ -565,7 +565,7 @@ class ConstantPool : public Metadata {
operands->at_put(n+1, extract_high_short_from_int(offset));
}
static int operand_array_length(Array<u2>* operands) {
if (operands == NULL || operands->length() == 0) return 0;
if (operands == nullptr || operands->length() == 0) return 0;
int second_part = operand_offset_at(operands, 0);
return (second_part / 2);
}
@ -716,17 +716,17 @@ class ConstantPool : public Metadata {
// Resolve late bound constants.
oop resolve_constant_at(int index, TRAPS) {
constantPoolHandle h_this(THREAD, this);
return resolve_constant_at_impl(h_this, index, _no_index_sentinel, NULL, THREAD);
return resolve_constant_at_impl(h_this, index, _no_index_sentinel, nullptr, THREAD);
}
oop resolve_cached_constant_at(int cache_index, TRAPS) {
constantPoolHandle h_this(THREAD, this);
return resolve_constant_at_impl(h_this, _no_index_sentinel, cache_index, NULL, THREAD);
return resolve_constant_at_impl(h_this, _no_index_sentinel, cache_index, nullptr, THREAD);
}
oop resolve_possibly_cached_constant_at(int pool_index, TRAPS) {
constantPoolHandle h_this(THREAD, this);
return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, NULL, THREAD);
return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, nullptr, THREAD);
}
oop find_cached_constant_at(int pool_index, bool& found_it, TRAPS) {
@ -811,7 +811,7 @@ class ConstantPool : public Metadata {
private:
void set_resolved_references(OopHandle s) { _cache->set_resolved_references(s); }
Array<u2>* reference_map() const { return (_cache == NULL) ? NULL : _cache->reference_map(); }
Array<u2>* reference_map() const { return (_cache == nullptr) ? nullptr : _cache->reference_map(); }
void set_reference_map(Array<u2>* o) { _cache->set_reference_map(o); }
Symbol* impl_name_ref_at(int which, bool uncached);

View File

@ -59,7 +59,7 @@
void ConstantPoolCacheEntry::initialize_entry(int index) {
assert(0 < index && index < 0x10000, "sanity check");
_indices = index;
_f1 = NULL;
_f1 = nullptr;
_f2 = _flags = 0;
assert(constant_pool_index() == index, "");
}
@ -101,7 +101,7 @@ void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
assert(f1 != NULL, "");
assert(f1 != nullptr, "");
Atomic::release_store(&_f1, f1);
}
@ -158,12 +158,12 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co
int vtable_index,
bool sender_is_interface) {
bool is_vtable_call = (vtable_index >= 0); // FIXME: split this method on this boolean
assert(method->interpreter_entry() != NULL, "should have been set at this point");
assert(method->interpreter_entry() != nullptr, "should have been set at this point");
assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
int byte_no = -1;
bool change_to_virtual = false;
InstanceKlass* holder = NULL; // have to declare this outside the switch
InstanceKlass* holder = nullptr; // have to declare this outside the switch
switch (invoke_code) {
case Bytecodes::_invokeinterface:
holder = method->method_holder();
@ -396,7 +396,7 @@ void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle&
( 1 << is_final_shift ),
adapter->size_of_parameters());
LogStream* log_stream = NULL;
LogStream* log_stream = nullptr;
LogStreamHandle(Debug, methodhandles, indy) lsh_indy;
if (lsh_indy.is_enabled()) {
ResourceMark rm;
@ -443,7 +443,7 @@ void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle&
set_bytecode_1(invoke_code);
NOT_PRODUCT(verify(tty));
if (log_stream != NULL) {
if (log_stream != nullptr) {
this->print(log_stream, 0, cpool->cache());
}
@ -483,7 +483,7 @@ Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpo
Bytecodes::Code invoke_code = bytecode_1();
if (invoke_code != (Bytecodes::Code)0) {
Metadata* f1 = f1_ord();
if (f1 != NULL) {
if (f1 != nullptr) {
switch (invoke_code) {
case Bytecodes::_invokeinterface:
assert(f1->is_klass(), "");
@ -521,13 +521,13 @@ Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpo
break;
}
}
return NULL;
return nullptr;
}
oop ConstantPoolCacheEntry::appendix_if_resolved(const constantPoolHandle& cpool) const {
if (!has_appendix())
return NULL;
return nullptr;
const int ref_index = f2_as_index();
return cpool->resolved_reference_at(ref_index);
}
@ -563,7 +563,7 @@ void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
return;
}
assert (_f1 != NULL, "should not call with uninteresting entry");
assert (_f1 != nullptr, "should not call with uninteresting entry");
if (!(_f1->is_method())) {
// _f1 is a Klass* for an interface, _f2 is the method
@ -581,7 +581,7 @@ void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
Method* m = get_interesting_method_entry();
// return false if m refers to a non-deleted old or obsolete method
if (m != NULL) {
if (m != nullptr) {
assert(m->is_valid() && m->is_method(), "m is a valid method");
return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete
} else {
@ -592,15 +592,15 @@ bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
Method* ConstantPoolCacheEntry::get_interesting_method_entry() {
if (!is_method_entry()) {
// not a method entry so not interesting by default
return NULL;
return nullptr;
}
Method* m = NULL;
Method* m = nullptr;
if (is_vfinal()) {
// virtual and final so _f2 contains method ptr instead of vtable index
m = f2_as_vfinal_method();
} else if (is_f1_null()) {
// NULL _f1 means this is a virtual entry so also not interesting
return NULL;
// null _f1 means this is a virtual entry so also not interesting
return nullptr;
} else {
if (!(_f1->is_method())) {
// _f1 is a Klass* for an interface
@ -609,9 +609,9 @@ Method* ConstantPoolCacheEntry::get_interesting_method_entry() {
m = f1_as_method();
}
}
assert(m != NULL && m->is_method(), "sanity check");
if (m == NULL || !m->is_method()) {
return NULL;
assert(m != nullptr && m->is_method(), "sanity check");
if (m == nullptr || !m->is_method()) {
return nullptr;
}
return m;
}
@ -729,14 +729,14 @@ void ConstantPoolCache::remove_unshareable_info() {
// <this> is the copy to be written into the archive. It's in the ArchiveBuilder's "buffer space".
// However, this->_initial_entries was not copied/relocated by the ArchiveBuilder, so it's
// still pointing to the array allocated inside save_for_archive().
assert(_initial_entries != NULL, "archived cpcache must have been initialized");
assert(_initial_entries != nullptr, "archived cpcache must have been initialized");
assert(!ArchiveBuilder::current()->is_in_buffer_space(_initial_entries), "must be");
for (int i=0; i<length(); i++) {
// Restore each entry to the initial state -- just after Rewriter::make_constant_pool_cache()
// has finished.
*entry_at(i) = _initial_entries->at(i);
}
_initial_entries = NULL;
_initial_entries = nullptr;
}
#endif // INCLUDE_CDS
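remove_unshareable_info above rolls every cache entry back to the snapshot (_initial_entries) captured right after Rewriter::make_constant_pool_cache, so the archived copy carries no runtime resolution state. A generic sketch of that snapshot-and-rollback shape (plain container types for illustration, not the Metaspace arrays the real code uses):

#include <cstddef>
#include <vector>

template <typename Entry>
struct Snapshotted {
  std::vector<Entry> entries;                // mutates as resolution happens
  std::vector<Entry>* initial = nullptr;     // pristine copy, null until saved

  void save_for_archive() { initial = new std::vector<Entry>(entries); }

  void remove_unshareable_info() {
    for (std::size_t i = 0; i < entries.size(); i++) {
      entries[i] = (*initial)[i];            // drop runtime resolution state
    }
    delete initial;
    initial = nullptr;
  }
};

int main() {
  Snapshotted<int> cache;
  cache.entries = {0, 0, 0};
  cache.save_for_archive();
  cache.entries[1] = 42;                     // "resolved" at runtime
  cache.remove_unshareable_info();           // rolled back before archiving
  return cache.entries[1] == 0 ? 0 : 1;
}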
@ -745,12 +745,12 @@ void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
data->remove_handle(_resolved_references);
set_resolved_references(OopHandle());
MetadataFactory::free_array<u2>(data, _reference_map);
set_reference_map(NULL);
set_reference_map(nullptr);
#if INCLUDE_CDS
if (_initial_entries != NULL) {
if (_initial_entries != nullptr) {
Arguments::assert_is_dumping_archive();
MetadataFactory::free_array<ConstantPoolCacheEntry>(data, _initial_entries);
_initial_entries = NULL;
_initial_entries = nullptr;
}
#endif
}
@ -758,7 +758,7 @@ void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
#if INCLUDE_CDS_JAVA_HEAP
oop ConstantPoolCache::archived_references() {
if (_archived_references_index < 0) {
return NULL;
return nullptr;
}
return HeapShared::get_root(_archived_references_index);
}
@ -784,7 +784,7 @@ void ConstantPoolCache::adjust_method_entries(bool * trace_name_printed) {
for (int i = 0; i < length(); i++) {
ConstantPoolCacheEntry* entry = entry_at(i);
Method* old_method = entry->get_interesting_method_entry();
if (old_method == NULL || !old_method->is_old()) {
if (old_method == nullptr || !old_method->is_old()) {
continue; // skip uninteresting entries
}
if (old_method->is_deleted()) {
@ -802,7 +802,7 @@ bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
ResourceMark rm;
for (int i = 1; i < length(); i++) {
Method* m = entry_at(i)->get_interesting_method_entry();
if (m != NULL && !entry_at(i)->check_no_old_or_obsolete_entries()) {
if (m != nullptr && !entry_at(i)->check_no_old_or_obsolete_entries()) {
log_trace(redefine, class, update, constantpool)
("cpcache check found old method entry: class: %s, old: %d, obsolete: %d, method: %s",
constant_pool()->pool_holder()->external_name(), m->is_old(), m->is_obsolete(), m->external_name());
@ -814,7 +814,7 @@ bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
void ConstantPoolCache::dump_cache() {
for (int i = 1; i < length(); i++) {
if (entry_at(i)->get_interesting_method_entry() != NULL) {
if (entry_at(i)->get_interesting_method_entry() != nullptr) {
entry_at(i)->print(tty, i, this);
}
}

View File

@ -145,7 +145,7 @@ class ConstantPoolCacheEntry {
void set_bytecode_2(Bytecodes::Code code);
void set_f1(Metadata* f1) {
Metadata* existing_f1 = _f1; // read once
assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
assert(existing_f1 == nullptr || existing_f1 == f1, "illegal field change");
_f1 = f1;
}
void release_set_f1(Metadata* f1);
@ -226,7 +226,7 @@ class ConstantPoolCacheEntry {
private:
void set_direct_or_vtable_call(
Bytecodes::Code invoke_code, // the bytecode used for invoking the method
const methodHandle& method, // the method/prototype if any (NULL, otherwise)
const methodHandle& method, // the method/prototype if any (null, otherwise)
int vtable_index, // the vtable index if any, else negative
bool sender_is_interface
);
@ -442,7 +442,7 @@ class ConstantPoolCache: public MetaspaceObj {
void metaspace_pointers_do(MetaspaceClosure* it);
MetaspaceObj::Type type() const { return ConstantPoolCacheType; }
oop archived_references() NOT_CDS_JAVA_HEAP_RETURN_(NULL);
oop archived_references() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
void set_archived_references(int root_index) NOT_CDS_JAVA_HEAP_RETURN;
void clear_archived_references() NOT_CDS_JAVA_HEAP_RETURN;

View File

@ -57,16 +57,16 @@ inline Method* ConstantPoolCacheEntry::f2_as_interface_method() const {
inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)Atomic::load_acquire(&_f1); }
inline Method* ConstantPoolCacheEntry::f1_as_method() const {
Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), "");
Metadata* f1 = f1_ord(); assert(f1 == nullptr || f1->is_method(), "");
return (Method*)f1;
}
inline Klass* ConstantPoolCacheEntry::f1_as_klass() const {
Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), "");
Metadata* f1 = f1_ord(); assert(f1 == nullptr || f1->is_klass(), "");
return (Klass*)f1;
}
inline bool ConstantPoolCacheEntry::is_f1_null() const { Metadata* f1 = f1_ord(); return f1 == NULL; }
inline bool ConstantPoolCacheEntry::is_f1_null() const { Metadata* f1 = f1_ord(); return f1 == nullptr; }
inline bool ConstantPoolCacheEntry::has_appendix() const {
return (!is_f1_null()) && (_flags & (1 << has_appendix_shift)) != 0;
@ -89,7 +89,7 @@ inline ConstantPoolCache::ConstantPoolCache(int length,
const intStack& invokedynamic_inverse_index_map,
const intStack& invokedynamic_references_map) :
_length(length),
_constant_pool(NULL),
_constant_pool(nullptr),
_gc_epoch(0) {
CDS_JAVA_HEAP_ONLY(_archived_references_index = -1;)
initialize(inverse_index_map, invokedynamic_inverse_index_map,


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -126,7 +126,7 @@ class FieldStreamBase : public StackObj {
int index = _fields->at(_generic_signature_slot);
return _constants->symbol_at(index);
} else {
return NULL;
return nullptr;
}
}


@ -280,7 +280,7 @@ RetTableEntry* RetTable::find_jsrs_for_target(int targBci) {
cur = cur->next();
}
ShouldNotReachHere();
return NULL;
return nullptr;
}
// The instruction at bci is changing size by "delta". Update the return map.
@ -418,7 +418,7 @@ void GenerateOopMap::mark_bbheaders_and_count_gc_points() {
// First mark all exception handlers as start of a basic-block
ExceptionTable excps(method());
for(int i = 0; i < excps.length(); i ++) {
bb_mark_fct(this, excps.handler_pc(i), NULL);
bb_mark_fct(this, excps.handler_pc(i), nullptr);
}
// Then iterate through the code
@ -429,19 +429,19 @@ void GenerateOopMap::mark_bbheaders_and_count_gc_points() {
int bci = bcs.bci();
if (!fellThrough)
bb_mark_fct(this, bci, NULL);
bb_mark_fct(this, bci, nullptr);
fellThrough = jump_targets_do(&bcs, &GenerateOopMap::bb_mark_fct, NULL);
fellThrough = jump_targets_do(&bcs, &GenerateOopMap::bb_mark_fct, nullptr);
/* We will also mark successors of jsr's as basic block headers. */
switch (bytecode) {
case Bytecodes::_jsr:
assert(!fellThrough, "should not happen");
bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), NULL);
bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), nullptr);
break;
case Bytecodes::_jsr_w:
assert(!fellThrough, "should not happen");
bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), NULL);
bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), nullptr);
break;
default:
break;
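
This pass is the classic basic-block leader computation: exception handler entry points, jump targets, and the instructions following jsr/jsr_w (and other non-fall-through bytecodes) all start a new block. A toy version over a simplified instruction stream, assuming a flat Insn record rather than HotSpot's BytecodeStream:

#include <set>
#include <vector>

struct Insn { int bci; bool is_branch; int target; bool falls_through; };

std::set<int> find_leaders(const std::vector<Insn>& code,
                           const std::vector<int>& handler_bcis) {
  std::set<int> leaders;
  if (!code.empty()) leaders.insert(code.front().bci);      // method entry
  for (int h : handler_bcis) leaders.insert(h);             // exception handlers
  for (size_t i = 0; i < code.size(); i++) {
    const Insn& in = code[i];
    if (in.is_branch) {
      leaders.insert(in.target);                            // every jump target
      if (i + 1 < code.size()) leaders.insert(code[i + 1].bci);  // jsr-style successor
    } else if (!in.falls_through && i + 1 < code.size()) {
      leaders.insert(code[i + 1].bci);                      // after goto/return/ret
    }
  }
  return leaders;
}
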
@ -649,7 +649,7 @@ BasicBlock *GenerateOopMap::get_basic_block_containing(int bci) const {
}
fatal("should have found BB");
return NULL;
return nullptr;
}
void GenerateOopMap::restore_state(BasicBlock *bb)
@ -672,11 +672,11 @@ int GenerateOopMap::next_bb_start_pc(BasicBlock *bb) {
//
// Allocate memory and throw LinkageError if failure.
#define ALLOC_RESOURCE_ARRAY(var, type, count) \
var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count); \
if (var == NULL) { \
#define ALLOC_RESOURCE_ARRAY(var, type, count) \
var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count); \
if (var == nullptr) { \
report_error("Cannot reserve enough memory to analyze this method"); \
return; \
return; \
}
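
Because the macro ends with a bare return, it can only early-out of void functions, which is exactly how the analysis methods use it: on resource exhaustion they record the error and abandon the method. A stand-alone approximation of the shape, with malloc standing in for HotSpot's resource arena:

#include <cstdio>
#include <cstdlib>

// Illustrative stand-ins for NEW_RESOURCE_ARRAY_RETURN_NULL / report_error.
#define NEW_ARRAY_RETURN_NULL(type, count) \
  static_cast<type*>(malloc(sizeof(type) * (count)))

#define ALLOC_ARRAY_OR_RETURN(var, type, count)                               \
  var = NEW_ARRAY_RETURN_NULL(type, count);                                   \
  if (var == nullptr) {                                                       \
    fprintf(stderr, "Cannot reserve enough memory to analyze this method\n"); \
    return; /* bare return: usable only in functions returning void */        \
  }

void analyze(int n) {
  int* scratch;
  ALLOC_ARRAY_OR_RETURN(scratch, int, n);  // early-outs on allocation failure
  // ... use scratch ...
  free(scratch);
}
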
@ -801,7 +801,7 @@ void GenerateOopMap::copy_state(CellTypeState *dst, CellTypeState *src) {
// monitor matching is purely informational and doesn't say anything
// about the correctness of the code.
void GenerateOopMap::merge_state_into_bb(BasicBlock *bb) {
guarantee(bb != NULL, "null basicblock");
guarantee(bb != nullptr, "null basicblock");
assert(bb->is_alive(), "merging state into a dead basicblock");
if (_stack_top == bb->_stack_top) {
@ -1157,13 +1157,13 @@ void GenerateOopMap::interp_bb(BasicBlock *bb) {
}
interp1(&itr);
bool fall_through = jump_targets_do(&itr, GenerateOopMap::merge_state, NULL);
bool fall_through = jump_targets_do(&itr, GenerateOopMap::merge_state, nullptr);
if (_got_error) return;
if (itr.code() == Bytecodes::_ret) {
assert(!fall_through, "cannot be set if ret instruction");
// Automatically handles 'wide' ret indices
ret_jump_targets_do(&itr, GenerateOopMap::merge_state, itr.get_index(), NULL);
ret_jump_targets_do(&itr, GenerateOopMap::merge_state, itr.get_index(), nullptr);
} else if (fall_through) {
// Hit end of BB, but the instr. was a fall-through instruction,
// so perform transition as if the BB ended in a "jump".
@ -1226,7 +1226,7 @@ void GenerateOopMap::do_exception_edge(BytecodeStream* itr) {
if (start_pc <= bci && bci < end_pc) {
BasicBlock *excBB = get_basic_block_at(handler_pc);
guarantee(excBB != NULL, "no basic block for exception");
guarantee(excBB != nullptr, "no basic block for exception");
CellTypeState *excStk = excBB->stack();
CellTypeState *cOpStck = stack();
CellTypeState cOpStck_0 = cOpStck[0];
@ -1840,7 +1840,7 @@ void GenerateOopMap::do_monitorexit(int bci) {
// possibility that this bytecode will throw an
// exception.
BasicBlock* bb = get_basic_block_containing(bci);
guarantee(bb != NULL, "no basic block for bci");
guarantee(bb != nullptr, "no basic block for bci");
bb->set_changed(true);
bb->_monitor_top = bad_monitors;
@ -2067,7 +2067,7 @@ GenerateOopMap::GenerateOopMap(const methodHandle& method) {
// We have to initialize all variables here, that can be queried directly
_method = method;
_max_locals=0;
_init_vars = NULL;
_init_vars = nullptr;
#ifndef PRODUCT
// If we are doing a detailed trace, include the regular trace information.
@ -2088,7 +2088,7 @@ bool GenerateOopMap::compute_map(Thread* current) {
}
#endif
TraceTime t_single("oopmap time", TimeOopMap2);
TraceTime t_all(NULL, &_total_oopmap_time, TimeOopMap);
TraceTime t_all(nullptr, &_total_oopmap_time, TimeOopMap);
// Initialize values
_got_error = false;
@ -2100,7 +2100,7 @@ bool GenerateOopMap::compute_map(Thread* current) {
_init_vars = new GrowableArray<intptr_t>(5); // There are seldom more than 5 init_vars
_report_result = false;
_report_result_for_send = false;
_new_var_map = NULL;
_new_var_map = nullptr;
_ret_adr_tos = new GrowableArray<intptr_t>(5); // 5 seems like a good number;
_did_rewriting = false;
_did_relocation = false;
@ -2218,7 +2218,7 @@ void GenerateOopMap::result_for_basicblock(int bci) {
// Find basicblock and report results
BasicBlock* bb = get_basic_block_containing(bci);
guarantee(bb != NULL, "no basic block for bci");
guarantee(bb != nullptr, "no basic block for bci");
assert(bb->is_reachable(), "getting result from unreachable basicblock");
bb->set_changed(true);
interp_bb(bb);
@ -2278,7 +2278,7 @@ void GenerateOopMap::rewrite_refval_conflicts()
method()->print_codes();
}
assert(_new_var_map!=NULL, "nothing to rewrite");
assert(_new_var_map!=nullptr, "nothing to rewrite");
assert(_conflict==true, "We should not be here");
compute_ret_adr_at_TOS();
@ -2302,7 +2302,7 @@ void GenerateOopMap::rewrite_refval_conflicts()
_max_locals += _nof_refval_conflicts;
// That was that...
_new_var_map = NULL;
_new_var_map = nullptr;
_nof_refval_conflicts = 0;
}
@ -2502,7 +2502,7 @@ bool GenerateOopMap::stack_top_holds_ret_addr(int bci) {
}
void GenerateOopMap::compute_ret_adr_at_TOS() {
assert(_ret_adr_tos != NULL, "must be initialized");
assert(_ret_adr_tos != nullptr, "must be initialized");
_ret_adr_tos->clear();
for (int i = 0; i < bb_count(); i++) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ class RetTable {
void add_jsr(int return_bci, int target_bci); // Adds entry to list
public:
RetTable() { _first = NULL; }
RetTable() { _first = nullptr; }
void compute_ret_table(const methodHandle& method);
void update_ret_table(int bci, int delta);
RetTableEntry* find_jsrs_for_target(int targBci);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ inline void InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* c
if (Devirtualizer::do_metadata(closure)) {
ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
// cld can be null if we have a non-registered class loader.
if (cld != NULL) {
if (cld != nullptr) {
Devirtualizer::do_cld(closure, cld);
}
}
@ -64,7 +64,7 @@ inline void InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosur
if (mr.contains(obj)) {
ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
// cld can be null if we have a non-registered class loader.
if (cld != NULL) {
if (cld != nullptr) {
Devirtualizer::do_cld(closure, cld);
}
}

File diff suppressed because it is too large


@ -78,12 +78,12 @@ public:
};
// Print fields.
// If "obj" argument to constructor is NULL, prints static fields, otherwise prints non-static fields.
// If "obj" argument to constructor is null, prints static fields, otherwise prints non-static fields.
class FieldPrinter: public FieldClosure {
oop _obj;
outputStream* _st;
public:
FieldPrinter(outputStream* st, oop obj = NULL) : _obj(obj), _st(st) {}
FieldPrinter(outputStream* st, oop obj = nullptr) : _obj(obj), _st(st) {}
void do_field(fieldDescriptor* fd);
};
@ -201,9 +201,9 @@ class InstanceKlass: public Klass {
// The contents of the Record attribute.
Array<RecordComponent*>* _record_components;
// the source debug extension for this klass, NULL if not specified.
// the source debug extension for this klass, null if not specified.
// Specified as UTF-8 string without terminating zero byte in the classfile,
// it is stored in the instanceklass as a NULL-terminated UTF-8 string
// it is stored in the instanceklass as a null-terminated UTF-8 string
const char* _source_debug_extension;
// Number of heapOopSize words used by non-static fields in this klass
@ -238,7 +238,7 @@ class InstanceKlass: public Klass {
OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily)
JNIid* _jni_ids; // First JNI identifier for static fields in this class
jmethodID* volatile _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none
jmethodID* volatile _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or null if none
nmethodBucket* volatile _dep_context; // packed DependencyContext structure
uint64_t volatile _dep_context_last_cleaned;
nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class
@ -294,7 +294,7 @@ class InstanceKlass: public Klass {
// The embedded implementor only exists if the current klass is an
// interface. The possible values of the implementor fall into following
// three cases:
// NULL: no implementor.
// null: no implementor.
// A Klass* that's not itself: one implementor.
// Itself: more than one implementors.
//
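
Collapsing "zero, one, or many implementors" into one pointer with a self-sentinel keeps the common one-implementor case cheap to query. A reduced sketch of the encoding, using a plain Node in place of Klass:

struct Node {
  Node* _implementor = nullptr;  // interfaces only: null = none, self = many

  void add_implementor(Node* k) {
    if (_implementor == nullptr)    _implementor = k;    // first: remember it
    else if (_implementor != this)  _implementor = this; // second: collapse to "many"
  }
  bool has_unique_implementor() const {
    return _implementor != nullptr && _implementor != this;
  }
  Node* unique_implementor() const {
    return has_unique_implementor() ? _implementor : nullptr;
  }
};
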
@ -382,12 +382,12 @@ class InstanceKlass: public Klass {
// interfaces
Array<InstanceKlass*>* local_interfaces() const { return _local_interfaces; }
void set_local_interfaces(Array<InstanceKlass*>* a) {
guarantee(_local_interfaces == NULL || a == NULL, "Just checking");
guarantee(_local_interfaces == nullptr || a == nullptr, "Just checking");
_local_interfaces = a; }
Array<InstanceKlass*>* transitive_interfaces() const { return _transitive_interfaces; }
void set_transitive_interfaces(Array<InstanceKlass*>* a) {
guarantee(_transitive_interfaces == NULL || a == NULL, "Just checking");
guarantee(_transitive_interfaces == nullptr || a == nullptr, "Just checking");
_transitive_interfaces = a;
}
@ -406,7 +406,7 @@ class InstanceKlass: public Klass {
Array<u2>* fields() const { return _fields; }
void set_fields(Array<u2>* f, u2 java_fields_count) {
guarantee(_fields == NULL || f == NULL, "Just checking");
guarantee(_fields == nullptr || f == nullptr, "Just checking");
_fields = f;
_java_fields_count = java_fields_count;
}
@ -444,15 +444,15 @@ private:
public:
// Call this only if you know that the nest host has been initialized.
InstanceKlass* nest_host_not_null() {
assert(_nest_host != NULL, "must be");
assert(_nest_host != nullptr, "must be");
return _nest_host;
}
// Used to construct informative IllegalAccessError messages at a higher level,
// if there was an issue resolving or validating the nest host.
// Returns NULL if there was no error.
// Returns null if there was no error.
const char* nest_host_error();
// Returns nest-host class, resolving and validating it if needed.
// Returns NULL if resolution is not possible from the calling context.
// Returns null if resolution is not possible from the calling context.
InstanceKlass* nest_host(TRAPS);
// Check if this klass is a nestmate of k - resolves this nest-host and k's
bool has_nestmate_access_to(InstanceKlass* k, TRAPS);
@ -478,7 +478,7 @@ public:
// package
PackageEntry* package() const { return _package_entry; }
ModuleEntry* module() const;
bool in_unnamed_package() const { return (_package_entry == NULL); }
bool in_unnamed_package() const { return (_package_entry == nullptr); }
void set_package(ClassLoaderData* loader_data, PackageEntry* pkg_entry, TRAPS);
// If the package for the InstanceKlass is in the boot loader's package entry
// table then sets the classpath_index field so that
@ -583,7 +583,7 @@ public:
_disable_method_binary_search = true;
}
// find a local method (returns NULL if not found)
// find a local method (returns null if not found)
Method* find_method(const Symbol* name, const Symbol* signature) const;
static Method* find_method(const Array<Method*>* methods,
const Symbol* name,
@ -597,14 +597,14 @@ public:
const Symbol* signature,
PrivateLookupMode private_mode);
// find a local method (returns NULL if not found)
// find a local method (returns null if not found)
Method* find_local_method(const Symbol* name,
const Symbol* signature,
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode) const;
// find a local method from given methods array (returns NULL if not found)
// find a local method from given methods array (returns null if not found)
static Method* find_local_method(const Array<Method*>* methods,
const Symbol* name,
const Symbol* signature,
@ -620,18 +620,18 @@ public:
StaticLookupMode static_mode,
PrivateLookupMode private_mode);
// lookup operation (returns NULL if not found)
// lookup operation (returns null if not found)
Method* uncached_lookup_method(const Symbol* name,
const Symbol* signature,
OverpassLookupMode overpass_mode,
PrivateLookupMode private_mode = PrivateLookupMode::find) const;
// lookup a method in all the interfaces that this class implements
// (returns NULL if not found)
// (returns null if not found)
Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature, DefaultsLookupMode defaults_mode) const;
// lookup a method in local defaults then in all interfaces
// (returns NULL if not found)
// (returns null if not found)
Method* lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const;
// Find method indices by name. If a method with the specified name is
@ -706,16 +706,16 @@ public:
InstanceKlass* previous_versions() const { return _previous_versions; }
#else
InstanceKlass* previous_versions() const { return NULL; }
InstanceKlass* previous_versions() const { return nullptr; }
#endif
InstanceKlass* get_klass_version(int version) {
for (InstanceKlass* ik = this; ik != NULL; ik = ik->previous_versions()) {
for (InstanceKlass* ik = this; ik != nullptr; ik = ik->previous_versions()) {
if (ik->constants()->version() == version) {
return ik;
}
}
return NULL;
return nullptr;
}
bool has_been_redefined() const { return _misc_flags.has_been_redefined(); }
@ -736,7 +736,7 @@ public:
#if INCLUDE_JVMTI
void init_previous_versions() {
_previous_versions = NULL;
_previous_versions = nullptr;
}
private:
@ -772,9 +772,9 @@ public:
static bool has_previous_versions_and_reset() { return false; }
void set_cached_class_file(JvmtiCachedClassFileData *data) {
assert(data == NULL, "unexpected call with JVMTI disabled");
assert(data == nullptr, "unexpected call with JVMTI disabled");
}
JvmtiCachedClassFileData * get_cached_class_file() { return (JvmtiCachedClassFileData *)NULL; }
JvmtiCachedClassFileData * get_cached_class_file() { return (JvmtiCachedClassFileData *)nullptr; }
#endif // INCLUDE_JVMTI
@ -819,16 +819,16 @@ public:
void set_annotations(Annotations* anno) { _annotations = anno; }
AnnotationArray* class_annotations() const {
return (_annotations != NULL) ? _annotations->class_annotations() : NULL;
return (_annotations != nullptr) ? _annotations->class_annotations() : nullptr;
}
Array<AnnotationArray*>* fields_annotations() const {
return (_annotations != NULL) ? _annotations->fields_annotations() : NULL;
return (_annotations != nullptr) ? _annotations->fields_annotations() : nullptr;
}
AnnotationArray* class_type_annotations() const {
return (_annotations != NULL) ? _annotations->class_type_annotations() : NULL;
return (_annotations != nullptr) ? _annotations->class_type_annotations() : nullptr;
}
Array<AnnotationArray*>* fields_type_annotations() const {
return (_annotations != NULL) ? _annotations->fields_type_annotations() : NULL;
return (_annotations != nullptr) ? _annotations->fields_type_annotations() : nullptr;
}
// allocation
instanceOop allocate_instance(TRAPS);
@ -925,13 +925,13 @@ public:
}
static const InstanceKlass* cast(const Klass* k) {
assert(k != NULL, "k should not be null");
assert(k != nullptr, "k should not be null");
assert(k->is_instance_klass(), "cast to InstanceKlass");
return static_cast<const InstanceKlass*>(k);
}
virtual InstanceKlass* java_super() const {
return (super() == NULL) ? NULL : cast(super());
return (super() == nullptr) ? nullptr : cast(super());
}
// Sizing (in words)
@ -1085,7 +1085,7 @@ public:
// The RedefineClasses() API can cause new method idnums to be needed
// which will cause the caches to grow. Safety requires different
// cache management logic if the caches can grow instead of just
// going from NULL to non-NULL.
// going from null to non-null.
bool idnum_can_increment() const { return has_been_redefined(); }
inline jmethodID* methods_jmethod_ids_acquire() const;
inline void release_set_methods_jmethod_ids(jmethodID* jmeths);
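
One safe discipline for a cache that grows (rather than flipping once from null to non-null) is to treat each published array as immutable and republish a larger copy: racing readers then see either the old complete snapshot or the new one. A sketch of that idea only; HotSpot's actual jmethodID bookkeeping differs:

#include <atomic>
#include <cstddef>
#include <vector>

// Immutable once published; growth allocates and publishes a larger copy.
struct Snapshot { std::vector<void*> ids; };

struct IdCache {
  std::atomic<Snapshot*> _cur{nullptr};

  void* get(size_t idnum) const {
    Snapshot* s = _cur.load(std::memory_order_acquire);
    return (s != nullptr && idnum < s->ids.size()) ? s->ids[idnum] : nullptr;
  }

  // Single writer, e.g. under a lock: copy, extend, then release-publish.
  void set(size_t idnum, void* id) {
    Snapshot* old = _cur.load(std::memory_order_relaxed);
    Snapshot* next = old ? new Snapshot(*old) : new Snapshot();
    if (next->ids.size() <= idnum) next->ids.resize(idnum + 1, nullptr);
    next->ids[idnum] = id;
    _cur.store(next, std::memory_order_release);  // old snapshot leaked deliberately
  }
};
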
@ -1113,7 +1113,7 @@ private:
void add_initialization_error(JavaThread* current, Handle exception);
oop get_initialization_error(JavaThread* current);
// find a local method (returns NULL if not found)
// find a local method (returns null if not found)
Method* find_method_impl(const Symbol* name,
const Symbol* signature,
OverpassLookupMode overpass_mode,
@ -1246,7 +1246,7 @@ class InnerClassesIterator : public StackObj {
InnerClassesIterator(const InstanceKlass* k) {
_inner_classes = k->inner_classes();
if (k->inner_classes() != NULL) {
if (k->inner_classes() != nullptr) {
_length = _inner_classes->length();
// The inner class array's length should be the multiple of
// inner_class_next_offset if it only contains the InnerClasses
@ -1332,7 +1332,7 @@ class ClassHierarchyIterator : public StackObj {
}
bool done() {
return (_current == NULL);
return (_current == nullptr);
}
// Make a step iterating over the class hierarchy under the root class.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ inline InstanceKlass* volatile* InstanceKlass::adr_implementor() const {
if (is_interface()) {
return (InstanceKlass* volatile*)end_of_nonstatic_oop_maps();
} else {
return NULL;
return nullptr;
}
}
@ -187,9 +187,9 @@ ALWAYSINLINE void InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType
inline instanceOop InstanceKlass::allocate_instance(oop java_class, TRAPS) {
Klass* k = java_lang_Class::as_Klass(java_class);
if (k == NULL) {
if (k == nullptr) {
ResourceMark rm(THREAD);
THROW_(vmSymbols::java_lang_InstantiationException(), NULL);
THROW_(vmSymbols::java_lang_InstantiationException(), nullptr);
}
InstanceKlass* ik = cast(k);
ik->check_valid_for_instantiation(false, CHECK_NULL);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@
int InstanceMirrorKlass::_offset_of_static_fields = 0;
size_t InstanceMirrorKlass::instance_size(Klass* k) {
if (k != NULL && k->is_instance_klass()) {
if (k != nullptr && k->is_instance_klass()) {
return align_object_size(size_helper() + InstanceKlass::cast(k)->static_field_size());
}
return size_helper();
@ -61,7 +61,7 @@ size_t InstanceMirrorKlass::oop_size(oop obj) const {
int InstanceMirrorKlass::compute_static_oop_field_count(oop obj) {
Klass* k = java_lang_Class::as_Klass(obj);
if (k != NULL && k->is_instance_klass()) {
if (k != nullptr && k->is_instance_klass()) {
return InstanceKlass::cast(k)->static_oop_field_count();
}
return 0;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,9 +52,9 @@ void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
if (Devirtualizer::do_metadata(closure)) {
Klass* klass = java_lang_Class::as_Klass(obj);
// We'll get NULL for primitive mirrors.
if (klass != NULL) {
if (klass->class_loader_data() == NULL) {
// We'll get null for primitive mirrors.
if (klass != nullptr) {
if (klass->class_loader_data() == nullptr) {
// This is a mirror that belongs to a shared class that has not been loaded yet.
// It's only reachable via HeapShared::roots(). All of its fields should be zero
// so there's no need to scan.
@ -71,7 +71,7 @@ void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
Devirtualizer::do_klass(closure, klass);
}
} else {
// We would like to assert here (as below) that if klass has been NULL, then
// We would like to assert here (as below) that if klass has been null, then
// this has been a mirror for a primitive type that we do not need to follow
// as they are always strong roots.
// However, we might get across a klass that just changed during CMS concurrent
@ -125,8 +125,8 @@ void InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closu
if (Devirtualizer::do_metadata(closure)) {
if (mr.contains(obj)) {
Klass* klass = java_lang_Class::as_Klass(obj);
// We'll get NULL for primitive mirrors.
if (klass != NULL) {
// We'll get null for primitive mirrors.
if (klass != nullptr) {
Devirtualizer::do_klass(closure, klass);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,12 +112,12 @@ void InstanceRefKlass::oop_verify_on(oop obj, outputStream* st) {
InstanceKlass::oop_verify_on(obj, st);
// Verify referent field
oop referent = java_lang_ref_Reference::unknown_referent_no_keepalive(obj);
if (referent != NULL) {
if (referent != nullptr) {
guarantee(oopDesc::is_oop(referent), "referent field heap failed");
}
// Additional verification for next field, which must be a Reference or null
oop next = java_lang_ref_Reference::next(obj);
if (next != NULL) {
if (next != nullptr) {
guarantee(oopDesc::is_oop(next), "next field should be an oop");
guarantee(next->is_instanceRef(), "next field verify failed");
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,9 +66,9 @@ static inline oop load_referent(oop obj, ReferenceType type) {
template <typename T, class OopClosureType>
bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) {
ReferenceDiscoverer* rd = closure->ref_discoverer();
if (rd != NULL) {
if (rd != nullptr) {
oop referent = load_referent(obj, type);
if (referent != NULL) {
if (referent != nullptr) {
if (!referent->is_gc_marked()) {
// Only try to discover if not yet marked.
return rd->discover_reference(obj, type);
@ -100,14 +100,14 @@ void InstanceRefKlass::oop_oop_iterate_discovered_and_discovery(oop obj, Referen
template <typename T, class OopClosureType, class Contains>
void InstanceRefKlass::oop_oop_iterate_fields(oop obj, OopClosureType* closure, Contains& contains) {
assert(closure->ref_discoverer() == NULL, "ReferenceDiscoverer should not be set");
assert(closure->ref_discoverer() == nullptr, "ReferenceDiscoverer should not be set");
do_referent<T>(obj, closure, contains);
do_discovered<T>(obj, closure, contains);
}
template <typename T, class OopClosureType, class Contains>
void InstanceRefKlass::oop_oop_iterate_fields_except_referent(oop obj, OopClosureType* closure, Contains& contains) {
assert(closure->ref_discoverer() == NULL, "ReferenceDiscoverer should not be set");
assert(closure->ref_discoverer() == nullptr, "ReferenceDiscoverer should not be set");
do_discovered<T>(obj, closure, contains);
}
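
The Contains parameter threaded through these helpers is what lets one field-visiting routine back both the unbounded iterators and the MemRegion-bounded ones, with the always-true filter compiling away. The idiom, detached from oops and closures:

struct AlwaysContains {              // unbounded iteration: filter folds away
  bool operator()(const void*) const { return true; }
};

struct RegionContains {              // bounded iteration: visit only [from, to)
  const char* _from;
  const char* _to;
  bool operator()(const void* p) const {
    const char* q = static_cast<const char*>(p);
    return q >= _from && q < _to;
  }
};

template <typename Visitor, typename Contains>
void visit_fields(void* fields[], int n, Visitor visit, Contains& contains) {
  for (int i = 0; i < n; i++) {
    if (contains(fields[i])) visit(fields[i]);  // predicate chosen at compile time
  }
}
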


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -235,7 +235,7 @@ public:
void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, outputStream* st) {
if (c == nullptr) {
st->print_cr("CHUNK NULL");
st->print_cr("CHUNK null");
return;
}


@ -83,7 +83,7 @@ void Klass::set_is_cloneable() {
void Klass::set_name(Symbol* n) {
_name = n;
if (_name != NULL) _name->increment_refcount();
if (_name != nullptr) _name->increment_refcount();
if (Arguments::is_dumping_archive() && is_instance_klass()) {
SystemDictionaryShared::init_dumptime_info(InstanceKlass::cast(this));
@ -96,7 +96,7 @@ bool Klass::is_subclass_of(const Klass* k) const {
Klass* t = const_cast<Klass*>(this)->super();
while (t != NULL) {
while (t != nullptr) {
if (t == k) return true;
t = t->super();
}
@ -104,7 +104,7 @@ bool Klass::is_subclass_of(const Klass* k) const {
}
void Klass::release_C_heap_structures(bool release_constant_pool) {
if (_name != NULL) _name->decrement_refcount();
if (_name != nullptr) _name->decrement_refcount();
}
bool Klass::search_secondary_supers(Klass* k) const {
@ -131,7 +131,7 @@ Klass *Klass::up_cast_abstract() {
Klass *r = this;
while( r->is_abstract() ) { // Receiver is abstract?
Klass *s = r->subklass(); // Check for exactly 1 subklass
if (s == NULL || s->next_sibling() != NULL) // Oops; wrong count; give up
if (s == nullptr || s->next_sibling() != nullptr) // Oops; wrong count; give up
return this; // Return 'this' as a no-progress flag
r = s; // Loop till find concrete class
}
@ -159,7 +159,7 @@ void Klass::check_valid_for_instantiation(bool throwError, TRAPS) {
void Klass::copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS) {
ResourceMark rm(THREAD);
assert(s != NULL, "Throw NPE!");
assert(s != nullptr, "Throw NPE!");
THROW_MSG(vmSymbols::java_lang_ArrayStoreException(),
err_msg("arraycopy: source type %s is not an array", s->klass()->external_name()));
}
@ -176,7 +176,7 @@ Klass* Klass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
" wrap return value in a mirror object.");
#endif
ShouldNotReachHere();
return NULL;
return nullptr;
}
Method* Klass::uncached_lookup_method(const Symbol* name, const Symbol* signature,
@ -188,7 +188,7 @@ Method* Klass::uncached_lookup_method(const Symbol* name, const Symbol* signatur
" wrap return value in a mirror object.");
#endif
ShouldNotReachHere();
return NULL;
return nullptr;
}
void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
@ -228,7 +228,7 @@ jint Klass::array_layout_helper(BasicType etype) {
}
bool Klass::can_be_primary_super_slow() const {
if (super() == NULL)
if (super() == nullptr)
return true;
else if (super()->super_depth() >= primary_super_limit()-1)
return false;
@ -237,12 +237,12 @@ bool Klass::can_be_primary_super_slow() const {
}
void Klass::initialize_supers(Klass* k, Array<InstanceKlass*>* transitive_interfaces, TRAPS) {
if (k == NULL) {
set_super(NULL);
if (k == nullptr) {
set_super(nullptr);
_primary_supers[0] = this;
assert(super_depth() == 0, "Object must already be initialized properly");
} else if (k != super() || k == vmClasses::Object_klass()) {
assert(super() == NULL || super() == vmClasses::Object_klass(),
assert(super() == nullptr || super() == vmClasses::Object_klass(),
"initialize this only once to a non-trivial value");
set_super(k);
Klass* sup = k;
@ -273,9 +273,9 @@ void Klass::initialize_supers(Klass* k, Array<InstanceKlass*>* transitive_interf
j = t->super_depth();
}
for (juint j1 = j+1; j1 < primary_super_limit(); j1++) {
assert(primary_super_of_depth(j1) == NULL, "super list padding");
assert(primary_super_of_depth(j1) == nullptr, "super list padding");
}
while (t != NULL) {
while (t != nullptr) {
assert(primary_super_of_depth(j) == t, "super list initialization");
t = t->super();
--j;
@ -285,14 +285,14 @@ void Klass::initialize_supers(Klass* k, Array<InstanceKlass*>* transitive_interf
#endif
}
if (secondary_supers() == NULL) {
if (secondary_supers() == nullptr) {
// Now compute the list of secondary supertypes.
// Secondaries can occasionally be on the super chain,
// if the inline "_primary_supers" array overflows.
int extras = 0;
Klass* p;
for (p = super(); !(p == NULL || p->can_be_primary_super()); p = p->super()) {
for (p = super(); !(p == nullptr || p->can_be_primary_super()); p = p->super()) {
++extras;
}
@ -300,14 +300,14 @@ void Klass::initialize_supers(Klass* k, Array<InstanceKlass*>* transitive_interf
// Compute the "real" non-extra secondaries.
GrowableArray<Klass*>* secondaries = compute_secondary_supers(extras, transitive_interfaces);
if (secondaries == NULL) {
if (secondaries == nullptr) {
// secondary_supers set by compute_secondary_supers
return;
}
GrowableArray<Klass*>* primaries = new GrowableArray<Klass*>(extras);
for (p = super(); !(p == NULL || p->can_be_primary_super()); p = p->super()) {
for (p = super(); !(p == nullptr || p->can_be_primary_super()); p = p->super()) {
int i; // Scan for overflow primaries being duplicates of 2nd'arys
// This happens frequently for very deeply nested arrays: the
@ -339,9 +339,9 @@ void Klass::initialize_supers(Klass* k, Array<InstanceKlass*>* transitive_interf
}
#ifdef ASSERT
// We must not copy any NULL placeholders left over from bootstrap.
// We must not copy any null placeholders left over from bootstrap.
for (int j = 0; j < s2->length(); j++) {
assert(s2->at(j) != NULL, "correct bootstrapping order");
assert(s2->at(j) != nullptr, "correct bootstrapping order");
}
#endif
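
What the padded _primary_supers display buys is a constant-time subtype test: for supertypes within the primary limit, X subtypes S exactly when X's display holds S at S's own depth, i.e. one load and one compare. A reduced sketch of that check (the display here is illustrative, not the Klass layout):

#include <cassert>

const int PRIMARY_LIMIT = 8;   // stands in for primary_super_limit()

struct K {
  int _depth;                  // distance from the root of the class hierarchy
  K*  _display[PRIMARY_LIMIT]; // _display[i] = this type's supertype at depth i

  // One load and one compare, valid while s fits in the primary display.
  bool is_subtype_of(const K* s) const {
    assert(s->_depth < PRIMARY_LIMIT && "secondary supers need the slow path");
    return _display[s->_depth] == s;
  }
};
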
@ -352,16 +352,16 @@ void Klass::initialize_supers(Klass* k, Array<InstanceKlass*>* transitive_interf
GrowableArray<Klass*>* Klass::compute_secondary_supers(int num_extra_slots,
Array<InstanceKlass*>* transitive_interfaces) {
assert(num_extra_slots == 0, "override for complex klasses");
assert(transitive_interfaces == NULL, "sanity");
assert(transitive_interfaces == nullptr, "sanity");
set_secondary_supers(Universe::the_empty_klass_array());
return NULL;
return nullptr;
}
// superklass links
InstanceKlass* Klass::superklass() const {
assert(super() == NULL || super()->is_instance_klass(), "must be instance klass");
return _super == NULL ? NULL : InstanceKlass::cast(_super);
assert(super() == nullptr || super()->is_instance_klass(), "must be instance klass");
return _super == nullptr ? nullptr : InstanceKlass::cast(_super);
}
// subklass links. Used by the compiler (and vtable initialization)
@ -371,7 +371,7 @@ Klass* Klass::subklass(bool log) const {
// Need load_acquire on the _subklass, because it races with inserts that
// publishes freshly initialized data.
for (Klass* chain = Atomic::load_acquire(&_subklass);
chain != NULL;
chain != nullptr;
// Do not need load_acquire on _next_sibling, because inserts never
// create _next_sibling edges to dead data.
chain = Atomic::load(&chain->_next_sibling))
@ -385,14 +385,14 @@ Klass* Klass::subklass(bool log) const {
}
}
}
return NULL;
return nullptr;
}
Klass* Klass::next_sibling(bool log) const {
// Do not need load_acquire on _next_sibling, because inserts never
// create _next_sibling edges to dead data.
for (Klass* chain = Atomic::load(&_next_sibling);
chain != NULL;
chain != nullptr;
chain = Atomic::load(&chain->_next_sibling)) {
// Only return alive klass, there may be stale klass
// in this chain if cleaned concurrently.
@ -405,7 +405,7 @@ Klass* Klass::next_sibling(bool log) const {
}
}
}
return NULL;
return nullptr;
}
void Klass::set_subklass(Klass* s) {
@ -428,9 +428,9 @@ void Klass::append_to_sibling_list() {
debug_only(verify();)
// add ourselves to superklass' subklass list
InstanceKlass* super = superklass();
if (super == NULL) return; // special case: class Object
if (super == nullptr) return; // special case: class Object
assert((!super->is_interface() // interfaces cannot be supers
&& (super->superklass() == NULL || !is_interface())),
&& (super->superklass() == nullptr || !is_interface())),
"an interface can only be a subklass of Object");
// Make sure there is no stale subklass head
@ -438,7 +438,7 @@ void Klass::append_to_sibling_list() {
for (;;) {
Klass* prev_first_subklass = Atomic::load_acquire(&_super->_subklass);
if (prev_first_subklass != NULL) {
if (prev_first_subklass != nullptr) {
// set our sibling to be the superklass' previous first subklass
assert(prev_first_subklass->is_loader_alive(), "May not attach not alive klasses");
set_next_sibling(prev_first_subklass);
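
The enclosing loop is a lock-free prepend: the new klass's sibling link is written before the head is swung, so a reader that acquire-loads _subklass always sees a fully linked node. The bare pattern with std::atomic, whose compare_exchange plays the role of HotSpot's Atomic::cmpxchg:

#include <atomic>

struct Node {
  Node* _next_sibling = nullptr;     // plain store: published only via the head CAS
  std::atomic<Node*> _subhead{nullptr};
};

void prepend_subklass(Node* super, Node* me) {
  for (;;) {
    Node* first = super->_subhead.load(std::memory_order_acquire);
    me->_next_sibling = first;       // link fully before publishing
    // A successful release CAS publishes 'me' and its sibling link together.
    if (super->_subhead.compare_exchange_weak(first, me,
                                              std::memory_order_release,
                                              std::memory_order_acquire)) {
      return;
    }
    // Lost the race: 'first' was reloaded by the CAS; loop and relink.
  }
}
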
@ -457,7 +457,7 @@ void Klass::clean_subklass() {
for (;;) {
// Need load_acquire, due to contending with concurrent inserts
Klass* subklass = Atomic::load_acquire(&_subklass);
if (subklass == NULL || subklass->is_loader_alive()) {
if (subklass == nullptr || subklass->is_loader_alive()) {
return;
}
// Try to fix _subklass until it points at something not dead.
@ -482,14 +482,14 @@ void Klass::clean_weak_klass_links(bool unloading_occurred, bool clean_alive_kla
// Find and set the first alive subklass
Klass* sub = current->subklass(true);
current->clean_subklass();
if (sub != NULL) {
if (sub != nullptr) {
stack.push(sub);
}
// Find and set the first alive sibling
Klass* sibling = current->next_sibling(true);
current->set_next_sibling(sibling);
if (sibling != NULL) {
if (sibling != nullptr) {
stack.push(sibling);
}
@ -500,7 +500,7 @@ void Klass::clean_weak_klass_links(bool unloading_occurred, bool clean_alive_kla
// JVMTI RedefineClasses creates previous versions that are not in
// the class hierarchy, so process them here.
while ((ik = ik->previous_versions()) != NULL) {
while ((ik = ik->previous_versions()) != nullptr) {
ik->clean_weak_instanceklass_links();
}
}
@ -522,7 +522,7 @@ void Klass::metaspace_pointers_do(MetaspaceClosure* it) {
it->push(&_super);
if (!Arguments::is_dumping_archive()) {
// If dumping archive, these may point to excluded classes. There's no need
// to follow these pointers anyway, as they will be set to NULL in
// to follow these pointers anyway, as they will be set to null in
// remove_unshareable_info().
it->push((Klass**)&_subklass);
it->push((Klass**)&_next_sibling);
@ -545,12 +545,12 @@ void Klass::remove_unshareable_info() {
log_trace(cds, unshareable)("remove: %s", external_name());
}
set_subklass(NULL);
set_next_sibling(NULL);
set_next_link(NULL);
set_subklass(nullptr);
set_next_sibling(nullptr);
set_next_link(nullptr);
// Null out class_loader_data because we don't share that yet.
set_class_loader_data(NULL);
set_class_loader_data(nullptr);
set_is_shared();
}
@ -576,7 +576,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec
// If an exception happened during CDS restore, some of these fields may already be
// set. We leave the class on the CLD list, even if incomplete so that we don't
// modify the CLD list outside a safepoint.
if (class_loader_data() == NULL) {
if (class_loader_data() == nullptr) {
set_class_loader_data(loader_data);
// Add to class loader list first before creating the mirror
@ -585,7 +585,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec
}
Handle loader(THREAD, loader_data->class_loader());
ModuleEntry* module_entry = NULL;
ModuleEntry* module_entry = nullptr;
Klass* k = this;
if (k->is_objArray_klass()) {
k = ObjArrayKlass::cast(k)->bottom_klass();
@ -598,7 +598,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec
module_entry = ModuleEntryTable::javabase_moduleEntry();
}
// Obtain java.lang.Module, if available
Handle module_handle(THREAD, ((module_entry != NULL) ? module_entry->module() : (oop)NULL));
Handle module_handle(THREAD, ((module_entry != nullptr) ? module_entry->module() : (oop)nullptr));
if (this->has_archived_mirror_index()) {
ResourceMark rm(THREAD);
@ -620,7 +620,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec
// Only recreate it if not present. A previous attempt to restore may have
// gotten an OOM later but keep the mirror if it was created.
if (java_mirror() == NULL) {
if (java_mirror() == nullptr) {
ResourceMark rm(THREAD);
log_trace(cds, mirror)("Recreate mirror for %s", external_name());
java_lang_Class::create_mirror(this, loader, module_handle, protection_domain, Handle(), CHECK);
@ -689,12 +689,12 @@ const char* Klass::external_name() const {
char* result = convert_hidden_name_to_java(name());
return result;
}
if (name() == NULL) return "<unknown>";
if (name() == nullptr) return "<unknown>";
return name()->as_klass_external_name();
}
const char* Klass::signature_name() const {
if (name() == NULL) return "<unknown>";
if (name() == nullptr) return "<unknown>";
if (is_objArray_klass() && ObjArrayKlass::cast(this)->bottom_klass()->is_hidden()) {
size_t name_len = name()->utf8_length();
char* result = NEW_RESOURCE_ARRAY(char, name_len + 1);
@ -769,21 +769,21 @@ void Klass::verify_on(outputStream* st) {
guarantee(this->is_klass(),"should be klass");
if (super() != NULL) {
if (super() != nullptr) {
guarantee(super()->is_klass(), "should be klass");
}
if (secondary_super_cache() != NULL) {
if (secondary_super_cache() != nullptr) {
Klass* ko = secondary_super_cache();
guarantee(ko->is_klass(), "should be klass");
}
for ( uint i = 0; i < primary_super_limit(); i++ ) {
Klass* ko = _primary_supers[i];
if (ko != NULL) {
if (ko != nullptr) {
guarantee(ko->is_klass(), "should be klass");
}
}
if (java_mirror_no_keepalive() != NULL) {
if (java_mirror_no_keepalive() != nullptr) {
guarantee(java_lang_Class::is_instance(java_mirror_no_keepalive()), "should be instance");
}
}
@ -846,7 +846,7 @@ const char* Klass::joint_in_module_of_loader(const Klass* class2, bool include_p
char* joint_description = NEW_RESOURCE_ARRAY_RETURN_NULL(char, len);
// Just return the FQN if error when allocating string
if (joint_description == NULL) {
if (joint_description == nullptr) {
return class1_name;
}
@ -905,7 +905,7 @@ const char* Klass::class_in_module_of_loader(bool use_are, bool include_parent_l
// 3. class loader's name_and_id
ClassLoaderData* cld = class_loader_data();
assert(cld != NULL, "class_loader_data should not be null");
assert(cld != nullptr, "class_loader_data should not be null");
const char* loader_name_and_id = cld->loader_name_and_id();
len += strlen(loader_name_and_id);
@ -919,9 +919,9 @@ const char* Klass::class_in_module_of_loader(bool use_are, bool include_parent_l
// The parent loader's ClassLoaderData could be null if it is
// a delegating class loader that has never defined a class.
// In this case the loader's name must be obtained via the parent loader's oop.
if (parent_cld == NULL) {
if (parent_cld == nullptr) {
oop cl_name_and_id = java_lang_ClassLoader::nameAndId(parent_loader);
if (cl_name_and_id != NULL) {
if (cl_name_and_id != nullptr) {
parent_loader_name_and_id = java_lang_String::as_utf8_string(cl_name_and_id);
}
} else {
@ -938,7 +938,7 @@ const char* Klass::class_in_module_of_loader(bool use_are, bool include_parent_l
char* class_description = NEW_RESOURCE_ARRAY_RETURN_NULL(char, len);
// Just return the FQN if error when allocating string
if (class_description == NULL) {
if (class_description == nullptr) {
return klass_name;
}


@ -145,9 +145,9 @@ class Klass : public Metadata {
OopHandle _java_mirror;
// Superclass
Klass* _super;
// First subclass (NULL if none); _subklass->next_sibling() is next one
// First subclass (null if none); _subklass->next_sibling() is next one
Klass* volatile _subklass;
// Sibling link (or NULL); links all subklasses of a klass
// Sibling link (or null); links all subklasses of a klass
Klass* volatile _next_sibling;
// All klasses loaded by a class loader are chained through these links
@ -220,7 +220,7 @@ protected:
Array<InstanceKlass*>* transitive_interfaces);
// java_super is the Java-level super type as specified by Class.getSuperClass.
virtual InstanceKlass* java_super() const { return NULL; }
virtual InstanceKlass* java_super() const { return nullptr; }
juint super_check_offset() const { return _super_check_offset; }
void set_super_check_offset(juint o) { _super_check_offset = o; }
@ -232,11 +232,11 @@ protected:
void set_secondary_supers(Array<Klass*>* k) { _secondary_supers = k; }
// Return the element of the _super chain of the given depth.
// If there is no such element, return either NULL or this.
// If there is no such element, return either null or this.
Klass* primary_super_of_depth(juint i) const {
assert(i < primary_super_limit(), "oob");
Klass* super = _primary_supers[i];
assert(super == NULL || super->super_depth() == i, "correct display");
assert(super == nullptr || super->super_depth() == i, "correct display");
return super;
}
@ -265,14 +265,14 @@ protected:
oop java_mirror_no_keepalive() const;
void set_java_mirror(Handle m);
oop archived_java_mirror() NOT_CDS_JAVA_HEAP_RETURN_(NULL);
oop archived_java_mirror() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
void set_archived_java_mirror(int mirror_index) NOT_CDS_JAVA_HEAP_RETURN;
// Temporary mirror switch used by RedefineClasses
OopHandle java_mirror_handle() const { return _java_mirror; }
void swap_java_mirror_handle(OopHandle& mirror) { _java_mirror.swap(mirror); }
// Set java mirror OopHandle to NULL for CDS
// Set java mirror OopHandle to null for CDS
// This leaves the OopHandle in the CLD, but that's ok, you can't release them.
void clear_java_mirror_handle() { _java_mirror = OopHandle(); }
@ -528,7 +528,7 @@ protected:
// array class with this klass as element type
virtual Klass* array_klass(TRAPS) = 0;
// These will return NULL instead of allocating on the heap:
// These will return null instead of allocating on the heap:
virtual Klass* array_klass_or_null(int rank) = 0;
virtual Klass* array_klass_or_null() = 0;
@ -567,7 +567,7 @@ protected:
if (has_archived_mirror_index()) {
// _java_mirror is not a valid OopHandle but rather an encoded reference in the shared heap
return false;
} else if (_java_mirror.ptr_raw() == NULL) {
} else if (_java_mirror.ptr_raw() == nullptr) {
return false;
} else {
return true;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
int vtable_length = 0;
// start off with super's vtable length
vtable_length = super == NULL ? 0 : super->vtable_length();
vtable_length = super == nullptr ? 0 : super->vtable_length();
// go thru each method in the methods table to see if it needs a new entry
int len = methods->length();
@ -90,7 +90,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
GrowableArray<Method*> new_mirandas(20);
// compute the number of mirandas methods that must be added to the end
get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces,
get_mirandas(&new_mirandas, all_mirandas, super, methods, nullptr, local_interfaces,
class_flags.is_interface());
*num_new_mirandas = new_mirandas.length();
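
The resulting size computation is plain bookkeeping: inherited slots keep their indices, each local method that needs a new entry appends one slot, and the mirandas go at the end. A sketch of the arithmetic (simplified; the interface and bootstrapping special cases handled nearby are omitted):

int compute_vtable_length(int super_vtable_length,
                          int methods_needing_new_entry,
                          int num_new_mirandas,
                          int base_vtable_size) {
  int len = super_vtable_length         // inherited slots keep their indices
          + methods_needing_new_entry   // locally introduced virtual methods
          + num_new_mirandas;           // interface methods with no implementation
  // A class with no superclass still gets the base java.lang.Object layout.
  return len == 0 ? base_vtable_size : len;
}
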
@ -106,7 +106,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
vtable_length = Universe::base_vtable_size();
}
if (super == NULL && vtable_length != Universe::base_vtable_size()) {
if (super == nullptr && vtable_length != Universe::base_vtable_size()) {
if (Universe::is_bootstrapping()) {
// Someone is attempting to override java.lang.Object incorrectly on the
// bootclasspath. The JVM cannot recover from this error including throwing
@ -131,7 +131,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
// and return the number of entries copied. Expects that 'super' is the Java
// super class (arrays can have "array" super classes that must be skipped).
int klassVtable::initialize_from_super(Klass* super) {
if (super == NULL) {
if (super == nullptr) {
return 0;
} else if (is_preinitialized_vtable()) {
// A shared class' vtable is preinitialized at dump time. No need to copy
@ -212,11 +212,11 @@ void klassVtable::initialize_vtable(GrowableArray<InstanceKlass*>* supers) {
// update vtable with default_methods
Array<Method*>* default_methods = ik()->default_methods();
if (default_methods != NULL) {
if (default_methods != nullptr) {
len = default_methods->length();
if (len > 0) {
Array<int>* def_vtable_indices = ik()->default_vtable_indices();
assert(def_vtable_indices != NULL, "should be created");
assert(def_vtable_indices != nullptr, "should be created");
assert(def_vtable_indices->length() == len, "reinit vtable len?");
for (int i = 0; i < len; i++) {
bool needs_new_entry;
@ -314,7 +314,7 @@ InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper
Symbol* target_classname) {
InstanceKlass* superk = initialsuper;
while (superk != NULL && superk->super() != NULL) {
while (superk != nullptr && superk->super() != nullptr) {
klassVtable ssVtable = (superk->super())->vtable();
if (vtable_index < ssVtable.length()) {
Method* super_method = ssVtable.method_at(vtable_index);
@ -343,11 +343,11 @@ InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper
}
} else {
// super class has no vtable entry here, stop transitive search
superk = (InstanceKlass*)NULL;
superk = (InstanceKlass*)nullptr;
break;
}
// if no override found yet, continue to search up
superk = superk->super() == NULL ? NULL : InstanceKlass::cast(superk->super());
superk = superk->super() == nullptr ? nullptr : InstanceKlass::cast(superk->super());
}
return superk;
@ -388,7 +388,7 @@ bool klassVtable::update_inherited_vtable(Thread* current,
InstanceKlass* klass = ik();
Array<int>* def_vtable_indices = NULL;
Array<int>* def_vtable_indices = nullptr;
bool is_default = false;
// default methods are non-private concrete methods in superinterfaces which are added
@ -400,7 +400,7 @@ bool klassVtable::update_inherited_vtable(Thread* current,
is_default = true;
def_vtable_indices = klass->default_vtable_indices();
assert(!target_method->is_private(), "private interface method flagged as default");
assert(def_vtable_indices != NULL, "def vtable alloc?");
assert(def_vtable_indices != nullptr, "def vtable alloc?");
assert(default_index <= def_vtable_indices->length(), "def vtable len?");
} else {
assert(klass == target_method->method_holder(), "caller resp.");
@ -438,7 +438,7 @@ bool klassVtable::update_inherited_vtable(Thread* current,
// we need a new entry if there is no superclass
Klass* super = klass->super();
if (super == NULL) {
if (super == nullptr) {
return allocate_new;
}
@ -451,8 +451,8 @@ bool klassVtable::update_inherited_vtable(Thread* current,
Symbol* signature = target_method->signature();
Klass* target_klass = target_method->method_holder();
assert(target_klass != NULL, "impossible");
if (target_klass == NULL) {
assert(target_klass != nullptr, "impossible");
if (target_klass == nullptr) {
target_klass = _klass;
}
@ -491,7 +491,7 @@ bool klassVtable::update_inherited_vtable(Thread* current,
(klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION &&
(super_klass = find_transitive_override(super_klass,
target_method, i, target_loader,
target_classname)) != NULL))) {
target_classname)) != nullptr))) {
// Package private methods always need a new entry to root their own
// overriding. They may also override other methods.
@ -504,7 +504,7 @@ bool klassVtable::update_inherited_vtable(Thread* current,
// super class or interface.
put_method_at(target_method(), i);
// Save super for constraint checking.
if (supers != NULL) {
if (supers != nullptr) {
supers->at_put(i, super_klass);
}
@ -512,7 +512,7 @@ bool klassVtable::update_inherited_vtable(Thread* current,
if (!is_default) {
target_method->set_vtable_index(i);
} else {
if (def_vtable_indices != NULL) {
if (def_vtable_indices != nullptr) {
if (is_preinitialized_vtable()) {
// At runtime initialize_vtable is rerun as part of link_class_impl()
// for a shared class loaded by the non-boot loader.
@ -550,9 +550,9 @@ void klassVtable::put_method_at(Method* m, int index) {
ResourceMark rm;
LogTarget(Trace, vtables) lt;
LogStream ls(lt);
const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
const char* sig = (m != nullptr) ? m->name_and_sig_as_C_string() : "<null>";
ls.print("adding %s at index %d, flags: ", sig, index);
if (m != NULL) {
if (m != nullptr) {
m->print_linkage_flags(&ls);
}
ls.cr();
@ -568,7 +568,7 @@ void klassVtable::check_constraints(GrowableArray<InstanceKlass*>* supers, TRAPS
for (int i = 0; i < length(); i++) {
methodHandle target_method(THREAD, unchecked_method_at(i));
InstanceKlass* super_klass = supers->at(i);
if (target_method() != NULL && super_klass != NULL) {
if (target_method() != nullptr && super_klass != nullptr) {
// Do not check loader constraints for overpass methods because overpass
// methods are created by the jvm to throw exceptions.
if (!target_method->is_overpass()) {
@ -589,7 +589,7 @@ void klassVtable::check_constraints(GrowableArray<InstanceKlass*>* supers, TRAPS
_klass,
target_loader, super_loader,
true);
if (failed_type_symbol != NULL) {
if (failed_type_symbol != nullptr) {
stringStream ss;
ss.print("loader constraint violation for class %s: when selecting "
"overriding method '", _klass->external_name());
@ -615,7 +615,7 @@ void klassVtable::check_constraints(GrowableArray<InstanceKlass*>* supers, TRAPS
void klassVtable::initialize_vtable_and_check_constraints(TRAPS) {
// Save a superclass from each vtable entry to do constraint checking
ResourceMark rm(THREAD);
GrowableArray<InstanceKlass*>* supers = new GrowableArray<InstanceKlass*>(_length, _length, NULL);
GrowableArray<InstanceKlass*>* supers = new GrowableArray<InstanceKlass*>(_length, _length, nullptr);
initialize_vtable(supers);
check_constraints(supers, CHECK);
}
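
The constraint being checked is that every class name in the overriding method's signature resolves to the same class through the subclass's loader and through the superclass's loader; otherwise dispatch would silently change types. A toy model of that invariant, with loaders reduced to name-to-class maps:

#include <string>
#include <unordered_map>

// Model only: a loader maps class names to the classes it resolved.
using Loader = std::unordered_map<std::string, const void*>;

// Returns the first name the two loaders resolve differently, or "" if the
// signature is consistent across both (no constraint violation).
std::string failed_type(const Loader& a, const Loader& b,
                        const std::string sig_types[], int n) {
  for (int i = 0; i < n; i++) {
    auto ia = a.find(sig_types[i]);
    auto ib = b.find(sig_types[i]);
    if (ia != a.end() && ib != b.end() && ia->second != ib->second) {
      return sig_types[i];  // same name, two different classes
    }
  }
  return "";
}
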
@ -660,7 +660,7 @@ bool klassVtable::needs_new_vtable_entry(Method* target_method,
// Concrete interface methods do not need new entries, they override
// abstract method entries using default inheritance rules
if (target_method->method_holder() != NULL &&
if (target_method->method_holder() != nullptr &&
target_method->method_holder()->is_interface() &&
!target_method->is_abstract()) {
assert(target_method->is_default_method(),
@ -669,7 +669,7 @@ bool klassVtable::needs_new_vtable_entry(Method* target_method,
}
// we need a new entry if there is no superclass
if (super == NULL) {
if (super == nullptr) {
return true;
}
@ -684,14 +684,14 @@ bool klassVtable::needs_new_vtable_entry(Method* target_method,
Symbol* name = target_method->name();
Symbol* signature = target_method->signature();
const Klass* k = super;
Method* super_method = NULL;
InstanceKlass *holder = NULL;
Method* recheck_method = NULL;
Method* super_method = nullptr;
InstanceKlass *holder = nullptr;
Method* recheck_method = nullptr;
bool found_pkg_prvt_method = false;
while (k != NULL) {
while (k != nullptr) {
// lookup through the hierarchy for a method with matching name and sign.
super_method = InstanceKlass::cast(k)->lookup_method(name, signature);
if (super_method == NULL) {
if (super_method == nullptr) {
break; // we still have to search for a matching miranda method
}
// get the class holding the matching method
@ -743,7 +743,7 @@ bool klassVtable::needs_new_vtable_entry(Method* target_method,
// this check for all access permissions.
const InstanceKlass *sk = InstanceKlass::cast(super);
if (sk->has_miranda_methods()) {
if (sk->lookup_method_in_all_interfaces(name, signature, Klass::DefaultsLookupMode::find) != NULL) {
if (sk->lookup_method_in_all_interfaces(name, signature, Klass::DefaultsLookupMode::find) != nullptr) {
return false; // found a matching miranda; we do not need a new entry
}
}
@ -849,14 +849,14 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
if (InstanceKlass::find_local_method(class_methods, name, signature,
Klass::OverpassLookupMode::find,
Klass::StaticLookupMode::skip,
Klass::PrivateLookupMode::skip) != NULL)
Klass::PrivateLookupMode::skip) != nullptr)
{
return false;
}
// Check local default methods
if ((default_methods != NULL) &&
(InstanceKlass::find_method(default_methods, name, signature) != NULL))
if ((default_methods != nullptr) &&
(InstanceKlass::find_method(default_methods, name, signature) != nullptr))
{
return false;
}
@ -866,14 +866,14 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
// Overpasses may or may not exist for supers for pass 1,
// they should have been created for pass 2 and later.
for (const Klass* cursuper = super; cursuper != NULL; cursuper = cursuper->super())
for (const Klass* cursuper = super; cursuper != nullptr; cursuper = cursuper->super())
{
Method* found_mth = InstanceKlass::cast(cursuper)->find_local_method(name, signature,
Klass::OverpassLookupMode::find,
Klass::StaticLookupMode::skip,
Klass::PrivateLookupMode::skip);
// Ignore non-public methods in java.lang.Object if klass is an interface.
if (found_mth != NULL && (!is_interface ||
if (found_mth != nullptr && (!is_interface ||
!SystemDictionary::is_nonpublic_Object_method(found_mth))) {
return false;
}
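
Summed up, a miranda is an interface method that neither the class's own methods, its inherited defaults, nor any superclass satisfies, so the VM must synthesize a vtable slot for it. The predicate reduced to a toy class model (illustrative names throughout):

#include <algorithm>
#include <string>
#include <vector>

struct MiniKlass {
  std::vector<std::string> methods;   // locally declared methods
  std::vector<std::string> defaults;  // default methods from interfaces
  const MiniKlass* super = nullptr;
};

// True if interface method 'im' is satisfied nowhere in the hierarchy,
// so the VM must synthesize a slot (AbstractMethodError if ever called).
bool is_miranda(const std::string& im, const MiniKlass* k) {
  auto has = [](const std::vector<std::string>& v, const std::string& m) {
    return std::find(v.begin(), v.end(), m) != v.end();
  };
  if (has(k->methods, im) || has(k->defaults, im)) return false;
  for (const MiniKlass* s = k->super; s != nullptr; s = s->super) {
    if (has(s->methods, im)) return false;
  }
  return true;
}
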
@ -915,10 +915,10 @@ void klassVtable::add_new_mirandas_to_lists(
if (is_miranda(im, class_methods, default_methods, super, is_interface)) { // is it a miranda at all?
const InstanceKlass *sk = InstanceKlass::cast(super);
// check if it is a duplicate of a super's miranda
if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::DefaultsLookupMode::find) == NULL) {
if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::DefaultsLookupMode::find) == nullptr) {
new_mirandas->append(im);
}
if (all_mirandas != NULL) {
if (all_mirandas != nullptr) {
all_mirandas->append(im);
}
}
@ -962,7 +962,7 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
int klassVtable::fill_in_mirandas(Thread* current, int initialized) {
ResourceMark rm(current);
GrowableArray<Method*> mirandas(20);
get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
get_mirandas(&mirandas, nullptr, ik()->super(), ik()->methods(),
ik()->default_methods(), ik()->local_interfaces(),
klass()->is_interface());
for (int i = 0; i < mirandas.length(); i++) {
@ -970,7 +970,7 @@ int klassVtable::fill_in_mirandas(Thread* current, int initialized) {
Method* meth = mirandas.at(i);
LogTarget(Trace, vtables) lt;
LogStream ls(lt);
if (meth != NULL) {
if (meth != nullptr) {
char* sig = meth->name_and_sig_as_C_string();
ls.print("fill in mirandas with %s index %d, flags: ",
sig, initialized);
@ -997,7 +997,7 @@ bool klassVtable::adjust_default_method(int vtable_index, Method* old_method, Me
bool updated = false;
Array<Method*>* default_methods = ik()->default_methods();
if (default_methods != NULL) {
if (default_methods != nullptr) {
int len = default_methods->length();
for (int idx = 0; idx < len; idx++) {
if (vtable_index == ik()->default_vtable_indices()->at(idx)) {
@ -1019,7 +1019,7 @@ void klassVtable::adjust_method_entries(bool * trace_name_printed) {
for (int index = 0; index < length(); index++) {
Method* old_method = unchecked_method_at(index);
if (old_method == NULL || !old_method->is_old()) {
if (old_method == nullptr || !old_method->is_old()) {
continue; // skip uninteresting entries
}
assert(!old_method->is_deleted(), "vtable methods may not be deleted");
@ -1052,7 +1052,7 @@ bool klassVtable::check_no_old_or_obsolete_entries() {
for (int i = 0; i < length(); i++) {
Method* m = unchecked_method_at(i);
if (m != NULL &&
if (m != nullptr &&
(NOT_PRODUCT(!m->is_valid() ||) m->is_old() || m->is_obsolete())) {
log_trace(redefine, class, update, vtables)
("vtable check found old method entry: class: %s old: %d obsolete: %d, method: %s",
@ -1067,7 +1067,7 @@ void klassVtable::dump_vtable() {
tty->print_cr("vtable dump --");
for (int i = 0; i < length(); i++) {
Method* m = unchecked_method_at(i);
if (m != NULL) {
if (m != nullptr) {
tty->print(" (%5d) ", i);
m->access_flags().print_on(tty);
if (m->is_default_method()) {
@ -1089,7 +1089,7 @@ void klassVtable::dump_vtable() {
// Initialize a itableMethodEntry
void itableMethodEntry::initialize(InstanceKlass* klass, Method* m) {
if (m == NULL) return;
if (m == nullptr) return;
#ifdef ASSERT
if (MetaspaceShared::is_in_shared_metaspace((void*)&_method) &&
@ -1112,7 +1112,7 @@ klassItable::klassItable(InstanceKlass* klass) {
if (klass->itable_length() > 0) {
itableOffsetEntry* offset_entry = (itableOffsetEntry*)klass->start_of_itable();
if (offset_entry != NULL && offset_entry->interface_klass() != NULL) { // Check that itable is initialized
if (offset_entry != nullptr && offset_entry->interface_klass() != nullptr) { // Check that itable is initialized
// First offset entry points to the first method_entry
intptr_t* method_entry = (intptr_t *)(((address)klass) + offset_entry->offset());
intptr_t* end = klass->end_of_itable();
@ -1161,14 +1161,14 @@ void klassItable::initialize_itable(GrowableArray<Method*>* supers) {
for(int i = 0; i < num_interfaces; i++) {
itableOffsetEntry* ioe = offset_entry(i);
InstanceKlass* interf = ioe->interface_klass();
assert(interf != NULL && ioe->offset() != 0, "bad offset entry in itable");
assert(interf != nullptr && ioe->offset() != 0, "bad offset entry in itable");
initialize_itable_for_interface(ioe->offset(), interf, supers,
(ioe->offset() - offset_entry(0)->offset())/wordSize);
}
}
// Check that the last entry is empty
itableOffsetEntry* ioe = offset_entry(size_offset_table() - 1);
guarantee(ioe->interface_klass() == NULL && ioe->offset() == 0, "terminator entry missing");
guarantee(ioe->interface_klass() == nullptr && ioe->offset() == 0, "terminator entry missing");
}
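
The guarantee above relies on the itable's offset table ending in an all-zero terminator entry. A minimal standalone sketch of that convention (names hypothetical, not the VM's types):

#include <cassert>
#include <cstddef>

struct Iface;  // stand-in for InstanceKlass

struct OffsetEntry {
  Iface* iface;   // nullptr in the terminator entry
  int    offset;  // 0 in the terminator entry
};

// Walk entries until the {nullptr, 0} sentinel; the sentinel itself is
// checked last, mirroring the "terminator entry missing" guarantee.
static size_t count_interfaces(const OffsetEntry* table) {
  size_t n = 0;
  while (table[n].iface != nullptr) {
    assert(table[n].offset != 0 && "bad offset entry in itable");
    ++n;
  }
  assert(table[n].offset == 0 && "terminator entry missing");
  return n;
}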
void klassItable::check_constraints(GrowableArray<Method*>* supers, TRAPS) {
@ -1179,7 +1179,7 @@ void klassItable::check_constraints(GrowableArray<Method*>* supers, TRAPS) {
Method* target = ime->method();
Method* interface_method = supers->at(i); // method overridden
if (target != NULL && interface_method != NULL) {
if (target != nullptr && interface_method != nullptr) {
InstanceKlass* method_holder = target->method_holder();
InstanceKlass* interf = interface_method->method_holder();
HandleMark hm(THREAD);
@ -1194,7 +1194,7 @@ void klassItable::check_constraints(GrowableArray<Method*>* supers, TRAPS) {
method_holder_loader,
interface_loader,
true);
if (failed_type_symbol != NULL) {
if (failed_type_symbol != nullptr) {
stringStream ss;
ss.print("loader constraint violation in interface itable"
" initialization for class %s: when selecting method '",
@ -1223,7 +1223,7 @@ void klassItable::initialize_itable_and_check_constraints(TRAPS) {
// Save a super interface from each itable entry to do constraint checking
ResourceMark rm(THREAD);
GrowableArray<Method*>* supers =
new GrowableArray<Method*>(_size_method_table, _size_method_table, NULL);
new GrowableArray<Method*>(_size_method_table, _size_method_table, nullptr);
initialize_itable(supers);
check_constraints(supers, CHECK);
}
@ -1259,7 +1259,7 @@ int klassItable::assign_itable_indices_for_interface(InstanceKlass* klass) {
ResourceMark rm;
LogTarget(Trace, itables) lt;
LogStream ls(lt);
assert(m != NULL, "methods can never be null");
assert(m != nullptr, "methods can never be null");
const char* sig = m->name_and_sig_as_C_string();
if (m->has_vtable_index()) {
ls.print("vtable index %d for method: %s, flags: ", m->vtable_index(), sig);
@ -1321,7 +1321,7 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Insta
int ime_count = method_count_for_interface(interf);
for (int i = 0; i < nof_methods; i++) {
Method* m = methods->at(i);
Method* target = NULL;
Method* target = nullptr;
if (m->has_itable_index()) {
// This search must match the runtime resolution, i.e. selection search for invokeinterface
// to correctly enforce loader constraints for interface method inheritance.
@ -1332,11 +1332,11 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Insta
target = LinkResolver::lookup_instance_method_in_klasses(_klass, m->name(), m->signature(),
Klass::PrivateLookupMode::skip);
}
if (target == NULL || !target->is_public() || target->is_abstract() || target->is_overpass()) {
assert(target == NULL || !target->is_overpass() || target->is_public(),
if (target == nullptr || !target->is_public() || target->is_abstract() || target->is_overpass()) {
assert(target == nullptr || !target->is_overpass() || target->is_public(),
"Non-public overpass method!");
// Entry does not resolve. Leave it empty for AbstractMethodError or other error.
if (!(target == NULL) && !target->is_public()) {
if (!(target == nullptr) && !target->is_public()) {
// Stuff an IllegalAccessError throwing method in there instead.
itableOffsetEntry::method_entry(_klass, method_table_offset)[m->itable_index()].
initialize(_klass, Universe::throw_illegal_access_error());
@ -1348,14 +1348,14 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Insta
// Save super interface method to perform constraint checks.
// The method is in the error message, that's why.
if (supers != NULL) {
if (supers != nullptr) {
supers->at_put(start_offset + ime_num, m);
}
itableOffsetEntry::method_entry(_klass, method_table_offset)[ime_num].initialize(_klass, target);
if (log_develop_is_enabled(Trace, itables)) {
ResourceMark rm;
if (target != NULL) {
if (target != nullptr) {
LogTarget(Trace, itables) lt;
LogStream ls(lt);
char* sig = target->name_and_sig_as_C_string();
@ -1379,7 +1379,7 @@ void klassItable::adjust_method_entries(bool * trace_name_printed) {
for (int i = 0; i < _size_method_table; i++, ime++) {
Method* old_method = ime->method();
if (old_method == NULL || !old_method->is_old()) {
if (old_method == nullptr || !old_method->is_old()) {
continue; // skip uninteresting entries
}
assert(!old_method->is_deleted(), "itable methods may not be deleted");
@ -1402,7 +1402,7 @@ bool klassItable::check_no_old_or_obsolete_entries() {
for (int i = 0; i < _size_method_table; i++) {
Method* m = ime->method();
if (m != NULL &&
if (m != nullptr &&
(NOT_PRODUCT(!m->is_valid() ||) m->is_old() || m->is_obsolete())) {
log_trace(redefine, class, update, itables)
("itable check found old method entry: class: %s old: %d obsolete: %d, method: %s",
@ -1419,7 +1419,7 @@ void klassItable::dump_itable() {
tty->print_cr("itable dump --");
for (int i = 0; i < _size_method_table; i++) {
Method* m = ime->method();
if (m != NULL) {
if (m != nullptr) {
tty->print(" (%5d) ", i);
m->access_flags().print_on(tty);
if (m->is_default_method()) {
@ -1570,7 +1570,7 @@ void klassVtable::verify(outputStream* st, bool forced) {
for (int i = 0; i < _length; i++) table()[i].verify(this, st);
// verify consistency with superKlass vtable
Klass* super = _klass->super();
if (super != NULL) {
if (super != nullptr) {
InstanceKlass* sk = InstanceKlass::cast(super);
klassVtable vt = sk->vtable();
for (int i = 0; i < vt.length(); i++) {
@ -1602,9 +1602,9 @@ void vtableEntry::verify(klassVtable* vt, outputStream* st) {
Klass* vtklass = vt->klass();
if (vtklass->is_instance_klass() &&
(InstanceKlass::cast(vtklass)->major_version() >= klassVtable::VTABLE_TRANSITIVE_OVERRIDE_VERSION)) {
assert(method() != NULL, "must have set method");
assert(method() != nullptr, "must have set method");
}
if (method() != NULL) {
if (method() != nullptr) {
method()->verify();
// we sub_type, because it could be a miranda method
if (!vtklass->is_subtype_of(method()->method_holder())) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,7 @@ class klassVtable {
int index_of_miranda(Symbol* name, Symbol* signature);
// initialize vtable of a new klass
void initialize_vtable(GrowableArray<InstanceKlass*>* supers = NULL);
void initialize_vtable(GrowableArray<InstanceKlass*>* supers = nullptr);
void initialize_vtable_and_check_constraints(TRAPS);
// computes vtable length (in words) and the number of miranda methods
@ -190,8 +190,8 @@ class vtableEntry {
private:
Method* _method;
void set(Method* method) { assert(method != NULL, "use clear"); _method = method; }
void clear() { _method = NULL; }
void set(Method* method) { assert(method != nullptr, "use clear"); _method = method; }
void clear() { _method = nullptr; }
void print() PRODUCT_RETURN;
void verify(klassVtable* vt, outputStream* st);
@ -201,7 +201,7 @@ class vtableEntry {
inline Method* klassVtable::method_at(int i) const {
assert(i >= 0 && i < _length, "index out of bounds");
assert(table()[i].method() != NULL, "should not be null");
assert(table()[i].method() != nullptr, "should not be null");
assert(((Metadata*)table()[i].method())->is_method(), "should be method");
return table()[i].method();
}
@ -246,7 +246,7 @@ class itableMethodEntry {
Method* method() const { return _method; }
Method**method_addr() { return &_method; }
void clear() { _method = NULL; }
void clear() { _method = nullptr; }
void initialize(InstanceKlass* klass, Method* method);
@ -298,7 +298,7 @@ class klassItable {
// Initialization
void initialize_itable_and_check_constraints(TRAPS);
void initialize_itable(GrowableArray<Method*>* supers = NULL);
void initialize_itable(GrowableArray<Method*>* supers = nullptr);
#if INCLUDE_JVMTI
// RedefineClasses() API support:


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,8 +69,8 @@ void markWord::print_on(outputStream* st, bool print_monitor_info) const {
st->print(" monitor(" INTPTR_FORMAT ")=", value());
if (print_monitor_info) {
ObjectMonitor* mon = monitor();
if (mon == NULL) {
st->print("NULL (this should never be seen!)");
if (mon == nullptr) {
st->print("null (this should never be seen!)");
} else {
mon->print_on(st);
}
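
This hunk is also where the change stops being purely mechanical: the user-visible string "NULL" becomes "null". For the pointer literal itself, the practical gain is type safety. A minimal standalone sketch (not VM code) of why nullptr is the safer spelling:

#include <cstdio>

static void describe(int)         { std::puts("matched the int overload"); }
static void describe(const char*) { std::puts("matched the pointer overload"); }

int main() {
  describe(0);        // integer literal: always the int overload
  describe(nullptr);  // std::nullptr_t: unambiguously the pointer overload
  // describe(NULL);  // NULL is typically 0 or 0L, so this either picks the
  //                  // int overload or is rejected as ambiguous -- exactly
  //                  // the footgun that nullptr removes
  return 0;
}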


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,8 +54,8 @@ class Metadata : public MetaspaceObj {
void print_value() const;
static void print_value_on_maybe_null(outputStream* st, const Metadata* m) {
if (NULL == m)
st->print("NULL");
if (nullptr == m)
st->print("null");
else
m->print_value_on(st);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,18 +107,18 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name) {
set_dont_inline(false);
set_changes_current_thread(false);
set_has_injected_profile(false);
set_method_data(NULL);
set_method_data(nullptr);
clear_method_counters();
set_vtable_index(Method::garbage_vtable_index);
// Fix and bury in Method*
set_interpreter_entry(NULL); // sets i2i entry and from_int
set_adapter_entry(NULL);
set_interpreter_entry(nullptr); // sets i2i entry and from_int
set_adapter_entry(nullptr);
Method::clear_code(); // from_c/from_i get set to c2i/i2i
if (access_flags.is_native()) {
clear_native_function();
set_signature_handler(NULL);
set_signature_handler(nullptr);
}
NOT_PRODUCT(set_compiled_invocation_count(0);)
@ -130,13 +130,13 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name) {
// we've walked the code cache.
void Method::deallocate_contents(ClassLoaderData* loader_data) {
MetadataFactory::free_metadata(loader_data, constMethod());
set_constMethod(NULL);
set_constMethod(nullptr);
MetadataFactory::free_metadata(loader_data, method_data());
set_method_data(NULL);
set_method_data(nullptr);
MetadataFactory::free_metadata(loader_data, method_counters());
clear_method_counters();
// The nmethod will be gone when we get here.
if (code() != NULL) _code = NULL;
if (code() != nullptr) _code = nullptr;
}
void Method::release_C_heap_structures() {
@ -149,23 +149,23 @@ void Method::release_C_heap_structures() {
}
address Method::get_i2c_entry() {
assert(adapter() != NULL, "must have");
assert(adapter() != nullptr, "must have");
return adapter()->get_i2c_entry();
}
address Method::get_c2i_entry() {
assert(adapter() != NULL, "must have");
assert(adapter() != nullptr, "must have");
return adapter()->get_c2i_entry();
}
address Method::get_c2i_unverified_entry() {
assert(adapter() != NULL, "must have");
assert(adapter() != nullptr, "must have");
return adapter()->get_c2i_unverified_entry();
}
address Method::get_c2i_no_clinit_check_entry() {
assert(VM_Version::supports_fast_class_init_checks(), "");
assert(adapter() != NULL, "must have");
assert(adapter() != nullptr, "must have");
return adapter()->get_c2i_no_clinit_check_entry();
}
@ -233,7 +233,7 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla
if (log_is_enabled(Debug, exceptions)) {
ResourceMark rm(THREAD);
log_debug(exceptions)("Looking for catch handler for exception of type \"%s\" in method \"%s\"",
ex_klass == NULL ? "NULL" : ex_klass->external_name(), mh->name()->as_C_string());
ex_klass == nullptr ? "null" : ex_klass->external_name(), mh->name()->as_C_string());
}
// exception table holds quadruple entries of the form (beg_bci, end_bci, handler_bci, klass_index)
// access exception table
@ -260,14 +260,14 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm(THREAD);
log_info(exceptions)("Found catch-all handler for exception of type \"%s\" in method \"%s\" at BCI: %d",
ex_klass == NULL ? "NULL" : ex_klass->external_name(), mh->name()->as_C_string(), handler_bci);
ex_klass == nullptr ? "null" : ex_klass->external_name(), mh->name()->as_C_string(), handler_bci);
}
return handler_bci;
} else if (ex_klass == NULL) {
} else if (ex_klass == nullptr) {
// Is this even possible?
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm(THREAD);
log_info(exceptions)("NULL exception class is implicitly caught by handler in method \"%s\" at BCI: %d",
log_info(exceptions)("null exception class is implicitly caught by handler in method \"%s\" at BCI: %d",
mh()->name()->as_C_string(), handler_bci);
}
return handler_bci;
@ -289,12 +289,12 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla
}
return handler_bci;
}
assert(k != NULL, "klass not loaded");
assert(k != nullptr, "klass not loaded");
if (ex_klass->is_subtype_of(k)) {
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm(THREAD);
log_info(exceptions)("Found matching handler for exception of type \"%s\" in method \"%s\" at BCI: %d",
ex_klass == NULL ? "NULL" : ex_klass->external_name(), mh->name()->as_C_string(), handler_bci);
ex_klass == nullptr ? "null" : ex_klass->external_name(), mh->name()->as_C_string(), handler_bci);
}
return handler_bci;
}
@ -369,7 +369,7 @@ address Method::bcp_from(int bci) const {
}
address Method::bcp_from(address bcp) const {
if (is_native() && bcp == NULL) {
if (is_native() && bcp == nullptr) {
return code_base();
} else {
return bcp;
@ -464,11 +464,11 @@ static Method* find_prefixed_native(Klass* k, Symbol* name, Symbol* signature, T
strcpy(trial_name_str, prefix);
strcat(trial_name_str, name_str);
TempNewSymbol trial_name = SymbolTable::probe(trial_name_str, trial_len);
if (trial_name == NULL) {
if (trial_name == nullptr) {
continue; // no such symbol, so this prefix wasn't used, try the next prefix
}
method = k->lookup_method(trial_name, signature);
if (method == NULL) {
if (method == nullptr) {
continue; // signature doesn't match, try the next prefix
}
if (method->is_native()) {
@ -480,12 +480,12 @@ static Method* find_prefixed_native(Klass* k, Symbol* name, Symbol* signature, T
name_str = trial_name_str;
}
#endif // INCLUDE_JVMTI
return NULL; // not found
return nullptr; // not found
}
bool Method::register_native(Klass* k, Symbol* name, Symbol* signature, address entry, TRAPS) {
Method* method = k->lookup_method(name, signature);
if (method == NULL) {
if (method == nullptr) {
ResourceMark rm(THREAD);
stringStream st;
st.print("Method '");
@ -496,7 +496,7 @@ bool Method::register_native(Klass* k, Symbol* name, Symbol* signature, address
if (!method->is_native()) {
// trying to register to a non-native method, see if a JVM TI agent has added prefix(es)
method = find_prefixed_native(k, name, signature, THREAD);
if (method == NULL) {
if (method == nullptr) {
ResourceMark rm(THREAD);
stringStream st;
st.print("Method '");
@ -506,7 +506,7 @@ bool Method::register_native(Klass* k, Symbol* name, Symbol* signature, address
}
}
if (entry != NULL) {
if (entry != nullptr) {
method->set_native_function(entry, native_bind_event_is_interesting);
} else {
method->clear_native_function();
@ -524,14 +524,14 @@ bool Method::was_executed_more_than(int n) {
// Invocation counter is reset when the Method* is compiled.
// If the method has compiled code we therefore assume it has
// been executed more than n times.
if (is_accessor() || is_empty_method() || (code() != NULL)) {
if (is_accessor() || is_empty_method() || (code() != nullptr)) {
// interpreter doesn't bump invocation counter of trivial methods
// compiler does not bump invocation counter of compiled methods
return true;
}
else if ((method_counters() != NULL &&
else if ((method_counters() != nullptr &&
method_counters()->invocation_counter()->carry()) ||
(method_data() != NULL &&
(method_data() != nullptr &&
method_data()->invocation_counter()->carry())) {
// The carry bit is set when the counter overflows and causes
// a compilation to occur. We don't know how many times
@ -568,7 +568,7 @@ void Method::print_invocation_count() {
tty->print_cr (" invocation_counter: " INT32_FORMAT_W(11), invocation_count());
tty->print_cr (" backedge_counter: " INT32_FORMAT_W(11), backedge_count());
if (method_data() != NULL) {
if (method_data() != nullptr) {
tty->print_cr (" decompile_count: " UINT32_FORMAT_W(11), method_data()->decompile_count());
}
@ -614,7 +614,7 @@ void Method::build_profiling_method_data(const methodHandle& method, TRAPS) {
MethodCounters* Method::build_method_counters(Thread* current, Method* m) {
// Do not profile the method if metaspace has hit an OOM previously
if (ClassLoaderDataGraph::has_metaspace_oom()) {
return NULL;
return nullptr;
}
methodHandle mh(current, m);
@ -633,10 +633,10 @@ MethodCounters* Method::build_method_counters(Thread* current, Method* m) {
counters = MethodCounters::allocate_no_exception(mh);
}
if (counters == NULL) {
if (counters == nullptr) {
CompileBroker::log_metaspace_failure();
ClassLoaderDataGraph::set_metaspace_oom(true);
return NULL;
return nullptr;
}
if (!mh->init_method_counters(counters)) {
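
build_method_counters treats allocation failure as a soft error: it logs the failure, latches the metaspace-OOM flag, and returns nullptr rather than throwing. A standard-C++ sketch of the same pattern (types and names hypothetical):

#include <new>

struct Counters { long data[8]; };

static bool g_metaspace_oom = false;  // stand-in for the ClassLoaderDataGraph flag

static Counters* allocate_counters_no_exception() {
  Counters* c = new (std::nothrow) Counters();  // yields nullptr on failure
  if (c == nullptr) {
    g_metaspace_oom = true;  // remember the failure, stop further attempts
  }
  return c;  // caller must handle nullptr
}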
@ -793,7 +793,7 @@ bool Method::is_final_method() const {
}
bool Method::is_default_method() const {
if (method_holder() != NULL &&
if (method_holder() != nullptr &&
method_holder()->is_interface() &&
!is_abstract() && !is_private()) {
return true;
@ -971,7 +971,7 @@ bool Method::is_klass_loaded_by_klass_index(int klass_index) const {
Symbol* klass_name = constants()->klass_name_at(klass_index);
Handle loader(thread, method_holder()->class_loader());
Handle prot (thread, method_holder()->protection_domain());
return SystemDictionary::find_instance_klass(thread, klass_name, loader, prot) != NULL;
return SystemDictionary::find_instance_klass(thread, klass_name, loader, prot) != nullptr;
} else {
return true;
}
@ -989,7 +989,7 @@ bool Method::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
void Method::set_native_function(address function, bool post_event_flag) {
assert(function != NULL, "use clear_native_function to unregister natives");
assert(function != nullptr, "use clear_native_function to unregister natives");
assert(!is_special_native_intrinsic() || function == SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), "");
address* native_function = native_function_addr();
@ -998,7 +998,7 @@ void Method::set_native_function(address function, bool post_event_flag) {
address current = *native_function;
if (current == function) return;
if (post_event_flag && JvmtiExport::should_post_native_method_bind() &&
function != NULL) {
function != nullptr) {
// native_method_throw_unsatisfied_link_error_entry() should only
// be passed when post_event_flag is false.
assert(function !=
@ -1013,7 +1013,7 @@ void Method::set_native_function(address function, bool post_event_flag) {
// use the latest registered method -> check if a stub already has been generated.
// If so, we have to make it not_entrant.
CompiledMethod* nm = code(); // Put it into local variable to guard against concurrent updates
if (nm != NULL) {
if (nm != nullptr) {
nm->make_not_entrant();
}
}
@ -1023,7 +1023,7 @@ bool Method::has_native_function() const {
if (is_special_native_intrinsic())
return false; // special-cased in SharedRuntime::generate_native_wrapper
address func = native_function();
return (func != NULL && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
return (func != nullptr && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
}
@ -1043,7 +1043,7 @@ void Method::set_signature_handler(address handler) {
void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason) {
assert(reason != NULL, "must provide a reason");
assert(reason != nullptr, "must provide a reason");
if (PrintCompilation && report) {
ttyLocker ttyl;
tty->print("made not %scompilable on ", is_osr ? "OSR " : "");
@ -1057,16 +1057,16 @@ void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report,
if (size > 0) {
tty->print(" (%d bytes)", size);
}
if (reason != NULL) {
if (reason != nullptr) {
tty->print(" %s", reason);
}
tty->cr();
}
if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
if ((TraceDeoptimization || LogCompilation) && (xtty != nullptr)) {
ttyLocker ttyl;
xtty->begin_elem("make_not_compilable thread='" UINTX_FORMAT "' osr='%d' level='%d'",
os::current_thread_id(), is_osr, comp_level);
if (reason != NULL) {
if (reason != nullptr) {
xtty->print(" reason=\'%s\'", reason);
}
xtty->method(this);
@ -1147,21 +1147,21 @@ void Method::set_not_osr_compilable(const char* reason, int comp_level, bool rep
// Revert to using the interpreter and clear out the nmethod
void Method::clear_code() {
// this may be NULL if c2i adapters have not been made yet
// this may be null if c2i adapters have not been made yet
// Only should happen at allocate time.
if (adapter() == NULL) {
_from_compiled_entry = NULL;
if (adapter() == nullptr) {
_from_compiled_entry = nullptr;
} else {
_from_compiled_entry = adapter()->get_c2i_entry();
}
OrderAccess::storestore();
_from_interpreted_entry = _i2i_entry;
OrderAccess::storestore();
_code = NULL;
_code = nullptr;
}
void Method::unlink_code(CompiledMethod *compare) {
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
// We need to check if either the _code or _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in Method* as seen in bugid 4947125.
@ -1172,7 +1172,7 @@ void Method::unlink_code(CompiledMethod *compare) {
}
void Method::unlink_code() {
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
clear_code();
}
@ -1180,19 +1180,19 @@ void Method::unlink_code() {
// Called by class data sharing to remove any entry points (which are not shared)
void Method::unlink_method() {
Arguments::assert_is_dumping_archive();
_code = NULL;
_adapter = NULL;
_i2i_entry = NULL;
_from_compiled_entry = NULL;
_from_interpreted_entry = NULL;
_code = nullptr;
_adapter = nullptr;
_i2i_entry = nullptr;
_from_compiled_entry = nullptr;
_from_interpreted_entry = nullptr;
if (is_native()) {
*native_function_addr() = NULL;
set_signature_handler(NULL);
*native_function_addr() = nullptr;
set_signature_handler(nullptr);
}
NOT_PRODUCT(set_compiled_invocation_count(0);)
set_method_data(NULL);
set_method_data(nullptr);
clear_method_counters();
}
#endif
@ -1202,17 +1202,17 @@ void Method::unlink_method() {
void Method::link_method(const methodHandle& h_method, TRAPS) {
// If the code cache is full, we may reenter this function for the
// leftover methods that weren't linked.
if (adapter() != NULL) {
if (adapter() != nullptr) {
return;
}
assert( _code == NULL, "nothing compiled yet" );
assert( _code == nullptr, "nothing compiled yet" );
// Setup interpreter entrypoint
assert(this == h_method(), "wrong h_method()" );
assert(adapter() == NULL, "init'd to NULL");
assert(adapter() == nullptr, "init'd to null");
address entry = Interpreter::entry_for_method(h_method);
assert(entry != NULL, "interpreter entry must be non-null");
assert(entry != nullptr, "interpreter entry must be non-null");
// Sets both _i2i_entry and _from_interpreted_entry
set_interpreter_entry(entry);
@ -1237,9 +1237,9 @@ void Method::link_method(const methodHandle& h_method, TRAPS) {
if (h_method->is_continuation_native_intrinsic()) {
// the entry points to this method will be set in set_code, called when first resolving this method
_from_interpreted_entry = NULL;
_from_compiled_entry = NULL;
_i2i_entry = NULL;
_from_interpreted_entry = nullptr;
_from_compiled_entry = nullptr;
_i2i_entry = nullptr;
}
}
@ -1248,7 +1248,7 @@ address Method::make_adapters(const methodHandle& mh, TRAPS) {
// small (generally < 100 bytes) and quick to make (and cached and shared)
// so making them eagerly shouldn't be too expensive.
AdapterHandlerEntry* adapter = AdapterHandlerLibrary::get_adapter(mh);
if (adapter == NULL ) {
if (adapter == nullptr ) {
if (!is_init_completed()) {
// Don't throw exceptions during VM initialization because java.lang.* classes
// might not have been initialized, causing problems when constructing the
@ -1273,7 +1273,7 @@ address Method::make_adapters(const methodHandle& mh, TRAPS) {
// This function must not hit a safepoint!
address Method::verified_code_entry() {
debug_only(NoSafepointVerifier nsv;)
assert(_from_compiled_entry != NULL, "must be set");
assert(_from_compiled_entry != nullptr, "must be set");
return _from_compiled_entry;
}
@ -1283,7 +1283,7 @@ address Method::verified_code_entry() {
bool Method::check_code() const {
// cached in a register or local. There's a race on the value of the field.
CompiledMethod *code = Atomic::load_acquire(&_code);
return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
return code == nullptr || (code->method() == nullptr) || (code->method() == (Method*)this && !code->is_osr_method());
}
// Install compiled code. Instantly it can execute.
@ -1292,7 +1292,7 @@ void Method::set_code(const methodHandle& mh, CompiledMethod *code) {
assert( code, "use clear_code to remove code" );
assert( mh->check_code(), "" );
guarantee(mh->adapter() != NULL, "Adapter blob must already exist!");
guarantee(mh->adapter() != nullptr, "Adapter blob must already exist!");
// These writes must happen in this order, because the interpreter will
// directly jump to from_interpreted_entry which jumps to an i2c adapter
@ -1311,7 +1311,7 @@ void Method::set_code(const methodHandle& mh, CompiledMethod *code) {
OrderAccess::storestore();
if (mh->is_continuation_native_intrinsic()) {
assert(mh->_from_interpreted_entry == NULL, "initialized incorrectly"); // see link_method
assert(mh->_from_interpreted_entry == nullptr, "initialized incorrectly"); // see link_method
if (mh->is_continuation_enter_intrinsic()) {
// This is the entry used when we're in interpreter-only mode; see InterpreterMacroAssembler::jump_from_interpreted
@ -1339,7 +1339,7 @@ bool Method::is_overridden_in(Klass* k) const {
// is a miranda method
if (method_holder()->is_interface()) {
// Check that method is not a miranda method
if (ik->lookup_method(name(), signature()) == NULL) {
if (ik->lookup_method(name(), signature()) == nullptr) {
// No implementation exist - so miranda method
return false;
}
@ -1496,12 +1496,12 @@ methodHandle Method::make_method_handle_intrinsic(vmIntrinsics::ID iid,
}
Klass* Method::check_non_bcp_klass(Klass* klass) {
if (klass != NULL && klass->class_loader() != NULL) {
if (klass != nullptr && klass->class_loader() != nullptr) {
if (klass->is_objArray_klass())
klass = ObjArrayKlass::cast(klass)->bottom_klass();
return klass;
}
return NULL;
return nullptr;
}
@ -1614,12 +1614,12 @@ methodHandle Method::clone_with_new_data(const methodHandle& m, u_char* new_code
}
vmSymbolID Method::klass_id_for_intrinsics(const Klass* holder) {
// if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
// if loader is not the default loader (i.e., non-null), we can't know the intrinsics
// because we are not loading from core libraries
// exception: the AES intrinsics come from lib/ext/sunjce_provider.jar
// which does not use the class default class loader so we check for its loader here
const InstanceKlass* ik = InstanceKlass::cast(holder);
if ((ik->class_loader() != NULL) && !SystemDictionary::is_platform_class_loader(ik->class_loader())) {
if ((ik->class_loader() != nullptr) && !SystemDictionary::is_platform_class_loader(ik->class_loader())) {
return vmSymbolID::NO_SID; // regardless of name, no intrinsics here
}
@ -1713,7 +1713,7 @@ bool Method::load_signature_classes(const methodHandle& m, TRAPS) {
return false;
}
}
if( klass == NULL) { sig_is_loaded = false; }
if( klass == nullptr) { sig_is_loaded = false; }
}
}
return sig_is_loaded;
@ -1744,7 +1744,7 @@ static int method_comparator(Method* a, Method* b) {
void Method::sort_methods(Array<Method*>* methods, bool set_idnums, method_comparator_func func) {
int length = methods->length();
if (length > 1) {
if (func == NULL) {
if (func == nullptr) {
func = method_comparator;
}
{
@ -1845,7 +1845,7 @@ bool CompressedLineNumberReadStream::read_pair() {
Bytecodes::Code Method::orig_bytecode_at(int bci) const {
BreakpointInfo* bp = method_holder()->breakpoints();
for (; bp != NULL; bp = bp->next()) {
for (; bp != nullptr; bp = bp->next()) {
if (bp->match(this, bci)) {
return bp->orig_bytecode();
}
@ -1860,7 +1860,7 @@ Bytecodes::Code Method::orig_bytecode_at(int bci) const {
void Method::set_orig_bytecode_at(int bci, Bytecodes::Code code) {
assert(code != Bytecodes::_breakpoint, "cannot patch breakpoints this way");
BreakpointInfo* bp = method_holder()->breakpoints();
for (; bp != NULL; bp = bp->next()) {
for (; bp != nullptr; bp = bp->next()) {
if (bp->match(this, bci)) {
bp->set_orig_bytecode(code);
// and continue, in case there is more than one
@ -1879,16 +1879,16 @@ void Method::set_breakpoint(int bci) {
static void clear_matches(Method* m, int bci) {
InstanceKlass* ik = m->method_holder();
BreakpointInfo* prev_bp = NULL;
BreakpointInfo* prev_bp = nullptr;
BreakpointInfo* next_bp;
for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = next_bp) {
for (BreakpointInfo* bp = ik->breakpoints(); bp != nullptr; bp = next_bp) {
next_bp = bp->next();
// bci value of -1 is used to delete all breakpoints in method m (ex: clear_all_breakpoint).
if (bci >= 0 ? bp->match(m, bci) : bp->match(m)) {
// do this first:
bp->clear(m);
// unhook it
if (prev_bp != NULL)
if (prev_bp != nullptr)
prev_bp->set_next(next_bp);
else
ik->set_breakpoints(next_bp);
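
clear_matches is the classic singly-linked-list removal with a trailing prev pointer, where nullptr distinguishes "unhook from the middle" from "replace the head". A self-contained sketch of the same shape:

struct Node { int bci; Node* next; };

// Remove every node matching 'bci'; pass -1 to remove all, as the VM code
// does for clear_all_breakpoints.
static void remove_matches(Node*& head, int bci) {
  Node* prev = nullptr;
  for (Node* cur = head; cur != nullptr; ) {
    Node* next = cur->next;
    if (bci < 0 || cur->bci == bci) {
      if (prev != nullptr) prev->next = next;  // unhook mid-list
      else                 head = next;        // removing the head
      delete cur;
    } else {
      prev = cur;
    }
    cur = next;
  }
}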
@ -1927,30 +1927,30 @@ void Method::clear_all_breakpoints() {
int Method::invocation_count() const {
MethodCounters* mcs = method_counters();
MethodData* mdo = method_data();
if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) ||
((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
if (((mcs != nullptr) ? mcs->invocation_counter()->carry() : false) ||
((mdo != nullptr) ? mdo->invocation_counter()->carry() : false)) {
return InvocationCounter::count_limit;
} else {
return ((mcs != NULL) ? mcs->invocation_counter()->count() : 0) +
((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
return ((mcs != nullptr) ? mcs->invocation_counter()->count() : 0) +
((mdo != nullptr) ? mdo->invocation_counter()->count() : 0);
}
}
int Method::backedge_count() const {
MethodCounters* mcs = method_counters();
MethodData* mdo = method_data();
if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) ||
((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
if (((mcs != nullptr) ? mcs->backedge_counter()->carry() : false) ||
((mdo != nullptr) ? mdo->backedge_counter()->carry() : false)) {
return InvocationCounter::count_limit;
} else {
return ((mcs != NULL) ? mcs->backedge_counter()->count() : 0) +
((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
return ((mcs != nullptr) ? mcs->backedge_counter()->count() : 0) +
((mdo != nullptr) ? mdo->backedge_counter()->count() : 0);
}
}
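
Both counts fold two optional sources together, treating a missing MethodCounters or MethodData as zero and saturating when either carry bit reports an overflow. The same logic as a small standalone function (field names hypothetical):

struct Counter { int count; bool carry; };

static int combined_count(const Counter* a, const Counter* b, int count_limit) {
  // Either carry bit set means the true total overflowed: report the limit.
  if ((a != nullptr && a->carry) || (b != nullptr && b->carry)) {
    return count_limit;
  }
  // Absent counters contribute zero.
  return (a != nullptr ? a->count : 0) + (b != nullptr ? b->count : 0);
}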
int Method::highest_comp_level() const {
const MethodCounters* mcs = method_counters();
if (mcs != NULL) {
if (mcs != nullptr) {
return mcs->highest_comp_level();
} else {
return CompLevel_none;
@ -1959,7 +1959,7 @@ int Method::highest_comp_level() const {
int Method::highest_osr_comp_level() const {
const MethodCounters* mcs = method_counters();
if (mcs != NULL) {
if (mcs != nullptr) {
return mcs->highest_osr_comp_level();
} else {
return CompLevel_none;
@ -1968,14 +1968,14 @@ int Method::highest_osr_comp_level() const {
void Method::set_highest_comp_level(int level) {
MethodCounters* mcs = method_counters();
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->set_highest_comp_level(level);
}
}
void Method::set_highest_osr_comp_level(int level) {
MethodCounters* mcs = method_counters();
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->set_highest_osr_comp_level(level);
}
}
@ -1989,7 +1989,7 @@ BreakpointInfo::BreakpointInfo(Method* m, int bci) {
_orig_bytecode = (Bytecodes::Code) *m->bcp_from(_bci);
if (_orig_bytecode == Bytecodes::_breakpoint)
_orig_bytecode = m->orig_bytecode_at(_bci);
_next = NULL;
_next = nullptr;
}
void BreakpointInfo::set(Method* method) {
@ -2049,7 +2049,7 @@ class JNIMethodBlockNode : public CHeapObj<mtClass> {
return;
}
}
if (_next == NULL) {
if (_next == nullptr) {
_next = new JNIMethodBlockNode(MAX2(num_addl_methods, min_block_size));
} else {
_next->ensure_methods(num_addl_methods);
@ -2071,7 +2071,7 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
}
Method** add_method(Method* m) {
for (JNIMethodBlockNode* b = _last_free; b != NULL; b = b->_next) {
for (JNIMethodBlockNode* b = _last_free; b != nullptr; b = b->_next) {
if (b->_top < b->_number_of_methods) {
// top points to the next free entry.
int i = b->_top;
@ -2093,17 +2093,17 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
b->_top++;
}
// need to allocate a next block.
if (b->_next == NULL) {
if (b->_next == nullptr) {
b->_next = _last_free = new JNIMethodBlockNode();
}
}
guarantee(false, "Should always allocate a free block");
return NULL;
return nullptr;
}
bool contains(Method** m) {
if (m == NULL) return false;
for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
if (m == nullptr) return false;
for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) {
if (b->_methods <= m && m < b->_methods + b->_number_of_methods) {
// This is a bit of extra checking, for two reasons. One is
// that contains() deals with pointers that are passed in by
@ -2133,9 +2133,9 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
// During class unloading the methods are cleared, which is different
// than freed.
void clear_all_methods() {
for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) {
for (int i = 0; i< b->_number_of_methods; i++) {
b->_methods[i] = NULL;
b->_methods[i] = nullptr;
}
}
}
@ -2143,7 +2143,7 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
int count_methods() {
// count all allocated methods
int count = 0;
for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) {
for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) {
for (int i = 0; i< b->_number_of_methods; i++) {
if (b->_methods[i] != _free_method) count++;
}
@ -2156,7 +2156,7 @@ class JNIMethodBlock : public CHeapObj<mtClass> {
// Something that can't be mistaken for an address or a markWord
Method* const JNIMethodBlock::_free_method = (Method*)55;
JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _top(0), _next(NULL) {
JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _top(0), _next(nullptr) {
_number_of_methods = MAX2(num_methods, min_block_size);
_methods = NEW_C_HEAP_ARRAY(Method*, _number_of_methods, mtInternal);
for (int i = 0; i < _number_of_methods; i++) {
@ -2169,7 +2169,7 @@ void Method::ensure_jmethod_ids(ClassLoaderData* cld, int capacity) {
// Also have to add the method to the list safely, which the lock
// protects as well.
MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag);
if (cld->jmethod_ids() == NULL) {
if (cld->jmethod_ids() == nullptr) {
cld->set_jmethod_ids(new JNIMethodBlock(capacity));
} else {
cld->jmethod_ids()->ensure_methods(capacity);
@ -2182,7 +2182,7 @@ jmethodID Method::make_jmethod_id(ClassLoaderData* cld, Method* m) {
// Also have to add the method to the list safely, which the lock
// protects as well.
assert(JmethodIdCreation_lock->owned_by_self(), "sanity check");
if (cld->jmethod_ids() == NULL) {
if (cld->jmethod_ids() == nullptr) {
cld->set_jmethod_ids(new JNIMethodBlock());
}
// jmethodID is a pointer to Method*
@ -2198,7 +2198,7 @@ jmethodID Method::jmethod_id() {
// InstanceKlass while creating the jmethodID cache.
void Method::destroy_jmethod_id(ClassLoaderData* cld, jmethodID m) {
Method** ptr = (Method**)m;
assert(cld->jmethod_ids() != NULL, "should have method handles");
assert(cld->jmethod_ids() != nullptr, "should have method handles");
cld->jmethod_ids()->destroy_method(ptr);
}
@ -2207,7 +2207,7 @@ void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* ne
// scratch method holder.
assert(resolve_jmethod_id(jmid)->method_holder()->class_loader()
== new_method->method_holder()->class_loader() ||
new_method->method_holder()->class_loader() == NULL, // allow Unsafe substitution
new_method->method_holder()->class_loader() == nullptr, // allow Unsafe substitution
"changing to a different class loader");
// Just change the method in place, jmethodID pointer doesn't change.
*((Method**)jmid) = new_method;
@ -2215,25 +2215,25 @@ void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* ne
bool Method::is_method_id(jmethodID mid) {
Method* m = resolve_jmethod_id(mid);
assert(m != NULL, "should be called with non-null method");
assert(m != nullptr, "should be called with non-null method");
InstanceKlass* ik = m->method_holder();
ClassLoaderData* cld = ik->class_loader_data();
if (cld->jmethod_ids() == NULL) return false;
if (cld->jmethod_ids() == nullptr) return false;
return (cld->jmethod_ids()->contains((Method**)mid));
}
Method* Method::checked_resolve_jmethod_id(jmethodID mid) {
if (mid == NULL) return NULL;
if (mid == nullptr) return nullptr;
Method* o = resolve_jmethod_id(mid);
if (o == NULL || o == JNIMethodBlock::_free_method) {
return NULL;
if (o == nullptr || o == JNIMethodBlock::_free_method) {
return nullptr;
}
// Method should otherwise be valid. Assert for testing.
assert(is_valid_method(o), "should be valid jmethodid");
// If the method's class holder object is unreferenced, but not yet marked as
// unloaded, we need to return NULL here too because after a safepoint, its memory
// unloaded, we need to return null here too because after a safepoint, its memory
// will be reclaimed.
return o->method_holder()->is_loader_alive() ? o : NULL;
return o->method_holder()->is_loader_alive() ? o : nullptr;
};
void Method::set_on_stack(const bool value) {
@ -2268,7 +2268,7 @@ bool Method::has_method_vptr(const void* ptr) {
// Check that this pointer is valid by checking that the vtbl pointer matches
bool Method::is_valid_method(const Method* m) {
if (m == NULL) {
if (m == nullptr) {
return false;
} else if ((intptr_t(m) & (wordSize-1)) != 0) {
// Quick sanity check on pointer.
@ -2318,7 +2318,7 @@ void Method::print_on(outputStream* st) const {
st->print_cr(" - i2i entry: " PTR_FORMAT, p2i(interpreter_entry()));
st->print( " - adapters: ");
AdapterHandlerEntry* a = ((Method*)this)->adapter();
if (a == NULL)
if (a == nullptr)
st->print_cr(PTR_FORMAT, p2i(a));
else
a->print_adapter_on(st);
@ -2328,7 +2328,7 @@ void Method::print_on(outputStream* st) const {
st->print_cr(" - code start: " PTR_FORMAT, p2i(code_base()));
st->print_cr(" - code end (excl): " PTR_FORMAT, p2i(code_base() + code_size()));
}
if (method_data() != NULL) {
if (method_data() != nullptr) {
st->print_cr(" - method data: " PTR_FORMAT, p2i(method_data()));
}
st->print_cr(" - checked ex length: %d", checked_exceptions_length());
@ -2366,7 +2366,7 @@ void Method::print_on(outputStream* st) const {
}
}
}
if (code() != NULL) {
if (code() != nullptr) {
st->print (" - compiled code: ");
code()->print_value_on(st);
}
@ -2399,7 +2399,7 @@ void Method::print_value_on(outputStream* st) const {
method_holder()->print_value_on(st);
if (WizardMode) st->print("#%d", _vtable_index);
if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals());
if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code());
if (WizardMode && code() != nullptr) st->print(" ((nmethod*)%p)", code());
}
// Verification
@ -2408,6 +2408,6 @@ void Method::verify_on(outputStream* st) {
guarantee(is_method(), "object must be method");
guarantee(constants()->is_constantPool(), "should be constant pool");
MethodData* md = method_data();
guarantee(md == NULL ||
guarantee(md == nullptr ||
md->is_methodData(), "should be method data");
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,9 +111,9 @@ class Method : public Metadata {
volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
// The entry point for calling both from and to compiled code is
// "_code->entry_point()". Because of tiered compilation and de-opt, this
// field can come and go. It can transition from NULL to not-null at any
// field can come and go. It can transition from null to not-null at any
// time (whenever a compile completes). It can transition from not-null to
// NULL only at safepoints (because of a de-opt).
// null only at safepoints (because of a de-opt).
CompiledMethod* volatile _code; // Points to the corresponding piece of native code
volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
@ -164,7 +164,7 @@ class Method : public Metadata {
void set_signature_index(int index) { constMethod()->set_signature_index(index); }
// generics support
Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : nullptr); }
int generic_signature_index() const { return constMethod()->generic_signature_index(); }
void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }
@ -232,7 +232,7 @@ class Method : public Metadata {
// Only mutated by VM thread.
u2 number_of_breakpoints() const {
MethodCounters* mcs = method_counters();
if (mcs == NULL) {
if (mcs == nullptr) {
return 0;
} else {
return mcs->number_of_breakpoints();
@ -240,20 +240,20 @@ class Method : public Metadata {
}
void incr_number_of_breakpoints(Thread* current) {
MethodCounters* mcs = get_method_counters(current);
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->incr_number_of_breakpoints();
}
}
void decr_number_of_breakpoints(Thread* current) {
MethodCounters* mcs = get_method_counters(current);
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->decr_number_of_breakpoints();
}
}
// Initialization only
void clear_number_of_breakpoints() {
MethodCounters* mcs = method_counters();
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->clear_number_of_breakpoints();
}
}
@ -296,7 +296,7 @@ class Method : public Metadata {
// Count of times method was exited via exception while interpreting
void interpreter_throwout_increment(Thread* current) {
MethodCounters* mcs = get_method_counters(current);
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->interpreter_throwout_increment();
}
}
@ -304,7 +304,7 @@ class Method : public Metadata {
int interpreter_throwout_count() const {
MethodCounters* mcs = method_counters();
if (mcs == NULL) {
if (mcs == nullptr) {
return 0;
} else {
return mcs->interpreter_throwout_count();
@ -339,7 +339,7 @@ class Method : public Metadata {
{ return constMethod()->exception_table_start(); }
// Finds the first entry point bci of an exception handler for an
// exception of klass ex_klass thrown at throw_bci. A value of NULL
// exception of klass ex_klass thrown at throw_bci. A value of null
// for ex_klass indicates that the exception klass is not known; in
// this case it matches any constraint class. Returns -1 if the
// exception cannot be handled in this method. The handler
@ -370,38 +370,38 @@ class Method : public Metadata {
}
void clear_method_counters() {
_method_counters = NULL;
_method_counters = nullptr;
}
bool init_method_counters(MethodCounters* counters);
int prev_event_count() const {
MethodCounters* mcs = method_counters();
return mcs == NULL ? 0 : mcs->prev_event_count();
return mcs == nullptr ? 0 : mcs->prev_event_count();
}
void set_prev_event_count(int count) {
MethodCounters* mcs = method_counters();
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->set_prev_event_count(count);
}
}
jlong prev_time() const {
MethodCounters* mcs = method_counters();
return mcs == NULL ? 0 : mcs->prev_time();
return mcs == nullptr ? 0 : mcs->prev_time();
}
void set_prev_time(jlong time) {
MethodCounters* mcs = method_counters();
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->set_prev_time(time);
}
}
float rate() const {
MethodCounters* mcs = method_counters();
return mcs == NULL ? 0 : mcs->rate();
return mcs == nullptr ? 0 : mcs->rate();
}
void set_rate(float rate) {
MethodCounters* mcs = method_counters();
if (mcs != NULL) {
if (mcs != nullptr) {
mcs->set_rate(rate);
}
}
@ -506,7 +506,7 @@ public:
};
address native_function() const { return *(native_function_addr()); }
// Must specify a real function (not NULL).
// Must specify a real function (not null).
// Use clear_native_function() to unregister.
void set_native_function(address function, bool post_event_flag);
bool has_native_function() const;
@ -773,7 +773,7 @@ public:
// once created they are never reclaimed. The methods to which they refer,
// however, can be GC'ed away if the class is unloaded or if the method is
// made obsolete or deleted -- in these cases, the jmethodID
// refers to NULL (as is the case for any weak reference).
// refers to null (as is the case for any weak reference).
static jmethodID make_jmethod_id(ClassLoaderData* cld, Method* mh);
static void destroy_jmethod_id(ClassLoaderData* cld, jmethodID mid);
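
The weak-reference behaviour described above follows from a jmethodID being the address of a slot that holds the Method* (resolve_jmethod_id below simply dereferences it): clearing writes nullptr through the slot, and every outstanding ID observes that. A toy analogue in standard C++ (names hypothetical):

#include <vector>

struct Method;               // opaque stand-in
using method_id = Method**;  // an ID is the address of a slot, never reclaimed

struct IdRegistry {
  std::vector<Method**> slots;

  method_id make_id(Method* m) {
    slots.push_back(new Method*(m));  // slot lives forever, like a jmethodID
    return slots.back();
  }
  static Method* resolve(method_id id) {
    return (id == nullptr) ? nullptr : *id;  // cleared IDs resolve to nullptr
  }
  void clear(method_id id) {
    *id = nullptr;  // the ID stays valid as a handle; its target is gone
  }
};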
@ -784,14 +784,14 @@ public:
// Use resolve_jmethod_id() in situations where the caller is expected
// to provide a valid jmethodID; the only sanity checks are in asserts;
// result guaranteed not to be NULL.
// result guaranteed not to be null.
inline static Method* resolve_jmethod_id(jmethodID mid) {
assert(mid != NULL, "JNI method id should not be null");
assert(mid != nullptr, "JNI method id should not be null");
return *((Method**)mid);
}
// Use checked_resolve_jmethod_id() in situations where the caller
// should provide a valid jmethodID, but might not. NULL is returned
// should provide a valid jmethodID, but might not. Null is returned
// when the jmethodID does not refer to a valid method.
static Method* checked_resolve_jmethod_id(jmethodID mid);
@ -805,7 +805,7 @@ public:
// Get this method's jmethodID -- allocate if it doesn't exist
jmethodID jmethod_id();
// Lookup the jmethodID for this method. Return NULL if not found.
// Lookup the jmethodID for this method. Return null if not found.
// NOTE that this function can be called from a signal handler
// (see AsyncGetCallTrace support for Forte Analyzer) and this
// needs to be async-safe. No allocation should be done and
@ -902,7 +902,7 @@ public:
// On-stack replacement support
bool has_osr_nmethod(int level, bool match_level) {
return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != nullptr;
}
int mark_osr_nmethods() {
@ -937,7 +937,7 @@ public:
public:
MethodCounters* get_method_counters(Thread* current) {
if (_method_counters == NULL) {
if (_method_counters == nullptr) {
build_method_counters(current, this);
}
return _method_counters;
@ -976,7 +976,7 @@ public:
typedef int (*method_comparator_func)(Method* a, Method* b);
// Helper routine used for method sorting
static void sort_methods(Array<Method*>* methods, bool set_idnums = true, method_comparator_func func = NULL);
static void sort_methods(Array<Method*>* methods, bool set_idnums = true, method_comparator_func func = nullptr);
// Deallocation function for redefine classes or if an error occurs
void deallocate_contents(ClassLoaderData* loader_data);
@ -987,7 +987,7 @@ public:
InstanceKlass* holder = method_holder();
Method* new_method = holder->method_with_idnum(orig_method_idnum());
assert(new_method != NULL, "method_with_idnum() should not be NULL");
assert(new_method != nullptr, "method_with_idnum() should not be null");
assert(this != new_method, "sanity check");
return new_method;
}
@ -1122,7 +1122,7 @@ class ExceptionTable : public StackObj {
_table = m->exception_table_start();
_length = m->exception_table_length();
} else {
_table = NULL;
_table = nullptr;
_length = 0;
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,7 +83,7 @@ inline void CompressedLineNumberWriteStream::write_pair(int bci, int line) {
write_pair_inline(bci, line);
}
inline bool Method::has_compiled_code() const { return code() != NULL; }
inline bool Method::has_compiled_code() const { return code() != nullptr; }
inline bool Method::is_empty_method() const {
return code_size() == 1


@ -88,7 +88,7 @@ void DataLayout::clean_weak_klass_links(bool always_clean) {
// Constructor for invalid ProfileData.
ProfileData::ProfileData() {
_data = NULL;
_data = nullptr;
}
char* ProfileData::print_data_on_helper(const MethodData* md) const {
@ -118,7 +118,7 @@ char* ProfileData::print_data_on_helper(const MethodData* md) const {
fatal("unexpected tag %d", dp->tag());
}
}
return NULL;
return nullptr;
}
void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
@ -135,7 +135,7 @@ void ProfileData::print_shared(outputStream* st, const char* name, const char* e
char buf[100];
st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
}
if (extra != NULL) {
if (extra != nullptr) {
st->print("%s", extra);
}
int flags = data()->flags();
@ -317,8 +317,8 @@ void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
for (int i = 0; i < _number_of_entries; i++) {
intptr_t p = type(i);
Klass* k = (Klass*)klass_part(p);
if (k != NULL && (always_clean || !k->is_loader_alive())) {
set_type(i, with_status((Klass*)NULL, p));
if (k != nullptr && (always_clean || !k->is_loader_alive())) {
set_type(i, with_status((Klass*)nullptr, p));
}
}
}
@ -326,8 +326,8 @@ void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
intptr_t p = type();
Klass* k = (Klass*)klass_part(p);
if (k != NULL && (always_clean || !k->is_loader_alive())) {
set_type(with_status((Klass*)NULL, p));
if (k != nullptr && (always_clean || !k->is_loader_alive())) {
set_type(with_status((Klass*)nullptr, p));
}
}
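
with_status packs profiling flags into the low bits of the Klass pointer word, which is why clearing a dead klass is spelled with_status((Klass*)nullptr, p): the pointer part becomes nullptr while the status bits survive. A minimal sketch of that encoding (mask width assumed for illustration, not taken from the VM):

#include <cstdint>
using std::intptr_t;

struct Klass;

static const intptr_t STATUS_MASK = 0x3;  // low bits assumed free due to alignment

static intptr_t with_status(Klass* k, intptr_t old_word) {
  return reinterpret_cast<intptr_t>(k) | (old_word & STATUS_MASK);
}

static Klass* klass_part(intptr_t word) {
  return reinterpret_cast<Klass*>(word & ~STATUS_MASK);
}
// with_status(nullptr, word) keeps the flags but makes klass_part() return
// nullptr -- exactly the "clear the weak klass link" effect above.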
@ -406,7 +406,7 @@ void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) con
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
for (uint row = 0; row < row_limit(); row++) {
Klass* p = receiver(row);
if (p != NULL && (always_clean || !p->is_loader_alive())) {
if (p != nullptr && (always_clean || !p->is_loader_alive())) {
clear_row(row);
}
}
@ -416,7 +416,7 @@ void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
uint row;
int entries = 0;
for (row = 0; row < row_limit(); row++) {
if (receiver(row) != NULL) entries++;
if (receiver(row) != nullptr) entries++;
}
#if INCLUDE_JVMCI
st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
@ -425,12 +425,12 @@ void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
#endif
int total = count();
for (row = 0; row < row_limit(); row++) {
if (receiver(row) != NULL) {
if (receiver(row) != nullptr) {
total += receiver_count(row);
}
}
for (row = 0; row < row_limit(); row++) {
if (receiver(row) != NULL) {
if (receiver(row) != nullptr) {
tab(st);
receiver(row)->print_value_on(st);
st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
@ -805,7 +805,7 @@ void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}
FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(NULL) {
FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
memcpy(data(), speculation, speculation_len);
}
@ -814,15 +814,15 @@ static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation**
jlong head = (jlong)(address) *failed_speculations_address;
if ((head & 0x1) == 0x1) {
stringStream st;
if (nm != NULL) {
if (nm != nullptr) {
st.print("%d", nm->compile_id());
Method* method = nm->method();
st.print_raw("{");
if (method != NULL) {
if (method != nullptr) {
method->print_name(&st);
} else {
const char* jvmci_name = nm->jvmci_name();
if (jvmci_name != NULL) {
if (jvmci_name != nullptr) {
st.print_raw(jvmci_name);
}
}
@ -835,10 +835,10 @@ static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation**
}
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
assert(failed_speculations_address != NULL, "must be");
assert(failed_speculations_address != nullptr, "must be");
size_t fs_size = sizeof(FailedSpeculation) + speculation_len;
FailedSpeculation* fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
if (fs == NULL) {
if (fs == nullptr) {
// no memory -> ignore failed speculation
return false;
}
@ -848,9 +848,9 @@ bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation**
FailedSpeculation** cursor = failed_speculations_address;
do {
if (*cursor == NULL) {
FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) NULL, fs);
if (old_fs == NULL) {
if (*cursor == nullptr) {
FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
if (old_fs == nullptr) {
// Successfully appended fs to end of the list
return true;
}
@ -862,9 +862,9 @@ bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation**
}
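
The loop above is a lock-free append to a singly linked list: walk to the tail, try to CAS the null next-link to the new node, and if another thread wins the race, continue walking from the node it installed. A self-contained sketch of the same idiom with std::atomic (types and names are illustrative, not HotSpot's):

#include <atomic>

struct Node {
  int payload;
  std::atomic<Node*> next{nullptr};
};

// Append 'n' to the list rooted at '*head'. Exactly one CAS succeeds
// per link; a loser sees the winner's node and keeps walking.
void append(std::atomic<Node*>* head, Node* n) {
  std::atomic<Node*>* cursor = head;
  for (;;) {
    Node* cur = cursor->load(std::memory_order_acquire);
    if (cur == nullptr) {
      Node* expected = nullptr;
      if (cursor->compare_exchange_strong(expected, n)) {
        return;                   // linked at the tail
      }
      cursor = &expected->next;   // lost the race; 'expected' is the winner
    } else {
      cursor = &cur->next;
    }
  }
}

This stays safe only because nodes are never unlinked concurrently; FailedSpeculation entries are likewise freed wholesale by free_failed_speculations() below.
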
void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
assert(failed_speculations_address != NULL, "must be");
assert(failed_speculations_address != nullptr, "must be");
FailedSpeculation* fs = *failed_speculations_address;
while (fs != NULL) {
while (fs != nullptr) {
FailedSpeculation* next = fs->next();
delete fs;
fs = next;
@ -1099,7 +1099,7 @@ int MethodData::initialize_data(BytecodeStream* stream,
// Get the data at an arbitrary (sort of) data index.
ProfileData* MethodData::data_at(int data_index) const {
if (out_of_bounds(data_index)) {
return NULL;
return nullptr;
}
DataLayout* data_layout = data_layout_at(data_index);
return data_layout->data_in();
@ -1144,7 +1144,7 @@ ProfileData* DataLayout::data_in() {
case DataLayout::no_tag:
default:
ShouldNotReachHere();
return NULL;
return nullptr;
case DataLayout::bit_data_tag:
return new BitData(this);
case DataLayout::counter_data_tag:
@ -1186,7 +1186,7 @@ DataLayout* MethodData::next_data_layout(DataLayout* current) const {
int current_index = dp_to_di((address)current);
int next_index = current_index + current->size_in_bytes();
if (out_of_bounds(next_index)) {
return NULL;
return nullptr;
}
DataLayout* next = data_layout_at(next_index);
return next;
@ -1203,7 +1203,7 @@ void MethodData::post_initialize(BytecodeStream* stream) {
data->post_initialize(stream, this);
}
if (_parameters_type_data_di != no_parameters) {
parameters_type_data()->post_initialize(NULL, this);
parameters_type_data()->post_initialize(nullptr, this);
}
}
@ -1310,7 +1310,7 @@ void MethodData::init() {
#if INCLUDE_JVMCI
_jvmci_ir_size = 0;
_failed_speculations = NULL;
_failed_speculations = nullptr;
#endif
#if INCLUDE_RTM_OPT
@ -1345,11 +1345,11 @@ bool MethodData::is_mature() const {
address MethodData::bci_to_dp(int bci) {
ResourceMark rm;
DataLayout* data = data_layout_before(bci);
DataLayout* prev = NULL;
DataLayout* prev = nullptr;
for ( ; is_valid(data); data = next_data_layout(data)) {
if (data->bci() >= bci) {
if (data->bci() == bci) set_hint_di(dp_to_di((address)data));
else if (prev != NULL) set_hint_di(dp_to_di((address)prev));
else if (prev != nullptr) set_hint_di(dp_to_di((address)prev));
return (address)data;
}
prev = data;
@ -1357,7 +1357,7 @@ address MethodData::bci_to_dp(int bci) {
return (address)limit_data_position();
}
// Translate a bci to its corresponding data, or NULL.
// Translate a bci to its corresponding data, or null.
ProfileData* MethodData::bci_to_data(int bci) {
DataLayout* data = data_layout_before(bci);
for ( ; is_valid(data); data = next_data_layout(data)) {
@ -1368,7 +1368,7 @@ ProfileData* MethodData::bci_to_data(int bci) {
break;
}
}
return bci_to_extra_data(bci, NULL, false);
return bci_to_extra_data(bci, nullptr, false);
}
DataLayout* MethodData::next_extra(DataLayout* dp) {
@ -1396,25 +1396,25 @@ ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout
// since the data structure is monotonic.
switch(dp->tag()) {
case DataLayout::no_tag:
return NULL;
return nullptr;
case DataLayout::arg_info_data_tag:
dp = end;
return NULL; // ArgInfoData is at the end of extra data section.
return nullptr; // ArgInfoData is at the end of extra data section.
case DataLayout::bit_data_tag:
if (m == NULL && dp->bci() == bci) {
if (m == nullptr && dp->bci() == bci) {
return new BitData(dp);
}
break;
case DataLayout::speculative_trap_data_tag:
if (m != NULL) {
if (m != nullptr) {
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
// data->method() may be null in case of a concurrent
// allocation. Maybe it's for the same method. Try to use that
// entry in that case.
if (dp->bci() == bci) {
if (data->method() == NULL) {
if (data->method() == nullptr) {
assert(concurrent, "impossible because no concurrent allocation");
return NULL;
return nullptr;
} else if (data->method() == m) {
return data;
}
@ -1425,11 +1425,11 @@ ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout
fatal("unexpected tag %d", dp->tag());
}
}
return NULL;
return nullptr;
}
// Translate a bci to its corresponding extra data, or NULL.
// Translate a bci to its corresponding extra data, or null.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
// This code assumes an entry for a SpeculativeTrapData is 2 cells
assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
@ -1437,8 +1437,8 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi
"code needs to be adjusted");
// Do not create one of these if method has been redefined.
if (m != NULL && m->is_old()) {
return NULL;
if (m != nullptr && m->is_old()) {
return nullptr;
}
DataLayout* dp = extra_data_base();
@ -1448,7 +1448,7 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi
// all entries have the same size and non atomic concurrent
// allocation would result in a corrupted extra data space.
ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
if (result != NULL) {
if (result != nullptr) {
return result;
}
@ -1457,16 +1457,16 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi
// Check again now that we have the lock. Another thread may
// have added extra data entries.
ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
if (result != NULL || dp >= end) {
if (result != nullptr || dp >= end) {
return result;
}
assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
// SpeculativeTrapData is 2 slots. Make sure we have room.
if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
return NULL;
if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
return nullptr;
}
DataLayout temp;
temp.initialize(tag, bci, 0);
@ -1482,7 +1482,7 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi
return data;
}
}
return NULL;
return nullptr;
}
ArgInfoData *MethodData::arg_info() {
@ -1492,7 +1492,7 @@ ArgInfoData *MethodData::arg_info() {
if (dp->tag() == DataLayout::arg_info_data_tag)
return new ArgInfoData(dp);
}
return NULL;
return nullptr;
}
// Printing
@ -1737,7 +1737,7 @@ void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
case DataLayout::speculative_trap_data_tag: {
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
Method* m = data->method();
assert(m != NULL, "should have a method");
assert(m != nullptr, "should have a method");
if (!cl->is_live(m)) {
// "shift" accumulates the number of cells for dead
// SpeculativeTrapData entries that have been seen so
@ -1781,7 +1781,7 @@ void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
case DataLayout::speculative_trap_data_tag: {
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
Method* m = data->method();
assert(m != NULL && cl->is_live(m), "Method should exist");
assert(m != nullptr && cl->is_live(m), "Method should exist");
break;
}
case DataLayout::bit_data_tag:
@ -1804,7 +1804,7 @@ void MethodData::clean_method_data(bool always_clean) {
data->clean_weak_klass_links(always_clean);
}
ParametersTypeData* parameters = parameters_type_data();
if (parameters != NULL) {
if (parameters != nullptr) {
parameters->clean_weak_klass_links(always_clean);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -401,59 +401,59 @@ public:
BitData* as_BitData() const {
assert(is_BitData(), "wrong type");
return is_BitData() ? (BitData*) this : NULL;
return is_BitData() ? (BitData*) this : nullptr;
}
CounterData* as_CounterData() const {
assert(is_CounterData(), "wrong type");
return is_CounterData() ? (CounterData*) this : NULL;
return is_CounterData() ? (CounterData*) this : nullptr;
}
JumpData* as_JumpData() const {
assert(is_JumpData(), "wrong type");
return is_JumpData() ? (JumpData*) this : NULL;
return is_JumpData() ? (JumpData*) this : nullptr;
}
ReceiverTypeData* as_ReceiverTypeData() const {
assert(is_ReceiverTypeData(), "wrong type");
return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
return is_ReceiverTypeData() ? (ReceiverTypeData*)this : nullptr;
}
VirtualCallData* as_VirtualCallData() const {
assert(is_VirtualCallData(), "wrong type");
return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
return is_VirtualCallData() ? (VirtualCallData*)this : nullptr;
}
RetData* as_RetData() const {
assert(is_RetData(), "wrong type");
return is_RetData() ? (RetData*) this : NULL;
return is_RetData() ? (RetData*) this : nullptr;
}
BranchData* as_BranchData() const {
assert(is_BranchData(), "wrong type");
return is_BranchData() ? (BranchData*) this : NULL;
return is_BranchData() ? (BranchData*) this : nullptr;
}
ArrayData* as_ArrayData() const {
assert(is_ArrayData(), "wrong type");
return is_ArrayData() ? (ArrayData*) this : NULL;
return is_ArrayData() ? (ArrayData*) this : nullptr;
}
MultiBranchData* as_MultiBranchData() const {
assert(is_MultiBranchData(), "wrong type");
return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
return is_MultiBranchData() ? (MultiBranchData*)this : nullptr;
}
ArgInfoData* as_ArgInfoData() const {
assert(is_ArgInfoData(), "wrong type");
return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
return is_ArgInfoData() ? (ArgInfoData*)this : nullptr;
}
CallTypeData* as_CallTypeData() const {
assert(is_CallTypeData(), "wrong type");
return is_CallTypeData() ? (CallTypeData*)this : NULL;
return is_CallTypeData() ? (CallTypeData*)this : nullptr;
}
VirtualCallTypeData* as_VirtualCallTypeData() const {
assert(is_VirtualCallTypeData(), "wrong type");
return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : nullptr;
}
ParametersTypeData* as_ParametersTypeData() const {
assert(is_ParametersTypeData(), "wrong type");
return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
return is_ParametersTypeData() ? (ParametersTypeData*)this : nullptr;
}
SpeculativeTrapData* as_SpeculativeTrapData() const {
assert(is_SpeculativeTrapData(), "wrong type");
return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL;
return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : nullptr;
}
@ -470,7 +470,7 @@ public:
// translation here, and the required translators are in the ci subclasses.
virtual void translate_from(const ProfileData* data) {}
virtual void print_data_on(outputStream* st, const char* extra = NULL) const {
virtual void print_data_on(outputStream* st, const char* extra = nullptr) const {
ShouldNotReachHere();
}
@ -533,7 +533,7 @@ public:
return cell_offset(bit_cell_count);
}
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// CounterData
@ -583,7 +583,7 @@ public:
set_int_at(count_off, count);
}
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// JumpData
@ -656,7 +656,7 @@ public:
// Specific initialization.
void post_initialize(BytecodeStream* stream, MethodData* mdo);
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// Entries in a ProfileData object to record types: it can either be
@ -714,10 +714,10 @@ public:
if (!is_type_none(k) &&
!is_type_unknown(k)) {
Klass* res = (Klass*)klass_part(k);
assert(res != NULL, "invalid");
assert(res != nullptr, "invalid");
return res;
} else {
return NULL;
return nullptr;
}
}
@ -738,7 +738,7 @@ protected:
const int _base_off;
TypeEntries(int base_off)
: _pd(NULL), _base_off(base_off) {}
: _pd(nullptr), _base_off(base_off) {}
void set_intptr_at(int index, intptr_t value) {
_pd->set_intptr_at(index, value);
@ -1074,7 +1074,7 @@ public:
}
}
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// ReceiverTypeData
@ -1139,7 +1139,7 @@ public:
assert(row < row_limit(), "oob");
Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
assert(recv == NULL || recv->is_klass(), "wrong type");
assert(recv == nullptr || recv->is_klass(), "wrong type");
return recv;
}
@ -1177,7 +1177,7 @@ public:
// We do sorting a profiling info (ciCallProfile) for compilation.
//
set_count(0);
set_receiver(row, NULL);
set_receiver(row, nullptr);
set_receiver_count(row, 0);
#if INCLUDE_JVMCI
if (!this->is_VirtualCallData()) {
@ -1214,7 +1214,7 @@ public:
virtual void clean_weak_klass_links(bool always_clean);
void print_receiver_data_on(outputStream* st) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// VirtualCallData
@ -1246,7 +1246,7 @@ public:
}
void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// VirtualCallTypeData
@ -1378,7 +1378,7 @@ public:
}
}
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// RetData
@ -1470,7 +1470,7 @@ public:
// Specific initialization.
void post_initialize(BytecodeStream* stream, MethodData* mdo);
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// BranchData
@ -1534,7 +1534,7 @@ public:
// Specific initialization.
void post_initialize(BytecodeStream* stream, MethodData* mdo);
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// ArrayData
@ -1693,7 +1693,7 @@ public:
// Specific initialization.
void post_initialize(BytecodeStream* stream, MethodData* mdo);
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
class ArgInfoData : public ArrayData {
@ -1718,7 +1718,7 @@ public:
array_set_int_at(arg, val);
}
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// ParametersTypeData
@ -1777,7 +1777,7 @@ public:
_parameters.clean_weak_klass_links(always_clean);
}
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
static ByteSize stack_slot_offset(int i) {
return cell_offset(stack_slot_local_offset(i));
@ -1847,7 +1847,7 @@ public:
return cell_offset(speculative_trap_method);
}
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
// MethodData*
@ -1883,7 +1883,7 @@ public:
// interpretation, when a bytecode is encountered that has profile data
// associated with it, the entry pointed to by mdp is updated, then the
// mdp is adjusted to point to the next appropriate DataLayout. If mdp
// is NULL to begin with, the interpreter assumes that the current method
// is null to begin with, the interpreter assumes that the current method
// is not (yet) being profiled.
//
// In MethodData* parlance, "dp" is a "data pointer", the actual address
@ -2132,7 +2132,7 @@ private:
DataLayout* data_layout_before(int bci) {
// avoid SEGV on this edge case
if (data_size() == 0)
return NULL;
return nullptr;
DataLayout* layout = data_layout_at(hint_di());
if (layout->bci() <= bci)
return layout;
@ -2283,7 +2283,7 @@ public:
intx arg_stack() { return _arg_stack; }
intx arg_returned() { return _arg_returned; }
uint arg_modified(int a) { ArgInfoData *aid = arg_info();
assert(aid != NULL, "arg_info must be not null");
assert(aid != nullptr, "arg_info must be not null");
assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
return aid->arg_modified(a); }
@ -2292,7 +2292,7 @@ public:
void set_arg_stack(intx v) { _arg_stack = v; }
void set_arg_returned(intx v) { _arg_returned = v; }
void set_arg_modified(int a, uint v) { ArgInfoData *aid = arg_info();
assert(aid != NULL, "arg_info must be not null");
assert(aid != nullptr, "arg_info must be not null");
assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
aid->set_arg_modified(a, v); }
@ -2308,7 +2308,7 @@ public:
int parameters_size_in_bytes() const {
ParametersTypeData* param = parameters_type_data();
return param == NULL ? 0 : param->size_in_bytes();
return param == nullptr ? 0 : param->size_in_bytes();
}
// Accessors
@ -2321,8 +2321,8 @@ public:
ProfileData* first_data() const { return data_at(first_di()); }
ProfileData* next_data(ProfileData* current) const;
DataLayout* next_data_layout(DataLayout* current) const;
bool is_valid(ProfileData* current) const { return current != NULL; }
bool is_valid(DataLayout* current) const { return current != NULL; }
bool is_valid(ProfileData* current) const { return current != nullptr; }
bool is_valid(DataLayout* current) const { return current != nullptr; }
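
These accessors are what a typical scan of the profile looks like in practice: start at first_data() and advance with next_data() until is_valid() returns false. A sketch of that loop (HotSpot-internal, so not standalone; 'mdo' stands for some MethodData*):

for (ProfileData* data = mdo->first_data();
     mdo->is_valid(data);
     data = mdo->next_data(data)) {
  int bci = data->bci();   // the bytecode index this record profiles
  // ... inspect 'data' ...
}
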
// Convert a dp (data pointer) to a di (data index).
int dp_to_di(address dp) const {
@ -2335,30 +2335,30 @@ public:
return dp_to_di(bci_to_dp(bci));
}
// Get the data at an arbitrary bci, or NULL if there is none.
// Get the data at an arbitrary bci, or null if there is none.
ProfileData* bci_to_data(int bci);
// Same, but try to create an extra_data record if one is needed:
ProfileData* allocate_bci_to_data(int bci, Method* m) {
ProfileData* data = NULL;
// If m not NULL, try to allocate a SpeculativeTrapData entry
if (m == NULL) {
ProfileData* data = nullptr;
// If m not null, try to allocate a SpeculativeTrapData entry
if (m == nullptr) {
data = bci_to_data(bci);
}
if (data != NULL) {
if (data != nullptr) {
return data;
}
data = bci_to_extra_data(bci, m, true);
if (data != NULL) {
if (data != nullptr) {
return data;
}
// If SpeculativeTrapData allocation fails try to allocate a
// regular entry
data = bci_to_data(bci);
if (data != NULL) {
if (data != nullptr) {
return data;
}
return bci_to_extra_data(bci, NULL, true);
return bci_to_extra_data(bci, nullptr, true);
}
// Add a handful of extra data records, for trap tracking.
@ -2409,7 +2409,7 @@ public:
// Return pointer to area dedicated to parameters in MDO
ParametersTypeData* parameters_type_data() const {
assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : nullptr;
}
int parameters_type_data_di() const {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,26 +58,26 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da
int n, Klass* element_klass, TRAPS) {
// Eagerly allocate the direct array supertype.
Klass* super_klass = NULL;
Klass* super_klass = nullptr;
if (!Universe::is_bootstrapping() || vmClasses::Object_klass_loaded()) {
Klass* element_super = element_klass->super();
if (element_super != NULL) {
if (element_super != nullptr) {
// The element type has a direct super. E.g., String[] has direct super of Object[].
super_klass = element_super->array_klass_or_null();
bool supers_exist = super_klass != NULL;
bool supers_exist = super_klass != nullptr;
// Also, see if the element has secondary supertypes.
// We need an array type for each.
const Array<Klass*>* element_supers = element_klass->secondary_supers();
for( int i = element_supers->length()-1; i >= 0; i-- ) {
Klass* elem_super = element_supers->at(i);
if (elem_super->array_klass_or_null() == NULL) {
if (elem_super->array_klass_or_null() == nullptr) {
supers_exist = false;
break;
}
}
if (!supers_exist) {
// Oops. Not allocated yet. Back out, allocate it, and retry.
Klass* ek = NULL;
Klass* ek = nullptr;
{
MutexUnlocker mu(MultiArray_lock);
super_klass = element_super->array_klass(CHECK_NULL);
@ -97,7 +97,7 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da
}
// Create type name for klass.
Symbol* name = NULL;
Symbol* name = nullptr;
{
ResourceMark rm(THREAD);
char *name_str = element_klass->name()->as_C_string();
@ -121,13 +121,13 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da
ObjArrayKlass* oak = ObjArrayKlass::allocate(loader_data, n, element_klass, name, CHECK_NULL);
ModuleEntry* module = oak->module();
assert(module != NULL, "No module entry for array");
assert(module != nullptr, "No module entry for array");
// Call complete_create_array_klass after all instance variables has been initialized.
ArrayKlass::complete_create_array_klass(oak, super_klass, module, CHECK_NULL);
// Add all classes to our internal class loader list here,
// including classes in the bootstrap (NULL) class loader.
// including classes in the bootstrap (null) class loader.
// Do this step after creating the mirror so that if the
// mirror creation fails, loaded_classes_do() doesn't find
// an array class without a mirror.
@ -146,7 +146,7 @@ ObjArrayKlass::ObjArrayKlass(int n, Klass* element_klass, Symbol* name) : ArrayK
} else {
bk = element_klass;
}
assert(bk != NULL && (bk->is_instance_klass() || bk->is_typeArray_klass()), "invalid bottom klass");
assert(bk != nullptr && (bk->is_instance_klass() || bk->is_typeArray_klass()), "invalid bottom klass");
set_bottom_klass(bk);
set_class_loader_data(bk->class_loader_data());
@ -291,17 +291,17 @@ void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
if (UseCompressedOops) {
size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<narrowOop>(src_pos);
size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<narrowOop>(dst_pos);
assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(s, src_offset, NULL) ==
assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(s, src_offset, nullptr) ==
objArrayOop(s)->obj_at_addr<narrowOop>(src_pos), "sanity");
assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(d, dst_offset, NULL) ==
assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(d, dst_offset, nullptr) ==
objArrayOop(d)->obj_at_addr<narrowOop>(dst_pos), "sanity");
do_copy(s, src_offset, d, dst_offset, length, CHECK);
} else {
size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(src_pos);
size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(dst_pos);
assert(arrayOopDesc::obj_offset_to_raw<oop>(s, src_offset, NULL) ==
assert(arrayOopDesc::obj_offset_to_raw<oop>(s, src_offset, nullptr) ==
objArrayOop(s)->obj_at_addr<oop>(src_pos), "sanity");
assert(arrayOopDesc::obj_offset_to_raw<oop>(d, dst_offset, NULL) ==
assert(arrayOopDesc::obj_offset_to_raw<oop>(d, dst_offset, nullptr) ==
objArrayOop(d)->obj_at_addr<oop>(dst_pos), "sanity");
do_copy(s, src_offset, d, dst_offset, length, CHECK);
}
@ -315,7 +315,7 @@ Klass* ObjArrayKlass::array_klass(int n, TRAPS) {
if (dim == n) return this;
// lock-free read needs acquire semantics
if (higher_dimension_acquire() == NULL) {
if (higher_dimension_acquire() == nullptr) {
ResourceMark rm(THREAD);
{
@ -323,7 +323,7 @@ Klass* ObjArrayKlass::array_klass(int n, TRAPS) {
MutexLocker mu(THREAD, MultiArray_lock);
// Check if another thread beat us
if (higher_dimension() == NULL) {
if (higher_dimension() == nullptr) {
// Create multi-dim klass object and link them together
Klass* k =
@ -349,8 +349,8 @@ Klass* ObjArrayKlass::array_klass_or_null(int n) {
if (dim == n) return this;
// lock-free read needs acquire semantics
if (higher_dimension_acquire() == NULL) {
return NULL;
if (higher_dimension_acquire() == nullptr) {
return nullptr;
}
ObjArrayKlass *ak = ObjArrayKlass::cast(higher_dimension());
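
Both array_klass() and array_klass_or_null() follow the double-checked locking pattern: an acquire-load fast path (higher_dimension_acquire()), then a re-check under MultiArray_lock before the new klass is published. A generic sketch of the same pattern with standard C++ primitives (HotSpot itself uses its own Mutex and ordered accessors):

#include <atomic>
#include <mutex>

std::atomic<int*> g_cache{nullptr};
std::mutex g_lock;

int* get_or_create() {
  // Fast path: acquire pairs with the release below, so a non-null
  // pointer implies a fully constructed object.
  int* p = g_cache.load(std::memory_order_acquire);
  if (p != nullptr) return p;

  std::lock_guard<std::mutex> guard(g_lock);
  p = g_cache.load(std::memory_order_relaxed);   // re-check under the lock
  if (p == nullptr) {
    p = new int(42);                             // construct first
    g_cache.store(p, std::memory_order_release); // then publish
  }
  return p;
}
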
@ -375,15 +375,15 @@ bool ObjArrayKlass::can_be_primary_super_slow() const {
GrowableArray<Klass*>* ObjArrayKlass::compute_secondary_supers(int num_extra_slots,
Array<InstanceKlass*>* transitive_interfaces) {
assert(transitive_interfaces == NULL, "sanity");
assert(transitive_interfaces == nullptr, "sanity");
// interfaces = { cloneable_klass, serializable_klass, elemSuper[], ... };
const Array<Klass*>* elem_supers = element_klass()->secondary_supers();
int num_elem_supers = elem_supers == NULL ? 0 : elem_supers->length();
int num_elem_supers = elem_supers == nullptr ? 0 : elem_supers->length();
int num_secondaries = num_extra_slots + 2 + num_elem_supers;
if (num_secondaries == 2) {
// Must share this for correct bootstrapping!
set_secondary_supers(Universe::the_array_interfaces_array());
return NULL;
return nullptr;
} else {
GrowableArray<Klass*>* secondaries = new GrowableArray<Klass*>(num_elem_supers+2);
secondaries->push(vmClasses::Cloneable_klass());
@ -391,7 +391,7 @@ GrowableArray<Klass*>* ObjArrayKlass::compute_secondary_supers(int num_extra_slo
for (int i = 0; i < num_elem_supers; i++) {
Klass* elem_super = elem_supers->at(i);
Klass* array_super = elem_super->array_klass_or_null();
assert(array_super != NULL, "must already have been created");
assert(array_super != nullptr, "must already have been created");
secondaries->push(array_super);
}
return secondaries;
@ -410,7 +410,7 @@ void ObjArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) {
jint ObjArrayKlass::compute_modifier_flags() const {
// The modifier for an objectArray is the same as its element
if (element_klass() == NULL) {
if (element_klass() == nullptr) {
assert(Universe::is_bootstrapping(), "partial objArray only at startup");
return JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC;
}
@ -422,13 +422,13 @@ jint ObjArrayKlass::compute_modifier_flags() const {
}
ModuleEntry* ObjArrayKlass::module() const {
assert(bottom_klass() != NULL, "ObjArrayKlass returned unexpected NULL bottom_klass");
assert(bottom_klass() != nullptr, "ObjArrayKlass returned unexpected null bottom_klass");
// The array is defined in the module of its bottom class
return bottom_klass()->module();
}
PackageEntry* ObjArrayKlass::package() const {
assert(bottom_klass() != NULL, "ObjArrayKlass returned unexpected NULL bottom_klass");
assert(bottom_klass() != nullptr, "ObjArrayKlass returned unexpected null bottom_klass");
return bottom_klass()->package();
}
@ -459,11 +459,11 @@ void ObjArrayKlass::oop_print_on(oop obj, outputStream* st) {
int print_len = MIN2((intx) oa->length(), MaxElementPrintSize);
for(int index = 0; index < print_len; index++) {
st->print(" - %3d : ", index);
if (oa->obj_at(index) != NULL) {
if (oa->obj_at(index) != nullptr) {
oa->obj_at(index)->print_value_on(st);
st->cr();
} else {
st->print_cr("NULL");
st->print_cr("null");
}
}
int remaining = oa->length() - print_len;
@ -480,10 +480,10 @@ void ObjArrayKlass::oop_print_value_on(oop obj, outputStream* st) {
element_klass()->print_value_on(st);
int len = objArrayOop(obj)->length();
st->print("[%d] ", len);
if (obj != NULL) {
if (obj != nullptr) {
obj->print_address_on(st);
} else {
st->print_cr("NULL");
st->print_cr("null");
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,7 +86,7 @@ void oopDesc::print_value_on(outputStream* st) const {
void oopDesc::verify_on(outputStream* st, oopDesc* oop_desc) {
if (oop_desc != NULL) {
if (oop_desc != nullptr) {
oop_desc->klass()->oop_verify_on(oop_desc, st);
}
}
@ -123,7 +123,7 @@ bool oopDesc::is_oop(oop obj, bool ignore_mark_word) {
// used only for asserts and guarantees
bool oopDesc::is_oop_or_null(oop obj, bool ignore_mark_word) {
return obj == NULL ? true : is_oop(obj, ignore_mark_word);
return obj == nullptr ? true : is_oop(obj, ignore_mark_word);
}
VerifyOopClosure VerifyOopClosure::verify_oop;
@ -160,7 +160,7 @@ void oopDesc::set_narrow_klass(narrowKlass nk) {
void* oopDesc::load_klass_raw(oop obj) {
if (UseCompressedClassPointers) {
narrowKlass narrow_klass = obj->_metadata._compressed_klass;
if (narrow_klass == 0) return NULL;
if (narrow_klass == 0) return nullptr;
return (void*)CompressedKlassPointers::decode_raw(narrow_klass);
} else {
return obj->_metadata._klass;
@ -171,7 +171,7 @@ void* oopDesc::load_oop_raw(oop obj, int offset) {
uintptr_t addr = (uintptr_t)(void*)obj + (uint)offset;
if (UseCompressedOops) {
narrowOop narrow_oop = *(narrowOop*)addr;
if (CompressedOops::is_null(narrow_oop)) return NULL;
if (CompressedOops::is_null(narrow_oop)) return nullptr;
return (void*)CompressedOops::decode_raw(narrow_oop);
} else {
return *(void**)addr;
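
Both raw loaders decode a 32-bit narrow value into a full pointer. For compressed oops the decode is essentially base + (narrow << shift); a sketch under that assumption (HotSpot's decode_raw takes the base and shift from VM-startup globals rather than parameters):

#include <cstdint>

// Illustrative compressed-pointer decode: 'base' is the heap base and
// 'shift' the object-alignment shift chosen at VM startup.
inline void* decode_raw(uint32_t narrow, uintptr_t base, unsigned shift) {
  return (void*)(base + ((uintptr_t)narrow << shift));
}
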


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -262,7 +262,7 @@ class oopDesc {
// Like "forward_to", but inserts the forwarding pointer atomically.
// Exactly one thread succeeds in inserting the forwarding pointer, and
// this call returns "NULL" for that thread; any other thread has the
// this call returns null for that thread; any other thread has the
// value of the forwarding pointer returned and does not modify "this".
inline oop forward_to_atomic(oop p, markWord compare, atomic_memory_order order = memory_order_conservative);
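
In other words, forward_to_atomic() behaves like a compare-and-swap over the mark word: every competing thread proposes its own copy, one CAS wins, and losers receive the winner's forwardee. A sketch of how a GC evacuation path typically consumes the return value (HotSpot-internal; use() and undo_allocation() are illustrative helpers, not real APIs):

// 'obj' is the from-space object, 'copy' this thread's tentative copy,
// 'old_mark' the mark word read before copying.
oop winner = obj->forward_to_atomic(copy, old_mark);
if (winner == nullptr) {
  use(copy);              // we installed the forwarding pointer; our copy is live
} else {
  undo_allocation(copy);  // someone else forwarded first; discard our copy
  use(winner);
}
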


@ -116,7 +116,7 @@ Klass* oopDesc::klass_raw() const {
}
void oopDesc::set_klass(Klass* k) {
assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass");
assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
if (UseCompressedClassPointers) {
_metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k);
} else {
@ -125,7 +125,7 @@ void oopDesc::set_klass(Klass* k) {
}
void oopDesc::release_set_klass(HeapWord* mem, Klass* k) {
assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass");
assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass");
char* raw_mem = ((char*)mem + klass_offset_in_bytes());
if (UseCompressedClassPointers) {
Atomic::release_store((narrowKlass*)raw_mem,
@ -278,7 +278,7 @@ oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order orde
assert(m.decode_pointer() == p, "encoding must be reversible");
markWord old_mark = cas_set_mark(m, compare, order);
if (old_mark == compare) {
return NULL;
return nullptr;
} else {
return cast_to_oop(old_mark.decode_pointer());
}
@ -349,7 +349,7 @@ void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) {
}
bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
return obj == NULL || obj->klass()->is_subtype_of(klass);
return obj == nullptr || obj->klass()->is_subtype_of(klass);
}
intptr_t oopDesc::identity_hash() {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@ private:
oop* _obj;
public:
OopHandle() : _obj(NULL) {}
OopHandle() : _obj(nullptr) {}
explicit OopHandle(oop* w) : _obj(w) {}
OopHandle(OopStorage* storage, oop obj);
@ -64,7 +64,7 @@ public:
inline oop resolve() const;
inline oop peek() const;
bool is_empty() const { return _obj == NULL; }
bool is_empty() const { return _obj == nullptr; }
inline void release(OopStorage* storage);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,16 +31,16 @@
#include "gc/shared/oopStorage.inline.hpp"
inline oop OopHandle::resolve() const {
return (_obj == NULL) ? (oop)NULL : NativeAccess<>::oop_load(_obj);
return (_obj == nullptr) ? (oop)nullptr : NativeAccess<>::oop_load(_obj);
}
inline oop OopHandle::peek() const {
return (_obj == NULL) ? (oop)NULL : NativeAccess<AS_NO_KEEPALIVE>::oop_load(_obj);
return (_obj == nullptr) ? (oop)nullptr : NativeAccess<AS_NO_KEEPALIVE>::oop_load(_obj);
}
inline OopHandle::OopHandle(OopStorage* storage, oop obj) :
_obj(storage->allocate()) {
if (_obj == NULL) {
if (_obj == nullptr) {
vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
"Cannot create oop handle");
}
@ -48,16 +48,16 @@ inline OopHandle::OopHandle(OopStorage* storage, oop obj) :
}
inline void OopHandle::release(OopStorage* storage) {
if (_obj != NULL) {
if (_obj != nullptr) {
// Clear the OopHandle first
NativeAccess<>::oop_store(_obj, (oop)NULL);
NativeAccess<>::oop_store(_obj, nullptr);
storage->release(_obj);
}
}
inline void OopHandle::replace(oop obj) {
oop* ptr = ptr_raw();
assert(ptr != NULL, "should not use replace");
assert(ptr != nullptr, "should not use replace");
NativeAccess<>::oop_store(ptr, obj);
}
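
Taken together these define the OopHandle life cycle: allocate a slot from an OopStorage, read it through a GC-aware barrier (resolve() keeps the referent alive, peek() does not), and clear the slot before returning it on release. A usage sketch (HotSpot-internal; 'storage' stands for some OopStorage instance):

OopHandle h(storage, obj);   // allocate a slot and store 'obj'
oop strong = h.resolve();    // strong read: keep-alive barrier applied
oop weak   = h.peek();       // read without keeping the referent alive
h.release(storage);          // clears the slot, then returns it to storage
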


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ void oop::register_oop() {
if (!Universe::is_fully_initialized()) return;
// This gets expensive, which is why checking unhandled oops is on a switch.
Thread* t = Thread::current_or_null();
if (t != NULL && t->is_Java_thread()) {
if (t != nullptr && t->is_Java_thread()) {
t->unhandled_oops()->register_unhandled_oop(this);
}
}
@ -45,7 +45,7 @@ void oop::unregister_oop() {
if (!Universe::is_fully_initialized()) return;
// This gets expensive, which is why checking unhandled oops is on a switch.
Thread* t = Thread::current_or_null();
if (t != NULL && t->is_Java_thread()) {
if (t != nullptr && t->is_Java_thread()) {
t->unhandled_oops()->unregister_unhandled_oop(this);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,7 +68,7 @@ typedef class typeArrayOopDesc* typeArrayOop;
// a conversion to or from an oop to a numerical type is needed,
// use the inline template methods, cast_*_oop, defined below.
//
// Converting NULL to oop to Handle implicit is no longer accepted by the
// Converting null to oop to Handle implicit is no longer accepted by the
// compiler because there are too many steps in the conversion. Use Handle()
// instead, which generates less code anyway.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,10 +43,10 @@ RecordComponent* RecordComponent::allocate(ClassLoaderData* loader_data,
}
void RecordComponent::deallocate_contents(ClassLoaderData* loader_data) {
if (annotations() != NULL) {
if (annotations() != nullptr) {
MetadataFactory::free_array<u1>(loader_data, annotations());
}
if (type_annotations() != NULL) {
if (type_annotations() != nullptr) {
MetadataFactory::free_array<u1>(loader_data, type_annotations());
}
}
@ -70,11 +70,11 @@ void RecordComponent::print_on(outputStream* st) const {
st->print(" - generic_signature_index: %d", _generic_signature_index);
}
st->cr();
if (_annotations != NULL) {
if (_annotations != nullptr) {
st->print_cr("record component annotations");
_annotations->print_value_on(st);
}
if (_type_annotations != NULL) {
if (_type_annotations != nullptr) {
st->print_cr("record component type annotations");
_type_annotations->print_value_on(st);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -193,8 +193,8 @@ public:
void print_on(bool verbose, outputStream* st) const;
// Verifies the consistency of the chunk's data
bool verify(size_t* out_size = NULL, int* out_oops = NULL,
int* out_frames = NULL, int* out_interpreted_frames = NULL) NOT_DEBUG({ return true; });
bool verify(size_t* out_size = nullptr, int* out_oops = nullptr,
int* out_frames = nullptr, int* out_interpreted_frames = nullptr) NOT_DEBUG({ return true; });
private:
template <BarrierType barrier, ChunkFrames frames = ChunkFrames::Mixed, typename RegisterMapT>


@ -132,7 +132,7 @@ int Symbol::index_of_at(int i, const char* substr, int substr_len) const {
return -1;
for (; scan <= limit; scan++) {
scan = (address) memchr(scan, first_char, (limit + 1 - scan));
if (scan == NULL)
if (scan == nullptr)
return -1; // not found
assert(scan >= bytes+i && scan <= limit, "scan oob");
if (substr_len <= 2
@ -145,7 +145,7 @@ int Symbol::index_of_at(int i, const char* substr, int substr_len) const {
}
bool Symbol::is_star_match(const char* pattern) const {
if (strchr(pattern, '*') == NULL) {
if (strchr(pattern, '*') == nullptr) {
return equals(pattern);
} else {
ResourceMark rm;
@ -185,7 +185,7 @@ void Symbol::print_symbol_on(outputStream* st) const {
s = as_quoted_ascii();
s = os::strdup(s);
}
if (s == NULL) {
if (s == nullptr) {
st->print("(null)");
} else {
st->print("%s", s);


@ -173,12 +173,12 @@ class Symbol : public MetaspaceObj {
void make_permanent();
static void maybe_increment_refcount(Symbol* s) {
if (s != NULL) {
if (s != nullptr) {
s->increment_refcount();
}
}
static void maybe_decrement_refcount(Symbol* s) {
if (s != NULL) {
if (s != nullptr) {
s->decrement_refcount();
}
}
@ -228,7 +228,7 @@ class Symbol : public MetaspaceObj {
// Tests if the symbol contains the given utf8 substring
// at the given byte position.
bool contains_utf8_at(int position, const char* substring, int len) const {
assert(len >= 0 && substring != NULL, "substring must be valid");
assert(len >= 0 && substring != nullptr, "substring must be valid");
if (position < 0) return false; // can happen with ends_with
if (position + len > utf8_length()) return false;
return (memcmp((char*)base() + position, substring, len) == 0);
@ -281,7 +281,7 @@ class Symbol : public MetaspaceObj {
MetaspaceObj::Type type() const { return SymbolType; }
// Printing
void print_symbol_on(outputStream* st = NULL) const;
void print_symbol_on(outputStream* st = nullptr) const;
void print_utf8_on(outputStream* st) const;
void print_on(outputStream* st) const; // First level print
void print_value_on(outputStream* st) const; // Second level print.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@ class SymbolHandleBase : public StackObj {
Symbol* _temp;
public:
SymbolHandleBase() : _temp(NULL) { }
SymbolHandleBase() : _temp(nullptr) { }
// Conversion from a Symbol* to a SymbolHandleBase.
// Does not increment the current reference count if temporary.


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,8 +44,8 @@
TypeArrayKlass* TypeArrayKlass::create_klass(BasicType type,
const char* name_str, TRAPS) {
Symbol* sym = NULL;
if (name_str != NULL) {
Symbol* sym = nullptr;
if (name_str != nullptr) {
sym = SymbolTable::new_permanent_symbol(name_str);
}
@ -57,7 +57,7 @@ TypeArrayKlass* TypeArrayKlass::create_klass(BasicType type,
complete_create_array_klass(ak, ak->super(), ModuleEntryTable::javabase_moduleEntry(), CHECK_NULL);
// Add all classes to our internal class loader list here,
// including classes in the bootstrap (NULL) class loader.
// including classes in the bootstrap (null) class loader.
// Do this step after creating the mirror so that if the
// mirror creation fails, loaded_classes_do() doesn't find
// an array class without a mirror.
@ -178,7 +178,7 @@ Klass* TypeArrayKlass::array_klass(int n, TRAPS) {
return this;
// lock-free read needs acquire semantics
if (higher_dimension_acquire() == NULL) {
if (higher_dimension_acquire() == nullptr) {
ResourceMark rm;
JavaThread *jt = THREAD;
@ -186,7 +186,7 @@ Klass* TypeArrayKlass::array_klass(int n, TRAPS) {
// Atomic create higher dimension and link into list
MutexLocker mu(THREAD, MultiArray_lock);
if (higher_dimension() == NULL) {
if (higher_dimension() == nullptr) {
Klass* oak = ObjArrayKlass::allocate_objArray_klass(
class_loader_data(), dim + 1, this, CHECK_NULL);
ObjArrayKlass* h_ak = ObjArrayKlass::cast(oak);
@ -211,8 +211,8 @@ Klass* TypeArrayKlass::array_klass_or_null(int n) {
return this;
// lock-free read needs acquire semantics
if (higher_dimension_acquire() == NULL) {
return NULL;
if (higher_dimension_acquire() == nullptr) {
return nullptr;
}
ObjArrayKlass* h_ak = ObjArrayKlass::cast(higher_dimension());
@ -250,7 +250,7 @@ const char* TypeArrayKlass::external_name(BasicType type) {
case T_LONG: return "[J";
default: ShouldNotReachHere();
}
return NULL;
return nullptr;
}
@ -372,5 +372,5 @@ ModuleEntry* TypeArrayKlass::module() const {
}
PackageEntry* TypeArrayKlass::package() const {
return NULL;
return nullptr;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,7 +68,7 @@ class TypeArrayKlass : public ArrayKlass {
typeArrayOop allocate(int length, TRAPS) { return allocate_common(length, true, THREAD); }
oop multi_allocate(int rank, jint* sizes, TRAPS);
oop protection_domain() const { return NULL; }
oop protection_domain() const { return nullptr; }
// Copying
void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,9 +35,9 @@ WeakHandle::WeakHandle(OopStorage* storage, Handle obj) :
WeakHandle::WeakHandle(OopStorage* storage, oop obj) :
_obj(storage->allocate()) {
assert(obj != NULL, "no need to create weak null oop");
assert(obj != nullptr, "no need to create weak null oop");
if (_obj == NULL) {
if (_obj == nullptr) {
vm_exit_out_of_memory(sizeof(oop*), OOM_MALLOC_ERROR,
"Unable to create new weak oop handle in OopStorage %s",
storage->name());
@ -48,10 +48,10 @@ WeakHandle::WeakHandle(OopStorage* storage, oop obj) :
void WeakHandle::release(OopStorage* storage) const {
// Only release if the pointer to the object has been created.
if (_obj != NULL) {
if (_obj != nullptr) {
// Clear the WeakHandle. For race in creating ClassLoaderData, we can release this
// WeakHandle before it is cleared by GC.
NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(_obj, (oop)NULL);
NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(_obj, nullptr);
storage->release(_obj);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ class OopStorage;
// processed weakly by GC. The runtime structures that point to the oop must
// either peek or resolve the oop, the latter will keep the oop alive for
// the GC cycle. The runtime structures that reference the oop must test
// if the value is NULL. If it is NULL, it has been cleaned out by GC.
// if the value is null. If it is null, it has been cleaned out by GC.
// This is the vm version of jweak but has different GC lifetimes and policies,
// depending on the type.
@ -46,21 +46,21 @@ class WeakHandle {
WeakHandle(oop* w) : _obj(w) {}
public:
WeakHandle() : _obj(NULL) {} // needed for init
WeakHandle() : _obj(nullptr) {} // needed for init
WeakHandle(OopStorage* storage, Handle obj);
WeakHandle(OopStorage* storage, oop obj);
inline oop resolve() const;
inline oop peek() const;
void release(OopStorage* storage) const;
bool is_null() const { return _obj == NULL; }
bool is_null() const { return _obj == nullptr; }
void replace(oop with_obj);
void print() const;
void print_on(outputStream* st) const;
bool is_empty() const { return _obj == NULL; }
bool is_empty() const { return _obj == nullptr; }
oop* ptr_raw() const { return _obj; }
};
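
As the comment above the class says, the peek/resolve split is the entire contract: resolve() reads through a keep-alive barrier and pins the referent for the current GC cycle, peek() merely observes, and a null result means GC has already cleared the entry. A usage sketch (HotSpot-internal; 'storage' stands for a weakly processed OopStorage):

WeakHandle wh(storage, obj);      // 'obj' must be non-null at creation
if (wh.peek() != nullptr) {
  // Referent was alive at this instant, but this read does not pin it.
}
oop strong = wh.resolve();        // null if cleared; otherwise kept alive
wh.release(storage);              // clears the slot before returning it
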