diff --git a/src/hotspot/share/oops/access.hpp b/src/hotspot/share/oops/access.hpp index 983f7d8d793..79268a34749 100644 --- a/src/hotspot/share/oops/access.hpp +++ b/src/hotspot/share/oops/access.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -309,7 +309,7 @@ public: T* dst, size_t length) { AccessT::arraycopy(src_obj, src_offset_in_bytes, static_cast(nullptr), - NULL, 0, dst, + nullptr, 0, dst, length); } @@ -317,7 +317,7 @@ public: static inline void arraycopy_from_native(const T* src, arrayOop dst_obj, size_t dst_offset_in_bytes, size_t length) { - AccessT::arraycopy(NULL, 0, src, + AccessT::arraycopy(nullptr, 0, src, dst_obj, dst_offset_in_bytes, static_cast(nullptr), length); } @@ -332,8 +332,8 @@ public: template static inline bool oop_arraycopy_raw(T* src, T* dst, size_t length) { - return AccessT::oop_arraycopy(NULL, 0, src, - NULL, 0, dst, + return AccessT::oop_arraycopy(nullptr, 0, src, + nullptr, 0, dst, length); } diff --git a/src/hotspot/share/oops/access.inline.hpp b/src/hotspot/share/oops/access.inline.hpp index 7b14503da67..b3f15f1168d 100644 --- a/src/hotspot/share/oops/access.inline.hpp +++ b/src/hotspot/share/oops/access.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -211,7 +211,7 @@ namespace AccessInternal { FunctionPointerT>::type resolve_barrier_gc() { BarrierSet* bs = BarrierSet::barrier_set(); - assert(bs != NULL, "GC barriers invoked before BarrierSet is set"); + assert(bs != nullptr, "GC barriers invoked before BarrierSet is set"); switch (bs->kind()) { #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name) \ case BarrierSet::bs_name: { \ @@ -224,7 +224,7 @@ namespace AccessInternal { default: fatal("BarrierSet AccessBarrier resolving not implemented"); - return NULL; + return nullptr; }; } @@ -234,7 +234,7 @@ namespace AccessInternal { FunctionPointerT>::type resolve_barrier_gc() { BarrierSet* bs = BarrierSet::barrier_set(); - assert(bs != NULL, "GC barriers invoked before BarrierSet is set"); + assert(bs != nullptr, "GC barriers invoked before BarrierSet is set"); switch (bs->kind()) { #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name) \ case BarrierSet::bs_name: { \ @@ -247,7 +247,7 @@ namespace AccessInternal { default: fatal("BarrierSet AccessBarrier resolving not implemented"); - return NULL; + return nullptr; }; } diff --git a/src/hotspot/share/oops/annotations.cpp b/src/hotspot/share/oops/annotations.cpp index ebe1477dd16..8b2b236960d 100644 --- a/src/hotspot/share/oops/annotations.cpp +++ b/src/hotspot/share/oops/annotations.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) { // helper void Annotations::free_contents(ClassLoaderData* loader_data, Array* p) { - if (p != NULL) { + if (p != nullptr) { for (int i = 0; i < p->length(); i++) { MetadataFactory::free_array(loader_data, p->at(i)); } @@ -49,12 +49,12 @@ void Annotations::free_contents(ClassLoaderData* loader_data, Array(loader_data, class_annotations()); } free_contents(loader_data, fields_annotations()); - if (class_type_annotations() != NULL) { + if (class_type_annotations() != nullptr) { MetadataFactory::free_array(loader_data, class_type_annotations()); } free_contents(loader_data, fields_type_annotations()); @@ -64,7 +64,7 @@ void Annotations::deallocate_contents(ClassLoaderData* loader_data) { // The alternative to creating this array and adding to Java heap pressure // is to have a hashtable of the already created typeArrayOops typeArrayOop Annotations::make_java_array(AnnotationArray* annotations, TRAPS) { - if (annotations != NULL) { + if (annotations != nullptr) { int length = annotations->length(); typeArrayOop copy = oopFactory::new_byteArray(length, CHECK_NULL); for (int i = 0; i< length; i++) { @@ -72,7 +72,7 @@ typeArrayOop Annotations::make_java_array(AnnotationArray* annotations, TRAPS) { } return copy; } else { - return NULL; + return nullptr; } } @@ -81,7 +81,7 @@ void Annotations::metaspace_pointers_do(MetaspaceClosure* it) { it->push(&_class_annotations); it->push(&_fields_annotations); it->push(&_class_type_annotations); - it->push(&_fields_type_annotations); // FIXME: need a test case where _fields_type_annotations != NULL + it->push(&_fields_type_annotations); // FIXME: need a test case where _fields_type_annotations != nullptr } void Annotations::print_value_on(outputStream* st) const { diff --git a/src/hotspot/share/oops/annotations.hpp b/src/hotspot/share/oops/annotations.hpp index 
e650b5f55c1..c7919ff0ff9 100644 --- a/src/hotspot/share/oops/annotations.hpp +++ b/src/hotspot/share/oops/annotations.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,10 +70,10 @@ class Annotations: public MetaspaceObj { static bool is_read_only_by_default() { return true; } // Constructor to initialize to null - Annotations() : _class_annotations(NULL), - _fields_annotations(NULL), - _class_type_annotations(NULL), - _fields_type_annotations(NULL) {} + Annotations() : _class_annotations(nullptr), + _fields_annotations(nullptr), + _class_type_annotations(nullptr), + _fields_type_annotations(nullptr) {} AnnotationArray* class_annotations() const { return _class_annotations; } Array* fields_annotations() const { return _fields_annotations; } diff --git a/src/hotspot/share/oops/arrayKlass.cpp b/src/hotspot/share/oops/arrayKlass.cpp index 9d7cd13177e..1243786aff2 100644 --- a/src/hotspot/share/oops/arrayKlass.cpp +++ b/src/hotspot/share/oops/arrayKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,7 +52,7 @@ int ArrayKlass::static_size(int header_size) { InstanceKlass* ArrayKlass::java_super() const { - if (super() == NULL) return NULL; // bootstrap case + if (super() == nullptr) return nullptr; // bootstrap case // Array klasses have primary supertypes which are not reported to Java. 
// Example super chain: String[][] -> Object[][] -> Object[] -> Object return vmClasses::Object_klass(); @@ -61,7 +61,7 @@ InstanceKlass* ArrayKlass::java_super() const { oop ArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) { ShouldNotReachHere(); - return NULL; + return nullptr; } // find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined @@ -86,13 +86,13 @@ Method* ArrayKlass::uncached_lookup_method(const Symbol* name, ArrayKlass::ArrayKlass(Symbol* name, KlassKind kind) : Klass(kind), _dimension(1), - _higher_dimension(NULL), - _lower_dimension(NULL) { + _higher_dimension(nullptr), + _lower_dimension(nullptr) { // Arrays don't add any new methods, so their vtable is the same size as // the vtable of klass Object. set_vtable_length(Universe::base_vtable_size()); set_name(name); - set_super(Universe::is_bootstrapping() ? NULL : vmClasses::Object_klass()); + set_super(Universe::is_bootstrapping() ? nullptr : vmClasses::Object_klass()); set_layout_helper(Klass::_lh_neutral_value); set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5) JFR_ONLY(INIT_ID(this);) @@ -102,15 +102,15 @@ ArrayKlass::ArrayKlass(Symbol* name, KlassKind kind) : // Initialization of vtables and mirror object is done separately from base_create_array_klass, // since a GC can happen. At this point all instance variables of the ArrayKlass must be setup. void ArrayKlass::complete_create_array_klass(ArrayKlass* k, Klass* super_klass, ModuleEntry* module_entry, TRAPS) { - k->initialize_supers(super_klass, NULL, CHECK); + k->initialize_supers(super_klass, nullptr, CHECK); k->vtable().initialize_vtable(); // During bootstrapping, before java.base is defined, the module_entry may not be present yet. // These classes will be put on a fixup list and their module fields will be patched once // java.base is defined. 
- assert((module_entry != NULL) || ((module_entry == NULL) && !ModuleEntryTable::javabase_defined()), + assert((module_entry != nullptr) || ((module_entry == nullptr) && !ModuleEntryTable::javabase_defined()), "module entry not available post " JAVA_BASE_NAME " definition"); - oop module = (module_entry != NULL) ? module_entry->module() : (oop)NULL; + oop module = (module_entry != nullptr) ? module_entry->module() : (oop)nullptr; java_lang_Class::create_mirror(k, Handle(THREAD, k->class_loader()), Handle(THREAD, module), Handle(), Handle(), CHECK); } @@ -118,10 +118,10 @@ GrowableArray* ArrayKlass::compute_secondary_supers(int num_extra_slots, Array* transitive_interfaces) { // interfaces = { cloneable_klass, serializable_klass }; assert(num_extra_slots == 0, "sanity of primitive array type"); - assert(transitive_interfaces == NULL, "sanity"); + assert(transitive_interfaces == nullptr, "sanity"); // Must share this for correct bootstrapping! set_secondary_supers(Universe::the_array_interfaces_array()); - return NULL; + return nullptr; } objArrayOop ArrayKlass::allocate_arrayArray(int n, int length, TRAPS) { @@ -131,7 +131,7 @@ objArrayOop ArrayKlass::allocate_arrayArray(int n, int length, TRAPS) { ArrayKlass* ak = ArrayKlass::cast(k); objArrayOop o = (objArrayOop)Universe::heap()->array_allocate(ak, size, length, /* do_zero */ true, CHECK_NULL); - // initialization to NULL not necessary, area already cleared + // initialization to null not necessary, area already cleared return o; } @@ -159,7 +159,7 @@ void ArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) { #if INCLUDE_CDS void ArrayKlass::remove_unshareable_info() { Klass::remove_unshareable_info(); - if (_higher_dimension != NULL) { + if (_higher_dimension != nullptr) { ArrayKlass *ak = ArrayKlass::cast(higher_dimension()); ak->remove_unshareable_info(); } @@ -167,7 +167,7 @@ void ArrayKlass::remove_unshareable_info() { void ArrayKlass::remove_java_mirror() { Klass::remove_java_mirror(); - if 
(_higher_dimension != NULL) { + if (_higher_dimension != nullptr) { ArrayKlass *ak = ArrayKlass::cast(higher_dimension()); ak->remove_java_mirror(); } @@ -178,7 +178,7 @@ void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle p Klass::restore_unshareable_info(loader_data, protection_domain, CHECK); // Klass recreates the component mirror also - if (_higher_dimension != NULL) { + if (_higher_dimension != nullptr) { ArrayKlass *ak = ArrayKlass::cast(higher_dimension()); ak->restore_unshareable_info(loader_data, protection_domain, CHECK); } diff --git a/src/hotspot/share/oops/arrayOop.hpp b/src/hotspot/share/oops/arrayOop.hpp index 100497f52cb..0d265ea70ad 100644 --- a/src/hotspot/share/oops/arrayOop.hpp +++ b/src/hotspot/share/oops/arrayOop.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,12 +96,12 @@ class arrayOopDesc : public oopDesc { template static T* obj_offset_to_raw(arrayOop obj, size_t offset_in_bytes, T* raw) { - if (obj != NULL) { - assert(raw == NULL, "either raw or in-heap"); + if (obj != nullptr) { + assert(raw == nullptr, "either raw or in-heap"); char* base = reinterpret_cast((void*) obj); raw = reinterpret_cast(base + offset_in_bytes); } else { - assert(raw != NULL, "either raw or in-heap"); + assert(raw != nullptr, "either raw or in-heap"); } return raw; } diff --git a/src/hotspot/share/oops/compressedOops.cpp b/src/hotspot/share/oops/compressedOops.cpp index a488df00b0e..84ab807e4fb 100644 --- a/src/hotspot/share/oops/compressedOops.cpp +++ b/src/hotspot/share/oops/compressedOops.cpp @@ -35,7 +35,7 @@ #include "runtime/globals.hpp" // For UseCompressedOops. 
-NarrowPtrStruct CompressedOops::_narrow_oop = { NULL, 0, true }; +NarrowPtrStruct CompressedOops::_narrow_oop = { nullptr, 0, true }; MemRegion CompressedOops::_heap_address_range; // Choose the heap base address and oop encoding mode @@ -80,7 +80,7 @@ void CompressedOops::initialize(const ReservedHeapSpace& heap_space) { // base() is one page below the heap. assert((intptr_t)base() <= ((intptr_t)_heap_address_range.start() - (intptr_t)os::vm_page_size()) || - base() == NULL, "invalid value"); + base() == nullptr, "invalid value"); assert(shift() == LogMinObjAlignmentInBytes || shift() == 0, "invalid value"); #endif @@ -148,14 +148,14 @@ bool CompressedOops::is_disjoint_heap_base_address(address addr) { // Check for disjoint base compressed oops. bool CompressedOops::base_disjoint() { - return _narrow_oop._base != NULL && is_disjoint_heap_base_address(_narrow_oop._base); + return _narrow_oop._base != nullptr && is_disjoint_heap_base_address(_narrow_oop._base); } // Check for real heapbased compressed oops. // We must subtract the base as the bits overlap. // If we negate above function, we also get unscaled and zerobased. bool CompressedOops::base_overlaps() { - return _narrow_oop._base != NULL && !is_disjoint_heap_base_address(_narrow_oop._base); + return _narrow_oop._base != nullptr && !is_disjoint_heap_base_address(_narrow_oop._base); } void CompressedOops::print_mode(outputStream* st) { @@ -179,7 +179,7 @@ void CompressedOops::print_mode(outputStream* st) { } // For UseCompressedClassPointers. -NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { NULL, 0, true }; +NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { nullptr, 0, true }; // CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump. 
// (Todo: we should #ifdef out CompressedKlassPointers for 32bit completely and fix all call sites which diff --git a/src/hotspot/share/oops/compressedOops.hpp b/src/hotspot/share/oops/compressedOops.hpp index 97178b01e23..d004c272fbb 100644 --- a/src/hotspot/share/oops/compressedOops.hpp +++ b/src/hotspot/share/oops/compressedOops.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,7 @@ class ReservedHeapSpace; struct NarrowPtrStruct { // Base address for oop-within-java-object materialization. - // NULL if using wide oops or zero based narrow oops. + // null if using wide oops or zero based narrow oops. address _base; // Number of shift bits for encoding/decoding narrow ptrs. // 0 if using wide ptrs or zero based unscaled narrow ptrs, @@ -116,7 +116,7 @@ public: static void print_mode(outputStream* st); - static bool is_null(oop v) { return v == NULL; } + static bool is_null(oop v) { return v == nullptr; } static bool is_null(narrowOop v) { return v == narrowOop::null; } static inline oop decode_raw_not_null(narrowOop v); @@ -179,7 +179,7 @@ public: static size_t range() { return _range; } static int shift() { return _narrow_klass._shift; } - static bool is_null(Klass* v) { return v == NULL; } + static bool is_null(Klass* v) { return v == nullptr; } static bool is_null(narrowKlass v) { return v == 0; } static inline Klass* decode_raw(narrowKlass v, address base); diff --git a/src/hotspot/share/oops/compressedOops.inline.hpp b/src/hotspot/share/oops/compressedOops.inline.hpp index b01a2c3f71f..3f33926aa6d 100644 --- a/src/hotspot/share/oops/compressedOops.inline.hpp +++ b/src/hotspot/share/oops/compressedOops.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its 
affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ inline oop CompressedOops::decode_not_null(narrowOop v) { } inline oop CompressedOops::decode(narrowOop v) { - return is_null(v) ? (oop)NULL : decode_not_null(v); + return is_null(v) ? nullptr : decode_not_null(v); } inline narrowOop CompressedOops::encode_not_null(oop v) { @@ -142,7 +142,7 @@ inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v, address na } inline Klass* CompressedKlassPointers::decode(narrowKlass v) { - return is_null(v) ? (Klass*)NULL : decode_not_null(v); + return is_null(v) ? nullptr : decode_not_null(v); } inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v) { diff --git a/src/hotspot/share/oops/constMethod.cpp b/src/hotspot/share/oops/constMethod.cpp index f0a35062c2d..456cc110b50 100644 --- a/src/hotspot/share/oops/constMethod.cpp +++ b/src/hotspot/share/oops/constMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -53,8 +53,8 @@ ConstMethod::ConstMethod(int byte_code_size, NoSafepointVerifier no_safepoint; init_fingerprint(); - set_constants(NULL); - set_stackmap_data(NULL); + set_constants(nullptr); + set_stackmap_data(nullptr); set_code_size(byte_code_size); set_constMethod_size(size); set_inlined_tables_length(sizes); // sets _flags @@ -62,7 +62,7 @@ ConstMethod::ConstMethod(int byte_code_size, assert(this->size() == size, "wrong size for object"); set_name_index(0); set_signature_index(0); - set_constants(NULL); + set_constants(nullptr); set_max_stack(0); set_max_locals(0); set_method_idnum(0); @@ -79,10 +79,10 @@ void ConstMethod::copy_stackmap_data(ClassLoaderData* loader_data, // Deallocate metadata fields associated with ConstMethod* void ConstMethod::deallocate_contents(ClassLoaderData* loader_data) { - if (stackmap_data() != NULL) { + if (stackmap_data() != nullptr) { MetadataFactory::free_array(loader_data, stackmap_data()); } - set_stackmap_data(NULL); + set_stackmap_data(nullptr); // deallocate annotation arrays if (has_method_annotations()) @@ -432,7 +432,7 @@ void ConstMethod::print_on(outputStream* st) const { st->print_cr("%s", internal_name()); Method* m = method(); st->print(" - method: " PTR_FORMAT " ", p2i(m)); - if (m != NULL) { + if (m != nullptr) { m->print_value_on(st); } st->cr(); @@ -448,10 +448,10 @@ void ConstMethod::print_on(outputStream* st) const { void ConstMethod::print_value_on(outputStream* st) const { st->print(" const part of method " ); Method* m = method(); - if (m != NULL) { + if (m != nullptr) { m->print_value_on(st); } else { - st->print("NULL"); + st->print("null"); } } @@ -460,7 +460,7 @@ void ConstMethod::print_value_on(outputStream* st) const { void ConstMethod::verify_on(outputStream* st) { // Verification can occur during oop construction before the method or // other fields have been initialized. 
- guarantee(method() != NULL && method()->is_method(), "should be method"); + guarantee(method() != nullptr && method()->is_method(), "should be method"); address m_end = (address)((intptr_t) this + size()); address compressed_table_start = code_end(); diff --git a/src/hotspot/share/oops/constMethod.hpp b/src/hotspot/share/oops/constMethod.hpp index f7c22f42fe0..cfe9b518aa2 100644 --- a/src/hotspot/share/oops/constMethod.hpp +++ b/src/hotspot/share/oops/constMethod.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -276,7 +276,7 @@ public: Array* stackmap_data() const { return _stackmap_data; } void set_stackmap_data(Array* sd) { _stackmap_data = sd; } void copy_stackmap_data(ClassLoaderData* loader_data, u1* sd, int length, TRAPS); - bool has_stackmap_table() const { return _stackmap_data != NULL; } + bool has_stackmap_table() const { return _stackmap_data != nullptr; } void init_fingerprint() { const uint64_t initval = UCONST64(0x8000000000000000); @@ -398,7 +398,7 @@ public: AnnotationArray** method_annotations_addr() const; AnnotationArray* method_annotations() const { - return has_method_annotations() ? *(method_annotations_addr()) : NULL; + return has_method_annotations() ? *(method_annotations_addr()) : nullptr; } void set_method_annotations(AnnotationArray* anno) { *(method_annotations_addr()) = anno; @@ -406,7 +406,7 @@ public: AnnotationArray** parameter_annotations_addr() const; AnnotationArray* parameter_annotations() const { - return has_parameter_annotations() ? *(parameter_annotations_addr()) : NULL; + return has_parameter_annotations() ? 
*(parameter_annotations_addr()) : nullptr; } void set_parameter_annotations(AnnotationArray* anno) { *(parameter_annotations_addr()) = anno; @@ -414,7 +414,7 @@ public: AnnotationArray** type_annotations_addr() const; AnnotationArray* type_annotations() const { - return has_type_annotations() ? *(type_annotations_addr()) : NULL; + return has_type_annotations() ? *(type_annotations_addr()) : nullptr; } void set_type_annotations(AnnotationArray* anno) { *(type_annotations_addr()) = anno; @@ -422,7 +422,7 @@ public: AnnotationArray** default_annotations_addr() const; AnnotationArray* default_annotations() const { - return has_default_annotations() ? *(default_annotations_addr()) : NULL; + return has_default_annotations() ? *(default_annotations_addr()) : nullptr; } void set_default_annotations(AnnotationArray* anno) { *(default_annotations_addr()) = anno; diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp index e30415fe087..3adedc13ad4 100644 --- a/src/hotspot/share/oops/constantPool.cpp +++ b/src/hotspot/share/oops/constantPool.cpp @@ -89,7 +89,7 @@ void ConstantPool::copy_fields(const ConstantPool* orig) { // MetaspaceObj allocation invariant is calloc equivalent memory // simple verification of this here (JVM_CONSTANT_Invalid == 0 ) static bool tag_array_is_zero_initialized(Array* tags) { - assert(tags != NULL, "invariant"); + assert(tags != nullptr, "invariant"); const int length = tags->length(); for (int index = 0; index < length; ++index) { if (JVM_CONSTANT_Invalid != tags->at(index)) { @@ -105,31 +105,31 @@ ConstantPool::ConstantPool(Array* tags) : _tags(tags), _length(tags->length()) { - assert(_tags != NULL, "invariant"); + assert(_tags != nullptr, "invariant"); assert(tags->length() == _length, "invariant"); assert(tag_array_is_zero_initialized(tags), "invariant"); assert(0 == flags(), "invariant"); assert(0 == version(), "invariant"); - assert(NULL == _pool_holder, "invariant"); + assert(nullptr == _pool_holder, 
"invariant"); } void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) { - if (cache() != NULL) { + if (cache() != nullptr) { MetadataFactory::free_metadata(loader_data, cache()); - set_cache(NULL); + set_cache(nullptr); } MetadataFactory::free_array(loader_data, resolved_klasses()); - set_resolved_klasses(NULL); + set_resolved_klasses(nullptr); MetadataFactory::free_array(loader_data, operands()); - set_operands(NULL); + set_operands(nullptr); release_C_heap_structures(); // free tag array MetadataFactory::free_array(loader_data, tags()); - set_tags(NULL); + set_tags(nullptr); } void ConstantPool::release_C_heap_structures() { @@ -164,8 +164,8 @@ objArrayOop ConstantPool::resolved_references() const { // Called from outside constant pool resolution where a resolved_reference array // may not be present. objArrayOop ConstantPool::resolved_references_or_null() const { - if (_cache == NULL) { - return NULL; + if (_cache == nullptr) { + return nullptr; } else { return _cache->resolved_references(); } @@ -224,7 +224,7 @@ void ConstantPool::allocate_resolved_klasses(ClassLoaderData* loader_data, int n // This allows us to use 0xffff (ConstantPool::_temp_resolved_klass_index) to indicate // UnresolvedKlass entries that are temporarily created during class redefinition. 
assert(num_klasses < CPKlassSlot::_temp_resolved_klass_index, "sanity"); - assert(resolved_klasses() == NULL, "sanity"); + assert(resolved_klasses() == nullptr, "sanity"); Array* rk = MetadataFactory::new_array(loader_data, num_klasses, CHECK); set_resolved_klasses(rk); } @@ -255,22 +255,22 @@ void ConstantPool::initialize_unresolved_klasses(ClassLoaderData* loader_data, T // Hidden class support: void ConstantPool::klass_at_put(int class_index, Klass* k) { - assert(k != NULL, "must be valid klass"); + assert(k != nullptr, "must be valid klass"); CPKlassSlot kslot = klass_slot_at(class_index); int resolved_klass_index = kslot.resolved_klass_index(); Klass** adr = resolved_klasses()->adr_at(resolved_klass_index); Atomic::release_store(adr, k); // The interpreter assumes when the tag is stored, the klass is resolved - // and the Klass* non-NULL, so we need hardware store ordering here. + // and the Klass* non-null, so we need hardware store ordering here. release_tag_at_put(class_index, JVM_CONSTANT_Class); } #if INCLUDE_CDS_JAVA_HEAP // Returns the _resolved_reference array after removing unarchivable items from it. -// Returns nullptr if this class is not supported, or _resolved_reference doesn't exist. +// Returns null if this class is not supported, or _resolved_reference doesn't exist. objArrayOop ConstantPool::prepare_resolved_references_for_archiving() { - if (_cache == NULL) { + if (_cache == nullptr) { return nullptr; // nothing to do } @@ -285,12 +285,12 @@ objArrayOop ConstantPool::prepare_resolved_references_for_archiving() { objArrayOop rr = resolved_references(); if (rr != nullptr) { Array* ref_map = reference_map(); - int ref_map_len = ref_map == NULL ? 0 : ref_map->length(); + int ref_map_len = ref_map == nullptr ? 
0 : ref_map->length(); int rr_len = rr->length(); for (int i = 0; i < rr_len; i++) { oop obj = rr->obj_at(i); rr->obj_at_put(i, nullptr); - if (obj != NULL && i < ref_map_len) { + if (obj != nullptr && i < ref_map_len) { int index = object_to_cp_index(i); if (tag_at(index).is_string()) { assert(java_lang_String::is_instance(obj), "must be"); @@ -307,7 +307,7 @@ objArrayOop ConstantPool::prepare_resolved_references_for_archiving() { void ConstantPool::add_dumped_interned_strings() { objArrayOop rr = resolved_references(); - if (rr != NULL) { + if (rr != nullptr) { int rr_len = rr->length(); for (int i = 0; i < rr_len; i++) { oop p = rr->obj_at(i); @@ -328,16 +328,16 @@ void ConstantPool::restore_unshareable_info(TRAPS) { assert(is_constantPool(), "ensure C++ vtable is restored"); assert(on_stack(), "should always be set for shared constant pools"); assert(is_shared(), "should always be set for shared constant pools"); - assert(_cache != NULL, "constant pool _cache should not be NULL"); + assert(_cache != nullptr, "constant pool _cache should not be null"); // Only create the new resolved references array if it hasn't been attempted before - if (resolved_references() != NULL) return; + if (resolved_references() != nullptr) return; if (vmClasses::Object_klass_loaded()) { ClassLoaderData* loader_data = pool_holder()->class_loader_data(); #if INCLUDE_CDS_JAVA_HEAP if (ArchiveHeapLoader::is_fully_available() && - _cache->archived_references() != NULL) { + _cache->archived_references() != nullptr) { oop archived = _cache->archived_references(); // Create handle for the archived resolved reference array object Handle refs_handle(THREAD, archived); @@ -374,7 +374,7 @@ void ConstantPool::remove_unshareable_info() { // re-creating the resolved reference array if archived heap data cannot be map // at runtime. set_resolved_reference_length( - resolved_references() != NULL ? resolved_references()->length() : 0); + resolved_references() != nullptr ? 
resolved_references()->length() : 0); set_resolved_references(OopHandle()); bool archived = false; @@ -399,8 +399,8 @@ void ConstantPool::remove_unshareable_info() { } } - if (cache() != NULL) { - // cache() is NULL if this class is not yet linked. + if (cache() != nullptr) { + // cache() is null if this class is not yet linked. cache()->remove_unshareable_info(); } } @@ -419,10 +419,10 @@ bool ConstantPool::maybe_archive_resolved_klass_at(int cp_index) { CPKlassSlot kslot = klass_slot_at(cp_index); int resolved_klass_index = kslot.resolved_klass_index(); Klass* k = resolved_klasses()->at(resolved_klass_index); - // k could be NULL if the referenced class has been excluded via + // k could be null if the referenced class has been excluded via // SystemDictionaryShared::is_excluded_class(). - if (k != NULL) { + if (k != nullptr) { ConstantPool* src_cp = ArchiveBuilder::current()->get_source_addr(this); if (ClassPrelinker::can_archive_resolved_klass(src_cp, cp_index)) { if (log_is_enabled(Debug, cds, resolve)) { @@ -436,7 +436,7 @@ bool ConstantPool::maybe_archive_resolved_klass_at(int cp_index) { // This referenced class cannot be archived. Revert the tag to UnresolvedClass, // so that the proper class loading and initialization can happen at runtime. - resolved_klasses()->at_put(resolved_klass_index, NULL); + resolved_klasses()->at_put(resolved_klass_index, nullptr); tag_at_put(cp_index, JVM_CONSTANT_UnresolvedClass); return false; } @@ -457,21 +457,21 @@ void ConstantPool::string_at_put(int which, int obj_index, oop str) { void ConstantPool::trace_class_resolution(const constantPoolHandle& this_cp, Klass* k) { ResourceMark rm; int line_number = -1; - const char * source_file = NULL; + const char * source_file = nullptr; if (JavaThread::current()->has_last_Java_frame()) { // try to identify the method which called this function. 
vframeStream vfst(JavaThread::current()); if (!vfst.at_end()) { line_number = vfst.method()->line_number_from_bci(vfst.bci()); Symbol* s = vfst.method()->method_holder()->source_file_name(); - if (s != NULL) { + if (s != nullptr) { source_file = s->as_C_string(); } } } if (k != this_cp->pool_holder()) { // only print something if the classes are different - if (source_file != NULL) { + if (source_file != nullptr) { log_debug(class, resolve)("%s %s %s:%d", this_cp->pool_holder()->external_name(), k->external_name(), source_file, line_number); @@ -499,7 +499,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which, // the unresolved_klasses() array. if (this_cp->tag_at(which).is_klass()) { Klass* klass = this_cp->resolved_klasses()->at(resolved_klass_index); - if (klass != NULL) { + if (klass != nullptr) { return klass; } } @@ -545,7 +545,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which, // some other thread has beaten us and has resolved the class. // To preserve old behavior, we return the resolved class. Klass* klass = this_cp->resolved_klasses()->at(resolved_klass_index); - assert(klass != NULL, "must be resolved if exception was cleared"); + assert(klass != nullptr, "must be resolved if exception was cleared"); return klass; } @@ -557,7 +557,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which, Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index); Atomic::release_store(adr, k); // The interpreter assumes when the tag is stored, the klass is resolved - // and the Klass* stored in _resolved_klasses is non-NULL, so we need + // and the Klass* stored in _resolved_klasses is non-null, so we need // hardware store ordering here. // We also need to CAS to not overwrite an error from a racing thread. 
@@ -568,7 +568,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which, // We need to recheck exceptions from racing thread and return the same. if (old_tag == JVM_CONSTANT_UnresolvedClassInError) { // Remove klass. - this_cp->resolved_klasses()->at_put(resolved_klass_index, NULL); + this_cp->resolved_klasses()->at_put(resolved_klass_index, nullptr); throw_resolution_error(this_cp, which, CHECK_NULL); } @@ -578,7 +578,7 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int which, // Does not update ConstantPool* - to avoid any exception throwing. Used // by compiler and exception handling. Also used to avoid classloads for -// instanceof operations. Returns NULL if the class has not been loaded or +// instanceof operations. Returns null if the class has not been loaded or // if the verification of constant pool failed Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int which) { CPKlassSlot kslot = this_cp->klass_slot_at(which); @@ -588,10 +588,10 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w if (this_cp->tag_at(which).is_klass()) { Klass* k = this_cp->resolved_klasses()->at(resolved_klass_index); - assert(k != NULL, "should be resolved"); + assert(k != nullptr, "should be resolved"); return k; } else if (this_cp->tag_at(which).is_unresolved_klass_in_error()) { - return NULL; + return nullptr; } else { Thread* current = Thread::current(); Symbol* name = this_cp->symbol_at(name_index); @@ -602,15 +602,15 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w Klass* k = SystemDictionary::find_instance_klass(current, name, h_loader, h_prot); // Avoid constant pool verification at a safepoint, as it takes the Module_lock. - if (k != NULL && current->is_Java_thread()) { + if (k != nullptr && current->is_Java_thread()) { // Make sure that resolving is legal JavaThread* THREAD = JavaThread::cast(current); // For exception macros. 
ExceptionMark em(THREAD); - // return NULL if verification fails + // return null if verification fails verify_constant_pool_resolve(this_cp, k, THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; - return NULL; + return nullptr; } return k; } else { @@ -621,12 +621,12 @@ Klass* ConstantPool::klass_at_if_loaded(const constantPoolHandle& this_cp, int w Method* ConstantPool::method_at_if_loaded(const constantPoolHandle& cpool, int which) { - if (cpool->cache() == NULL) return NULL; // nothing to load yet + if (cpool->cache() == nullptr) return nullptr; // nothing to load yet int cache_index = decode_cpcache_index(which, true); if (!(cache_index >= 0 && cache_index < cpool->cache()->length())) { // FIXME: should be an assert log_debug(class, resolve)("bad operand %d in:", which); cpool->print(); - return NULL; + return nullptr; } ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); return e->method_if_resolved(cpool); @@ -634,14 +634,14 @@ Method* ConstantPool::method_at_if_loaded(const constantPoolHandle& cpool, bool ConstantPool::has_appendix_at_if_loaded(const constantPoolHandle& cpool, int which) { - if (cpool->cache() == NULL) return false; // nothing to load yet + if (cpool->cache() == nullptr) return false; // nothing to load yet int cache_index = decode_cpcache_index(which, true); ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); return e->has_appendix(); } oop ConstantPool::appendix_at_if_loaded(const constantPoolHandle& cpool, int which) { - if (cpool->cache() == NULL) return NULL; // nothing to load yet + if (cpool->cache() == nullptr) return nullptr; // nothing to load yet int cache_index = decode_cpcache_index(which, true); ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); return e->appendix_if_resolved(cpool); @@ -649,7 +649,7 @@ oop ConstantPool::appendix_at_if_loaded(const constantPoolHandle& cpool, int whi bool ConstantPool::has_local_signature_at_if_loaded(const constantPoolHandle& cpool, int 
which) { - if (cpool->cache() == NULL) return false; // nothing to load yet + if (cpool->cache() == nullptr) return false; // nothing to load yet int cache_index = decode_cpcache_index(which, true); ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index); return e->has_local_signature(); @@ -668,7 +668,7 @@ Symbol* ConstantPool::impl_signature_ref_at(int which, bool uncached) { int ConstantPool::impl_name_and_type_ref_index_at(int which, bool uncached) { int i = which; - if (!uncached && cache() != NULL) { + if (!uncached && cache() != nullptr) { if (ConstantPool::is_invokedynamic_index(which)) { // Invokedynamic index is index into the constant pool cache int pool_index = invokedynamic_bootstrap_ref_index_at(which); @@ -693,7 +693,7 @@ int ConstantPool::impl_name_and_type_ref_index_at(int which, bool uncached) { constantTag ConstantPool::impl_tag_ref_at(int which, bool uncached) { int pool_index = which; - if (!uncached && cache() != NULL) { + if (!uncached && cache() != nullptr) { if (ConstantPool::is_invokedynamic_index(which)) { // Invokedynamic index is index into resolved_references pool_index = invokedynamic_bootstrap_ref_index_at(which); @@ -709,7 +709,7 @@ int ConstantPool::impl_klass_ref_index_at(int which, bool uncached) { guarantee(!ConstantPool::is_invokedynamic_index(which), "an invokedynamic instruction does not have a klass"); int i = which; - if (!uncached && cache() != NULL) { + if (!uncached && cache() != nullptr) { // change byte-ordering and go via cache i = remap_instruction_operand_from_cache(which); } @@ -788,7 +788,7 @@ void ConstantPool::resolve_string_constants_impl(const constantPoolHandle& this_ static Symbol* exception_message(const constantPoolHandle& this_cp, int which, constantTag tag, oop pending_exception) { // Dig out the detailed message to reuse if possible Symbol* message = java_lang_Throwable::detail_message(pending_exception); - if (message != NULL) { + if (message != nullptr) { return message; } @@ -824,9 +824,9 
@@ static void add_resolution_error(const constantPoolHandle& this_cp, int which, oop cause = java_lang_Throwable::cause(pending_exception); // Also dig out the exception cause, if present. - Symbol* cause_sym = NULL; - Symbol* cause_msg = NULL; - if (cause != NULL && cause != pending_exception) { + Symbol* cause_sym = nullptr; + Symbol* cause_msg = nullptr; + if (cause != nullptr && cause != pending_exception) { cause_sym = cause->klass()->name(); cause_msg = java_lang_Throwable::detail_message(cause); } @@ -838,24 +838,24 @@ static void add_resolution_error(const constantPoolHandle& this_cp, int which, void ConstantPool::throw_resolution_error(const constantPoolHandle& this_cp, int which, TRAPS) { ResourceMark rm(THREAD); - Symbol* message = NULL; - Symbol* cause = NULL; - Symbol* cause_msg = NULL; + Symbol* message = nullptr; + Symbol* cause = nullptr; + Symbol* cause_msg = nullptr; Symbol* error = SystemDictionary::find_resolution_error(this_cp, which, &message, &cause, &cause_msg); - assert(error != NULL, "checking"); - const char* cause_str = cause_msg != NULL ? cause_msg->as_C_string() : NULL; + assert(error != nullptr, "checking"); + const char* cause_str = cause_msg != nullptr ? 
cause_msg->as_C_string() : nullptr; CLEAR_PENDING_EXCEPTION; - if (message != NULL) { + if (message != nullptr) { char* msg = message->as_C_string(); - if (cause != NULL) { + if (cause != nullptr) { Handle h_cause = Exceptions::new_exception(THREAD, cause, cause_str); THROW_MSG_CAUSE(error, msg, h_cause); } else { THROW_MSG(error, msg); } } else { - if (cause != NULL) { + if (cause != nullptr) { Handle h_cause = Exceptions::new_exception(THREAD, cause, cause_str); THROW_CAUSE(error, h_cause); } else { @@ -926,7 +926,7 @@ BasicType ConstantPool::basic_type_for_constant_at(int which) { oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, int index, int cache_index, bool* status_return, TRAPS) { - oop result_oop = NULL; + oop result_oop = nullptr; Handle throw_exception; if (cache_index == _possible_index_sentinel) { @@ -942,13 +942,13 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, if (cache_index >= 0) { result_oop = this_cp->resolved_reference_at(cache_index); - if (result_oop != NULL) { + if (result_oop != nullptr) { if (result_oop == Universe::the_null_sentinel()) { DEBUG_ONLY(int temp_index = (index >= 0 ? index : this_cp->object_to_cp_index(cache_index))); assert(this_cp->tag_at(temp_index).is_dynamic_constant(), "only condy uses the null sentinel"); - result_oop = NULL; + result_oop = nullptr; } - if (status_return != NULL) (*status_return) = true; + if (status_return != nullptr) (*status_return) = true; return result_oop; // That was easy... 
} @@ -959,16 +959,16 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, constantTag tag = this_cp->tag_at(index); - if (status_return != NULL) { + if (status_return != nullptr) { // don't trigger resolution if the constant might need it switch (tag.value()) { case JVM_CONSTANT_Class: { CPKlassSlot kslot = this_cp->klass_slot_at(index); int resolved_klass_index = kslot.resolved_klass_index(); - if (this_cp->resolved_klasses()->at(resolved_klass_index) == NULL) { + if (this_cp->resolved_klasses()->at(resolved_klass_index) == nullptr) { (*status_return) = false; - return NULL; + return nullptr; } // the klass is waiting in the CP; go get it break; @@ -982,7 +982,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, break; default: (*status_return) = false; - return NULL; + return nullptr; } // from now on there is either success or an OOME (*status_return) = true; @@ -1028,8 +1028,8 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, if (!is_reference_type(type)) { // Make sure the primitive value is properly boxed. // This is a JDK responsibility. - const char* fail = NULL; - if (result_oop == NULL) { + const char* fail = nullptr; + if (result_oop == nullptr) { fail = "null result instead of box"; } else if (!is_java_primitive(type)) { // FIXME: support value types via unboxing @@ -1037,7 +1037,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, } else if (!java_lang_boxing_object::is_instance(result_oop, type)) { fail = "primitive is not properly boxed"; } - if (fail != NULL) { + if (fail != nullptr) { // Since this exception is not a LinkageError, throw exception // but do not save a DynamicInError resolution result. // See section 5.4.3 of the VM spec. @@ -1163,7 +1163,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, // The important thing here is that all threads pick up the same result. 
// It doesn't matter which racing thread wins, as long as only one // result is used by all threads, and all future queries. - oop new_result = (result_oop == NULL ? Universe::the_null_sentinel() : result_oop); + oop new_result = (result_oop == nullptr ? Universe::the_null_sentinel() : result_oop); oop old_result = this_cp->set_resolved_reference_at(cache_index, new_result); if (old_result == nullptr) { return result_oop; // was installed @@ -1171,7 +1171,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, // Return the winning thread's result. This can be different than // the result here for MethodHandles. if (old_result == Universe::the_null_sentinel()) - old_result = NULL; + old_result = nullptr; return old_result; } } else { @@ -1182,7 +1182,7 @@ oop ConstantPool::resolve_constant_at_impl(const constantPoolHandle& this_cp, oop ConstantPool::uncached_string_at(int which, TRAPS) { Symbol* sym = unresolved_string_at(which); - oop str = StringTable::intern(sym, CHECK_(NULL)); + oop str = StringTable::intern(sym, CHECK_(nullptr)); assert(java_lang_String::is_instance(str), "must be string"); return str; } @@ -1228,9 +1228,9 @@ oop ConstantPool::string_at_impl(const constantPoolHandle& this_cp, int which, i // If the string has already been interned, this entry will be non-null oop str = this_cp->resolved_reference_at(obj_index); assert(str != Universe::the_null_sentinel(), ""); - if (str != NULL) return str; + if (str != nullptr) return str; Symbol* sym = this_cp->unresolved_string_at(which); - str = StringTable::intern(sym, CHECK_(NULL)); + str = StringTable::intern(sym, CHECK_(nullptr)); this_cp->string_at_put(which, obj_index, str); assert(java_lang_String::is_instance(str), "must be string"); return str; @@ -1480,7 +1480,7 @@ void ConstantPool::resize_operands(int delta_len, int delta_size, TRAPS) { (min_size - 2*min_len) * sizeof(u2)); // Explicitly deallocate old operands array. // Note, it is not needed for 7u backport. 
- if ( operands() != NULL) { // the safety check + if ( operands() != nullptr) { // the safety check MetadataFactory::free_array(loader_data, operands()); } set_operands(new_ops); @@ -2281,7 +2281,7 @@ void ConstantPool::print_on(outputStream* st) const { if (on_stack()) st->print(" on_stack"); st->cr(); } - if (pool_holder() != NULL) { + if (pool_holder() != nullptr) { st->print_cr(" - holder: " PTR_FORMAT, p2i(pool_holder())); } st->print_cr(" - cache: " PTR_FORMAT, p2i(cache())); @@ -2311,7 +2311,7 @@ void ConstantPool::print_entry_on(const int index, outputStream* st) { switch (tag_at(index).value()) { case JVM_CONSTANT_Class : { Klass* k = klass_at(index, CATCH); - guarantee(k != NULL, "need klass"); + guarantee(k != nullptr, "need klass"); k->print_value_on(st); st->print(" {" PTR_FORMAT "}", p2i(k)); } @@ -2408,15 +2408,15 @@ void ConstantPool::print_value_on(outputStream* st) const { assert(is_constantPool(), "must be constantPool"); st->print("constant pool [%d]", length()); if (has_preresolution()) st->print("/preresolution"); - if (operands() != NULL) st->print("/operands[%d]", operands()->length()); + if (operands() != nullptr) st->print("/operands[%d]", operands()->length()); print_address_on(st); - if (pool_holder() != NULL) { + if (pool_holder() != nullptr) { st->print(" for "); pool_holder()->print_value_on(st); bool extra = (pool_holder()->constants() != this); if (extra) st->print(" (extra)"); } - if (cache() != NULL) { + if (cache() != nullptr) { st->print(" cache=" PTR_FORMAT, p2i(cache())); } } @@ -2437,8 +2437,8 @@ void ConstantPool::verify_on(outputStream* st) { guarantee(entry->refcount() != 0, "should have nonzero reference count"); } } - if (pool_holder() != NULL) { - // Note: pool_holder() can be NULL in temporary constant pools + if (pool_holder() != nullptr) { + // Note: pool_holder() can be null in temporary constant pools // used during constant pool merging guarantee(pool_holder()->is_klass(), "should be klass"); } diff --git 
a/src/hotspot/share/oops/constantPool.hpp b/src/hotspot/share/oops/constantPool.hpp index 9f9c88f0734..a98786a600a 100644 --- a/src/hotspot/share/oops/constantPool.hpp +++ b/src/hotspot/share/oops/constantPool.hpp @@ -186,7 +186,7 @@ class ConstantPool : public Metadata { // generics support Symbol* generic_signature() const { return (_generic_signature_index == 0) ? - (Symbol*)NULL : symbol_at(_generic_signature_index); + nullptr : symbol_at(_generic_signature_index); } u2 generic_signature_index() const { return _generic_signature_index; } void set_generic_signature_index(u2 sig_index) { _generic_signature_index = sig_index; } @@ -194,7 +194,7 @@ class ConstantPool : public Metadata { // source file name Symbol* source_file_name() const { return (_source_file_name_index == 0) ? - (Symbol*)NULL : symbol_at(_source_file_name_index); + nullptr : symbol_at(_source_file_name_index); } u2 source_file_name_index() const { return _source_file_name_index; } void set_source_file_name_index(u2 sourcefile_index) { _source_file_name_index = sourcefile_index; } @@ -565,7 +565,7 @@ class ConstantPool : public Metadata { operands->at_put(n+1, extract_high_short_from_int(offset)); } static int operand_array_length(Array* operands) { - if (operands == NULL || operands->length() == 0) return 0; + if (operands == nullptr || operands->length() == 0) return 0; int second_part = operand_offset_at(operands, 0); return (second_part / 2); } @@ -716,17 +716,17 @@ class ConstantPool : public Metadata { // Resolve late bound constants. 
oop resolve_constant_at(int index, TRAPS) { constantPoolHandle h_this(THREAD, this); - return resolve_constant_at_impl(h_this, index, _no_index_sentinel, NULL, THREAD); + return resolve_constant_at_impl(h_this, index, _no_index_sentinel, nullptr, THREAD); } oop resolve_cached_constant_at(int cache_index, TRAPS) { constantPoolHandle h_this(THREAD, this); - return resolve_constant_at_impl(h_this, _no_index_sentinel, cache_index, NULL, THREAD); + return resolve_constant_at_impl(h_this, _no_index_sentinel, cache_index, nullptr, THREAD); } oop resolve_possibly_cached_constant_at(int pool_index, TRAPS) { constantPoolHandle h_this(THREAD, this); - return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, NULL, THREAD); + return resolve_constant_at_impl(h_this, pool_index, _possible_index_sentinel, nullptr, THREAD); } oop find_cached_constant_at(int pool_index, bool& found_it, TRAPS) { @@ -811,7 +811,7 @@ class ConstantPool : public Metadata { private: void set_resolved_references(OopHandle s) { _cache->set_resolved_references(s); } - Array* reference_map() const { return (_cache == NULL) ? NULL : _cache->reference_map(); } + Array* reference_map() const { return (_cache == nullptr) ? nullptr : _cache->reference_map(); } void set_reference_map(Array* o) { _cache->set_reference_map(o); } Symbol* impl_name_ref_at(int which, bool uncached); diff --git a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp index 23fe7edf1fb..05b5efd4ecd 100644 --- a/src/hotspot/share/oops/cpCache.cpp +++ b/src/hotspot/share/oops/cpCache.cpp @@ -59,7 +59,7 @@ void ConstantPoolCacheEntry::initialize_entry(int index) { assert(0 < index && index < 0x10000, "sanity check"); _indices = index; - _f1 = NULL; + _f1 = nullptr; _f2 = _flags = 0; assert(constant_pool_index() == index, ""); } @@ -101,7 +101,7 @@ void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { // Sets f1, ordering with previous writes. 
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) { - assert(f1 != NULL, ""); + assert(f1 != nullptr, ""); Atomic::release_store(&_f1, f1); } @@ -158,12 +158,12 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co int vtable_index, bool sender_is_interface) { bool is_vtable_call = (vtable_index >= 0); // FIXME: split this method on this boolean - assert(method->interpreter_entry() != NULL, "should have been set at this point"); + assert(method->interpreter_entry() != nullptr, "should have been set at this point"); assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); int byte_no = -1; bool change_to_virtual = false; - InstanceKlass* holder = NULL; // have to declare this outside the switch + InstanceKlass* holder = nullptr; // have to declare this outside the switch switch (invoke_code) { case Bytecodes::_invokeinterface: holder = method->method_holder(); @@ -396,7 +396,7 @@ void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& ( 1 << is_final_shift ), adapter->size_of_parameters()); - LogStream* log_stream = NULL; + LogStream* log_stream = nullptr; LogStreamHandle(Debug, methodhandles, indy) lsh_indy; if (lsh_indy.is_enabled()) { ResourceMark rm; @@ -443,7 +443,7 @@ void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& set_bytecode_1(invoke_code); NOT_PRODUCT(verify(tty)); - if (log_stream != NULL) { + if (log_stream != nullptr) { this->print(log_stream, 0, cpool->cache()); } @@ -483,7 +483,7 @@ Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpo Bytecodes::Code invoke_code = bytecode_1(); if (invoke_code != (Bytecodes::Code)0) { Metadata* f1 = f1_ord(); - if (f1 != NULL) { + if (f1 != nullptr) { switch (invoke_code) { case Bytecodes::_invokeinterface: assert(f1->is_klass(), ""); @@ -521,13 +521,13 @@ Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpo break; } } - return NULL; + return 
nullptr; } oop ConstantPoolCacheEntry::appendix_if_resolved(const constantPoolHandle& cpool) const { if (!has_appendix()) - return NULL; + return nullptr; const int ref_index = f2_as_index(); return cpool->resolved_reference_at(ref_index); } @@ -563,7 +563,7 @@ void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method, return; } - assert (_f1 != NULL, "should not call with uninteresting entry"); + assert (_f1 != nullptr, "should not call with uninteresting entry"); if (!(_f1->is_method())) { // _f1 is a Klass* for an interface, _f2 is the method @@ -581,7 +581,7 @@ void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method, bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() { Method* m = get_interesting_method_entry(); // return false if m refers to a non-deleted old or obsolete method - if (m != NULL) { + if (m != nullptr) { assert(m->is_valid() && m->is_method(), "m is a valid method"); return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete } else { @@ -592,15 +592,15 @@ bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() { Method* ConstantPoolCacheEntry::get_interesting_method_entry() { if (!is_method_entry()) { // not a method entry so not interesting by default - return NULL; + return nullptr; } - Method* m = NULL; + Method* m = nullptr; if (is_vfinal()) { // virtual and final so _f2 contains method ptr instead of vtable index m = f2_as_vfinal_method(); } else if (is_f1_null()) { - // NULL _f1 means this is a virtual entry so also not interesting - return NULL; + // null _f1 means this is a virtual entry so also not interesting + return nullptr; } else { if (!(_f1->is_method())) { // _f1 is a Klass* for an interface @@ -609,9 +609,9 @@ Method* ConstantPoolCacheEntry::get_interesting_method_entry() { m = f1_as_method(); } } - assert(m != NULL && m->is_method(), "sanity check"); - if (m == NULL || !m->is_method()) { - return NULL; + assert(m != nullptr && m->is_method(), "sanity check"); + 
if (m == nullptr || !m->is_method()) { + return nullptr; } return m; } @@ -729,14 +729,14 @@ void ConstantPoolCache::remove_unshareable_info() { // is the copy to be written into the archive. It's in the ArchiveBuilder's "buffer space". // However, this->_initial_entries was not copied/relocated by the ArchiveBuilder, so it's // still pointing to the array allocated inside save_for_archive(). - assert(_initial_entries != NULL, "archived cpcache must have been initialized"); + assert(_initial_entries != nullptr, "archived cpcache must have been initialized"); assert(!ArchiveBuilder::current()->is_in_buffer_space(_initial_entries), "must be"); for (int i=0; iat(i); } - _initial_entries = NULL; + _initial_entries = nullptr; } #endif // INCLUDE_CDS @@ -745,12 +745,12 @@ void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) { data->remove_handle(_resolved_references); set_resolved_references(OopHandle()); MetadataFactory::free_array(data, _reference_map); - set_reference_map(NULL); + set_reference_map(nullptr); #if INCLUDE_CDS - if (_initial_entries != NULL) { + if (_initial_entries != nullptr) { Arguments::assert_is_dumping_archive(); MetadataFactory::free_array(data, _initial_entries); - _initial_entries = NULL; + _initial_entries = nullptr; } #endif } @@ -758,7 +758,7 @@ void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) { #if INCLUDE_CDS_JAVA_HEAP oop ConstantPoolCache::archived_references() { if (_archived_references_index < 0) { - return NULL; + return nullptr; } return HeapShared::get_root(_archived_references_index); } @@ -784,7 +784,7 @@ void ConstantPoolCache::adjust_method_entries(bool * trace_name_printed) { for (int i = 0; i < length(); i++) { ConstantPoolCacheEntry* entry = entry_at(i); Method* old_method = entry->get_interesting_method_entry(); - if (old_method == NULL || !old_method->is_old()) { + if (old_method == nullptr || !old_method->is_old()) { continue; // skip uninteresting entries } if (old_method->is_deleted()) { @@ 
-802,7 +802,7 @@ bool ConstantPoolCache::check_no_old_or_obsolete_entries() { ResourceMark rm; for (int i = 1; i < length(); i++) { Method* m = entry_at(i)->get_interesting_method_entry(); - if (m != NULL && !entry_at(i)->check_no_old_or_obsolete_entries()) { + if (m != nullptr && !entry_at(i)->check_no_old_or_obsolete_entries()) { log_trace(redefine, class, update, constantpool) ("cpcache check found old method entry: class: %s, old: %d, obsolete: %d, method: %s", constant_pool()->pool_holder()->external_name(), m->is_old(), m->is_obsolete(), m->external_name()); @@ -814,7 +814,7 @@ bool ConstantPoolCache::check_no_old_or_obsolete_entries() { void ConstantPoolCache::dump_cache() { for (int i = 1; i < length(); i++) { - if (entry_at(i)->get_interesting_method_entry() != NULL) { + if (entry_at(i)->get_interesting_method_entry() != nullptr) { entry_at(i)->print(tty, i, this); } } diff --git a/src/hotspot/share/oops/cpCache.hpp b/src/hotspot/share/oops/cpCache.hpp index b4daaf1ee5b..3ba5cc17eaa 100644 --- a/src/hotspot/share/oops/cpCache.hpp +++ b/src/hotspot/share/oops/cpCache.hpp @@ -145,7 +145,7 @@ class ConstantPoolCacheEntry { void set_bytecode_2(Bytecodes::Code code); void set_f1(Metadata* f1) { Metadata* existing_f1 = _f1; // read once - assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change"); + assert(existing_f1 == nullptr || existing_f1 == f1, "illegal field change"); _f1 = f1; } void release_set_f1(Metadata* f1); @@ -226,7 +226,7 @@ class ConstantPoolCacheEntry { private: void set_direct_or_vtable_call( Bytecodes::Code invoke_code, // the bytecode used for invoking the method - const methodHandle& method, // the method/prototype if any (NULL, otherwise) + const methodHandle& method, // the method/prototype if any (null, otherwise) int vtable_index, // the vtable index if any, else negative bool sender_is_interface ); @@ -442,7 +442,7 @@ class ConstantPoolCache: public MetaspaceObj { void metaspace_pointers_do(MetaspaceClosure* it); 
MetaspaceObj::Type type() const { return ConstantPoolCacheType; } - oop archived_references() NOT_CDS_JAVA_HEAP_RETURN_(NULL); + oop archived_references() NOT_CDS_JAVA_HEAP_RETURN_(nullptr); void set_archived_references(int root_index) NOT_CDS_JAVA_HEAP_RETURN; void clear_archived_references() NOT_CDS_JAVA_HEAP_RETURN; diff --git a/src/hotspot/share/oops/cpCache.inline.hpp b/src/hotspot/share/oops/cpCache.inline.hpp index 259b99669e4..4befcb19498 100644 --- a/src/hotspot/share/oops/cpCache.inline.hpp +++ b/src/hotspot/share/oops/cpCache.inline.hpp @@ -57,16 +57,16 @@ inline Method* ConstantPoolCacheEntry::f2_as_interface_method() const { inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)Atomic::load_acquire(&_f1); } inline Method* ConstantPoolCacheEntry::f1_as_method() const { - Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); + Metadata* f1 = f1_ord(); assert(f1 == nullptr || f1->is_method(), ""); return (Method*)f1; } inline Klass* ConstantPoolCacheEntry::f1_as_klass() const { - Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); + Metadata* f1 = f1_ord(); assert(f1 == nullptr || f1->is_klass(), ""); return (Klass*)f1; } -inline bool ConstantPoolCacheEntry::is_f1_null() const { Metadata* f1 = f1_ord(); return f1 == NULL; } +inline bool ConstantPoolCacheEntry::is_f1_null() const { Metadata* f1 = f1_ord(); return f1 == nullptr; } inline bool ConstantPoolCacheEntry::has_appendix() const { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift)) != 0; @@ -89,7 +89,7 @@ inline ConstantPoolCache::ConstantPoolCache(int length, const intStack& invokedynamic_inverse_index_map, const intStack& invokedynamic_references_map) : _length(length), - _constant_pool(NULL), + _constant_pool(nullptr), _gc_epoch(0) { CDS_JAVA_HEAP_ONLY(_archived_references_index = -1;) initialize(inverse_index_map, invokedynamic_inverse_index_map, diff --git a/src/hotspot/share/oops/fieldStreams.hpp 
b/src/hotspot/share/oops/fieldStreams.hpp index d2387531f29..c075e11fab6 100644 --- a/src/hotspot/share/oops/fieldStreams.hpp +++ b/src/hotspot/share/oops/fieldStreams.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -126,7 +126,7 @@ class FieldStreamBase : public StackObj { int index = _fields->at(_generic_signature_slot); return _constants->symbol_at(index); } else { - return NULL; + return nullptr; } } diff --git a/src/hotspot/share/oops/generateOopMap.cpp b/src/hotspot/share/oops/generateOopMap.cpp index 70916b4aad9..cc8efdf8d1f 100644 --- a/src/hotspot/share/oops/generateOopMap.cpp +++ b/src/hotspot/share/oops/generateOopMap.cpp @@ -280,7 +280,7 @@ RetTableEntry* RetTable::find_jsrs_for_target(int targBci) { cur = cur->next(); } ShouldNotReachHere(); - return NULL; + return nullptr; } // The instruction at bci is changing size by "delta". Update the return map. @@ -418,7 +418,7 @@ void GenerateOopMap::mark_bbheaders_and_count_gc_points() { // First mark all exception handlers as start of a basic-block ExceptionTable excps(method()); for(int i = 0; i < excps.length(); i ++) { - bb_mark_fct(this, excps.handler_pc(i), NULL); + bb_mark_fct(this, excps.handler_pc(i), nullptr); } // Then iterate through the code @@ -429,19 +429,19 @@ void GenerateOopMap::mark_bbheaders_and_count_gc_points() { int bci = bcs.bci(); if (!fellThrough) - bb_mark_fct(this, bci, NULL); + bb_mark_fct(this, bci, nullptr); - fellThrough = jump_targets_do(&bcs, &GenerateOopMap::bb_mark_fct, NULL); + fellThrough = jump_targets_do(&bcs, &GenerateOopMap::bb_mark_fct, nullptr); /* We will also mark successors of jsr's as basic block headers. 
*/ switch (bytecode) { case Bytecodes::_jsr: assert(!fellThrough, "should not happen"); - bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), NULL); + bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), nullptr); break; case Bytecodes::_jsr_w: assert(!fellThrough, "should not happen"); - bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), NULL); + bb_mark_fct(this, bci + Bytecodes::length_for(bytecode), nullptr); break; default: break; @@ -649,7 +649,7 @@ BasicBlock *GenerateOopMap::get_basic_block_containing(int bci) const { } fatal("should have found BB"); - return NULL; + return nullptr; } void GenerateOopMap::restore_state(BasicBlock *bb) @@ -672,11 +672,11 @@ int GenerateOopMap::next_bb_start_pc(BasicBlock *bb) { // // Allocate memory and throw LinkageError if failure. -#define ALLOC_RESOURCE_ARRAY(var, type, count) \ - var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count); \ - if (var == NULL) { \ +#define ALLOC_RESOURCE_ARRAY(var, type, count) \ + var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count); \ + if (var == nullptr) { \ report_error("Cannot reserve enough memory to analyze this method"); \ - return; \ + return; \ } @@ -801,7 +801,7 @@ void GenerateOopMap::copy_state(CellTypeState *dst, CellTypeState *src) { // monitor matching is purely informational and doesn't say anything // about the correctness of the code. 
void GenerateOopMap::merge_state_into_bb(BasicBlock *bb) { - guarantee(bb != NULL, "null basicblock"); + guarantee(bb != nullptr, "null basicblock"); assert(bb->is_alive(), "merging state into a dead basicblock"); if (_stack_top == bb->_stack_top) { @@ -1157,13 +1157,13 @@ void GenerateOopMap::interp_bb(BasicBlock *bb) { } interp1(&itr); - bool fall_through = jump_targets_do(&itr, GenerateOopMap::merge_state, NULL); + bool fall_through = jump_targets_do(&itr, GenerateOopMap::merge_state, nullptr); if (_got_error) return; if (itr.code() == Bytecodes::_ret) { assert(!fall_through, "cannot be set if ret instruction"); // Automatically handles 'wide' ret indices - ret_jump_targets_do(&itr, GenerateOopMap::merge_state, itr.get_index(), NULL); + ret_jump_targets_do(&itr, GenerateOopMap::merge_state, itr.get_index(), nullptr); } else if (fall_through) { // Hit end of BB, but the instr. was a fall-through instruction, // so perform transition as if the BB ended in a "jump". @@ -1226,7 +1226,7 @@ void GenerateOopMap::do_exception_edge(BytecodeStream* itr) { if (start_pc <= bci && bci < end_pc) { BasicBlock *excBB = get_basic_block_at(handler_pc); - guarantee(excBB != NULL, "no basic block for exception"); + guarantee(excBB != nullptr, "no basic block for exception"); CellTypeState *excStk = excBB->stack(); CellTypeState *cOpStck = stack(); CellTypeState cOpStck_0 = cOpStck[0]; @@ -1840,7 +1840,7 @@ void GenerateOopMap::do_monitorexit(int bci) { // possibility that this bytecode will throw an // exception. 
BasicBlock* bb = get_basic_block_containing(bci); - guarantee(bb != NULL, "no basic block for bci"); + guarantee(bb != nullptr, "no basic block for bci"); bb->set_changed(true); bb->_monitor_top = bad_monitors; @@ -2067,7 +2067,7 @@ GenerateOopMap::GenerateOopMap(const methodHandle& method) { // We have to initialize all variables here, that can be queried directly _method = method; _max_locals=0; - _init_vars = NULL; + _init_vars = nullptr; #ifndef PRODUCT // If we are doing a detailed trace, include the regular trace information. @@ -2088,7 +2088,7 @@ bool GenerateOopMap::compute_map(Thread* current) { } #endif TraceTime t_single("oopmap time", TimeOopMap2); - TraceTime t_all(NULL, &_total_oopmap_time, TimeOopMap); + TraceTime t_all(nullptr, &_total_oopmap_time, TimeOopMap); // Initialize values _got_error = false; @@ -2100,7 +2100,7 @@ bool GenerateOopMap::compute_map(Thread* current) { _init_vars = new GrowableArray(5); // There are seldom more than 5 init_vars _report_result = false; _report_result_for_send = false; - _new_var_map = NULL; + _new_var_map = nullptr; _ret_adr_tos = new GrowableArray(5); // 5 seems like a good number; _did_rewriting = false; _did_relocation = false; @@ -2218,7 +2218,7 @@ void GenerateOopMap::result_for_basicblock(int bci) { // Find basicblock and report results BasicBlock* bb = get_basic_block_containing(bci); - guarantee(bb != NULL, "no basic block for bci"); + guarantee(bb != nullptr, "no basic block for bci"); assert(bb->is_reachable(), "getting result from unreachable basicblock"); bb->set_changed(true); interp_bb(bb); @@ -2278,7 +2278,7 @@ void GenerateOopMap::rewrite_refval_conflicts() method()->print_codes(); } - assert(_new_var_map!=NULL, "nothing to rewrite"); + assert(_new_var_map!=nullptr, "nothing to rewrite"); assert(_conflict==true, "We should not be here"); compute_ret_adr_at_TOS(); @@ -2302,7 +2302,7 @@ void GenerateOopMap::rewrite_refval_conflicts() _max_locals += _nof_refval_conflicts; // That was that... 
- _new_var_map = NULL; + _new_var_map = nullptr; _nof_refval_conflicts = 0; } @@ -2502,7 +2502,7 @@ bool GenerateOopMap::stack_top_holds_ret_addr(int bci) { } void GenerateOopMap::compute_ret_adr_at_TOS() { - assert(_ret_adr_tos != NULL, "must be initialized"); + assert(_ret_adr_tos != nullptr, "must be initialized"); _ret_adr_tos->clear(); for (int i = 0; i < bb_count(); i++) { diff --git a/src/hotspot/share/oops/generateOopMap.hpp b/src/hotspot/share/oops/generateOopMap.hpp index 8ab15c92b84..229aeffad4c 100644 --- a/src/hotspot/share/oops/generateOopMap.hpp +++ b/src/hotspot/share/oops/generateOopMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,7 +77,7 @@ class RetTable { void add_jsr(int return_bci, int target_bci); // Adds entry to list public: - RetTable() { _first = NULL; } + RetTable() { _first = nullptr; } void compute_ret_table(const methodHandle& method); void update_ret_table(int bci, int delta); RetTableEntry* find_jsrs_for_target(int targBci); diff --git a/src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp b/src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp index 0c644da0103..22eb7281654 100644 --- a/src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp +++ b/src/hotspot/share/oops/instanceClassLoaderKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ inline void InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* c if (Devirtualizer::do_metadata(closure)) { ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj); // cld can be null if we have a non-registered class loader. - if (cld != NULL) { + if (cld != nullptr) { Devirtualizer::do_cld(closure, cld); } } @@ -64,7 +64,7 @@ inline void InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosur if (mr.contains(obj)) { ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj); // cld can be null if we have a non-registered class loader. - if (cld != NULL) { + if (cld != nullptr) { Devirtualizer::do_cld(closure, cld); } } diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index a2d1e55e1f6..5a90bb0a0c3 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -111,10 +111,10 @@ #define HOTSPOT_CLASS_INITIALIZATION_end HOTSPOT_CLASS_INITIALIZATION_END #define DTRACE_CLASSINIT_PROBE(type, thread_type) \ { \ - char* data = NULL; \ + char* data = nullptr; \ int len = 0; \ Symbol* clss_name = name(); \ - if (clss_name != NULL) { \ + if (clss_name != nullptr) { \ data = (char*)clss_name->bytes(); \ len = clss_name->utf8_length(); \ } \ @@ -124,10 +124,10 @@ #define DTRACE_CLASSINIT_PROBE_WAIT(type, thread_type, wait) \ { \ - char* data = NULL; \ + char* data = nullptr; \ int len = 0; \ Symbol* clss_name = name(); \ - if (clss_name != NULL) { \ + if (clss_name != nullptr) { \ data = (char*)clss_name->bytes(); \ len = clss_name->utf8_length(); \ } \ @@ -146,7 +146,7 @@ bool InstanceKlass::_finalization_enabled = true; static inline bool is_class_loader(const Symbol* class_name, const ClassFileParser& parser) { - assert(class_name != NULL, "invariant"); + assert(class_name != nullptr, "invariant"); if (class_name == vmSymbols::java_lang_ClassLoader()) { 
return true; @@ -154,7 +154,7 @@ static inline bool is_class_loader(const Symbol* class_name, if (vmClasses::ClassLoader_klass_loaded()) { const Klass* const super_klass = parser.super_klass(); - if (super_klass != NULL) { + if (super_klass != nullptr) { if (super_klass->is_subtype_of(vmClasses::ClassLoader_klass())) { return true; } @@ -174,7 +174,7 @@ static inline bool is_stack_chunk_class(const Symbol* class_name, // same classloader. bool InstanceKlass::has_nest_member(JavaThread* current, InstanceKlass* k) const { assert(!is_hidden(), "unexpected hidden class"); - if (_nest_members == NULL || _nest_members == Universe::the_empty_short_array()) { + if (_nest_members == nullptr || _nest_members == Universe::the_empty_short_array()) { if (log_is_enabled(Trace, class, nestmates)) { ResourceMark rm(current); log_trace(class, nestmates)("Checked nest membership of %s in non-nest-host class %s", @@ -206,8 +206,8 @@ bool InstanceKlass::has_nest_member(JavaThread* current, InstanceKlass* k) const // Called to verify that k is a permitted subclass of this class bool InstanceKlass::has_as_permitted_subclass(const InstanceKlass* k) const { Thread* current = Thread::current(); - assert(k != NULL, "sanity check"); - assert(_permitted_subclasses != NULL && _permitted_subclasses != Universe::the_empty_short_array(), + assert(k != nullptr, "sanity check"); + assert(_permitted_subclasses != nullptr && _permitted_subclasses != Universe::the_empty_short_array(), "unexpected empty _permitted_subclasses array"); if (log_is_enabled(Trace, class, sealed)) { @@ -245,20 +245,20 @@ bool InstanceKlass::has_as_permitted_subclass(const InstanceKlass* k) const { // Return nest-host class, resolving, validating and saving it if needed. 
// In cases where this is called from a thread that cannot do classloading -// (such as a native JIT thread) then we simply return NULL, which in turn +// (such as a native JIT thread) then we simply return null, which in turn // causes the access check to return false. Such code will retry the access // from a more suitable environment later. Otherwise the _nest_host is always // set once this method returns. // Any errors from nest-host resolution must be preserved so they can be queried // from higher-level access checking code, and reported as part of access checking // exceptions. -// VirtualMachineErrors are propagated with a NULL return. -// Under any conditions where the _nest_host can be set to non-NULL the resulting +// VirtualMachineErrors are propagated with a null return. +// Under any conditions where the _nest_host can be set to non-null the resulting // value of it and, if applicable, the nest host resolution/validation error, // are idempotent. InstanceKlass* InstanceKlass::nest_host(TRAPS) { InstanceKlass* nest_host_k = _nest_host; - if (nest_host_k != NULL) { + if (nest_host_k != nullptr) { return nest_host_k; } @@ -271,7 +271,7 @@ InstanceKlass* InstanceKlass::nest_host(TRAPS) { if (!can_resolve && !_constants->tag_at(_nest_host_index).is_klass()) { log_trace(class, nestmates)("Rejected resolution of nest-host of %s in unsuitable thread", this->external_name()); - return NULL; // sentinel to say "try again from a different context" + return nullptr; // sentinel to say "try again from a different context" } log_trace(class, nestmates)("Resolving nest-host of %s using cp entry for %s", @@ -281,7 +281,7 @@ InstanceKlass* InstanceKlass::nest_host(TRAPS) { Klass* k = _constants->klass_at(_nest_host_index, THREAD); if (HAS_PENDING_EXCEPTION) { if (PENDING_EXCEPTION->is_a(vmClasses::VirtualMachineError_klass())) { - return NULL; // propagate VMEs + return nullptr; // propagate VMEs } stringStream ss; char* target_host_class = 
_constants->klass_name_at(_nest_host_index)->as_C_string(); @@ -298,7 +298,7 @@ InstanceKlass* InstanceKlass::nest_host(TRAPS) { // A valid nest-host is an instance class in the current package that lists this // class as a nest member. If any of these conditions are not met the class is // its own nest-host. - const char* error = NULL; + const char* error = nullptr; // JVMS 5.4.4 indicates package check comes first if (is_same_class_package(k)) { @@ -360,11 +360,11 @@ InstanceKlass* InstanceKlass::nest_host(TRAPS) { // assert some of those facts. void InstanceKlass::set_nest_host(InstanceKlass* host) { assert(is_hidden(), "must be a hidden class"); - assert(host != NULL, "NULL nest host specified"); - assert(_nest_host == NULL, "current class has resolved nest-host"); - assert(nest_host_error() == NULL, "unexpected nest host resolution error exists: %s", + assert(host != nullptr, "null nest host specified"); + assert(_nest_host == nullptr, "current class has resolved nest-host"); + assert(nest_host_error() == nullptr, "unexpected nest host resolution error exists: %s", nest_host_error()); - assert((host->_nest_host == NULL && host->_nest_host_index == 0) || + assert((host->_nest_host == nullptr && host->_nest_host_index == 0) || (host->_nest_host == host), "proposed host is not a valid nest-host"); // Can't assert this as package is not set yet: // assert(is_same_class_package(host), "proposed host is in wrong package"); @@ -375,7 +375,7 @@ void InstanceKlass::set_nest_host(InstanceKlass* host) { // a hidden class does not expect a statically defined nest-host if (_nest_host_index > 0) { msg = "(the NestHost attribute in the current class is ignored)"; - } else if (_nest_members != NULL && _nest_members != Universe::the_empty_short_array()) { + } else if (_nest_members != nullptr && _nest_members != Universe::the_empty_short_array()) { msg = "(the NestMembers attribute in the current class is ignored)"; } log_trace(class, nestmates)("Injected type %s into the nest 
of %s %s", @@ -387,7 +387,7 @@ void InstanceKlass::set_nest_host(InstanceKlass* host) { _nest_host = host; // Record dependency to keep nest host from being unloaded before this class. ClassLoaderData* this_key = class_loader_data(); - assert(this_key != NULL, "sanity"); + assert(this_key != nullptr, "sanity"); this_key->record_dependency(host); } @@ -403,12 +403,12 @@ bool InstanceKlass::has_nestmate_access_to(InstanceKlass* k, TRAPS) { // the target class k. InstanceKlass* cur_host = nest_host(CHECK_false); - if (cur_host == NULL) { + if (cur_host == nullptr) { return false; } Klass* k_nest_host = k->nest_host(CHECK_false); - if (k_nest_host == NULL) { + if (k_nest_host == nullptr) { return false; } @@ -424,7 +424,7 @@ bool InstanceKlass::has_nestmate_access_to(InstanceKlass* k, TRAPS) { const char* InstanceKlass::nest_host_error() { if (_nest_host_index == 0) { - return NULL; + return nullptr; } else { constantPoolHandle cph(Thread::current(), constants()); return SystemDictionary::find_nest_host_error(cph, (int)_nest_host_index); @@ -438,9 +438,9 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& par parser.is_interface()); const Symbol* const class_name = parser.class_name(); - assert(class_name != NULL, "invariant"); + assert(class_name != nullptr, "invariant"); ClassLoaderData* loader_data = parser.loader_data(); - assert(loader_data != NULL, "invariant"); + assert(loader_data != nullptr, "invariant"); InstanceKlass* ik; @@ -465,7 +465,7 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& par // Check for pending exception before adding to the loader data and incrementing // class count. Can get OOM here. 
if (HAS_PENDING_EXCEPTION) { - return NULL; + return nullptr; } return ik; @@ -474,7 +474,7 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(const ClassFileParser& par // copy method ordering from resource area to Metaspace void InstanceKlass::copy_method_ordering(const intArray* m, TRAPS) { - if (m != NULL) { + if (m != nullptr) { // allocate a new array and copy contents (memcpy?) _method_ordering = MetadataFactory::new_array(class_loader_data(), m->length(), CHECK); for (int i = 0; i < m->length(); i++) { @@ -488,7 +488,7 @@ void InstanceKlass::copy_method_ordering(const intArray* m, TRAPS) { // create a new array of vtable_indices for default methods Array* InstanceKlass::create_new_default_vtable_indices(int len, TRAPS) { Array* vtable_indices = MetadataFactory::new_array(class_loader_data(), len, CHECK_NULL); - assert(default_vtable_indices() == NULL, "only create once"); + assert(default_vtable_indices() == nullptr, "only create once"); set_default_vtable_indices(vtable_indices); return vtable_indices; } @@ -499,10 +499,10 @@ static Monitor* create_init_monitor(const char* name) { InstanceKlass::InstanceKlass(const ClassFileParser& parser, KlassKind kind, ReferenceType reference_type) : Klass(kind), - _nest_members(NULL), - _nest_host(NULL), - _permitted_subclasses(NULL), - _record_components(NULL), + _nest_members(nullptr), + _nest_host(nullptr), + _permitted_subclasses(nullptr), + _record_components(nullptr), _static_field_size(parser.static_field_size()), _nonstatic_oop_map_size(nonstatic_oop_map_size(parser.total_oop_map_count())), _itable_len(parser.itable_size()), @@ -510,7 +510,7 @@ InstanceKlass::InstanceKlass(const ClassFileParser& parser, KlassKind kind, Refe _init_state(allocated), _reference_type(reference_type), _init_monitor(create_init_monitor("InstanceKlassInitMonitor_lock")), - _init_thread(NULL) + _init_thread(nullptr) { set_vtable_length(parser.vtable_size()); set_access_flags(parser.access_flags()); @@ -518,18 +518,18 @@ 
InstanceKlass::InstanceKlass(const ClassFileParser& parser, KlassKind kind, Refe set_layout_helper(Klass::instance_layout_helper(parser.layout_size(), false)); - assert(NULL == _methods, "underlying memory not zeroed?"); + assert(nullptr == _methods, "underlying memory not zeroed?"); assert(is_instance_klass(), "is layout incorrect?"); assert(size_helper() == parser.layout_size(), "incorrect size_helper?"); } void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data, Array* methods) { - if (methods != NULL && methods != Universe::the_empty_method_array() && + if (methods != nullptr && methods != Universe::the_empty_method_array() && !methods->is_shared()) { for (int i = 0; i < methods->length(); i++) { Method* method = methods->at(i); - if (method == NULL) continue; // maybe null if error processing + if (method == nullptr) continue; // maybe null if error processing // Only want to delete methods that are not executing for RedefineClasses. // The previous version will point to them so they're not totally dangling assert (!method->on_stack(), "shouldn't be called with methods on stack"); @@ -548,23 +548,23 @@ void InstanceKlass::deallocate_interfaces(ClassLoaderData* loader_data, Array* ti = transitive_interfaces; if (ti != Universe::the_empty_instance_klass_array() && ti != local_interfaces) { // check that the interfaces don't come from super class - Array* sti = (super_klass == NULL) ? NULL : + Array* sti = (super_klass == nullptr) ? 
nullptr : InstanceKlass::cast(super_klass)->transitive_interfaces(); - if (ti != sti && ti != NULL && !ti->is_shared()) { + if (ti != sti && ti != nullptr && !ti->is_shared()) { MetadataFactory::free_array(loader_data, ti); } } // local interfaces can be empty if (local_interfaces != Universe::the_empty_instance_klass_array() && - local_interfaces != NULL && !local_interfaces->is_shared()) { + local_interfaces != nullptr && !local_interfaces->is_shared()) { MetadataFactory::free_array(loader_data, local_interfaces); } } void InstanceKlass::deallocate_record_components(ClassLoaderData* loader_data, Array* record_components) { - if (record_components != NULL && !record_components->is_shared()) { + if (record_components != nullptr && !record_components->is_shared()) { for (int i = 0; i < record_components->length(); i++) { RecordComponent* record_component = record_components->at(i); MetadataFactory::free_metadata(loader_data, record_component); @@ -577,8 +577,8 @@ void InstanceKlass::deallocate_record_components(ClassLoaderData* loader_data, // InstanceKlass points to. void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) { // Orphan the mirror first, CMS thinks it's still live. - if (java_mirror() != NULL) { - java_lang_Class::set_klass(java_mirror(), NULL); + if (java_mirror() != nullptr) { + java_lang_Class::set_klass(java_mirror(), nullptr); } // Also remove mirror from handles @@ -591,7 +591,7 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) { // For class redefinition, we keep the original class so this scratch class // doesn't have an array class. Either way, assert that there is nothing // to deallocate. - assert(array_klasses() == NULL, "array classes shouldn't be created for this class yet"); + assert(array_klasses() == nullptr, "array classes shouldn't be created for this class yet"); // Release C heap allocated data that this points to, which includes // reference counting symbol names. 
@@ -601,59 +601,59 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) { release_C_heap_structures(/* release_sub_metadata */ false); deallocate_methods(loader_data, methods()); - set_methods(NULL); + set_methods(nullptr); deallocate_record_components(loader_data, record_components()); - set_record_components(NULL); + set_record_components(nullptr); - if (method_ordering() != NULL && + if (method_ordering() != nullptr && method_ordering() != Universe::the_empty_int_array() && !method_ordering()->is_shared()) { MetadataFactory::free_array(loader_data, method_ordering()); } - set_method_ordering(NULL); + set_method_ordering(nullptr); // default methods can be empty - if (default_methods() != NULL && + if (default_methods() != nullptr && default_methods() != Universe::the_empty_method_array() && !default_methods()->is_shared()) { MetadataFactory::free_array(loader_data, default_methods()); } // Do NOT deallocate the default methods, they are owned by superinterfaces. - set_default_methods(NULL); + set_default_methods(nullptr); // default methods vtable indices can be empty - if (default_vtable_indices() != NULL && + if (default_vtable_indices() != nullptr && !default_vtable_indices()->is_shared()) { MetadataFactory::free_array(loader_data, default_vtable_indices()); } - set_default_vtable_indices(NULL); + set_default_vtable_indices(nullptr); // This array is in Klass, but remove it with the InstanceKlass since // this place would be the only caller and it can share memory with transitive // interfaces. 
- if (secondary_supers() != NULL && + if (secondary_supers() != nullptr && secondary_supers() != Universe::the_empty_klass_array() && // see comments in compute_secondary_supers about the following cast (address)(secondary_supers()) != (address)(transitive_interfaces()) && !secondary_supers()->is_shared()) { MetadataFactory::free_array(loader_data, secondary_supers()); } - set_secondary_supers(NULL); + set_secondary_supers(nullptr); deallocate_interfaces(loader_data, super(), local_interfaces(), transitive_interfaces()); - set_transitive_interfaces(NULL); - set_local_interfaces(NULL); + set_transitive_interfaces(nullptr); + set_local_interfaces(nullptr); - if (fields() != NULL && !fields()->is_shared()) { + if (fields() != nullptr && !fields()->is_shared()) { MetadataFactory::free_array(loader_data, fields()); } - set_fields(NULL, 0); + set_fields(nullptr, 0); // If a method from a redefined class is using this constant pool, don't // delete it, yet. The new class's previous version will point to this. 
- if (constants() != NULL) { + if (constants() != nullptr) { assert (!constants()->on_stack(), "shouldn't be called if anything is onstack"); if (!constants()->is_shared()) { MetadataFactory::free_metadata(loader_data, constants()); @@ -661,35 +661,35 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) { // Delete any cached resolution errors for the constant pool SystemDictionary::delete_resolution_error(constants()); - set_constants(NULL); + set_constants(nullptr); } - if (inner_classes() != NULL && + if (inner_classes() != nullptr && inner_classes() != Universe::the_empty_short_array() && !inner_classes()->is_shared()) { MetadataFactory::free_array(loader_data, inner_classes()); } - set_inner_classes(NULL); + set_inner_classes(nullptr); - if (nest_members() != NULL && + if (nest_members() != nullptr && nest_members() != Universe::the_empty_short_array() && !nest_members()->is_shared()) { MetadataFactory::free_array(loader_data, nest_members()); } - set_nest_members(NULL); + set_nest_members(nullptr); - if (permitted_subclasses() != NULL && + if (permitted_subclasses() != nullptr && permitted_subclasses() != Universe::the_empty_short_array() && !permitted_subclasses()->is_shared()) { MetadataFactory::free_array(loader_data, permitted_subclasses()); } - set_permitted_subclasses(NULL); + set_permitted_subclasses(nullptr); // We should deallocate the Annotations instance if it's not in shared spaces. 
- if (annotations() != NULL && !annotations()->is_shared()) { + if (annotations() != nullptr && !annotations()->is_shared()) { MetadataFactory::free_metadata(loader_data, annotations()); } - set_annotations(NULL); + set_annotations(nullptr); SystemDictionaryShared::handle_class_unloading(this); @@ -701,13 +701,13 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) { } bool InstanceKlass::is_record() const { - return _record_components != NULL && + return _record_components != nullptr && is_final() && java_super() == vmClasses::Record_klass(); } bool InstanceKlass::is_sealed() const { - return _permitted_subclasses != NULL && + return _permitted_subclasses != nullptr && _permitted_subclasses != Universe::the_empty_short_array(); } @@ -816,7 +816,7 @@ bool InstanceKlass::link_class_impl(TRAPS) { // link super class before linking this class Klass* super_klass = super(); - if (super_klass != NULL) { + if (super_klass != nullptr) { if (super_klass->is_interface()) { // check if super class is an interface ResourceMark rm(THREAD); Exceptions::fthrow( @@ -1043,7 +1043,7 @@ void InstanceKlass::initialize_impl(TRAPS) { wait = true; jt->set_class_to_be_initialized(this); ml.wait(); - jt->set_class_to_be_initialized(NULL); + jt->set_class_to_be_initialized(nullptr); } // Step 3 @@ -1090,7 +1090,7 @@ void InstanceKlass::initialize_impl(TRAPS) { // interfaces. 
if (!is_interface()) { Klass* super_klass = super(); - if (super_klass != NULL && super_klass->should_be_initialized()) { + if (super_klass != nullptr && super_klass->should_be_initialized()) { super_klass->initialize(THREAD); } // If C implements any interface that declares a non-static, concrete method, @@ -1121,7 +1121,7 @@ void InstanceKlass::initialize_impl(TRAPS) { // Step 8 { DTRACE_CLASSINIT_PROBE_WAIT(clinit, -1, wait); - if (class_initializer() != NULL) { + if (class_initializer() != nullptr) { // Timer includes any side effects of class initialization (resolution, // etc), but not recursive entry into call_class_initializer(). PerfClassTraceTime timer(ClassLoader::perf_class_init_time(), @@ -1183,12 +1183,12 @@ void InstanceKlass::set_initialization_state_and_notify(ClassState state, JavaTh if (state == linked && UseVtableBasedCHA && Universe::is_fully_initialized()) { MutexLocker ml(current, Compile_lock); - set_init_thread(NULL); // reset _init_thread before changing _init_state + set_init_thread(nullptr); // reset _init_thread before changing _init_state set_init_state(state); CodeCache::flush_dependents_on(this); } else { - set_init_thread(NULL); // reset _init_thread before changing _init_state + set_init_thread(nullptr); // reset _init_thread before changing _init_state set_init_state(state); } ml.notify_all(); @@ -1196,13 +1196,13 @@ void InstanceKlass::set_initialization_state_and_notify(ClassState state, JavaTh InstanceKlass* InstanceKlass::implementor() const { InstanceKlass* volatile* ik = adr_implementor(); - if (ik == NULL) { - return NULL; + if (ik == nullptr) { + return nullptr; } else { // This load races with inserts, and therefore needs acquire. 
InstanceKlass* ikls = Atomic::load_acquire(ik); - if (ikls != NULL && !ikls->is_loader_alive()) { - return NULL; // don't return unloaded class + if (ikls != nullptr && !ikls->is_loader_alive()) { + return nullptr; // don't return unloaded class } else { return ikls; } @@ -1214,15 +1214,15 @@ void InstanceKlass::set_implementor(InstanceKlass* ik) { assert_locked_or_safepoint(Compile_lock); assert(is_interface(), "not interface"); InstanceKlass* volatile* addr = adr_implementor(); - assert(addr != NULL, "null addr"); - if (addr != NULL) { + assert(addr != nullptr, "null addr"); + if (addr != nullptr) { Atomic::release_store(addr, ik); } } int InstanceKlass::nof_implementors() const { InstanceKlass* ik = implementor(); - if (ik == NULL) { + if (ik == nullptr) { return 0; } else if (ik != this) { return 1; @@ -1235,7 +1235,7 @@ int InstanceKlass::nof_implementors() const { // When there are more than one implementors, the _implementor field // is set to the interface Klass* itself. Following are the possible // values for the _implementor field: -// NULL - no implementor +// null - no implementor // implementor Klass* - one implementor // self - more than one implementor // @@ -1253,14 +1253,14 @@ void InstanceKlass::add_implementor(InstanceKlass* ik) { // (Note: CHA must walk subclasses of direct implementors // in order to locate indirect implementors.) InstanceKlass* super_ik = ik->java_super(); - if (super_ik != NULL && super_ik->implements_interface(this)) + if (super_ik != nullptr && super_ik->implements_interface(this)) // We only need to check one immediate superclass, since the // implements_interface query looks at transitive_interfaces. // Any supers of the super have the same (or fewer) transitive_interfaces. return; InstanceKlass* iklass = implementor(); - if (iklass == NULL) { + if (iklass == nullptr) { set_implementor(ik); } else if (iklass != this && iklass != ik) { // There is already an implementor. 
Use itself as an indicator of @@ -1276,7 +1276,7 @@ void InstanceKlass::add_implementor(InstanceKlass* ik) { void InstanceKlass::init_implementor() { if (is_interface()) { - set_implementor(NULL); + set_implementor(nullptr); } } @@ -1306,7 +1306,7 @@ GrowableArray* InstanceKlass::compute_secondary_supers(int num_extra_slo if (num_secondaries == 0) { // Must share this for correct bootstrapping! set_secondary_supers(Universe::the_empty_klass_array()); - return NULL; + return nullptr; } else if (num_extra_slots == 0) { // The secondary super list is exactly the same as the transitive interfaces, so // let's use it instead of making a copy. @@ -1314,7 +1314,7 @@ GrowableArray* InstanceKlass::compute_secondary_supers(int num_extra_slo // We need the cast because Array is NOT a supertype of Array, // (but it's safe to do here because we won't write into _secondary_supers from this point on). set_secondary_supers((Array*)(address)interfaces); - return NULL; + return nullptr; } else { // Copy transitive interfaces to a temporary growable array to be constructed // into the secondary super list with extra slots. 
@@ -1406,7 +1406,7 @@ void InstanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) { Klass* InstanceKlass::array_klass(int n, TRAPS) { // Need load-acquire for lock-free read - if (array_klasses_acquire() == NULL) { + if (array_klasses_acquire() == nullptr) { ResourceMark rm(THREAD); JavaThread *jt = THREAD; { @@ -1414,7 +1414,7 @@ Klass* InstanceKlass::array_klass(int n, TRAPS) { MutexLocker ma(THREAD, MultiArray_lock); // Check if update has already taken place - if (array_klasses() == NULL) { + if (array_klasses() == nullptr) { ObjArrayKlass* k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, CHECK_NULL); // use 'release' to pair with lock-free load release_set_array_klasses(k); @@ -1429,8 +1429,8 @@ Klass* InstanceKlass::array_klass(int n, TRAPS) { Klass* InstanceKlass::array_klass_or_null(int n) { // Need load-acquire for lock-free read ObjArrayKlass* oak = array_klasses_acquire(); - if (oak == NULL) { - return NULL; + if (oak == nullptr) { + return nullptr; } else { return oak->array_klass_or_null(n); } @@ -1449,16 +1449,16 @@ static int call_class_initializer_counter = 0; // for debugging Method* InstanceKlass::class_initializer() const { Method* clinit = find_method( vmSymbols::class_initializer_name(), vmSymbols::void_method_signature()); - if (clinit != NULL && clinit->has_valid_initializer_flags()) { + if (clinit != nullptr && clinit->has_valid_initializer_flags()) { return clinit; } - return NULL; + return nullptr; } void InstanceKlass::call_class_initializer(TRAPS) { if (ReplayCompiles && (ReplaySuppressInitializers == 1 || - (ReplaySuppressInitializers >= 2 && class_loader() != NULL))) { + (ReplaySuppressInitializers >= 2 && class_loader() != nullptr))) { // Hide the existence of the initializer for the purpose of replaying the compile return; } @@ -1482,9 +1482,9 @@ void InstanceKlass::call_class_initializer(TRAPS) { LogStream ls(lt); ls.print("%d Initializing ", call_class_initializer_counter++); 
name()->print_value_on(&ls); - ls.print_cr("%s (" PTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", p2i(this)); + ls.print_cr("%s (" PTR_FORMAT ")", h_method() == nullptr ? "(no method)" : "", p2i(this)); } - if (h_method() != NULL) { + if (h_method() != nullptr) { JavaCallArguments args; // No arguments JavaValue result(T_VOID); JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args) @@ -1497,10 +1497,10 @@ void InstanceKlass::mask_for(const methodHandle& method, int bci, // Lazily create the _oop_map_cache at first request // Lock-free access requires load_acquire. OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache); - if (oop_map_cache == NULL) { + if (oop_map_cache == nullptr) { MutexLocker x(OopMapCacheAlloc_lock); // Check if _oop_map_cache was allocated while we were waiting for this lock - if ((oop_map_cache = _oop_map_cache) == NULL) { + if ((oop_map_cache = _oop_map_cache) == nullptr) { oop_map_cache = new OopMapCache(); // Ensure _oop_map_cache is stable, since it is examined without a lock Atomic::release_store(&_oop_map_cache, oop_map_cache); @@ -1540,10 +1540,10 @@ Klass* InstanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescr } // search for field in direct superinterfaces Klass* intf2 = InstanceKlass::cast(intf1)->find_interface_field(name, sig, fd); - if (intf2 != NULL) return intf2; + if (intf2 != nullptr) return intf2; } // otherwise field lookup fails - return NULL; + return nullptr; } @@ -1555,14 +1555,14 @@ Klass* InstanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) } // 2) search for field recursively in direct superinterfaces { Klass* intf = find_interface_field(name, sig, fd); - if (intf != NULL) return intf; + if (intf != nullptr) return intf; } // 3) apply field lookup recursively if superclass exists { Klass* supr = super(); - if (supr != NULL) return InstanceKlass::cast(supr)->find_field(name, sig, fd); + if (supr != nullptr) return 
InstanceKlass::cast(supr)->find_field(name, sig, fd); } // 4) otherwise field lookup fails - return NULL; + return nullptr; } @@ -1575,14 +1575,14 @@ Klass* InstanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fiel // 2) search for field recursively in direct superinterfaces if (is_static) { Klass* intf = find_interface_field(name, sig, fd); - if (intf != NULL) return intf; + if (intf != nullptr) return intf; } // 3) apply field lookup recursively if superclass exists { Klass* supr = super(); - if (supr != NULL) return InstanceKlass::cast(supr)->find_field(name, sig, is_static, fd); + if (supr != nullptr) return InstanceKlass::cast(supr)->find_field(name, sig, is_static, fd); } // 4) otherwise field lookup fails - return NULL; + return nullptr; } @@ -1599,7 +1599,7 @@ bool InstanceKlass::find_local_field_from_offset(int offset, bool is_static, fie bool InstanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const { Klass* klass = const_cast<InstanceKlass*>(this); - while (klass != NULL) { + while (klass != nullptr) { if (InstanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) { return true; } @@ -1647,7 +1647,7 @@ void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, Handle, TRAP void InstanceKlass::do_nonstatic_fields(FieldClosure* cl) { InstanceKlass* super = superklass(); - if (super != NULL) { + if (super != nullptr) { super->do_nonstatic_fields(cl); } fieldDescriptor fd; @@ -1667,7 +1667,7 @@ static int compare_fields_by_offset(Pair<int,int>* a, Pair<int,int>* b) { void InstanceKlass::print_nonstatic_fields(FieldClosure* cl) { InstanceKlass* super = superklass(); - if (super != NULL) { + if (super != nullptr) { super->print_nonstatic_fields(cl); } ResourceMark rm; @@ -1793,7 +1793,7 @@ Method* InstanceKlass::find_instance_method(const Array<Method*>* methods, OverpassLookupMode::find, StaticLookupMode::skip, private_mode); - assert(((meth == NULL) ||
!meth->is_static()), "find_instance_method should have skipped statics"); return meth; } @@ -1861,7 +1861,7 @@ Method* InstanceKlass::find_method_impl(const Array<Method*>* methods, StaticLookupMode static_mode, PrivateLookupMode private_mode) { int hit = find_method_index(methods, name, signature, overpass_mode, static_mode, private_mode); - return hit >= 0 ? methods->at(hit): NULL; + return hit >= 0 ? methods->at(hit): nullptr; } // true if method matches signature and conforms to skipping_X conditions. @@ -1947,7 +1947,7 @@ int InstanceKlass::find_method_by_name(const Symbol* name, int* end) const { int InstanceKlass::find_method_by_name(const Array<Method*>* methods, const Symbol* name, int* end_ptr) { - assert(end_ptr != NULL, "just checking"); + assert(end_ptr != nullptr, "just checking"); int start = quick_search(methods, name); int end = start + 1; if (start != -1) { @@ -1968,19 +1968,19 @@ Method* InstanceKlass::uncached_lookup_method(const Symbol* name, PrivateLookupMode private_mode) const { OverpassLookupMode overpass_local_mode = overpass_mode; const Klass* klass = this; - while (klass != NULL) { + while (klass != nullptr) { Method* const method = InstanceKlass::cast(klass)->find_method_impl(name, signature, overpass_local_mode, StaticLookupMode::find, private_mode); - if (method != NULL) { + if (method != nullptr) { return method; } klass = klass->super(); overpass_local_mode = OverpassLookupMode::skip; // Always ignore overpass methods in superclasses } - return NULL; + return nullptr; } #ifdef ASSERT @@ -1988,7 +1988,7 @@ Method* InstanceKlass::uncached_lookup_method(const Symbol* name, // one of the superclasses was redefined bool InstanceKlass::has_redefined_this_or_super() const { const Klass* klass = this; - while (klass != NULL) { + while (klass != nullptr) { if (InstanceKlass::cast(klass)->has_been_redefined()) { return true; } @@ -2002,12 +2002,12 @@ bool InstanceKlass::has_redefined_this_or_super() const { // Do NOT return private or static methods Method*
InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const { - Method* m = NULL; - if (default_methods() != NULL) { + Method* m = nullptr; + if (default_methods() != nullptr) { m = find_method(default_methods(), name, signature); } // Look up interfaces - if (m == NULL) { + if (m == nullptr) { m = lookup_method_in_all_interfaces(name, signature, DefaultsLookupMode::find); } return m; @@ -2021,16 +2021,16 @@ Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name, DefaultsLookupMode defaults_mode) const { Array<InstanceKlass*>* all_ifs = transitive_interfaces(); int num_ifs = all_ifs->length(); - InstanceKlass *ik = NULL; + InstanceKlass *ik = nullptr; for (int i = 0; i < num_ifs; i++) { ik = all_ifs->at(i); Method* m = ik->lookup_method(name, signature); - if (m != NULL && m->is_public() && !m->is_static() && + if (m != nullptr && m->is_public() && !m->is_static() && ((defaults_mode != DefaultsLookupMode::skip) || !m->is_default_method())) { return m; } } - return NULL; + return nullptr; } PrintClassClosure::PrintClassClosure(outputStream* st, bool verbose) @@ -2082,8 +2082,8 @@ void PrintClassClosure::do_klass(Klass* k) { /* jni_id_for for jfieldIds only */ JNIid* InstanceKlass::jni_id_for(int offset) { MutexLocker ml(JfieldIdCreation_lock); - JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset); - if (probe == NULL) {
nullptr : jni_ids()->find(offset); + if (probe == nullptr) { // Allocate new static field identifier probe = new JNIid(this, offset, jni_ids()); set_jni_ids(probe); @@ -2093,7 +2093,7 @@ JNIid* InstanceKlass::jni_id_for(int offset) { u2 InstanceKlass::enclosing_method_data(int offset) const { const Array<jushort>* const inner_class_list = inner_classes(); - if (inner_class_list == NULL) { + if (inner_class_list == nullptr) { return 0; } const int length = inner_class_list->length(); @@ -2108,7 +2108,7 @@ u2 InstanceKlass::enclosing_method_data(int offset) const { void InstanceKlass::set_enclosing_method_indices(u2 class_index, u2 method_index) { Array<jushort>* inner_class_list = inner_classes(); - assert (inner_class_list != NULL, "_inner_classes list is not set up"); + assert (inner_class_list != nullptr, "_inner_classes list is not set up"); int length = inner_class_list->length(); if (length % inner_class_next_offset == enclosing_method_attribute_size) { int index = length - enclosing_method_attribute_size; @@ -2128,11 +2128,11 @@ jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) { size_t idnum = (size_t)method_h->method_idnum(); jmethodID* jmeths = methods_jmethod_ids_acquire(); size_t length = 0; - jmethodID id = NULL; + jmethodID id = nullptr; // We use a double-check locking idiom here because this cache is // performance sensitive. In the normal system, this cache only - // transitions from NULL to non-NULL which is safe because we use + // transitions from null to non-null which is safe because we use // release_set_methods_jmethod_ids() to advertise the new cache. // A partially constructed cache should never be seen by a racing // thread. We also use release_store() to save a new jmethodID @@ -2143,12 +2143,12 @@ jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) { // generally acquired in those two cases.
// // If the RedefineClasses() API has been used, then this cache can - // grow and we'll have transitions from non-NULL to bigger non-NULL. + // grow and we'll have transitions from non-null to bigger non-null. // Cache creation requires no leaks and we require safety between all // cache accesses and freeing of the old cache so a lock is generally // acquired when the RedefineClasses() API has been used. - if (jmeths != NULL) { + if (jmeths != nullptr) { // the cache already exists if (!idnum_can_increment()) { // the cache can't grow so we can just get the current values @@ -2161,20 +2161,20 @@ jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) { // implied else: // we need to allocate a cache so default length and id values are good - if (jmeths == NULL || // no cache yet - length <= idnum || // cache is too short - id == NULL) { // cache doesn't contain entry + if (jmeths == nullptr || // no cache yet + length <= idnum || // cache is too short + id == nullptr) { // cache doesn't contain entry // This function can be called by the VMThread or GC worker threads so we // have to do all things that might block on a safepoint before grabbing the lock. // Otherwise, we can deadlock with the VMThread or have a cache // consistency issue. These vars keep track of what we might have // to free after the lock is dropped. 
- jmethodID to_dealloc_id = NULL; - jmethodID* to_dealloc_jmeths = NULL; + jmethodID to_dealloc_id = nullptr; + jmethodID* to_dealloc_jmeths = nullptr; // may not allocate new_jmeths or use it if we allocate it - jmethodID* new_jmeths = NULL; + jmethodID* new_jmeths = nullptr; if (length <= idnum) { // allocate a new cache that might be used size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count()); @@ -2187,11 +2187,11 @@ jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) { // allocate a new jmethodID that might be used { MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag); - jmethodID new_id = NULL; + jmethodID new_id = nullptr; if (method_h->is_old() && !method_h->is_obsolete()) { // The method passed in is old (but not obsolete), we need to use the current version Method* current_method = method_with_idnum((int)idnum); - assert(current_method != NULL, "old and but not obsolete, so should exist"); + assert(current_method != nullptr, "old and but not obsolete, so should exist"); new_id = Method::make_jmethod_id(class_loader_data(), current_method); } else { // It is the current version of the method or an obsolete method, @@ -2205,11 +2205,11 @@ jmethodID InstanceKlass::get_jmethod_id(const methodHandle& method_h) { // The lock has been dropped so we can free resources. // Free up either the old cache or the new cache if we allocated one. 
- if (to_dealloc_jmeths != NULL) { + if (to_dealloc_jmeths != nullptr) { FreeHeap(to_dealloc_jmeths); } // free up the new ID since it wasn't needed - if (to_dealloc_id != NULL) { + if (to_dealloc_id != nullptr) { Method::destroy_jmethod_id(class_loader_data(), to_dealloc_id); } } @@ -2227,7 +2227,7 @@ void InstanceKlass::ensure_space_for_methodids(int start_offset) { for (int index = start_offset; index < length; index++) { Method* m = methods()->at(index); jmethodID id = m->find_jmethod_id_or_null(); - if (id == NULL) { + if (id == nullptr) { new_jmeths++; } } @@ -2245,19 +2245,19 @@ jmethodID InstanceKlass::get_jmethod_id_fetch_or_update( size_t idnum, jmethodID new_id, jmethodID* new_jmeths, jmethodID* to_dealloc_id_p, jmethodID** to_dealloc_jmeths_p) { - assert(new_id != NULL, "sanity check"); - assert(to_dealloc_id_p != NULL, "sanity check"); - assert(to_dealloc_jmeths_p != NULL, "sanity check"); + assert(new_id != nullptr, "sanity check"); + assert(to_dealloc_id_p != nullptr, "sanity check"); + assert(to_dealloc_jmeths_p != nullptr, "sanity check"); assert(JmethodIdCreation_lock->owned_by_self(), "sanity check"); // reacquire the cache - we are locked, single threaded or at a safepoint jmethodID* jmeths = methods_jmethod_ids_acquire(); - jmethodID id = NULL; + jmethodID id = nullptr; size_t length = 0; - if (jmeths == NULL || // no cache yet + if (jmeths == nullptr || // no cache yet (length = (size_t)jmeths[0]) <= idnum) { // cache is too short - if (jmeths != NULL) { + if (jmeths != nullptr) { // copy any existing entries from the old cache for (size_t index = 0; index < length; index++) { new_jmeths[index+1] = jmeths[index+1]; @@ -2270,7 +2270,7 @@ jmethodID InstanceKlass::get_jmethod_id_fetch_or_update( id = jmeths[idnum+1]; *to_dealloc_jmeths_p = new_jmeths; // save new cache for later delete } - if (id == NULL) { + if (id == nullptr) { // No matching jmethodID in the existing cache or we have a new // cache or we just grew the cache. 
This cache write is done here // by the first thread to win the foot race because a jmethodID @@ -2293,29 +2293,29 @@ jmethodID InstanceKlass::get_jmethod_id_fetch_or_update( // void InstanceKlass::get_jmethod_id_length_value(jmethodID* cache, size_t idnum, size_t *length_p, jmethodID* id_p) { - assert(cache != NULL, "sanity check"); - assert(length_p != NULL, "sanity check"); - assert(id_p != NULL, "sanity check"); + assert(cache != nullptr, "sanity check"); + assert(length_p != nullptr, "sanity check"); + assert(id_p != nullptr, "sanity check"); // cache size is stored in element[0], other elements offset by one *length_p = (size_t)cache[0]; if (*length_p <= idnum) { // cache is too short - *id_p = NULL; + *id_p = nullptr; } else { *id_p = cache[idnum+1]; // fetch jmethodID (if any) } } -// Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles +// Lookup a jmethodID, null if not found. Do no blocking, no allocations, no handles jmethodID InstanceKlass::jmethod_id_or_null(Method* method) { size_t idnum = (size_t)method->method_idnum(); jmethodID* jmeths = methods_jmethod_ids_acquire(); size_t length; // length assigned as debugging crumb - jmethodID id = NULL; - if (jmeths != NULL && // If there is a cache + jmethodID id = nullptr; + if (jmeths != nullptr && // If there is a cache (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough, - id = jmeths[idnum+1]; // Look up the id (may be NULL) + id = jmeths[idnum+1]; // Look up the id (may be null) } return id; } @@ -2359,10 +2359,10 @@ void InstanceKlass::clean_implementors_list() { for (;;) { // Use load_acquire due to competing with inserts InstanceKlass* impl = Atomic::load_acquire(adr_implementor()); - if (impl != NULL && !impl->is_loader_alive()) { - // NULL this field, might be an unloaded instance klass or NULL + if (impl != nullptr && !impl->is_loader_alive()) { + // null this field, might be an unloaded instance klass or null InstanceKlass* volatile* iklass = 
adr_implementor(); - if (Atomic::cmpxchg(iklass, impl, (InstanceKlass*)NULL) == impl) { + if (Atomic::cmpxchg(iklass, impl, (InstanceKlass*)nullptr) == impl) { // Successfully unlinking implementor. if (log_is_enabled(Trace, class, unload)) { ResourceMark rm; @@ -2380,8 +2380,8 @@ void InstanceKlass::clean_implementors_list() { void InstanceKlass::clean_method_data() { for (int m = 0; m < methods()->length(); m++) { MethodData* mdo = methods()->at(m)->method_data(); - if (mdo != NULL) { - MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : mdo->extra_data_lock()); + if (mdo != nullptr) { + MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? nullptr : mdo->extra_data_lock()); mdo->clean_method_data(/*always_clean*/false); } } @@ -2427,7 +2427,7 @@ void InstanceKlass::metaspace_pointers_do(MetaspaceClosure* it) { / itableOffsetEntry::size(); for (int i = 0; i < nof_interfaces; i ++, ioe ++) { - if (ioe->interface_klass() != NULL) { + if (ioe->interface_klass() != nullptr) { it->push(ioe->interface_klass_addr()); itableMethodEntry* ime = ioe->first_method_entry(this); int n = klassItable::method_count_for_interface(ioe->interface_klass()); @@ -2480,54 +2480,54 @@ void InstanceKlass::remove_unshareable_info() { } // do array classes also. - if (array_klasses() != NULL) { + if (array_klasses() != nullptr) { array_klasses()->remove_unshareable_info(); } - // These are not allocated from metaspace. They are safe to set to NULL. - _source_debug_extension = NULL; - _dep_context = NULL; - _osr_nmethods_head = NULL; + // These are not allocated from metaspace. They are safe to set to null. 
+ _source_debug_extension = nullptr; + _dep_context = nullptr; + _osr_nmethods_head = nullptr; #if INCLUDE_JVMTI - _breakpoints = NULL; - _previous_versions = NULL; - _cached_class_file = NULL; - _jvmti_cached_class_field_map = NULL; + _breakpoints = nullptr; + _previous_versions = nullptr; + _cached_class_file = nullptr; + _jvmti_cached_class_field_map = nullptr; #endif - _init_thread = NULL; - _methods_jmethod_ids = NULL; - _jni_ids = NULL; - _oop_map_cache = NULL; + _init_thread = nullptr; + _methods_jmethod_ids = nullptr; + _jni_ids = nullptr; + _oop_map_cache = nullptr; // clear _nest_host to ensure re-load at runtime - _nest_host = NULL; + _nest_host = nullptr; init_shared_package_entry(); _dep_context_last_cleaned = 0; - _init_monitor = NULL; + _init_monitor = nullptr; } void InstanceKlass::remove_java_mirror() { Klass::remove_java_mirror(); // do array classes also. - if (array_klasses() != NULL) { + if (array_klasses() != nullptr) { array_klasses()->remove_java_mirror(); } } void InstanceKlass::init_shared_package_entry() { #if !INCLUDE_CDS_JAVA_HEAP - _package_entry = NULL; + _package_entry = nullptr; #else if (!MetaspaceShared::use_full_module_graph()) { - _package_entry = NULL; + _package_entry = nullptr; } else if (DynamicDumpSharedSpaces) { if (!MetaspaceShared::is_in_shared_metaspace(_package_entry)) { - _package_entry = NULL; + _package_entry = nullptr; } } else { if (is_shared_unregistered_class()) { - _package_entry = NULL; + _package_entry = nullptr; } else { _package_entry = PackageEntry::get_archived_entry(_package_entry); } @@ -2570,7 +2570,7 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl // restore constant pool resolved references constants()->restore_unshareable_info(CHECK); - if (array_klasses() != NULL) { + if (array_klasses() != nullptr) { // To get a consistent list of classes we need MultiArray_lock to ensure // array classes aren't observed while they are being restored. 
MutexLocker ml(MultiArray_lock); @@ -2602,7 +2602,7 @@ bool InstanceKlass::can_be_verified_at_dumptime() const { if (major_version() < 50 /*JAVA_6_VERSION*/) { return false; } - if (java_super() != NULL && !java_super()->can_be_verified_at_dumptime()) { + if (java_super() != nullptr && !java_super()->can_be_verified_at_dumptime()) { return false; } Array<InstanceKlass*>* interfaces = local_interfaces(); @@ -2644,7 +2644,7 @@ void InstanceKlass::unload_class(InstanceKlass* ik) { Events::log_class_unloading(Thread::current(), ik); #if INCLUDE_JFR - assert(ik != NULL, "invariant"); + assert(ik != nullptr, "invariant"); EventClassUnload event; event.set_unloadedClass(ik); event.set_definingClassLoader(ik->class_loader_data()); @@ -2670,22 +2670,22 @@ void InstanceKlass::release_C_heap_structures(bool release_sub_metadata) { delete _init_monitor; // Deallocate oop map cache - if (_oop_map_cache != NULL) { + if (_oop_map_cache != nullptr) { delete _oop_map_cache; - _oop_map_cache = NULL; + _oop_map_cache = nullptr; } // Deallocate JNI identifiers for jfieldIDs JNIid::deallocate(jni_ids()); - set_jni_ids(NULL); + set_jni_ids(nullptr); jmethodID* jmeths = methods_jmethod_ids_acquire(); - if (jmeths != (jmethodID*)NULL) { - release_set_methods_jmethod_ids(NULL); + if (jmeths != (jmethodID*)nullptr) { + release_set_methods_jmethod_ids(nullptr); FreeHeap(jmeths); } - assert(_dep_context == NULL, + assert(_dep_context == nullptr, "dependencies should already be cleaned"); #if INCLUDE_JVMTI @@ -2696,9 +2696,9 @@ void InstanceKlass::release_C_heap_structures(bool release_sub_metadata) { } // deallocate the cached class file - if (_cached_class_file != NULL) { + if (_cached_class_file != nullptr) { os::free(_cached_class_file); - _cached_class_file = NULL; + _cached_class_file = nullptr; } #endif @@ -2710,8 +2710,8 @@ void InstanceKlass::release_C_heap_structures(bool release_sub_metadata) { } void InstanceKlass::set_source_debug_extension(const char* array, int length) { - if (array == NULL) {
- _source_debug_extension = NULL; + if (array == nullptr) { + _source_debug_extension = nullptr; } else { // Adding one to the attribute length in order to store a null terminator // character could cause an overflow because the attribute length is @@ -2753,7 +2753,7 @@ const char* InstanceKlass::signature_name() const { } } - // Add the semicolon and the NULL + // Add the semicolon and the null dest[dest_index++] = JVM_SIGNATURE_ENDCLASS; dest[dest_index] = '\0'; return dest; @@ -2796,53 +2796,53 @@ void InstanceKlass::set_package(ClassLoaderData* loader_data, PackageEntry* pkg_ check_prohibited_package(name(), loader_data, CHECK); } - if (is_shared() && _package_entry != NULL) { + if (is_shared() && _package_entry != nullptr) { if (MetaspaceShared::use_full_module_graph() && _package_entry == pkg_entry) { // we can use the saved package assert(MetaspaceShared::is_in_shared_metaspace(_package_entry), "must be"); return; } else { - _package_entry = NULL; + _package_entry = nullptr; } } // ClassLoader::package_from_class_name has already incremented the refcount of the symbol // it returns, so we need to decrement it when the current function exits. TempNewSymbol from_class_name = - (pkg_entry != NULL) ? NULL : ClassLoader::package_from_class_name(name()); + (pkg_entry != nullptr) ? nullptr : ClassLoader::package_from_class_name(name()); Symbol* pkg_name; - if (pkg_entry != NULL) { + if (pkg_entry != nullptr) { pkg_name = pkg_entry->name(); } else { pkg_name = from_class_name; } - if (pkg_name != NULL && loader_data != NULL) { + if (pkg_name != nullptr && loader_data != nullptr) { // Find in class loader's package entry table. - _package_entry = pkg_entry != NULL ? pkg_entry : loader_data->packages()->lookup_only(pkg_name); + _package_entry = pkg_entry != nullptr ? pkg_entry : loader_data->packages()->lookup_only(pkg_name); // If the package name is not found in the loader's package // entry table, it is an indication that the package has not // been defined. 
Consider it defined within the unnamed module. - if (_package_entry == NULL) { + if (_package_entry == nullptr) { if (!ModuleEntryTable::javabase_defined()) { // Before java.base is defined during bootstrapping, define all packages in // the java.base module. If a non-java.base package is erroneously placed // in the java.base module it will be caught later when java.base // is defined by ModuleEntryTable::verify_javabase_packages check. - assert(ModuleEntryTable::javabase_moduleEntry() != NULL, JAVA_BASE_NAME " module is NULL"); + assert(ModuleEntryTable::javabase_moduleEntry() != nullptr, JAVA_BASE_NAME " module is null"); _package_entry = loader_data->packages()->create_entry_if_absent(pkg_name, ModuleEntryTable::javabase_moduleEntry()); } else { - assert(loader_data->unnamed_module() != NULL, "unnamed module is NULL"); + assert(loader_data->unnamed_module() != nullptr, "unnamed module is null"); _package_entry = loader_data->packages()->create_entry_if_absent(pkg_name, loader_data->unnamed_module()); } // A package should have been successfully created DEBUG_ONLY(ResourceMark rm(THREAD)); - assert(_package_entry != NULL, "Package entry for class %s not found, loader %s", + assert(_package_entry != nullptr, "Package entry for class %s not found, loader %s", name()->as_C_string(), loader_data->loader_name_and_id()); } @@ -2859,7 +2859,7 @@ void InstanceKlass::set_package(ClassLoaderData* loader_data, PackageEntry* pkg_ ResourceMark rm(THREAD); log_trace(module)("Setting package: class: %s, package: unnamed, loader: %s, module: %s", external_name(), - (loader_data != NULL) ? loader_data->loader_name_and_id() : "NULL", + (loader_data != nullptr) ? loader_data->loader_name_and_id() : "null", UNNAMED_MODULE); } } @@ -2874,7 +2874,7 @@ void InstanceKlass::set_package(ClassLoaderData* loader_data, PackageEntry* pkg_ // classes are loaded by the boot loader) that at least one of the package's // classes has been loaded. 
void InstanceKlass::set_classpath_index(s2 path_index) { - if (_package_entry != NULL) { + if (_package_entry != nullptr) { DEBUG_ONLY(PackageEntryTable* pkg_entry_tbl = ClassLoaderData::the_null_class_loader_data()->packages();) assert(pkg_entry_tbl->lookup_only(_package_entry->name()) == _package_entry, "Should be same"); assert(path_index != -1, "Unexpected classpath_index"); @@ -2898,8 +2898,8 @@ bool InstanceKlass::is_same_class_package(const Klass* class2) const { classpkg2 = class2->package(); } else { assert(class2->is_typeArray_klass(), "should be type array"); - classloader2 = NULL; - classpkg2 = NULL; + classloader2 = nullptr; + classpkg2 = nullptr; } // Same package is determined by comparing class loader @@ -2932,13 +2932,13 @@ bool InstanceKlass::is_same_class_package(oop other_class_loader, if (bad_class_name) { return false; } - // Check that package_from_class_name() returns NULL, not "", if there is no package. - assert(other_pkg == NULL || other_pkg->utf8_length() > 0, "package name is empty string"); + // Check that package_from_class_name() returns null, not "", if there is no package. + assert(other_pkg == nullptr || other_pkg->utf8_length() > 0, "package name is empty string"); const Symbol* const this_package_name = - this->package() != NULL ? this->package()->name() : NULL; + this->package() != nullptr ? this->package()->name() : nullptr; - if (this_package_name == NULL || other_pkg == NULL) { + if (this_package_name == nullptr || other_pkg == nullptr) { // One of the two doesn't have a package. Only return true if the other // one also doesn't have a package. 
return this_package_name == other_pkg; @@ -2967,7 +2967,7 @@ void InstanceKlass::check_prohibited_package(Symbol* class_name, TRAPS) { if (!loader_data->is_boot_class_loader_data() && !loader_data->is_platform_class_loader_data() && - class_name != NULL && class_name->utf8_length() >= 5) { + class_name != nullptr && class_name->utf8_length() >= 5) { ResourceMark rm(THREAD); bool prohibited; const u1* base = class_name->base(); @@ -2979,7 +2979,7 @@ void InstanceKlass::check_prohibited_package(Symbol* class_name, } if (prohibited) { TempNewSymbol pkg_name = ClassLoader::package_from_class_name(class_name); - assert(pkg_name != NULL, "Error in parsing package name starting with 'java/'"); + assert(pkg_name != nullptr, "Error in parsing package name starting with 'java/'"); char* name = pkg_name->as_C_string(); const char* class_loader_name = loader_data->loader_name_and_id(); StringUtils::replace_no_expand(name, "/", "."); @@ -3015,7 +3015,7 @@ bool InstanceKlass::find_inner_classes_attr(int* ooff, int* noff, TRAPS) const { } InstanceKlass* InstanceKlass::compute_enclosing_class(bool* inner_is_member, TRAPS) const { - InstanceKlass* outer_klass = NULL; + InstanceKlass* outer_klass = nullptr; *inner_is_member = false; int ooff = 0, noff = 0; bool has_inner_classes_attr = find_inner_classes_attr(&ooff, &noff, THREAD); @@ -3033,12 +3033,12 @@ InstanceKlass* InstanceKlass::compute_enclosing_class(bool* inner_is_member, TRA "%s and %s disagree on InnerClasses attribute", ok->external_name(), external_name()); - return NULL; + return nullptr; } outer_klass = InstanceKlass::cast(ok); *inner_is_member = true; } - if (NULL == outer_klass) { + if (nullptr == outer_klass) { // It may be a local class; try for that. int encl_method_class_idx = enclosing_method_class_index(); if (encl_method_class_idx != 0) { @@ -3050,7 +3050,7 @@ InstanceKlass* InstanceKlass::compute_enclosing_class(bool* inner_is_member, TRA } // If no inner class attribute found for this class. 
- if (NULL == outer_klass) return NULL; + if (nullptr == outer_klass) return nullptr; // Throws an exception if outer klass has not declared k as an inner klass // We need evidence that each klass knows about the other, or else @@ -3104,7 +3104,7 @@ Method* InstanceKlass::method_at_itable(InstanceKlass* holder, int index, TRAPS) bool implements_interface; // initialized by method_at_itable_or_null Method* m = method_at_itable_or_null(holder, index, implements_interface); // out parameter - if (m != NULL) { + if (m != nullptr) { assert(implements_interface, "sanity"); return m; } else if (implements_interface) { @@ -3139,7 +3139,7 @@ Method* InstanceKlass::method_at_itable_or_null(InstanceKlass* holder, int index } } implements_interface = false; - return NULL; // offset entry not found + return nullptr; // offset entry not found } int InstanceKlass::vtable_index_of_interface_method(Method* intf_method) { @@ -3152,7 +3152,7 @@ int InstanceKlass::vtable_index_of_interface_method(Method* intf_method) { Symbol* signature = intf_method->signature(); // First check in default method array - if (!intf_method->is_abstract() && default_methods() != NULL) { + if (!intf_method->is_abstract() && default_methods() != nullptr) { int index = find_method_index(default_methods(), name, signature, Klass::OverpassLookupMode::find, @@ -3177,10 +3177,10 @@ int InstanceKlass::vtable_index_of_interface_method(Method* intf_method) { // Note: those in the vtable, should have been updated via adjust_method_entries void InstanceKlass::adjust_default_methods(bool* trace_name_printed) { // search the default_methods for uses of either obsolete or EMCP methods - if (default_methods() != NULL) { + if (default_methods() != nullptr) { for (int index = 0; index < default_methods()->length(); index ++) { Method* old_method = default_methods()->at(index); - if (old_method == NULL || !old_method->is_old()) { + if (old_method == nullptr || !old_method->is_old()) { continue; // skip uninteresting 
entries } assert(!old_method->is_deleted(), "default methods may not be deleted"); @@ -3209,7 +3209,7 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) { assert_lock_strong(CompiledMethod_lock); #ifndef PRODUCT nmethod* prev = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), n->comp_level(), true); - assert(prev == NULL || !prev->is_in_use() COMPILER2_PRESENT(|| StressRecompilation), + assert(prev == nullptr || !prev->is_in_use() COMPILER2_PRESENT(|| StressRecompilation), "redundant OSR recompilation detected. memory leak in CodeCache!"); #endif // only one compilation can be active @@ -3222,7 +3222,7 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) { // Get rid of the osr methods for the same bci that have lower levels. for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) { nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true); - if (inv != NULL && inv->is_in_use()) { + if (inv != nullptr && inv->is_in_use()) { inv->make_not_entrant(); } } @@ -3231,16 +3231,16 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) { // Remove osr nmethod from the list. Return true if found and removed. bool InstanceKlass::remove_osr_nmethod(nmethod* n) { // This is a short non-blocking critical region, so the no safepoint check is ok. - MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? 
nullptr : CompiledMethod_lock , Mutex::_no_safepoint_check_flag); assert(n->is_osr_method(), "wrong kind of nmethod"); - nmethod* last = NULL; + nmethod* last = nullptr; nmethod* cur = osr_nmethods_head(); int max_level = CompLevel_none; // Find the max comp level excluding n Method* m = n->method(); // Search for match bool found = false; - while(cur != NULL && cur != n) { + while(cur != nullptr && cur != n) { if (m == cur->method()) { // Find max level before n max_level = MAX2(max_level, cur->comp_level()); @@ -3248,20 +3248,20 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) { last = cur; cur = cur->osr_link(); } - nmethod* next = NULL; + nmethod* next = nullptr; if (cur == n) { found = true; next = cur->osr_link(); - if (last == NULL) { + if (last == nullptr) { // Remove first element set_osr_nmethods_head(next); } else { last->set_osr_link(next); } } - n->set_osr_link(NULL); + n->set_osr_link(nullptr); cur = next; - while (cur != NULL) { + while (cur != nullptr) { // Find max level after n if (m == cur->method()) { max_level = MAX2(max_level, cur->comp_level()); @@ -3273,11 +3273,11 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) { } int InstanceKlass::mark_osr_nmethods(const Method* m) { - MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); nmethod* osr = osr_nmethods_head(); int found = 0; - while (osr != NULL) { + while (osr != nullptr) { assert(osr->is_osr_method(), "wrong kind of nmethod found in chain"); if (osr->method() == m) { osr->mark_for_deoptimization(); @@ -3289,11 +3289,11 @@ int InstanceKlass::mark_osr_nmethods(const Method* m) { } nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_level, bool match_level) const { - MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? 
nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); nmethod* osr = osr_nmethods_head(); - nmethod* best = NULL; - while (osr != NULL) { + nmethod* best = nullptr; + while (osr != nullptr) { assert(osr->is_osr_method(), "wrong kind of nmethod found in chain"); // There can be a time when a c1 osr method exists but we are waiting // for a c2 version. When c2 completes its osr nmethod we will trash @@ -3309,7 +3309,7 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le return osr; } } else { - if (best == NULL || (osr->comp_level() > best->comp_level())) { + if (best == nullptr || (osr->comp_level() > best->comp_level())) { if (osr->comp_level() == CompilationPolicy::highest_compile_level()) { // Found the best possible - return it. return osr; @@ -3321,11 +3321,11 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le osr = osr->osr_link(); } - assert(match_level == false || best == NULL, "shouldn't pick up anything if match_level is set"); - if (best != NULL && best->comp_level() >= comp_level) { + assert(match_level == false || best == nullptr, "shouldn't pick up anything if match_level is set"); + if (best != nullptr && best->comp_level() >= comp_level) { return best; } - return NULL; + return nullptr; } // ----------------------------------------------------------------------------------------------------- @@ -3370,7 +3370,7 @@ void InstanceKlass::print_on(outputStream* st) const { st->print(BULLET"sub: "); Klass* sub = subklass(); int n; - for (n = 0; sub != NULL; n++, sub = sub->next_sibling()) { + for (n = 0; sub != nullptr; n++, sub = sub->next_sibling()) { if (n < MaxSubklassPrintSize) { sub->print_value_on(st); st->print(" "); @@ -3399,29 +3399,29 @@ void InstanceKlass::print_on(outputStream* st) const { } st->print(BULLET"method ordering: "); method_ordering()->print_value_on(st); st->cr(); st->print(BULLET"default_methods: "); default_methods()->print_value_on(st); st->cr(); - if 
(Verbose && default_methods() != NULL) { + if (Verbose && default_methods() != nullptr) { Array* method_array = default_methods(); for (int i = 0; i < method_array->length(); i++) { st->print("%d : ", i); method_array->at(i)->print_value(); st->cr(); } } - if (default_vtable_indices() != NULL) { + if (default_vtable_indices() != nullptr) { st->print(BULLET"default vtable indices: "); default_vtable_indices()->print_value_on(st); st->cr(); } st->print(BULLET"local interfaces: "); local_interfaces()->print_value_on(st); st->cr(); st->print(BULLET"trans. interfaces: "); transitive_interfaces()->print_value_on(st); st->cr(); st->print(BULLET"constants: "); constants()->print_value_on(st); st->cr(); - if (class_loader_data() != NULL) { + if (class_loader_data() != nullptr) { st->print(BULLET"class loader data: "); class_loader_data()->print_value_on(st); st->cr(); } - if (source_file_name() != NULL) { + if (source_file_name() != nullptr) { st->print(BULLET"source file: "); source_file_name()->print_value_on(st); st->cr(); } - if (source_debug_extension() != NULL) { + if (source_debug_extension() != nullptr) { st->print(BULLET"source debug extension: "); st->print("%s", source_debug_extension()); st->cr(); @@ -3434,7 +3434,7 @@ void InstanceKlass::print_on(outputStream* st) const { bool have_pv = false; // previous versions are linked together through the InstanceKlass for (InstanceKlass* pv_node = previous_versions(); - pv_node != NULL; + pv_node != nullptr; pv_node = pv_node->previous_versions()) { if (!have_pv) st->print(BULLET"previous version: "); @@ -3444,23 +3444,23 @@ void InstanceKlass::print_on(outputStream* st) const { if (have_pv) st->cr(); } - if (generic_signature() != NULL) { + if (generic_signature() != nullptr) { st->print(BULLET"generic signature: "); generic_signature()->print_value_on(st); st->cr(); } st->print(BULLET"inner classes: "); inner_classes()->print_value_on(st); st->cr(); st->print(BULLET"nest members: "); 
nest_members()->print_value_on(st); st->cr(); - if (record_components() != NULL) { + if (record_components() != nullptr) { st->print(BULLET"record components: "); record_components()->print_value_on(st); st->cr(); } st->print(BULLET"permitted subclasses: "); permitted_subclasses()->print_value_on(st); st->cr(); - if (java_mirror() != NULL) { + if (java_mirror() != nullptr) { st->print(BULLET"java mirror: "); java_mirror()->print_value_on(st); st->cr(); } else { - st->print_cr(BULLET"java mirror: NULL"); + st->print_cr(BULLET"java mirror: null"); } st->print(BULLET"vtable length %d (start addr: " PTR_FORMAT ")", vtable_length(), p2i(start_of_vtable())); st->cr(); if (vtable_length() > 0 && (Verbose || WizardMode)) print_vtable(start_of_vtable(), vtable_length(), st); @@ -3492,7 +3492,7 @@ void InstanceKlass::print_value_on(outputStream* st) const { void FieldPrinter::do_field(fieldDescriptor* fd) { _st->print(BULLET); - if (_obj == NULL) { + if (_obj == nullptr) { fd->print_on(_st); _st->cr(); } else { @@ -3508,7 +3508,7 @@ void InstanceKlass::oop_print_on(oop obj, outputStream* st) { if (this == vmClasses::String_klass()) { typeArrayOop value = java_lang_String::value(obj); juint length = java_lang_String::length(obj); - if (value != NULL && + if (value != nullptr && value->is_typeArray() && length <= (juint) value->length()) { st->print(BULLET"string: "); @@ -3526,7 +3526,7 @@ void InstanceKlass::oop_print_on(oop obj, outputStream* st) { java_lang_Class::print_signature(obj, st); st->cr(); Klass* real_klass = java_lang_Class::as_Klass(obj); - if (real_klass != NULL && real_klass->is_instance_klass()) { + if (real_klass != nullptr && real_klass->is_instance_klass()) { st->print_cr(BULLET"---- static fields (%d):", java_lang_Class::static_oop_field_count(obj)); InstanceKlass::cast(real_klass)->do_local_static_fields(&print_field); } @@ -3552,7 +3552,7 @@ void InstanceKlass::oop_print_value_on(oop obj, outputStream* st) { name()->print_value_on(st); 
obj->print_address_on(st); if (this == vmClasses::String_klass() - && java_lang_String::value(obj) != NULL) { + && java_lang_String::value(obj) != nullptr) { ResourceMark rm; int len = java_lang_String::length(obj); int plen = (len < 24 ? len : 12); @@ -3563,7 +3563,7 @@ void InstanceKlass::oop_print_value_on(oop obj, outputStream* st) { } else if (this == vmClasses::Class_klass()) { Klass* k = java_lang_Class::as_Klass(obj); st->print(" = "); - if (k != NULL) { + if (k != nullptr) { k->print_value_on(st); } else { const char* tname = type2name(java_lang_Class::primitive_type(obj)); @@ -3577,28 +3577,28 @@ void InstanceKlass::oop_print_value_on(oop obj, outputStream* st) { java_lang_boxing_object::print(obj, st); } else if (this == vmClasses::LambdaForm_klass()) { oop vmentry = java_lang_invoke_LambdaForm::vmentry(obj); - if (vmentry != NULL) { + if (vmentry != nullptr) { st->print(" => "); vmentry->print_value_on(st); } } else if (this == vmClasses::MemberName_klass()) { Metadata* vmtarget = java_lang_invoke_MemberName::vmtarget(obj); - if (vmtarget != NULL) { + if (vmtarget != nullptr) { st->print(" = "); vmtarget->print_value_on(st); } else { oop clazz = java_lang_invoke_MemberName::clazz(obj); oop name = java_lang_invoke_MemberName::name(obj); - if (clazz != NULL) { + if (clazz != nullptr) { clazz->print_value_on(st); } else { - st->print("NULL"); + st->print("null"); } st->print("."); - if (name != NULL) { + if (name != nullptr) { name->print_value_on(st); } else { - st->print("NULL"); + st->print("null"); } } } @@ -3627,10 +3627,10 @@ void InstanceKlass::print_class_load_logging(ClassLoaderData* loader_data, info_stream.print("%s", external_name()); // Source - if (cfs != NULL) { - if (cfs->source() != NULL) { - const char* module_name = (module_entry->name() == NULL) ? 
UNNAMED_MODULE : module_entry->name()->as_C_string(); - if (module_name != NULL) { + if (cfs != nullptr) { + if (cfs->source() != nullptr) { + const char* module_name = (module_entry->name() == nullptr) ? UNNAMED_MODULE : module_entry->name()->as_C_string(); + if (module_name != nullptr) { // When the boot loader created the stream, it didn't know the module name // yet. Let's format it now. if (cfs->from_boot_loader_modules_image()) { @@ -3645,9 +3645,9 @@ void InstanceKlass::print_class_load_logging(ClassLoaderData* loader_data, Thread* current = Thread::current(); Klass* caller = current->is_Java_thread() ? JavaThread::cast(current)->security_get_caller_class(1): - NULL; - // caller can be NULL, for example, during a JVMTI VM_Init hook - if (caller != NULL) { + nullptr; + // caller can be null, for example, during a JVMTI VM_Init hook + if (caller != nullptr) { info_stream.print(" source: instance of %s", caller->external_name()); } else { // source is unknown @@ -3675,7 +3675,7 @@ void InstanceKlass::print_class_load_logging(ClassLoaderData* loader_data, p2i(this), p2i(superklass())); // Interfaces - if (local_interfaces() != NULL && local_interfaces()->length() > 0) { + if (local_interfaces() != nullptr && local_interfaces()->length() > 0) { debug_stream.print(" interfaces:"); int length = local_interfaces()->length(); for (int i = 0; i < length; i++) { @@ -3740,14 +3740,14 @@ void InstanceKlass::verify_on(outputStream* st) { } // Verify first subklass - if (subklass() != NULL) { + if (subklass() != nullptr) { guarantee(subklass()->is_klass(), "should be klass"); } // Verify siblings Klass* super = this->super(); Klass* sib = next_sibling(); - if (sib != NULL) { + if (sib != nullptr) { if (sib == this) { fatal("subclass points to itself " PTR_FORMAT, p2i(sib)); } @@ -3766,7 +3766,7 @@ void InstanceKlass::verify_on(outputStream* st) { } // Verify transitive interfaces - if (transitive_interfaces() != NULL) { + if (transitive_interfaces() != nullptr) { Array* 
transitive_interfaces = this->transitive_interfaces(); for (int j = 0; j < transitive_interfaces->length(); j++) { InstanceKlass* e = transitive_interfaces->at(j); @@ -3775,7 +3775,7 @@ void InstanceKlass::verify_on(outputStream* st) { } // Verify methods - if (methods() != NULL) { + if (methods() != nullptr) { Array* methods = this->methods(); for (int j = 0; j < methods->length(); j++) { guarantee(methods->at(j)->is_method(), "non-method in methods array"); @@ -3788,7 +3788,7 @@ void InstanceKlass::verify_on(outputStream* st) { } // Verify method ordering - if (method_ordering() != NULL) { + if (method_ordering() != nullptr) { Array* method_ordering = this->method_ordering(); int length = method_ordering->length(); if (JvmtiExport::can_maintain_original_method_order() || @@ -3809,7 +3809,7 @@ void InstanceKlass::verify_on(outputStream* st) { } // Verify default methods - if (default_methods() != NULL) { + if (default_methods() != nullptr) { Array* methods = this->default_methods(); for (int j = 0; j < methods->length(); j++) { guarantee(methods->at(j)->is_method(), "non-method in methods array"); @@ -3822,12 +3822,12 @@ void InstanceKlass::verify_on(outputStream* st) { } // Verify JNI static field identifiers - if (jni_ids() != NULL) { + if (jni_ids() != nullptr) { jni_ids()->verify(this); } // Verify other fields - if (constants() != NULL) { + if (constants() != nullptr) { guarantee(constants()->is_constantPool(), "should be constant pool"); } } @@ -3853,15 +3853,15 @@ JNIid::JNIid(Klass* holder, int offset, JNIid* next) { JNIid* JNIid::find(int offset) { JNIid* current = this; - while (current != NULL) { + while (current != nullptr) { if (current->offset() == offset) return current; current = current->next(); } - return NULL; + return nullptr; } void JNIid::deallocate(JNIid* current) { - while (current != NULL) { + while (current != nullptr) { JNIid* next = current->next(); delete current; current = next; @@ -3875,7 +3875,7 @@ void JNIid::verify(Klass* holder) 
{ end_field_offset = first_field_offset + (InstanceKlass::cast(holder)->static_field_size() * wordSize); JNIid* current = this; - while (current != NULL) { + while (current != nullptr) { guarantee(current->holder() == holder, "Invalid klass in JNIid"); #ifdef ASSERT int o = current->offset(); @@ -3897,7 +3897,7 @@ void InstanceKlass::set_init_state(ClassState state) { bool link_failed = _init_state == being_linked && state == loaded; assert(good_state || state == allocated || link_failed, "illegal state transition"); #endif - assert(_init_thread == NULL, "should be cleared before state change"); + assert(_init_thread == nullptr, "should be cleared before state change"); _init_state = state; } @@ -3930,7 +3930,7 @@ void InstanceKlass::purge_previous_version_list() { assert(has_been_redefined(), "Should only be called for main class"); // Quick exit. - if (previous_versions() == NULL) { + if (previous_versions() == nullptr) { return; } @@ -3940,7 +3940,7 @@ void InstanceKlass::purge_previous_version_list() { int deleted_count = 0; // leave debugging breadcrumbs int live_count = 0; ClassLoaderData* loader_data = class_loader_data(); - assert(loader_data != NULL, "should never be null"); + assert(loader_data != nullptr, "should never be null"); ResourceMark rm; log_trace(redefine, class, iklass, purge)("%s: previous versions", external_name()); @@ -3951,10 +3951,10 @@ void InstanceKlass::purge_previous_version_list() { int version = 0; // check the previous versions list - for (; pv_node != NULL; ) { + for (; pv_node != nullptr; ) { ConstantPool* pvcp = pv_node->constants(); - assert(pvcp != NULL, "cp ref was unexpectedly cleared"); + assert(pvcp != nullptr, "cp ref was unexpectedly cleared"); if (!pvcp->on_stack()) { // If the constant pool isn't on stack, none of the methods @@ -3966,7 +3966,7 @@ void InstanceKlass::purge_previous_version_list() { // Unlink from previous version list. 
assert(pv_node->class_loader_data() == loader_data, "wrong loader_data"); InstanceKlass* next = pv_node->previous_versions(); - pv_node->link_previous_versions(NULL); // point next to NULL + pv_node->link_previous_versions(nullptr); // point next to null last->link_previous_versions(next); // Delete this node directly. Nothing is referring to it and we don't // want it to increase the counter for metadata to delete in CLDG. @@ -3977,7 +3977,7 @@ void InstanceKlass::purge_previous_version_list() { continue; } else { log_trace(redefine, class, iklass, purge)("previous version " PTR_FORMAT " is alive", p2i(pv_node)); - assert(pvcp->pool_holder() != NULL, "Constant pool with no holder"); + assert(pvcp->pool_holder() != nullptr, "Constant pool with no holder"); guarantee (!loader_data->is_unloading(), "unloaded classes can't be on the stack"); live_count++; // found a previous version for next time we do class unloading @@ -3998,7 +3998,7 @@ void InstanceKlass::mark_newly_obsolete_methods(Array* old_methods, int obsolete_method_count = old_methods->length() - emcp_method_count; if (emcp_method_count != 0 && obsolete_method_count != 0 && - _previous_versions != NULL) { + _previous_versions != nullptr) { // We have a mix of obsolete and EMCP methods so we have to // clear out any matching EMCP method entries the hard way. int local_count = 0; @@ -4012,7 +4012,7 @@ void InstanceKlass::mark_newly_obsolete_methods(Array* old_methods, // previous versions are linked together through the InstanceKlass int j = 0; for (InstanceKlass* prev_version = _previous_versions; - prev_version != NULL; + prev_version != nullptr; prev_version = prev_version->previous_versions(), j++) { Array* method_refs = prev_version->methods(); @@ -4085,7 +4085,7 @@ void InstanceKlass::add_previous_version(InstanceKlass* scratch_class, // Set has_previous_version flag for processing during class unloading. 
_has_previous_versions = true; log_trace(redefine, class, iklass, add) ("scratch class added; one of its methods is on_stack."); - assert(scratch_class->previous_versions() == NULL, "shouldn't have a previous version"); + assert(scratch_class->previous_versions() == nullptr, "shouldn't have a previous version"); scratch_class->link_previous_versions(previous_versions()); link_previous_versions(scratch_class); } // end add_previous_version() @@ -4093,11 +4093,11 @@ void InstanceKlass::add_previous_version(InstanceKlass* scratch_class, #endif // INCLUDE_JVMTI Method* InstanceKlass::method_with_idnum(int idnum) { - Method* m = NULL; + Method* m = nullptr; if (idnum < methods()->length()) { m = methods()->at(idnum); } - if (m == NULL || m->method_idnum() != idnum) { + if (m == nullptr || m->method_idnum() != idnum) { for (int index = 0; index < methods()->length(); ++index) { m = methods()->at(index); if (m->method_idnum() == idnum) { @@ -4105,7 +4105,7 @@ Method* InstanceKlass::method_with_idnum(int idnum) { } } // None found, return null for the caller to handle. - return NULL; + return nullptr; } return m; } @@ -4113,10 +4113,10 @@ Method* InstanceKlass::method_with_idnum(int idnum) { Method* InstanceKlass::method_with_orig_idnum(int idnum) { if (idnum >= methods()->length()) { - return NULL; + return nullptr; } Method* m = methods()->at(idnum); - if (m != NULL && m->orig_method_idnum() == idnum) { + if (m != nullptr && m->orig_method_idnum() == idnum) { return m; } // Obsolete method idnum does not match the original idnum @@ -4127,14 +4127,14 @@ Method* InstanceKlass::method_with_orig_idnum(int idnum) { } } // None found, return null for the caller to handle. 
- return NULL; + return nullptr; } Method* InstanceKlass::method_with_orig_idnum(int idnum, int version) { InstanceKlass* holder = get_klass_version(version); - if (holder == NULL) { - return NULL; // The version of klass is gone, no method is found + if (holder == nullptr) { + return nullptr; // The version of klass is gone, no method is found } Method* method = holder->method_with_orig_idnum(idnum); return method; @@ -4157,18 +4157,18 @@ unsigned char * InstanceKlass::get_cached_class_file_bytes() { // Make a step iterating over the class hierarchy under the root class. // Skips subclasses if requested. void ClassHierarchyIterator::next() { - assert(_current != NULL, "required"); - if (_visit_subclasses && _current->subklass() != NULL) { + assert(_current != nullptr, "required"); + if (_visit_subclasses && _current->subklass() != nullptr) { _current = _current->subklass(); return; // visit next subclass } _visit_subclasses = true; // reset - while (_current->next_sibling() == NULL && _current != _root) { + while (_current->next_sibling() == nullptr && _current != _root) { _current = _current->superklass(); // backtrack; no more sibling subclasses left } if (_current == _root) { // Iteration is over (back at root after backtracking). Invalidate the iterator. - _current = NULL; + _current = nullptr; return; } _current = _current->next_sibling(); diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index d2531de8378..74917f02495 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -78,12 +78,12 @@ public: }; // Print fields. -// If "obj" argument to constructor is NULL, prints static fields, otherwise prints non-static fields. +// If "obj" argument to constructor is null, prints static fields, otherwise prints non-static fields. 
class FieldPrinter: public FieldClosure { oop _obj; outputStream* _st; public: - FieldPrinter(outputStream* st, oop obj = NULL) : _obj(obj), _st(st) {} + FieldPrinter(outputStream* st, oop obj = nullptr) : _obj(obj), _st(st) {} void do_field(fieldDescriptor* fd); }; @@ -201,9 +201,9 @@ class InstanceKlass: public Klass { // The contents of the Record attribute. Array* _record_components; - // the source debug extension for this klass, NULL if not specified. + // the source debug extension for this klass, null if not specified. // Specified as UTF-8 string without terminating zero byte in the classfile, - // it is stored in the instanceklass as a NULL-terminated UTF-8 string + // it is stored in the instanceklass as a null-terminated UTF-8 string const char* _source_debug_extension; // Number of heapOopSize words used by non-static fields in this klass @@ -238,7 +238,7 @@ class InstanceKlass: public Klass { OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily) JNIid* _jni_ids; // First JNI identifier for static fields in this class - jmethodID* volatile _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none + jmethodID* volatile _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or null if none nmethodBucket* volatile _dep_context; // packed DependencyContext structure uint64_t volatile _dep_context_last_cleaned; nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class @@ -294,7 +294,7 @@ class InstanceKlass: public Klass { // The embedded implementor only exists if the current klass is an // interface. The possible values of the implementor fall into following // three cases: - // NULL: no implementor. + // null: no implementor. // A Klass* that's not itself: one implementor. // Itself: more than one implementors. 
// @@ -382,12 +382,12 @@ class InstanceKlass: public Klass { // interfaces Array* local_interfaces() const { return _local_interfaces; } void set_local_interfaces(Array* a) { - guarantee(_local_interfaces == NULL || a == NULL, "Just checking"); + guarantee(_local_interfaces == nullptr || a == nullptr, "Just checking"); _local_interfaces = a; } Array* transitive_interfaces() const { return _transitive_interfaces; } void set_transitive_interfaces(Array* a) { - guarantee(_transitive_interfaces == NULL || a == NULL, "Just checking"); + guarantee(_transitive_interfaces == nullptr || a == nullptr, "Just checking"); _transitive_interfaces = a; } @@ -406,7 +406,7 @@ class InstanceKlass: public Klass { Array* fields() const { return _fields; } void set_fields(Array* f, u2 java_fields_count) { - guarantee(_fields == NULL || f == NULL, "Just checking"); + guarantee(_fields == nullptr || f == nullptr, "Just checking"); _fields = f; _java_fields_count = java_fields_count; } @@ -444,15 +444,15 @@ private: public: // Call this only if you know that the nest host has been initialized. InstanceKlass* nest_host_not_null() { - assert(_nest_host != NULL, "must be"); + assert(_nest_host != nullptr, "must be"); return _nest_host; } // Used to construct informative IllegalAccessError messages at a higher level, // if there was an issue resolving or validating the nest host. - // Returns NULL if there was no error. + // Returns null if there was no error. const char* nest_host_error(); // Returns nest-host class, resolving and validating it if needed. - // Returns NULL if resolution is not possible from the calling context. + // Returns null if resolution is not possible from the calling context. 
InstanceKlass* nest_host(TRAPS); // Check if this klass is a nestmate of k - resolves this nest-host and k's bool has_nestmate_access_to(InstanceKlass* k, TRAPS); @@ -478,7 +478,7 @@ public: // package PackageEntry* package() const { return _package_entry; } ModuleEntry* module() const; - bool in_unnamed_package() const { return (_package_entry == NULL); } + bool in_unnamed_package() const { return (_package_entry == nullptr); } void set_package(ClassLoaderData* loader_data, PackageEntry* pkg_entry, TRAPS); // If the package for the InstanceKlass is in the boot loader's package entry // table then sets the classpath_index field so that @@ -583,7 +583,7 @@ public: _disable_method_binary_search = true; } - // find a local method (returns NULL if not found) + // find a local method (returns null if not found) Method* find_method(const Symbol* name, const Symbol* signature) const; static Method* find_method(const Array* methods, const Symbol* name, @@ -597,14 +597,14 @@ public: const Symbol* signature, PrivateLookupMode private_mode); - // find a local method (returns NULL if not found) + // find a local method (returns null if not found) Method* find_local_method(const Symbol* name, const Symbol* signature, OverpassLookupMode overpass_mode, StaticLookupMode static_mode, PrivateLookupMode private_mode) const; - // find a local method from given methods array (returns NULL if not found) + // find a local method from given methods array (returns null if not found) static Method* find_local_method(const Array* methods, const Symbol* name, const Symbol* signature, @@ -620,18 +620,18 @@ public: StaticLookupMode static_mode, PrivateLookupMode private_mode); - // lookup operation (returns NULL if not found) + // lookup operation (returns null if not found) Method* uncached_lookup_method(const Symbol* name, const Symbol* signature, OverpassLookupMode overpass_mode, PrivateLookupMode private_mode = PrivateLookupMode::find) const; // lookup a method in all the interfaces that 
this class implements - // (returns NULL if not found) + // (returns null if not found) Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature, DefaultsLookupMode defaults_mode) const; // lookup a method in local defaults then in all interfaces - // (returns NULL if not found) + // (returns null if not found) Method* lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const; // Find method indices by name. If a method with the specified name is @@ -706,16 +706,16 @@ public: InstanceKlass* previous_versions() const { return _previous_versions; } #else - InstanceKlass* previous_versions() const { return NULL; } + InstanceKlass* previous_versions() const { return nullptr; } #endif InstanceKlass* get_klass_version(int version) { - for (InstanceKlass* ik = this; ik != NULL; ik = ik->previous_versions()) { + for (InstanceKlass* ik = this; ik != nullptr; ik = ik->previous_versions()) { if (ik->constants()->version() == version) { return ik; } } - return NULL; + return nullptr; } bool has_been_redefined() const { return _misc_flags.has_been_redefined(); } @@ -736,7 +736,7 @@ public: #if INCLUDE_JVMTI void init_previous_versions() { - _previous_versions = NULL; + _previous_versions = nullptr; } private: @@ -772,9 +772,9 @@ public: static bool has_previous_versions_and_reset() { return false; } void set_cached_class_file(JvmtiCachedClassFileData *data) { - assert(data == NULL, "unexpected call with JVMTI disabled"); + assert(data == nullptr, "unexpected call with JVMTI disabled"); } - JvmtiCachedClassFileData * get_cached_class_file() { return (JvmtiCachedClassFileData *)NULL; } + JvmtiCachedClassFileData * get_cached_class_file() { return (JvmtiCachedClassFileData *)nullptr; } #endif // INCLUDE_JVMTI @@ -819,16 +819,16 @@ public: void set_annotations(Annotations* anno) { _annotations = anno; } AnnotationArray* class_annotations() const { - return (_annotations != NULL) ? 
_annotations->class_annotations() : NULL; + return (_annotations != nullptr) ? _annotations->class_annotations() : nullptr; } Array* fields_annotations() const { - return (_annotations != NULL) ? _annotations->fields_annotations() : NULL; + return (_annotations != nullptr) ? _annotations->fields_annotations() : nullptr; } AnnotationArray* class_type_annotations() const { - return (_annotations != NULL) ? _annotations->class_type_annotations() : NULL; + return (_annotations != nullptr) ? _annotations->class_type_annotations() : nullptr; } Array* fields_type_annotations() const { - return (_annotations != NULL) ? _annotations->fields_type_annotations() : NULL; + return (_annotations != nullptr) ? _annotations->fields_type_annotations() : nullptr; } // allocation instanceOop allocate_instance(TRAPS); @@ -925,13 +925,13 @@ public: } static const InstanceKlass* cast(const Klass* k) { - assert(k != NULL, "k should not be null"); + assert(k != nullptr, "k should not be null"); assert(k->is_instance_klass(), "cast to InstanceKlass"); return static_cast(k); } virtual InstanceKlass* java_super() const { - return (super() == NULL) ? NULL : cast(super()); + return (super() == nullptr) ? nullptr : cast(super()); } // Sizing (in words) @@ -1085,7 +1085,7 @@ public: // The RedefineClasses() API can cause new method idnums to be needed // which will cause the caches to grow. Safety requires different // cache management logic if the caches can grow instead of just - // going from NULL to non-NULL. + // going from null to non-null. 
bool idnum_can_increment() const { return has_been_redefined(); } inline jmethodID* methods_jmethod_ids_acquire() const; inline void release_set_methods_jmethod_ids(jmethodID* jmeths); @@ -1113,7 +1113,7 @@ private: void add_initialization_error(JavaThread* current, Handle exception); oop get_initialization_error(JavaThread* current); - // find a local method (returns NULL if not found) + // find a local method (returns null if not found) Method* find_method_impl(const Symbol* name, const Symbol* signature, OverpassLookupMode overpass_mode, @@ -1246,7 +1246,7 @@ class InnerClassesIterator : public StackObj { InnerClassesIterator(const InstanceKlass* k) { _inner_classes = k->inner_classes(); - if (k->inner_classes() != NULL) { + if (k->inner_classes() != nullptr) { _length = _inner_classes->length(); // The inner class array's length should be the multiple of // inner_class_next_offset if it only contains the InnerClasses @@ -1332,7 +1332,7 @@ class ClassHierarchyIterator : public StackObj { } bool done() { - return (_current == NULL); + return (_current == nullptr); } // Make a step iterating over the class hierarchy under the root class. diff --git a/src/hotspot/share/oops/instanceKlass.inline.hpp b/src/hotspot/share/oops/instanceKlass.inline.hpp index 6af358aeca7..b7a31400333 100644 --- a/src/hotspot/share/oops/instanceKlass.inline.hpp +++ b/src/hotspot/share/oops/instanceKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ inline InstanceKlass* volatile* InstanceKlass::adr_implementor() const { if (is_interface()) { return (InstanceKlass* volatile*)end_of_nonstatic_oop_maps(); } else { - return NULL; + return nullptr; } } @@ -187,9 +187,9 @@ ALWAYSINLINE void InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType inline instanceOop InstanceKlass::allocate_instance(oop java_class, TRAPS) { Klass* k = java_lang_Class::as_Klass(java_class); - if (k == NULL) { + if (k == nullptr) { ResourceMark rm(THREAD); - THROW_(vmSymbols::java_lang_InstantiationException(), NULL); + THROW_(vmSymbols::java_lang_InstantiationException(), nullptr); } InstanceKlass* ik = cast(k); ik->check_valid_for_instantiation(false, CHECK_NULL); diff --git a/src/hotspot/share/oops/instanceMirrorKlass.cpp b/src/hotspot/share/oops/instanceMirrorKlass.cpp index 058e28b469a..2cb0b953d14 100644 --- a/src/hotspot/share/oops/instanceMirrorKlass.cpp +++ b/src/hotspot/share/oops/instanceMirrorKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ int InstanceMirrorKlass::_offset_of_static_fields = 0; size_t InstanceMirrorKlass::instance_size(Klass* k) { - if (k != NULL && k->is_instance_klass()) { + if (k != nullptr && k->is_instance_klass()) { return align_object_size(size_helper() + InstanceKlass::cast(k)->static_field_size()); } return size_helper(); @@ -61,7 +61,7 @@ size_t InstanceMirrorKlass::oop_size(oop obj) const { int InstanceMirrorKlass::compute_static_oop_field_count(oop obj) { Klass* k = java_lang_Class::as_Klass(obj); - if (k != NULL && k->is_instance_klass()) { + if (k != nullptr && k->is_instance_klass()) { return InstanceKlass::cast(k)->static_oop_field_count(); } return 0; diff --git a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp index 6cc53f8c302..1d5113f5a8e 100644 --- a/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp +++ b/src/hotspot/share/oops/instanceMirrorKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,9 +52,9 @@ void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { if (Devirtualizer::do_metadata(closure)) { Klass* klass = java_lang_Class::as_Klass(obj); - // We'll get NULL for primitive mirrors. - if (klass != NULL) { - if (klass->class_loader_data() == NULL) { + // We'll get null for primitive mirrors. + if (klass != nullptr) { + if (klass->class_loader_data() == nullptr) { // This is a mirror that belongs to a shared class that has not be loaded yet. // It's only reachable via HeapShared::roots(). All of its fields should be zero // so there's no need to scan. 
@@ -71,7 +71,7 @@ void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { Devirtualizer::do_klass(closure, klass); } } else { - // We would like to assert here (as below) that if klass has been NULL, then + // We would like to assert here (as below) that if klass has been null, then // this has been a mirror for a primitive type that we do not need to follow // as they are always strong roots. // However, we might get across a klass that just changed during CMS concurrent @@ -125,8 +125,8 @@ void InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closu if (Devirtualizer::do_metadata(closure)) { if (mr.contains(obj)) { Klass* klass = java_lang_Class::as_Klass(obj); - // We'll get NULL for primitive mirrors. - if (klass != NULL) { + // We'll get null for primitive mirrors. + if (klass != nullptr) { Devirtualizer::do_klass(closure, klass); } } diff --git a/src/hotspot/share/oops/instanceRefKlass.cpp b/src/hotspot/share/oops/instanceRefKlass.cpp index d14bbebfb30..3af465d718a 100644 --- a/src/hotspot/share/oops/instanceRefKlass.cpp +++ b/src/hotspot/share/oops/instanceRefKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -112,12 +112,12 @@ void InstanceRefKlass::oop_verify_on(oop obj, outputStream* st) { InstanceKlass::oop_verify_on(obj, st); // Verify referent field oop referent = java_lang_ref_Reference::unknown_referent_no_keepalive(obj); - if (referent != NULL) { + if (referent != nullptr) { guarantee(oopDesc::is_oop(referent), "referent field heap failed"); } // Additional verification for next field, which must be a Reference or null oop next = java_lang_ref_Reference::next(obj); - if (next != NULL) { + if (next != nullptr) { guarantee(oopDesc::is_oop(next), "next field should be an oop"); guarantee(next->is_instanceRef(), "next field verify failed"); } diff --git a/src/hotspot/share/oops/instanceRefKlass.inline.hpp b/src/hotspot/share/oops/instanceRefKlass.inline.hpp index 978234cd0a1..af20b0fe14a 100644 --- a/src/hotspot/share/oops/instanceRefKlass.inline.hpp +++ b/src/hotspot/share/oops/instanceRefKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,9 +66,9 @@ static inline oop load_referent(oop obj, ReferenceType type) { template bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) { ReferenceDiscoverer* rd = closure->ref_discoverer(); - if (rd != NULL) { + if (rd != nullptr) { oop referent = load_referent(obj, type); - if (referent != NULL) { + if (referent != nullptr) { if (!referent->is_gc_marked()) { // Only try to discover if not yet marked. 
return rd->discover_reference(obj, type); @@ -100,14 +100,14 @@ void InstanceRefKlass::oop_oop_iterate_discovered_and_discovery(oop obj, Referen template void InstanceRefKlass::oop_oop_iterate_fields(oop obj, OopClosureType* closure, Contains& contains) { - assert(closure->ref_discoverer() == NULL, "ReferenceDiscoverer should not be set"); + assert(closure->ref_discoverer() == nullptr, "ReferenceDiscoverer should not be set"); do_referent(obj, closure, contains); do_discovered(obj, closure, contains); } template void InstanceRefKlass::oop_oop_iterate_fields_except_referent(oop obj, OopClosureType* closure, Contains& contains) { - assert(closure->ref_discoverer() == NULL, "ReferenceDiscoverer should not be set"); + assert(closure->ref_discoverer() == nullptr, "ReferenceDiscoverer should not be set"); do_discovered(obj, closure, contains); } diff --git a/src/hotspot/share/oops/instanceStackChunkKlass.cpp b/src/hotspot/share/oops/instanceStackChunkKlass.cpp index 37512d332b4..62c9c89767e 100644 --- a/src/hotspot/share/oops/instanceStackChunkKlass.cpp +++ b/src/hotspot/share/oops/instanceStackChunkKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -235,7 +235,7 @@ public: void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, outputStream* st) { if (c == nullptr) { - st->print_cr("CHUNK NULL"); + st->print_cr("CHUNK null"); return; } diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index 0d936759f59..d4855886c48 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -83,7 +83,7 @@ void Klass::set_is_cloneable() { void Klass::set_name(Symbol* n) { _name = n; - if (_name != NULL) _name->increment_refcount(); + if (_name != nullptr) _name->increment_refcount(); if (Arguments::is_dumping_archive() && is_instance_klass()) { SystemDictionaryShared::init_dumptime_info(InstanceKlass::cast(this)); @@ -96,7 +96,7 @@ bool Klass::is_subclass_of(const Klass* k) const { Klass* t = const_cast(this)->super(); - while (t != NULL) { + while (t != nullptr) { if (t == k) return true; t = t->super(); } @@ -104,7 +104,7 @@ bool Klass::is_subclass_of(const Klass* k) const { } void Klass::release_C_heap_structures(bool release_constant_pool) { - if (_name != NULL) _name->decrement_refcount(); + if (_name != nullptr) _name->decrement_refcount(); } bool Klass::search_secondary_supers(Klass* k) const { @@ -131,7 +131,7 @@ Klass *Klass::up_cast_abstract() { Klass *r = this; while( r->is_abstract() ) { // Receiver is abstract? 
Klass *s = r->subklass(); // Check for exactly 1 subklass - if (s == NULL || s->next_sibling() != NULL) // Oops; wrong count; give up + if (s == nullptr || s->next_sibling() != nullptr) // Oops; wrong count; give up return this; // Return 'this' as a no-progress flag r = s; // Loop till find concrete class } @@ -159,7 +159,7 @@ void Klass::check_valid_for_instantiation(bool throwError, TRAPS) { void Klass::copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS) { ResourceMark rm(THREAD); - assert(s != NULL, "Throw NPE!"); + assert(s != nullptr, "Throw NPE!"); THROW_MSG(vmSymbols::java_lang_ArrayStoreException(), err_msg("arraycopy: source type %s is not an array", s->klass()->external_name())); } @@ -176,7 +176,7 @@ Klass* Klass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const { " wrap return value in a mirror object."); #endif ShouldNotReachHere(); - return NULL; + return nullptr; } Method* Klass::uncached_lookup_method(const Symbol* name, const Symbol* signature, @@ -188,7 +188,7 @@ Method* Klass::uncached_lookup_method(const Symbol* name, const Symbol* signatur " wrap return value in a mirror object."); #endif ShouldNotReachHere(); - return NULL; + return nullptr; } void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() { @@ -228,7 +228,7 @@ jint Klass::array_layout_helper(BasicType etype) { } bool Klass::can_be_primary_super_slow() const { - if (super() == NULL) + if (super() == nullptr) return true; else if (super()->super_depth() >= primary_super_limit()-1) return false; @@ -237,12 +237,12 @@ bool Klass::can_be_primary_super_slow() const { } void Klass::initialize_supers(Klass* k, Array* transitive_interfaces, TRAPS) { - if (k == NULL) { - set_super(NULL); + if (k == nullptr) { + set_super(nullptr); _primary_supers[0] = this; assert(super_depth() == 0, "Object must already be initialized properly"); } else if (k != super() || k == vmClasses::Object_klass()) { - 
assert(super() == NULL || super() == vmClasses::Object_klass(), + assert(super() == nullptr || super() == vmClasses::Object_klass(), "initialize this only once to a non-trivial value"); set_super(k); Klass* sup = k; @@ -273,9 +273,9 @@ void Klass::initialize_supers(Klass* k, Array* transitive_interf j = t->super_depth(); } for (juint j1 = j+1; j1 < primary_super_limit(); j1++) { - assert(primary_super_of_depth(j1) == NULL, "super list padding"); + assert(primary_super_of_depth(j1) == nullptr, "super list padding"); } - while (t != NULL) { + while (t != nullptr) { assert(primary_super_of_depth(j) == t, "super list initialization"); t = t->super(); --j; @@ -285,14 +285,14 @@ void Klass::initialize_supers(Klass* k, Array* transitive_interf #endif } - if (secondary_supers() == NULL) { + if (secondary_supers() == nullptr) { // Now compute the list of secondary supertypes. // Secondaries can occasionally be on the super chain, // if the inline "_primary_supers" array overflows. int extras = 0; Klass* p; - for (p = super(); !(p == NULL || p->can_be_primary_super()); p = p->super()) { + for (p = super(); !(p == nullptr || p->can_be_primary_super()); p = p->super()) { ++extras; } @@ -300,14 +300,14 @@ void Klass::initialize_supers(Klass* k, Array* transitive_interf // Compute the "real" non-extra secondaries. 
GrowableArray* secondaries = compute_secondary_supers(extras, transitive_interfaces); - if (secondaries == NULL) { + if (secondaries == nullptr) { // secondary_supers set by compute_secondary_supers return; } GrowableArray* primaries = new GrowableArray(extras); - for (p = super(); !(p == NULL || p->can_be_primary_super()); p = p->super()) { + for (p = super(); !(p == nullptr || p->can_be_primary_super()); p = p->super()) { int i; // Scan for overflow primaries being duplicates of 2nd'arys // This happens frequently for very deeply nested arrays: the @@ -339,9 +339,9 @@ void Klass::initialize_supers(Klass* k, Array* transitive_interf } #ifdef ASSERT - // We must not copy any NULL placeholders left over from bootstrap. + // We must not copy any null placeholders left over from bootstrap. for (int j = 0; j < s2->length(); j++) { - assert(s2->at(j) != NULL, "correct bootstrapping order"); + assert(s2->at(j) != nullptr, "correct bootstrapping order"); } #endif @@ -352,16 +352,16 @@ void Klass::initialize_supers(Klass* k, Array* transitive_interf GrowableArray* Klass::compute_secondary_supers(int num_extra_slots, Array* transitive_interfaces) { assert(num_extra_slots == 0, "override for complex klasses"); - assert(transitive_interfaces == NULL, "sanity"); + assert(transitive_interfaces == nullptr, "sanity"); set_secondary_supers(Universe::the_empty_klass_array()); - return NULL; + return nullptr; } // superklass links InstanceKlass* Klass::superklass() const { - assert(super() == NULL || super()->is_instance_klass(), "must be instance klass"); - return _super == NULL ? NULL : InstanceKlass::cast(_super); + assert(super() == nullptr || super()->is_instance_klass(), "must be instance klass"); + return _super == nullptr ? nullptr : InstanceKlass::cast(_super); } // subklass links. 
Used by the compiler (and vtable initialization) @@ -371,7 +371,7 @@ Klass* Klass::subklass(bool log) const { // Need load_acquire on the _subklass, because it races with inserts that // publishes freshly initialized data. for (Klass* chain = Atomic::load_acquire(&_subklass); - chain != NULL; + chain != nullptr; // Do not need load_acquire on _next_sibling, because inserts never // create _next_sibling edges to dead data. chain = Atomic::load(&chain->_next_sibling)) @@ -385,14 +385,14 @@ Klass* Klass::subklass(bool log) const { } } } - return NULL; + return nullptr; } Klass* Klass::next_sibling(bool log) const { // Do not need load_acquire on _next_sibling, because inserts never // create _next_sibling edges to dead data. for (Klass* chain = Atomic::load(&_next_sibling); - chain != NULL; + chain != nullptr; chain = Atomic::load(&chain->_next_sibling)) { // Only return alive klass, there may be stale klass // in this chain if cleaned concurrently. @@ -405,7 +405,7 @@ Klass* Klass::next_sibling(bool log) const { } } } - return NULL; + return nullptr; } void Klass::set_subklass(Klass* s) { @@ -428,9 +428,9 @@ void Klass::append_to_sibling_list() { debug_only(verify();) // add ourselves to superklass' subklass list InstanceKlass* super = superklass(); - if (super == NULL) return; // special case: class Object + if (super == nullptr) return; // special case: class Object assert((!super->is_interface() // interfaces cannot be supers - && (super->superklass() == NULL || !is_interface())), + && (super->superklass() == nullptr || !is_interface())), "an interface can only be a subklass of Object"); // Make sure there is no stale subklass head @@ -438,7 +438,7 @@ void Klass::append_to_sibling_list() { for (;;) { Klass* prev_first_subklass = Atomic::load_acquire(&_super->_subklass); - if (prev_first_subklass != NULL) { + if (prev_first_subklass != nullptr) { // set our sibling to be the superklass' previous first subklass assert(prev_first_subklass->is_loader_alive(), "May not 
attach not alive klasses"); set_next_sibling(prev_first_subklass); @@ -457,7 +457,7 @@ void Klass::clean_subklass() { for (;;) { // Need load_acquire, due to contending with concurrent inserts Klass* subklass = Atomic::load_acquire(&_subklass); - if (subklass == NULL || subklass->is_loader_alive()) { + if (subklass == nullptr || subklass->is_loader_alive()) { return; } // Try to fix _subklass until it points at something not dead. @@ -482,14 +482,14 @@ void Klass::clean_weak_klass_links(bool unloading_occurred, bool clean_alive_kla // Find and set the first alive subklass Klass* sub = current->subklass(true); current->clean_subklass(); - if (sub != NULL) { + if (sub != nullptr) { stack.push(sub); } // Find and set the first alive sibling Klass* sibling = current->next_sibling(true); current->set_next_sibling(sibling); - if (sibling != NULL) { + if (sibling != nullptr) { stack.push(sibling); } @@ -500,7 +500,7 @@ void Klass::clean_weak_klass_links(bool unloading_occurred, bool clean_alive_kla // JVMTI RedefineClasses creates previous versions that are not in // the class hierarchy, so process them here. - while ((ik = ik->previous_versions()) != NULL) { + while ((ik = ik->previous_versions()) != nullptr) { ik->clean_weak_instanceklass_links(); } } @@ -522,7 +522,7 @@ void Klass::metaspace_pointers_do(MetaspaceClosure* it) { it->push(&_super); if (!Arguments::is_dumping_archive()) { // If dumping archive, these may point to excluded classes. There's no need - // to follow these pointers anyway, as they will be set to NULL in + // to follow these pointers anyway, as they will be set to null in // remove_unshareable_info(). 
it->push((Klass**)&_subklass); it->push((Klass**)&_next_sibling); @@ -545,12 +545,12 @@ void Klass::remove_unshareable_info() { log_trace(cds, unshareable)("remove: %s", external_name()); } - set_subklass(NULL); - set_next_sibling(NULL); - set_next_link(NULL); + set_subklass(nullptr); + set_next_sibling(nullptr); + set_next_link(nullptr); // Null out class_loader_data because we don't share that yet. - set_class_loader_data(NULL); + set_class_loader_data(nullptr); set_is_shared(); } @@ -576,7 +576,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec // If an exception happened during CDS restore, some of these fields may already be // set. We leave the class on the CLD list, even if incomplete so that we don't // modify the CLD list outside a safepoint. - if (class_loader_data() == NULL) { + if (class_loader_data() == nullptr) { set_class_loader_data(loader_data); // Add to class loader list first before creating the mirror @@ -585,7 +585,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec } Handle loader(THREAD, loader_data->class_loader()); - ModuleEntry* module_entry = NULL; + ModuleEntry* module_entry = nullptr; Klass* k = this; if (k->is_objArray_klass()) { k = ObjArrayKlass::cast(k)->bottom_klass(); @@ -598,7 +598,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec module_entry = ModuleEntryTable::javabase_moduleEntry(); } // Obtain java.lang.Module, if available - Handle module_handle(THREAD, ((module_entry != NULL) ? module_entry->module() : (oop)NULL)); + Handle module_handle(THREAD, ((module_entry != nullptr) ? module_entry->module() : (oop)nullptr)); if (this->has_archived_mirror_index()) { ResourceMark rm(THREAD); @@ -620,7 +620,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec // Only recreate it if not present. A previous attempt to restore may have // gotten an OOM later but keep the mirror if it was created. 
- if (java_mirror() == NULL) { + if (java_mirror() == nullptr) { ResourceMark rm(THREAD); log_trace(cds, mirror)("Recreate mirror for %s", external_name()); java_lang_Class::create_mirror(this, loader, module_handle, protection_domain, Handle(), CHECK); @@ -689,12 +689,12 @@ const char* Klass::external_name() const { char* result = convert_hidden_name_to_java(name()); return result; } - if (name() == NULL) return ""; + if (name() == nullptr) return ""; return name()->as_klass_external_name(); } const char* Klass::signature_name() const { - if (name() == NULL) return ""; + if (name() == nullptr) return ""; if (is_objArray_klass() && ObjArrayKlass::cast(this)->bottom_klass()->is_hidden()) { size_t name_len = name()->utf8_length(); char* result = NEW_RESOURCE_ARRAY(char, name_len + 1); @@ -769,21 +769,21 @@ void Klass::verify_on(outputStream* st) { guarantee(this->is_klass(),"should be klass"); - if (super() != NULL) { + if (super() != nullptr) { guarantee(super()->is_klass(), "should be klass"); } - if (secondary_super_cache() != NULL) { + if (secondary_super_cache() != nullptr) { Klass* ko = secondary_super_cache(); guarantee(ko->is_klass(), "should be klass"); } for ( uint i = 0; i < primary_super_limit(); i++ ) { Klass* ko = _primary_supers[i]; - if (ko != NULL) { + if (ko != nullptr) { guarantee(ko->is_klass(), "should be klass"); } } - if (java_mirror_no_keepalive() != NULL) { + if (java_mirror_no_keepalive() != nullptr) { guarantee(java_lang_Class::is_instance(java_mirror_no_keepalive()), "should be instance"); } } @@ -846,7 +846,7 @@ const char* Klass::joint_in_module_of_loader(const Klass* class2, bool include_p char* joint_description = NEW_RESOURCE_ARRAY_RETURN_NULL(char, len); // Just return the FQN if error when allocating string - if (joint_description == NULL) { + if (joint_description == nullptr) { return class1_name; } @@ -905,7 +905,7 @@ const char* Klass::class_in_module_of_loader(bool use_are, bool include_parent_l // 3. 
class loader's name_and_id ClassLoaderData* cld = class_loader_data(); - assert(cld != NULL, "class_loader_data should not be null"); + assert(cld != nullptr, "class_loader_data should not be null"); const char* loader_name_and_id = cld->loader_name_and_id(); len += strlen(loader_name_and_id); @@ -919,9 +919,9 @@ const char* Klass::class_in_module_of_loader(bool use_are, bool include_parent_l // The parent loader's ClassLoaderData could be null if it is // a delegating class loader that has never defined a class. // In this case the loader's name must be obtained via the parent loader's oop. - if (parent_cld == NULL) { + if (parent_cld == nullptr) { oop cl_name_and_id = java_lang_ClassLoader::nameAndId(parent_loader); - if (cl_name_and_id != NULL) { + if (cl_name_and_id != nullptr) { parent_loader_name_and_id = java_lang_String::as_utf8_string(cl_name_and_id); } } else { @@ -938,7 +938,7 @@ const char* Klass::class_in_module_of_loader(bool use_are, bool include_parent_l char* class_description = NEW_RESOURCE_ARRAY_RETURN_NULL(char, len); // Just return the FQN if error when allocating string - if (class_description == NULL) { + if (class_description == nullptr) { return klass_name; } diff --git a/src/hotspot/share/oops/klass.hpp b/src/hotspot/share/oops/klass.hpp index b2bd5866647..acb9a41730f 100644 --- a/src/hotspot/share/oops/klass.hpp +++ b/src/hotspot/share/oops/klass.hpp @@ -145,9 +145,9 @@ class Klass : public Metadata { OopHandle _java_mirror; // Superclass Klass* _super; - // First subclass (NULL if none); _subklass->next_sibling() is next one + // First subclass (null if none); _subklass->next_sibling() is next one Klass* volatile _subklass; - // Sibling link (or NULL); links all subklasses of a klass + // Sibling link (or null); links all subklasses of a klass Klass* volatile _next_sibling; // All klasses loaded by a class loader are chained through these links @@ -220,7 +220,7 @@ protected: Array* transitive_interfaces); // java_super is the Java-level 
super type as specified by Class.getSuperClass. - virtual InstanceKlass* java_super() const { return NULL; } + virtual InstanceKlass* java_super() const { return nullptr; } juint super_check_offset() const { return _super_check_offset; } void set_super_check_offset(juint o) { _super_check_offset = o; } @@ -232,11 +232,11 @@ protected: void set_secondary_supers(Array* k) { _secondary_supers = k; } // Return the element of the _super chain of the given depth. - // If there is no such element, return either NULL or this. + // If there is no such element, return either null or this. Klass* primary_super_of_depth(juint i) const { assert(i < primary_super_limit(), "oob"); Klass* super = _primary_supers[i]; - assert(super == NULL || super->super_depth() == i, "correct display"); + assert(super == nullptr || super->super_depth() == i, "correct display"); return super; } @@ -265,14 +265,14 @@ protected: oop java_mirror_no_keepalive() const; void set_java_mirror(Handle m); - oop archived_java_mirror() NOT_CDS_JAVA_HEAP_RETURN_(NULL); + oop archived_java_mirror() NOT_CDS_JAVA_HEAP_RETURN_(nullptr); void set_archived_java_mirror(int mirror_index) NOT_CDS_JAVA_HEAP_RETURN; // Temporary mirror switch used by RedefineClasses OopHandle java_mirror_handle() const { return _java_mirror; } void swap_java_mirror_handle(OopHandle& mirror) { _java_mirror.swap(mirror); } - // Set java mirror OopHandle to NULL for CDS + // Set java mirror OopHandle to null for CDS // This leaves the OopHandle in the CLD, but that's ok, you can't release them. 
void clear_java_mirror_handle() { _java_mirror = OopHandle(); } @@ -528,7 +528,7 @@ protected: // array class with this klass as element type virtual Klass* array_klass(TRAPS) = 0; - // These will return NULL instead of allocating on the heap: + // These will return null instead of allocating on the heap: virtual Klass* array_klass_or_null(int rank) = 0; virtual Klass* array_klass_or_null() = 0; @@ -567,7 +567,7 @@ protected: if (has_archived_mirror_index()) { // _java_mirror is not a valid OopHandle but rather an encoded reference in the shared heap return false; - } else if (_java_mirror.ptr_raw() == NULL) { + } else if (_java_mirror.ptr_raw() == nullptr) { return false; } else { return true; diff --git a/src/hotspot/share/oops/klassVtable.cpp b/src/hotspot/share/oops/klassVtable.cpp index c1b8e1f538f..7cfca5cdf24 100644 --- a/src/hotspot/share/oops/klassVtable.cpp +++ b/src/hotspot/share/oops/klassVtable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas( int vtable_length = 0; // start off with super's vtable length - vtable_length = super == NULL ? 0 : super->vtable_length(); + vtable_length = super == nullptr ? 
0 : super->vtable_length(); // go thru each method in the methods table to see if it needs a new entry int len = methods->length(); @@ -90,7 +90,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas( GrowableArray new_mirandas(20); // compute the number of mirandas methods that must be added to the end - get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces, + get_mirandas(&new_mirandas, all_mirandas, super, methods, nullptr, local_interfaces, class_flags.is_interface()); *num_new_mirandas = new_mirandas.length(); @@ -106,7 +106,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas( vtable_length = Universe::base_vtable_size(); } - if (super == NULL && vtable_length != Universe::base_vtable_size()) { + if (super == nullptr && vtable_length != Universe::base_vtable_size()) { if (Universe::is_bootstrapping()) { // Someone is attempting to override java.lang.Object incorrectly on the // bootclasspath. The JVM cannot recover from this error including throwing @@ -131,7 +131,7 @@ void klassVtable::compute_vtable_size_and_num_mirandas( // and return the number of entries copied. Expects that 'super' is the Java // super class (arrays can have "array" super classes that must be skipped). int klassVtable::initialize_from_super(Klass* super) { - if (super == NULL) { + if (super == nullptr) { return 0; } else if (is_preinitialized_vtable()) { // A shared class' vtable is preinitialized at dump time. 
No need to copy @@ -212,11 +212,11 @@ void klassVtable::initialize_vtable(GrowableArray* supers) { // update vtable with default_methods Array* default_methods = ik()->default_methods(); - if (default_methods != NULL) { + if (default_methods != nullptr) { len = default_methods->length(); if (len > 0) { Array* def_vtable_indices = ik()->default_vtable_indices(); - assert(def_vtable_indices != NULL, "should be created"); + assert(def_vtable_indices != nullptr, "should be created"); assert(def_vtable_indices->length() == len, "reinit vtable len?"); for (int i = 0; i < len; i++) { bool needs_new_entry; @@ -314,7 +314,7 @@ InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper Symbol* target_classname) { InstanceKlass* superk = initialsuper; - while (superk != NULL && superk->super() != NULL) { + while (superk != nullptr && superk->super() != nullptr) { klassVtable ssVtable = (superk->super())->vtable(); if (vtable_index < ssVtable.length()) { Method* super_method = ssVtable.method_at(vtable_index); @@ -343,11 +343,11 @@ InstanceKlass* klassVtable::find_transitive_override(InstanceKlass* initialsuper } } else { // super class has no vtable entry here, stop transitive search - superk = (InstanceKlass*)NULL; + superk = (InstanceKlass*)nullptr; break; } // if no override found yet, continue to search up - superk = superk->super() == NULL ? NULL : InstanceKlass::cast(superk->super()); + superk = superk->super() == nullptr ? 
nullptr : InstanceKlass::cast(superk->super()); } return superk; @@ -388,7 +388,7 @@ bool klassVtable::update_inherited_vtable(Thread* current, InstanceKlass* klass = ik(); - Array* def_vtable_indices = NULL; + Array* def_vtable_indices = nullptr; bool is_default = false; // default methods are non-private concrete methods in superinterfaces which are added @@ -400,7 +400,7 @@ bool klassVtable::update_inherited_vtable(Thread* current, is_default = true; def_vtable_indices = klass->default_vtable_indices(); assert(!target_method->is_private(), "private interface method flagged as default"); - assert(def_vtable_indices != NULL, "def vtable alloc?"); + assert(def_vtable_indices != nullptr, "def vtable alloc?"); assert(default_index <= def_vtable_indices->length(), "def vtable len?"); } else { assert(klass == target_method->method_holder(), "caller resp."); @@ -438,7 +438,7 @@ bool klassVtable::update_inherited_vtable(Thread* current, // we need a new entry if there is no superclass Klass* super = klass->super(); - if (super == NULL) { + if (super == nullptr) { return allocate_new; } @@ -451,8 +451,8 @@ bool klassVtable::update_inherited_vtable(Thread* current, Symbol* signature = target_method->signature(); Klass* target_klass = target_method->method_holder(); - assert(target_klass != NULL, "impossible"); - if (target_klass == NULL) { + assert(target_klass != nullptr, "impossible"); + if (target_klass == nullptr) { target_klass = _klass; } @@ -491,7 +491,7 @@ bool klassVtable::update_inherited_vtable(Thread* current, (klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION && (super_klass = find_transitive_override(super_klass, target_method, i, target_loader, - target_classname)) != NULL))) { + target_classname)) != nullptr))) { // Package private methods always need a new entry to root their own // overriding. They may also override other methods. @@ -504,7 +504,7 @@ bool klassVtable::update_inherited_vtable(Thread* current, // super class or interface. 
put_method_at(target_method(), i); // Save super for constraint checking. - if (supers != NULL) { + if (supers != nullptr) { supers->at_put(i, super_klass); } @@ -512,7 +512,7 @@ bool klassVtable::update_inherited_vtable(Thread* current, if (!is_default) { target_method->set_vtable_index(i); } else { - if (def_vtable_indices != NULL) { + if (def_vtable_indices != nullptr) { if (is_preinitialized_vtable()) { // At runtime initialize_vtable is rerun as part of link_class_impl() // for a shared class loaded by the non-boot loader. @@ -550,9 +550,9 @@ void klassVtable::put_method_at(Method* m, int index) { ResourceMark rm; LogTarget(Trace, vtables) lt; LogStream ls(lt); - const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : ""; + const char* sig = (m != nullptr) ? m->name_and_sig_as_C_string() : ""; ls.print("adding %s at index %d, flags: ", sig, index); - if (m != NULL) { + if (m != nullptr) { m->print_linkage_flags(&ls); } ls.cr(); @@ -568,7 +568,7 @@ void klassVtable::check_constraints(GrowableArray* supers, TRAPS for (int i = 0; i < length(); i++) { methodHandle target_method(THREAD, unchecked_method_at(i)); InstanceKlass* super_klass = supers->at(i); - if (target_method() != NULL && super_klass != NULL) { + if (target_method() != nullptr && super_klass != nullptr) { // Do not check loader constraints for overpass methods because overpass // methods are created by the jvm to throw exceptions. 
if (!target_method->is_overpass()) { @@ -589,7 +589,7 @@ void klassVtable::check_constraints(GrowableArray* supers, TRAPS _klass, target_loader, super_loader, true); - if (failed_type_symbol != NULL) { + if (failed_type_symbol != nullptr) { stringStream ss; ss.print("loader constraint violation for class %s: when selecting " "overriding method '", _klass->external_name()); @@ -615,7 +615,7 @@ void klassVtable::check_constraints(GrowableArray* supers, TRAPS void klassVtable::initialize_vtable_and_check_constraints(TRAPS) { // Save a superclass from each vtable entry to do constraint checking ResourceMark rm(THREAD); - GrowableArray* supers = new GrowableArray(_length, _length, NULL); + GrowableArray* supers = new GrowableArray(_length, _length, nullptr); initialize_vtable(supers); check_constraints(supers, CHECK); } @@ -660,7 +660,7 @@ bool klassVtable::needs_new_vtable_entry(Method* target_method, // Concrete interface methods do not need new entries, they override // abstract method entries using default inheritance rules - if (target_method->method_holder() != NULL && + if (target_method->method_holder() != nullptr && target_method->method_holder()->is_interface() && !target_method->is_abstract()) { assert(target_method->is_default_method(), @@ -669,7 +669,7 @@ bool klassVtable::needs_new_vtable_entry(Method* target_method, } // we need a new entry if there is no superclass - if (super == NULL) { + if (super == nullptr) { return true; } @@ -684,14 +684,14 @@ bool klassVtable::needs_new_vtable_entry(Method* target_method, Symbol* name = target_method->name(); Symbol* signature = target_method->signature(); const Klass* k = super; - Method* super_method = NULL; - InstanceKlass *holder = NULL; - Method* recheck_method = NULL; + Method* super_method = nullptr; + InstanceKlass *holder = nullptr; + Method* recheck_method = nullptr; bool found_pkg_prvt_method = false; - while (k != NULL) { + while (k != nullptr) { // lookup through the hierarchy for a method with 
matching name and sign. super_method = InstanceKlass::cast(k)->lookup_method(name, signature); - if (super_method == NULL) { + if (super_method == nullptr) { break; // we still have to search for a matching miranda method } // get the class holding the matching method @@ -743,7 +743,7 @@ bool klassVtable::needs_new_vtable_entry(Method* target_method, // this check for all access permissions. const InstanceKlass *sk = InstanceKlass::cast(super); if (sk->has_miranda_methods()) { - if (sk->lookup_method_in_all_interfaces(name, signature, Klass::DefaultsLookupMode::find) != NULL) { + if (sk->lookup_method_in_all_interfaces(name, signature, Klass::DefaultsLookupMode::find) != nullptr) { return false; // found a matching miranda; we do not need a new entry } } @@ -849,14 +849,14 @@ bool klassVtable::is_miranda(Method* m, Array* class_methods, if (InstanceKlass::find_local_method(class_methods, name, signature, Klass::OverpassLookupMode::find, Klass::StaticLookupMode::skip, - Klass::PrivateLookupMode::skip) != NULL) + Klass::PrivateLookupMode::skip) != nullptr) { return false; } // Check local default methods - if ((default_methods != NULL) && - (InstanceKlass::find_method(default_methods, name, signature) != NULL)) + if ((default_methods != nullptr) && + (InstanceKlass::find_method(default_methods, name, signature) != nullptr)) { return false; } @@ -866,14 +866,14 @@ bool klassVtable::is_miranda(Method* m, Array* class_methods, // Overpasses may or may not exist for supers for pass 1, // they should have been created for pass 2 and later. - for (const Klass* cursuper = super; cursuper != NULL; cursuper = cursuper->super()) + for (const Klass* cursuper = super; cursuper != nullptr; cursuper = cursuper->super()) { Method* found_mth = InstanceKlass::cast(cursuper)->find_local_method(name, signature, Klass::OverpassLookupMode::find, Klass::StaticLookupMode::skip, Klass::PrivateLookupMode::skip); // Ignore non-public methods in java.lang.Object if klass is an interface. 
- if (found_mth != NULL && (!is_interface || + if (found_mth != nullptr && (!is_interface || !SystemDictionary::is_nonpublic_Object_method(found_mth))) { return false; } @@ -915,10 +915,10 @@ void klassVtable::add_new_mirandas_to_lists( if (is_miranda(im, class_methods, default_methods, super, is_interface)) { // is it a miranda at all? const InstanceKlass *sk = InstanceKlass::cast(super); // check if it is a duplicate of a super's miranda - if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::DefaultsLookupMode::find) == NULL) { + if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::DefaultsLookupMode::find) == nullptr) { new_mirandas->append(im); } - if (all_mirandas != NULL) { + if (all_mirandas != nullptr) { all_mirandas->append(im); } } @@ -962,7 +962,7 @@ void klassVtable::get_mirandas(GrowableArray* new_mirandas, int klassVtable::fill_in_mirandas(Thread* current, int initialized) { ResourceMark rm(current); GrowableArray mirandas(20); - get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(), + get_mirandas(&mirandas, nullptr, ik()->super(), ik()->methods(), ik()->default_methods(), ik()->local_interfaces(), klass()->is_interface()); for (int i = 0; i < mirandas.length(); i++) { @@ -970,7 +970,7 @@ int klassVtable::fill_in_mirandas(Thread* current, int initialized) { Method* meth = mirandas.at(i); LogTarget(Trace, vtables) lt; LogStream ls(lt); - if (meth != NULL) { + if (meth != nullptr) { char* sig = meth->name_and_sig_as_C_string(); ls.print("fill in mirandas with %s index %d, flags: ", sig, initialized); @@ -997,7 +997,7 @@ bool klassVtable::adjust_default_method(int vtable_index, Method* old_method, Me bool updated = false; Array* default_methods = ik()->default_methods(); - if (default_methods != NULL) { + if (default_methods != nullptr) { int len = default_methods->length(); for (int idx = 0; idx < len; idx++) { if (vtable_index == ik()->default_vtable_indices()->at(idx)) { @@ -1019,7 +1019,7 @@ void 
klassVtable::adjust_method_entries(bool * trace_name_printed) { for (int index = 0; index < length(); index++) { Method* old_method = unchecked_method_at(index); - if (old_method == NULL || !old_method->is_old()) { + if (old_method == nullptr || !old_method->is_old()) { continue; // skip uninteresting entries } assert(!old_method->is_deleted(), "vtable methods may not be deleted"); @@ -1052,7 +1052,7 @@ bool klassVtable::check_no_old_or_obsolete_entries() { for (int i = 0; i < length(); i++) { Method* m = unchecked_method_at(i); - if (m != NULL && + if (m != nullptr && (NOT_PRODUCT(!m->is_valid() ||) m->is_old() || m->is_obsolete())) { log_trace(redefine, class, update, vtables) ("vtable check found old method entry: class: %s old: %d obsolete: %d, method: %s", @@ -1067,7 +1067,7 @@ void klassVtable::dump_vtable() { tty->print_cr("vtable dump --"); for (int i = 0; i < length(); i++) { Method* m = unchecked_method_at(i); - if (m != NULL) { + if (m != nullptr) { tty->print(" (%5d) ", i); m->access_flags().print_on(tty); if (m->is_default_method()) { @@ -1089,7 +1089,7 @@ void klassVtable::dump_vtable() { // Initialize a itableMethodEntry void itableMethodEntry::initialize(InstanceKlass* klass, Method* m) { - if (m == NULL) return; + if (m == nullptr) return; #ifdef ASSERT if (MetaspaceShared::is_in_shared_metaspace((void*)&_method) && @@ -1112,7 +1112,7 @@ klassItable::klassItable(InstanceKlass* klass) { if (klass->itable_length() > 0) { itableOffsetEntry* offset_entry = (itableOffsetEntry*)klass->start_of_itable(); - if (offset_entry != NULL && offset_entry->interface_klass() != NULL) { // Check that itable is initialized + if (offset_entry != nullptr && offset_entry->interface_klass() != nullptr) { // Check that itable is initialized // First offset entry points to the first method_entry intptr_t* method_entry = (intptr_t *)(((address)klass) + offset_entry->offset()); intptr_t* end = klass->end_of_itable(); @@ -1161,14 +1161,14 @@ void 
klassItable::initialize_itable(GrowableArray* supers) { for(int i = 0; i < num_interfaces; i++) { itableOffsetEntry* ioe = offset_entry(i); InstanceKlass* interf = ioe->interface_klass(); - assert(interf != NULL && ioe->offset() != 0, "bad offset entry in itable"); + assert(interf != nullptr && ioe->offset() != 0, "bad offset entry in itable"); initialize_itable_for_interface(ioe->offset(), interf, supers, (ioe->offset() - offset_entry(0)->offset())/wordSize); } } // Check that the last entry is empty itableOffsetEntry* ioe = offset_entry(size_offset_table() - 1); - guarantee(ioe->interface_klass() == NULL && ioe->offset() == 0, "terminator entry missing"); + guarantee(ioe->interface_klass() == nullptr && ioe->offset() == 0, "terminator entry missing"); } void klassItable::check_constraints(GrowableArray* supers, TRAPS) { @@ -1179,7 +1179,7 @@ void klassItable::check_constraints(GrowableArray* supers, TRAPS) { Method* target = ime->method(); Method* interface_method = supers->at(i); // method overridden - if (target != NULL && interface_method != NULL) { + if (target != nullptr && interface_method != nullptr) { InstanceKlass* method_holder = target->method_holder(); InstanceKlass* interf = interface_method->method_holder(); HandleMark hm(THREAD); @@ -1194,7 +1194,7 @@ void klassItable::check_constraints(GrowableArray* supers, TRAPS) { method_holder_loader, interface_loader, true); - if (failed_type_symbol != NULL) { + if (failed_type_symbol != nullptr) { stringStream ss; ss.print("loader constraint violation in interface itable" " initialization for class %s: when selecting method '", @@ -1223,7 +1223,7 @@ void klassItable::initialize_itable_and_check_constraints(TRAPS) { // Save a super interface from each itable entry to do constraint checking ResourceMark rm(THREAD); GrowableArray* supers = - new GrowableArray(_size_method_table, _size_method_table, NULL); + new GrowableArray(_size_method_table, _size_method_table, nullptr); initialize_itable(supers); 
check_constraints(supers, CHECK); } @@ -1259,7 +1259,7 @@ int klassItable::assign_itable_indices_for_interface(InstanceKlass* klass) { ResourceMark rm; LogTarget(Trace, itables) lt; LogStream ls(lt); - assert(m != NULL, "methods can never be null"); + assert(m != nullptr, "methods can never be null"); const char* sig = m->name_and_sig_as_C_string(); if (m->has_vtable_index()) { ls.print("vtable index %d for method: %s, flags: ", m->vtable_index(), sig); @@ -1321,7 +1321,7 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Insta int ime_count = method_count_for_interface(interf); for (int i = 0; i < nof_methods; i++) { Method* m = methods->at(i); - Method* target = NULL; + Method* target = nullptr; if (m->has_itable_index()) { // This search must match the runtime resolution, i.e. selection search for invokeinterface // to correctly enforce loader constraints for interface method inheritance. @@ -1332,11 +1332,11 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Insta target = LinkResolver::lookup_instance_method_in_klasses(_klass, m->name(), m->signature(), Klass::PrivateLookupMode::skip); } - if (target == NULL || !target->is_public() || target->is_abstract() || target->is_overpass()) { - assert(target == NULL || !target->is_overpass() || target->is_public(), + if (target == nullptr || !target->is_public() || target->is_abstract() || target->is_overpass()) { + assert(target == nullptr || !target->is_overpass() || target->is_public(), "Non-public overpass method!"); // Entry does not resolve. Leave it empty for AbstractMethodError or other error. - if (!(target == NULL) && !target->is_public()) { + if (!(target == nullptr) && !target->is_public()) { // Stuff an IllegalAccessError throwing method in there instead. itableOffsetEntry::method_entry(_klass, method_table_offset)[m->itable_index()]. 
initialize(_klass, Universe::throw_illegal_access_error()); @@ -1348,14 +1348,14 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Insta // Save super interface method to perform constraint checks. // The method is in the error message, that's why. - if (supers != NULL) { + if (supers != nullptr) { supers->at_put(start_offset + ime_num, m); } itableOffsetEntry::method_entry(_klass, method_table_offset)[ime_num].initialize(_klass, target); if (log_develop_is_enabled(Trace, itables)) { ResourceMark rm; - if (target != NULL) { + if (target != nullptr) { LogTarget(Trace, itables) lt; LogStream ls(lt); char* sig = target->name_and_sig_as_C_string(); @@ -1379,7 +1379,7 @@ void klassItable::adjust_method_entries(bool * trace_name_printed) { for (int i = 0; i < _size_method_table; i++, ime++) { Method* old_method = ime->method(); - if (old_method == NULL || !old_method->is_old()) { + if (old_method == nullptr || !old_method->is_old()) { continue; // skip uninteresting entries } assert(!old_method->is_deleted(), "itable methods may not be deleted"); @@ -1402,7 +1402,7 @@ bool klassItable::check_no_old_or_obsolete_entries() { for (int i = 0; i < _size_method_table; i++) { Method* m = ime->method(); - if (m != NULL && + if (m != nullptr && (NOT_PRODUCT(!m->is_valid() ||) m->is_old() || m->is_obsolete())) { log_trace(redefine, class, update, itables) ("itable check found old method entry: class: %s old: %d obsolete: %d, method: %s", @@ -1419,7 +1419,7 @@ void klassItable::dump_itable() { tty->print_cr("itable dump --"); for (int i = 0; i < _size_method_table; i++) { Method* m = ime->method(); - if (m != NULL) { + if (m != nullptr) { tty->print(" (%5d) ", i); m->access_flags().print_on(tty); if (m->is_default_method()) { @@ -1570,7 +1570,7 @@ void klassVtable::verify(outputStream* st, bool forced) { for (int i = 0; i < _length; i++) table()[i].verify(this, st); // verify consistency with superKlass vtable Klass* super = _klass->super(); - if (super 
!= NULL) { + if (super != nullptr) { InstanceKlass* sk = InstanceKlass::cast(super); klassVtable vt = sk->vtable(); for (int i = 0; i < vt.length(); i++) { @@ -1602,9 +1602,9 @@ void vtableEntry::verify(klassVtable* vt, outputStream* st) { Klass* vtklass = vt->klass(); if (vtklass->is_instance_klass() && (InstanceKlass::cast(vtklass)->major_version() >= klassVtable::VTABLE_TRANSITIVE_OVERRIDE_VERSION)) { - assert(method() != NULL, "must have set method"); + assert(method() != nullptr, "must have set method"); } - if (method() != NULL) { + if (method() != nullptr) { method()->verify(); // we sub_type, because it could be a miranda method if (!vtklass->is_subtype_of(method()->method_holder())) { diff --git a/src/hotspot/share/oops/klassVtable.hpp b/src/hotspot/share/oops/klassVtable.hpp index 73dd9ae61fa..256268236a8 100644 --- a/src/hotspot/share/oops/klassVtable.hpp +++ b/src/hotspot/share/oops/klassVtable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ class klassVtable { int index_of_miranda(Symbol* name, Symbol* signature); // initialize vtable of a new klass - void initialize_vtable(GrowableArray* supers = NULL); + void initialize_vtable(GrowableArray* supers = nullptr); void initialize_vtable_and_check_constraints(TRAPS); // computes vtable length (in words) and the number of miranda methods @@ -190,8 +190,8 @@ class vtableEntry { private: Method* _method; - void set(Method* method) { assert(method != NULL, "use clear"); _method = method; } - void clear() { _method = NULL; } + void set(Method* method) { assert(method != nullptr, "use clear"); _method = method; } + void clear() { _method = nullptr; } void print() PRODUCT_RETURN; void verify(klassVtable* vt, outputStream* st); @@ -201,7 +201,7 @@ class vtableEntry { inline Method* klassVtable::method_at(int i) const { assert(i >= 0 && i < _length, "index out of bounds"); - assert(table()[i].method() != NULL, "should not be null"); + assert(table()[i].method() != nullptr, "should not be null"); assert(((Metadata*)table()[i].method())->is_method(), "should be method"); return table()[i].method(); } @@ -246,7 +246,7 @@ class itableMethodEntry { Method* method() const { return _method; } Method**method_addr() { return &_method; } - void clear() { _method = NULL; } + void clear() { _method = nullptr; } void initialize(InstanceKlass* klass, Method* method); @@ -298,7 +298,7 @@ class klassItable { // Initialization void initialize_itable_and_check_constraints(TRAPS); - void initialize_itable(GrowableArray* supers = NULL); + void initialize_itable(GrowableArray* supers = nullptr); #if INCLUDE_JVMTI // RedefineClasses() API support: diff --git a/src/hotspot/share/oops/markWord.cpp b/src/hotspot/share/oops/markWord.cpp index 30dd8f55a11..ad49fea3076 100644 --- a/src/hotspot/share/oops/markWord.cpp +++ b/src/hotspot/share/oops/markWord.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 
2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,8 +69,8 @@ void markWord::print_on(outputStream* st, bool print_monitor_info) const { st->print(" monitor(" INTPTR_FORMAT ")=", value()); if (print_monitor_info) { ObjectMonitor* mon = monitor(); - if (mon == NULL) { - st->print("NULL (this should never be seen!)"); + if (mon == nullptr) { + st->print("null (this should never be seen!)"); } else { mon->print_on(st); } diff --git a/src/hotspot/share/oops/metadata.hpp b/src/hotspot/share/oops/metadata.hpp index 7e6192b56e2..c118ade8586 100644 --- a/src/hotspot/share/oops/metadata.hpp +++ b/src/hotspot/share/oops/metadata.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,8 +54,8 @@ class Metadata : public MetaspaceObj { void print_value() const; static void print_value_on_maybe_null(outputStream* st, const Metadata* m) { - if (NULL == m) - st->print("NULL"); + if (nullptr == m) + st->print("null"); else m->print_value_on(st); } diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index b5355a156bb..90fb0c2616b 100644 --- a/src/hotspot/share/oops/method.cpp +++ b/src/hotspot/share/oops/method.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -107,18 +107,18 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name) { set_dont_inline(false); set_changes_current_thread(false); set_has_injected_profile(false); - set_method_data(NULL); + set_method_data(nullptr); clear_method_counters(); set_vtable_index(Method::garbage_vtable_index); // Fix and bury in Method* - set_interpreter_entry(NULL); // sets i2i entry and from_int - set_adapter_entry(NULL); + set_interpreter_entry(nullptr); // sets i2i entry and from_int + set_adapter_entry(nullptr); Method::clear_code(); // from_c/from_i get set to c2i/i2i if (access_flags.is_native()) { clear_native_function(); - set_signature_handler(NULL); + set_signature_handler(nullptr); } NOT_PRODUCT(set_compiled_invocation_count(0);) @@ -130,13 +130,13 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, Symbol* name) { // we've walked the code cache. void Method::deallocate_contents(ClassLoaderData* loader_data) { MetadataFactory::free_metadata(loader_data, constMethod()); - set_constMethod(NULL); + set_constMethod(nullptr); MetadataFactory::free_metadata(loader_data, method_data()); - set_method_data(NULL); + set_method_data(nullptr); MetadataFactory::free_metadata(loader_data, method_counters()); clear_method_counters(); // The nmethod will be gone when we get here. 
- if (code() != NULL) _code = NULL; + if (code() != nullptr) _code = nullptr; } void Method::release_C_heap_structures() { @@ -149,23 +149,23 @@ void Method::release_C_heap_structures() { } address Method::get_i2c_entry() { - assert(adapter() != NULL, "must have"); + assert(adapter() != nullptr, "must have"); return adapter()->get_i2c_entry(); } address Method::get_c2i_entry() { - assert(adapter() != NULL, "must have"); + assert(adapter() != nullptr, "must have"); return adapter()->get_c2i_entry(); } address Method::get_c2i_unverified_entry() { - assert(adapter() != NULL, "must have"); + assert(adapter() != nullptr, "must have"); return adapter()->get_c2i_unverified_entry(); } address Method::get_c2i_no_clinit_check_entry() { assert(VM_Version::supports_fast_class_init_checks(), ""); - assert(adapter() != NULL, "must have"); + assert(adapter() != nullptr, "must have"); return adapter()->get_c2i_no_clinit_check_entry(); } @@ -233,7 +233,7 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla if (log_is_enabled(Debug, exceptions)) { ResourceMark rm(THREAD); log_debug(exceptions)("Looking for catch handler for exception of type \"%s\" in method \"%s\"", - ex_klass == NULL ? "NULL" : ex_klass->external_name(), mh->name()->as_C_string()); + ex_klass == nullptr ? "null" : ex_klass->external_name(), mh->name()->as_C_string()); } // exception table holds quadruple entries of the form (beg_bci, end_bci, handler_bci, klass_index) // access exception table @@ -260,14 +260,14 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla if (log_is_enabled(Info, exceptions)) { ResourceMark rm(THREAD); log_info(exceptions)("Found catch-all handler for exception of type \"%s\" in method \"%s\" at BCI: %d", - ex_klass == NULL ? "NULL" : ex_klass->external_name(), mh->name()->as_C_string(), handler_bci); + ex_klass == nullptr ? 
"null" : ex_klass->external_name(), mh->name()->as_C_string(), handler_bci); } return handler_bci; - } else if (ex_klass == NULL) { + } else if (ex_klass == nullptr) { // Is this even possible? if (log_is_enabled(Info, exceptions)) { ResourceMark rm(THREAD); - log_info(exceptions)("NULL exception class is implicitly caught by handler in method \"%s\" at BCI: %d", + log_info(exceptions)("null exception class is implicitly caught by handler in method \"%s\" at BCI: %d", mh()->name()->as_C_string(), handler_bci); } return handler_bci; @@ -289,12 +289,12 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla } return handler_bci; } - assert(k != NULL, "klass not loaded"); + assert(k != nullptr, "klass not loaded"); if (ex_klass->is_subtype_of(k)) { if (log_is_enabled(Info, exceptions)) { ResourceMark rm(THREAD); log_info(exceptions)("Found matching handler for exception of type \"%s\" in method \"%s\" at BCI: %d", - ex_klass == NULL ? "NULL" : ex_klass->external_name(), mh->name()->as_C_string(), handler_bci); + ex_klass == nullptr ? 
"null" : ex_klass->external_name(), mh->name()->as_C_string(), handler_bci); } return handler_bci; } @@ -369,7 +369,7 @@ address Method::bcp_from(int bci) const { } address Method::bcp_from(address bcp) const { - if (is_native() && bcp == NULL) { + if (is_native() && bcp == nullptr) { return code_base(); } else { return bcp; @@ -464,11 +464,11 @@ static Method* find_prefixed_native(Klass* k, Symbol* name, Symbol* signature, T strcpy(trial_name_str, prefix); strcat(trial_name_str, name_str); TempNewSymbol trial_name = SymbolTable::probe(trial_name_str, trial_len); - if (trial_name == NULL) { + if (trial_name == nullptr) { continue; // no such symbol, so this prefix wasn't used, try the next prefix } method = k->lookup_method(trial_name, signature); - if (method == NULL) { + if (method == nullptr) { continue; // signature doesn't match, try the next prefix } if (method->is_native()) { @@ -480,12 +480,12 @@ static Method* find_prefixed_native(Klass* k, Symbol* name, Symbol* signature, T name_str = trial_name_str; } #endif // INCLUDE_JVMTI - return NULL; // not found + return nullptr; // not found } bool Method::register_native(Klass* k, Symbol* name, Symbol* signature, address entry, TRAPS) { Method* method = k->lookup_method(name, signature); - if (method == NULL) { + if (method == nullptr) { ResourceMark rm(THREAD); stringStream st; st.print("Method '"); @@ -496,7 +496,7 @@ bool Method::register_native(Klass* k, Symbol* name, Symbol* signature, address if (!method->is_native()) { // trying to register to a non-native method, see if a JVM TI agent has added prefix(es) method = find_prefixed_native(k, name, signature, THREAD); - if (method == NULL) { + if (method == nullptr) { ResourceMark rm(THREAD); stringStream st; st.print("Method '"); @@ -506,7 +506,7 @@ bool Method::register_native(Klass* k, Symbol* name, Symbol* signature, address } } - if (entry != NULL) { + if (entry != nullptr) { method->set_native_function(entry, native_bind_event_is_interesting); } else { 
method->clear_native_function(); @@ -524,14 +524,14 @@ bool Method::was_executed_more_than(int n) { // Invocation counter is reset when the Method* is compiled. // If the method has compiled code we therefore assume it has // be executed more than n times. - if (is_accessor() || is_empty_method() || (code() != NULL)) { + if (is_accessor() || is_empty_method() || (code() != nullptr)) { // interpreter doesn't bump invocation counter of trivial methods // compiler does not bump invocation counter of compiled methods return true; } - else if ((method_counters() != NULL && + else if ((method_counters() != nullptr && method_counters()->invocation_counter()->carry()) || - (method_data() != NULL && + (method_data() != nullptr && method_data()->invocation_counter()->carry())) { // The carry bit is set when the counter overflows and causes // a compilation to occur. We don't know how many times @@ -568,7 +568,7 @@ void Method::print_invocation_count() { tty->print_cr (" invocation_counter: " INT32_FORMAT_W(11), invocation_count()); tty->print_cr (" backedge_counter: " INT32_FORMAT_W(11), backedge_count()); - if (method_data() != NULL) { + if (method_data() != nullptr) { tty->print_cr (" decompile_count: " UINT32_FORMAT_W(11), method_data()->decompile_count()); } @@ -614,7 +614,7 @@ void Method::build_profiling_method_data(const methodHandle& method, TRAPS) { MethodCounters* Method::build_method_counters(Thread* current, Method* m) { // Do not profile the method if metaspace has hit an OOM previously if (ClassLoaderDataGraph::has_metaspace_oom()) { - return NULL; + return nullptr; } methodHandle mh(current, m); @@ -633,10 +633,10 @@ MethodCounters* Method::build_method_counters(Thread* current, Method* m) { counters = MethodCounters::allocate_no_exception(mh); } - if (counters == NULL) { + if (counters == nullptr) { CompileBroker::log_metaspace_failure(); ClassLoaderDataGraph::set_metaspace_oom(true); - return NULL; + return nullptr; } if (!mh->init_method_counters(counters)) 
{ @@ -793,7 +793,7 @@ bool Method::is_final_method() const { } bool Method::is_default_method() const { - if (method_holder() != NULL && + if (method_holder() != nullptr && method_holder()->is_interface() && !is_abstract() && !is_private()) { return true; @@ -971,7 +971,7 @@ bool Method::is_klass_loaded_by_klass_index(int klass_index) const { Symbol* klass_name = constants()->klass_name_at(klass_index); Handle loader(thread, method_holder()->class_loader()); Handle prot (thread, method_holder()->protection_domain()); - return SystemDictionary::find_instance_klass(thread, klass_name, loader, prot) != NULL; + return SystemDictionary::find_instance_klass(thread, klass_name, loader, prot) != nullptr; } else { return true; } @@ -989,7 +989,7 @@ bool Method::is_klass_loaded(int refinfo_index, bool must_be_resolved) const { void Method::set_native_function(address function, bool post_event_flag) { - assert(function != NULL, "use clear_native_function to unregister natives"); + assert(function != nullptr, "use clear_native_function to unregister natives"); assert(!is_special_native_intrinsic() || function == SharedRuntime::native_method_throw_unsatisfied_link_error_entry(), ""); address* native_function = native_function_addr(); @@ -998,7 +998,7 @@ void Method::set_native_function(address function, bool post_event_flag) { address current = *native_function; if (current == function) return; if (post_event_flag && JvmtiExport::should_post_native_method_bind() && - function != NULL) { + function != nullptr) { // native_method_throw_unsatisfied_link_error_entry() should only // be passed when post_event_flag is false. assert(function != @@ -1013,7 +1013,7 @@ void Method::set_native_function(address function, bool post_event_flag) { // use the latest registered method -> check if a stub already has been generated. // If so, we have to make it not_entrant. 
CompiledMethod* nm = code(); // Put it into local variable to guard against concurrent updates - if (nm != NULL) { + if (nm != nullptr) { nm->make_not_entrant(); } } @@ -1023,7 +1023,7 @@ bool Method::has_native_function() const { if (is_special_native_intrinsic()) return false; // special-cased in SharedRuntime::generate_native_wrapper address func = native_function(); - return (func != NULL && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); + return (func != nullptr && func != SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); } @@ -1043,7 +1043,7 @@ void Method::set_signature_handler(address handler) { void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason) { - assert(reason != NULL, "must provide a reason"); + assert(reason != nullptr, "must provide a reason"); if (PrintCompilation && report) { ttyLocker ttyl; tty->print("made not %scompilable on ", is_osr ? "OSR " : ""); @@ -1057,16 +1057,16 @@ void Method::print_made_not_compilable(int comp_level, bool is_osr, bool report, if (size > 0) { tty->print(" (%d bytes)", size); } - if (reason != NULL) { + if (reason != nullptr) { tty->print(" %s", reason); } tty->cr(); } - if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) { + if ((TraceDeoptimization || LogCompilation) && (xtty != nullptr)) { ttyLocker ttyl; xtty->begin_elem("make_not_compilable thread='" UINTX_FORMAT "' osr='%d' level='%d'", os::current_thread_id(), is_osr, comp_level); - if (reason != NULL) { + if (reason != nullptr) { xtty->print(" reason=\'%s\'", reason); } xtty->method(this); @@ -1147,21 +1147,21 @@ void Method::set_not_osr_compilable(const char* reason, int comp_level, bool rep // Revert to using the interpreter and clear out the nmethod void Method::clear_code() { - // this may be NULL if c2i adapters have not been made yet + // this may be null if c2i adapters have not been made yet // Only should happen at allocate time. 
- if (adapter() == NULL) { - _from_compiled_entry = NULL; + if (adapter() == nullptr) { + _from_compiled_entry = nullptr; } else { _from_compiled_entry = adapter()->get_c2i_entry(); } OrderAccess::storestore(); _from_interpreted_entry = _i2i_entry; OrderAccess::storestore(); - _code = NULL; + _code = nullptr; } void Method::unlink_code(CompiledMethod *compare) { - MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); // We need to check if either the _code or _from_compiled_code_entry_point // refer to this nmethod because there is a race in setting these two fields // in Method* as seen in bugid 4947125. @@ -1172,7 +1172,7 @@ void Method::unlink_code(CompiledMethod *compare) { } void Method::unlink_code() { - MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? 
nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); clear_code(); } @@ -1180,19 +1180,19 @@ void Method::unlink_code() { // Called by class data sharing to remove any entry points (which are not shared) void Method::unlink_method() { Arguments::assert_is_dumping_archive(); - _code = NULL; - _adapter = NULL; - _i2i_entry = NULL; - _from_compiled_entry = NULL; - _from_interpreted_entry = NULL; + _code = nullptr; + _adapter = nullptr; + _i2i_entry = nullptr; + _from_compiled_entry = nullptr; + _from_interpreted_entry = nullptr; if (is_native()) { - *native_function_addr() = NULL; - set_signature_handler(NULL); + *native_function_addr() = nullptr; + set_signature_handler(nullptr); } NOT_PRODUCT(set_compiled_invocation_count(0);) - set_method_data(NULL); + set_method_data(nullptr); clear_method_counters(); } #endif @@ -1202,17 +1202,17 @@ void Method::unlink_method() { void Method::link_method(const methodHandle& h_method, TRAPS) { // If the code cache is full, we may reenter this function for the // leftover methods that weren't linked. 
- if (adapter() != NULL) { + if (adapter() != nullptr) { return; } - assert( _code == NULL, "nothing compiled yet" ); + assert( _code == nullptr, "nothing compiled yet" ); // Setup interpreter entrypoint assert(this == h_method(), "wrong h_method()" ); - assert(adapter() == NULL, "init'd to NULL"); + assert(adapter() == nullptr, "init'd to null"); address entry = Interpreter::entry_for_method(h_method); - assert(entry != NULL, "interpreter entry must be non-null"); + assert(entry != nullptr, "interpreter entry must be non-null"); // Sets both _i2i_entry and _from_interpreted_entry set_interpreter_entry(entry); @@ -1237,9 +1237,9 @@ void Method::link_method(const methodHandle& h_method, TRAPS) { if (h_method->is_continuation_native_intrinsic()) { // the entry points to this method will be set in set_code, called when first resolving this method - _from_interpreted_entry = NULL; - _from_compiled_entry = NULL; - _i2i_entry = NULL; + _from_interpreted_entry = nullptr; + _from_compiled_entry = nullptr; + _i2i_entry = nullptr; } } @@ -1248,7 +1248,7 @@ address Method::make_adapters(const methodHandle& mh, TRAPS) { // small (generally < 100 bytes) and quick to make (and cached and shared) // so making them eagerly shouldn't be too expensive. AdapterHandlerEntry* adapter = AdapterHandlerLibrary::get_adapter(mh); - if (adapter == NULL ) { + if (adapter == nullptr ) { if (!is_init_completed()) { // Don't throw exceptions during VM initialization because java.lang.* classes // might not have been initialized, causing problems when constructing the @@ -1273,7 +1273,7 @@ address Method::make_adapters(const methodHandle& mh, TRAPS) { // This function must not hit a safepoint! 
address Method::verified_code_entry() { debug_only(NoSafepointVerifier nsv;) - assert(_from_compiled_entry != NULL, "must be set"); + assert(_from_compiled_entry != nullptr, "must be set"); return _from_compiled_entry; } @@ -1283,7 +1283,7 @@ address Method::verified_code_entry() { bool Method::check_code() const { // cached in a register or local. There's a race on the value of the field. CompiledMethod *code = Atomic::load_acquire(&_code); - return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method()); + return code == nullptr || (code->method() == nullptr) || (code->method() == (Method*)this && !code->is_osr_method()); } // Install compiled code. Instantly it can execute. @@ -1292,7 +1292,7 @@ void Method::set_code(const methodHandle& mh, CompiledMethod *code) { assert( code, "use clear_code to remove code" ); assert( mh->check_code(), "" ); - guarantee(mh->adapter() != NULL, "Adapter blob must already exist!"); + guarantee(mh->adapter() != nullptr, "Adapter blob must already exist!"); // These writes must happen in this order, because the interpreter will // directly jump to from_interpreted_entry which jumps to an i2c adapter @@ -1311,7 +1311,7 @@ void Method::set_code(const methodHandle& mh, CompiledMethod *code) { OrderAccess::storestore(); if (mh->is_continuation_native_intrinsic()) { - assert(mh->_from_interpreted_entry == NULL, "initialized incorrectly"); // see link_method + assert(mh->_from_interpreted_entry == nullptr, "initialized incorrectly"); // see link_method if (mh->is_continuation_enter_intrinsic()) { // This is the entry used when we're in interpreter-only mode; see InterpreterMacroAssembler::jump_from_interpreted @@ -1339,7 +1339,7 @@ bool Method::is_overridden_in(Klass* k) const { // is a miranda method if (method_holder()->is_interface()) { // Check that method is not a miranda method - if (ik->lookup_method(name(), signature()) == NULL) { + if (ik->lookup_method(name(), signature()) == 
nullptr) { // No implementation exist - so miranda method return false; } @@ -1496,12 +1496,12 @@ methodHandle Method::make_method_handle_intrinsic(vmIntrinsics::ID iid, } Klass* Method::check_non_bcp_klass(Klass* klass) { - if (klass != NULL && klass->class_loader() != NULL) { + if (klass != nullptr && klass->class_loader() != nullptr) { if (klass->is_objArray_klass()) klass = ObjArrayKlass::cast(klass)->bottom_klass(); return klass; } - return NULL; + return nullptr; } @@ -1614,12 +1614,12 @@ methodHandle Method::clone_with_new_data(const methodHandle& m, u_char* new_code } vmSymbolID Method::klass_id_for_intrinsics(const Klass* holder) { - // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics + // if loader is not the default loader (i.e., non-null), we can't know the intrinsics // because we are not loading from core libraries // exception: the AES intrinsics come from lib/ext/sunjce_provider.jar // which does not use the class default class loader so we check for its loader here const InstanceKlass* ik = InstanceKlass::cast(holder); - if ((ik->class_loader() != NULL) && !SystemDictionary::is_platform_class_loader(ik->class_loader())) { + if ((ik->class_loader() != nullptr) && !SystemDictionary::is_platform_class_loader(ik->class_loader())) { return vmSymbolID::NO_SID; // regardless of name, no intrinsics here } @@ -1713,7 +1713,7 @@ bool Method::load_signature_classes(const methodHandle& m, TRAPS) { return false; } } - if( klass == NULL) { sig_is_loaded = false; } + if( klass == nullptr) { sig_is_loaded = false; } } } return sig_is_loaded; @@ -1744,7 +1744,7 @@ static int method_comparator(Method* a, Method* b) { void Method::sort_methods(Array* methods, bool set_idnums, method_comparator_func func) { int length = methods->length(); if (length > 1) { - if (func == NULL) { + if (func == nullptr) { func = method_comparator; } { @@ -1845,7 +1845,7 @@ bool CompressedLineNumberReadStream::read_pair() { Bytecodes::Code 
Method::orig_bytecode_at(int bci) const { BreakpointInfo* bp = method_holder()->breakpoints(); - for (; bp != NULL; bp = bp->next()) { + for (; bp != nullptr; bp = bp->next()) { if (bp->match(this, bci)) { return bp->orig_bytecode(); } @@ -1860,7 +1860,7 @@ Bytecodes::Code Method::orig_bytecode_at(int bci) const { void Method::set_orig_bytecode_at(int bci, Bytecodes::Code code) { assert(code != Bytecodes::_breakpoint, "cannot patch breakpoints this way"); BreakpointInfo* bp = method_holder()->breakpoints(); - for (; bp != NULL; bp = bp->next()) { + for (; bp != nullptr; bp = bp->next()) { if (bp->match(this, bci)) { bp->set_orig_bytecode(code); // and continue, in case there is more than one @@ -1879,16 +1879,16 @@ void Method::set_breakpoint(int bci) { static void clear_matches(Method* m, int bci) { InstanceKlass* ik = m->method_holder(); - BreakpointInfo* prev_bp = NULL; + BreakpointInfo* prev_bp = nullptr; BreakpointInfo* next_bp; - for (BreakpointInfo* bp = ik->breakpoints(); bp != NULL; bp = next_bp) { + for (BreakpointInfo* bp = ik->breakpoints(); bp != nullptr; bp = next_bp) { next_bp = bp->next(); // bci value of -1 is used to delete all breakpoints in method m (ex: clear_all_breakpoint). if (bci >= 0 ? bp->match(m, bci) : bp->match(m)) { // do this first: bp->clear(m); // unhook it - if (prev_bp != NULL) + if (prev_bp != nullptr) prev_bp->set_next(next_bp); else ik->set_breakpoints(next_bp); @@ -1927,30 +1927,30 @@ void Method::clear_all_breakpoints() { int Method::invocation_count() const { MethodCounters* mcs = method_counters(); MethodData* mdo = method_data(); - if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) || - ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) { + if (((mcs != nullptr) ? mcs->invocation_counter()->carry() : false) || + ((mdo != nullptr) ? mdo->invocation_counter()->carry() : false)) { return InvocationCounter::count_limit; } else { - return ((mcs != NULL) ? 
mcs->invocation_counter()->count() : 0) + - ((mdo != NULL) ? mdo->invocation_counter()->count() : 0); + return ((mcs != nullptr) ? mcs->invocation_counter()->count() : 0) + + ((mdo != nullptr) ? mdo->invocation_counter()->count() : 0); } } int Method::backedge_count() const { MethodCounters* mcs = method_counters(); MethodData* mdo = method_data(); - if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) || - ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) { + if (((mcs != nullptr) ? mcs->backedge_counter()->carry() : false) || + ((mdo != nullptr) ? mdo->backedge_counter()->carry() : false)) { return InvocationCounter::count_limit; } else { - return ((mcs != NULL) ? mcs->backedge_counter()->count() : 0) + - ((mdo != NULL) ? mdo->backedge_counter()->count() : 0); + return ((mcs != nullptr) ? mcs->backedge_counter()->count() : 0) + + ((mdo != nullptr) ? mdo->backedge_counter()->count() : 0); } } int Method::highest_comp_level() const { const MethodCounters* mcs = method_counters(); - if (mcs != NULL) { + if (mcs != nullptr) { return mcs->highest_comp_level(); } else { return CompLevel_none; @@ -1959,7 +1959,7 @@ int Method::highest_comp_level() const { int Method::highest_osr_comp_level() const { const MethodCounters* mcs = method_counters(); - if (mcs != NULL) { + if (mcs != nullptr) { return mcs->highest_osr_comp_level(); } else { return CompLevel_none; @@ -1968,14 +1968,14 @@ int Method::highest_osr_comp_level() const { void Method::set_highest_comp_level(int level) { MethodCounters* mcs = method_counters(); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->set_highest_comp_level(level); } } void Method::set_highest_osr_comp_level(int level) { MethodCounters* mcs = method_counters(); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->set_highest_osr_comp_level(level); } } @@ -1989,7 +1989,7 @@ BreakpointInfo::BreakpointInfo(Method* m, int bci) { _orig_bytecode = (Bytecodes::Code) *m->bcp_from(_bci); if (_orig_bytecode == Bytecodes::_breakpoint) 
_orig_bytecode = m->orig_bytecode_at(_bci); - _next = NULL; + _next = nullptr; } void BreakpointInfo::set(Method* method) { @@ -2049,7 +2049,7 @@ class JNIMethodBlockNode : public CHeapObj { return; } } - if (_next == NULL) { + if (_next == nullptr) { _next = new JNIMethodBlockNode(MAX2(num_addl_methods, min_block_size)); } else { _next->ensure_methods(num_addl_methods); @@ -2071,7 +2071,7 @@ class JNIMethodBlock : public CHeapObj { } Method** add_method(Method* m) { - for (JNIMethodBlockNode* b = _last_free; b != NULL; b = b->_next) { + for (JNIMethodBlockNode* b = _last_free; b != nullptr; b = b->_next) { if (b->_top < b->_number_of_methods) { // top points to the next free entry. int i = b->_top; @@ -2093,17 +2093,17 @@ class JNIMethodBlock : public CHeapObj { b->_top++; } // need to allocate a next block. - if (b->_next == NULL) { + if (b->_next == nullptr) { b->_next = _last_free = new JNIMethodBlockNode(); } } guarantee(false, "Should always allocate a free block"); - return NULL; + return nullptr; } bool contains(Method** m) { - if (m == NULL) return false; - for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) { + if (m == nullptr) return false; + for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) { if (b->_methods <= m && m < b->_methods + b->_number_of_methods) { // This is a bit of extra checking, for two reasons. One is // that contains() deals with pointers that are passed in by @@ -2133,9 +2133,9 @@ class JNIMethodBlock : public CHeapObj { // During class unloading the methods are cleared, which is different // than freed. 
void clear_all_methods() { - for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) { + for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) { for (int i = 0; i< b->_number_of_methods; i++) { - b->_methods[i] = NULL; + b->_methods[i] = nullptr; } } } @@ -2143,7 +2143,7 @@ class JNIMethodBlock : public CHeapObj { int count_methods() { // count all allocated methods int count = 0; - for (JNIMethodBlockNode* b = &_head; b != NULL; b = b->_next) { + for (JNIMethodBlockNode* b = &_head; b != nullptr; b = b->_next) { for (int i = 0; i< b->_number_of_methods; i++) { if (b->_methods[i] != _free_method) count++; } @@ -2156,7 +2156,7 @@ class JNIMethodBlock : public CHeapObj { // Something that can't be mistaken for an address or a markWord Method* const JNIMethodBlock::_free_method = (Method*)55; -JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _top(0), _next(NULL) { +JNIMethodBlockNode::JNIMethodBlockNode(int num_methods) : _top(0), _next(nullptr) { _number_of_methods = MAX2(num_methods, min_block_size); _methods = NEW_C_HEAP_ARRAY(Method*, _number_of_methods, mtInternal); for (int i = 0; i < _number_of_methods; i++) { @@ -2169,7 +2169,7 @@ void Method::ensure_jmethod_ids(ClassLoaderData* cld, int capacity) { // Also have to add the method to the list safely, which the lock // protects as well. MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag); - if (cld->jmethod_ids() == NULL) { + if (cld->jmethod_ids() == nullptr) { cld->set_jmethod_ids(new JNIMethodBlock(capacity)); } else { cld->jmethod_ids()->ensure_methods(capacity); @@ -2182,7 +2182,7 @@ jmethodID Method::make_jmethod_id(ClassLoaderData* cld, Method* m) { // Also have to add the method to the list safely, which the lock // protects as well. 
assert(JmethodIdCreation_lock->owned_by_self(), "sanity check"); - if (cld->jmethod_ids() == NULL) { + if (cld->jmethod_ids() == nullptr) { cld->set_jmethod_ids(new JNIMethodBlock()); } // jmethodID is a pointer to Method* @@ -2198,7 +2198,7 @@ jmethodID Method::jmethod_id() { // InstanceKlass while creating the jmethodID cache. void Method::destroy_jmethod_id(ClassLoaderData* cld, jmethodID m) { Method** ptr = (Method**)m; - assert(cld->jmethod_ids() != NULL, "should have method handles"); + assert(cld->jmethod_ids() != nullptr, "should have method handles"); cld->jmethod_ids()->destroy_method(ptr); } @@ -2207,7 +2207,7 @@ void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* ne // scratch method holder. assert(resolve_jmethod_id(jmid)->method_holder()->class_loader() == new_method->method_holder()->class_loader() || - new_method->method_holder()->class_loader() == NULL, // allow Unsafe substitution + new_method->method_holder()->class_loader() == nullptr, // allow Unsafe substitution "changing to a different class loader"); // Just change the method in place, jmethodID pointer doesn't change. 
*((Method**)jmid) = new_method; @@ -2215,25 +2215,25 @@ void Method::change_method_associated_with_jmethod_id(jmethodID jmid, Method* ne bool Method::is_method_id(jmethodID mid) { Method* m = resolve_jmethod_id(mid); - assert(m != NULL, "should be called with non-null method"); + assert(m != nullptr, "should be called with non-null method"); InstanceKlass* ik = m->method_holder(); ClassLoaderData* cld = ik->class_loader_data(); - if (cld->jmethod_ids() == NULL) return false; + if (cld->jmethod_ids() == nullptr) return false; return (cld->jmethod_ids()->contains((Method**)mid)); } Method* Method::checked_resolve_jmethod_id(jmethodID mid) { - if (mid == NULL) return NULL; + if (mid == nullptr) return nullptr; Method* o = resolve_jmethod_id(mid); - if (o == NULL || o == JNIMethodBlock::_free_method) { - return NULL; + if (o == nullptr || o == JNIMethodBlock::_free_method) { + return nullptr; } // Method should otherwise be valid. Assert for testing. assert(is_valid_method(o), "should be valid jmethodid"); // If the method's class holder object is unreferenced, but not yet marked as - // unloaded, we need to return NULL here too because after a safepoint, its memory + // unloaded, we need to return null here too because after a safepoint, its memory // will be reclaimed. - return o->method_holder()->is_loader_alive() ? o : NULL; + return o->method_holder()->is_loader_alive() ? o : nullptr; }; void Method::set_on_stack(const bool value) { @@ -2268,7 +2268,7 @@ bool Method::has_method_vptr(const void* ptr) { // Check that this pointer is valid by checking that the vtbl pointer matches bool Method::is_valid_method(const Method* m) { - if (m == NULL) { + if (m == nullptr) { return false; } else if ((intptr_t(m) & (wordSize-1)) != 0) { // Quick sanity check on pointer. 
@@ -2318,7 +2318,7 @@ void Method::print_on(outputStream* st) const { st->print_cr(" - i2i entry: " PTR_FORMAT, p2i(interpreter_entry())); st->print( " - adapters: "); AdapterHandlerEntry* a = ((Method*)this)->adapter(); - if (a == NULL) + if (a == nullptr) st->print_cr(PTR_FORMAT, p2i(a)); else a->print_adapter_on(st); @@ -2328,7 +2328,7 @@ void Method::print_on(outputStream* st) const { st->print_cr(" - code start: " PTR_FORMAT, p2i(code_base())); st->print_cr(" - code end (excl): " PTR_FORMAT, p2i(code_base() + code_size())); } - if (method_data() != NULL) { + if (method_data() != nullptr) { st->print_cr(" - method data: " PTR_FORMAT, p2i(method_data())); } st->print_cr(" - checked ex length: %d", checked_exceptions_length()); @@ -2366,7 +2366,7 @@ void Method::print_on(outputStream* st) const { } } } - if (code() != NULL) { + if (code() != nullptr) { st->print (" - compiled code: "); code()->print_value_on(st); } @@ -2399,7 +2399,7 @@ void Method::print_value_on(outputStream* st) const { method_holder()->print_value_on(st); if (WizardMode) st->print("#%d", _vtable_index); if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals()); - if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code()); + if (WizardMode && code() != nullptr) st->print(" ((nmethod*)%p)", code()); } // Verification @@ -2408,6 +2408,6 @@ void Method::verify_on(outputStream* st) { guarantee(is_method(), "object must be method"); guarantee(constants()->is_constantPool(), "should be constant pool"); MethodData* md = method_data(); - guarantee(md == NULL || + guarantee(md == nullptr || md->is_methodData(), "should be method data"); } diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp index 4a5df8134cc..4a154373654 100644 --- a/src/hotspot/share/oops/method.hpp +++ b/src/hotspot/share/oops/method.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,9 +111,9 @@ class Method : public Metadata { volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry() // The entry point for calling both from and to compiled code is // "_code->entry_point()". Because of tiered compilation and de-opt, this - // field can come and go. It can transition from NULL to not-null at any + // field can come and go. It can transition from null to not-null at any // time (whenever a compile completes). It can transition from not-null to - // NULL only at safepoints (because of a de-opt). + // null only at safepoints (because of a de-opt). CompiledMethod* volatile _code; // Points to the corresponding piece of native code volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry @@ -164,7 +164,7 @@ class Method : public Metadata { void set_signature_index(int index) { constMethod()->set_signature_index(index); } // generics support - Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); } + Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : nullptr); } int generic_signature_index() const { return constMethod()->generic_signature_index(); } void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); } @@ -232,7 +232,7 @@ class Method : public Metadata { // Only mutated by VM thread. 
u2 number_of_breakpoints() const { MethodCounters* mcs = method_counters(); - if (mcs == NULL) { + if (mcs == nullptr) { return 0; } else { return mcs->number_of_breakpoints(); @@ -240,20 +240,20 @@ class Method : public Metadata { } void incr_number_of_breakpoints(Thread* current) { MethodCounters* mcs = get_method_counters(current); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->incr_number_of_breakpoints(); } } void decr_number_of_breakpoints(Thread* current) { MethodCounters* mcs = get_method_counters(current); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->decr_number_of_breakpoints(); } } // Initialization only void clear_number_of_breakpoints() { MethodCounters* mcs = method_counters(); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->clear_number_of_breakpoints(); } } @@ -296,7 +296,7 @@ class Method : public Metadata { // Count of times method was exited via exception while interpreting void interpreter_throwout_increment(Thread* current) { MethodCounters* mcs = get_method_counters(current); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->interpreter_throwout_increment(); } } @@ -304,7 +304,7 @@ class Method : public Metadata { int interpreter_throwout_count() const { MethodCounters* mcs = method_counters(); - if (mcs == NULL) { + if (mcs == nullptr) { return 0; } else { return mcs->interpreter_throwout_count(); @@ -339,7 +339,7 @@ class Method : public Metadata { { return constMethod()->exception_table_start(); } // Finds the first entry point bci of an exception handler for an - // exception of klass ex_klass thrown at throw_bci. A value of NULL + // exception of klass ex_klass thrown at throw_bci. A value of null // for ex_klass indicates that the exception klass is not known; in // this case it matches any constraint class. Returns -1 if the // exception cannot be handled in this method. 
The handler @@ -370,38 +370,38 @@ class Method : public Metadata { } void clear_method_counters() { - _method_counters = NULL; + _method_counters = nullptr; } bool init_method_counters(MethodCounters* counters); int prev_event_count() const { MethodCounters* mcs = method_counters(); - return mcs == NULL ? 0 : mcs->prev_event_count(); + return mcs == nullptr ? 0 : mcs->prev_event_count(); } void set_prev_event_count(int count) { MethodCounters* mcs = method_counters(); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->set_prev_event_count(count); } } jlong prev_time() const { MethodCounters* mcs = method_counters(); - return mcs == NULL ? 0 : mcs->prev_time(); + return mcs == nullptr ? 0 : mcs->prev_time(); } void set_prev_time(jlong time) { MethodCounters* mcs = method_counters(); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->set_prev_time(time); } } float rate() const { MethodCounters* mcs = method_counters(); - return mcs == NULL ? 0 : mcs->rate(); + return mcs == nullptr ? 0 : mcs->rate(); } void set_rate(float rate) { MethodCounters* mcs = method_counters(); - if (mcs != NULL) { + if (mcs != nullptr) { mcs->set_rate(rate); } } @@ -506,7 +506,7 @@ public: }; address native_function() const { return *(native_function_addr()); } - // Must specify a real function (not NULL). + // Must specify a real function (not null). // Use clear_native_function() to unregister. void set_native_function(address function, bool post_event_flag); bool has_native_function() const; @@ -773,7 +773,7 @@ public: // once created they are never reclaimed. The methods to which they refer, // however, can be GC'ed away if the class is unloaded or if the method is // made obsolete or deleted -- in these cases, the jmethodID - // refers to NULL (as is the case for any weak reference). + // refers to null (as is the case for any weak reference). 
static jmethodID make_jmethod_id(ClassLoaderData* cld, Method* mh); static void destroy_jmethod_id(ClassLoaderData* cld, jmethodID mid); @@ -784,14 +784,14 @@ public: // Use resolve_jmethod_id() in situations where the caller is expected // to provide a valid jmethodID; the only sanity checks are in asserts; - // result guaranteed not to be NULL. + // result guaranteed not to be null. inline static Method* resolve_jmethod_id(jmethodID mid) { - assert(mid != NULL, "JNI method id should not be null"); + assert(mid != nullptr, "JNI method id should not be null"); return *((Method**)mid); } // Use checked_resolve_jmethod_id() in situations where the caller - // should provide a valid jmethodID, but might not. NULL is returned + // should provide a valid jmethodID, but might not. Null is returned // when the jmethodID does not refer to a valid method. static Method* checked_resolve_jmethod_id(jmethodID mid); @@ -805,7 +805,7 @@ public: // Get this method's jmethodID -- allocate if it doesn't exist jmethodID jmethod_id(); - // Lookup the jmethodID for this method. Return NULL if not found. + // Lookup the jmethodID for this method. Return null if not found. // NOTE that this function can be called from a signal handler // (see AsyncGetCallTrace support for Forte Analyzer) and this // needs to be async-safe. 
No allocation should be done and @@ -902,7 +902,7 @@ public: // On-stack replacement support bool has_osr_nmethod(int level, bool match_level) { - return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL; + return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != nullptr; } int mark_osr_nmethods() { @@ -937,7 +937,7 @@ public: public: MethodCounters* get_method_counters(Thread* current) { - if (_method_counters == NULL) { + if (_method_counters == nullptr) { build_method_counters(current, this); } return _method_counters; @@ -976,7 +976,7 @@ public: typedef int (*method_comparator_func)(Method* a, Method* b); // Helper routine used for method sorting - static void sort_methods(Array* methods, bool set_idnums = true, method_comparator_func func = NULL); + static void sort_methods(Array* methods, bool set_idnums = true, method_comparator_func func = nullptr); // Deallocation function for redefine classes or if an error occurs void deallocate_contents(ClassLoaderData* loader_data); @@ -987,7 +987,7 @@ public: InstanceKlass* holder = method_holder(); Method* new_method = holder->method_with_idnum(orig_method_idnum()); - assert(new_method != NULL, "method_with_idnum() should not be NULL"); + assert(new_method != nullptr, "method_with_idnum() should not be null"); assert(this != new_method, "sanity check"); return new_method; } @@ -1122,7 +1122,7 @@ class ExceptionTable : public StackObj { _table = m->exception_table_start(); _length = m->exception_table_length(); } else { - _table = NULL; + _table = nullptr; _length = 0; } } diff --git a/src/hotspot/share/oops/method.inline.hpp b/src/hotspot/share/oops/method.inline.hpp index 2b30f3297ad..50c85733da9 100644 --- a/src/hotspot/share/oops/method.inline.hpp +++ b/src/hotspot/share/oops/method.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,7 +83,7 @@ inline void CompressedLineNumberWriteStream::write_pair(int bci, int line) { write_pair_inline(bci, line); } -inline bool Method::has_compiled_code() const { return code() != NULL; } +inline bool Method::has_compiled_code() const { return code() != nullptr; } inline bool Method::is_empty_method() const { return code_size() == 1 diff --git a/src/hotspot/share/oops/methodData.cpp b/src/hotspot/share/oops/methodData.cpp index bd0bd707d78..e5d6f250592 100644 --- a/src/hotspot/share/oops/methodData.cpp +++ b/src/hotspot/share/oops/methodData.cpp @@ -88,7 +88,7 @@ void DataLayout::clean_weak_klass_links(bool always_clean) { // Constructor for invalid ProfileData. ProfileData::ProfileData() { - _data = NULL; + _data = nullptr; } char* ProfileData::print_data_on_helper(const MethodData* md) const { @@ -118,7 +118,7 @@ char* ProfileData::print_data_on_helper(const MethodData* md) const { fatal("unexpected tag %d", dp->tag()); } } - return NULL; + return nullptr; } void ProfileData::print_data_on(outputStream* st, const MethodData* md) const { @@ -135,7 +135,7 @@ void ProfileData::print_shared(outputStream* st, const char* name, const char* e char buf[100]; st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap)); } - if (extra != NULL) { + if (extra != nullptr) { st->print("%s", extra); } int flags = data()->flags(); @@ -317,8 +317,8 @@ void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) { for (int i = 0; i < _number_of_entries; i++) { intptr_t p = type(i); Klass* k = (Klass*)klass_part(p); - if (k != NULL && (always_clean || !k->is_loader_alive())) { - set_type(i, with_status((Klass*)NULL, p)); + if (k != nullptr && (always_clean || !k->is_loader_alive())) { + set_type(i, with_status((Klass*)nullptr, 
p)); } } } @@ -326,8 +326,8 @@ void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) { void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) { intptr_t p = type(); Klass* k = (Klass*)klass_part(p); - if (k != NULL && (always_clean || !k->is_loader_alive())) { - set_type(with_status((Klass*)NULL, p)); + if (k != nullptr && (always_clean || !k->is_loader_alive())) { + set_type(with_status((Klass*)nullptr, p)); } } @@ -406,7 +406,7 @@ void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) con void ReceiverTypeData::clean_weak_klass_links(bool always_clean) { for (uint row = 0; row < row_limit(); row++) { Klass* p = receiver(row); - if (p != NULL && (always_clean || !p->is_loader_alive())) { + if (p != nullptr && (always_clean || !p->is_loader_alive())) { clear_row(row); } } @@ -416,7 +416,7 @@ void ReceiverTypeData::print_receiver_data_on(outputStream* st) const { uint row; int entries = 0; for (row = 0; row < row_limit(); row++) { - if (receiver(row) != NULL) entries++; + if (receiver(row) != nullptr) entries++; } #if INCLUDE_JVMCI st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries); @@ -425,12 +425,12 @@ void ReceiverTypeData::print_receiver_data_on(outputStream* st) const { #endif int total = count(); for (row = 0; row < row_limit(); row++) { - if (receiver(row) != NULL) { + if (receiver(row) != nullptr) { total += receiver_count(row); } } for (row = 0; row < row_limit(); row++) { - if (receiver(row) != NULL) { + if (receiver(row) != nullptr) { tab(st); receiver(row)->print_value_on(st); st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total); @@ -805,7 +805,7 @@ void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() { return CHeapObj::operator new(fs_size, std::nothrow); } -FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(NULL) { 
+FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) { memcpy(data(), speculation, speculation_len); } @@ -814,15 +814,15 @@ static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** jlong head = (jlong)(address) *failed_speculations_address; if ((head & 0x1) == 0x1) { stringStream st; - if (nm != NULL) { + if (nm != nullptr) { st.print("%d", nm->compile_id()); Method* method = nm->method(); st.print_raw("{"); - if (method != NULL) { + if (method != nullptr) { method->print_name(&st); } else { const char* jvmci_name = nm->jvmci_name(); - if (jvmci_name != NULL) { + if (jvmci_name != nullptr) { st.print_raw(jvmci_name); } } @@ -835,10 +835,10 @@ static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** } bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) { - assert(failed_speculations_address != NULL, "must be"); + assert(failed_speculations_address != nullptr, "must be"); size_t fs_size = sizeof(FailedSpeculation) + speculation_len; FailedSpeculation* fs = new (fs_size) FailedSpeculation(speculation, speculation_len); - if (fs == NULL) { + if (fs == nullptr) { // no memory -> ignore failed speculation return false; } @@ -848,9 +848,9 @@ bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** FailedSpeculation** cursor = failed_speculations_address; do { - if (*cursor == NULL) { - FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) NULL, fs); - if (old_fs == NULL) { + if (*cursor == nullptr) { + FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs); + if (old_fs == nullptr) { // Successfully appended fs to end of the list return true; } @@ -862,9 +862,9 @@ bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** } void 
FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) { - assert(failed_speculations_address != NULL, "must be"); + assert(failed_speculations_address != nullptr, "must be"); FailedSpeculation* fs = *failed_speculations_address; - while (fs != NULL) { + while (fs != nullptr) { FailedSpeculation* next = fs->next(); delete fs; fs = next; @@ -1099,7 +1099,7 @@ int MethodData::initialize_data(BytecodeStream* stream, // Get the data at an arbitrary (sort of) data index. ProfileData* MethodData::data_at(int data_index) const { if (out_of_bounds(data_index)) { - return NULL; + return nullptr; } DataLayout* data_layout = data_layout_at(data_index); return data_layout->data_in(); @@ -1144,7 +1144,7 @@ ProfileData* DataLayout::data_in() { case DataLayout::no_tag: default: ShouldNotReachHere(); - return NULL; + return nullptr; case DataLayout::bit_data_tag: return new BitData(this); case DataLayout::counter_data_tag: @@ -1186,7 +1186,7 @@ DataLayout* MethodData::next_data_layout(DataLayout* current) const { int current_index = dp_to_di((address)current); int next_index = current_index + current->size_in_bytes(); if (out_of_bounds(next_index)) { - return NULL; + return nullptr; } DataLayout* next = data_layout_at(next_index); return next; @@ -1203,7 +1203,7 @@ void MethodData::post_initialize(BytecodeStream* stream) { data->post_initialize(stream, this); } if (_parameters_type_data_di != no_parameters) { - parameters_type_data()->post_initialize(NULL, this); + parameters_type_data()->post_initialize(nullptr, this); } } @@ -1310,7 +1310,7 @@ void MethodData::init() { #if INCLUDE_JVMCI _jvmci_ir_size = 0; - _failed_speculations = NULL; + _failed_speculations = nullptr; #endif #if INCLUDE_RTM_OPT @@ -1345,11 +1345,11 @@ bool MethodData::is_mature() const { address MethodData::bci_to_dp(int bci) { ResourceMark rm; DataLayout* data = data_layout_before(bci); - DataLayout* prev = NULL; + DataLayout* prev = nullptr; for ( ; is_valid(data); data 
= next_data_layout(data)) { if (data->bci() >= bci) { if (data->bci() == bci) set_hint_di(dp_to_di((address)data)); - else if (prev != NULL) set_hint_di(dp_to_di((address)prev)); + else if (prev != nullptr) set_hint_di(dp_to_di((address)prev)); return (address)data; } prev = data; @@ -1357,7 +1357,7 @@ address MethodData::bci_to_dp(int bci) { return (address)limit_data_position(); } -// Translate a bci to its corresponding data, or NULL. +// Translate a bci to its corresponding data, or null. ProfileData* MethodData::bci_to_data(int bci) { DataLayout* data = data_layout_before(bci); for ( ; is_valid(data); data = next_data_layout(data)) { @@ -1368,7 +1368,7 @@ ProfileData* MethodData::bci_to_data(int bci) { break; } } - return bci_to_extra_data(bci, NULL, false); + return bci_to_extra_data(bci, nullptr, false); } DataLayout* MethodData::next_extra(DataLayout* dp) { @@ -1396,25 +1396,25 @@ ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout // since the data structure is monotonic. switch(dp->tag()) { case DataLayout::no_tag: - return NULL; + return nullptr; case DataLayout::arg_info_data_tag: dp = end; - return NULL; // ArgInfoData is at the end of extra data section. + return nullptr; // ArgInfoData is at the end of extra data section. case DataLayout::bit_data_tag: - if (m == NULL && dp->bci() == bci) { + if (m == nullptr && dp->bci() == bci) { return new BitData(dp); } break; case DataLayout::speculative_trap_data_tag: - if (m != NULL) { + if (m != nullptr) { SpeculativeTrapData* data = new SpeculativeTrapData(dp); // data->method() may be null in case of a concurrent // allocation. Maybe it's for the same method. Try to use that // entry in that case. 
if (dp->bci() == bci) { - if (data->method() == NULL) { + if (data->method() == nullptr) { assert(concurrent, "impossible because no concurrent allocation"); - return NULL; + return nullptr; } else if (data->method() == m) { return data; } @@ -1425,11 +1425,11 @@ ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout fatal("unexpected tag %d", dp->tag()); } } - return NULL; + return nullptr; } -// Translate a bci to its corresponding extra data, or NULL. +// Translate a bci to its corresponding extra data, or null. ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) { // This code assumes an entry for a SpeculativeTrapData is 2 cells assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) == @@ -1437,8 +1437,8 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi "code needs to be adjusted"); // Do not create one of these if method has been redefined. - if (m != NULL && m->is_old()) { - return NULL; + if (m != nullptr && m->is_old()) { + return nullptr; } DataLayout* dp = extra_data_base(); @@ -1448,7 +1448,7 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi // all entries have the same size and non atomic concurrent // allocation would result in a corrupted extra data space. ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -1457,16 +1457,16 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi // Check again now that we have the lock. Another thread may // have added extra data entries. 
ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false); - if (result != NULL || dp >= end) { + if (result != nullptr || dp >= end) { return result; } - assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free"); + assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free"); assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info"); - u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag; + u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag; // SpeculativeTrapData is 2 slots. Make sure we have room. - if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) { - return NULL; + if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) { + return nullptr; } DataLayout temp; temp.initialize(tag, bci, 0); @@ -1482,7 +1482,7 @@ ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_mi return data; } } - return NULL; + return nullptr; } ArgInfoData *MethodData::arg_info() { @@ -1492,7 +1492,7 @@ ArgInfoData *MethodData::arg_info() { if (dp->tag() == DataLayout::arg_info_data_tag) return new ArgInfoData(dp); } - return NULL; + return nullptr; } // Printing @@ -1737,7 +1737,7 @@ void MethodData::clean_extra_data(CleanExtraDataClosure* cl) { case DataLayout::speculative_trap_data_tag: { SpeculativeTrapData* data = new SpeculativeTrapData(dp); Method* m = data->method(); - assert(m != NULL, "should have a method"); + assert(m != nullptr, "should have a method"); if (!cl->is_live(m)) { // "shift" accumulates the number of cells for dead // SpeculativeTrapData entries that have been seen so @@ -1781,7 +1781,7 @@ void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) { case DataLayout::speculative_trap_data_tag: { SpeculativeTrapData* 
data = new SpeculativeTrapData(dp); Method* m = data->method(); - assert(m != NULL && cl->is_live(m), "Method should exist"); + assert(m != nullptr && cl->is_live(m), "Method should exist"); break; } case DataLayout::bit_data_tag: @@ -1804,7 +1804,7 @@ void MethodData::clean_method_data(bool always_clean) { data->clean_weak_klass_links(always_clean); } ParametersTypeData* parameters = parameters_type_data(); - if (parameters != NULL) { + if (parameters != nullptr) { parameters->clean_weak_klass_links(always_clean); } diff --git a/src/hotspot/share/oops/methodData.hpp b/src/hotspot/share/oops/methodData.hpp index 508278e6d74..775d0907492 100644 --- a/src/hotspot/share/oops/methodData.hpp +++ b/src/hotspot/share/oops/methodData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -401,59 +401,59 @@ public: BitData* as_BitData() const { assert(is_BitData(), "wrong type"); - return is_BitData() ? (BitData*) this : NULL; + return is_BitData() ? (BitData*) this : nullptr; } CounterData* as_CounterData() const { assert(is_CounterData(), "wrong type"); - return is_CounterData() ? (CounterData*) this : NULL; + return is_CounterData() ? (CounterData*) this : nullptr; } JumpData* as_JumpData() const { assert(is_JumpData(), "wrong type"); - return is_JumpData() ? (JumpData*) this : NULL; + return is_JumpData() ? (JumpData*) this : nullptr; } ReceiverTypeData* as_ReceiverTypeData() const { assert(is_ReceiverTypeData(), "wrong type"); - return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL; + return is_ReceiverTypeData() ? (ReceiverTypeData*)this : nullptr; } VirtualCallData* as_VirtualCallData() const { assert(is_VirtualCallData(), "wrong type"); - return is_VirtualCallData() ? 
(VirtualCallData*)this : NULL; + return is_VirtualCallData() ? (VirtualCallData*)this : nullptr; } RetData* as_RetData() const { assert(is_RetData(), "wrong type"); - return is_RetData() ? (RetData*) this : NULL; + return is_RetData() ? (RetData*) this : nullptr; } BranchData* as_BranchData() const { assert(is_BranchData(), "wrong type"); - return is_BranchData() ? (BranchData*) this : NULL; + return is_BranchData() ? (BranchData*) this : nullptr; } ArrayData* as_ArrayData() const { assert(is_ArrayData(), "wrong type"); - return is_ArrayData() ? (ArrayData*) this : NULL; + return is_ArrayData() ? (ArrayData*) this : nullptr; } MultiBranchData* as_MultiBranchData() const { assert(is_MultiBranchData(), "wrong type"); - return is_MultiBranchData() ? (MultiBranchData*)this : NULL; + return is_MultiBranchData() ? (MultiBranchData*)this : nullptr; } ArgInfoData* as_ArgInfoData() const { assert(is_ArgInfoData(), "wrong type"); - return is_ArgInfoData() ? (ArgInfoData*)this : NULL; + return is_ArgInfoData() ? (ArgInfoData*)this : nullptr; } CallTypeData* as_CallTypeData() const { assert(is_CallTypeData(), "wrong type"); - return is_CallTypeData() ? (CallTypeData*)this : NULL; + return is_CallTypeData() ? (CallTypeData*)this : nullptr; } VirtualCallTypeData* as_VirtualCallTypeData() const { assert(is_VirtualCallTypeData(), "wrong type"); - return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL; + return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : nullptr; } ParametersTypeData* as_ParametersTypeData() const { assert(is_ParametersTypeData(), "wrong type"); - return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL; + return is_ParametersTypeData() ? (ParametersTypeData*)this : nullptr; } SpeculativeTrapData* as_SpeculativeTrapData() const { assert(is_SpeculativeTrapData(), "wrong type"); - return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL; + return is_SpeculativeTrapData() ? 
(SpeculativeTrapData*)this : nullptr; } @@ -470,7 +470,7 @@ public: // translation here, and the required translators are in the ci subclasses. virtual void translate_from(const ProfileData* data) {} - virtual void print_data_on(outputStream* st, const char* extra = NULL) const { + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const { ShouldNotReachHere(); } @@ -533,7 +533,7 @@ public: return cell_offset(bit_cell_count); } - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // CounterData @@ -583,7 +583,7 @@ public: set_int_at(count_off, count); } - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // JumpData @@ -656,7 +656,7 @@ public: // Specific initialization. void post_initialize(BytecodeStream* stream, MethodData* mdo); - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // Entries in a ProfileData object to record types: it can either be @@ -714,10 +714,10 @@ public: if (!is_type_none(k) && !is_type_unknown(k)) { Klass* res = (Klass*)klass_part(k); - assert(res != NULL, "invalid"); + assert(res != nullptr, "invalid"); return res; } else { - return NULL; + return nullptr; } } @@ -738,7 +738,7 @@ protected: const int _base_off; TypeEntries(int base_off) - : _pd(NULL), _base_off(base_off) {} + : _pd(nullptr), _base_off(base_off) {} void set_intptr_at(int index, intptr_t value) { _pd->set_intptr_at(index, value); @@ -1074,7 +1074,7 @@ public: } } - virtual void print_data_on(outputStream* st, const char* extra = NULL) const; + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // ReceiverTypeData @@ -1139,7 +1139,7 @@ public: assert(row < row_limit(), "oob"); Klass* recv = 
(Klass*)intptr_at(receiver_cell_index(row)); - assert(recv == NULL || recv->is_klass(), "wrong type"); + assert(recv == nullptr || recv->is_klass(), "wrong type"); return recv; } @@ -1177,7 +1177,7 @@ public: // We do sorting a profiling info (ciCallProfile) for compilation. // set_count(0); - set_receiver(row, NULL); + set_receiver(row, nullptr); set_receiver_count(row, 0); #if INCLUDE_JVMCI if (!this->is_VirtualCallData()) { @@ -1214,7 +1214,7 @@ public: virtual void clean_weak_klass_links(bool always_clean); void print_receiver_data_on(outputStream* st) const; - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // VirtualCallData @@ -1246,7 +1246,7 @@ public: } void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN; - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // VirtualCallTypeData @@ -1378,7 +1378,7 @@ public: } } - virtual void print_data_on(outputStream* st, const char* extra = NULL) const; + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // RetData @@ -1470,7 +1470,7 @@ public: // Specific initialization. void post_initialize(BytecodeStream* stream, MethodData* mdo); - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // BranchData @@ -1534,7 +1534,7 @@ public: // Specific initialization. void post_initialize(BytecodeStream* stream, MethodData* mdo); - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // ArrayData @@ -1693,7 +1693,7 @@ public: // Specific initialization. 
void post_initialize(BytecodeStream* stream, MethodData* mdo); - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; class ArgInfoData : public ArrayData { @@ -1718,7 +1718,7 @@ public: array_set_int_at(arg, val); } - void print_data_on(outputStream* st, const char* extra = NULL) const; + void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // ParametersTypeData @@ -1777,7 +1777,7 @@ public: _parameters.clean_weak_klass_links(always_clean); } - virtual void print_data_on(outputStream* st, const char* extra = NULL) const; + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const; static ByteSize stack_slot_offset(int i) { return cell_offset(stack_slot_local_offset(i)); @@ -1847,7 +1847,7 @@ public: return cell_offset(speculative_trap_method); } - virtual void print_data_on(outputStream* st, const char* extra = NULL) const; + virtual void print_data_on(outputStream* st, const char* extra = nullptr) const; }; // MethodData* @@ -1883,7 +1883,7 @@ public: // interpretation, when a bytecode is encountered that has profile data // associated with it, the entry pointed to by mdp is updated, then the // mdp is adjusted to point to the next appropriate DataLayout. If mdp -// is NULL to begin with, the interpreter assumes that the current method +// is null to begin with, the interpreter assumes that the current method // is not (yet) being profiled. 
// // In MethodData* parlance, "dp" is a "data pointer", the actual address @@ -2132,7 +2132,7 @@ private: DataLayout* data_layout_before(int bci) { // avoid SEGV on this edge case if (data_size() == 0) - return NULL; + return nullptr; DataLayout* layout = data_layout_at(hint_di()); if (layout->bci() <= bci) return layout; @@ -2283,7 +2283,7 @@ public: intx arg_stack() { return _arg_stack; } intx arg_returned() { return _arg_returned; } uint arg_modified(int a) { ArgInfoData *aid = arg_info(); - assert(aid != NULL, "arg_info must be not null"); + assert(aid != nullptr, "arg_info must be not null"); assert(a >= 0 && a < aid->number_of_args(), "valid argument number"); return aid->arg_modified(a); } @@ -2292,7 +2292,7 @@ public: void set_arg_stack(intx v) { _arg_stack = v; } void set_arg_returned(intx v) { _arg_returned = v; } void set_arg_modified(int a, uint v) { ArgInfoData *aid = arg_info(); - assert(aid != NULL, "arg_info must be not null"); + assert(aid != nullptr, "arg_info must be not null"); assert(a >= 0 && a < aid->number_of_args(), "valid argument number"); aid->set_arg_modified(a, v); } @@ -2308,7 +2308,7 @@ public: int parameters_size_in_bytes() const { ParametersTypeData* param = parameters_type_data(); - return param == NULL ? 0 : param->size_in_bytes(); + return param == nullptr ? 0 : param->size_in_bytes(); } // Accessors @@ -2321,8 +2321,8 @@ public: ProfileData* first_data() const { return data_at(first_di()); } ProfileData* next_data(ProfileData* current) const; DataLayout* next_data_layout(DataLayout* current) const; - bool is_valid(ProfileData* current) const { return current != NULL; } - bool is_valid(DataLayout* current) const { return current != NULL; } + bool is_valid(ProfileData* current) const { return current != nullptr; } + bool is_valid(DataLayout* current) const { return current != nullptr; } // Convert a dp (data pointer) to a di (data index). 
int dp_to_di(address dp) const { @@ -2335,30 +2335,30 @@ public: return dp_to_di(bci_to_dp(bci)); } - // Get the data at an arbitrary bci, or NULL if there is none. + // Get the data at an arbitrary bci, or null if there is none. ProfileData* bci_to_data(int bci); // Same, but try to create an extra_data record if one is needed: ProfileData* allocate_bci_to_data(int bci, Method* m) { - ProfileData* data = NULL; - // If m not NULL, try to allocate a SpeculativeTrapData entry - if (m == NULL) { + ProfileData* data = nullptr; + // If m not null, try to allocate a SpeculativeTrapData entry + if (m == nullptr) { data = bci_to_data(bci); } - if (data != NULL) { + if (data != nullptr) { return data; } data = bci_to_extra_data(bci, m, true); - if (data != NULL) { + if (data != nullptr) { return data; } // If SpeculativeTrapData allocation fails try to allocate a // regular entry data = bci_to_data(bci); - if (data != NULL) { + if (data != nullptr) { return data; } - return bci_to_extra_data(bci, NULL, true); + return bci_to_extra_data(bci, nullptr, true); } // Add a handful of extra data records, for trap tracking. @@ -2409,7 +2409,7 @@ public: // Return pointer to area dedicated to parameters in MDO ParametersTypeData* parameters_type_data() const { assert(_parameters_type_data_di != parameters_uninitialized, "called too early"); - return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL; + return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : nullptr; } int parameters_type_data_di() const { diff --git a/src/hotspot/share/oops/objArrayKlass.cpp b/src/hotspot/share/oops/objArrayKlass.cpp index a43816476e8..a0e3661e555 100644 --- a/src/hotspot/share/oops/objArrayKlass.cpp +++ b/src/hotspot/share/oops/objArrayKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,26 +58,26 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da int n, Klass* element_klass, TRAPS) { // Eagerly allocate the direct array supertype. - Klass* super_klass = NULL; + Klass* super_klass = nullptr; if (!Universe::is_bootstrapping() || vmClasses::Object_klass_loaded()) { Klass* element_super = element_klass->super(); - if (element_super != NULL) { + if (element_super != nullptr) { // The element type has a direct super. E.g., String[] has direct super of Object[]. super_klass = element_super->array_klass_or_null(); - bool supers_exist = super_klass != NULL; + bool supers_exist = super_klass != nullptr; // Also, see if the element has secondary supertypes. // We need an array type for each. const Array* element_supers = element_klass->secondary_supers(); for( int i = element_supers->length()-1; i >= 0; i-- ) { Klass* elem_super = element_supers->at(i); - if (elem_super->array_klass_or_null() == NULL) { + if (elem_super->array_klass_or_null() == nullptr) { supers_exist = false; break; } } if (!supers_exist) { // Oops. Not allocated yet. Back out, allocate it, and retry. - Klass* ek = NULL; + Klass* ek = nullptr; { MutexUnlocker mu(MultiArray_lock); super_klass = element_super->array_klass(CHECK_NULL); @@ -97,7 +97,7 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da } // Create type name for klass. 
- Symbol* name = NULL; + Symbol* name = nullptr; { ResourceMark rm(THREAD); char *name_str = element_klass->name()->as_C_string(); @@ -121,13 +121,13 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da ObjArrayKlass* oak = ObjArrayKlass::allocate(loader_data, n, element_klass, name, CHECK_NULL); ModuleEntry* module = oak->module(); - assert(module != NULL, "No module entry for array"); + assert(module != nullptr, "No module entry for array"); // Call complete_create_array_klass after all instance variables has been initialized. ArrayKlass::complete_create_array_klass(oak, super_klass, module, CHECK_NULL); // Add all classes to our internal class loader list here, - // including classes in the bootstrap (NULL) class loader. + // including classes in the bootstrap (null) class loader. // Do this step after creating the mirror so that if the // mirror creation fails, loaded_classes_do() doesn't find // an array class without a mirror. @@ -146,7 +146,7 @@ ObjArrayKlass::ObjArrayKlass(int n, Klass* element_klass, Symbol* name) : ArrayK } else { bk = element_klass; } - assert(bk != NULL && (bk->is_instance_klass() || bk->is_typeArray_klass()), "invalid bottom klass"); + assert(bk != nullptr && (bk->is_instance_klass() || bk->is_typeArray_klass()), "invalid bottom klass"); set_bottom_klass(bk); set_class_loader_data(bk->class_loader_data()); @@ -291,17 +291,17 @@ void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d, if (UseCompressedOops) { size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset(src_pos); size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset(dst_pos); - assert(arrayOopDesc::obj_offset_to_raw(s, src_offset, NULL) == + assert(arrayOopDesc::obj_offset_to_raw(s, src_offset, nullptr) == objArrayOop(s)->obj_at_addr(src_pos), "sanity"); - assert(arrayOopDesc::obj_offset_to_raw(d, dst_offset, NULL) == + assert(arrayOopDesc::obj_offset_to_raw(d, dst_offset, nullptr) == objArrayOop(d)->obj_at_addr(dst_pos), 
"sanity"); do_copy(s, src_offset, d, dst_offset, length, CHECK); } else { size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset(src_pos); size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset(dst_pos); - assert(arrayOopDesc::obj_offset_to_raw(s, src_offset, NULL) == + assert(arrayOopDesc::obj_offset_to_raw(s, src_offset, nullptr) == objArrayOop(s)->obj_at_addr(src_pos), "sanity"); - assert(arrayOopDesc::obj_offset_to_raw(d, dst_offset, NULL) == + assert(arrayOopDesc::obj_offset_to_raw(d, dst_offset, nullptr) == objArrayOop(d)->obj_at_addr(dst_pos), "sanity"); do_copy(s, src_offset, d, dst_offset, length, CHECK); } @@ -315,7 +315,7 @@ Klass* ObjArrayKlass::array_klass(int n, TRAPS) { if (dim == n) return this; // lock-free read needs acquire semantics - if (higher_dimension_acquire() == NULL) { + if (higher_dimension_acquire() == nullptr) { ResourceMark rm(THREAD); { @@ -323,7 +323,7 @@ Klass* ObjArrayKlass::array_klass(int n, TRAPS) { MutexLocker mu(THREAD, MultiArray_lock); // Check if another thread beat us - if (higher_dimension() == NULL) { + if (higher_dimension() == nullptr) { // Create multi-dim klass object and link them together Klass* k = @@ -349,8 +349,8 @@ Klass* ObjArrayKlass::array_klass_or_null(int n) { if (dim == n) return this; // lock-free read needs acquire semantics - if (higher_dimension_acquire() == NULL) { - return NULL; + if (higher_dimension_acquire() == nullptr) { + return nullptr; } ObjArrayKlass *ak = ObjArrayKlass::cast(higher_dimension()); @@ -375,15 +375,15 @@ bool ObjArrayKlass::can_be_primary_super_slow() const { GrowableArray* ObjArrayKlass::compute_secondary_supers(int num_extra_slots, Array* transitive_interfaces) { - assert(transitive_interfaces == NULL, "sanity"); + assert(transitive_interfaces == nullptr, "sanity"); // interfaces = { cloneable_klass, serializable_klass, elemSuper[], ... }; const Array* elem_supers = element_klass()->secondary_supers(); - int num_elem_supers = elem_supers == NULL ? 
0 : elem_supers->length(); + int num_elem_supers = elem_supers == nullptr ? 0 : elem_supers->length(); int num_secondaries = num_extra_slots + 2 + num_elem_supers; if (num_secondaries == 2) { // Must share this for correct bootstrapping! set_secondary_supers(Universe::the_array_interfaces_array()); - return NULL; + return nullptr; } else { GrowableArray* secondaries = new GrowableArray(num_elem_supers+2); secondaries->push(vmClasses::Cloneable_klass()); @@ -391,7 +391,7 @@ GrowableArray* ObjArrayKlass::compute_secondary_supers(int num_extra_slo for (int i = 0; i < num_elem_supers; i++) { Klass* elem_super = elem_supers->at(i); Klass* array_super = elem_super->array_klass_or_null(); - assert(array_super != NULL, "must already have been created"); + assert(array_super != nullptr, "must already have been created"); secondaries->push(array_super); } return secondaries; @@ -410,7 +410,7 @@ void ObjArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) { jint ObjArrayKlass::compute_modifier_flags() const { // The modifier for an objectArray is the same as its element - if (element_klass() == NULL) { + if (element_klass() == nullptr) { assert(Universe::is_bootstrapping(), "partial objArray only at startup"); return JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC; } @@ -422,13 +422,13 @@ jint ObjArrayKlass::compute_modifier_flags() const { } ModuleEntry* ObjArrayKlass::module() const { - assert(bottom_klass() != NULL, "ObjArrayKlass returned unexpected NULL bottom_klass"); + assert(bottom_klass() != nullptr, "ObjArrayKlass returned unexpected null bottom_klass"); // The array is defined in the module of its bottom class return bottom_klass()->module(); } PackageEntry* ObjArrayKlass::package() const { - assert(bottom_klass() != NULL, "ObjArrayKlass returned unexpected NULL bottom_klass"); + assert(bottom_klass() != nullptr, "ObjArrayKlass returned unexpected null bottom_klass"); return bottom_klass()->package(); } @@ -459,11 +459,11 @@ void 
ObjArrayKlass::oop_print_on(oop obj, outputStream* st) { int print_len = MIN2((intx) oa->length(), MaxElementPrintSize); for(int index = 0; index < print_len; index++) { st->print(" - %3d : ", index); - if (oa->obj_at(index) != NULL) { + if (oa->obj_at(index) != nullptr) { oa->obj_at(index)->print_value_on(st); st->cr(); } else { - st->print_cr("NULL"); + st->print_cr("null"); } } int remaining = oa->length() - print_len; @@ -480,10 +480,10 @@ void ObjArrayKlass::oop_print_value_on(oop obj, outputStream* st) { element_klass()->print_value_on(st); int len = objArrayOop(obj)->length(); st->print("[%d] ", len); - if (obj != NULL) { + if (obj != nullptr) { obj->print_address_on(st); } else { - st->print_cr("NULL"); + st->print_cr("null"); } } diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp index caa70f6ee66..f64d76145fd 100644 --- a/src/hotspot/share/oops/oop.cpp +++ b/src/hotspot/share/oops/oop.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,7 @@ void oopDesc::print_value_on(outputStream* st) const { void oopDesc::verify_on(outputStream* st, oopDesc* oop_desc) { - if (oop_desc != NULL) { + if (oop_desc != nullptr) { oop_desc->klass()->oop_verify_on(oop_desc, st); } } @@ -123,7 +123,7 @@ bool oopDesc::is_oop(oop obj, bool ignore_mark_word) { // used only for asserts and guarantees bool oopDesc::is_oop_or_null(oop obj, bool ignore_mark_word) { - return obj == NULL ? true : is_oop(obj, ignore_mark_word); + return obj == nullptr ? 
true : is_oop(obj, ignore_mark_word); } VerifyOopClosure VerifyOopClosure::verify_oop; @@ -160,7 +160,7 @@ void oopDesc::set_narrow_klass(narrowKlass nk) { void* oopDesc::load_klass_raw(oop obj) { if (UseCompressedClassPointers) { narrowKlass narrow_klass = obj->_metadata._compressed_klass; - if (narrow_klass == 0) return NULL; + if (narrow_klass == 0) return nullptr; return (void*)CompressedKlassPointers::decode_raw(narrow_klass); } else { return obj->_metadata._klass; @@ -171,7 +171,7 @@ void* oopDesc::load_oop_raw(oop obj, int offset) { uintptr_t addr = (uintptr_t)(void*)obj + (uint)offset; if (UseCompressedOops) { narrowOop narrow_oop = *(narrowOop*)addr; - if (CompressedOops::is_null(narrow_oop)) return NULL; + if (CompressedOops::is_null(narrow_oop)) return nullptr; return (void*)CompressedOops::decode_raw(narrow_oop); } else { return *(void**)addr; diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp index 52ce810b360..b0ef4ef03af 100644 --- a/src/hotspot/share/oops/oop.hpp +++ b/src/hotspot/share/oops/oop.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -262,7 +262,7 @@ class oopDesc { // Like "forward_to", but inserts the forwarding pointer atomically. // Exactly one thread succeeds in inserting the forwarding pointer, and - // this call returns "NULL" for that thread; any other thread has the + // this call returns null for that thread; any other thread has the // value of the forwarding pointer returned and does not modify "this". 
inline oop forward_to_atomic(oop p, markWord compare, atomic_memory_order order = memory_order_conservative); diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp index b436b1ef58b..cf05750e862 100644 --- a/src/hotspot/share/oops/oop.inline.hpp +++ b/src/hotspot/share/oops/oop.inline.hpp @@ -116,7 +116,7 @@ Klass* oopDesc::klass_raw() const { } void oopDesc::set_klass(Klass* k) { - assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass"); + assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); if (UseCompressedClassPointers) { _metadata._compressed_klass = CompressedKlassPointers::encode_not_null(k); } else { @@ -125,7 +125,7 @@ void oopDesc::set_klass(Klass* k) { } void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { - assert(Universe::is_bootstrapping() || (k != NULL && k->is_klass()), "incorrect Klass"); + assert(Universe::is_bootstrapping() || (k != nullptr && k->is_klass()), "incorrect Klass"); char* raw_mem = ((char*)mem + klass_offset_in_bytes()); if (UseCompressedClassPointers) { Atomic::release_store((narrowKlass*)raw_mem, @@ -278,7 +278,7 @@ oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order orde assert(m.decode_pointer() == p, "encoding must be reversible"); markWord old_mark = cas_set_mark(m, compare, order); if (old_mark == compare) { - return NULL; + return nullptr; } else { return cast_to_oop(old_mark.decode_pointer()); } @@ -349,7 +349,7 @@ void oopDesc::oop_iterate_backwards(OopClosureType* cl, Klass* k) { } bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) { - return obj == NULL || obj->klass()->is_subtype_of(klass); + return obj == nullptr || obj->klass()->is_subtype_of(klass); } intptr_t oopDesc::identity_hash() { diff --git a/src/hotspot/share/oops/oopHandle.hpp b/src/hotspot/share/oops/oopHandle.hpp index 490c9f9eec4..ae631fd7d53 100644 --- a/src/hotspot/share/oops/oopHandle.hpp +++ 
b/src/hotspot/share/oops/oopHandle.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ private: oop* _obj; public: - OopHandle() : _obj(NULL) {} + OopHandle() : _obj(nullptr) {} explicit OopHandle(oop* w) : _obj(w) {} OopHandle(OopStorage* storage, oop obj); @@ -64,7 +64,7 @@ public: inline oop resolve() const; inline oop peek() const; - bool is_empty() const { return _obj == NULL; } + bool is_empty() const { return _obj == nullptr; } inline void release(OopStorage* storage); diff --git a/src/hotspot/share/oops/oopHandle.inline.hpp b/src/hotspot/share/oops/oopHandle.inline.hpp index 20de5146ec3..6c7c0275aa7 100644 --- a/src/hotspot/share/oops/oopHandle.inline.hpp +++ b/src/hotspot/share/oops/oopHandle.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,16 +31,16 @@ #include "gc/shared/oopStorage.inline.hpp" inline oop OopHandle::resolve() const { - return (_obj == NULL) ? (oop)NULL : NativeAccess<>::oop_load(_obj); + return (_obj == nullptr) ? (oop)nullptr : NativeAccess<>::oop_load(_obj); } inline oop OopHandle::peek() const { - return (_obj == NULL) ? (oop)NULL : NativeAccess::oop_load(_obj); + return (_obj == nullptr) ? 
(oop)nullptr : NativeAccess::oop_load(_obj); } inline OopHandle::OopHandle(OopStorage* storage, oop obj) : _obj(storage->allocate()) { - if (_obj == NULL) { + if (_obj == nullptr) { vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR, "Cannot create oop handle"); } @@ -48,16 +48,16 @@ inline OopHandle::OopHandle(OopStorage* storage, oop obj) : } inline void OopHandle::release(OopStorage* storage) { - if (_obj != NULL) { + if (_obj != nullptr) { // Clear the OopHandle first - NativeAccess<>::oop_store(_obj, (oop)NULL); + NativeAccess<>::oop_store(_obj, nullptr); storage->release(_obj); } } inline void OopHandle::replace(oop obj) { oop* ptr = ptr_raw(); - assert(ptr != NULL, "should not use replace"); + assert(ptr != nullptr, "should not use replace"); NativeAccess<>::oop_store(ptr, obj); } diff --git a/src/hotspot/share/oops/oopsHierarchy.cpp b/src/hotspot/share/oops/oopsHierarchy.cpp index 057bfbf3e77..779ca1777d1 100644 --- a/src/hotspot/share/oops/oopsHierarchy.cpp +++ b/src/hotspot/share/oops/oopsHierarchy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ void oop::register_oop() { if (!Universe::is_fully_initialized()) return; // This gets expensive, which is why checking unhandled oops is on a switch. Thread* t = Thread::current_or_null(); - if (t != NULL && t->is_Java_thread()) { + if (t != nullptr && t->is_Java_thread()) { t->unhandled_oops()->register_unhandled_oop(this); } } @@ -45,7 +45,7 @@ void oop::unregister_oop() { if (!Universe::is_fully_initialized()) return; // This gets expensive, which is why checking unhandled oops is on a switch. 
Thread* t = Thread::current_or_null(); - if (t != NULL && t->is_Java_thread()) { + if (t != nullptr && t->is_Java_thread()) { t->unhandled_oops()->unregister_unhandled_oop(this); } } diff --git a/src/hotspot/share/oops/oopsHierarchy.hpp b/src/hotspot/share/oops/oopsHierarchy.hpp index bdfb47fe38d..a5886a005b2 100644 --- a/src/hotspot/share/oops/oopsHierarchy.hpp +++ b/src/hotspot/share/oops/oopsHierarchy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,7 +68,7 @@ typedef class typeArrayOopDesc* typeArrayOop; // a conversion to or from an oop to a numerical type is needed, // use the inline template methods, cast_*_oop, defined below. // -// Converting NULL to oop to Handle implicit is no longer accepted by the +// Converting null to oop to Handle implicit is no longer accepted by the // compiler because there are too many steps in the conversion. Use Handle() // instead, which generates less code anyway. diff --git a/src/hotspot/share/oops/recordComponent.cpp b/src/hotspot/share/oops/recordComponent.cpp index db4426fc950..5d4d072a7e0 100644 --- a/src/hotspot/share/oops/recordComponent.cpp +++ b/src/hotspot/share/oops/recordComponent.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,10 +43,10 @@ RecordComponent* RecordComponent::allocate(ClassLoaderData* loader_data, } void RecordComponent::deallocate_contents(ClassLoaderData* loader_data) { - if (annotations() != NULL) { + if (annotations() != nullptr) { MetadataFactory::free_array(loader_data, annotations()); } - if (type_annotations() != NULL) { + if (type_annotations() != nullptr) { MetadataFactory::free_array(loader_data, type_annotations()); } } @@ -70,11 +70,11 @@ void RecordComponent::print_on(outputStream* st) const { st->print(" - generic_signature_index: %d", _generic_signature_index); } st->cr(); - if (_annotations != NULL) { + if (_annotations != nullptr) { st->print_cr("record component annotations"); _annotations->print_value_on(st); } - if (_type_annotations != NULL) { + if (_type_annotations != nullptr) { st->print_cr("record component type annotations"); _type_annotations->print_value_on(st); } diff --git a/src/hotspot/share/oops/stackChunkOop.hpp b/src/hotspot/share/oops/stackChunkOop.hpp index c1c0aee8f24..36b06ecd324 100644 --- a/src/hotspot/share/oops/stackChunkOop.hpp +++ b/src/hotspot/share/oops/stackChunkOop.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -193,8 +193,8 @@ public: void print_on(bool verbose, outputStream* st) const; // Verifies the consistency of the chunk's data - bool verify(size_t* out_size = NULL, int* out_oops = NULL, - int* out_frames = NULL, int* out_interpreted_frames = NULL) NOT_DEBUG({ return true; }); + bool verify(size_t* out_size = nullptr, int* out_oops = nullptr, + int* out_frames = nullptr, int* out_interpreted_frames = nullptr) NOT_DEBUG({ return true; }); private: template diff --git a/src/hotspot/share/oops/symbol.cpp b/src/hotspot/share/oops/symbol.cpp index cd4c827adb1..5aa5f4e5dcf 100644 --- a/src/hotspot/share/oops/symbol.cpp +++ b/src/hotspot/share/oops/symbol.cpp @@ -132,7 +132,7 @@ int Symbol::index_of_at(int i, const char* substr, int substr_len) const { return -1; for (; scan <= limit; scan++) { scan = (address) memchr(scan, first_char, (limit + 1 - scan)); - if (scan == NULL) + if (scan == nullptr) return -1; // not found assert(scan >= bytes+i && scan <= limit, "scan oob"); if (substr_len <= 2 @@ -145,7 +145,7 @@ int Symbol::index_of_at(int i, const char* substr, int substr_len) const { } bool Symbol::is_star_match(const char* pattern) const { - if (strchr(pattern, '*') == NULL) { + if (strchr(pattern, '*') == nullptr) { return equals(pattern); } else { ResourceMark rm; @@ -185,7 +185,7 @@ void Symbol::print_symbol_on(outputStream* st) const { s = as_quoted_ascii(); s = os::strdup(s); } - if (s == NULL) { + if (s == nullptr) { st->print("(null)"); } else { st->print("%s", s); diff --git a/src/hotspot/share/oops/symbol.hpp b/src/hotspot/share/oops/symbol.hpp index 7e3657acd4b..2773b9b6c29 100644 --- a/src/hotspot/share/oops/symbol.hpp +++ b/src/hotspot/share/oops/symbol.hpp @@ -173,12 +173,12 @@ class Symbol : public MetaspaceObj { void make_permanent(); static void maybe_increment_refcount(Symbol* s) { - if (s != NULL) { + if (s != nullptr) { s->increment_refcount(); } } static void 
maybe_decrement_refcount(Symbol* s) { - if (s != NULL) { + if (s != nullptr) { s->decrement_refcount(); } } @@ -228,7 +228,7 @@ class Symbol : public MetaspaceObj { // Tests if the symbol contains the given utf8 substring // at the given byte position. bool contains_utf8_at(int position, const char* substring, int len) const { - assert(len >= 0 && substring != NULL, "substring must be valid"); + assert(len >= 0 && substring != nullptr, "substring must be valid"); if (position < 0) return false; // can happen with ends_with if (position + len > utf8_length()) return false; return (memcmp((char*)base() + position, substring, len) == 0); @@ -281,7 +281,7 @@ class Symbol : public MetaspaceObj { MetaspaceObj::Type type() const { return SymbolType; } // Printing - void print_symbol_on(outputStream* st = NULL) const; + void print_symbol_on(outputStream* st = nullptr) const; void print_utf8_on(outputStream* st) const; void print_on(outputStream* st) const; // First level print void print_value_on(outputStream* st) const; // Second level print. diff --git a/src/hotspot/share/oops/symbolHandle.hpp b/src/hotspot/share/oops/symbolHandle.hpp index b12ba59cace..f4388302310 100644 --- a/src/hotspot/share/oops/symbolHandle.hpp +++ b/src/hotspot/share/oops/symbolHandle.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,7 +46,7 @@ class SymbolHandleBase : public StackObj { Symbol* _temp; public: - SymbolHandleBase() : _temp(NULL) { } + SymbolHandleBase() : _temp(nullptr) { } // Conversion from a Symbol* to a SymbolHandleBase. // Does not increment the current reference count if temporary. 
diff --git a/src/hotspot/share/oops/typeArrayKlass.cpp b/src/hotspot/share/oops/typeArrayKlass.cpp index fa67b4876d9..899531d33cd 100644 --- a/src/hotspot/share/oops/typeArrayKlass.cpp +++ b/src/hotspot/share/oops/typeArrayKlass.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,8 +44,8 @@ TypeArrayKlass* TypeArrayKlass::create_klass(BasicType type, const char* name_str, TRAPS) { - Symbol* sym = NULL; - if (name_str != NULL) { + Symbol* sym = nullptr; + if (name_str != nullptr) { sym = SymbolTable::new_permanent_symbol(name_str); } @@ -57,7 +57,7 @@ TypeArrayKlass* TypeArrayKlass::create_klass(BasicType type, complete_create_array_klass(ak, ak->super(), ModuleEntryTable::javabase_moduleEntry(), CHECK_NULL); // Add all classes to our internal class loader list here, - // including classes in the bootstrap (NULL) class loader. + // including classes in the bootstrap (null) class loader. // Do this step after creating the mirror so that if the // mirror creation fails, loaded_classes_do() doesn't find // an array class without a mirror. 
@@ -178,7 +178,7 @@ Klass* TypeArrayKlass::array_klass(int n, TRAPS) { return this; // lock-free read needs acquire semantics - if (higher_dimension_acquire() == NULL) { + if (higher_dimension_acquire() == nullptr) { ResourceMark rm; JavaThread *jt = THREAD; @@ -186,7 +186,7 @@ Klass* TypeArrayKlass::array_klass(int n, TRAPS) { // Atomic create higher dimension and link into list MutexLocker mu(THREAD, MultiArray_lock); - if (higher_dimension() == NULL) { + if (higher_dimension() == nullptr) { Klass* oak = ObjArrayKlass::allocate_objArray_klass( class_loader_data(), dim + 1, this, CHECK_NULL); ObjArrayKlass* h_ak = ObjArrayKlass::cast(oak); @@ -211,8 +211,8 @@ Klass* TypeArrayKlass::array_klass_or_null(int n) { return this; // lock-free read needs acquire semantics - if (higher_dimension_acquire() == NULL) { - return NULL; + if (higher_dimension_acquire() == nullptr) { + return nullptr; } ObjArrayKlass* h_ak = ObjArrayKlass::cast(higher_dimension()); @@ -250,7 +250,7 @@ const char* TypeArrayKlass::external_name(BasicType type) { case T_LONG: return "[J"; default: ShouldNotReachHere(); } - return NULL; + return nullptr; } @@ -372,5 +372,5 @@ ModuleEntry* TypeArrayKlass::module() const { } PackageEntry* TypeArrayKlass::package() const { - return NULL; + return nullptr; } diff --git a/src/hotspot/share/oops/typeArrayKlass.hpp b/src/hotspot/share/oops/typeArrayKlass.hpp index 389c5ddbd74..5d4a739936f 100644 --- a/src/hotspot/share/oops/typeArrayKlass.hpp +++ b/src/hotspot/share/oops/typeArrayKlass.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -68,7 +68,7 @@ class TypeArrayKlass : public ArrayKlass { typeArrayOop allocate(int length, TRAPS) { return allocate_common(length, true, THREAD); } oop multi_allocate(int rank, jint* sizes, TRAPS); - oop protection_domain() const { return NULL; } + oop protection_domain() const { return nullptr; } // Copying void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS); diff --git a/src/hotspot/share/oops/weakHandle.cpp b/src/hotspot/share/oops/weakHandle.cpp index 48bb2a1368d..84a230fdf48 100644 --- a/src/hotspot/share/oops/weakHandle.cpp +++ b/src/hotspot/share/oops/weakHandle.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,9 +35,9 @@ WeakHandle::WeakHandle(OopStorage* storage, Handle obj) : WeakHandle::WeakHandle(OopStorage* storage, oop obj) : _obj(storage->allocate()) { - assert(obj != NULL, "no need to create weak null oop"); + assert(obj != nullptr, "no need to create weak null oop"); - if (_obj == NULL) { + if (_obj == nullptr) { vm_exit_out_of_memory(sizeof(oop*), OOM_MALLOC_ERROR, "Unable to create new weak oop handle in OopStorage %s", storage->name()); @@ -48,10 +48,10 @@ WeakHandle::WeakHandle(OopStorage* storage, oop obj) : void WeakHandle::release(OopStorage* storage) const { // Only release if the pointer to the object has been created. - if (_obj != NULL) { + if (_obj != nullptr) { // Clear the WeakHandle. For race in creating ClassLoaderData, we can release this // WeakHandle before it is cleared by GC. 
- NativeAccess::oop_store(_obj, (oop)NULL); + NativeAccess::oop_store(_obj, nullptr); storage->release(_obj); } } diff --git a/src/hotspot/share/oops/weakHandle.hpp b/src/hotspot/share/oops/weakHandle.hpp index c2b589958d2..95328d27aef 100644 --- a/src/hotspot/share/oops/weakHandle.hpp +++ b/src/hotspot/share/oops/weakHandle.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ class OopStorage; // processed weakly by GC. The runtime structures that point to the oop must // either peek or resolve the oop, the latter will keep the oop alive for // the GC cycle. The runtime structures that reference the oop must test -// if the value is NULL. If it is NULL, it has been cleaned out by GC. +// if the value is null. If it is null, it has been cleaned out by GC. // This is the vm version of jweak but has different GC lifetimes and policies, // depending on the type. @@ -46,21 +46,21 @@ class WeakHandle { WeakHandle(oop* w) : _obj(w) {} public: - WeakHandle() : _obj(NULL) {} // needed for init + WeakHandle() : _obj(nullptr) {} // needed for init WeakHandle(OopStorage* storage, Handle obj); WeakHandle(OopStorage* storage, oop obj); inline oop resolve() const; inline oop peek() const; void release(OopStorage* storage) const; - bool is_null() const { return _obj == NULL; } + bool is_null() const { return _obj == nullptr; } void replace(oop with_obj); void print() const; void print_on(outputStream* st) const; - bool is_empty() const { return _obj == NULL; } + bool is_empty() const { return _obj == nullptr; } oop* ptr_raw() const { return _obj; } };