8263976: Remove block allocation from BasicHashtable

Reviewed-by: lfoltan, iklam
Coleen Phillimore 2021-03-23 12:11:26 +00:00
parent fbd57bd498
commit 5bc382fb7a
16 changed files with 47 additions and 168 deletions

src/hotspot/share/classfile/dictionary.cpp

@@ -74,11 +74,10 @@ Dictionary::~Dictionary() {
}
}
assert(number_of_entries() == 0, "should have removed all entries");
-assert(new_entry_free_list() == NULL, "entry present on Dictionary's free list");
}
DictionaryEntry* Dictionary::new_entry(unsigned int hash, InstanceKlass* klass) {
-DictionaryEntry* entry = (DictionaryEntry*)Hashtable<InstanceKlass*, mtClass>::allocate_new_entry(hash, klass);
+DictionaryEntry* entry = (DictionaryEntry*)Hashtable<InstanceKlass*, mtClass>::new_entry(hash, klass);
entry->set_pd_set(NULL);
assert(klass->is_instance_klass(), "Must be");
return entry;
@@ -95,9 +94,7 @@ void Dictionary::free_entry(DictionaryEntry* entry) {
entry->set_pd_set(to_delete->next());
delete to_delete;
}
-// Unlink from the Hashtable prior to freeing
-unlink_entry(entry);
-FREE_C_HEAP_ARRAY(char, entry);
+BasicHashtable<mtClass>::free_entry(entry);
}
const int _resize_load_trigger = 5; // load factor that will trigger the resize
@@ -551,7 +548,7 @@ void SymbolPropertyTable::methods_do(void f(Method*)) {
void SymbolPropertyTable::free_entry(SymbolPropertyEntry* entry) {
entry->free_entry();
-Hashtable<Symbol*, mtSymbol>::free_entry(entry);
+BasicHashtable<mtSymbol>::free_entry(entry);
}
void DictionaryEntry::verify_protection_domain_set() {

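The same shape repeats across the subsystem tables in this commit: each free_entry() first releases whatever the entry owns, then delegates to BasicHashtable<F>::free_entry(), which unlinks the entry and frees its C-heap block, replacing the old unlink_entry() + FREE_C_HEAP_ARRAY pairs. A standalone sketch of the pattern, with malloc/free standing in for the HotSpot allocation macros and all type names hypothetical:

#include <cstdlib>

struct Entry {
  Entry* next = nullptr;
  int* payload = nullptr;       // stand-in for pd_set, Symbol refcounts, etc.
};

struct BaseTable {
  int count = 0;
  void free_entry(Entry* e) {   // mirrors BasicHashtable<F>::free_entry
    e->next = nullptr;          // unlink_entry(): clean up links
    --count;
    std::free(e);               // FREE_C_HEAP_ARRAY(char, entry)
  }
};

struct DictTable : BaseTable {
  void free_entry(Entry* e) {   // mirrors Dictionary::free_entry
    std::free(e->payload);      // release entry-owned resources first
    BaseTable::free_entry(e);   // then unlink and free the entry itself
  }
};
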
src/hotspot/share/classfile/dictionary.hpp

@@ -92,14 +92,6 @@ public:
return (DictionaryEntry**)Hashtable<InstanceKlass*, mtClass>::bucket_addr(i);
}
-void add_entry(int index, DictionaryEntry* new_entry) {
-Hashtable<InstanceKlass*, mtClass>::add_entry(index, (HashtableEntry<InstanceKlass*, mtClass>*)new_entry);
-}
-void unlink_entry(DictionaryEntry* entry) {
-Hashtable<InstanceKlass*, mtClass>::unlink_entry((HashtableEntry<InstanceKlass*, mtClass>*)entry);
-}
void free_entry(DictionaryEntry* entry);
bool is_valid_protection_domain(unsigned int hash,

src/hotspot/share/classfile/loaderConstraints.cpp

@@ -58,7 +58,7 @@ LoaderConstraintEntry* LoaderConstraintTable::new_entry(
void LoaderConstraintTable::free_entry(LoaderConstraintEntry *entry) {
// decrement name refcount before freeing
entry->name()->decrement_refcount();
-Hashtable<InstanceKlass*, mtClass>::free_entry(entry);
+BasicHashtable<mtClass>::free_entry(entry);
}
// The loaderConstraintTable must always be accessed with the

src/hotspot/share/classfile/moduleEntry.cpp

@@ -357,14 +357,10 @@ ModuleEntryTable::~ModuleEntryTable() {
if (to_remove->location() != NULL) {
to_remove->location()->decrement_refcount();
}
-// Unlink from the Hashtable prior to freeing
-unlink_entry(to_remove);
-FREE_C_HEAP_ARRAY(char, to_remove);
+BasicHashtable<mtModule>::free_entry(to_remove);
}
}
assert(number_of_entries() == 0, "should have removed all entries");
-assert(new_entry_free_list() == NULL, "entry present on ModuleEntryTable's free list");
}
void ModuleEntry::set_loader_data(ClassLoaderData* cld) {
@@ -579,7 +575,7 @@ ModuleEntry* ModuleEntryTable::new_entry(unsigned int hash, Handle module_handle
Symbol* version, Symbol* location,
ClassLoaderData* loader_data) {
assert(Module_lock->owned_by_self(), "should have the Module_lock");
-ModuleEntry* entry = (ModuleEntry*)Hashtable<Symbol*, mtModule>::allocate_new_entry(hash, name);
+ModuleEntry* entry = (ModuleEntry*)Hashtable<Symbol*, mtModule>::new_entry(hash, name);
// Initialize fields specific to a ModuleEntry
entry->init();

src/hotspot/share/classfile/packageEntry.cpp

@@ -187,13 +187,10 @@ PackageEntryTable::~PackageEntryTable() {
to_remove->delete_qualified_exports();
to_remove->name()->decrement_refcount();
-// Unlink from the Hashtable prior to freeing
-unlink_entry(to_remove);
-FREE_C_HEAP_ARRAY(char, to_remove);
+BasicHashtable<mtModule>::free_entry(to_remove);
}
}
assert(number_of_entries() == 0, "should have removed all entries");
-assert(new_entry_free_list() == NULL, "entry present on PackageEntryTable's free list");
}
#if INCLUDE_CDS_JAVA_HEAP
@@ -322,7 +319,7 @@ void PackageEntryTable::load_archived_entries(Array<PackageEntry*>* archived_pac
PackageEntry* PackageEntryTable::new_entry(unsigned int hash, Symbol* name, ModuleEntry* module) {
assert(Module_lock->owned_by_self(), "should have the Module_lock");
-PackageEntry* entry = (PackageEntry*)Hashtable<Symbol*, mtModule>::allocate_new_entry(hash, name);
+PackageEntry* entry = (PackageEntry*)Hashtable<Symbol*, mtModule>::new_entry(hash, name);
JFR_ONLY(INIT_ID(entry);)

src/hotspot/share/classfile/placeholders.cpp

@@ -193,7 +193,7 @@ void PlaceholderTable::free_entry(PlaceholderEntry* entry) {
// decrement Symbol refcount here because Hashtable doesn't.
entry->literal()->decrement_refcount();
if (entry->supername() != NULL) entry->supername()->decrement_refcount();
-Hashtable<Symbol*, mtClass>::free_entry(entry);
+BasicHashtable<mtClass>::free_entry(entry);
}

src/hotspot/share/classfile/resolutionErrors.cpp

@@ -137,7 +137,7 @@ void ResolutionErrorTable::free_entry(ResolutionErrorEntry *entry) {
if (entry->nest_host_error() != NULL) {
FREE_C_HEAP_ARRAY(char, entry->nest_host_error());
}
-Hashtable<ConstantPool*, mtClass>::free_entry(entry);
+BasicHashtable<mtClass>::free_entry(entry);
}

src/hotspot/share/classfile/systemDictionary.cpp

@@ -1576,13 +1576,14 @@ InstanceKlass* SystemDictionary::find_or_define_helper(Symbol* class_name, Handl
// Other cases fall through, and may run into duplicate defines
// caught by finding an entry in the SystemDictionary
if (is_parallelDefine(class_loader) && (probe->instance_klass() != NULL)) {
+InstanceKlass* ik = probe->instance_klass();
placeholders()->find_and_remove(name_hash, name_h, loader_data, PlaceholderTable::DEFINE_CLASS, THREAD);
SystemDictionary_lock->notify_all();
#ifdef ASSERT
InstanceKlass* check = dictionary->find_class(name_hash, name_h);
assert(check != NULL, "definer missed recording success");
#endif
-return probe->instance_klass();
+return ik;
} else {
// This thread will define the class (even if earlier thread tried and had an error)
probe->set_definer(THREAD);

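This systemDictionary.cpp hunk is the one behavioral fix the cleanup forces: once placeholder entries are individually freed, find_and_remove() can release the entry that probe points to, so instance_klass() must be read before the removal rather than after. A generic illustration of the hazard being avoided, with hypothetical stand-in types rather than the HotSpot code:

#include <unordered_map>

struct Klass { int id; };
struct PlaceholderEntry { Klass* klass; };

struct PlaceholderMap {
  std::unordered_map<int, PlaceholderEntry*> map;
  PlaceholderEntry* find(int key) { return map.at(key); }
  void find_and_remove(int key) {     // now frees the entry outright
    delete map.at(key);
    map.erase(key);
  }
};

Klass* define_class(PlaceholderMap& placeholders, int key) {
  PlaceholderEntry* probe = placeholders.find(key);
  Klass* ik = probe->klass;           // capture before removal...
  placeholders.find_and_remove(key);  // ...because this frees *probe
  return ik;                          // probe->klass here would read freed memory
}
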
src/hotspot/share/gc/g1/g1CodeRootSetTable.cpp

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,14 +45,7 @@ size_t G1CodeRootSetTable::mem_size() {
G1CodeRootSetTable::Entry* G1CodeRootSetTable::new_entry(nmethod* nm) {
unsigned int hash = compute_hash(nm);
-Entry* entry = (Entry*) new_entry_free_list();
-if (entry == NULL) {
-entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
-}
-entry->set_next(NULL);
-entry->set_hash(hash);
-entry->set_literal(nm);
-return entry;
+return (Entry*)Hashtable<nmethod*, mtGC>::new_entry(hash, nm);
}
void G1CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
@@ -73,17 +66,10 @@ G1CodeRootSetTable::~G1CodeRootSetTable() {
Entry* to_remove = e;
// read next before freeing.
e = e->next();
-unlink_entry(to_remove);
-FREE_C_HEAP_ARRAY(char, to_remove);
+BasicHashtable<mtGC>::free_entry(to_remove);
}
}
assert(number_of_entries() == 0, "should have removed all entries");
-// Each of the entries in new_entry_free_list() have been allocated in
-// G1CodeRootSetTable::new_entry(). We never call the block allocator
-// in BasicHashtable::new_entry().
-for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
-FREE_C_HEAP_ARRAY(char, e);
-}
}
bool G1CodeRootSetTable::add(nmethod* nm) {
@@ -124,7 +110,6 @@ void G1CodeRootSetTable::copy_to(G1CodeRootSetTable* new_table) {
new_table->add(e->literal());
}
}
-new_table->copy_freelist(this);
}
void G1CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {

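With entries now individually allocated everywhere, G1CodeRootSetTable no longer needs its private arrangement of direct C-heap entries layered on the shared free list, and its destructor reduces to the uniform teardown: walk each bucket chain, read the next link before freeing, and nothing is left to scavenge afterwards. A minimal standalone sketch of that traversal, using plain malloc/free rather than the HotSpot macros:

#include <cstdlib>

struct Entry { Entry* next; };

struct Table {
  Entry** buckets;  // one chain head per bucket
  int size;

  ~Table() {
    for (int i = 0; i < size; i++) {
      for (Entry* e = buckets[i]; e != nullptr; ) {
        Entry* to_remove = e;
        e = e->next;            // read next before freeing
        std::free(to_remove);   // per-entry storage, so a plain free suffices
      }
    }
    std::free(buckets);
  }
};
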
src/hotspot/share/prims/jvm.cpp

@@ -94,6 +94,7 @@
#include "services/threadService.hpp"
#include "utilities/copy.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/utf8.hpp"

src/hotspot/share/prims/jvmtiTagMapTable.cpp

@@ -64,7 +64,6 @@ void JvmtiTagMapTable::clear() {
*p = NULL; // clear out buckets.
}
assert(number_of_entries() == 0, "should have removed all entries");
-assert(new_entry_free_list() == NULL, "entry present on JvmtiTagMapTable's free list");
}
JvmtiTagMapTable::~JvmtiTagMapTable() {
@@ -74,15 +73,14 @@ JvmtiTagMapTable::~JvmtiTagMapTable() {
// Entries are C_Heap allocated
JvmtiTagMapEntry* JvmtiTagMapTable::new_entry(unsigned int hash, WeakHandle w, jlong tag) {
-JvmtiTagMapEntry* entry = (JvmtiTagMapEntry*)Hashtable<WeakHandle, mtServiceability>::allocate_new_entry(hash, w);
+JvmtiTagMapEntry* entry = (JvmtiTagMapEntry*)Hashtable<WeakHandle, mtServiceability>::new_entry(hash, w);
entry->set_tag(tag);
return entry;
}
void JvmtiTagMapTable::free_entry(JvmtiTagMapEntry* entry) {
-unlink_entry(entry);
entry->literal().release(JvmtiExport::weak_tag_storage()); // release to OopStorage
-FREE_C_HEAP_ARRAY(char, entry);
+BasicHashtable<mtServiceability>::free_entry(entry);
}
unsigned int JvmtiTagMapTable::compute_hash(oop obj) {

src/hotspot/share/runtime/vmStructs.cpp

@@ -491,10 +491,6 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
\
nonstatic_field(BasicHashtable<mtInternal>, _table_size, int) \
nonstatic_field(BasicHashtable<mtInternal>, _buckets, HashtableBucket<mtInternal>*) \
-volatile_nonstatic_field(BasicHashtable<mtInternal>, _free_list, BasicHashtableEntry<mtInternal>*) \
-nonstatic_field(BasicHashtable<mtInternal>, _first_free_entry, char*) \
-nonstatic_field(BasicHashtable<mtInternal>, _end_block, char*) \
-nonstatic_field(BasicHashtable<mtInternal>, _entry_size, int) \
\
/*******************/ \
/* ClassLoaderData */ \

src/hotspot/share/utilities/hashtable.cpp

@@ -23,20 +23,19 @@
*/
#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/packageEntry.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/protectionDomainCache.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "oops/weakHandle.inline.hpp"
#include "prims/jvmtiTagMapTable.hpp"
#include "runtime/safepoint.hpp"
@@ -45,67 +44,31 @@
#include "utilities/hashtable.inline.hpp"
#include "utilities/numberSeq.hpp"
// This hashtable is implemented as an open hash table with a fixed number of buckets.
-template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
-BasicHashtableEntry<F>* entry = NULL;
-if (_free_list != NULL) {
-entry = _free_list;
-_free_list = _free_list->next();
-}
-return entry;
-}
+// Hashtable entry allocates in the C heap directly.
-// HashtableEntrys are allocated in blocks to reduce the space overhead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
-BasicHashtableEntry<F>* entry = new_entry_free_list();
-if (entry == NULL) {
-if (_first_free_entry + _entry_size >= _end_block) {
-int block_size = MAX2((int)_table_size / 2, (int)_number_of_entries); // pick a reasonable value
-block_size = clamp(block_size, 2, 512); // but never go out of this range
-int len = round_down_power_of_2(_entry_size * block_size);
-assert(len >= _entry_size, "");
-_first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
-_entry_blocks.append(_first_free_entry);
-_end_block = _first_free_entry + len;
-}
-entry = (BasicHashtableEntry<F>*)_first_free_entry;
-_first_free_entry += _entry_size;
-}
-assert(_entry_size % HeapWordSize == 0, "");
-entry->set_hash(hashValue);
+BasicHashtableEntry<F>* entry = ::new (NEW_C_HEAP_ARRAY(char, this->entry_size(), F))
+BasicHashtableEntry<F>(hashValue);
return entry;
}
template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(unsigned int hashValue, T obj) {
-HashtableEntry<T, F>* entry;
-entry = (HashtableEntry<T, F>*)BasicHashtable<F>::new_entry(hashValue);
-entry->set_literal(obj);
+HashtableEntry<T, F>* entry = ::new (NEW_C_HEAP_ARRAY(char, this->entry_size(), F))
+HashtableEntry<T, F>(hashValue, obj);
return entry;
}
-// Version of hashtable entry allocation that allocates in the C heap directly.
-// The block allocator in BasicHashtable has less fragmentation, but the memory is not freed until
-// the whole table is freed. Use allocate_new_entry() if you want to individually free the memory
-// used by each entry
-template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::allocate_new_entry(unsigned int hashValue, T obj) {
-HashtableEntry<T, F>* entry = (HashtableEntry<T, F>*) NEW_C_HEAP_ARRAY(char, this->entry_size(), F);
-if (DumpSharedSpaces) {
-// Avoid random bits in structure padding so we can have deterministic content in CDS archive
-memset((void*)entry, 0, this->entry_size());
-}
-entry->set_hash(hashValue);
-entry->set_literal(obj);
-entry->set_next(NULL);
-return entry;
+template <MEMFLAGS F> inline void BasicHashtable<F>::free_entry(BasicHashtableEntry<F>* entry) {
+// Unlink from the Hashtable prior to freeing
+unlink_entry(entry);
+FREE_C_HEAP_ARRAY(char, entry);
+JFR_ONLY(_stats_rate.remove();)
}
template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
FREE_C_HEAP_ARRAY(HashtableBucket, _buckets);
_buckets = NULL;

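The replacement allocation in hashtable.cpp is the standard placement-new idiom: take raw bytes from the C heap, then run the entry constructor in that storage, so free_entry() can later return the same block with a single free. A standalone sketch of the idiom, with malloc/free standing in for NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY:

#include <cstdio>
#include <cstdlib>
#include <new>       // placement operator new

struct Entry {
  unsigned int hash;
  Entry* next;
  Entry(unsigned int h) : hash(h), next(nullptr) {}
};

Entry* new_entry(unsigned int hash) {
  void* mem = std::malloc(sizeof(Entry));   // NEW_C_HEAP_ARRAY(char, entry_size(), F)
  return ::new (mem) Entry(hash);           // construct in place, as in the new code
}

void free_entry(Entry* e) {
  std::free(e);                             // FREE_C_HEAP_ARRAY(char, entry)
}

int main() {
  Entry* e = new_entry(42u);
  std::printf("hash=%u\n", e->hash);
  free_entry(e);
  return 0;
}
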
src/hotspot/share/utilities/hashtable.hpp

@@ -26,21 +26,12 @@
#define SHARE_UTILITIES_HASHTABLE_HPP
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/tableStatistics.hpp"
-// This is a generic hashtable, designed to be used for the symbol
-// and string tables.
-//
-// It is implemented as an open hash table with a fixed number of buckets.
-//
-// %note:
-// - TableEntrys are allocated in blocks to reduce the space overhead.
+// This is a generic hashtable which is implemented as an open hash table with
+// a fixed number of buckets.
template <MEMFLAGS F> class BasicHashtableEntry {
friend class VMStructs;
@@ -50,16 +41,11 @@ private:
// Link to next element in the linked list for this bucket.
BasicHashtableEntry<F>* _next;
-// Windows IA64 compiler requires subclasses to be able to access these
-protected:
-// Entry objects should not be created, they should be taken from the
-// free list with BasicHashtable.new_entry().
-BasicHashtableEntry() { ShouldNotReachHere(); }
-// Entry objects should not be destroyed. They should be placed on
-// the free list instead with BasicHashtable.free_entry().
-~BasicHashtableEntry() { ShouldNotReachHere(); }
public:
+BasicHashtableEntry(unsigned int hashValue) : _hash(hashValue), _next(nullptr) {}
+// Still should not call this. Entries are placement new allocated, so are
+// deleted with free_entry.
+~BasicHashtableEntry() { ShouldNotReachHere(); }
unsigned int hash() const { return _hash; }
void set_hash(unsigned int hash) { _hash = hash; }
@@ -86,6 +72,8 @@ private:
T _literal; // ref to item in table.
public:
+HashtableEntry(unsigned int hashValue, T value) : BasicHashtableEntry<F>(hashValue), _literal(value) {}
// Literal
T literal() const { return _literal; }
T* literal_addr() { return &_literal; }
@@ -142,12 +130,8 @@ private:
// Instance variables
int _table_size;
HashtableBucket<F>* _buckets;
-BasicHashtableEntry<F>* volatile _free_list;
-char* _first_free_entry;
-char* _end_block;
int _entry_size;
volatile int _number_of_entries;
-GrowableArrayCHeap<char*, F> _entry_blocks;
protected:
@@ -164,29 +148,16 @@ protected:
// The following method is not MT-safe and must be done under lock.
BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
-// Attempt to get an entry from the free list
-BasicHashtableEntry<F>* new_entry_free_list();
// Table entry management
BasicHashtableEntry<F>* new_entry(unsigned int hashValue);
-// Used when moving the entry to another table
-// Clean up links, but do not add to free_list
+// Used when moving the entry to another table or deleting entry.
+// Clean up links.
void unlink_entry(BasicHashtableEntry<F>* entry) {
entry->set_next(NULL);
--_number_of_entries;
}
-// Move over freelist and free block for allocation
-void copy_freelist(BasicHashtable* src) {
-_free_list = src->_free_list;
-src->_free_list = NULL;
-_first_free_entry = src->_first_free_entry;
-src->_first_free_entry = NULL;
-_end_block = src->_end_block;
-src->_end_block = NULL;
-}
// Free the buckets in this hashtable
void free_buckets();
public:
@@ -236,10 +207,7 @@ public:
protected:
// Table entry management
HashtableEntry<T, F>* new_entry(unsigned int hashValue, T obj);
-// Don't create and use freelist of HashtableEntry.
-HashtableEntry<T, F>* allocate_new_entry(unsigned int hashValue, T obj);
// The following method is MT-safe and may be used with caution.
HashtableEntry<T, F>* bucket(int i) const {

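Note the contract the header now documents: entries gain a real constructor (run via placement new), while the destructor still asserts, because FREE_C_HEAP_ARRAY releases raw bytes without invoking destructors. That is only sound when the entry owns nothing a destructor would have to clean up, which is why the subsystem free_entry() methods release refcounts and handles explicitly first. A small sketch of the never-run destructor, under those same assumptions:

#include <cstdlib>
#include <new>

struct Entry {
  unsigned int hash;
  Entry(unsigned int h) : hash(h) {}
  ~Entry() { /* HotSpot puts ShouldNotReachHere() here; raw free skips it */ }
};

int main() {
  void* mem = std::malloc(sizeof(Entry));
  Entry* e = ::new (mem) Entry(7u);  // constructor runs in the raw storage
  std::free(e);                      // raw free: ~Entry() is never invoked
  return 0;                          // safe only because Entry owns no resources
}
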
src/hotspot/share/utilities/hashtable.inline.hpp

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/dtrace.hpp"
// Inline function definitions for hashtable.hpp.
@@ -37,8 +36,7 @@
// Initialize a table.
-template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, int entry_size) :
-_entry_blocks(4) {
+template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, int entry_size) {
// Called on startup, no locking needed
initialize(table_size, entry_size, 0);
_buckets = NEW_C_HEAP_ARRAY2(HashtableBucket<F>, table_size, F, CURRENT_PC);
@@ -51,8 +49,8 @@ template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, i
template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, int entry_size,
HashtableBucket<F>* buckets,
-int number_of_entries) :
-_entry_blocks(4) {
+int number_of_entries) {
// Called on startup, no locking needed
initialize(table_size, entry_size, number_of_entries);
_buckets = buckets;
@@ -60,9 +58,6 @@ template <MEMFLAGS F> inline BasicHashtable<F>::BasicHashtable(int table_size, i
}
template <MEMFLAGS F> inline BasicHashtable<F>::~BasicHashtable() {
-for (int i = 0; i < _entry_blocks.length(); i++) {
-FREE_C_HEAP_ARRAY(char, _entry_blocks.at(i));
-}
free_buckets();
}
@@ -71,9 +66,6 @@ template <MEMFLAGS F> inline void BasicHashtable<F>::initialize(int table_size,
// Called on startup, no locking needed
_table_size = table_size;
_entry_size = entry_size;
-_free_list = NULL;
-_first_free_entry = NULL;
-_end_block = NULL;
_number_of_entries = number_of_entries;
}
@@ -119,11 +111,4 @@ template <MEMFLAGS F> inline void BasicHashtable<F>::add_entry(int index, BasicH
JFR_ONLY(_stats_rate.add();)
}
-template <MEMFLAGS F> inline void BasicHashtable<F>::free_entry(BasicHashtableEntry<F>* entry) {
-entry->set_next(_free_list);
-_free_list = entry;
---_number_of_entries;
-JFR_ONLY(_stats_rate.remove();)
-}
#endif // SHARE_UTILITIES_HASHTABLE_INLINE_HPP

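For contrast with the new direct free, the deleted inline free_entry() implemented recycling: a freed entry was pushed onto _free_list, new_entry() popped from there before bump-allocating out of the current block, and block memory was only returned when the whole table died. A generic sketch of that removed scheme, with stand-in types:

struct Entry { Entry* next; unsigned int hash; };

struct RecyclingTable {
  Entry* free_list = nullptr;
  int count = 0;

  void free_entry(Entry* e) {     // removed behavior: recycle, never free
    e->next = free_list;
    free_list = e;
    --count;
  }

  Entry* new_entry_free_list() {  // pop a recycled entry, if any
    Entry* e = free_list;
    if (e != nullptr) free_list = e->next;
    return e;
  }
};
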
test/hotspot/jtreg/runtime/NMT/CheckForProperDetailStackTrace.java

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@ public class CheckForProperDetailStackTrace {
to make sure it matches even if the symbol is not unmangled.
*/
private static String stackTraceDefault =
".*Hashtable.*allocate_new_entry.*\n" +
".*Hashtable.*new_entry.*\n" +
".*ModuleEntryTable.*new_entry.*\n" +
".*ModuleEntryTable.*locked_create_entry.*\n" +
".*Modules.*define_module.*\n";
@@ -71,7 +71,7 @@
new_entry may be inlined.
*/
private static String stackTraceAlternate =
".*Hashtable.*allocate_new_entry.*\n" +
".*Hashtable.*new_entry.*\n" +
".*ModuleEntryTable.*locked_create_entry.*\n" +
".*Modules.*define_module.*\n" +
".*JVM_DefineModule.*\n";