8233380: CHT: Node allocation and freeing

Reviewed-by: rehn, tschatzl
This commit is contained in:
Ivan Walulya 2021-05-19 08:01:35 +00:00
parent 2563a6a9b5
commit 70f6c67051
7 changed files with 76 additions and 60 deletions

View File

@@ -122,11 +122,11 @@ class StringTableConfig : public StackObj {
return 0; return 0;
} }
// We use default allocation/deallocation but counted // We use default allocation/deallocation but counted
static void* allocate_node(size_t size, Value const& value) { static void* allocate_node(void* context, size_t size, Value const& value) {
StringTable::item_added(); StringTable::item_added();
return AllocateHeap(size, mtSymbol); return AllocateHeap(size, mtSymbol);
} }
static void free_node(void* memory, Value const& value) { static void free_node(void* context, void* memory, Value const& value) {
value.release(StringTable::_oop_storage); value.release(StringTable::_oop_storage);
FreeHeap(memory); FreeHeap(memory);
StringTable::item_removed(); StringTable::item_removed();

View File

@@ -132,11 +132,11 @@ public:
} }
} }
// We use default allocation/deallocation but counted // We use default allocation/deallocation but counted
static void* allocate_node(size_t size, Value const& value) { static void* allocate_node(void* context, size_t size, Value const& value) {
SymbolTable::item_added(); SymbolTable::item_added();
return AllocateHeap(size, mtSymbol); return AllocateHeap(size, mtSymbol);
} }
static void free_node(void* memory, Value const& value) { static void free_node(void* context, void* memory, Value const& value) {
// We get here because #1 some threads lost a race to insert a newly created Symbol // We get here because #1 some threads lost a race to insert a newly created Symbol
// or #2 we're cleaning up unused symbol. // or #2 we're cleaning up unused symbol.
// If #1, then the symbol can be either permanent, // If #1, then the symbol can be either permanent,

View File

@@ -79,11 +79,11 @@ class ResolvedMethodTableConfig : public AllStatic {
} }
// We use default allocation/deallocation but counted // We use default allocation/deallocation but counted
static void* allocate_node(size_t size, Value const& value) { static void* allocate_node(void* context, size_t size, Value const& value) {
ResolvedMethodTable::item_added(); ResolvedMethodTable::item_added();
return AllocateHeap(size, mtClass); return AllocateHeap(size, mtClass);
} }
static void free_node(void* memory, Value const& value) { static void free_node(void* context, void* memory, Value const& value) {
value.release(ResolvedMethodTable::_oop_storage); value.release(ResolvedMethodTable::_oop_storage);
FreeHeap(memory); FreeHeap(memory);
ResolvedMethodTable::item_removed(); ResolvedMethodTable::item_removed();

View File

@@ -69,11 +69,11 @@ class ThreadIdTableConfig : public AllStatic {
jlong tid = value->tid(); jlong tid = value->tid();
return primitive_hash(tid); return primitive_hash(tid);
} }
static void* allocate_node(size_t size, Value const& value) { static void* allocate_node(void* context, size_t size, Value const& value) {
ThreadIdTable::item_added(); ThreadIdTable::item_added();
return AllocateHeap(size, mtInternal); return AllocateHeap(size, mtInternal);
} }
static void free_node(void* memory, Value const& value) { static void free_node(void* context, void* memory, Value const& value) {
delete value; delete value;
FreeHeap(memory); FreeHeap(memory);
ThreadIdTable::item_removed(); ThreadIdTable::item_removed();

View File

@@ -63,12 +63,12 @@ class ConcurrentHashTable : public CHeapObj<F> {
VALUE* value() { return &_value; } VALUE* value() { return &_value; }
// Creates a node. // Creates a node.
static Node* create_node(const VALUE& value, Node* next = NULL) { static Node* create_node(void* context, const VALUE& value, Node* next = NULL) {
return new (CONFIG::allocate_node(sizeof(Node), value)) Node(value, next); return new (CONFIG::allocate_node(context, sizeof(Node), value)) Node(value, next);
} }
// Destroys a node. // Destroys a node.
static void destroy_node(Node* node) { static void destroy_node(void* context, Node* node) {
CONFIG::free_node((void*)node, node->_value); CONFIG::free_node(context, (void*)node, node->_value);
} }
void print_on(outputStream* st) const {}; void print_on(outputStream* st) const {};
@@ -200,6 +200,8 @@ class ConcurrentHashTable : public CHeapObj<F> {
const VALUE& operator()() { return _val; } const VALUE& operator()() { return _val; }
}; };
void* _context;
InternalTable* _table; // Active table. InternalTable* _table; // Active table.
InternalTable* _new_table; // Table we are resizing to. InternalTable* _new_table; // Table we are resizing to.
@@ -372,7 +374,11 @@ class ConcurrentHashTable : public CHeapObj<F> {
public: public:
ConcurrentHashTable(size_t log2size = DEFAULT_START_SIZE_LOG2, ConcurrentHashTable(size_t log2size = DEFAULT_START_SIZE_LOG2,
size_t log2size_limit = DEFAULT_MAX_SIZE_LOG2, size_t log2size_limit = DEFAULT_MAX_SIZE_LOG2,
size_t grow_hint = DEFAULT_GROW_HINT); size_t grow_hint = DEFAULT_GROW_HINT,
void* context = NULL);
explicit ConcurrentHashTable(void* context, size_t log2size = DEFAULT_START_SIZE_LOG2) :
ConcurrentHashTable(log2size, DEFAULT_MAX_SIZE_LOG2, DEFAULT_GROW_HINT, context) {}
~ConcurrentHashTable(); ~ConcurrentHashTable();

View File

@@ -364,7 +364,7 @@ inline void ConcurrentHashTable<CONFIG, F>::
while (node != NULL) { while (node != NULL) {
Node* free_node = node; Node* free_node = node;
node = node->next(); node = node->next();
Node::destroy_node(free_node); Node::destroy_node(_context, free_node);
} }
} }
} }
@@ -469,7 +469,7 @@ inline bool ConcurrentHashTable<CONFIG, F>::
// Publish the deletion. // Publish the deletion.
GlobalCounter::write_synchronize(); GlobalCounter::write_synchronize();
delete_f(rem_n->value()); delete_f(rem_n->value());
Node::destroy_node(rem_n); Node::destroy_node(_context, rem_n);
JFR_ONLY(_stats_rate.remove();) JFR_ONLY(_stats_rate.remove();)
return true; return true;
} }
@@ -518,7 +518,7 @@ inline void ConcurrentHashTable<CONFIG, F>::
} }
for (size_t node_it = 0; node_it < nd; node_it++) { for (size_t node_it = 0; node_it < nd; node_it++) {
del_f(ndel[node_it]->value()); del_f(ndel[node_it]->value());
Node::destroy_node(ndel[node_it]); Node::destroy_node(_context, ndel[node_it]);
JFR_ONLY(_stats_rate.remove();) JFR_ONLY(_stats_rate.remove();)
DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;) DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
} }
@@ -557,7 +557,7 @@ inline void ConcurrentHashTable<CONFIG, F>::
if (dels > 0) { if (dels > 0) {
GlobalCounter::write_synchronize(); GlobalCounter::write_synchronize();
for (size_t node_it = 0; node_it < dels; node_it++) { for (size_t node_it = 0; node_it < dels; node_it++) {
Node::destroy_node(ndel[node_it]); Node::destroy_node(_context, ndel[node_it]);
JFR_ONLY(_stats_rate.remove();) JFR_ONLY(_stats_rate.remove();)
DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;) DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;)
} }
@@ -686,7 +686,7 @@ inline bool ConcurrentHashTable<CONFIG, F>::
// chain. // chain.
write_synchonize_on_visible_epoch(thread); write_synchonize_on_visible_epoch(thread);
if (delete_me != NULL) { if (delete_me != NULL) {
Node::destroy_node(delete_me); Node::destroy_node(_context, delete_me);
delete_me = NULL; delete_me = NULL;
} }
} }
@@ -891,7 +891,7 @@ inline bool ConcurrentHashTable<CONFIG, F>::
size_t loops = 0; size_t loops = 0;
size_t i = 0; size_t i = 0;
uintx hash = lookup_f.get_hash(); uintx hash = lookup_f.get_hash();
Node* new_node = Node::create_node(value, NULL); Node* new_node = Node::create_node(_context, value, NULL);
while (true) { while (true) {
{ {
@@ -926,7 +926,7 @@ inline bool ConcurrentHashTable<CONFIG, F>::
if (new_node != NULL) { if (new_node != NULL) {
// CAS failed and a duplicate was inserted, we must free this node. // CAS failed and a duplicate was inserted, we must free this node.
Node::destroy_node(new_node); Node::destroy_node(_context, new_node);
} else if (i == 0 && clean) { } else if (i == 0 && clean) {
// We only do cleaning on fast inserts. // We only do cleaning on fast inserts.
Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash()); Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
@@ -1007,11 +1007,11 @@ inline size_t ConcurrentHashTable<CONFIG, F>::
// Constructor // Constructor
template <typename CONFIG, MEMFLAGS F> template <typename CONFIG, MEMFLAGS F>
inline ConcurrentHashTable<CONFIG, F>:: inline ConcurrentHashTable<CONFIG, F>::
ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint) ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, void* context)
: _new_table(NULL), _log2_size_limit(log2size_limit), : _context(context), _new_table(NULL), _log2_size_limit(log2size_limit),
_log2_start_size(log2size), _grow_hint(grow_hint), _log2_start_size(log2size), _grow_hint(grow_hint),
_size_limit_reached(false), _resize_lock_owner(NULL), _size_limit_reached(false), _resize_lock_owner(NULL),
_invisible_epoch(0) _invisible_epoch(0)
{ {
_stats_rate = TableRateStatistics(); _stats_rate = TableRateStatistics();
_resize_lock = _resize_lock =
@@ -1091,7 +1091,7 @@ inline bool ConcurrentHashTable<CONFIG, F>::
InternalTable* table = get_table(); InternalTable* table = get_table();
Bucket* bucket = get_bucket_in(table, hash); Bucket* bucket = get_bucket_in(table, hash);
assert(!bucket->have_redirect() && !bucket->is_locked(), "bad"); assert(!bucket->have_redirect() && !bucket->is_locked(), "bad");
Node* new_node = Node::create_node(value, bucket->first()); Node* new_node = Node::create_node(_context, value, bucket->first());
if (!bucket->cas_first(new_node, bucket->first())) { if (!bucket->cas_first(new_node, bucket->first())) {
assert(false, "bad"); assert(false, "bad");
} }

View File

@@ -41,51 +41,60 @@ struct Pointer : public AllStatic {
static uintx get_hash(const Value& value, bool* dead_hash) { static uintx get_hash(const Value& value, bool* dead_hash) {
return (uintx)value; return (uintx)value;
} }
static void* allocate_node(size_t size, const Value& value) { static void* allocate_node(void* context, size_t size, const Value& value) {
return ::malloc(size); return ::malloc(size);
} }
static void free_node(void* memory, const Value& value) { static void free_node(void* context, void* memory, const Value& value) {
::free(memory); ::free(memory);
} }
}; };
struct Allocator {
struct TableElement{
TableElement * volatile _next;
uintptr_t _value;
};
const uint nelements = 5;
TableElement* elements;
uint cur_index;
Allocator() : cur_index(0) {
elements = (TableElement*)::malloc(nelements * sizeof(TableElement));
}
void* allocate_node() {
return (void*)&elements[cur_index++];
}
void free_node(void* value) { /* Arena allocator. Ignore freed nodes*/ }
void reset() {
cur_index = 0;
}
~Allocator() {
::free(elements);
}
};
struct Config : public AllStatic { struct Config : public AllStatic {
typedef uintptr_t Value; typedef uintptr_t Value;
struct TableElement{
TableElement * volatile _next;
Value _value;
};
static const uint nelements = 5;
static TableElement* elements;
static uint cur_index;
static uintx get_hash(const Value& value, bool* dead_hash) { static uintx get_hash(const Value& value, bool* dead_hash) {
return (uintx)value; return (uintx)value;
} }
static void initialize() { static void* allocate_node(void* context, size_t size, const Value& value) {
elements = (TableElement*)::malloc(nelements * sizeof(TableElement)); Allocator* mm = (Allocator*)context;
} return mm->allocate_node();
static void* allocate_node(size_t size, const Value& value) {
return (void*)&elements[cur_index++];
} }
static void free_node(void* memory, const Value& value) { static void free_node(void* context, void* memory, const Value& value) {
return; Allocator* mm = (Allocator*)context;
} mm->free_node(memory);
static void reset() {
cur_index = 0;
}
static void bulk_free() {
::free(elements);
} }
}; };
Config::TableElement* Config::elements = nullptr;
uint Config::cur_index = 0;
typedef ConcurrentHashTable<Pointer, mtInternal> SimpleTestTable; typedef ConcurrentHashTable<Pointer, mtInternal> SimpleTestTable;
typedef ConcurrentHashTable<Pointer, mtInternal>::MultiGetHandle SimpleTestGetHandle; typedef ConcurrentHashTable<Pointer, mtInternal>::MultiGetHandle SimpleTestGetHandle;
typedef ConcurrentHashTable<Config, mtInternal> CustomTestTable; typedef ConcurrentHashTable<Config, mtInternal> CustomTestTable;
@@ -280,15 +289,16 @@ static void cht_reset_shrink(Thread* thr) {
uintptr_t val3 = 3; uintptr_t val3 = 3;
SimpleTestLookup stl1(val1), stl2(val2), stl3(val3); SimpleTestLookup stl1(val1), stl2(val2), stl3(val3);
Config::initialize(); Allocator mem_allocator;
CustomTestTable* cht = new CustomTestTable(); const uint initial_log_table_size = 4;
CustomTestTable* cht = new CustomTestTable(&mem_allocator);
cht_insert_and_find(thr, cht, val1); cht_insert_and_find(thr, cht, val1);
cht_insert_and_find(thr, cht, val2); cht_insert_and_find(thr, cht, val2);
cht_insert_and_find(thr, cht, val3); cht_insert_and_find(thr, cht, val3);
cht->unsafe_reset(); cht->unsafe_reset();
Config::reset(); mem_allocator.reset();
EXPECT_EQ(cht_get_copy(cht, thr, stl1), (uintptr_t)0) << "Table should have been reset"; EXPECT_EQ(cht_get_copy(cht, thr, stl1), (uintptr_t)0) << "Table should have been reset";
// Re-inserted values should not be considered duplicates; table was reset. // Re-inserted values should not be considered duplicates; table was reset.
@@ -296,8 +306,8 @@ static void cht_reset_shrink(Thread* thr) {
cht_insert_and_find(thr, cht, val2); cht_insert_and_find(thr, cht, val2);
cht_insert_and_find(thr, cht, val3); cht_insert_and_find(thr, cht, val3);
cht->unsafe_reset();
delete cht; delete cht;
Config::bulk_free();
} }
static void cht_scope(Thread* thr) { static void cht_scope(Thread* thr) {
@@ -506,10 +516,10 @@ public:
static uintx get_hash(const Value& value, bool* dead_hash) { static uintx get_hash(const Value& value, bool* dead_hash) {
return (uintx)(value + 18446744073709551557ul) * 18446744073709551557ul; return (uintx)(value + 18446744073709551557ul) * 18446744073709551557ul;
} }
static void* allocate_node(size_t size, const Value& value) { static void* allocate_node(void* context, size_t size, const Value& value) {
return AllocateHeap(size, mtInternal); return AllocateHeap(size, mtInternal);
} }
static void free_node(void* memory, const Value& value) { static void free_node(void* context, void* memory, const Value& value) {
FreeHeap(memory); FreeHeap(memory);
} }
}; };