8333658: NMT: Use an allocator with 4-byte pointers to save memory in NativeCallStackStorage

Reviewed-by: stuefe, azafari
This commit is contained in:
Johan Sjölen 2024-06-25 14:37:38 +00:00
parent 6c6793307d
commit 57f8b91e55
4 changed files with 341 additions and 53 deletions

View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_NMT_ARRAYWITHFREELIST_HPP
#define SHARE_NMT_ARRAYWITHFREELIST_HPP
#include "utilities/growableArray.hpp"

#include <type_traits>
#include <utility>
// A flat array of elements E, backed by C-heap, growing on-demand. It allows for
// returning arbitrary elements and keeps them in a freelist. Elements can be uniquely
// identified via array index.
template<typename E, MEMFLAGS flag>
class ArrayWithFreeList {
  // An E must be trivially copyable and destructible, but it may be constructed
  // however it likes.
  constexpr void static_assert_E_satisfies_type_requirements() const {
    static_assert(std::is_trivially_copyable<E>::value && std::is_trivially_destructible<E>::value, "must be");
  }
public:
  // Elements are identified by a 4-byte index instead of a pointer.
  using I = int32_t;
  static constexpr const I nil = -1;
private:
  // A free list allocator element is either a link to the next free space
  // or an actual element.
  union BackingElement {
    I link;
    E e;
  };
  GrowableArrayCHeap<BackingElement, flag> _backing_storage;
  I _free_start; // Head of the freelist, nil when the freelist is empty.
  bool is_in_bounds(I i) const {
    return i >= 0 && i < _backing_storage.length();
  }
public:
  NONCOPYABLE(ArrayWithFreeList<E COMMA flag>);
  ArrayWithFreeList(int initial_capacity = 8)
    : _backing_storage(initial_capacity),
      _free_start(nil) {}
  // Construct an E in place from args and return its index. A slot from the
  // freelist is reused if one is available; otherwise the backing array grows.
  template<typename... Args>
  I allocate(Args&&... args) {
    static_assert_E_satisfies_type_requirements();
    BackingElement* be;
    I i;
    if (_free_start != nil) {
      // Must point to already existing index
      be = &_backing_storage.at(_free_start);
      i = _free_start;
      _free_start = be->link;
    } else {
      // There are no free elements, allocate a new one.
      i = _backing_storage.append(BackingElement());
      be = _backing_storage.adr_at(i);
    }
    // Perfect-forward the arguments to avoid copying them. No destructor call
    // is needed before reuse: E is trivially destructible (asserted above).
    ::new (be) E{std::forward<Args>(args)...};
    return i;
  }
  // Return element i to the freelist. Passing nil is a no-op.
  void deallocate(I i) {
    static_assert_E_satisfies_type_requirements();
    assert(i == nil || is_in_bounds(i), "out of bounds free");
    if (i == nil) return;
    // Reuse the slot's storage as the freelist link; the old head becomes next.
    BackingElement& be_freed = _backing_storage.at(i);
    be_freed.link = _free_start;
    _free_start = i;
  }
  // Access the element at index i. i must refer to a live (allocated) element.
  E& at(I i) {
    static_assert_E_satisfies_type_requirements();
    assert(i != nil, "null pointer dereference");
    assert(is_in_bounds(i), "out of bounds dereference");
    return _backing_storage.at(i).e;
  }
};
#endif // SHARE_NMT_ARRAYWITHFREELIST_HPP

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
// Intern value: return the index of an identical, previously stored stack if
// one exists, otherwise store value and return its new index.
NativeCallStackStorage::StackIndex NativeCallStackStorage::put(const NativeCallStack& value) {
  const int bucket = value.calculate_hash() % _table_size;
  // Walk this bucket's chain looking for an already-interned identical stack.
  for (TableEntryIndex cur = _table[bucket]; cur != TableEntryStorage::nil;) {
    TableEntry& entry = _entry_storage.at(cur);
    if (value.equals(get(entry.stack))) {
      return entry.stack;
    }
    cur = entry.next;
  }
  // Not present: append the stack and prepend a new entry to the chain.
  const int idx = _stacks.append(value);
  StackIndex si{idx};
  _table[bucket] = _entry_storage.allocate(_table[bucket], si);
  return si;
}
// Build the storage. The hash table is only allocated in detailed mode;
// in summary mode no stacks are tracked and _table stays nullptr.
NativeCallStackStorage::NativeCallStackStorage(bool is_detailed_mode, int table_size)
  : _table_size(table_size),
    _table(nullptr),
    _stacks(),
    _is_detailed_mode(is_detailed_mode),
    _fake_stack() {
  if (!_is_detailed_mode) {
    return;
  }
  _table = NEW_C_HEAP_ARRAY(TableEntryIndex, _table_size, mtNMT);
  // Every bucket starts out as an empty chain.
  for (int i = 0; i < _table_size; i++) {
    _table[i] = TableEntryStorage::nil;
  }
}
NativeCallStackStorage::~NativeCallStackStorage() {
  // _table is an array of TableEntryIndex (the old Link*-based "LinkPtr" type
  // no longer exists). _table may be nullptr in summary mode.
  FREE_C_HEAP_ARRAY(TableEntryIndex, _table);
}

View File

@ -25,8 +25,7 @@
#ifndef SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP #ifndef SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP
#define SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP #define SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP
#include "memory/allocation.hpp" #include "nmt/arrayWithFreeList.hpp"
#include "memory/arena.hpp"
#include "utilities/growableArray.hpp" #include "utilities/growableArray.hpp"
#include "utilities/nativeCallStack.hpp" #include "utilities/nativeCallStack.hpp"
@ -40,64 +39,41 @@
// - Have fast comparisons // - Have fast comparisons
// - Have constant time access // - Have constant time access
// We achieve this by using a closed hashtable for finding previously existing NCS:s and referring to them by an index that's smaller than a pointer. // We achieve this by using a closed hashtable for finding previously existing NCS:s and referring to them by an index that's smaller than a pointer.
class NativeCallStackStorage : public CHeapObj<mtNMT> { class NativeCallStackStorage : public CHeapObjBase {
public: public:
struct StackIndex { struct StackIndex {
friend NativeCallStackStorage; friend NativeCallStackStorage;
private:
static constexpr const int32_t _invalid = -1;
int32_t _stack_index; int32_t _stack_index;
StackIndex(int32_t stack_index)
: _stack_index(stack_index) {
}
public: public:
static constexpr const int32_t invalid = -1;
static bool equals(const StackIndex& a, const StackIndex& b) { static bool equals(const StackIndex& a, const StackIndex& b) {
return a._stack_index == b._stack_index; return a._stack_index == b._stack_index;
} }
bool is_invalid() { bool is_invalid() {
return _stack_index == _invalid; return _stack_index == invalid;
}
StackIndex()
: _stack_index(_invalid) {
} }
}; };
private: private:
struct Link : public ArenaObj { struct TableEntry;
Link* next; using TableEntryStorage = ArrayWithFreeList<TableEntry, mtNMT>;
StackIndex stack; using TableEntryIndex = typename TableEntryStorage::I;
Link(Link* next, StackIndex v)
: next(next), TableEntryStorage _entry_storage;
stack(v) {
} struct TableEntry {
}; TableEntryIndex next;
StackIndex put(const NativeCallStack& value) { StackIndex stack;
int bucket = value.calculate_hash() % _table_size; };
Link* link = _table[bucket];
while (link != nullptr) { StackIndex put(const NativeCallStack& value);
if (value.equals(get(link->stack))) {
return link->stack;
}
link = link->next;
}
int idx = _stacks.append(value);
Link* new_link = new (&_arena) Link(_table[bucket], StackIndex(idx));
_table[bucket] = new_link;
return new_link->stack;
}
// For storage of the Links
Arena _arena;
// Pick a prime number of buckets. // Pick a prime number of buckets.
// 4099 gives a 50% probability of collisions at 76 stacks (as per birthday problem). // 4099 gives a 50% probability of collisions at 76 stacks (as per birthday problem).
static const constexpr int default_table_size = 4099; static const constexpr int default_table_size = 4099;
int _table_size; const int _table_size;
Link** _table; TableEntryIndex* _table;
GrowableArrayCHeap<NativeCallStack, mtNMT> _stacks; GrowableArrayCHeap<NativeCallStack, mtNMT> _stacks;
const bool _is_detailed_mode; const bool _is_detailed_mode;
@ -107,7 +83,7 @@ public:
StackIndex push(const NativeCallStack& stack) { StackIndex push(const NativeCallStack& stack) {
// Not in detailed mode, so not tracking stacks. // Not in detailed mode, so not tracking stacks.
if (!_is_detailed_mode) { if (!_is_detailed_mode) {
return StackIndex(); return StackIndex{StackIndex::invalid};
} }
return put(stack); return put(stack);
} }
@ -119,16 +95,9 @@ public:
return _stacks.at(si._stack_index); return _stacks.at(si._stack_index);
} }
NativeCallStackStorage(bool is_detailed_mode, int table_size = default_table_size) NativeCallStackStorage(bool is_detailed_mode, int table_size = default_table_size);
: _arena(mtNMT), _table_size(table_size), _table(nullptr), _stacks(),
_is_detailed_mode(is_detailed_mode), _fake_stack() { ~NativeCallStackStorage();
if (_is_detailed_mode) {
_table = NEW_ARENA_ARRAY(&_arena, Link*, _table_size);
for (int i = 0; i < _table_size; i++) {
_table[i] = nullptr;
}
}
}
}; };
#endif // SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP #endif // SHARE_NMT_NMTNATIVECALLSTACKSTORAGE_HPP

View File

@ -0,0 +1,153 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "unittest.hpp"
#include "nmt/arrayWithFreeList.hpp"
// Shorthand for the allocator-level tests below: an ArrayWithFreeList of ints.
using A = ArrayWithFreeList<int, mtTest>;
// Empty fixture; exists only to group the ArrayWithFreeList tests.
class ArrayWithFreeListTest : public testing::Test {
};
// A linked list which sets the allocator itself
template<typename E>
struct LL {
struct Node;
using NodeAllocator = ArrayWithFreeList<Node, mtTest>;
using NodePtr = typename NodeAllocator::I;
NodeAllocator alloc;
struct Node {
E e;
NodePtr next;
};
NodePtr start;
LL()
: start{NodeAllocator::nil} {
}
void push(E e) {
NodePtr new_element = alloc.allocate(e, NodeAllocator::nil);
NodePtr& current = start;
if (current == NodeAllocator::nil) {
current = new_element;
return;
}
alloc.at(new_element).next = current;
current = new_element;
};
E pop() {
assert(start != NodeAllocator::nil, "must be");
Node& n = alloc.at(start);
E e = n.e;
NodePtr next_start = n.next;
alloc.deallocate(start);
start = next_start;
return e;
}
};
// A linked list which is capable of having multiple different allocators. This is done through higher-kinded types.
// That's a very fancy word that means that a templated type like Foo<E> can be passed around like only Foo at first
// and then be 'applied' to some E. Think of it like passing around a lambda or function pointer, but on a template level,
// where Foo is a function that can be called on some type with the return type being Foo<E>.
template<typename E, template<typename, MEMFLAGS> class Allocator>
struct LL2 {
struct Node;
using NodeAllocator = Allocator<Node, mtTest>;
using NodePtr = typename NodeAllocator::I;
NodeAllocator alloc;
struct Node {
E e;
NodePtr next;
};
NodePtr start;
LL2()
: start(NodeAllocator::nil) {
}
void push(E e) {
NodePtr new_element = alloc.allocate(e, NodeAllocator::nil);
NodePtr& current = start;
if (current == NodeAllocator::nil) {
current = new_element;
return;
}
alloc.at(new_element).next = current;
current = new_element;
};
E pop() {
assert(start != NodeAllocator::nil, "must be");
Node& n = alloc.at(start);
E e = n.e;
NodePtr next_start = n.next;
alloc.deallocate(start);
start = next_start;
return e;
}
};
// Push two values and check they pop back in LIFO order; works for any list
// type exposing push/pop.
template<typename List>
void test_with_list(List& list) {
  const int first = 1;
  const int second = 2;
  list.push(first);
  list.push(second);
  EXPECT_EQ(second, list.pop());
  EXPECT_EQ(first, list.pop());
}
// Both list variants must behave identically as LIFO stacks.
TEST_VM_F(ArrayWithFreeListTest, TestLinkedLists) {
  {
    // List with the allocator type fixed inside the list.
    LL<int> list;
    test_with_list(list);
  }
  {
    // List parameterized over the allocator template.
    LL2<int, ArrayWithFreeList> list;
    test_with_list(list);
  }
}
// Deallocating the only element and then allocating again must hand back the
// same backing slot (observed via the element's address).
TEST_VM_F(ArrayWithFreeListTest, FreeingShouldReuseMemory) {
  A alloc;
  A::I i = alloc.allocate(1);
  int* x = &alloc.at(i);
  alloc.deallocate(i);
  // This allocation should be served from the freelist, not by growing.
  i = alloc.allocate(1);
  int* y = &alloc.at(i);
  EXPECT_EQ(x, y);
}
// Freeing an element that sits between two live ones must reuse exactly that
// slot on the next allocation.
TEST_VM_F(ArrayWithFreeListTest, FreeingInTheMiddleWorks) {
  A alloc;
  // i0 and i2 are intentionally kept allocated (and otherwise unused) so that
  // i1 is strictly in the middle of the backing array.
  A::I i0 = alloc.allocate(0);
  A::I i1 = alloc.allocate(0);
  A::I i2 = alloc.allocate(0);
  int* p1 = &alloc.at(i1);
  alloc.deallocate(i1);
  // The next allocation should come from the freelist, i.e. i1's old slot.
  A::I i3 = alloc.allocate(0);
  EXPECT_EQ(p1, &alloc.at(i3));
}