8276299: G1: Unify the wording buffer/node/element in G1SegmentedArrayXxx, G1CardSetXxx and related classes
Reviewed-by: tschatzl, ayang, iwalulya
parent 51d6d7a36b
commit 67745fa749

Changed files in src/hotspot/share/gc/g1:
g1Arguments.cpp, g1CardSet.cpp, g1CardSetContainers.hpp, g1CardSetContainers.inline.hpp,
g1CardSetMemory.cpp, g1CardSetMemory.hpp, g1CardSetMemory.inline.hpp, g1EvacFailureObjectsSet.cpp,
g1EvacFailureObjectsSet.hpp, g1RemSetSummary.cpp, g1SegmentedArray.hpp, g1SegmentedArray.inline.hpp,
g1SegmentedArrayFreePool.cpp, g1SegmentedArrayFreePool.hpp
src/hotspot/share/gc/g1/g1Arguments.cpp

@@ -143,7 +143,7 @@ void G1Arguments::initialize_card_set_configuration() {
     // Round to next 8 byte boundary for array to maximize space usage.
     size_t const cur_size = G1CardSetArray::size_in_bytes(G1RemSetArrayOfCardsEntries);
     FLAG_SET_ERGO(G1RemSetArrayOfCardsEntries,
-                  G1RemSetArrayOfCardsEntries + (uint)(align_up(cur_size, G1CardSetAllocOptions::BufferAlignment) - cur_size) / sizeof(G1CardSetArray::EntryDataType));
+                  G1RemSetArrayOfCardsEntries + (uint)(align_up(cur_size, G1CardSetAllocOptions::SlotAlignment) - cur_size) / sizeof(G1CardSetArray::EntryDataType));
 
   // Howl card set container globals.
   if (FLAG_IS_DEFAULT(G1RemSetHowlNumBuckets)) {
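A note on the arithmetic above: the ergonomic adjustment rounds G1RemSetArrayOfCardsEntries up so that the container's byte size lands exactly on the next SlotAlignment boundary, turning would-be padding into extra entries. A minimal standalone C++ sketch of that calculation; the concrete sizes below are assumptions for illustration, not values from the diff:

#include <cstddef>
#include <cstdio>

// align_up as in HotSpot: round value up to a power-of-two alignment.
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t slot_alignment = 8;  // G1CardSetAllocOptions::SlotAlignment
  const size_t entry_size = 2;      // assumed sizeof(G1CardSetArray::EntryDataType)
  size_t entries = 19;              // hypothetical G1RemSetArrayOfCardsEntries
  size_t cur_size = 8 + entries * entry_size;  // hypothetical header + payload = 46
  size_t padding = align_up(cur_size, slot_alignment) - cur_size;  // 48 - 46 = 2
  entries += padding / entry_size;  // padding bytes become one extra entry
  printf("adjusted entries: %zu\n", entries);  // prints 20
  return 0;
}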
src/hotspot/share/gc/g1/g1CardSet.cpp

@@ -125,10 +125,10 @@ void G1CardSetConfiguration::init_card_set_alloc_options() {
 
 void G1CardSetConfiguration::log_configuration() {
   log_debug_p(gc, remset)("Card Set container configuration: "
-                          "InlinePtr #elems %u size %zu "
-                          "Array Of Cards #elems %u size %zu "
+                          "InlinePtr #cards %u size %zu "
+                          "Array Of Cards #cards %u size %zu "
                           "Howl #buckets %u coarsen threshold %u "
-                          "Howl Bitmap #elems %u size %zu coarsen threshold %u "
+                          "Howl Bitmap #cards %u size %zu coarsen threshold %u "
                           "Card regions per heap region %u cards per card region %u",
                           max_cards_in_inline_ptr(), sizeof(void*),
                           max_cards_in_array(), G1CardSetArray::size_in_bytes(max_cards_in_array()),
@@ -199,8 +199,8 @@ void G1CardSetCoarsenStats::print_on(outputStream* out) {
 class G1CardSetHashTable : public CHeapObj<mtGCCardSet> {
   using CardSetPtr = G1CardSet::CardSetPtr;
 
-  // Did we insert at least one element in the table?
-  bool volatile _inserted_elem;
+  // Did we insert at least one card in the table?
+  bool volatile _inserted_card;
 
   G1CardSetMemoryManager* _mm;
   CardSetHash _table;
@@ -247,7 +247,7 @@ public:
 
   G1CardSetHashTable(G1CardSetMemoryManager* mm,
                      size_t initial_log_table_size = InitialLogTableSize) :
-    _inserted_elem(false),
+    _inserted_card(false),
     _mm(mm),
     _table(mm, initial_log_table_size) {
   }
@@ -267,10 +267,10 @@ public:
     G1CardSetHashTableValue value(region_idx, G1CardSetInlinePtr());
     bool inserted = _table.insert_get(Thread::current(), lookup, value, found, should_grow);
 
-    if (!_inserted_elem && inserted) {
+    if (!_inserted_card && inserted) {
       // It does not matter to us who is setting the flag so a regular atomic store
       // is sufficient.
-      Atomic::store(&_inserted_elem, true);
+      Atomic::store(&_inserted_card, true);
     }
 
     return found.value();
@@ -295,9 +295,9 @@ public:
   }
 
   void reset() {
-    if (Atomic::load(&_inserted_elem)) {
+    if (Atomic::load(&_inserted_card)) {
       _table.unsafe_reset(InitialLogTableSize);
-      Atomic::store(&_inserted_elem, false);
+      Atomic::store(&_inserted_card, false);
     }
   }
 
@@ -630,7 +630,7 @@ void G1CardSet::transfer_cards_in_howl(CardSetPtr parent_card_set,
 
   // Need to correct for that the Full remembered set occupies more cards than the
   // bitmap before.
-  // We add 1 element less because the values will be incremented
+  // We add 1 card less because the values will be incremented
   // in G1CardSet::add_card for the current addition or where already incremented in
   // G1CardSet::add_to_howl after coarsening.
   diff -= 1;
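The _inserted_card rename touches a set-once flag idiom worth spelling out: several threads may race to set the flag after a successful insert, and because every writer stores the same value, a plain atomic store suffices, as the retained comment says. A small sketch of the pattern with std::atomic standing in for HotSpot's Atomic wrappers; class and method names here are illustrative:

#include <atomic>

class InsertedFlag {
  std::atomic<bool> _inserted_card{false};

public:
  // Called after every insert attempt; only successful inserts set the flag.
  void note_insert(bool inserted) {
    // Cheap load first: skips the store once the flag is already set.
    if (inserted && !_inserted_card.load(std::memory_order_relaxed)) {
      // It does not matter which thread sets the flag; all store 'true'.
      _inserted_card.store(true, std::memory_order_relaxed);
    }
  }

  // Reset work (e.g. shrinking the table) only happens if anything was inserted.
  bool needs_reset() const {
    return _inserted_card.load(std::memory_order_relaxed);
  }

  void clear() { _inserted_card.store(false, std::memory_order_relaxed); }
};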
src/hotspot/share/gc/g1/g1CardSetContainers.hpp

@@ -95,7 +95,7 @@ class G1CardSetInlinePtr : public StackObj {
     return result;
   }
 
-  uint find(uint const card_idx, uint const bits_per_card, uint start_at, uint num_elems);
+  uint find(uint const card_idx, uint const bits_per_card, uint start_at, uint num_cards);
 
 public:
   G1CardSetInlinePtr() : _value_addr(nullptr), _value((CardSetPtr)G1CardSet::CardSetInlinePtr) { }
@@ -218,7 +218,7 @@ private:
   }
 
 public:
-  G1CardSetArray(uint const card_in_region, EntryCountType num_elems);
+  G1CardSetArray(uint const card_in_region, EntryCountType num_cards);
 
   G1AddCardResult add(uint card_idx);
 
src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp

@@ -50,19 +50,19 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
 
   uint cur_idx = 0;
   while (true) {
-    uint num_elems = num_cards_in(_value);
-    if (num_elems > 0) {
-      cur_idx = find(card_idx, bits_per_card, cur_idx, num_elems);
+    uint num_cards = num_cards_in(_value);
+    if (num_cards > 0) {
+      cur_idx = find(card_idx, bits_per_card, cur_idx, num_cards);
     }
     // Check if the card is already stored in the pointer.
-    if (cur_idx < num_elems) {
+    if (cur_idx < num_cards) {
       return Found;
     }
     // Check if there is actually enough space.
-    if (num_elems >= max_cards_in_inline_ptr) {
+    if (num_cards >= max_cards_in_inline_ptr) {
       return Overflow;
     }
-    CardSetPtr new_value = merge(_value, card_idx, num_elems, bits_per_card);
+    CardSetPtr new_value = merge(_value, card_idx, num_cards, bits_per_card);
     CardSetPtr old_value = Atomic::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed);
     if (_value == old_value) {
       return Added;
@@ -77,38 +77,38 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card
   }
 }
 
-inline uint G1CardSetInlinePtr::find(uint card_idx, uint bits_per_card, uint start_at, uint num_elems) {
-  assert(start_at < num_elems, "Precondition!");
+inline uint G1CardSetInlinePtr::find(uint card_idx, uint bits_per_card, uint start_at, uint num_cards) {
+  assert(start_at < num_cards, "Precondition!");
 
   uintptr_t const card_mask = (1 << bits_per_card) - 1;
   uintptr_t value = ((uintptr_t)_value) >> card_pos_for(start_at, bits_per_card);
 
   // Check if the card is already stored in the pointer.
-  for (uint cur_idx = start_at; cur_idx < num_elems; cur_idx++) {
+  for (uint cur_idx = start_at; cur_idx < num_cards; cur_idx++) {
     if ((value & card_mask) == card_idx) {
       return cur_idx;
     }
     value >>= bits_per_card;
   }
-  return num_elems;
+  return num_cards;
 }
 
 inline bool G1CardSetInlinePtr::contains(uint card_idx, uint bits_per_card) {
-  uint num_elems = num_cards_in(_value);
-  if (num_elems == 0) {
+  uint num_cards = num_cards_in(_value);
+  if (num_cards == 0) {
     return false;
   }
-  uint cur_idx = find(card_idx, bits_per_card, 0, num_elems);
-  return cur_idx < num_elems;
+  uint cur_idx = find(card_idx, bits_per_card, 0, num_cards);
+  return cur_idx < num_cards;
 }
 
 template <class CardVisitor>
 inline void G1CardSetInlinePtr::iterate(CardVisitor& found, uint bits_per_card) {
-  uint const num_elems = num_cards_in(_value);
+  uint const num_cards = num_cards_in(_value);
   uintptr_t const card_mask = (1 << bits_per_card) - 1;
 
   uintptr_t value = ((uintptr_t)_value) >> card_pos_for(0, bits_per_card);
-  for (uint cur_idx = 0; cur_idx < num_elems; cur_idx++) {
+  for (uint cur_idx = 0; cur_idx < num_cards; cur_idx++) {
     found(value & card_mask);
     value >>= bits_per_card;
   }
@@ -136,9 +136,9 @@ inline uintptr_t G1CardSetContainer::decrement_refcount() {
   return Atomic::sub(&_ref_count, 2u);
 }
 
-inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_elems) :
+inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) :
   G1CardSetContainer(),
-  _size(num_elems),
+  _size(num_cards),
   _num_entries(1) {
   assert(_size > 0, "CardSetArray of size 0 not supported.");
   assert(_size < LockBitMask, "Only support CardSetArray of size %u or smaller.", LockBitMask - 1);
@@ -166,7 +166,7 @@ inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType
 
 inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
   assert(card_idx < (1u << (sizeof(_data[0]) * BitsPerByte)),
-         "Card index %u does not fit card element.", card_idx);
+         "Card index %u does not fit allowed card value range.", card_idx);
   EntryCountType num_entries = Atomic::load_acquire(&_num_entries) & EntryMask;
   EntryCountType idx = 0;
   for (; idx < num_entries; idx++) {
@@ -181,7 +181,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) {
     // Reload number of entries from the G1CardSetArrayLocker as it might have changed.
     // It already read the actual value with the necessary synchronization.
     num_entries = x.num_entries();
-    // Look if the elements added while waiting for the lock are the same as our card.
+    // Look if the cards added while waiting for the lock are the same as our card.
     for (; idx < num_entries; idx++) {
       if (_data[idx] == card_idx) {
         return Found;
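For context on the num_cards parameters renamed above: G1CardSetInlinePtr packs several small card indices into one pointer-sized word, bits_per_card bits each. A simplified C++ sketch of the packing and lookup; the real class additionally reserves low bits for a type tag and a size field (handled by card_pos_for), which this sketch omits, and bits_per_card here is an assumed example value:

#include <cstdint>

const unsigned bits_per_card = 10;  // assumption for the example
const uintptr_t card_mask = (uintptr_t(1) << bits_per_card) - 1;

// Append card_idx as the num_cards-th entry (caller checks capacity).
uintptr_t merge(uintptr_t value, unsigned card_idx, unsigned num_cards) {
  unsigned shift = num_cards * bits_per_card;
  return value | ((uintptr_t(card_idx) & card_mask) << shift);
}

// Return the position of card_idx, or num_cards if it is not stored.
unsigned find(uintptr_t value, unsigned num_cards, unsigned card_idx) {
  for (unsigned cur_idx = 0; cur_idx < num_cards; cur_idx++) {
    if ((value & card_mask) == card_idx) {
      return cur_idx;
    }
    value >>= bits_per_card;
  }
  return num_cards;
}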
src/hotspot/share/gc/g1/g1CardSetMemory.cpp

@@ -30,28 +30,28 @@
 #include "runtime/atomic.hpp"
 #include "utilities/ostream.hpp"
 
-template <class Elem>
-G1CardSetAllocator<Elem>::G1CardSetAllocator(const char* name,
-                                             const G1CardSetAllocOptions* buffer_options,
-                                             G1CardSetBufferList* free_buffer_list) :
-  _segmented_array(buffer_options, free_buffer_list),
+template <class Slot>
+G1CardSetAllocator<Slot>::G1CardSetAllocator(const char* name,
+                                             const G1CardSetAllocOptions* alloc_options,
+                                             G1CardSetFreeList* free_segment_list) :
+  _segmented_array(alloc_options, free_segment_list),
   _transfer_lock(false),
-  _free_nodes_list(),
-  _pending_nodes_list(),
-  _num_pending_nodes(0),
-  _num_free_nodes(0)
+  _free_slots_list(),
+  _pending_slots_list(),
+  _num_pending_slots(0),
+  _num_free_slots(0)
 {
-  uint elem_size = _segmented_array.elem_size();
-  assert(elem_size >= sizeof(G1CardSetContainer), "Element instance size %u for allocator %s too small", elem_size, name);
+  uint slot_size = _segmented_array.slot_size();
+  assert(slot_size >= sizeof(G1CardSetContainer), "Slot instance size %u for allocator %s too small", slot_size, name);
 }
 
-template <class Elem>
-G1CardSetAllocator<Elem>::~G1CardSetAllocator() {
+template <class Slot>
+G1CardSetAllocator<Slot>::~G1CardSetAllocator() {
   drop_all();
 }
 
-template <class Elem>
-bool G1CardSetAllocator<Elem>::try_transfer_pending() {
+template <class Slot>
+bool G1CardSetAllocator<Slot>::try_transfer_pending() {
   // Attempt to claim the lock.
   if (Atomic::load_acquire(&_transfer_lock) || // Skip CAS if likely to fail.
       Atomic::cmpxchg(&_transfer_lock, false, true)) {
@@ -59,13 +59,13 @@ bool G1CardSetAllocator<Elem>::try_transfer_pending() {
   }
   // Have the lock; perform the transfer.
 
-  // Claim all the pending nodes.
-  G1CardSetContainer* first = _pending_nodes_list.pop_all();
+  // Claim all the pending slots.
+  G1CardSetContainer* first = _pending_slots_list.pop_all();
 
   if (first != nullptr) {
-    // Prepare to add the claimed nodes, and update _num_pending_nodes.
+    // Prepare to add the claimed slots, and update _num_pending_slots.
     G1CardSetContainer* last = first;
-    Atomic::load_acquire(&_num_pending_nodes);
+    Atomic::load_acquire(&_num_pending_slots);
 
     uint count = 1;
     for (G1CardSetContainer* next = first->next(); next != nullptr; next = next->next()) {
@@ -73,70 +73,70 @@ bool G1CardSetAllocator<Elem>::try_transfer_pending() {
       ++count;
     }
 
-    Atomic::sub(&_num_pending_nodes, count);
+    Atomic::sub(&_num_pending_slots, count);
 
     // Wait for any in-progress pops to avoid ABA for them.
     GlobalCounter::write_synchronize();
-    // Add synchronized nodes to _free_node_list.
+    // Add synchronized slots to _free_slots_list.
     // Update count first so there can be no underflow in allocate().
-    Atomic::add(&_num_free_nodes, count);
-    _free_nodes_list.prepend(*first, *last);
+    Atomic::add(&_num_free_slots, count);
+    _free_slots_list.prepend(*first, *last);
   }
   Atomic::release_store(&_transfer_lock, false);
   return true;
 }
 
-template <class Elem>
-void G1CardSetAllocator<Elem>::free(Elem* elem) {
-  assert(elem != nullptr, "precondition");
+template <class Slot>
+void G1CardSetAllocator<Slot>::free(Slot* slot) {
+  assert(slot != nullptr, "precondition");
   // Desired minimum transfer batch size. There is relatively little
   // importance to the specific number. It shouldn't be too big, else
   // we're wasting space when the release rate is low. If the release
   // rate is high, we might accumulate more than this before being
   // able to start a new transfer, but that's okay. Also note that
   // the allocation rate and the release rate are going to be fairly
-  // similar, due to how the buffers are used. - kbarret
+  // similar, due to how the slots are used. - kbarret
   uint const trigger_transfer = 10;
 
-  uint pending_count = Atomic::add(&_num_pending_nodes, 1u, memory_order_relaxed);
+  uint pending_count = Atomic::add(&_num_pending_slots, 1u, memory_order_relaxed);
 
-  G1CardSetContainer* node = reinterpret_cast<G1CardSetContainer*>(reinterpret_cast<char*>(elem));
+  G1CardSetContainer* container = reinterpret_cast<G1CardSetContainer*>(reinterpret_cast<char*>(slot));
 
-  node->set_next(nullptr);
-  assert(node->next() == nullptr, "precondition");
+  container->set_next(nullptr);
+  assert(container->next() == nullptr, "precondition");
 
-  _pending_nodes_list.push(*node);
+  _pending_slots_list.push(*container);
 
   if (pending_count > trigger_transfer) {
    try_transfer_pending();
  }
 }
 
-template <class Elem>
-void G1CardSetAllocator<Elem>::drop_all() {
-  _free_nodes_list.pop_all();
-  _pending_nodes_list.pop_all();
-  _num_pending_nodes = 0;
-  _num_free_nodes = 0;
+template <class Slot>
+void G1CardSetAllocator<Slot>::drop_all() {
+  _free_slots_list.pop_all();
+  _pending_slots_list.pop_all();
+  _num_pending_slots = 0;
+  _num_free_slots = 0;
   _segmented_array.drop_all();
 }
 
-template <class Elem>
-void G1CardSetAllocator<Elem>::print(outputStream* os) {
-  uint num_allocated_nodes = _segmented_array.num_allocated_nodes();
-  uint num_available_nodes = _segmented_array.num_available_nodes();
-  uint highest = _segmented_array.first_array_buffer() != nullptr
-               ? _segmented_array.first_array_buffer()->num_elems()
+template <class Slot>
+void G1CardSetAllocator<Slot>::print(outputStream* os) {
+  uint num_allocated_slots = _segmented_array.num_allocated_slots();
+  uint num_available_slots = _segmented_array.num_available_slots();
+  uint highest = _segmented_array.first_array_segment() != nullptr
+               ? _segmented_array.first_array_segment()->num_slots()
                : 0;
-  uint num_buffers = _segmented_array.num_buffers();
-  os->print("MA " PTR_FORMAT ": %u elems pending (allocated %u available %u) used %.3f highest %u buffers %u size %zu ",
+  uint num_segments = _segmented_array.num_segments();
+  os->print("MA " PTR_FORMAT ": %u slots pending (allocated %u available %u) used %.3f highest %u segments %u size %zu ",
             p2i(this),
-            _num_pending_nodes,
-            num_allocated_nodes,
-            num_available_nodes,
-            percent_of(num_allocated_nodes - _num_pending_nodes, num_available_nodes),
+            _num_pending_slots,
+            num_allocated_slots,
+            num_available_slots,
+            percent_of(num_allocated_slots - _num_pending_slots, num_available_slots),
             highest,
-            num_buffers,
+            num_segments,
             mem_size());
 }
 
@@ -205,7 +205,7 @@ G1SegmentedArrayMemoryStats G1CardSetMemoryManager::memory_stats() const {
   G1SegmentedArrayMemoryStats result;
   for (uint i = 0; i < num_mem_object_types(); i++) {
     result._num_mem_sizes[i] += _allocators[i].mem_size();
-    result._num_segments[i] += _allocators[i].num_buffers();
+    result._num_segments[i] += _allocators[i].num_segments();
   }
   return result;
 }
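The try_transfer_pending() rename leaves the locking idiom intact: a single try-lock serializes transfers, and the acquire load before the CAS avoids taking the cache line exclusive when the lock is probably held. A compact C++ sketch of that idiom with std::atomic standing in for HotSpot's Atomic wrappers, with the pop_all/prepend plumbing elided:

#include <atomic>

std::atomic<bool> transfer_lock{false};

bool try_transfer_pending() {
  // Attempt to claim the lock; skip the CAS if it is likely to fail.
  bool expected = false;
  if (transfer_lock.load(std::memory_order_acquire) ||
      !transfer_lock.compare_exchange_strong(expected, true)) {
    return false;  // another thread is already transferring
  }
  // Have the lock: claim the pending slots, wait out in-progress pops
  // (GlobalCounter::write_synchronize() in HotSpot), then prepend the
  // whole chain to the free list and fix up the counters.
  transfer_lock.store(false, std::memory_order_release);
  return true;
}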
src/hotspot/share/gc/g1/g1CardSetMemory.hpp

@@ -37,43 +37,43 @@ class G1CardSetConfiguration;
 class outputStream;
 
 // Collects G1CardSetAllocator options/heuristics. Called by G1CardSetAllocator
-// to determine the next size of the allocated G1CardSetBuffer.
+// to determine the next size of the allocated G1CardSetSegment.
 class G1CardSetAllocOptions : public G1SegmentedArrayAllocOptions {
-  static const uint MinimumBufferSize = 8;
-  static const uint MaximumBufferSize = UINT_MAX / 2;
+  static const uint MinimumNumSlots = 8;
+  static const uint MaximumNumSlots = UINT_MAX / 2;
 
-  uint exponential_expand(uint prev_num_elems) const {
-    return clamp(prev_num_elems * 2, _initial_num_elems, _max_num_elems);
+  uint exponential_expand(uint prev_num_slots) const {
+    return clamp(prev_num_slots * 2, _initial_num_slots, _max_num_slots);
   }
 
 public:
-  static const uint BufferAlignment = 8;
+  static const uint SlotAlignment = 8;
 
-  G1CardSetAllocOptions(uint elem_size, uint initial_num_elems = MinimumBufferSize, uint max_num_elems = MaximumBufferSize) :
-    G1SegmentedArrayAllocOptions(align_up(elem_size, BufferAlignment), initial_num_elems, max_num_elems, BufferAlignment) {
+  G1CardSetAllocOptions(uint slot_size, uint initial_num_slots = MinimumNumSlots, uint max_num_slots = MaximumNumSlots) :
+    G1SegmentedArrayAllocOptions(align_up(slot_size, SlotAlignment), initial_num_slots, max_num_slots, SlotAlignment) {
   }
 
-  virtual uint next_num_elems(uint prev_num_elems) const override {
-    return exponential_expand(prev_num_elems);
+  virtual uint next_num_slots(uint prev_num_slots) const override {
+    return exponential_expand(prev_num_slots);
   }
 };
 
-typedef G1SegmentedArrayBuffer<mtGCCardSet> G1CardSetBuffer;
+typedef G1SegmentedArraySegment<mtGCCardSet> G1CardSetSegment;
 
-typedef G1SegmentedArrayBufferList<mtGCCardSet> G1CardSetBufferList;
+typedef G1SegmentedArrayFreeList<mtGCCardSet> G1CardSetFreeList;
 
-// Arena-like allocator for (card set) heap memory objects (Elem elements).
+// Arena-like allocator for (card set) heap memory objects (Slot slots).
 //
 // Allocation and deallocation in the first phase on G1CardSetContainer basis
 // may occur by multiple threads at once.
 //
 // Allocation occurs from an internal free list of G1CardSetContainers first,
-// only then trying to bump-allocate from the current G1CardSetBuffer. If there is
-// none, this class allocates a new G1CardSetBuffer (allocated from the C heap,
+// only then trying to bump-allocate from the current G1CardSetSegment. If there is
+// none, this class allocates a new G1CardSetSegment (allocated from the C heap,
 // asking the G1CardSetAllocOptions instance about sizes etc) and uses that one.
 //
-// The NodeStack free list is a linked list of G1CardSetContainers
-// within all G1CardSetBuffer instances allocated so far. It uses a separate
+// The SegmentStack free list is a linked list of G1CardSetContainers
+// within all G1CardSetSegment instances allocated so far. It uses a separate
 // pending list and global synchronization to avoid the ABA problem when the
 // user frees a memory object.
 //
@@ -84,54 +84,56 @@ typedef G1SegmentedArrayBufferList<mtGCCardSet> G1CardSetBufferList;
 // Since it is expected that every CardSet (and in extension each region) has its
 // own set of allocators, there is intentionally no padding between them to save
 // memory.
-template <class Elem>
+template <class Slot>
 class G1CardSetAllocator {
-  // G1CardSetBuffer management.
+  // G1CardSetSegment management.
 
-  typedef G1SegmentedArray<Elem, mtGCCardSet> SegmentedArray;
-  // G1CardSetContainer node management within the G1CardSetBuffers allocated
+  typedef G1SegmentedArray<Slot, mtGCCardSet> SegmentedArray;
+  // G1CardSetContainer slot management within the G1CardSetSegments allocated
   // by this allocator.
-  static G1CardSetContainer* volatile* next_ptr(G1CardSetContainer& node);
-  typedef LockFreeStack<G1CardSetContainer, &G1CardSetAllocator::next_ptr> NodeStack;
+  static G1CardSetContainer* volatile* next_ptr(G1CardSetContainer& slot);
+  typedef LockFreeStack<G1CardSetContainer, &G1CardSetAllocator::next_ptr> SlotStack;
 
   SegmentedArray _segmented_array;
   volatile bool _transfer_lock;
-  NodeStack _free_nodes_list;
-  NodeStack _pending_nodes_list;
+  SlotStack _free_slots_list;
+  SlotStack _pending_slots_list;
 
-  volatile uint _num_pending_nodes; // Number of nodes in the pending list.
-  volatile uint _num_free_nodes;    // Number of nodes in the free list.
+  volatile uint _num_pending_slots; // Number of slots in the pending list.
+  volatile uint _num_free_slots;    // Number of slots in the free list.
 
-  // Try to transfer nodes from _pending_nodes_list to _free_nodes_list, with a
-  // synchronization delay for any in-progress pops from the _free_nodes_list
+  // Try to transfer slots from _pending_slots_list to _free_slots_list, with a
+  // synchronization delay for any in-progress pops from the _free_slots_list
   // to solve ABA here.
   bool try_transfer_pending();
 
-  uint num_free_elems() const;
+  uint num_free_slots() const;
 
 public:
   G1CardSetAllocator(const char* name,
-                     const G1CardSetAllocOptions* buffer_options,
-                     G1CardSetBufferList* free_buffer_list);
+                     const G1CardSetAllocOptions* alloc_options,
+                     G1CardSetFreeList* free_segment_list);
   ~G1CardSetAllocator();
 
-  Elem* allocate();
-  void free(Elem* elem);
+  Slot* allocate();
+  void free(Slot* slot);
 
-  // Deallocate all buffers to the free buffer list and reset this allocator. Must
+  // Deallocate all segments to the free segment list and reset this allocator. Must
   // be called in a globally synchronized area.
   void drop_all();
 
   size_t mem_size() const {
     return sizeof(*this) +
-      _segmented_array.num_buffers() * sizeof(G1CardSetBuffer) + _segmented_array.num_available_nodes() * _segmented_array.elem_size();
+      _segmented_array.num_segments() * sizeof(G1CardSetSegment) + _segmented_array.num_available_slots() *
+      _segmented_array.slot_size();
   }
 
   size_t wasted_mem_size() const {
-    return (_segmented_array.num_available_nodes() - (_segmented_array.num_allocated_nodes() - _num_pending_nodes)) * _segmented_array.elem_size();
+    return (_segmented_array.num_available_slots() - (_segmented_array.num_allocated_slots() - _num_pending_slots)) *
+      _segmented_array.slot_size();
   }
 
-  inline uint num_buffers() { return _segmented_array.num_buffers(); }
+  inline uint num_segments() { return _segmented_array.num_segments(); }
 
   void print(outputStream* os);
 };
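The renamed growth heuristic is unchanged in substance: each new segment doubles the slot count of the previous one, clamped between the initial and maximum counts. A one-function C++ sketch, with std::clamp playing the role of HotSpot's clamp:

#include <algorithm>

// MinimumNumSlots = 8 and MaximumNumSlots = UINT_MAX / 2 in the code above.
unsigned next_num_slots(unsigned prev_num_slots,
                        unsigned initial_num_slots,
                        unsigned max_num_slots) {
  // First segment: prev_num_slots == 0, so the clamp yields initial_num_slots.
  return std::clamp(prev_num_slots * 2, initial_num_slots, max_num_slots);
}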
src/hotspot/share/gc/g1/g1CardSetMemory.inline.hpp

@@ -33,32 +33,32 @@
 #include "gc/g1/g1CardSetContainers.inline.hpp"
 #include "utilities/globalCounter.inline.hpp"
 
-template <class Elem>
-G1CardSetContainer* volatile* G1CardSetAllocator<Elem>::next_ptr(G1CardSetContainer& node) {
-  return node.next_addr();
+template <class Slot>
+G1CardSetContainer* volatile* G1CardSetAllocator<Slot>::next_ptr(G1CardSetContainer& slot) {
+  return slot.next_addr();
 }
 
-template <class Elem>
-Elem* G1CardSetAllocator<Elem>::allocate() {
-  assert(_segmented_array.elem_size() > 0, "instance size not set.");
+template <class Slot>
+Slot* G1CardSetAllocator<Slot>::allocate() {
+  assert(_segmented_array.slot_size() > 0, "instance size not set.");
 
-  if (num_free_elems() > 0) {
+  if (num_free_slots() > 0) {
     // Pop under critical section to deal with ABA problem
     // Other solutions to the same problem are more complicated (ref counting, HP)
     GlobalCounter::CriticalSection cs(Thread::current());
 
-    G1CardSetContainer* node = _free_nodes_list.pop();
-    if (node != nullptr) {
-      Elem* elem = reinterpret_cast<Elem*>(reinterpret_cast<char*>(node));
-      Atomic::sub(&_num_free_nodes, 1u);
-      guarantee(is_aligned(elem, 8), "result " PTR_FORMAT " not aligned", p2i(elem));
-      return elem;
+    G1CardSetContainer* container = _free_slots_list.pop();
+    if (container != nullptr) {
+      Slot* slot = reinterpret_cast<Slot*>(reinterpret_cast<char*>(container));
+      Atomic::sub(&_num_free_slots, 1u);
+      guarantee(is_aligned(slot, 8), "result " PTR_FORMAT " not aligned", p2i(slot));
+      return slot;
     }
   }
 
-  Elem* elem = _segmented_array.allocate();
-  assert(elem != nullptr, "must be");
-  return elem;
+  Slot* slot = _segmented_array.allocate();
+  assert(slot != nullptr, "must be");
+  return slot;
 }
 
 inline uint8_t* G1CardSetMemoryManager::allocate(uint type) {
@@ -74,9 +74,9 @@ inline void G1CardSetMemoryManager::free_node(void* value) {
   free(0, value);
 }
 
-template <class Elem>
-inline uint G1CardSetAllocator<Elem>::num_free_elems() const {
-  return Atomic::load(&_num_free_nodes);
+template <class Slot>
+inline uint G1CardSetAllocator<Slot>::num_free_slots() const {
+  return Atomic::load(&_num_free_slots);
 }
 
 #endif // SHARE_GC_G1_G1CARDSETMEMORY_INLINE_HPP
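The allocate() path above is two-level: first try to reuse a freed slot from the free list, and only then bump-allocate from the segmented array. A single-threaded C++ stand-in showing that shape; the real code guards the pop with a GlobalCounter critical section to defuse ABA, which this sketch deliberately omits:

#include <cstdlib>
#include <stack>

template <class Slot>
struct TwoLevelAllocator {
  std::stack<Slot*> free_slots;  // stand-in for the lock-free _free_slots_list

  Slot* bump_allocate() {        // stand-in for _segmented_array.allocate()
    return static_cast<Slot*>(std::malloc(sizeof(Slot)));
  }

  Slot* allocate() {
    if (!free_slots.empty()) {   // fast path: reuse a freed slot
      Slot* slot = free_slots.top();
      free_slots.pop();
      return slot;
    }
    return bump_allocate();      // slow path: fresh slot from the current segment
  }

  void free(Slot* slot) { free_slots.push(slot); }
};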
src/hotspot/share/gc/g1/g1EvacFailureObjectsSet.cpp

@@ -31,9 +31,9 @@
 
 
 const G1SegmentedArrayAllocOptions G1EvacFailureObjectsSet::_alloc_options =
-  G1SegmentedArrayAllocOptions((uint)sizeof(OffsetInRegion), BufferLength, UINT_MAX, Alignment);
+  G1SegmentedArrayAllocOptions((uint)sizeof(OffsetInRegion), SegmentLength, UINT_MAX, Alignment);
 
-G1SegmentedArrayBufferList<mtGC> G1EvacFailureObjectsSet::_free_buffer_list;
+G1SegmentedArrayFreeList<mtGC> G1EvacFailureObjectsSet::_free_segment_list;
 
 #ifdef ASSERT
 void G1EvacFailureObjectsSet::assert_is_valid_offset(size_t offset) const {
@@ -57,7 +57,7 @@ G1EvacFailureObjectsSet::OffsetInRegion G1EvacFailureObjectsSet::to_offset(oop o
 G1EvacFailureObjectsSet::G1EvacFailureObjectsSet(uint region_idx, HeapWord* bottom) :
   DEBUG_ONLY(_region_idx(region_idx) COMMA)
   _bottom(bottom),
-  _offsets(&_alloc_options, &_free_buffer_list) {
+  _offsets(&_alloc_options, &_free_segment_list) {
   assert(HeapRegion::LogOfHRGrainBytes < 32, "must be");
 }
 
@@ -76,7 +76,7 @@ class G1EvacFailureObjectsIterationHelper {
   }
 
   void join_and_sort() {
-    _segments->iterate_nodes(*this);
+    _segments->iterate_segments(*this);
 
     QuickSort::sort(_offset_array, _array_length, order_oop, true);
   }
@@ -96,7 +96,7 @@ public:
     _array_length(0) { }
 
   void process_and_drop(ObjectClosure* closure) {
-    uint num = _segments->num_allocated_nodes();
+    uint num = _segments->num_allocated_slots();
     _offset_array = NEW_C_HEAP_ARRAY(OffsetInRegion, num, mtGC);
 
     join_and_sort();
@@ -106,9 +106,9 @@ public:
     FREE_C_HEAP_ARRAY(OffsetInRegion, _offset_array);
   }
 
-  // Callback of G1SegmentedArray::iterate_nodes
-  void do_buffer(G1SegmentedArrayBuffer<mtGC>* node, uint length) {
-    node->copy_to(&_offset_array[_array_length]);
+  // Callback of G1SegmentedArray::iterate_segments
+  void do_segment(G1SegmentedArraySegment<mtGC>* segment, uint length) {
+    segment->copy_to(&_offset_array[_array_length]);
     _array_length += length;
   }
 };
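The do_segment callback above is the gather half of a gather-then-sort flow: copy each segment's valid prefix into one flat array, then sort it once. A simplified C++ sketch with standard containers; OffsetInRegion is a plain uint in the real code, and SegmentView is an illustrative stand-in for the segment list:

#include <algorithm>
#include <cstdint>
#include <vector>

struct SegmentView {
  const uint32_t* data;  // segment payload
  uint32_t length;       // number of valid entries
};

std::vector<uint32_t> join_and_sort(const std::vector<SegmentView>& segments) {
  std::vector<uint32_t> offsets;
  for (const SegmentView& segment : segments) {  // iterate_segments
    offsets.insert(offsets.end(),
                   segment.data, segment.data + segment.length);  // copy_to
  }
  std::sort(offsets.begin(), offsets.end());     // QuickSort::sort in HotSpot
  return offsets;
}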
src/hotspot/share/gc/g1/g1EvacFailureObjectsSet.hpp

@@ -33,7 +33,7 @@ class G1EvacFailureObjectsIterationHelper;
 
 // This class collects addresses of objects that failed evacuation in a specific
 // heap region.
-// Provides sorted iteration of these elements for processing during the remove
+// Provides sorted iteration of these objects for processing during the remove
 // self forwards phase.
 class G1EvacFailureObjectsSet {
   friend class G1EvacFailureObjectsIterationHelper;
@@ -45,13 +45,13 @@ public:
   typedef uint OffsetInRegion;
 
 private:
-  static const uint BufferLength = 256;
+  static const uint SegmentLength = 256;
   static const uint Alignment = 4;
 
   static const G1SegmentedArrayAllocOptions _alloc_options;
 
   // This free list is shared among evacuation failure process in all regions.
-  static G1SegmentedArrayBufferList<mtGC> _free_buffer_list;
+  static G1SegmentedArrayFreeList<mtGC> _free_segment_list;
 
   DEBUG_ONLY(const uint _region_idx;)
 
src/hotspot/share/gc/g1/g1RemSetSummary.cpp

@@ -319,7 +319,7 @@ public:
   }
 
   out->print_cr("  Region with largest amount of code roots = " HR_FORMAT ", "
-                "size = " SIZE_FORMAT "%s, num_elems = " SIZE_FORMAT ".",
+                "size = " SIZE_FORMAT "%s, num_slots = " SIZE_FORMAT ".",
                 HR_FORMAT_PARAMS(max_code_root_mem_sz_region()),
                 byte_size_in_proper_unit(max_code_root_rem_set->strong_code_roots_mem_size()),
                 proper_unit_for_byte_size(max_code_root_rem_set->strong_code_roots_mem_size()),
src/hotspot/share/gc/g1/g1SegmentedArray.hpp

@@ -29,147 +29,147 @@
 #include "memory/allocation.hpp"
 #include "utilities/lockFreeStack.hpp"
 
-// A single buffer/arena containing _num_elems blocks of memory of _elem_size.
-// G1SegmentedArrayBuffers can be linked together using a singly linked list.
+// A single segment/arena containing _num_slots blocks of memory of _slot_size.
+// G1SegmentedArraySegments can be linked together using a singly linked list.
 template<MEMFLAGS flag>
-class G1SegmentedArrayBuffer : public CHeapObj<flag> {
-  const uint _elem_size;
-  const uint _num_elems;
+class G1SegmentedArraySegment : public CHeapObj<flag> {
+  const uint _slot_size;
+  const uint _num_slots;
 
-  G1SegmentedArrayBuffer* volatile _next;
+  G1SegmentedArraySegment* volatile _next;
 
-  char* _buffer;  // Actual data.
+  char* _segment;  // Actual data.
 
-  // Index into the next free block to allocate into. Full if equal (or larger)
-  // to _num_elems (can be larger because we atomically increment this value and
+  // Index into the next free slot to allocate into. Full if equal (or larger)
+  // to _num_slots (can be larger because we atomically increment this value and
   // check only afterwards if the allocation has been successful).
   uint volatile _next_allocate;
 
 public:
-  G1SegmentedArrayBuffer(uint elem_size, uint num_elems, G1SegmentedArrayBuffer* next);
-  ~G1SegmentedArrayBuffer();
+  G1SegmentedArraySegment(uint slot_size, uint num_slots, G1SegmentedArraySegment* next);
+  ~G1SegmentedArraySegment();
 
-  G1SegmentedArrayBuffer* volatile* next_addr() { return &_next; }
+  G1SegmentedArraySegment* volatile* next_addr() { return &_next; }
 
-  void* get_new_buffer_elem();
+  void* get_new_slot();
 
-  uint num_elems() const { return _num_elems; }
+  uint num_slots() const { return _num_slots; }
 
-  G1SegmentedArrayBuffer* next() const { return _next; }
+  G1SegmentedArraySegment* next() const { return _next; }
 
-  void set_next(G1SegmentedArrayBuffer* next) {
+  void set_next(G1SegmentedArraySegment* next) {
     assert(next != this, " loop condition");
     _next = next;
   }
 
-  void reset(G1SegmentedArrayBuffer* next) {
+  void reset(G1SegmentedArraySegment* next) {
     _next_allocate = 0;
     assert(next != this, " loop condition");
     set_next(next);
-    memset((void*)_buffer, 0, (size_t)_num_elems * _elem_size);
+    memset((void*)_segment, 0, (size_t)_num_slots * _slot_size);
   }
 
-  uint elem_size() const { return _elem_size; }
+  uint slot_size() const { return _slot_size; }
 
-  size_t mem_size() const { return sizeof(*this) + (size_t)_num_elems * _elem_size; }
+  size_t mem_size() const { return sizeof(*this) + (size_t)_num_slots * _slot_size; }
 
   uint length() const {
-    // _next_allocate might grow larger than _num_elems in multi-thread environments
+    // _next_allocate might grow larger than _num_slots in multi-thread environments
     // due to races.
-    return MIN2(_next_allocate, _num_elems);
+    return MIN2(_next_allocate, _num_slots);
   }
 
-  // Copies the (valid) contents of this buffer into the destination.
+  // Copies the (valid) contents of this segment into the destination.
   void copy_to(void* dest) const {
-    ::memcpy(dest, _buffer, length() * _elem_size);
+    ::memcpy(dest, _segment, length() * _slot_size);
   }
 
-  bool is_full() const { return _next_allocate >= _num_elems; }
+  bool is_full() const { return _next_allocate >= _num_slots; }
 };
 
-// Set of (free) G1SegmentedArrayBuffers. The assumed usage is that allocation
-// to it and removal of elements is strictly separate, but every action may be
+// Set of (free) G1SegmentedArraySegments. The assumed usage is that allocation
+// to it and removal of segments is strictly separate, but every action may be
 // performed by multiple threads at the same time.
 // Counts and memory usage are current on a best-effort basis if accessed concurrently.
 template<MEMFLAGS flag>
-class G1SegmentedArrayBufferList {
-  static G1SegmentedArrayBuffer<flag>* volatile* next_ptr(G1SegmentedArrayBuffer<flag>& node) {
-    return node.next_addr();
+class G1SegmentedArrayFreeList {
+  static G1SegmentedArraySegment<flag>* volatile* next_ptr(G1SegmentedArraySegment<flag>& segment) {
+    return segment.next_addr();
   }
-  typedef LockFreeStack<G1SegmentedArrayBuffer<flag>, &G1SegmentedArrayBufferList::next_ptr> NodeStack;
+  typedef LockFreeStack<G1SegmentedArraySegment<flag>, &G1SegmentedArrayFreeList::next_ptr> SegmentStack;
 
-  NodeStack _list;
+  SegmentStack _list;
 
-  volatile size_t _num_buffers;
+  volatile size_t _num_segments;
   volatile size_t _mem_size;
 
 public:
-  G1SegmentedArrayBufferList() : _list(), _num_buffers(0), _mem_size(0) { }
-  ~G1SegmentedArrayBufferList() { free_all(); }
+  G1SegmentedArrayFreeList() : _list(), _num_segments(0), _mem_size(0) { }
+  ~G1SegmentedArrayFreeList() { free_all(); }
 
-  void bulk_add(G1SegmentedArrayBuffer<flag>& first, G1SegmentedArrayBuffer<flag>& last, size_t num, size_t mem_size);
+  void bulk_add(G1SegmentedArraySegment<flag>& first, G1SegmentedArraySegment<flag>& last, size_t num, size_t mem_size);
 
-  G1SegmentedArrayBuffer<flag>* get();
-  G1SegmentedArrayBuffer<flag>* get_all(size_t& num_buffers, size_t& mem_size);
+  G1SegmentedArraySegment<flag>* get();
+  G1SegmentedArraySegment<flag>* get_all(size_t& num_segments, size_t& mem_size);
 
   // Give back all memory to the OS.
   void free_all();
 
   void print_on(outputStream* out, const char* prefix = "");
 
-  size_t num_buffers() const { return Atomic::load(&_num_buffers); }
+  size_t num_segments() const { return Atomic::load(&_num_segments); }
   size_t mem_size() const { return Atomic::load(&_mem_size); }
 };
 
-// Configuration for G1SegmentedArray, e.g element size, element number of next G1SegmentedArrayBuffer.
+// Configuration for G1SegmentedArray, e.g slot size, slot number of next G1SegmentedArraySegment.
 class G1SegmentedArrayAllocOptions {
 
 protected:
-  const uint _elem_size;
-  const uint _initial_num_elems;
-  // Defines a limit to the number of elements in the buffer
-  const uint _max_num_elems;
-  const uint _alignment;
+  const uint _slot_size;
+  const uint _initial_num_slots;
  // Defines a limit to the number of slots in the segment
+  const uint _max_num_slots;
+  const uint _slot_alignment;
 
 public:
-  G1SegmentedArrayAllocOptions(uint elem_size, uint initial_num_elems, uint max_num_elems, uint alignment) :
-    _elem_size(elem_size),
-    _initial_num_elems(initial_num_elems),
-    _max_num_elems(max_num_elems),
-    _alignment(alignment) {
-    assert(_elem_size > 0, "Must be");
-    assert(_initial_num_elems > 0, "Must be");
-    assert(_max_num_elems > 0, "Must be");
-    assert(_alignment > 0, "Must be");
+  G1SegmentedArrayAllocOptions(uint slot_size, uint initial_num_slots, uint max_num_slots, uint alignment) :
+    _slot_size(slot_size),
+    _initial_num_slots(initial_num_slots),
+    _max_num_slots(max_num_slots),
+    _slot_alignment(alignment) {
+    assert(_slot_size > 0, "Must be");
+    assert(_initial_num_slots > 0, "Must be");
+    assert(_max_num_slots > 0, "Must be");
+    assert(_slot_alignment > 0, "Must be");
   }
 
-  virtual uint next_num_elems(uint prev_num_elems) const {
-    return _initial_num_elems;
+  virtual uint next_num_slots(uint prev_num_slots) const {
+    return _initial_num_slots;
   }
 
-  uint elem_size() const { return _elem_size; }
+  uint slot_size() const { return _slot_size; }
 
-  uint alignment() const { return _alignment; }
+  uint slot_alignment() const { return _slot_alignment; }
 };
 
-// A segmented array where G1SegmentedArrayBuffer is the segment, and
-// G1SegmentedArrayBufferList is the free list to cache G1SegmentedArrayBuffer,
+// A segmented array where G1SegmentedArraySegment is the segment, and
+// G1SegmentedArrayFreeList is the free list to cache G1SegmentedArraySegments,
 // and G1SegmentedArrayAllocOptions is the configuration for G1SegmentedArray
 // attributes.
 //
 // Implementation details as below:
 //
-// Arena-like allocator for (card set, or ...) heap memory objects (Elem elements).
+// Arena-like allocator for (card set, or ...) heap memory objects (Slot slots).
 //
-// Actual allocation from the C heap occurs on G1SegmentedArrayBuffer basis, i.e. segments
-// of elements. The assumed allocation pattern for these G1SegmentedArrayBuffer elements
+// Actual allocation from the C heap occurs on G1SegmentedArraySegment basis, i.e. segments
+// of slots. The assumed allocation pattern for these G1SegmentedArraySegment slots
 // is assumed to be strictly two-phased:
 //
-// - in the first phase, G1SegmentedArrayBuffers are allocated from the C heap (or a free
+// - in the first phase, G1SegmentedArraySegments are allocated from the C heap (or a free
 //   list given at initialization time). This allocation may occur in parallel. This
 //   typically corresponds to a single mutator phase, but may extend over multiple.
 //
-// - in the second phase, G1SegmentedArrayBuffers are given back in bulk to the free list.
+// - in the second phase, G1SegmentedArraySegments are given back in bulk to the free list.
 //   This is typically done during a GC pause.
 //
 // Some third party is responsible for giving back memory from the free list to
@@ -180,54 +180,54 @@ public:
 // The class also manages a few counters for statistics using atomic operations.
 // Their values are only consistent within each other with extra global
 // synchronization.
-template <class Elem, MEMFLAGS flag>
+template <class Slot, MEMFLAGS flag>
 class G1SegmentedArray {
-  // G1SegmentedArrayAllocOptions provides parameters for allocation buffer
+  // G1SegmentedArrayAllocOptions provides parameters for allocation segment
   // sizing and expansion.
   const G1SegmentedArrayAllocOptions* _alloc_options;
 
-  G1SegmentedArrayBuffer<flag>* volatile _first;       // The (start of the) list of all buffers.
-  G1SegmentedArrayBuffer<flag>* _last;                 // The last element of the list of all buffers.
-  volatile uint _num_buffers;                          // Number of assigned buffers to this allocator.
-  volatile size_t _mem_size;                           // Memory used by all buffers.
+  G1SegmentedArraySegment<flag>* volatile _first;      // The (start of the) list of all segments.
+  G1SegmentedArraySegment<flag>* _last;                // The last segment of the list of all segments.
+  volatile uint _num_segments;                         // Number of assigned segments to this allocator.
+  volatile size_t _mem_size;                           // Memory used by all segments.
 
-  G1SegmentedArrayBufferList<flag>* _free_buffer_list; // The global free buffer list to
-                                                       // preferentially get new buffers from.
+  G1SegmentedArrayFreeList<flag>* _free_segment_list;  // The global free segment list to
                                                        // preferentially get new segments from.
 
-  volatile uint _num_available_nodes; // Number of nodes available in all buffers (allocated + free + pending + not yet used).
-  volatile uint _num_allocated_nodes; // Number of total nodes allocated and in use.
+  volatile uint _num_available_slots; // Number of slots available in all segments (allocated + free + pending + not yet used).
+  volatile uint _num_allocated_slots; // Number of total slots allocated and in use.
 
 private:
-  inline G1SegmentedArrayBuffer<flag>* create_new_buffer(G1SegmentedArrayBuffer<flag>* const prev);
+  inline G1SegmentedArraySegment<flag>* create_new_segment(G1SegmentedArraySegment<flag>* const prev);
 
   DEBUG_ONLY(uint calculate_length() const;)
 
 public:
-  const G1SegmentedArrayBuffer<flag>* first_array_buffer() const { return Atomic::load(&_first); }
+  const G1SegmentedArraySegment<flag>* first_array_segment() const { return Atomic::load(&_first); }
 
-  uint num_available_nodes() const { return Atomic::load(&_num_available_nodes); }
-  uint num_allocated_nodes() const {
-    uint allocated = Atomic::load(&_num_allocated_nodes);
+  uint num_available_slots() const { return Atomic::load(&_num_available_slots); }
+  uint num_allocated_slots() const {
+    uint allocated = Atomic::load(&_num_allocated_slots);
     assert(calculate_length() == allocated, "Must be");
     return allocated;
   }
 
-  inline uint elem_size() const;
+  inline uint slot_size() const;
 
-  G1SegmentedArray(const G1SegmentedArrayAllocOptions* buffer_options,
-                   G1SegmentedArrayBufferList<flag>* free_buffer_list);
+  G1SegmentedArray(const G1SegmentedArrayAllocOptions* alloc_options,
+                   G1SegmentedArrayFreeList<flag>* free_segment_list);
   ~G1SegmentedArray();
 
-  // Deallocate all buffers to the free buffer list and reset this allocator. Must
+  // Deallocate all segments to the free segment list and reset this allocator. Must
   // be called in a globally synchronized area.
   void drop_all();
 
-  inline Elem* allocate();
+  inline Slot* allocate();
 
-  inline uint num_buffers() const;
+  inline uint num_segments() const;
 
-  template<typename BufferClosure>
-  void iterate_nodes(BufferClosure& closure) const;
+  template<typename SegmentClosure>
+  void iterate_segments(SegmentClosure& closure) const;
 };
 
 #endif //SHARE_GC_G1_G1SEGMENTEDARRAY_HPP
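The _next_allocate comment above describes an optimistic bump allocator: threads fetch-and-add first and validate afterwards, so the counter can race past _num_slots, which is why length() clamps. A self-contained C++ sketch of that scheme using std::atomic; the sizes are illustrative:

#include <algorithm>
#include <atomic>

struct Segment {
  static constexpr unsigned num_slots = 64;   // assumed example capacity
  static constexpr unsigned slot_size = 8;
  std::atomic<unsigned> next_allocate{0};
  char data[num_slots * slot_size];

  void* get_new_slot() {
    if (next_allocate.load(std::memory_order_relaxed) >= num_slots) {
      return nullptr;                         // already full, cheap early out
    }
    unsigned idx = next_allocate.fetch_add(1, std::memory_order_relaxed);
    if (idx >= num_slots) {
      return nullptr;                         // lost the race past the end
    }
    return data + idx * slot_size;
  }

  unsigned length() const {                   // clamp the racy over-increment
    return std::min(next_allocate.load(std::memory_order_relaxed), num_slots);
  }
};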
@ -31,79 +31,79 @@
|
||||
#include "utilities/globalCounter.inline.hpp"
|
||||
|
||||
template<MEMFLAGS flag>
|
||||
G1SegmentedArrayBuffer<flag>::G1SegmentedArrayBuffer(uint elem_size, uint num_instances, G1SegmentedArrayBuffer* next) :
|
||||
_elem_size(elem_size), _num_elems(num_instances), _next(next), _next_allocate(0) {
|
||||
G1SegmentedArraySegment<flag>::G1SegmentedArraySegment(uint slot_size, uint num_slots, G1SegmentedArraySegment* next) :
|
||||
_slot_size(slot_size), _num_slots(num_slots), _next(next), _next_allocate(0) {
|
||||
|
||||
_buffer = NEW_C_HEAP_ARRAY(char, (size_t)_num_elems * elem_size, mtGCCardSet);
|
||||
_segment = NEW_C_HEAP_ARRAY(char, (size_t)_num_slots * slot_size, mtGCCardSet);
|
||||
}
|
||||
|
||||
template<MEMFLAGS flag>
|
||||
G1SegmentedArrayBuffer<flag>::~G1SegmentedArrayBuffer() {
|
||||
FREE_C_HEAP_ARRAY(mtGCCardSet, _buffer);
|
||||
G1SegmentedArraySegment<flag>::~G1SegmentedArraySegment() {
|
||||
FREE_C_HEAP_ARRAY(mtGCCardSet, _segment);
|
||||
}
|
||||
|
||||
template<MEMFLAGS flag>
|
||||
void* G1SegmentedArrayBuffer<flag>::get_new_buffer_elem() {
|
||||
if (_next_allocate >= _num_elems) {
|
||||
void* G1SegmentedArraySegment<flag>::get_new_slot() {
|
||||
if (_next_allocate >= _num_slots) {
|
||||
return nullptr;
|
||||
}
|
||||
uint result = Atomic::fetch_and_add(&_next_allocate, 1u, memory_order_relaxed);
|
||||
if (result >= _num_elems) {
|
||||
if (result >= _num_slots) {
|
||||
return nullptr;
|
||||
}
|
||||
void* r = _buffer + (uint)result * _elem_size;
|
||||
void* r = _segment + (uint)result * _slot_size;
|
||||
return r;
|
||||
}
|
||||
|
||||
template<MEMFLAGS flag>
|
||||
void G1SegmentedArrayBufferList<flag>::bulk_add(G1SegmentedArrayBuffer<flag>& first,
|
||||
G1SegmentedArrayBuffer<flag>& last,
|
||||
size_t num,
|
||||
size_t mem_size) {
|
||||
void G1SegmentedArrayFreeList<flag>::bulk_add(G1SegmentedArraySegment<flag>& first,
|
||||
G1SegmentedArraySegment<flag>& last,
|
||||
size_t num,
|
||||
size_t mem_size) {
|
||||
_list.prepend(first, last);
|
||||
Atomic::add(&_num_buffers, num, memory_order_relaxed);
|
||||
Atomic::add(&_num_segments, num, memory_order_relaxed);
|
||||
Atomic::add(&_mem_size, mem_size, memory_order_relaxed);
|
||||
}
|
||||
|
||||
template<MEMFLAGS flag>
|
||||
void G1SegmentedArrayBufferList<flag>::print_on(outputStream* out, const char* prefix) {
|
||||
out->print_cr("%s: buffers %zu size %zu",
|
||||
prefix, Atomic::load(&_num_buffers), Atomic::load(&_mem_size));
|
||||
void G1SegmentedArrayFreeList<flag>::print_on(outputStream* out, const char* prefix) {
|
||||
out->print_cr("%s: segments %zu size %zu",
|
||||
prefix, Atomic::load(&_num_segments), Atomic::load(&_mem_size));
|
||||
}
|
||||
|
||||
template<MEMFLAGS flag>
|
||||
G1SegmentedArrayBuffer<flag>* G1SegmentedArrayBufferList<flag>::get() {
|
||||
G1SegmentedArraySegment<flag>* G1SegmentedArrayFreeList<flag>::get() {
|
||||
GlobalCounter::CriticalSection cs(Thread::current());
|
||||
|
||||
G1SegmentedArrayBuffer<flag>* result = _list.pop();
|
||||
G1SegmentedArraySegment<flag>* result = _list.pop();
|
||||
if (result != nullptr) {
|
||||
Atomic::dec(&_num_buffers, memory_order_relaxed);
|
||||
Atomic::dec(&_num_segments, memory_order_relaxed);
|
||||
Atomic::sub(&_mem_size, result->mem_size(), memory_order_relaxed);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
template<MEMFLAGS flag>
|
||||
G1SegmentedArrayBuffer<flag>* G1SegmentedArrayBufferList<flag>::get_all(size_t& num_buffers,
|
||||
size_t& mem_size) {
|
||||
G1SegmentedArraySegment<flag>* G1SegmentedArrayFreeList<flag>::get_all(size_t& num_segments,
|
||||
size_t& mem_size) {
|
||||
GlobalCounter::CriticalSection cs(Thread::current());
|
||||
|
||||
G1SegmentedArrayBuffer<flag>* result = _list.pop_all();
|
||||
num_buffers = Atomic::load(&_num_buffers);
|
||||
G1SegmentedArraySegment<flag>* result = _list.pop_all();
|
||||
num_segments = Atomic::load(&_num_segments);
|
||||
mem_size = Atomic::load(&_mem_size);
|
||||
|
||||
if (result != nullptr) {
|
||||
Atomic::sub(&_num_buffers, num_buffers, memory_order_relaxed);
|
||||
Atomic::sub(&_num_segments, num_segments, memory_order_relaxed);
|
||||
Atomic::sub(&_mem_size, mem_size, memory_order_relaxed);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
template<MEMFLAGS flag>
|
||||
void G1SegmentedArrayBufferList<flag>::free_all() {
|
||||
void G1SegmentedArrayFreeList<flag>::free_all() {
|
||||
size_t num_freed = 0;
|
||||
size_t mem_size_freed = 0;
|
||||
G1SegmentedArrayBuffer<flag>* cur;
|
||||
G1SegmentedArraySegment<flag>* cur;
|
||||
|
||||
while ((cur = _list.pop()) != nullptr) {
|
||||
mem_size_freed += cur->mem_size();
|
||||
@ -111,130 +111,130 @@ void G1SegmentedArrayBufferList<flag>::free_all() {
|
||||
delete cur;
|
||||
}
|
||||
|
||||
Atomic::sub(&_num_buffers, num_freed, memory_order_relaxed);
|
||||
Atomic::sub(&_num_segments, num_freed, memory_order_relaxed);
|
||||
Atomic::sub(&_mem_size, mem_size_freed, memory_order_relaxed);
|
||||
}
|
||||
|
||||
template <class Elem, MEMFLAGS flag>
|
||||
G1SegmentedArrayBuffer<flag>* G1SegmentedArray<Elem, flag>::create_new_buffer(G1SegmentedArrayBuffer<flag>* const prev) {
|
||||
// Take an existing buffer if available.
|
||||
G1SegmentedArrayBuffer<flag>* next = _free_buffer_list->get();
|
||||
template <class Slot, MEMFLAGS flag>
|
||||
G1SegmentedArraySegment<flag>* G1SegmentedArray<Slot, flag>::create_new_segment(G1SegmentedArraySegment<flag>* const prev) {
|
||||
// Take an existing segment if available.
|
||||
G1SegmentedArraySegment<flag>* next = _free_segment_list->get();
|
||||
if (next == nullptr) {
|
||||
uint prev_num_elems = (prev != nullptr) ? prev->num_elems() : 0;
|
||||
uint num_elems = _alloc_options->next_num_elems(prev_num_elems);
|
||||
next = new G1SegmentedArrayBuffer<flag>(elem_size(), num_elems, prev);
|
||||
uint prev_num_slots = (prev != nullptr) ? prev->num_slots() : 0;
|
||||
uint num_slots = _alloc_options->next_num_slots(prev_num_slots);
|
||||
next = new G1SegmentedArraySegment<flag>(slot_size(), num_slots, prev);
|
||||
} else {
|
||||
assert(elem_size() == next->elem_size() ,
|
||||
"Mismatch %d != %d Elem %zu", elem_size(), next->elem_size(), sizeof(Elem));
|
||||
assert(slot_size() == next->slot_size() ,
|
||||
"Mismatch %d != %d Slot %zu", slot_size(), next->slot_size(), sizeof(Slot));
|
||||
next->reset(prev);
|
||||
}
|
||||
|
||||
// Install it as current allocation buffer.
|
||||
G1SegmentedArrayBuffer<flag>* old = Atomic::cmpxchg(&_first, prev, next);
|
||||
// Install it as current allocation segment.
|
||||
G1SegmentedArraySegment<flag>* old = Atomic::cmpxchg(&_first, prev, next);
|
||||
if (old != prev) {
|
||||
// Somebody else installed the buffer, use that one.
|
||||
// Somebody else installed the segment, use that one.
|
||||
delete next;
|
||||
return old;
|
||||
} else {
|
||||
// Did we install the first element in the list? If so, this is also the last.
|
||||
// Did we install the first segment in the list? If so, this is also the last.
|
||||
if (prev == nullptr) {
|
||||
_last = next;
|
||||
}
|
||||
// Successfully installed the buffer into the list.
|
||||
Atomic::inc(&_num_buffers, memory_order_relaxed);
|
||||
// Successfully installed the segment into the list.
|
||||
Atomic::inc(&_num_segments, memory_order_relaxed);
|
||||
Atomic::add(&_mem_size, next->mem_size(), memory_order_relaxed);
|
||||
Atomic::add(&_num_available_nodes, next->num_elems(), memory_order_relaxed);
|
||||
Atomic::add(&_num_available_slots, next->num_slots(), memory_order_relaxed);
|
||||
return next;
|
||||
}
|
||||
}
|
||||
|
||||
template <class Elem, MEMFLAGS flag>
|
||||
uint G1SegmentedArray<Elem, flag>::elem_size() const {
|
||||
return _alloc_options->elem_size();
|
||||
template <class Slot, MEMFLAGS flag>
|
||||
uint G1SegmentedArray<Slot, flag>::slot_size() const {
|
||||
return _alloc_options->slot_size();
|
||||
}
|
||||
|
||||
template <class Elem, MEMFLAGS flag>
|
||||
G1SegmentedArray<Elem, flag>::G1SegmentedArray(const G1SegmentedArrayAllocOptions* buffer_options,
|
||||
G1SegmentedArrayBufferList<flag>* free_buffer_list) :
|
||||
_alloc_options(buffer_options),
|
||||
template <class Slot, MEMFLAGS flag>
|
||||
G1SegmentedArray<Slot, flag>::G1SegmentedArray(const G1SegmentedArrayAllocOptions* alloc_options,
|
||||
G1SegmentedArrayFreeList<flag>* free_segment_list) :
|
||||
_alloc_options(alloc_options),
|
||||
_first(nullptr),
|
||||
_last(nullptr),
|
||||
_num_buffers(0),
|
||||
_num_segments(0),
|
||||
_mem_size(0),
|
||||
_free_buffer_list(free_buffer_list),
|
||||
_num_available_nodes(0),
|
||||
_num_allocated_nodes(0) {
|
||||
assert(_free_buffer_list != nullptr, "precondition!");
|
||||
_free_segment_list(free_segment_list),
|
||||
_num_available_slots(0),
|
||||
_num_allocated_slots(0) {
|
||||
assert(_free_segment_list != nullptr, "precondition!");
|
||||
}
|
||||
|
||||
template <class Elem, MEMFLAGS flag>
|
||||
G1SegmentedArray<Elem, flag>::~G1SegmentedArray() {
|
||||
template <class Slot, MEMFLAGS flag>
|
||||
G1SegmentedArray<Slot, flag>::~G1SegmentedArray() {
|
||||
drop_all();
|
||||
}
|
||||
|
||||
template <class Elem, MEMFLAGS flag>
|
||||
void G1SegmentedArray<Elem, flag>::drop_all() {
|
||||
G1SegmentedArrayBuffer<flag>* cur = Atomic::load_acquire(&_first);
|
||||
template <class Slot, MEMFLAGS flag>
|
||||
void G1SegmentedArray<Slot, flag>::drop_all() {
|
||||
G1SegmentedArraySegment<flag>* cur = Atomic::load_acquire(&_first);
|
||||
|
||||
if (cur != nullptr) {
|
||||
assert(_last != nullptr, "If there is at least one element, there must be a last one.");
|
||||
assert(_last != nullptr, "If there is at least one segment, there must be a last one.");
|
||||
|
||||
G1SegmentedArrayBuffer<flag>* first = cur;
|
||||
G1SegmentedArraySegment<flag>* first = cur;
|
||||
#ifdef ASSERT
|
||||
// Check list consistency.
|
||||
G1SegmentedArrayBuffer<flag>* last = cur;
|
||||
uint num_buffers = 0;
|
||||
G1SegmentedArraySegment<flag>* last = cur;
|
||||
uint num_segments = 0;
|
||||
size_t mem_size = 0;
|
||||
while (cur != nullptr) {
|
||||
mem_size += cur->mem_size();
|
||||
num_buffers++;
|
||||
num_segments++;
|
||||
|
||||
G1SegmentedArrayBuffer<flag>* next = cur->next();
|
||||
G1SegmentedArraySegment<flag>* next = cur->next();
|
||||
last = cur;
|
||||
cur = next;
|
||||
}
|
||||
#endif
|
||||
assert(num_buffers == _num_buffers, "Buffer count inconsistent %u %u", num_buffers, _num_buffers);
|
||||
assert(num_segments == _num_segments, "Segment count inconsistent %u %u", num_segments, _num_segments);
|
||||
assert(mem_size == _mem_size, "Memory size inconsistent");
|
||||
assert(last == _last, "Inconsistent last element");
|
||||
assert(last == _last, "Inconsistent last segment");
|
||||
|
||||
_free_buffer_list->bulk_add(*first, *_last, _num_buffers, _mem_size);
|
||||
_free_segment_list->bulk_add(*first, *_last, _num_segments, _mem_size);
|
||||
}
|
||||
|
||||
_first = nullptr;
|
||||
_last = nullptr;
|
||||
_num_buffers = 0;
|
||||
_num_segments = 0;
|
||||
_mem_size = 0;
|
||||
_num_available_nodes = 0;
|
||||
_num_allocated_nodes = 0;
|
||||
_num_available_slots = 0;
|
||||
_num_allocated_slots = 0;
|
||||
}

template <class Elem, MEMFLAGS flag>
Elem* G1SegmentedArray<Elem, flag>::allocate() {
  assert(elem_size() > 0, "instance size not set.");
template <class Slot, MEMFLAGS flag>
Slot* G1SegmentedArray<Slot, flag>::allocate() {
  assert(slot_size() > 0, "instance size not set.");

  G1SegmentedArrayBuffer<flag>* cur = Atomic::load_acquire(&_first);
  G1SegmentedArraySegment<flag>* cur = Atomic::load_acquire(&_first);
  if (cur == nullptr) {
    cur = create_new_buffer(cur);
    cur = create_new_segment(cur);
  }

  while (true) {
    Elem* elem = (Elem*)cur->get_new_buffer_elem();
    if (elem != nullptr) {
      Atomic::inc(&_num_allocated_nodes, memory_order_relaxed);
      guarantee(is_aligned(elem, _alloc_options->alignment()),
                "result " PTR_FORMAT " not aligned at %u", p2i(elem), _alloc_options->alignment());
      return elem;
    Slot* slot = (Slot*)cur->get_new_slot();
    if (slot != nullptr) {
      Atomic::inc(&_num_allocated_slots, memory_order_relaxed);
      guarantee(is_aligned(slot, _alloc_options->slot_alignment()),
                "result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment());
      return slot;
    }
    // The buffer is full. Next round.
    // The segment is full. Next round.
    assert(cur->is_full(), "must be");
    cur = create_new_buffer(cur);
    cur = create_new_segment(cur);
  }
}
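
The retry loop above leans on get_new_slot() returning nullptr once a segment is exhausted. A self-contained sketch of the per-segment bump allocation that makes this work (an illustrative stand-in for G1SegmentedArraySegment, using std::atomic rather than HotSpot's Atomic:: wrappers; the names are invented):

#include <atomic>
#include <cstdint>
#include <cstdlib>

// Fixed block of slots handed out through an atomic bump index. Once the
// index passes capacity, get_new_slot() yields nullptr and the caller
// moves on to a fresh segment, exactly the structure of allocate() above.
struct SimpleSegment {
  uint32_t _slot_size;                 // bytes per slot
  uint32_t _num_slots;                 // slot capacity of this segment
  std::atomic<uint32_t> _next_slot;    // bump index of the next free slot
  SimpleSegment* _next;                // older segment in the chain
  char* _bottom;                       // start of the slot storage

  SimpleSegment(uint32_t slot_size, uint32_t num_slots, SimpleSegment* next)
    : _slot_size(slot_size), _num_slots(num_slots), _next_slot(0), _next(next),
      _bottom(static_cast<char*>(std::malloc(size_t(slot_size) * num_slots))) {}
  ~SimpleSegment() { std::free(_bottom); }

  SimpleSegment* next() const { return _next; }
  bool is_full() const {
    return _next_slot.load(std::memory_order_relaxed) >= _num_slots;
  }

  // Lock-free: racing threads each receive a distinct index from
  // fetch_add; indices past capacity are simply abandoned.
  void* get_new_slot() {
    uint32_t idx = _next_slot.fetch_add(1, std::memory_order_relaxed);
    if (idx >= _num_slots) {
      return nullptr;   // segment exhausted
    }
    return _bottom + size_t(idx) * _slot_size;
  }
};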

template <class Elem, MEMFLAGS flag>
inline uint G1SegmentedArray<Elem, flag>::num_buffers() const {
  return Atomic::load(&_num_buffers);
template <class Slot, MEMFLAGS flag>
inline uint G1SegmentedArray<Slot, flag>::num_segments() const {
  return Atomic::load(&_num_segments);
}

#ifdef ASSERT
@ -243,7 +243,7 @@ class LengthClosure {
  uint _total;
public:
  LengthClosure() : _total(0) {}
  void do_buffer(G1SegmentedArrayBuffer<flag>* node, uint limit) {
  void do_segment(G1SegmentedArraySegment<flag>* segment, uint limit) {
    _total += limit;
  }
  uint length() const {
@ -251,24 +251,24 @@ public:
  }
};

template <class Elem, MEMFLAGS flag>
uint G1SegmentedArray<Elem, flag>::calculate_length() const {
template <class Slot, MEMFLAGS flag>
uint G1SegmentedArray<Slot, flag>::calculate_length() const {
  LengthClosure<flag> closure;
  iterate_nodes(closure);
  iterate_segments(closure);
  return closure.length();
}
#endif

template <class Elem, MEMFLAGS flag>
template <typename BufferClosure>
void G1SegmentedArray<Elem, flag>::iterate_nodes(BufferClosure& closure) const {
  G1SegmentedArrayBuffer<flag>* cur = Atomic::load_acquire(&_first);
template <class Slot, MEMFLAGS flag>
template <typename SegmentClosure>
void G1SegmentedArray<Slot, flag>::iterate_segments(SegmentClosure& closure) const {
  G1SegmentedArraySegment<flag>* cur = Atomic::load_acquire(&_first);

  assert((cur != nullptr) == (_last != nullptr),
         "If there is at least one element, there must be a last one");
         "If there is at least one segment, there must be a last one");

  while (cur != nullptr) {
    closure.do_buffer(cur, cur->length());
    closure.do_segment(cur, cur->length());
    cur = cur->next();
  }
}
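
Continuing the SimpleSegment sketch above, the same duck-typed closure pattern looks like this outside HotSpot (names invented for illustration): any object with a matching do_segment(segment, limit) member can drive the traversal, which is how LengthClosure sums allocated slots.

// Closure-style traversal mirroring iterate_segments.
template <typename SegmentClosure>
void iterate_segments_sketch(SimpleSegment* first, SegmentClosure& closure) {
  for (SimpleSegment* cur = first; cur != nullptr; cur = cur->next()) {
    // The per-segment limit is how many slots were actually handed out,
    // capped at capacity (the bump index can overshoot under contention).
    uint32_t handed_out = cur->_next_slot.load(std::memory_order_relaxed);
    uint32_t limit = handed_out < cur->_num_slots ? handed_out : cur->_num_slots;
    closure.do_segment(cur, limit);
  }
}

// Usage, in the shape of LengthClosure: sum per-segment lengths to get
// the total number of allocated slots.
struct LengthClosureSketch {
  uint32_t _total = 0;
  void do_segment(SimpleSegment* /*segment*/, uint32_t limit) { _total += limit; }
};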

@ -53,7 +53,7 @@ void G1SegmentedArrayFreePool<flag>::update_unlink_processors(G1ReturnMemoryProc
}

template<MEMFLAGS flag>
void G1SegmentedArrayFreePool<flag>::G1ReturnMemoryProcessor::visit_free_list(G1SegmentedArrayBufferList<flag>* source) {
void G1SegmentedArrayFreePool<flag>::G1ReturnMemoryProcessor::visit_free_list(G1SegmentedArrayFreeList<flag>* source) {
  assert(_source == nullptr, "already visited");
  if (_return_to_vm_size > 0) {
    _source = source;
@ -78,13 +78,13 @@ void G1SegmentedArrayFreePool<flag>::G1ReturnMemoryProcessor::visit_free_list(G1
template<MEMFLAGS flag>
bool G1SegmentedArrayFreePool<flag>::G1ReturnMemoryProcessor::return_to_vm(jlong deadline) {
  assert(!finished_return_to_vm(), "already returned everything to the VM");
  assert(_first != nullptr, "must have element to return");
  assert(_first != nullptr, "must have segment to return");

  size_t keep_size = 0;
  size_t keep_num = 0;

  G1SegmentedArrayBuffer<flag>* cur = _first;
  G1SegmentedArrayBuffer<flag>* last = nullptr;
  G1SegmentedArraySegment<flag>* cur = _first;
  G1SegmentedArraySegment<flag>* last = nullptr;

  while (cur != nullptr && _return_to_vm_size > 0) {
    size_t cur_size = cur->mem_size();
@ -111,7 +111,7 @@ bool G1SegmentedArrayFreePool<flag>::G1ReturnMemoryProcessor::return_to_vm(jlong
  _source->bulk_add(*_first, *last, keep_num, keep_size);
  _first = cur;

  log_trace(gc, task)("Segmented Array Free Memory: Returned to VM %zu buffers size %zu", keep_num, keep_size);
  log_trace(gc, task)("Segmented Array Free Memory: Returned to VM %zu segments size %zu", keep_num, keep_size);

  // _return_to_vm_size may be larger than what is available in the list at the
  // time we actually get the list. I.e. the list and _return_to_vm_size may be
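
The shape of return_to_vm() is a chain split at a byte budget: the prefix that fits the budget is spliced back onto the free list (so the memory stays with the VM for reuse), and the remainder stays linked for return_to_os(). A hedged sketch continuing the Segment/SegmentFreeList types above, with fixed-size segments assumed for brevity (real segment sizes vary, and this elides the deadline handling in the truncated hunk):

#include <cstddef>

// Returns the head of the remainder, destined for the OS.
Segment* return_prefix_to_vm(Segment* first, size_t budget, size_t seg_bytes,
                             SegmentFreeList& free_list) {
  size_t kept_bytes = 0;
  size_t kept_num = 0;
  Segment* cur = first;
  Segment* last = nullptr;

  // Walk the chain until the budget is spent, tracking [first, last].
  while (cur != nullptr && kept_bytes < budget) {
    kept_bytes += seg_bytes;
    kept_num++;
    last = cur;
    cur = cur->_next;
  }

  if (last != nullptr) {
    // One O(1) splice re-adds the whole prefix, mirroring the
    // _source->bulk_add(*_first, *last, keep_num, keep_size) call above.
    free_list.bulk_add(*first, *last, kept_num, kept_bytes);
  }
  return cur;
}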
@ -135,7 +135,7 @@ bool G1SegmentedArrayFreePool<flag>::G1ReturnMemoryProcessor::return_to_os(jlong
  size_t mem_size_deleted = 0;

  while (_first != nullptr) {
    G1SegmentedArrayBuffer<flag>* next = _first->next();
    G1SegmentedArraySegment<flag>* next = _first->next();
    num_delete++;
    mem_size_deleted += _first->mem_size();
    delete _first;
@ -147,7 +147,7 @@ bool G1SegmentedArrayFreePool<flag>::G1ReturnMemoryProcessor::return_to_os(jlong
    }
  }

  log_trace(gc, task)("Segmented Array Free Memory: Return to OS %zu buffers size %zu", num_delete, mem_size_deleted);
  log_trace(gc, task)("Segmented Array Free Memory: Return to OS %zu segments size %zu", num_delete, mem_size_deleted);

  return _first != nullptr;
}
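
The OS phase is a deadline-bounded delete loop. Continuing the Segment sketch, the essential structure (illustrative only; HotSpot checks a jlong tick counter rather than std::chrono): the clock is checked after each delete, so at least one segment is freed per call, guaranteeing forward progress, and the unprocessed remainder is handed back for the next time slice.

#include <chrono>

// Returns the unprocessed remainder (nullptr when everything was freed).
Segment* return_to_os_sketch(Segment* first,
                             std::chrono::steady_clock::time_point deadline) {
  while (first != nullptr) {
    Segment* next = first->_next;
    delete first;                 // release the segment's backing memory
    first = next;
    if (std::chrono::steady_clock::now() > deadline) {
      break;                      // budget exhausted; resume later
    }
  }
  return first;
}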

@ -159,16 +159,16 @@ template<MEMFLAGS flag>
G1SegmentedArrayFreePool<flag>::G1SegmentedArrayFreePool(uint num_free_lists) :
  _num_free_lists(num_free_lists) {

  _free_lists = NEW_C_HEAP_ARRAY(G1SegmentedArrayBufferList<flag>, _num_free_lists, mtGC);
  _free_lists = NEW_C_HEAP_ARRAY(G1SegmentedArrayFreeList<flag>, _num_free_lists, mtGC);
  for (uint i = 0; i < _num_free_lists; i++) {
    new (&_free_lists[i]) G1SegmentedArrayBufferList<flag>();
    new (&_free_lists[i]) G1SegmentedArrayFreeList<flag>();
  }
}

template<MEMFLAGS flag>
G1SegmentedArrayFreePool<flag>::~G1SegmentedArrayFreePool() {
  for (uint i = 0; i < _num_free_lists; i++) {
    _free_lists[i].~G1SegmentedArrayBufferList<flag>();
    _free_lists[i].~G1SegmentedArrayFreeList<flag>();
  }
  FREE_C_HEAP_ARRAY(mtGC, _free_lists);
}
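
The constructor/destructor pair above is the classic raw-storage pattern: NEW_C_HEAP_ARRAY hands back raw bytes without running constructors, so the pool placement-news each free list into the array, and the destructor must invoke each element's destructor explicitly before freeing. A standalone illustration (FreeList is a hypothetical stand-in type, malloc/free in place of the NMT-tracked HotSpot macros):

#include <cstdlib>
#include <new>

struct FreeList {
  // ... head pointer, statistics, etc. ...
};

FreeList* make_free_lists(unsigned n) {
  void* raw = std::malloc(n * sizeof(FreeList));      // raw storage only
  FreeList* lists = static_cast<FreeList*>(raw);
  for (unsigned i = 0; i < n; i++) {
    new (&lists[i]) FreeList();                       // construct in place
  }
  return lists;
}

void destroy_free_lists(FreeList* lists, unsigned n) {
  for (unsigned i = 0; i < n; i++) {
    lists[i].~FreeList();                             // explicit destructor
  }
  std::free(lists);
}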
@ -179,7 +179,7 @@ G1SegmentedArrayMemoryStats G1SegmentedArrayFreePool<flag>::memory_sizes() const
  assert(free_list_stats.num_pools() == num_free_lists(), "must be");
  for (uint i = 0; i < num_free_lists(); i++) {
    free_list_stats._num_mem_sizes[i] = _free_lists[i].mem_size();
    free_list_stats._num_segments[i] = _free_lists[i].num_buffers();
    free_list_stats._num_segments[i] = _free_lists[i].num_segments();
  }
  return free_list_stats;
}

@ -54,7 +54,7 @@ public:
  uint num_pools() const { return G1CardSetConfiguration::num_mem_object_types(); }
};

// A set of free lists holding memory buffers for use by G1SegmentedArray,
// A set of free lists holding freed segments for use by G1SegmentedArray,
// e.g. G1CardSetAllocators::SegmentedArray
template<MEMFLAGS flag>
class G1SegmentedArrayFreePool {
@ -62,7 +62,7 @@ class G1SegmentedArrayFreePool {
  static G1SegmentedArrayFreePool _freelist_pool;

  const uint _num_free_lists;
  G1SegmentedArrayBufferList<flag>* _free_lists;
  G1SegmentedArrayFreeList<flag>* _free_lists;

public:
  static G1SegmentedArrayFreePool* free_list_pool() { return &_freelist_pool; }
@ -76,7 +76,7 @@ public:
  explicit G1SegmentedArrayFreePool(uint num_free_lists);
  ~G1SegmentedArrayFreePool();

  G1SegmentedArrayBufferList<flag>* free_list(uint i) {
  G1SegmentedArrayFreeList<flag>* free_list(uint i) {
    assert(i < _num_free_lists, "must be");
    return &_free_lists[i];
  }
@ -90,13 +90,13 @@ public:
};

// Data structure containing current in-progress state for returning memory to the
// operating system for a single G1SegmentedArrayBufferList.
// operating system for a single G1SegmentedArrayFreeList.
template<MEMFLAGS flag>
class G1SegmentedArrayFreePool<flag>::G1ReturnMemoryProcessor : public CHeapObj<mtGC> {
  G1SegmentedArrayBufferList<flag>* _source;
  G1SegmentedArrayFreeList<flag>* _source;
  size_t _return_to_vm_size;

  G1SegmentedArrayBuffer<flag>* _first;
  G1SegmentedArraySegment<flag>* _first;
  size_t _unlinked_bytes;
  size_t _num_unlinked;

@ -105,23 +105,23 @@ public:
    _source(nullptr), _return_to_vm_size(return_to_vm), _first(nullptr), _unlinked_bytes(0), _num_unlinked(0) {
  }

  // Updates the instance members about the given segmented array buffer list for
  // Updates the instance members about the given free list for
  // the purpose of giving back memory. Only necessary members are updated,
  // e.g. if there is nothing to return to the VM, do not set the source list.
  void visit_free_list(G1SegmentedArrayBufferList<flag>* source);
  void visit_free_list(G1SegmentedArrayFreeList<flag>* source);

  bool finished_return_to_vm() const { return _return_to_vm_size == 0; }
  bool finished_return_to_os() const { return _first == nullptr; }

  // Returns memory to the VM until the given deadline expires. Returns true if
  // there is no more work. Guarantees forward progress, i.e. at least one buffer
  // there is no more work. Guarantees forward progress, i.e. at least one segment
  // has been processed after returning.
  // return_to_vm() re-adds buffers to the respective free list.
  // return_to_vm() re-adds segments to the respective free list.
  bool return_to_vm(jlong deadline);
  // Returns memory to the OS until the given deadline expires. Returns true if
  // there is no more work. Guarantees forward progress, i.e. at least one buffer
  // there is no more work. Guarantees forward progress, i.e. at least one segment
  // has been processed after returning.
  // return_to_os() gives back buffers to the OS.
  // return_to_os() gives back segments to the OS.
  bool return_to_os(jlong deadline);
};
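
To see how the two phases fit together: a periodic service task repeatedly offers the processor a time budget until both phases report completion. The driver below is a hypothetical stand-in, not the patch's code; the mock mirrors only the public surface of G1ReturnMemoryProcessor, with countdowns in place of real segment lists and std::chrono in place of jlong tick deadlines.

#include <chrono>

using Clock = std::chrono::steady_clock;

struct ReturnProcessor {
  int _vm_left = 8;   // "segments" still to re-add to the free list
  int _os_left = 8;   // "segments" still to delete

  bool finished_return_to_vm() const { return _vm_left == 0; }
  bool finished_return_to_os() const { return _os_left == 0; }

  bool return_to_vm(Clock::time_point deadline) {
    while (_vm_left > 0 && Clock::now() <= deadline) { _vm_left--; }
    return finished_return_to_vm();
  }
  bool return_to_os(Clock::time_point deadline) {
    while (_os_left > 0 && Clock::now() <= deadline) { _os_left--; }
    return finished_return_to_os();
  }
};

// One invocation of the periodic task: spend at most budget, first
// refilling the free list (VM phase), then freeing the rest (OS phase).
// Returns true once the processor is completely drained.
bool run_one_budget(ReturnProcessor& proc, std::chrono::nanoseconds budget) {
  Clock::time_point deadline = Clock::now() + budget;
  if (!proc.finished_return_to_vm()) {
    proc.return_to_vm(deadline);
  } else if (!proc.finished_return_to_os()) {
    proc.return_to_os(deadline);
  }
  return proc.finished_return_to_vm() && proc.finished_return_to_os();
}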