8309899: Rename PtrQueueSet::buffer_size()
Reviewed-by: tschatzl, ayang
This commit is contained in:
parent 931625a930
commit 181845ae46
Changed files:
src/hotspot/share/gc/g1: g1CardTableEntryClosure.hpp, g1DirtyCardQueue.cpp, g1DirtyCardQueue.hpp, g1RedirtyCardsQueue.cpp, g1RemSet.cpp, g1YoungGCPostEvacuateTasks.cpp, g1YoungGCPreEvacuateTasks.cpp
src/hotspot/share/gc/shared
test/hotspot/gtest/gc/shared
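
The rename makes explicit that this value is the fixed number of element slots in a PtrQueue buffer, not a byte count or the current number of entries. Throughout the patch, the entry count of a buffer is computed as buffer_capacity() - index: queues fill from the high end of the buffer toward slot 0, so entries occupy [index, capacity) and a zero index means the buffer is full (hence handle_zero_index below). A minimal standalone sketch of that convention, for orientation; ModelQueue and its members are illustrative stand-ins, not HotSpot code:

#include <cassert>
#include <cstddef>

// Hypothetical model of the PtrQueue buffer convention; not HotSpot code.
struct ModelQueue {
  void** buf;       // element slots
  size_t index;     // entries occupy [index, capacity)
  size_t capacity;  // slot count: what buffer_capacity() now names

  size_t entry_count() const { return capacity - index; }
  bool is_empty() const { return index == capacity; }
  bool is_full() const { return index == 0; }

  void push(void* e) {
    assert(!is_full());
    buf[--index] = e;  // fill from the high end toward slot 0
  }
};

int main() {
  void* slots[4];
  ModelQueue q{slots, 4, 4};     // index == capacity: empty
  q.push(nullptr);
  assert(q.entry_count() == 1);  // capacity - index == 4 - 3
  return 0;
}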
src/hotspot/share/gc/g1/g1CardTableEntryClosure.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,9 +39,9 @@ public:
   virtual void do_card_ptr(CardValue* card_ptr, uint worker_id) = 0;
 
   // Process all the card_ptrs in node.
-  void apply_to_buffer(BufferNode* node, size_t buffer_size, uint worker_id) {
+  void apply_to_buffer(BufferNode* node, size_t buffer_capacity, uint worker_id) {
     void** buffer = BufferNode::make_buffer_from_node(node);
-    for (size_t i = node->index(); i < buffer_size; ++i) {
+    for (size_t i = node->index(); i < buffer_capacity; ++i) {
       CardValue* card_ptr = static_cast<CardValue*>(buffer[i]);
       do_card_ptr(card_ptr, worker_id);
     }
src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp

@@ -86,7 +86,7 @@ uint G1DirtyCardQueueSet::num_par_ids() {
 void G1DirtyCardQueueSet::flush_queue(G1DirtyCardQueue& queue) {
   if (queue.buffer() != nullptr) {
     G1ConcurrentRefineStats* stats = queue.refinement_stats();
-    stats->inc_dirtied_cards(buffer_size() - queue.index());
+    stats->inc_dirtied_cards(buffer_capacity() - queue.index());
   }
   PtrQueueSet::flush_queue(queue);
 }
@@ -105,7 +105,7 @@ void G1DirtyCardQueueSet::handle_zero_index(G1DirtyCardQueue& queue) {
   BufferNode* old_node = exchange_buffer_with_new(queue);
   if (old_node != nullptr) {
     G1ConcurrentRefineStats* stats = queue.refinement_stats();
-    stats->inc_dirtied_cards(buffer_size());
+    stats->inc_dirtied_cards(buffer_capacity());
     handle_completed_buffer(old_node, stats);
   }
 }
@@ -123,7 +123,7 @@ void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
   assert(cbn != nullptr, "precondition");
   // Increment _num_cards before adding to queue, so queue removal doesn't
   // need to deal with _num_cards possibly going negative.
-  Atomic::add(&_num_cards, buffer_size() - cbn->index());
+  Atomic::add(&_num_cards, buffer_capacity() - cbn->index());
   // Perform push in CS. The old tail may be popped while the push is
   // observing it (attaching it to the new buffer). We need to ensure it
   // can't be reused until the push completes, to avoid ABA problems.
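
The comment above carries the key ordering argument: the counter is bumped before the buffer is published, so a concurrent consumer can never subtract more than has been counted. A sketch of that idea using std::atomic rather than HotSpot's Atomic:: wrappers (names and structure illustrative only):

#include <atomic>
#include <cstddef>

std::atomic<size_t> num_cards{0};

void producer_enqueue(size_t entries_in_buffer) {
  num_cards.fetch_add(entries_in_buffer);  // count first...
  // ...then push the buffer onto the shared completed list (elided).
}

void consumer_dequeue(size_t entries_in_buffer) {
  // A buffer was popped from the shared list first (elided)...
  num_cards.fetch_sub(entries_in_buffer);  // ...then uncounted.
}

// The counter may transiently over-count, but it is never observed below
// the number of published entries, so the subtraction cannot drive it
// negative -- which is why enqueue adds *before* pushing.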
@@ -159,7 +159,7 @@ BufferNode* G1DirtyCardQueueSet::get_completed_buffer() {
     result = dequeue_completed_buffer();
     if (result == nullptr) return nullptr;
   }
-  Atomic::sub(&_num_cards, buffer_size() - result->index());
+  Atomic::sub(&_num_cards, buffer_capacity() - result->index());
   return result;
 }
 
@@ -169,7 +169,7 @@ void G1DirtyCardQueueSet::verify_num_cards() const {
   for (BufferNode* cur = _completed.first();
        !_completed.is_end(cur);
        cur = cur->next()) {
-    actual += buffer_size() - cur->index();
+    actual += buffer_capacity() - cur->index();
   }
   assert(actual == Atomic::load(&_num_cards),
          "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
@@ -285,7 +285,7 @@ void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
   // notification checking after the coming safepoint if it doesn't GC.
   // Note that this means the queue's _num_cards differs from the number
   // of cards in the queued buffers when there are paused buffers.
-  Atomic::add(&_num_cards, buffer_size() - node->index());
+  Atomic::add(&_num_cards, buffer_capacity() - node->index());
   _paused.add(node);
 }
 
@@ -341,7 +341,7 @@ BufferNodeList G1DirtyCardQueueSet::take_all_completed_buffers() {
 class G1RefineBufferedCards : public StackObj {
   BufferNode* const _node;
   CardTable::CardValue** const _node_buffer;
-  const size_t _node_buffer_size;
+  const size_t _node_buffer_capacity;
   const uint _worker_id;
   G1ConcurrentRefineStats* _stats;
   G1RemSet* const _g1rs;
@@ -351,12 +351,12 @@ class G1RefineBufferedCards : public StackObj {
     return p2 - p1;
   }
 
-  // Sorts the cards from start_index to _node_buffer_size in *decreasing*
+  // Sorts the cards from start_index to _node_buffer_capacity in *decreasing*
   // address order. Tests showed that this order is preferable to not sorting
   // or increasing address order.
   void sort_cards(size_t start_index) {
     QuickSort::sort(&_node_buffer[start_index],
-                    _node_buffer_size - start_index,
+                    _node_buffer_capacity - start_index,
                     compare_cards,
                     false);
   }
@@ -364,7 +364,7 @@ class G1RefineBufferedCards : public StackObj {
   // Returns the index to the first clean card in the buffer.
   size_t clean_cards() {
     const size_t start = _node->index();
-    assert(start <= _node_buffer_size, "invariant");
+    assert(start <= _node_buffer_capacity, "invariant");
 
     // Two-fingered compaction algorithm similar to the filtering mechanism in
     // SATBMarkQueue. The main difference is that clean_card_before_refine()
@@ -372,7 +372,7 @@ class G1RefineBufferedCards : public StackObj {
     // We don't check for SuspendibleThreadSet::should_yield(), because
     // cleaning and redirtying the cards is fast.
     CardTable::CardValue** src = &_node_buffer[start];
-    CardTable::CardValue** dst = &_node_buffer[_node_buffer_size];
+    CardTable::CardValue** dst = &_node_buffer[_node_buffer_capacity];
     assert(src <= dst, "invariant");
     for ( ; src < dst; ++src) {
       // Search low to high for a card to keep.
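
The loop that begins here is the two-fingered compaction named in the comment above: src scans up for entries to keep, dst scans down for slots to discard, and keepers end up packed into the tail [first_clean, capacity), matching the queue's [index, capacity) convention. A standalone sketch of the pattern, with a stand-in keep() predicate in place of clean_card_before_refine():

#include <cassert>
#include <cstddef>

// Illustrative two-fingered compaction; returns the index of the first
// retained entry. Entries in [result, capacity) are the keepers.
template <typename T, typename Keep>
size_t two_finger_compact(T* buf, size_t start, size_t capacity, Keep keep) {
  T* src = buf + start;     // finger scanning low -> high
  T* dst = buf + capacity;  // finger scanning high -> low
  for ( ; src < dst; ++src) {
    // Search low to high for an entry to keep.
    T entry = *src;
    if (keep(entry)) {
      // Search high to low for an entry to discard, and overwrite it.
      while (src < --dst) {
        if (!keep(*dst)) {
          *dst = entry;  // keeper moved into the retained tail
          break;
        }
      }
    }
  }
  return dst - buf;
}

int main() {
  int a[4] = {1, 2, 3, 4};  // keep evens, discard odds (stand-in predicate)
  size_t first = two_finger_compact(a, 0, 4, [](int v) { return v % 2 == 0; });
  assert(first == 2 && a[2] % 2 == 0 && a[3] % 2 == 0);
  return 0;
}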
@@ -391,7 +391,7 @@ class G1RefineBufferedCards : public StackObj {
     // dst points to the first retained clean card, or the end of the buffer
     // if all the cards were discarded.
     const size_t first_clean = dst - _node_buffer;
-    assert(first_clean >= start && first_clean <= _node_buffer_size, "invariant");
+    assert(first_clean >= start && first_clean <= _node_buffer_capacity, "invariant");
     // Discarded cards are considered as refined.
     _stats->inc_refined_cards(first_clean - start);
     _stats->inc_precleaned_cards(first_clean - start);
@@ -401,7 +401,7 @@ class G1RefineBufferedCards : public StackObj {
   bool refine_cleaned_cards(size_t start_index) {
     bool result = true;
     size_t i = start_index;
-    for ( ; i < _node_buffer_size; ++i) {
+    for ( ; i < _node_buffer_capacity; ++i) {
       if (SuspendibleThreadSet::should_yield()) {
         redirty_unrefined_cards(i);
         result = false;
@@ -415,26 +415,26 @@ class G1RefineBufferedCards : public StackObj {
   }
 
   void redirty_unrefined_cards(size_t start) {
-    for ( ; start < _node_buffer_size; ++start) {
+    for ( ; start < _node_buffer_capacity; ++start) {
       *_node_buffer[start] = G1CardTable::dirty_card_val();
     }
   }
 
 public:
   G1RefineBufferedCards(BufferNode* node,
-                        size_t node_buffer_size,
+                        size_t node_buffer_capacity,
                         uint worker_id,
                         G1ConcurrentRefineStats* stats) :
     _node(node),
     _node_buffer(reinterpret_cast<CardTable::CardValue**>(BufferNode::make_buffer_from_node(node))),
-    _node_buffer_size(node_buffer_size),
+    _node_buffer_capacity(node_buffer_capacity),
     _worker_id(worker_id),
     _stats(stats),
     _g1rs(G1CollectedHeap::heap()->rem_set()) {}
 
   bool refine() {
     size_t first_clean_index = clean_cards();
-    if (first_clean_index == _node_buffer_size) {
+    if (first_clean_index == _node_buffer_capacity) {
       _node->set_index(first_clean_index);
       return true;
     }
@@ -457,7 +457,7 @@ bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node,
                                         G1ConcurrentRefineStats* stats) {
   Ticks start_time = Ticks::now();
   G1RefineBufferedCards buffered_cards(node,
-                                       buffer_size(),
+                                       buffer_capacity(),
                                        worker_id,
                                        stats);
   bool result = buffered_cards.refine();
@@ -468,12 +468,12 @@ bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node,
 void G1DirtyCardQueueSet::handle_refined_buffer(BufferNode* node,
                                                 bool fully_processed) {
   if (fully_processed) {
-    assert(node->index() == buffer_size(),
+    assert(node->index() == buffer_capacity(),
            "Buffer not fully consumed: index: " SIZE_FORMAT ", size: " SIZE_FORMAT,
-           node->index(), buffer_size());
+           node->index(), buffer_capacity());
     deallocate_buffer(node);
   } else {
-    assert(node->index() < buffer_size(), "Buffer fully consumed.");
+    assert(node->index() < buffer_capacity(), "Buffer fully consumed.");
     // Buffer incompletely processed because there is a pending safepoint.
     // Record partially processed buffer, to be finished later.
     record_paused_buffer(node);
@@ -576,7 +576,7 @@ G1ConcurrentRefineStats G1DirtyCardQueueSet::concatenate_log_and_stats(Thread* t
   // Flush the buffer if non-empty. Flush before accumulating and
   // resetting stats, since flushing may modify the stats.
   if ((queue.buffer() != nullptr) &&
-      (queue.index() != buffer_size())) {
+      (queue.index() != buffer_capacity())) {
     flush_queue(queue);
   }
 
src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp

@@ -194,7 +194,7 @@ class G1DirtyCardQueueSet: public PtrQueueSet {
 
   void abandon_completed_buffers();
 
-  // Refine the cards in "node" from its index to buffer_size.
+  // Refine the cards in "node" from its index to buffer_capacity.
   // Stops processing if SuspendibleThreadSet::should_yield() is true.
   // Returns true if the entire buffer was processed, false if there
   // is a pending yield request. The node's index is updated to exclude
src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp

@@ -46,7 +46,7 @@ G1RedirtyCardsLocalQueueSet::~G1RedirtyCardsLocalQueueSet() {
 #endif // ASSERT
 
 void G1RedirtyCardsLocalQueueSet::enqueue_completed_buffer(BufferNode* node) {
-  _buffers._entry_count += buffer_size() - node->index();
+  _buffers._entry_count += buffer_capacity() - node->index();
   node->set_next(_buffers._head);
   _buffers._head = node;
   if (_buffers._tail == nullptr) {
@@ -130,7 +130,7 @@ void G1RedirtyCardsQueueSet::update_tail(BufferNode* node) {
 
 void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) {
   assert(_collecting, "precondition");
-  Atomic::add(&_entry_count, buffer_size() - node->index());
+  Atomic::add(&_entry_count, buffer_capacity() - node->index());
   _list.push(*node);
   update_tail(node);
 }
src/hotspot/share/gc/g1/g1RemSet.cpp

@@ -1262,9 +1262,9 @@ class G1MergeHeapRootsTask : public WorkerTask {
 
   void apply_closure_to_dirty_card_buffers(G1MergeLogBufferCardsClosure* cl, uint worker_id) {
     G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-    size_t buffer_size = dcqs.buffer_size();
+    size_t buffer_capacity = dcqs.buffer_capacity();
     while (BufferNode* node = _dirty_card_buffers.pop()) {
-      cl->apply_to_buffer(node, buffer_size, worker_id);
+      cl->apply_to_buffer(node, buffer_capacity, worker_id);
       dcqs.deallocate_buffer(node);
     }
   }
@@ -1567,7 +1567,7 @@ void G1RemSet::enqueue_for_reprocessing(CardValue* card_ptr) {
   *card_ptr = G1CardTable::dirty_card_val();
   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
   void** buffer = dcqs.allocate_buffer();
-  size_t index = dcqs.buffer_size() - 1;
+  size_t index = dcqs.buffer_capacity() - 1;
   buffer[index] = card_ptr;
   dcqs.enqueue_completed_buffer(BufferNode::make_node_from_buffer(buffer, index));
 }
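
This hunk shows the convention end to end: the single card goes in the last slot and the node's index is set to capacity - 1, so the buffer reports exactly one entry. A tiny illustrative model of the same arithmetic (stand-in names, not HotSpot code):

#include <cassert>
#include <cstddef>

int main() {
  const size_t capacity = 8;      // stand-in for dcqs.buffer_capacity()
  void* buffer[capacity] = {};
  int card = 0;                   // stand-in for the card being redirtied
  size_t index = capacity - 1;
  buffer[index] = &card;          // single entry in the last slot
  assert(capacity - index == 1);  // the node will report one card
  return 0;
}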
src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp

@@ -397,13 +397,13 @@ public:
 
   void do_work(uint worker_id) override {
     RedirtyLoggedCardTableEntryClosure cl(G1CollectedHeap::heap(), _evac_failure_regions);
-    const size_t buffer_size = _rdcqs->buffer_size();
+    const size_t buffer_capacity = _rdcqs->buffer_capacity();
     BufferNode* next = Atomic::load(&_nodes);
     while (next != nullptr) {
       BufferNode* node = next;
       next = Atomic::cmpxchg(&_nodes, node, node->next());
       if (next == node) {
-        cl.apply_to_buffer(node, buffer_size, worker_id);
+        cl.apply_to_buffer(node, buffer_capacity, worker_id);
         next = node->next();
       }
     }
src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.cpp

@@ -167,11 +167,11 @@ static void verify_empty_dirty_card_logs() {
   ResourceMark rm;
 
   struct Verifier : public ThreadClosure {
-    size_t _buffer_size;
-    Verifier() : _buffer_size(G1BarrierSet::dirty_card_queue_set().buffer_size()) {}
+    size_t _buffer_capacity;
+    Verifier() : _buffer_capacity(G1BarrierSet::dirty_card_queue_set().buffer_capacity()) {}
     void do_thread(Thread* t) override {
       G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(t);
-      assert((queue.buffer() == nullptr) || (queue.index() == _buffer_size),
+      assert((queue.buffer() == nullptr) || (queue.index() == _buffer_capacity),
              "non-empty dirty card queue for thread %s", t->name());
     }
   } verifier;
src/hotspot/share/gc/shared/ptrQueue.cpp

@@ -30,7 +30,7 @@
 
 PtrQueue::PtrQueue(PtrQueueSet* qset) :
   _index(0),
-  _capacity_in_bytes(index_to_byte_index(qset->buffer_size())),
+  _capacity_in_bytes(index_to_byte_index(qset->buffer_capacity())),
   _buf(nullptr)
 {}
 
@@ -38,10 +38,10 @@ PtrQueue::~PtrQueue() {
   assert(_buf == nullptr, "queue must be flushed before delete");
 }
 
-BufferNode::AllocatorConfig::AllocatorConfig(size_t size) : _buffer_size(size) {}
+BufferNode::AllocatorConfig::AllocatorConfig(size_t size) : _buffer_capacity(size) {}
 
 void* BufferNode::AllocatorConfig::allocate() {
-  size_t byte_size = _buffer_size * sizeof(void*);
+  size_t byte_size = _buffer_capacity * sizeof(void*);
   return NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
 }
 
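
This hunk also makes the units clear: the capacity counts void* slots, and the allocation is the node header (buffer_offset() in the real code) plus capacity * sizeof(void*) payload bytes. A sketch of that arithmetic with an assumed header size, purely for illustration:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t buffer_capacity = 256;             // element slots per buffer
  const size_t header_bytes = 3 * sizeof(void*);  // stand-in for buffer_offset()
  size_t byte_size = buffer_capacity * sizeof(void*);
  std::printf("allocating %zu bytes (%zu header + %zu payload)\n",
              header_bytes + byte_size, header_bytes, byte_size);
  return 0;
}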
@@ -50,8 +50,8 @@ void BufferNode::AllocatorConfig::deallocate(void* node) {
   FREE_C_HEAP_ARRAY(char, node);
 }
 
-BufferNode::Allocator::Allocator(const char* name, size_t buffer_size) :
-  _config(buffer_size),
+BufferNode::Allocator::Allocator(const char* name, size_t buffer_capacity) :
+  _config(buffer_capacity),
   _free_list(name, &_config)
 {
@@ -80,7 +80,7 @@ PtrQueueSet::~PtrQueueSet() {}
 
 void PtrQueueSet::reset_queue(PtrQueue& queue) {
   if (queue.buffer() != nullptr) {
-    queue.set_index(buffer_size());
+    queue.set_index(buffer_capacity());
   }
 }
 
@@ -91,7 +91,7 @@ void PtrQueueSet::flush_queue(PtrQueue& queue) {
     queue.set_buffer(nullptr);
     queue.set_index(0);
     BufferNode* node = BufferNode::make_node_from_buffer(buffer, index);
-    if (index == buffer_size()) {
+    if (index == buffer_capacity()) {
       deallocate_buffer(node);
     } else {
       enqueue_completed_buffer(node);
@@ -129,7 +129,7 @@ BufferNode* PtrQueueSet::exchange_buffer_with_new(PtrQueue& queue) {
 
 void PtrQueueSet::install_new_buffer(PtrQueue& queue) {
   queue.set_buffer(allocate_buffer());
-  queue.set_index(buffer_size());
+  queue.set_index(buffer_capacity());
 }
 
 void** PtrQueueSet::allocate_buffer() {
src/hotspot/share/gc/shared/ptrQueue.hpp

@@ -165,7 +165,7 @@ public:
 // We use BufferNode::AllocatorConfig to set the allocation options for the
 // FreeListAllocator.
 class BufferNode::AllocatorConfig : public FreeListConfig {
-  const size_t _buffer_size;
+  const size_t _buffer_capacity;
 public:
   explicit AllocatorConfig(size_t size);
 
@@ -175,7 +175,7 @@ public:
 
   void deallocate(void* node) override;
 
-  size_t buffer_size() const { return _buffer_size; }
+  size_t buffer_capacity() const { return _buffer_capacity; }
 };
 
 class BufferNode::Allocator {
@@ -187,10 +187,10 @@ class BufferNode::Allocator {
   NONCOPYABLE(Allocator);
 
 public:
-  Allocator(const char* name, size_t buffer_size);
+  Allocator(const char* name, size_t buffer_capacity);
   ~Allocator() = default;
 
-  size_t buffer_size() const { return _config.buffer_size(); }
+  size_t buffer_capacity() const { return _config.buffer_capacity(); }
   size_t free_count() const;
   BufferNode* allocate();
   void release(BufferNode* node);
@@ -236,11 +236,11 @@ public:
   // Return the associated BufferNode allocator.
   BufferNode::Allocator* allocator() const { return _allocator; }
 
-  // Return the buffer for a BufferNode of size buffer_size().
+  // Return the buffer for a BufferNode of size buffer_capacity().
   void** allocate_buffer();
 
   // Return an empty buffer to the free list. The node is required
-  // to have been allocated with a size of buffer_size().
+  // to have been allocated with a size of buffer_capacity().
   void deallocate_buffer(BufferNode* node);
 
   // A completed buffer is a buffer the mutator is finished with, and
@@ -249,8 +249,8 @@ public:
   // Adds node to the completed buffer list.
   virtual void enqueue_completed_buffer(BufferNode* node) = 0;
 
-  size_t buffer_size() const {
-    return _allocator->buffer_size();
+  size_t buffer_capacity() const {
+    return _allocator->buffer_capacity();
   }
 };
 
src/hotspot/share/gc/shared/satbMarkQueue.cpp

@@ -124,7 +124,7 @@ void SATBMarkQueueSet::set_process_completed_buffers_threshold(size_t value) {
 
 void SATBMarkQueueSet::set_buffer_enqueue_threshold_percentage(uint value) {
   // Minimum threshold of 1 ensures enqueuing of completely full buffers.
-  size_t size = buffer_size();
+  size_t size = buffer_capacity();
   size_t enqueue_qty = (size * value) / 100;
   _buffer_enqueue_threshold = MAX2(size - enqueue_qty, (size_t)1);
 }
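
The threshold here is expressed as an index, not an entry count: since entries occupy [index, capacity), a buffer is enqueued once its index drops below the threshold, i.e. once it is more than the given percentage full (see should_enqueue_buffer below, which tests queue.index() < threshold). A worked example with assumed values, using std::max in place of HotSpot's MAX2:

#include <algorithm>
#include <cassert>
#include <cstddef>

int main() {
  size_t size = 1024;  // assumed buffer_capacity()
  unsigned value = 60; // assumed percentage parameter
  size_t enqueue_qty = (size * value) / 100;                   // 614 entries
  size_t threshold = std::max(size - enqueue_qty, size_t(1));  // index 410
  assert(threshold == 410);
  // A queue with index 400 holds 624 entries (> 60% full), so it qualifies.
  size_t index = 400;
  assert(index < threshold);
  return 0;
}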
@@ -194,9 +194,9 @@ void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active)
   virtual void do_thread(Thread* t) {
     SATBMarkQueue& queue = _qset->satb_queue_for_thread(t);
     if (queue.buffer() != nullptr) {
-      assert(!_active || queue.index() == _qset->buffer_size(),
+      assert(!_active || queue.index() == _qset->buffer_capacity(),
              "queues should be empty when activated");
-      queue.set_index(_qset->buffer_size());
+      queue.set_index(_qset->buffer_capacity());
     }
     queue.set_active(_active);
   }
@@ -209,7 +209,7 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl)
   if (nd != nullptr) {
     void **buf = BufferNode::make_buffer_from_node(nd);
     size_t index = nd->index();
-    size_t size = buffer_size();
+    size_t size = buffer_capacity();
     assert(index <= size, "invariant");
     cl->do_buffer(buf + index, size - index);
     deallocate_buffer(nd);
@@ -255,9 +255,9 @@ bool SATBMarkQueueSet::should_enqueue_buffer(SATBMarkQueue& queue) {
   // Ensure we'll enqueue completely full buffers.
   assert(threshold > 0, "enqueue threshold = 0");
   // Ensure we won't enqueue empty buffers.
-  assert(threshold <= buffer_size(),
+  assert(threshold <= buffer_capacity(),
          "enqueue threshold %zu exceeds capacity %zu",
-         threshold, buffer_size());
+         threshold, buffer_capacity());
   return queue.index() < threshold;
 }
 
@@ -310,7 +310,7 @@ void SATBMarkQueueSet::print_all(const char* msg) {
   while (nd != nullptr) {
     void** buf = BufferNode::make_buffer_from_node(nd);
     os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
-    print_satb_buffer(buffer, buf, nd->index(), buffer_size());
+    print_satb_buffer(buffer, buf, nd->index(), buffer_capacity());
     nd = nd->next();
     i += 1;
   }
src/hotspot/share/gc/shared/satbMarkQueue.inline.hpp

@@ -181,7 +181,7 @@ inline void SATBMarkQueueSet::apply_filter(Filter filter_out, SATBMarkQueue& que
 
   // Two-fingered compaction toward the end.
   void** src = &buf[queue.index()];
-  void** dst = &buf[buffer_size()];
+  void** dst = &buf[buffer_capacity()];
   assert(src <= dst, "invariant");
   for ( ; src < dst; ++src) {
     // Search low to high for an entry to keep.
test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,9 +52,9 @@ typedef BufferNode::TestSupport::ProcessorThread ProcessorThread;
 
 // Some basic testing of BufferNode::Allocator.
 TEST_VM(PtrQueueBufferAllocatorTest, test) {
-  const size_t buffer_size = 256;
-  BufferNode::Allocator allocator("Test Buffer Allocator", buffer_size);
-  ASSERT_EQ(buffer_size, allocator.buffer_size());
+  const size_t buffer_capacity = 256;
+  BufferNode::Allocator allocator("Test Buffer Allocator", buffer_capacity);
+  ASSERT_EQ(buffer_capacity, allocator.buffer_capacity());
 
   // Allocate some new nodes for use in testing.
   BufferNode* nodes[10] = {};
@@ -233,10 +233,9 @@ static void run_test(BufferNode::Allocator* allocator, CompletedList* cbl) {
   tty->print_cr("allocator free count: " SIZE_FORMAT, allocator->free_count());
 }
 
-const size_t buffer_size = 1024;
-
 TEST_VM(PtrQueueBufferAllocatorTest, stress_free_list_allocator) {
-  BufferNode::Allocator allocator("Test Allocator", buffer_size);
+  const size_t buffer_capacity = 1024;
+  BufferNode::Allocator allocator("Test Allocator", buffer_capacity);
   CompletedList completed;
   run_test(&allocator, &completed);
 }