8314276: Improve PtrQueue API around size/capacity
Reviewed-by: iwalulya, tschatzl
parent 0c3bc71d24
commit 2a1176b544
@@ -39,9 +39,10 @@ public:
   virtual void do_card_ptr(CardValue* card_ptr, uint worker_id) = 0;
 
   // Process all the card_ptrs in node.
-  void apply_to_buffer(BufferNode* node, size_t buffer_capacity, uint worker_id) {
+  void apply_to_buffer(BufferNode* node, uint worker_id) {
     void** buffer = BufferNode::make_buffer_from_node(node);
-    for (size_t i = node->index(); i < buffer_capacity; ++i) {
+    size_t capacity = node->capacity();
+    for (size_t i = node->index(); i < capacity; ++i) {
       CardValue* card_ptr = static_cast<CardValue*>(buffer[i]);
       do_card_ptr(card_ptr, worker_id);
     }
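This first hunk shows the core shape of the change: a BufferNode now knows its own capacity, so callers of apply_to_buffer no longer thread a separate buffer_capacity argument through. A minimal sketch of a concrete closure against the new API follows; the closure name and counting body are illustrative, not part of this patch, and the surrounding HotSpot declarations (CardValue, uint, BufferNode) are assumed to be in scope:

// Illustrative subclass of the abstract closure in this hunk; not from the patch.
class CountingCardClosure : public G1CardTableEntryClosure {
  size_t _count;
public:
  CountingCardClosure() : _count(0) {}
  void do_card_ptr(CardValue* card_ptr, uint worker_id) override {
    _count++;  // a real closure would refine or redirty *card_ptr here
  }
  size_t count() const { return _count; }
};

// Usage: the capacity is read from the node itself.
//   CountingCardClosure cl;
//   cl.apply_to_buffer(node, worker_id);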
@@ -86,7 +86,7 @@ uint G1DirtyCardQueueSet::num_par_ids() {
 void G1DirtyCardQueueSet::flush_queue(G1DirtyCardQueue& queue) {
   if (queue.buffer() != nullptr) {
     G1ConcurrentRefineStats* stats = queue.refinement_stats();
-    stats->inc_dirtied_cards(buffer_capacity() - queue.index());
+    stats->inc_dirtied_cards(queue.size());
   }
   PtrQueueSet::flush_queue(queue);
 }
@@ -104,8 +104,9 @@ void G1DirtyCardQueueSet::handle_zero_index(G1DirtyCardQueue& queue) {
   assert(queue.index() == 0, "precondition");
   BufferNode* old_node = exchange_buffer_with_new(queue);
   if (old_node != nullptr) {
+    assert(old_node->index() == 0, "invariant");
     G1ConcurrentRefineStats* stats = queue.refinement_stats();
-    stats->inc_dirtied_cards(buffer_capacity());
+    stats->inc_dirtied_cards(old_node->capacity());
     handle_completed_buffer(old_node, stats);
   }
 }
@@ -123,7 +124,7 @@ void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
   assert(cbn != nullptr, "precondition");
   // Increment _num_cards before adding to queue, so queue removal doesn't
   // need to deal with _num_cards possibly going negative.
-  Atomic::add(&_num_cards, buffer_capacity() - cbn->index());
+  Atomic::add(&_num_cards, cbn->size());
   // Perform push in CS. The old tail may be popped while the push is
   // observing it (attaching it to the new buffer). We need to ensure it
   // can't be reused until the push completes, to avoid ABA problems.
@@ -159,7 +160,7 @@ BufferNode* G1DirtyCardQueueSet::get_completed_buffer() {
     result = dequeue_completed_buffer();
     if (result == nullptr) return nullptr;
   }
-  Atomic::sub(&_num_cards, buffer_capacity() - result->index());
+  Atomic::sub(&_num_cards, result->size());
   return result;
 }
@@ -169,7 +170,7 @@ void G1DirtyCardQueueSet::verify_num_cards() const {
   for (BufferNode* cur = _completed.first();
        !_completed.is_end(cur);
        cur = cur->next()) {
-    actual += buffer_capacity() - cur->index();
+    actual += cur->size();
   }
   assert(actual == Atomic::load(&_num_cards),
          "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
@@ -285,7 +286,7 @@ void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
   // notification checking after the coming safepoint if it doesn't GC.
   // Note that this means the queue's _num_cards differs from the number
   // of cards in the queued buffers when there are paused buffers.
-  Atomic::add(&_num_cards, buffer_capacity() - node->index());
+  Atomic::add(&_num_cards, node->size());
   _paused.add(node);
 }
@@ -422,12 +423,11 @@ class G1RefineBufferedCards : public StackObj {
 
 public:
   G1RefineBufferedCards(BufferNode* node,
-                        size_t node_buffer_capacity,
                         uint worker_id,
                         G1ConcurrentRefineStats* stats) :
     _node(node),
     _node_buffer(reinterpret_cast<CardTable::CardValue**>(BufferNode::make_buffer_from_node(node))),
-    _node_buffer_capacity(node_buffer_capacity),
+    _node_buffer_capacity(node->capacity()),
     _worker_id(worker_id),
     _stats(stats),
     _g1rs(G1CollectedHeap::heap()->rem_set()) {}
@@ -456,10 +456,7 @@ bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node,
                                         uint worker_id,
                                         G1ConcurrentRefineStats* stats) {
   Ticks start_time = Ticks::now();
-  G1RefineBufferedCards buffered_cards(node,
-                                       buffer_capacity(),
-                                       worker_id,
-                                       stats);
+  G1RefineBufferedCards buffered_cards(node, worker_id, stats);
   bool result = buffered_cards.refine();
   stats->inc_refinement_time(Ticks::now() - start_time);
   return result;
@@ -468,12 +465,11 @@ bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node,
 void G1DirtyCardQueueSet::handle_refined_buffer(BufferNode* node,
                                                 bool fully_processed) {
   if (fully_processed) {
-    assert(node->index() == buffer_capacity(),
-           "Buffer not fully consumed: index: " SIZE_FORMAT ", size: " SIZE_FORMAT,
-           node->index(), buffer_capacity());
+    assert(node->is_empty(), "Buffer not fully consumed: index: %zu, size: %zu",
+           node->index(), node->capacity());
     deallocate_buffer(node);
   } else {
-    assert(node->index() < buffer_capacity(), "Buffer fully consumed.");
+    assert(!node->is_empty(), "Buffer fully consumed.");
     // Buffer incompletely processed because there is a pending safepoint.
     // Record partially processed buffer, to be finished later.
     record_paused_buffer(node);
@@ -575,8 +571,7 @@ G1ConcurrentRefineStats G1DirtyCardQueueSet::concatenate_log_and_stats(Thread* t
   G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(thread);
   // Flush the buffer if non-empty. Flush before accumulating and
   // resetting stats, since flushing may modify the stats.
-  if ((queue.buffer() != nullptr) &&
-      (queue.index() != buffer_capacity())) {
+  if (!queue.is_empty()) {
     flush_queue(queue);
   }
@@ -46,7 +46,7 @@ G1RedirtyCardsLocalQueueSet::~G1RedirtyCardsLocalQueueSet() {
 #endif // ASSERT
 
 void G1RedirtyCardsLocalQueueSet::enqueue_completed_buffer(BufferNode* node) {
-  _buffers._entry_count += buffer_capacity() - node->index();
+  _buffers._entry_count += node->size();
   node->set_next(_buffers._head);
   _buffers._head = node;
   if (_buffers._tail == nullptr) {
@@ -130,7 +130,7 @@ void G1RedirtyCardsQueueSet::update_tail(BufferNode* node) {
 
 void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) {
   assert(_collecting, "precondition");
-  Atomic::add(&_entry_count, buffer_capacity() - node->index());
+  Atomic::add(&_entry_count, node->size());
   _list.push(*node);
   update_tail(node);
 }
@@ -1264,9 +1264,8 @@ class G1MergeHeapRootsTask : public WorkerTask {
 
   void apply_closure_to_dirty_card_buffers(G1MergeLogBufferCardsClosure* cl, uint worker_id) {
     G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-    size_t buffer_capacity = dcqs.buffer_capacity();
     while (BufferNode* node = _dirty_card_buffers.pop()) {
-      cl->apply_to_buffer(node, buffer_capacity, worker_id);
+      cl->apply_to_buffer(node, worker_id);
       dcqs.deallocate_buffer(node);
     }
   }
@@ -414,13 +414,12 @@ public:
 
   void do_work(uint worker_id) override {
     RedirtyLoggedCardTableEntryClosure cl(G1CollectedHeap::heap(), _evac_failure_regions);
-    const size_t buffer_capacity = _rdcqs->buffer_capacity();
     BufferNode* next = Atomic::load(&_nodes);
     while (next != nullptr) {
       BufferNode* node = next;
       next = Atomic::cmpxchg(&_nodes, node, node->next());
       if (next == node) {
-        cl.apply_to_buffer(node, buffer_capacity, worker_id);
+        cl.apply_to_buffer(node, worker_id);
         next = node->next();
       }
     }
@@ -167,12 +167,10 @@ static void verify_empty_dirty_card_logs() {
   ResourceMark rm;
 
   struct Verifier : public ThreadClosure {
-    size_t _buffer_capacity;
-    Verifier() : _buffer_capacity(G1BarrierSet::dirty_card_queue_set().buffer_capacity()) {}
+    Verifier() {}
     void do_thread(Thread* t) override {
       G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(t);
-      assert((queue.buffer() == nullptr) || (queue.index() == _buffer_capacity),
-             "non-empty dirty card queue for thread %s", t->name());
+      assert(queue.is_empty(), "non-empty dirty card queue for thread %s", t->name());
     }
   } verifier;
   Threads::threads_do(&verifier);
@@ -146,7 +146,7 @@
                                                                             \
   product(size_t, G1SATBBufferSize, 1*K,                                    \
           "Number of entries in an SATB log buffer.")                       \
-          range(1, max_uintx)                                               \
+          constraint(G1SATBBufferSizeConstraintFunc, AtParse)               \
                                                                             \
   develop(intx, G1SATBProcessCompletedThreshold, 20,                        \
           "Number of completed buffers that triggers log processing.")      \
@@ -166,7 +166,7 @@
                                                                             \
   product(size_t, G1UpdateBufferSize, 256,                                  \
           "Size of an update buffer")                                       \
-          range(1, NOT_LP64(32*M) LP64_ONLY(1*G))                           \
+          constraint(G1UpdateBufferSizeConstraintFunc, AtParse)             \
                                                                             \
   product(intx, G1RSetUpdatingPauseTimePercent, 10,                         \
           "A target percentage of time that is allowed to be spend on "     \
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/heapRegionBounds.inline.hpp"
 #include "gc/g1/jvmFlagConstraintsG1.hpp"
+#include "gc/shared/ptrQueue.hpp"
 #include "runtime/globals_extension.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -180,3 +181,32 @@ JVMFlag::Error NewSizeConstraintFuncG1(size_t value, bool verbose) {
 size_t MaxSizeForHeapAlignmentG1() {
   return HeapRegionBounds::max_size();
 }
+
+static JVMFlag::Error buffer_size_constraint_helper(JVMFlagsEnum flagid,
+                                                    size_t value,
+                                                    bool verbose) {
+  if (UseG1GC) {
+    const size_t min_size = 1;
+    const size_t max_size = BufferNode::max_size();
+    JVMFlag* flag = JVMFlag::flag_from_enum(flagid);
+    if ((value < min_size) || (value > max_size)) {
+      JVMFlag::printError(verbose,
+                          "%s (%zu) must be in range [%zu, %zu]\n",
+                          flag->name(), value, min_size, max_size);
+      return JVMFlag::OUT_OF_BOUNDS;
+    }
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error G1SATBBufferSizeConstraintFunc(size_t value, bool verbose) {
+  return buffer_size_constraint_helper(FLAG_MEMBER_ENUM(G1SATBBufferSize),
+                                       value,
+                                       verbose);
+}
+
+JVMFlag::Error G1UpdateBufferSizeConstraintFunc(size_t value, bool verbose) {
+  return buffer_size_constraint_helper(FLAG_MEMBER_ENUM(G1UpdateBufferSize),
+                                       value,
+                                       verbose);
+}
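With the static range(...) bounds replaced by these constraint functions, the effective upper bound for both flags tracks BufferNode::max_size(), i.e. the maximum value of the node's InternalSizeType (see the ptrQueue.hpp hunks below). A rough sketch of the implied limits, assuming the LP64_ONLY/NOT_LP64 typedef introduced by this patch:

#include <cstdint>
#include <limits>

// Illustrative only: the bounds implied by InternalSizeType.
// LP64 builds:     uint32_t -> at most 4294967295 entries per buffer.
// non-LP64 builds: uint16_t -> at most 65535 entries per buffer.
static_assert(std::numeric_limits<uint32_t>::max() == 4294967295u, "LP64 bound");
static_assert(std::numeric_limits<uint16_t>::max() == 65535u, "non-LP64 bound");

So, for example, -XX:G1SATBBufferSize=70000 would now be rejected at argument parsing on a 32-bit VM, where the old range(1, max_uintx) accepted it.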
@@ -43,7 +43,12 @@
   /* G1 Subconstraints */                                                   \
   f(uintx, MaxGCPauseMillisConstraintFuncG1)                                \
   f(uintx, GCPauseIntervalMillisConstraintFuncG1)                           \
-  f(size_t, NewSizeConstraintFuncG1)
+  f(size_t, NewSizeConstraintFuncG1)                                        \
+                                                                            \
+  /* G1 PtrQueue buffer size constraints */                                 \
+  f(size_t, G1SATBBufferSizeConstraintFunc)                                 \
+  f(size_t, G1UpdateBufferSizeConstraintFunc)                               \
+  /* */
 
 G1_GC_CONSTRAINTS(DECLARE_CONSTRAINT)
@@ -30,7 +30,6 @@
 
 PtrQueue::PtrQueue(PtrQueueSet* qset) :
   _index(0),
-  _capacity_in_bytes(index_to_byte_index(qset->buffer_capacity())),
   _buf(nullptr)
 {}
@@ -38,10 +37,23 @@ PtrQueue::~PtrQueue() {
   assert(_buf == nullptr, "queue must be flushed before delete");
 }
 
-BufferNode::AllocatorConfig::AllocatorConfig(size_t size) : _buffer_capacity(size) {}
+size_t PtrQueue::current_capacity() const {
+  if (_buf == nullptr) {
+    return 0;
+  } else {
+    return BufferNode::make_node_from_buffer(_buf)->capacity();
+  }
+}
+
+BufferNode::AllocatorConfig::AllocatorConfig(size_t size)
+  : _buffer_capacity(size)
+{
+  assert(size >= 1, "Invalid buffer capacity %zu", size);
+  assert(size <= max_size(), "Invalid buffer capacity %zu", size);
+}
 
 void* BufferNode::AllocatorConfig::allocate() {
-  size_t byte_size = _buffer_capacity * sizeof(void*);
+  size_t byte_size = buffer_capacity() * sizeof(void*);
   return NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
 }
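current_capacity() can ask the node because a queue's _buf points directly at the flexible array inside a BufferNode, so the owning node is recoverable by pointer arithmetic. A standalone analogue of that round trip, using a hypothetical struct (the real layout is the BufferNode class further down):

#include <cstddef>

// Hypothetical stand-in for BufferNode's layout: a header followed by a
// pseudo flexible array member, as in the real class.
struct Node {
  size_t index;
  size_t capacity;
  void*  buffer[1];  // entries start here
  static size_t buffer_offset() { return offsetof(Node, buffer); }
};

// Recover the node from a pointer to its buffer, mirroring
// BufferNode::make_node_from_buffer:
inline Node* node_from_buffer(void** buf) {
  return reinterpret_cast<Node*>(reinterpret_cast<char*>(buf) - Node::buffer_offset());
}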
@@ -53,21 +65,22 @@ void BufferNode::AllocatorConfig::deallocate(void* node) {
 BufferNode::Allocator::Allocator(const char* name, size_t buffer_capacity) :
   _config(buffer_capacity),
   _free_list(name, &_config)
-{
-
-}
+{}
 
 size_t BufferNode::Allocator::free_count() const {
   return _free_list.free_count();
 }
 
 BufferNode* BufferNode::Allocator::allocate() {
-  return ::new (_free_list.allocate()) BufferNode();
+  auto internal_capacity = static_cast<InternalSizeType>(buffer_capacity());
+  return ::new (_free_list.allocate()) BufferNode(internal_capacity);
 }
 
 void BufferNode::Allocator::release(BufferNode* node) {
   assert(node != nullptr, "precondition");
   assert(node->next() == nullptr, "precondition");
+  assert(node->capacity() == buffer_capacity(),
+         "Wrong size %zu, expected %zu", node->capacity(), buffer_capacity());
   node->~BufferNode();
   _free_list.release(node);
 }
@@ -79,9 +92,7 @@ PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :
 PtrQueueSet::~PtrQueueSet() {}
 
 void PtrQueueSet::reset_queue(PtrQueue& queue) {
-  if (queue.buffer() != nullptr) {
-    queue.set_index(buffer_capacity());
-  }
+  queue.set_index(queue.current_capacity());
 }
 
 void PtrQueueSet::flush_queue(PtrQueue& queue) {
@@ -91,7 +102,7 @@ void PtrQueueSet::flush_queue(PtrQueue& queue) {
     queue.set_buffer(nullptr);
     queue.set_index(0);
     BufferNode* node = BufferNode::make_node_from_buffer(buffer, index);
-    if (index == buffer_capacity()) {
+    if (index == node->capacity()) {
       deallocate_buffer(node);
     } else {
       enqueue_completed_buffer(node);
@@ -128,8 +139,9 @@ BufferNode* PtrQueueSet::exchange_buffer_with_new(PtrQueue& queue) {
 }
 
 void PtrQueueSet::install_new_buffer(PtrQueue& queue) {
-  queue.set_buffer(allocate_buffer());
-  queue.set_index(buffer_capacity());
+  BufferNode* node = _allocator->allocate();
+  queue.set_buffer(BufferNode::make_buffer_from_node(node));
+  queue.set_index(node->capacity());
 }
 
 void** PtrQueueSet::allocate_buffer() {
@@ -32,6 +32,7 @@
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/lockFreeStack.hpp"
 #include "utilities/sizes.hpp"
+#include <limits>
 
 // There are various techniques that require threads to be able to log
 // addresses. For example, a generational write barrier might log
@@ -50,18 +51,8 @@ class PtrQueue {
   // Value is always pointer-size aligned.
   size_t _index;
 
-  // Size of the current buffer, in bytes.
-  // Value is always pointer-size aligned.
-  size_t _capacity_in_bytes;
-
   static const size_t _element_size = sizeof(void*);
 
-  // Get the capacity, in bytes. The capacity must have been set.
-  size_t capacity_in_bytes() const {
-    assert(_capacity_in_bytes > 0, "capacity not set");
-    return _capacity_in_bytes;
-  }
-
   static size_t byte_index_to_index(size_t ind) {
     assert(is_aligned(ind, _element_size), "precondition");
     return ind / _element_size;
@@ -92,17 +83,19 @@ public:
   }
 
   void set_index(size_t new_index) {
-    assert(new_index <= capacity(), "precondition");
+    assert(new_index <= current_capacity(), "precondition");
     _index = index_to_byte_index(new_index);
   }
 
-  size_t capacity() const {
-    return byte_index_to_index(capacity_in_bytes());
-  }
+  // Returns the capacity of the buffer, or 0 if the queue doesn't currently
+  // have a buffer.
+  size_t current_capacity() const;
+
+  bool is_empty() const { return index() == current_capacity(); }
+  size_t size() const { return current_capacity() - index(); }
 
 protected:
   // To support compiler.
   template<typename Derived>
   static ByteSize byte_offset_of_index() {
     return byte_offset_of(Derived, _index);
@@ -119,12 +112,19 @@ protected:
 };
 
 class BufferNode {
-  size_t _index;
+  using InternalSizeType = LP64_ONLY(uint32_t) NOT_LP64(uint16_t);
+  static_assert(sizeof(InternalSizeType) <= sizeof(size_t), "assumption");
+
+  InternalSizeType _index;
+  InternalSizeType _capacity;
   BufferNode* volatile _next;
   void* _buffer[1]; // Pseudo flexible array member.
 
-  BufferNode() : _index(0), _next(nullptr) { }
-  ~BufferNode() { }
+  BufferNode(InternalSizeType capacity)
+    : _index(capacity), _capacity(capacity), _next(nullptr)
+  {}
+
+  ~BufferNode() = default;
 
   NONCOPYABLE(BufferNode);
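Narrowing _index and adding _capacity is free on LP64: the two 32-bit fields pack into the 8 bytes the old size_t _index occupied, so the node header does not grow. A quick check of that packing assumption (illustrative, stated for LP64 where size_t is 8 bytes):

#include <cstdint>

// Two uint32_t fields fit exactly where one 8-byte size_t _index was.
static_assert(2 * sizeof(uint32_t) == sizeof(uint64_t), "packing assumption");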
@@ -133,19 +133,36 @@ class BufferNode {
   }
 
 public:
+  static constexpr size_t max_size() {
+    return std::numeric_limits<InternalSizeType>::max();
+  }
+
   static BufferNode* volatile* next_ptr(BufferNode& bn) { return &bn._next; }
   typedef LockFreeStack<BufferNode, &next_ptr> Stack;
 
   BufferNode* next() const { return _next; }
   void set_next(BufferNode* n) { _next = n; }
   size_t index() const { return _index; }
-  void set_index(size_t i) { _index = i; }
+
+  void set_index(size_t i) {
+    assert(i <= capacity(), "precondition");
+    _index = static_cast<InternalSizeType>(i);
+  }
+
+  size_t capacity() const { return _capacity; }
+
+  bool is_empty() const { return index() == capacity(); }
+  size_t size() const { return capacity() - index(); }
 
+  // Return the BufferNode containing the buffer, WITHOUT setting its index.
+  static BufferNode* make_node_from_buffer(void** buffer) {
+    char* base = reinterpret_cast<char*>(buffer) - buffer_offset();
+    return reinterpret_cast<BufferNode*>(base);
+  }
+
   // Return the BufferNode containing the buffer, after setting its index.
   static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
-    BufferNode* node =
-      reinterpret_cast<BufferNode*>(
-        reinterpret_cast<char*>(buffer) - buffer_offset());
+    BufferNode* node = make_node_from_buffer(buffer);
     node->set_index(index);
     return node;
   }
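These accessors state the long-standing fill convention in one place: buffers fill from high index toward zero. A freshly installed buffer has index() == capacity() (empty), each enqueue decrements the index, and index() == 0 means full, which is what triggers handle_zero_index above. Worked through for an illustrative capacity of 1024:

// capacity() == 1024, index() == 1024  ->  is_empty() == true, size() == 0
// after one enqueue:   index() == 1023 ->  size() == 1
// buffer full:         index() == 0    ->  size() == 1024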
@@ -166,6 +183,7 @@ public:
 // FreeListAllocator.
 class BufferNode::AllocatorConfig : public FreeListConfig {
   const size_t _buffer_capacity;
+
 public:
   explicit AllocatorConfig(size_t size);
@@ -60,7 +60,7 @@ static void print_satb_buffer(const char* name,
 }
 
 void SATBMarkQueue::print(const char* name) {
-  print_satb_buffer(name, _buf, index(), capacity());
+  print_satb_buffer(name, _buf, index(), current_capacity());
 }
 
 #endif // PRODUCT
@@ -193,10 +193,10 @@ void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active)
     _qset(qset), _active(active) {}
   virtual void do_thread(Thread* t) {
     SATBMarkQueue& queue = _qset->satb_queue_for_thread(t);
     if (queue.buffer() != nullptr) {
-      assert(!_active || queue.index() == _qset->buffer_capacity(),
-             "queues should be empty when activated");
-      queue.set_index(_qset->buffer_capacity());
+      if (_active) {
+        assert(queue.is_empty(), "queues should be empty when activated");
+      } else {
+        queue.set_index(queue.current_capacity());
+      }
       queue.set_active(_active);
     }
@@ -208,10 +208,7 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl)
   BufferNode* nd = get_completed_buffer();
   if (nd != nullptr) {
     void **buf = BufferNode::make_buffer_from_node(nd);
-    size_t index = nd->index();
-    size_t size = buffer_capacity();
-    assert(index <= size, "invariant");
-    cl->do_buffer(buf + index, size - index);
+    cl->do_buffer(buf + nd->index(), nd->size());
     deallocate_buffer(nd);
     return true;
   } else {
@@ -250,14 +247,15 @@ void SATBMarkQueueSet::handle_zero_index(SATBMarkQueue& queue) {
 }
 
 bool SATBMarkQueueSet::should_enqueue_buffer(SATBMarkQueue& queue) {
+  assert(queue.buffer() != nullptr, "precondition");
   // Keep the current buffer if filtered index >= threshold.
   size_t threshold = buffer_enqueue_threshold();
   // Ensure we'll enqueue completely full buffers.
   assert(threshold > 0, "enqueue threshold = 0");
   // Ensure we won't enqueue empty buffers.
-  assert(threshold <= buffer_capacity(),
+  assert(threshold <= queue.current_capacity(),
          "enqueue threshold %zu exceeds capacity %zu",
-         threshold, buffer_capacity());
+         threshold, queue.current_capacity());
   return queue.index() < threshold;
 }
@@ -310,7 +308,7 @@ void SATBMarkQueueSet::print_all(const char* msg) {
   while (nd != nullptr) {
     void** buf = BufferNode::make_buffer_from_node(nd);
     os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
-    print_satb_buffer(buffer, buf, nd->index(), buffer_capacity());
+    print_satb_buffer(buffer, buf, nd->index(), nd->capacity());
     nd = nd->next();
     i += 1;
   }
@@ -175,13 +175,13 @@ inline void SATBMarkQueueSet::apply_filter(Filter filter_out, SATBMarkQueue& que
   void** buf = queue.buffer();
 
   if (buf == nullptr) {
-    // nothing to do
+    // Nothing to do, and avoid pointer arithmetic on nullptr below.
     return;
   }
 
   // Two-fingered compaction toward the end.
-  void** src = &buf[queue.index()];
-  void** dst = &buf[buffer_capacity()];
+  void** src = buf + queue.index();
+  void** dst = buf + queue.current_capacity();
   assert(src <= dst, "invariant");
   for ( ; src < dst; ++src) {
     // Search low to high for an entry to keep.
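For reference, the two-fingered compaction this last hunk touches keeps surviving entries in the high end of the buffer, consistent with the fill convention noted earlier. A standalone restatement of the loop, with simplified names and a filter_out predicate that returns true for entries to drop (a sketch, not the patched source):

#include <cstddef>

// Simplified restatement of the two-fingered filter compaction.
// Entries live in [index, cap); returns the new index after filtering,
// so kept entries occupy [result, cap).
template<typename FilterOut>
size_t two_finger_compact(void** buf, size_t index, size_t cap, FilterOut filter_out) {
  void** src = buf + index;  // low finger: scans upward for keepers
  void** dst = buf + cap;    // high finger: scans downward for discards
  for ( ; src < dst; ++src) {
    void* entry = *src;
    if (!filter_out(entry)) {
      // Keeper found; look high-to-low for a discarded slot to reuse.
      while (src < --dst) {
        if (filter_out(*dst)) {
          *dst = entry;  // overwrite the discard with the keeper
          break;
        }
      }
    }
  }
  return dst - buf;
}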