8258252: Move PtrQueue enqueue to PtrQueueSet subclasses

Reviewed-by: tschatzl, rkennke
Kim Barrett 2020-12-16 07:53:40 +00:00
parent 17ace8339d
commit cdb53422e8
22 changed files with 253 additions and 185 deletions

src/hotspot/share/gc/g1/g1BarrierSet.cpp
@@ -65,7 +65,8 @@ G1BarrierSet::G1BarrierSet(G1CardTable* card_table) :
void G1BarrierSet::enqueue(oop pre_val) {
// Nulls should have been already filtered.
assert(oopDesc::is_oop(pre_val, true), "Error");
G1ThreadLocalData::satb_mark_queue(Thread::current()).enqueue(pre_val);
SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(Thread::current());
G1BarrierSet::satb_mark_queue_set().enqueue(queue, pre_val);
}
template <class T> void
@@ -99,7 +100,8 @@ void G1BarrierSet::write_ref_field_post_slow(volatile CardValue* byte) {
if (*byte != G1CardTable::dirty_card_val()) {
*byte = G1CardTable::dirty_card_val();
Thread* thr = Thread::current();
G1ThreadLocalData::dirty_card_queue(thr).enqueue(byte);
G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(thr);
G1BarrierSet::dirty_card_queue_set().enqueue(queue, byte);
}
}
@@ -116,13 +118,14 @@ void G1BarrierSet::invalidate(MemRegion mr) {
OrderAccess::storeload();
// Enqueue if necessary.
Thread* thr = Thread::current();
G1DirtyCardQueueSet& qset = G1BarrierSet::dirty_card_queue_set();
G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(thr);
for (; byte <= last_byte; byte++) {
CardValue bv = *byte;
if ((bv != G1CardTable::g1_young_card_val()) &&
(bv != G1CardTable::dirty_card_val())) {
*byte = G1CardTable::dirty_card_val();
queue.enqueue(byte);
qset.enqueue(queue, byte);
}
}
}

src/hotspot/share/gc/g1/g1BarrierSetRuntime.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,10 +52,13 @@ JRT_LEAF(void, G1BarrierSetRuntime::write_ref_field_pre_entry(oopDesc* orig, Jav
}
assert(oopDesc::is_oop(orig, true /* ignore mark word */), "Error");
// store the original value that was in the field reference
G1ThreadLocalData::satb_mark_queue(thread).enqueue(orig);
SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
G1BarrierSet::satb_mark_queue_set().enqueue(queue, orig);
JRT_END
// G1 post write barrier slowpath
JRT_LEAF(void, G1BarrierSetRuntime::write_ref_field_post_entry(void* card_addr, JavaThread* thread))
G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
JRT_LEAF(void, G1BarrierSetRuntime::write_ref_field_post_entry(volatile G1CardTable::CardValue* card_addr,
JavaThread* thread))
G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(thread);
G1BarrierSet::dirty_card_queue_set().enqueue(queue, card_addr);
JRT_END

src/hotspot/share/gc/g1/g1BarrierSetRuntime.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,8 @@
#ifndef SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP
#define SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP
#include "memory/allocation.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@@ -35,6 +36,8 @@ class JavaThread;
class G1BarrierSetRuntime: public AllStatic {
public:
using CardValue = G1CardTable::CardValue;
// Arraycopy stub generator
static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
@@ -42,7 +45,7 @@ public:
// C2 slow-path runtime calls.
static void write_ref_field_pre_entry(oopDesc* orig, JavaThread *thread);
static void write_ref_field_post_entry(void* card_addr, JavaThread* thread);
static void write_ref_field_post_entry(volatile CardValue* card_addr, JavaThread* thread);
};
#endif // SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP

src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.inline.hpp"
#include "gc/g1/g1BufferNodeList.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -71,14 +72,6 @@ void G1DirtyCardQueue::on_thread_detach() {
dirty_card_qset()->record_detached_refinement_stats(_refinement_stats);
}
void G1DirtyCardQueue::handle_completed_buffer() {
assert(!is_empty(), "precondition");
_refinement_stats->inc_dirtied_cards(size());
BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
allocate_buffer();
dirty_card_qset()->handle_completed_buffer(node, _refinement_stats);
}
// Assumed to be zero by concurrent threads.
static uint par_ids_start() { return 0; }
@@ -106,8 +99,28 @@ uint G1DirtyCardQueueSet::num_par_ids() {
return (uint)os::initial_active_processor_count();
}
void G1DirtyCardQueueSet::enqueue(G1DirtyCardQueue& queue,
volatile CardValue* card_ptr) {
CardValue* value = const_cast<CardValue*>(card_ptr);
if (!try_enqueue(queue, value)) {
handle_zero_index(queue);
retry_enqueue(queue, value);
}
}
void G1DirtyCardQueueSet::handle_zero_index(G1DirtyCardQueue& queue) {
assert(queue.index() == 0, "precondition");
BufferNode* old_node = exchange_buffer_with_new(queue);
if (old_node != nullptr) {
G1ConcurrentRefineStats* stats = queue.refinement_stats();
stats->inc_dirtied_cards(buffer_size());
handle_completed_buffer(old_node, stats);
}
}
void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(t);
G1BarrierSet::dirty_card_queue_set().handle_zero_index(queue);
}
#ifdef ASSERT
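
Note on the new slow path above: dirtied-card statistics are now credited a whole buffer at a time, at the point where handle_zero_index hands off a completed buffer, and only when the queue actually had a buffer (exchange_buffer_with_new returns null for a queue that was never given one). Below is a minimal standalone model of that accounting; Stats and Queue are simplified stand-ins, not the HotSpot classes, and refinement and concurrency are elided.

#include <cstdio>
#include <cstddef>

// Toy model of G1DirtyCardQueueSet::handle_zero_index's two cases.
struct Stats { size_t dirtied_cards = 0; };
struct Queue { void** buf = nullptr; size_t index = 0; Stats stats; };

const size_t BUFFER_SIZE = 256;     // cards per buffer (illustrative)

void handle_zero_index(Queue& q) {
  void** old = q.buf;               // exchange_buffer_with_new
  q.buf = new void*[BUFFER_SIZE];
  q.index = BUFFER_SIZE;
  if (old != nullptr) {
    // A full buffer was handed off: credit a whole buffer of dirtied
    // cards in one step, then pass the buffer on (hand-off elided).
    q.stats.dirtied_cards += BUFFER_SIZE;
    delete[] old;
  }
  // If old was null, the queue simply had no buffer yet: no stats.
}

int main() {
  Queue q;
  handle_zero_index(q);             // installs the initial buffer
  q.index = 0;                      // pretend the mutator filled it
  handle_zero_index(q);             // hand-off: counts 256 cards
  std::printf("dirtied cards: %zu\n", q.stats.dirtied_cards);   // 256
  delete[] q.buf;
}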

src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp
@@ -27,6 +27,7 @@
#include "gc/g1/g1BufferNodeList.hpp"
#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
@@ -41,9 +42,6 @@ class Thread;
class G1DirtyCardQueue: public PtrQueue {
G1ConcurrentRefineStats* _refinement_stats;
protected:
virtual void handle_completed_buffer();
public:
G1DirtyCardQueue(G1DirtyCardQueueSet* qset);
@@ -263,6 +261,19 @@ class G1DirtyCardQueueSet: public PtrQueueSet {
// if none available.
BufferNode* get_completed_buffer();
// Called when queue is full or has no buffer.
void handle_zero_index(G1DirtyCardQueue& queue);
// Enqueue the buffer, and optionally perform refinement by the mutator.
// Mutator refinement is only done by Java threads, and only if there
// are more than max_cards (possibly padded) cards in the completed
// buffers. Updates stats.
//
// Mutator refinement, if performed, stops processing a buffer if
// SuspendibleThreadSet::should_yield(), recording the incompletely
// processed buffer for later processing of the remainder.
void handle_completed_buffer(BufferNode* node, G1ConcurrentRefineStats* stats);
public:
G1DirtyCardQueueSet(BufferNode::Allocator* allocator);
~G1DirtyCardQueueSet();
@@ -302,16 +313,8 @@ public:
G1BufferNodeList take_all_completed_buffers();
// Helper for G1DirtyCardQueue::handle_completed_buffer().
// Enqueue the buffer, and optionally perform refinement by the mutator.
// Mutator refinement is only done by Java threads, and only if there
// are more than max_cards (possibly padded) cards in the completed
// buffers. Updates stats.
//
// Mutator refinement, if performed, stops processing a buffer if
// SuspendibleThreadSet::should_yield(), recording the incompletely
// processed buffer for later processing of the remainder.
void handle_completed_buffer(BufferNode* node, G1ConcurrentRefineStats* stats);
using CardValue = G1CardTable::CardValue;
void enqueue(G1DirtyCardQueue& queue, volatile CardValue* card_ptr);
// If there are more than stop_at cards in the completed buffers, pop
// a buffer, refine its contents, and return true. Otherwise return

src/hotspot/share/gc/g1/g1EvacFailure.cpp
@@ -40,6 +40,7 @@
class UpdateLogBuffersDeferred : public BasicOopIterateClosure {
private:
G1CollectedHeap* _g1h;
G1RedirtyCardsLocalQueueSet* _rdc_local_qset;
G1RedirtyCardsQueue* _rdcq;
G1CardTable* _ct;
@@ -48,8 +49,13 @@ private:
size_t _last_enqueued_card;
public:
UpdateLogBuffersDeferred(G1RedirtyCardsQueue* rdcq) :
_g1h(G1CollectedHeap::heap()), _rdcq(rdcq), _ct(_g1h->card_table()), _last_enqueued_card(SIZE_MAX) {}
UpdateLogBuffersDeferred(G1RedirtyCardsLocalQueueSet* rdc_local_qset,
G1RedirtyCardsQueue* rdcq) :
_g1h(G1CollectedHeap::heap()),
_rdc_local_qset(rdc_local_qset),
_rdcq(rdcq),
_ct(_g1h->card_table()),
_last_enqueued_card(SIZE_MAX) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
@@ -67,7 +73,7 @@ public:
}
size_t card_index = _ct->index_for(p);
if (card_index != _last_enqueued_card) {
_rdcq->enqueue(_ct->byte_for_index(card_index));
_rdc_local_qset->enqueue(*_rdcq, _ct->byte_for_index(card_index));
_last_enqueued_card = card_index;
}
}
@@ -209,7 +215,7 @@ public:
_worker_id(worker_id),
_rdc_local_qset(rdcqs),
_rdcq(&_rdc_local_qset),
_log_buffer_cl(&_rdcq) {
_log_buffer_cl(&_rdc_local_qset, &_rdcq) {
}
~RemoveSelfForwardPtrHRClosure() {

src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
@@ -88,8 +88,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
// Used to check whether string dedup should be applied to an object.
Klass* _string_klass_or_null;
G1RedirtyCardsQueue& redirty_cards_queue() { return _rdcq; }
G1CardTable* ct() { return _ct; }
G1CardTable* ct() { return _ct; }
G1HeapRegionAttr dest(G1HeapRegionAttr original) const {
assert(original.is_valid(),
@@ -149,7 +148,7 @@ public:
size_t card_index = ct()->index_for(p);
// If the card hasn't been added to the buffer, do it.
if (_last_enqueued_card != card_index) {
redirty_cards_queue().enqueue(ct()->byte_for_index(card_index));
_rdc_local_qset.enqueue(_rdcq, ct()->byte_for_index(card_index));
_last_enqueued_card = card_index;
}
}

src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp
@@ -28,7 +28,7 @@
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
// G1RedirtyCardsQueueBase::LocalQSet
// G1RedirtyCardsLocalQueueSet
G1RedirtyCardsLocalQueueSet::G1RedirtyCardsLocalQueueSet(G1RedirtyCardsQueueSet* shared_qset) :
PtrQueueSet(shared_qset->allocator()),
@@ -53,6 +53,16 @@ void G1RedirtyCardsLocalQueueSet::enqueue_completed_buffer(BufferNode* node) {
}
}
void G1RedirtyCardsLocalQueueSet::enqueue(G1RedirtyCardsQueue& queue, void* value) {
if (!try_enqueue(queue, value)) {
BufferNode* old_node = exchange_buffer_with_new(queue);
if (old_node != nullptr) {
enqueue_completed_buffer(old_node);
}
retry_enqueue(queue, value);
}
}
void G1RedirtyCardsLocalQueueSet::flush() {
_shared_qset->add_bufferlist(_buffers);
_buffers = G1BufferNodeList();
@@ -70,10 +80,6 @@ G1RedirtyCardsQueue::~G1RedirtyCardsQueue() {
}
#endif // ASSERT
void G1RedirtyCardsQueue::handle_completed_buffer() {
enqueue_completed_buffer();
}
void G1RedirtyCardsQueue::flush() {
flush_impl();
}
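
The local qset above is a per-worker staging area: completed buffers accumulate on a worker-private list with no synchronization, and flush() publishes the whole list to the shared qset in one step. Below is a minimal sketch of that two-level scheme; the types are illustrative stand-ins, with a std::vector playing the role of G1BufferNodeList and a mutex standing in for the shared set's synchronization.

#include <cstdio>
#include <mutex>
#include <vector>

// Two-level buffer collection, modeled on G1RedirtyCardsLocalQueueSet:
// workers append to a private list, then merge it into the shared set
// in one locked step rather than synchronizing per buffer.
using Buffer = std::vector<void*>;

struct SharedQSet {
  std::mutex lock;
  std::vector<Buffer> buffers;
  void add_bufferlist(std::vector<Buffer>&& list) {
    std::lock_guard<std::mutex> g(lock);   // one sync per flush
    for (Buffer& b : list) buffers.push_back(std::move(b));
  }
};

struct LocalQSet {
  SharedQSet* shared;
  std::vector<Buffer> local;               // worker-private, no locking
  explicit LocalQSet(SharedQSet* s) : shared(s) {}
  void enqueue_completed_buffer(Buffer b) { local.push_back(std::move(b)); }
  void flush() {                           // publish everything at once
    shared->add_bufferlist(std::move(local));
    local.clear();
  }
};

int main() {
  SharedQSet shared;
  LocalQSet worker(&shared);
  worker.enqueue_completed_buffer(Buffer(4));
  worker.enqueue_completed_buffer(Buffer(4));
  worker.flush();
  std::printf("shared now holds %zu buffers\n", shared.buffers.size()); // 2
}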

src/hotspot/share/gc/g1/g1RedirtyCardsQueue.hpp
@@ -29,6 +29,7 @@
#include "gc/shared/ptrQueue.hpp"
#include "memory/padded.hpp"
class G1RedirtyCardsQueue;
class G1RedirtyCardsQueueSet;
// Provide G1RedirtyCardsQueue with a thread-local qset. It provides an
@@ -42,6 +43,8 @@ public:
G1RedirtyCardsLocalQueueSet(G1RedirtyCardsQueueSet* shared_qset);
~G1RedirtyCardsLocalQueueSet() NOT_DEBUG(= default);
void enqueue(G1RedirtyCardsQueue& queue, void* value);
// Add the buffer to the local list.
virtual void enqueue_completed_buffer(BufferNode* node);
@@ -51,9 +54,6 @@ public:
// Worker-local queues of card table entries.
class G1RedirtyCardsQueue : public PtrQueue {
protected:
virtual void handle_completed_buffer();
public:
G1RedirtyCardsQueue(G1RedirtyCardsLocalQueueSet* qset);
~G1RedirtyCardsQueue() NOT_DEBUG(= default);

src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
@@ -37,10 +38,11 @@ G1SATBMarkQueueSet::G1SATBMarkQueueSet(BufferNode::Allocator* allocator) :
{}
void G1SATBMarkQueueSet::handle_zero_index_for_thread(Thread* t) {
G1ThreadLocalData::satb_mark_queue(t).handle_zero_index();
G1SATBMarkQueueSet& qset = G1BarrierSet::satb_mark_queue_set();
qset.handle_zero_index(qset.satb_queue_for_thread(t));
}
SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const{
SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const {
return G1ThreadLocalData::satb_mark_queue(t);
}
@@ -113,6 +115,6 @@ public:
}
};
void G1SATBMarkQueueSet::filter(SATBMarkQueue* queue) {
void G1SATBMarkQueueSet::filter(SATBMarkQueue& queue) {
apply_filter(G1SATBMarkQueueFilterFn(), queue);
}

src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@ public:
static void handle_zero_index_for_thread(Thread* t);
virtual SATBMarkQueue& satb_queue_for_thread(Thread* const t) const;
virtual void filter(SATBMarkQueue* queue);
virtual void filter(SATBMarkQueue& queue);
};
#endif // SHARE_GC_G1_G1SATBMARKQUEUESET_HPP

src/hotspot/share/gc/shared/ptrQueue.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,28 +61,6 @@ void PtrQueue::flush_impl() {
}
}
void PtrQueue::enqueue_known_active(void* ptr) {
while (_index == 0) {
handle_zero_index();
}
assert(_buf != NULL, "postcondition");
assert(index() > 0, "postcondition");
assert(index() <= capacity(), "invariant");
_index -= _element_size;
_buf[index()] = ptr;
}
void PtrQueue::handle_zero_index() {
assert(index() == 0, "precondition");
if (_buf != NULL) {
handle_completed_buffer();
} else {
allocate_buffer();
}
}
void PtrQueue::allocate_buffer() {
_buf = qset()->allocate_buffer();
reset();
@@ -249,6 +227,39 @@ PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :
PtrQueueSet::~PtrQueueSet() {}
bool PtrQueueSet::try_enqueue(PtrQueue& queue, void* value) {
size_t index = queue.index();
if (index == 0) return false;
void** buffer = queue.buffer();
assert(buffer != nullptr, "no buffer but non-zero index");
buffer[--index] = value;
queue.set_index(index);
return true;
}
void PtrQueueSet::retry_enqueue(PtrQueue& queue, void* value) {
assert(queue.index() != 0, "precondition");
assert(queue.buffer() != nullptr, "precondition");
size_t index = queue.index();
queue.buffer()[--index] = value;
queue.set_index(index);
}
BufferNode* PtrQueueSet::exchange_buffer_with_new(PtrQueue& queue) {
BufferNode* node = nullptr;
void** buffer = queue.buffer();
if (buffer != nullptr) {
node = BufferNode::make_node_from_buffer(buffer, queue.index());
}
install_new_buffer(queue);
return node;
}
void PtrQueueSet::install_new_buffer(PtrQueue& queue) {
queue.set_buffer(allocate_buffer());
queue.set_index(buffer_size());
}
void** PtrQueueSet::allocate_buffer() {
BufferNode* node = _allocator->allocate();
return BufferNode::make_buffer_from_node(node);
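
Taken together, try_enqueue, retry_enqueue, exchange_buffer_with_new and install_new_buffer are the entire qset-side enqueue protocol that the subclasses in this change compose: try_enqueue is the fast path, a subclass-specific slow path disposes of the full buffer and installs a fresh one, and retry_enqueue then cannot fail. Below is a self-contained model of the protocol; Queue and QueueSet are hypothetical stand-ins, while the real code tracks byte indices, allocates through BufferNode::Allocator, and runs a per-subclass slow path (refinement, SATB filtering, local lists).

#include <cassert>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for PtrQueue/PtrQueueSet (element indices
// instead of byte indices, plain new[] instead of BufferNode, no
// concurrency).
struct Queue {
  void** buf = nullptr;
  size_t index = 0;   // fills downward; 0 means full or no buffer yet
};

class QueueSet {
  static const size_t BUFFER_SIZE = 4;

  bool try_enqueue(Queue& q, void* value) {
    if (q.index == 0) return false;         // full, or no buffer installed
    q.buf[--q.index] = value;
    return true;
  }

  void retry_enqueue(Queue& q, void* value) {
    assert(q.buf != nullptr && q.index != 0);  // slow path left room
    q.buf[--q.index] = value;
  }

  void install_new_buffer(Queue& q) {
    q.buf = new void*[BUFFER_SIZE];
    q.index = BUFFER_SIZE;                  // empty: next slot at the top
  }

  void** exchange_buffer_with_new(Queue& q) {
    void** old = q.buf;                     // null if queue had no buffer
    install_new_buffer(q);
    return old;
  }

public:
  void enqueue(Queue& q, void* value) {
    if (!try_enqueue(q, value)) {
      // Subclass policy goes here: G1's dirty-card set hands the full
      // buffer to refinement, the SATB sets filter first, and so on.
      void** old = exchange_buffer_with_new(q);
      if (old != nullptr) {
        std::printf("completed buffer handed off\n");
        delete[] old;                       // stand-in for the hand-off
      }
      retry_enqueue(q, value);              // now guaranteed to succeed
    }
  }
};

int main() {
  QueueSet qset;
  Queue q;                    // no buffer yet, so first enqueue is "slow"
  int dummy[9];
  for (int i = 0; i < 9; ++i) qset.enqueue(q, &dummy[i]);
  // 9 enqueues into 4-slot buffers: two full-buffer hand-offs printed.
  delete[] q.buf;
}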

src/hotspot/share/gc/shared/ptrQueue.hpp
@@ -32,9 +32,6 @@
#include "utilities/lockFreeStack.hpp"
#include "utilities/sizes.hpp"
class Mutex;
class Monitor;
// There are various techniques that require threads to be able to log
// addresses. For example, a generational write barrier might log
// the addresses of modified old-generation objects. This type supports
@@ -83,29 +80,11 @@ protected:
// The buffer.
void** _buf;
size_t index() const {
return byte_index_to_index(_index);
}
void set_index(size_t new_index) {
size_t byte_index = index_to_byte_index(new_index);
assert(byte_index <= capacity_in_bytes(), "precondition");
_index = byte_index;
}
size_t capacity() const {
return byte_index_to_index(capacity_in_bytes());
}
PtrQueueSet* qset() const { return _qset; }
// Process queue entries and release resources.
void flush_impl();
// Process (some of) the buffer and leave it in place for further use,
// or enqueue the buffer and allocate a new one.
virtual void handle_completed_buffer() = 0;
void allocate_buffer();
// Enqueue the current buffer in the qset and allocate a new buffer.
@@ -120,6 +99,31 @@ protected:
public:
void** buffer() const { return _buf; }
void set_buffer(void** buffer) { _buf = buffer; }
size_t index_in_bytes() const {
return _index;
}
void set_index_in_bytes(size_t new_index) {
assert(is_aligned(new_index, _element_size), "precondition");
assert(new_index <= capacity_in_bytes(), "precondition");
_index = new_index;
}
size_t index() const {
return byte_index_to_index(index_in_bytes());
}
void set_index(size_t new_index) {
set_index_in_bytes(index_to_byte_index(new_index));
}
size_t capacity() const {
return byte_index_to_index(capacity_in_bytes());
}
// Forcibly set empty.
void reset() {
if (_buf != NULL) {
@@ -127,20 +131,6 @@ public:
}
}
void enqueue(volatile void* ptr) {
enqueue((void*)(ptr));
}
// Enqueues the given "obj".
void enqueue(void* ptr) {
if (!_active) return;
else enqueue_known_active(ptr);
}
void handle_zero_index();
void enqueue_known_active(void* ptr);
// Return the size of the in-use region.
size_t size() const {
size_t result = 0;
@@ -306,6 +296,21 @@ protected:
PtrQueueSet(BufferNode::Allocator* allocator);
~PtrQueueSet();
// Add value to queue's buffer, returning true. If buffer is full
// or if queue doesn't have a buffer, does nothing and returns false.
bool try_enqueue(PtrQueue& queue, void* value);
// Add value to queue's buffer. The queue must have a non-full buffer.
// Used after an initial try_enqueue has failed and the situation resolved.
void retry_enqueue(PtrQueue& queue, void* value);
// Installs a new buffer into queue.
// Returns the old buffer, or null if queue didn't have a buffer.
BufferNode* exchange_buffer_with_new(PtrQueue& queue);
// Installs a new buffer into queue.
void install_new_buffer(PtrQueue& queue);
public:
// Return the associated BufferNode allocator.
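
The index_in_bytes/index pair above is a pure units conversion: _index is stored in bytes so compiled barrier fast paths can use it directly as a byte offset into the buffer, while most C++ callers think in elements of sizeof(void*). A tiny worked example of the conversions and the set_index_in_bytes preconditions follows; the numbers are illustrative, the element size matches PtrQueue's.

#include <cassert>
#include <cstddef>

int main() {
  const size_t element_size = sizeof(void*);       // 8 on LP64
  const size_t capacity = 256;                     // elements per buffer

  size_t index = 200;                              // in elements
  size_t index_in_bytes = index * element_size;    // index_to_byte_index
  assert(index_in_bytes / element_size == index);  // byte_index_to_index

  // Preconditions checked by set_index_in_bytes above:
  assert(index_in_bytes % element_size == 0);      // aligned to elements
  assert(index_in_bytes <= capacity * element_size);  // within capacity

  // The buffer fills from high addresses downward, so index == capacity
  // means empty and index == 0 means full.
  return 0;
}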

src/hotspot/share/gc/shared/satbMarkQueue.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,32 +54,6 @@ void SATBMarkQueue::flush() {
flush_impl();
}
// This method will first apply filtering to the buffer. If filtering
// retains a small enough collection in the buffer, we can continue to
// use the buffer as-is, instead of enqueueing and replacing it.
void SATBMarkQueue::handle_completed_buffer() {
// This method should only be called if there is a non-NULL buffer
// that is full.
assert(index() == 0, "pre-condition");
assert(_buf != NULL, "pre-condition");
filter();
size_t threshold = satb_qset()->buffer_enqueue_threshold();
// Ensure we'll enqueue completely full buffers.
assert(threshold > 0, "enqueue threshold = 0");
// Ensure we won't enqueue empty buffers.
assert(threshold <= capacity(),
"enqueue threshold " SIZE_FORMAT " exceeds capacity " SIZE_FORMAT,
threshold, capacity());
if (index() < threshold) {
// Buffer is sufficiently full; enqueue and allocate a new one.
enqueue_completed_buffer();
} // Else continue to accumulate in buffer.
}
void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
assert(SafepointSynchronize::is_at_safepoint(),
"SATB queues must only be processed at safepoints");
@@ -254,6 +228,41 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl)
}
}
void SATBMarkQueueSet::enqueue_known_active(SATBMarkQueue& queue, oop obj) {
assert(queue.is_active(), "precondition");
void* value = cast_from_oop<void*>(obj);
if (!try_enqueue(queue, value)) {
handle_zero_index(queue);
retry_enqueue(queue, value);
}
}
void SATBMarkQueueSet::handle_zero_index(SATBMarkQueue& queue) {
assert(queue.index() == 0, "precondition");
if (queue.buffer() == nullptr) {
install_new_buffer(queue);
} else {
filter(queue);
if (should_enqueue_buffer(queue)) {
enqueue_completed_buffer(exchange_buffer_with_new(queue));
} // Else continue to use the existing buffer.
}
assert(queue.buffer() != nullptr, "post condition");
assert(queue.index() > 0, "post condition");
}
bool SATBMarkQueueSet::should_enqueue_buffer(SATBMarkQueue& queue) {
// Keep the current buffer if filtered index >= threshold.
size_t threshold = buffer_enqueue_threshold();
// Ensure we'll enqueue completely full buffers.
assert(threshold > 0, "enqueue threshold = 0");
// Ensure we won't enqueue empty buffers.
assert(threshold <= buffer_size(),
"enqueue threshold %zu exceeds capacity %zu",
threshold, buffer_size());
return queue.index() < threshold;
}
// SATB buffer life-cycle - Per-thread queues obtain buffers from the
// qset's buffer allocator, fill them, and push them onto the qset's
// list. The GC concurrently pops buffers from the qset, processes
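
The enqueue-or-keep decision in should_enqueue_buffer is easiest to see with numbers. Buffers fill from the top down, so queue.index() after filtering is the number of free slots regained; the buffer goes to the GC only when fewer than threshold slots are free. A standalone sketch of that arithmetic follows; the buffer size and threshold are made-up values, where the real threshold derives from the qset's buffer-enqueue threshold percentage.

#include <cstdio>
#include <cstddef>

int main() {
  const size_t buffer_size = 1024;          // slots per buffer (made up)
  const size_t threshold   = 614;           // ~60% threshold (made up)

  // index is the number of free slots; 0 is full, buffer_size is empty.
  const size_t index_after_filtering[] = {0, 100, 614, 900};
  for (size_t index : index_after_filtering) {
    bool enqueue = index < threshold;       // the test in the code above
    std::printf("free slots %4zu -> %s\n", index,
                enqueue ? "enqueue for GC, install fresh buffer"
                        : "keep buffer; mutator continues filling");
  }
}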

src/hotspot/share/gc/shared/satbMarkQueue.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "oops/oopsHierarchy.hpp"
class Thread;
class Monitor;
@@ -51,13 +52,6 @@ private:
// Filter out unwanted entries from the buffer.
inline void filter();
// Removes entries from the buffer that are no longer needed.
template<typename Filter>
inline void apply_filter(Filter filter_out);
protected:
virtual void handle_completed_buffer();
public:
SATBMarkQueue(SATBMarkQueueSet* qset);
@@ -115,10 +109,14 @@ protected:
SATBMarkQueueSet(BufferNode::Allocator* allocator);
~SATBMarkQueueSet();
void handle_zero_index(SATBMarkQueue& queue);
// Return true if the queue's buffer should be enqueued, even if not full.
// The default method uses the buffer enqueue threshold.
virtual bool should_enqueue_buffer(SATBMarkQueue& queue);
template<typename Filter>
void apply_filter(Filter filter, SATBMarkQueue* queue) {
queue->apply_filter(filter);
}
void apply_filter(Filter filter, SATBMarkQueue& queue);
public:
virtual SATBMarkQueue& satb_queue_for_thread(Thread* const t) const = 0;
@@ -134,14 +132,17 @@ public:
size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; }
void set_buffer_enqueue_threshold_percentage(uint value);
virtual void filter(SATBMarkQueue* queue) = 0;
// If there exists some completed buffer, pop and process it, and
// return true. Otherwise return false. Processing a buffer
// consists of applying the closure to the active range of the
// buffer; the leading entries may be excluded due to filtering.
bool apply_closure_to_completed_buffer(SATBBufferClosure* cl);
void enqueue(SATBMarkQueue& queue, oop obj) {
if (queue.is_active()) enqueue_known_active(queue, obj);
}
void enqueue_known_active(SATBMarkQueue& queue, oop obj);
virtual void filter(SATBMarkQueue& queue) = 0;
virtual void enqueue_completed_buffer(BufferNode* node);
// The number of buffers in the list. Racy and not updated atomically
@@ -169,17 +170,17 @@ inline SATBMarkQueueSet* SATBMarkQueue::satb_qset() const {
}
inline void SATBMarkQueue::filter() {
satb_qset()->filter(this);
satb_qset()->filter(*this);
}
// Removes entries from the buffer that are no longer needed, as
// determined by filter. If e is a void* entry in the buffer,
// Removes entries from queue's buffer that are no longer needed, as
// determined by filter. If e is a void* entry in queue's buffer,
// filter_out(e) must be a valid expression whose value is convertible
// to bool. Entries are removed (filtered out) if the result is true,
// retained if false.
template<typename Filter>
inline void SATBMarkQueue::apply_filter(Filter filter_out) {
void** buf = this->_buf;
inline void SATBMarkQueueSet::apply_filter(Filter filter_out, SATBMarkQueue& queue) {
void** buf = queue.buffer();
if (buf == NULL) {
// nothing to do
@@ -187,8 +188,8 @@ inline void SATBMarkQueue::apply_filter(Filter filter_out) {
}
// Two-fingered compaction toward the end.
void** src = &buf[this->index()];
void** dst = &buf[this->capacity()];
void** src = &buf[queue.index()];
void** dst = &buf[buffer_size()];
assert(src <= dst, "invariant");
for ( ; src < dst; ++src) {
// Search low to high for an entry to keep.
@@ -206,7 +207,7 @@ inline void SATBMarkQueue::apply_filter(Filter filter_out) {
}
// dst points to the lowest retained entry, or the end of the buffer
// if all the entries were filtered out.
this->set_index(dst - buf);
queue.set_index(dst - buf);
}
#endif // SHARE_GC_SHARED_SATBMARKQUEUE_HPP
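
For readers new to this code, here is a compact standalone demo of the two-fingered compaction used by apply_filter above, run on a plain array with a made-up predicate (filter out odd values). Keepers found scanning up from index overwrite discards found scanning down from the top, so dst ends at the lowest retained entry; the retained order may be permuted, which the SATB queue tolerates. The inner discard-search loop is an assumption filled in from the surrounding comments, since the hunk elides it.

#include <cstdio>
#include <cstddef>
#include <cstdint>

static bool filter_out(void* e) {            // made-up predicate:
  return ((uintptr_t)e & 1) != 0;            // discard odd values
}

int main() {
  // Active region is [index, capacity), matching PtrQueue's layout.
  const size_t capacity = 8;
  size_t index = 2;
  void* buf[capacity];
  for (size_t i = index; i < capacity; ++i) buf[i] = (void*)(uintptr_t)i;

  void** src = &buf[index];                  // two-fingered compaction
  void** dst = &buf[capacity];
  for ( ; src < dst; ++src) {
    void* entry = *src;                      // search low-to-high: keeper?
    if (!filter_out(entry)) {
      while (src < --dst) {                  // search high-to-low: discard?
        if (filter_out(*dst)) {
          *dst = entry;                      // replace discard with keeper
          break;
        }
      }
    }
  }
  index = dst - buf;                         // lowest retained entry
  std::printf("retained [%zu, %zu):", index, capacity);
  for (size_t i = index; i < capacity; ++i)
    std::printf(" %zu", (size_t)(uintptr_t)buf[i]);
  std::printf("\n");                         // retained [5, 8): 4 6 2
}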

src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
@@ -142,7 +142,8 @@ inline void ShenandoahBarrierSet::enqueue(oop obj) {
// filtering here helps to avoid wasteful SATB queueing work to begin with.
if (!_heap->requires_marking(obj)) return;
ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).enqueue_known_active(obj);
SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(Thread::current());
_satb_mark_queue_set.enqueue_known_active(queue, obj);
}
template <DecoratorSet decorators, typename T>
@@ -349,7 +350,7 @@ void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) {
obj = fwd;
}
if (ENQUEUE && !ctx->is_marked_strong(obj)) {
queue.enqueue_known_active(obj);
_satb_mark_queue_set.enqueue_known_active(queue, obj);
}
}
}

src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp
@@ -47,7 +47,8 @@ JRT_LEAF(void, ShenandoahRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaT
shenandoah_assert_correct(NULL, orig);
// store the original value that was in the field reference
assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "Shouldn't be here otherwise");
ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(orig);
SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
ShenandoahBarrierSet::satb_mark_queue_set().enqueue_known_active(queue, orig);
JRT_END
JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_strong(oopDesc* src, oop* load_addr))

src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.cpp
@@ -49,14 +49,15 @@ public:
}
};
void ShenandoahSATBMarkQueueSet::filter(SATBMarkQueue* queue) {
void ShenandoahSATBMarkQueueSet::filter(SATBMarkQueue& queue) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
apply_filter(ShenandoahSATBMarkQueueFilterFn(heap), queue);
}
void ShenandoahSATBMarkQueue::handle_completed_buffer() {
SATBMarkQueue::handle_completed_buffer();
if (!is_empty()) {
bool ShenandoahSATBMarkQueueSet::should_enqueue_buffer(SATBMarkQueue& queue) {
if (SATBMarkQueueSet::should_enqueue_buffer(queue)) {
return true;
} else if (queue.index() < buffer_size()) { // Is buffer not empty?
Thread* t = Thread::current();
if (ShenandoahThreadLocalData::is_force_satb_flush(t)) {
// Non-empty buffer is compacted, and we decided not to enqueue it.
@@ -64,7 +65,8 @@ void ShenandoahSATBMarkQueue::handle_completed_buffer() {
// This avoid dealing with these leftovers during the final-mark, after
// the buffers are drained completely. See JDK-8205353 for more discussion.
ShenandoahThreadLocalData::set_force_satb_flush(t, false);
enqueue_completed_buffer();
return true;
}
}
return false;
}
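
Structurally, this replaces Shenandoah's override of the queue's handle_completed_buffer with an override of the qset's should_enqueue_buffer hook: the base class decides by threshold, and the subclass adds a force-flush escape hatch. A skeletal model of the hook pattern follows; the types and values are hypothetical, and the real flush flag is per-thread, consulted and cleared exactly as in the code above.

#include <cstdio>
#include <cstddef>

struct Queue { size_t index; };              // free slots after filtering

class BaseQSet {
protected:
  static const size_t BUFFER_SIZE = 1024;    // illustrative values
  static const size_t THRESHOLD = 614;
  // Base policy: enqueue when filtering left the buffer mostly full.
  virtual bool should_enqueue_buffer(Queue& q) { return q.index < THRESHOLD; }
public:
  virtual ~BaseQSet() = default;
  bool decide(Queue& q) { return should_enqueue_buffer(q); }
};

class ShenandoahLikeQSet : public BaseQSet {
  bool force_flush = true;   // stands in for the per-thread SATB flush flag
protected:
  bool should_enqueue_buffer(Queue& q) override {
    if (BaseQSet::should_enqueue_buffer(q)) return true;
    if (q.index < BUFFER_SIZE && force_flush) {  // non-empty + forced flush
      force_flush = false;   // one forced flush per request
      return true;
    }
    return false;
  }
};

int main() {
  ShenandoahLikeQSet qset;
  Queue q{900};                         // nearly empty after filtering
  std::printf("%d\n", qset.decide(q));  // 1: flushed anyway (forced)
  std::printf("%d\n", qset.decide(q));  // 0: flag consumed
}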

src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.hpp
@@ -30,19 +30,15 @@
#include "runtime/mutex.hpp"
#include "runtime/thread.hpp"
class ShenandoahSATBMarkQueue: public SATBMarkQueue {
protected:
virtual void handle_completed_buffer();
public:
ShenandoahSATBMarkQueue(SATBMarkQueueSet* qset) : SATBMarkQueue(qset) {}
};
class ShenandoahSATBMarkQueueSet : public SATBMarkQueueSet {
protected:
virtual bool should_enqueue_buffer(SATBMarkQueue& queue);
public:
ShenandoahSATBMarkQueueSet(BufferNode::Allocator* allocator);
virtual SATBMarkQueue& satb_queue_for_thread(Thread* const t) const;
virtual void filter(SATBMarkQueue* queue);
virtual void filter(SATBMarkQueue& queue);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSATBMARKQUEUESET_HPP

src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
@@ -43,7 +43,7 @@ private:
// Evacuation OOM state
uint8_t _oom_scope_nesting_level;
bool _oom_during_evac;
ShenandoahSATBMarkQueue _satb_mark_queue;
SATBMarkQueue _satb_mark_queue;
PLAB* _gclab;
size_t _gclab_size;
uint _worker_id;

src/hotspot/share/jvmci/jvmciRuntime.cpp
@@ -51,7 +51,7 @@
#include "runtime/reflectionUtils.hpp"
#include "runtime/sharedRuntime.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#endif // INCLUDE_G1GC
// Simple helper to see if the caller of a runtime stub which
@@ -482,13 +482,13 @@ JRT_END
#if INCLUDE_G1GC
JRT_LEAF(void, JVMCIRuntime::write_barrier_pre(JavaThread* thread, oopDesc* obj))
G1ThreadLocalData::satb_mark_queue(thread).enqueue(obj);
JRT_END
void JVMCIRuntime::write_barrier_pre(JavaThread* thread, oopDesc* obj) {
G1BarrierSetRuntime::write_ref_field_pre_entry(obj, thread);
}
JRT_LEAF(void, JVMCIRuntime::write_barrier_post(JavaThread* thread, void* card_addr))
G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
JRT_END
void JVMCIRuntime::write_barrier_post(JavaThread* thread, volatile CardValue* card_addr) {
G1BarrierSetRuntime::write_ref_field_post_entry(card_addr, thread);
}
#endif // INCLUDE_G1GC

src/hotspot/share/jvmci/jvmciRuntime.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,9 @@
#include "jvmci/jvmciExceptions.hpp"
#include "jvmci/jvmciObject.hpp"
#include "utilities/linkedlist.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CardTable.hpp"
#endif // INCLUDE_G1GC
class JVMCIEnv;
class JVMCICompiler;
@@ -396,8 +399,9 @@ class JVMCIRuntime: public CHeapObj<mtJVMCI> {
// followed by its address.
static void log_object(JavaThread* thread, oopDesc* object, bool as_string, bool newline);
#if INCLUDE_G1GC
using CardValue = G1CardTable::CardValue;
static void write_barrier_pre(JavaThread* thread, oopDesc* obj);
static void write_barrier_post(JavaThread* thread, void* card);
static void write_barrier_post(JavaThread* thread, volatile CardValue* card);
#endif
static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child);