8258742: Move PtrQueue reset to PtrQueueSet subclasses
Reviewed-by: tschatzl, iwalulya
parent b53d5cacf4
commit 6c4c96fadf
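In outline, the change moves queue emptying from PtrQueue itself (the old public PtrQueue::reset()) to the owning PtrQueueSet, so only the set and its subclasses can discard a thread-local queue's buffered entries. A minimal compilable sketch of that shape, using simplified stand-in types rather than the real HotSpot classes:

#include <cstddef>

// Simplified stand-ins, only to show where the reset responsibility now lives.
struct PtrQueue {
  void** _buf = nullptr;
  size_t _index = 0;        // byte index; index at capacity-in-bytes means "empty"
  // Pre-change shape: the queue could forcibly empty itself:
  //   void reset();
};

struct PtrQueueSet {
  size_t _buffer_entries;   // entries per buffer
  explicit PtrQueueSet(size_t n) : _buffer_entries(n) {}

  // Post-change shape: only the owning set (and its subclasses) empties a queue.
  void reset_queue(PtrQueue& queue) {
    if (queue._buf != nullptr) {
      queue._index = _buffer_entries * sizeof(void*);  // index at capacity == empty
    }
  }
};

int main() {
  PtrQueueSet qset(256);
  PtrQueue queue;
  // Old call sites wrote queue.reset(); new ones go through the set:
  qset.reset_queue(queue);
  return 0;
}

Call sites in the G1 and Shenandoah hunks below follow this pattern: the qset resets or flushes each thread's queue, and PtrQueue loses its reset(), size(), and is_empty() helpers.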
@@ -142,8 +142,10 @@ void G1BarrierSet::on_thread_destroy(Thread* thread) {
 }

 void G1BarrierSet::on_thread_attach(Thread* thread) {
-  assert(!G1ThreadLocalData::satb_mark_queue(thread).is_active(), "SATB queue should not be active");
-  assert(G1ThreadLocalData::satb_mark_queue(thread).is_empty(), "SATB queue should be empty");
+  SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
+  assert(!queue.is_active(), "SATB queue should not be active");
+  assert(queue.buffer() == nullptr, "SATB queue should not have a buffer");
+  assert(queue.index() == 0, "SATB queue index should be zero");
   // Can't assert that the DCQ is empty. There is early execution on
   // the main thread, before it gets added to the threads list, which
   // is where this is called. That execution may enqueue dirty cards.
@@ -151,8 +153,7 @@ void G1BarrierSet::on_thread_attach(Thread* thread) {
   // If we are creating the thread during a marking cycle, we should
   // set the active field of the SATB queue to true. That involves
   // copying the global is_active value to this thread's queue.
-  bool is_satb_active = _satb_mark_queue_set.is_active();
-  G1ThreadLocalData::satb_mark_queue(thread).set_active(is_satb_active);
+  queue.set_active(_satb_mark_queue_set.is_active());
 }

 void G1BarrierSet::on_thread_detach(Thread* thread) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1737,22 +1737,22 @@ public:
 };

 class G1RemarkThreadsClosure : public ThreadClosure {
-  G1CMSATBBufferClosure _cm_satb_cl;
+  G1SATBMarkQueueSet& _qset;
   G1CMOopClosure _cm_cl;
   MarkingCodeBlobClosure _code_cl;
   uintx _claim_token;

  public:
   G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
-    _cm_satb_cl(task, g1h),
+    _qset(G1BarrierSet::satb_mark_queue_set()),
     _cm_cl(g1h, task),
     _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
     _claim_token(Threads::thread_claim_token()) {}

   void do_thread(Thread* thread) {
     if (thread->claim_threads_do(true, _claim_token)) {
-      SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread);
-      queue.apply_closure_and_empty(&_cm_satb_cl);
+      // Transfer any partial buffer to the qset for completed buffer processing.
+      _qset.flush_queue(G1ThreadLocalData::satb_mark_queue(thread));
       if (thread->is_Java_thread()) {
         // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking
         // however the liveness of oops reachable from nmethods have very complex lifecycles:
@@ -622,12 +622,14 @@ void G1DirtyCardQueueSet::abandon_logs() {
   // Since abandon is done only at safepoints, we can safely manipulate
   // these queues.
   struct AbandonThreadLogClosure : public ThreadClosure {
+    G1DirtyCardQueueSet& _qset;
+    AbandonThreadLogClosure(G1DirtyCardQueueSet& qset) : _qset(qset) {}
     virtual void do_thread(Thread* t) {
-      G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(t);
-      dcq.reset();
-      dcq.refinement_stats()->reset();
+      G1DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(t);
+      _qset.reset_queue(queue);
+      queue.refinement_stats()->reset();
     }
-  } closure;
+  } closure(*this);
   Threads::threads_do(&closure);

   G1BarrierSet::shared_dirty_card_queue().reset();
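The abandon_logs hunk also changes the local closure from a stateless `} closure;` to one constructed with `*this`, so do_thread can reach the owning qset and call reset_queue on it. A small self-contained sketch of that idiom, with hypothetical names standing in for the HotSpot types:

#include <cstdio>

// Hypothetical stand-ins illustrating the "local struct closure holds a
// reference to the enclosing set" idiom used by abandon_logs().
struct Queue { int pending = 3; };

struct QueueSet {
  void reset_queue(Queue& q) { q.pending = 0; }

  void abandon_all(Queue* queues, int n) {
    struct ResetClosure {
      QueueSet& _qset;                       // reference back to the owner
      explicit ResetClosure(QueueSet& qset) : _qset(qset) {}
      void do_queue(Queue& q) { _qset.reset_queue(q); }
    } closure(*this);                        // mirrors "} closure(*this);"

    for (int i = 0; i < n; i++) {
      closure.do_queue(queues[i]);
    }
  }
};

int main() {
  Queue qs[2];
  QueueSet set;
  set.abandon_all(qs, 2);
  std::printf("pending after abandon: %d %d\n", qs[0].pending, qs[1].pending);
  return 0;
}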
@@ -199,6 +199,12 @@ PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :

 PtrQueueSet::~PtrQueueSet() {}

+void PtrQueueSet::reset_queue(PtrQueue& queue) {
+  if (queue.buffer() != nullptr) {
+    queue.set_index(buffer_size());
+  }
+}
+
 void PtrQueueSet::flush_queue(PtrQueue& queue) {
   void** buffer = queue.buffer();
   if (buffer != nullptr) {
@@ -48,7 +48,7 @@ class PtrQueue {
   PtrQueueSet* const _qset;

   // The (byte) index at which an object was last enqueued. Starts at
-  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
+  // capacity (in bytes) (indicating an empty buffer) and goes towards zero.
   // Value is always pointer-size aligned.
   size_t _index;

@@ -91,49 +91,19 @@ public:
   void** buffer() const { return _buf; }
   void set_buffer(void** buffer) { _buf = buffer; }

-  size_t index_in_bytes() const {
-    return _index;
-  }
-
-  void set_index_in_bytes(size_t new_index) {
-    assert(is_aligned(new_index, _element_size), "precondition");
-    assert(new_index <= capacity_in_bytes(), "precondition");
-    _index = new_index;
-  }
-
   size_t index() const {
-    return byte_index_to_index(index_in_bytes());
+    return byte_index_to_index(_index);
   }

   void set_index(size_t new_index) {
-    set_index_in_bytes(index_to_byte_index(new_index));
+    assert(new_index <= capacity(), "precondition");
+    _index = index_to_byte_index(new_index);
   }

   size_t capacity() const {
     return byte_index_to_index(capacity_in_bytes());
   }

-  // Forcibly set empty.
-  void reset() {
-    if (_buf != NULL) {
-      _index = capacity_in_bytes();
-    }
-  }
-
-  // Return the size of the in-use region.
-  size_t size() const {
-    size_t result = 0;
-    if (_buf != NULL) {
-      assert(_index <= capacity_in_bytes(), "Invariant");
-      result = byte_index_to_index(capacity_in_bytes() - _index);
-    }
-    return result;
-  }
-
-  bool is_empty() const {
-    return _buf == NULL || capacity_in_bytes() == _index;
-  }
-
   // To support compiler.

 protected:
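The index() / set_index() pair above converts between element and byte indices: _index counts down from the byte capacity, so an index equal to the capacity means the buffer is empty, and the in-use region is everything from _index up to the capacity. A worked sketch of that arithmetic, under the assumption that byte_index_to_index divides by sizeof(void*) and index_to_byte_index multiplies by it:

#include <cassert>
#include <cstddef>

// Assumed conversions, mirroring the names used in ptrQueue.hpp.
static size_t index_to_byte_index(size_t ind) { return ind * sizeof(void*); }
static size_t byte_index_to_index(size_t ind) { return ind / sizeof(void*); }

int main() {
  const size_t buffer_entries = 256;                           // capacity in elements
  const size_t capacity_in_bytes = index_to_byte_index(buffer_entries);

  // reset_queue() effectively does set_index(buffer_size()):
  size_t _index = index_to_byte_index(buffer_entries);         // byte index at capacity
  assert(byte_index_to_index(_index) == buffer_entries);       // index() == capacity()
  assert(capacity_in_bytes - _index == 0);                     // in-use region is empty

  // Enqueuing one entry moves the byte index down by one slot.
  _index -= sizeof(void*);
  assert(byte_index_to_index(capacity_in_bytes - _index) == 1);  // one entry in use
  return 0;
}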
@@ -260,6 +230,9 @@ protected:
   PtrQueueSet(BufferNode::Allocator* allocator);
   ~PtrQueueSet();

+  // Discard any buffered enqueued data.
+  void reset_queue(PtrQueue& queue);
+
   // If queue has any buffered enqueued data, transfer it to this qset.
   // Otherwise, deallocate queue's buffer.
   void flush_queue(PtrQueue& queue);
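The two protected operations split the old per-queue behavior: reset_queue() drops whatever the thread had buffered, while flush_queue() hands a partial buffer to the set for completed-buffer processing, or frees a buffer with nothing in it. A hedged sketch of flush_queue()'s contract only, with a hypothetical deallocate/enqueue_completed_buffer pair standing in for the real allocator and completed-buffer list:

#include <cstddef>

// Sketch of the documented contract; not the HotSpot implementation.
class PtrQueue {
  void** _buf = nullptr;
  size_t _index = 0;                        // byte index; capacity bytes == empty
public:
  void** buffer() const { return _buf; }
  size_t index() const { return _index / sizeof(void*); }
  void set_buffer(void** b) { _buf = b; }
  void set_index(size_t i) { _index = i * sizeof(void*); }
};

class PtrQueueSet {
  size_t _buffer_entries;                   // entries per buffer
public:
  explicit PtrQueueSet(size_t n) : _buffer_entries(n) {}
  size_t buffer_size() const { return _buffer_entries; }

  // If queue has buffered data, hand the buffer over for completed-buffer
  // processing; otherwise give the buffer back to the allocator.
  void flush_queue(PtrQueue& queue) {
    void** buffer = queue.buffer();
    if (buffer != nullptr) {
      size_t first_used = queue.index();    // live entries are [index, capacity)
      queue.set_buffer(nullptr);
      queue.set_index(0);                   // detached state: no buffer, index 0
      if (first_used == buffer_size()) {
        deallocate(buffer);                 // nothing was enqueued
      } else {
        enqueue_completed_buffer(buffer, first_used);
      }
    }
  }

private:
  // Stand-ins for the buffer allocator and the completed-buffer list.
  void deallocate(void** /*buffer*/) {}
  void enqueue_completed_buffer(void** /*buffer*/, size_t /*first_used*/) {}
};

This matches the remark-time usage in the G1 and Shenandoah hunks, where each thread's partial SATB buffer is flushed to the qset instead of being drained in place, and it is consistent with the new on_thread_attach asserts that a detached queue has a null buffer and a zero index.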
@@ -47,15 +47,6 @@ SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset) :
   _active(false)
 { }

-void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "SATB queues must only be processed at safepoints");
-  if (_buf != NULL) {
-    cl->do_buffer(&_buf[index()], size());
-    reset();
-  }
-}
-
 #ifndef PRODUCT
 // Helpful for debugging
@@ -359,12 +350,12 @@ void SATBMarkQueueSet::abandon_partial_marking() {
   abandon_completed_buffers();

   class AbandonThreadQueueClosure : public ThreadClosure {
-    SATBMarkQueueSet* _qset;
+    SATBMarkQueueSet& _qset;
   public:
-    AbandonThreadQueueClosure(SATBMarkQueueSet* qset) : _qset(qset) {}
+    AbandonThreadQueueClosure(SATBMarkQueueSet& qset) : _qset(qset) {}
     virtual void do_thread(Thread* t) {
-      _qset->satb_queue_for_thread(t).reset();
+      _qset.reset_queue(_qset.satb_queue_for_thread(t));
     }
-  } closure(this);
+  } closure(*this);
   Threads::threads_do(&closure);
 }
@@ -65,10 +65,6 @@ public:

   inline SATBMarkQueueSet* satb_qset() const;

-  // Apply cl to the active part of the buffer.
-  // Prerequisite: Must be at a safepoint.
-  void apply_closure_and_empty(SATBBufferClosure* cl);
-
 #ifndef PRODUCT
   // Helpful for debugging
   void print(const char* name);
@@ -105,7 +105,8 @@ void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
          "We should not be at a safepoint");
   SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
   assert(!queue.is_active(), "SATB queue should not be active");
-  assert( queue.is_empty(), "SATB queue should be empty");
+  assert(queue.buffer() == nullptr, "SATB queue should not have a buffer");
+  assert(queue.index() == 0, "SATB queue index should be zero");
   queue.set_active(_satb_mark_queue_set.is_active());
   if (thread->is_Java_thread()) {
     ShenandoahThreadLocalData::set_gc_state(thread, _heap->gc_state());
@@ -105,19 +105,21 @@ public:

 class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
 private:
-  ShenandoahSATBBufferClosure* _satb_cl;
-  OopClosure* const _cl;
-  MarkingCodeBlobClosure* _code_cl;
+  SATBMarkQueueSet& _satb_qset;
+  OopClosure* const _cl;
+  MarkingCodeBlobClosure* _code_cl;
   uintx _claim_token;

 public:
-  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
-    _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
+  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(SATBMarkQueueSet& satb_qset, OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
+    _satb_qset(satb_qset),
+    _cl(cl), _code_cl(code_cl),
     _claim_token(Threads::thread_claim_token()) {}

   void do_thread(Thread* thread) {
     if (thread->claim_threads_do(true, _claim_token)) {
-      ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
+      // Transfer any partial buffer to the qset for completed buffer processing.
+      _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
       if (thread->is_Java_thread()) {
         if (_cl != NULL) {
           ResourceMark rm;
@@ -165,7 +167,7 @@ public:
   bool do_nmethods = heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading();
   ShenandoahMarkRefsClosure mark_cl(q, rp);
   MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
-  ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
+  ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(satb_mq_set,
                                                     ShenandoahIUBarrier ? &mark_cl : NULL,
                                                     do_nmethods ? &blobsCl : NULL);
   Threads::threads_do(&tc);