8221785: Let possibly_parallel_threads_do cover the same threads as threads_do

Reviewed-by: iwalulya, coleenp
This commit is contained in:
Thomas Schatzl 2023-01-26 15:39:22 +00:00
parent 14114c2515
commit 315398c245
7 changed files with 49 additions and 39 deletions

@ -1781,28 +1781,24 @@ class G1RemarkThreadsClosure : public ThreadClosure {
G1SATBMarkQueueSet& _qset;
G1CMOopClosure _cm_cl;
MarkingCodeBlobClosure _code_cl;
uintx _claim_token;
public:
G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) :
_qset(G1BarrierSet::satb_mark_queue_set()),
_cm_cl(g1h, task),
_code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations, true /* keepalive nmethods */),
_claim_token(Threads::thread_claim_token()) {}
_code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations, true /* keepalive nmethods */) {}
void do_thread(Thread* thread) {
if (thread->claim_threads_do(true, _claim_token)) {
// Transfer any partial buffer to the qset for completed buffer processing.
_qset.flush_queue(G1ThreadLocalData::satb_mark_queue(thread));
if (thread->is_Java_thread()) {
// In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
// however the liveness of oops reachable from nmethods have very complex lifecycles:
// * Alive if on the stack of an executing method
// * Weakly reachable otherwise
// Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
// live by the SATB invariant but other oops recorded in nmethods may behave differently.
JavaThread::cast(thread)->nmethods_do(&_code_cl);
}
// Transfer any partial buffer to the qset for completed buffer processing.
_qset.flush_queue(G1ThreadLocalData::satb_mark_queue(thread));
if (thread->is_Java_thread()) {
// In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
// however the liveness of oops reachable from nmethods have very complex lifecycles:
// * Alive if on the stack of an executing method
// * Weakly reachable otherwise
// Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
// live by the SATB invariant but other oops recorded in nmethods may behave differently.
JavaThread::cast(thread)->nmethods_do(&_code_cl);
}
}
};
@ -1817,7 +1813,7 @@ public:
ResourceMark rm;
G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task);
Threads::threads_do(&threads_f);
Threads::possibly_parallel_threads_do(true /* is_par */, &threads_f);
}
do {

@ -1974,7 +1974,7 @@ public:
}
PCAddThreadRootsMarkingTaskClosure closure(worker_id);
Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
Threads::possibly_parallel_threads_do(true /* is_par */, &closure);
// Mark from OopStorages
{

@ -331,7 +331,7 @@ public:
}
PSThreadRootsTaskClosure closure(worker_id);
Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
Threads::possibly_parallel_threads_do(true /* is_par */, &closure);
// Scavenge OopStorages
{

@ -78,18 +78,15 @@ private:
public:
  // Post-JDK-8221785: thread claiming is performed by the caller
  // (Threads::possibly_parallel_threads_do), so the claim token member
  // and the claim_threads_do() check are gone from this closure.
  ShenandoahSATBAndRemarkThreadsClosure(SATBMarkQueueSet& satb_qset, OopClosure* cl) :
    _satb_qset(satb_qset),
    _cl(cl) {}

  void do_thread(Thread* thread) {
    // Transfer any partial buffer to the qset for completed buffer processing.
    _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
    if (thread->is_Java_thread()) {
      // Optional extra closure (IU barrier remark): walk the thread's oops.
      if (_cl != NULL) {
        ResourceMark rm;
        thread->oops_do(_cl, NULL);
      }
    }
  }
@ -125,7 +122,7 @@ public:
ShenandoahMarkRefsClosure mark_cl(q, rp);
ShenandoahSATBAndRemarkThreadsClosure tc(satb_mq_set,
ShenandoahIUBarrier ? &mark_cl : NULL);
Threads::threads_do(&tc);
Threads::possibly_parallel_threads_do(true /* is_par */, &tc);
}
_cm->mark_loop(worker_id, _terminator, rp,
false /*not cancellable*/,

@ -254,15 +254,19 @@ void Threads::threads_do(ThreadClosure* tc) {
}
// Apply "tc" to every JavaThread and every NonJavaThread, claiming each
// thread exactly once via the claim-token protocol so that multiple
// workers may call this concurrently (is_par == true) without a thread
// being processed twice. Per JDK-8221785 this now iterates all
// NonJavaThreads (not just the VMThread) so it covers the same set of
// threads as Threads::threads_do().
void Threads::possibly_parallel_threads_do(bool is_par, ThreadClosure* tc) {
  assert_at_safepoint();

  uintx claim_token = Threads::thread_claim_token();
  ALL_JAVA_THREADS(p) {
    if (p->claim_threads_do(is_par, claim_token)) {
      tc->do_thread(p);
    }
  }
  // NonJavaThreads include the VMThread, GC worker threads, etc.
  for (NonJavaThread::Iterator njti; !njti.end(); njti.step()) {
    Thread* current = njti.current();
    if (current->claim_threads_do(is_par, claim_token)) {
      tc->do_thread(current);
    }
  }
}
@ -1305,9 +1309,20 @@ void assert_thread_claimed(const char* kind, Thread* t, uintx expected) {
// Debug-only check that every thread visited by possibly_parallel_threads_do
// (all JavaThreads and all NonJavaThreads) carries the current claim token,
// i.e. no thread was skipped by a parallel iteration.
void Threads::assert_all_threads_claimed() {
  ALL_JAVA_THREADS(p) {
    assert_thread_claimed("JavaThread", p, _thread_claim_token);
  }

  struct NJTClaimedVerifierClosure : public ThreadClosure {
    uintx _thread_claim_token;

    NJTClaimedVerifierClosure(uintx thread_claim_token) : ThreadClosure(), _thread_claim_token(thread_claim_token) { }

    virtual void do_thread(Thread* thread) override {
      // Bug fix: check the thread handed to the closure, not
      // VMThread::vm_thread(). The original re-verified the VM thread on
      // every invocation, so an unclaimed NonJavaThread would never
      // trigger this assert.
      assert_thread_claimed("Non-JavaThread", thread, _thread_claim_token);
    }
  } tc(_thread_claim_token);

  non_java_threads_do(&tc);
}
#endif // ASSERT

@ -82,6 +82,7 @@ class Threads: AllStatic {
// Does not include JNI_VERSION_1_1
static jboolean is_supported_jni_version(jint version);
private:
// The "thread claim token" provides a way for threads to be claimed
// by parallel worker tasks.
//
@ -98,6 +99,8 @@ class Threads: AllStatic {
// New threads get their token set to 0 and change_thread_claim_token()
// never sets the global token to 0.
static uintx thread_claim_token() { return _thread_claim_token; }
public:
static void change_thread_claim_token();
static void assert_all_threads_claimed() NOT_DEBUG_RETURN;

@ -147,18 +147,17 @@ public:
ASSERT_EQ(max_uintx, thread_claim_token());
CountThreads count2(thread_claim_token(), false); // Claimed by PPTD below
possibly_parallel_threads_do(true, &count2);
possibly_parallel_threads_do(true /* is_par */, &count2);
ASSERT_EQ(count1.java_threads_count(), count2.java_threads_count());
ASSERT_EQ(1u, count2.non_java_threads_count()); // Only VM thread
ASSERT_EQ(count1.non_java_threads_count(), count2.non_java_threads_count());
CheckClaims check2(thread_claim_token());
threads_do(&check2);
ASSERT_EQ(count2.java_threads_count(), check2.java_threads_claimed());
ASSERT_EQ(0u, check2.java_threads_unclaimed());
ASSERT_EQ(1u, check2.non_java_threads_claimed()); // Only VM thread
ASSERT_EQ(0u, check2.non_java_threads_unclaimed());
ASSERT_EQ(count1.non_java_threads_count(),
check2.non_java_threads_claimed() +
check2.non_java_threads_unclaimed());
check2.non_java_threads_claimed());
change_thread_claim_token(); // Expect overflow.
ASSERT_EQ(uintx(1), thread_claim_token());