8074101: Add verification that all tasks are actually claimed during roots processing

Reviewed-by: kbarrett, tschatzl
Albert Mingkun Yang 2021-01-18 08:33:33 +00:00 committed by Thomas Schatzl
parent 917f7e9a95
commit e93f08e2ac
8 changed files with 73 additions and 44 deletions
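Previously, SubTasksDone only tracked a debug-build counter (`_claimed`) that was bumped on each successful claim and checked against `_n_tasks`. This change replaces it with an exhaustive check at the join point: when workers call `all_tasks_completed()`, every task id must either have been claimed by exactly one worker or be explicitly passed as skipped. As a consequence, task ids that are never claimed (such as the vestigial `GCH_PS_younger_gens`) are deleted, and conditional claims (AOT roots, lazy safepoint roots) are restructured to claim first and test the flag second.

The following is a minimal standalone sketch of the claimed-xor-skipped invariant being asserted; it is not HotSpot code, and `DemoSubTasks`/`verify_all_claimed` are invented names:

// demo_subtasks.cpp -- illustration only, compile with -std=c++11 or later.
#include <atomic>
#include <cassert>
#include <initializer_list>
#include <memory>

class DemoSubTasks {
  std::unique_ptr<std::atomic<unsigned>[]> _tasks; // 0 = unclaimed, 1 = claimed
  unsigned _n;
public:
  explicit DemoSubTasks(unsigned n)
      : _tasks(new std::atomic<unsigned>[n]), _n(n) {
    for (unsigned i = 0; i < n; ++i) _tasks[i] = 0;
  }

  // Exactly one caller observes 'true' per task id.
  bool try_claim_task(unsigned t) {
    unsigned expected = 0;
    return _tasks[t].compare_exchange_strong(expected, 1u);
  }

  // The new invariant: each task id is claimed XOR explicitly skipped.
  void verify_all_claimed(std::initializer_list<unsigned> skipped) {
    for (unsigned i = 0; i < _n; ++i) {
      bool is_skipped = false;
      for (unsigned s : skipped) { if (s == i) is_skipped = true; }
      assert((_tasks[i].load() != 0) == !is_skipped && "claimed xor skipped");
    }
  }
};

int main() {
  enum { TASK_A = 0, TASK_B = 1, N_TASKS = 2 };
  DemoSubTasks st(N_TASKS);
  st.try_claim_task(TASK_A);       // TASK_A is claimed by this "worker"...
  st.verify_all_claimed({TASK_B}); // ...so TASK_B must be declared skipped.
  return 0;
}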

src/hotspot/share/gc/g1/g1RootProcessor.cpp

@@ -73,7 +73,8 @@ void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id)
     }
   }
 
-  _process_strong_tasks.all_tasks_completed(n_workers());
+  // CodeCache is already processed in java roots
+  _process_strong_tasks.all_tasks_completed(n_workers(), G1RP_PS_CodeCache_oops_do);
 }
 
 // Adaptor to pass the closures to the strong roots in the VM.
@@ -102,7 +103,11 @@ void G1RootProcessor::process_strong_roots(OopClosure* oops,
   process_java_roots(&closures, NULL, 0);
   process_vm_roots(&closures, NULL, 0);
 
-  _process_strong_tasks.all_tasks_completed(n_workers());
+  // CodeCache is already processed in java roots
+  // refProcessor is not needed since we are inside a safe point
+  _process_strong_tasks.all_tasks_completed(n_workers(),
+                                            G1RP_PS_CodeCache_oops_do,
+                                            G1RP_PS_refProcessor_oops_do);
 }
 
 // Adaptor to pass the closures to all the roots in the VM.
@@ -137,7 +142,8 @@ void G1RootProcessor::process_all_roots(OopClosure* oops,
   process_code_cache_roots(blobs, NULL, 0);
 
-  _process_strong_tasks.all_tasks_completed(n_workers());
+  // refProcessor is not needed since we are inside a safe point
+  _process_strong_tasks.all_tasks_completed(n_workers(), G1RP_PS_refProcessor_oops_do);
 }
 
 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
@@ -181,10 +187,10 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
   OopClosure* strong_roots = closures->strong_oops();
 
 #if INCLUDE_AOT
-  if (UseAOT) {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
-    if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
-      AOTLoader::oops_do(strong_roots);
+  if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
+    if (UseAOT) {
+      G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
+      AOTLoader::oops_do(strong_roots);
     }
   }
 #endif
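
Note the shape of the AOT change: `try_claim_task(G1RP_PS_aot_oops_do)` moves out of the `UseAOT` check, so the task is claimed regardless of whether AOT is enabled, and only the actual work stays behind the flag. With the old ordering, a run with AOT disabled would leave the task unclaimed and trip the new verification in `all_tasks_completed()`.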

src/hotspot/share/gc/g1/g1RootProcessor.hpp

@@ -53,10 +53,7 @@ class G1RootProcessor : public StackObj {
   OopStorageSetStrongParState<false, false> _oop_storage_set_strong_par_state;
 
   enum G1H_process_roots_tasks {
-    G1RP_PS_Universe_oops_do,
-    G1RP_PS_Management_oops_do,
     G1RP_PS_ClassLoaderDataGraph_oops_do,
-    G1RP_PS_jvmti_oops_do,
     G1RP_PS_CodeCache_oops_do,
     AOT_ONLY(G1RP_PS_aot_oops_do COMMA)
     G1RP_PS_refProcessor_oops_do,

src/hotspot/share/gc/serial/serialHeap.cpp

@@ -98,10 +98,6 @@ void SerialHeap::young_process_roots(StrongRootsScope* scope,
   process_roots(scope, SO_ScavengeCodeCache, root_closure,
                 cld_closure, cld_closure, &mark_code_closure);
 
-  if (_process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
-  }
-
   rem_set()->at_younger_refs_iterate();
   old_gen()->younger_refs_iterate(old_gen_closure, scope->n_threads());
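
The `GCH_PS_younger_gens` claim deleted here had an empty body; the task id existed only to be claimed. Under the new verification such a task would have to be listed as skipped at every `all_tasks_completed()` call site, so the vestigial claim is removed instead, together with the matching enum entry in genCollectedHeap.hpp below.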

src/hotspot/share/gc/shared/genCollectedHeap.cpp

@@ -819,8 +819,10 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
   Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);
 
 #if INCLUDE_AOT
-  if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
-    AOTLoader::oops_do(strong_roots);
+  if (_process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
+    if (UseAOT) {
+      AOTLoader::oops_do(strong_roots);
+    }
   }
 #endif
 
   if (_process_strong_tasks->try_claim_task(GCH_PS_OopStorageSet_oops_do)) {

src/hotspot/share/gc/shared/genCollectedHeap.hpp

@@ -109,7 +109,6 @@ protected:
     GCH_PS_ClassLoaderDataGraph_oops_do,
     GCH_PS_CodeCache_oops_do,
     AOT_ONLY(GCH_PS_aot_oops_do COMMA)
-    GCH_PS_younger_gens,
     // Leave this one last.
     GCH_PS_NumElements
   };

src/hotspot/share/gc/shared/workgroup.cpp

@@ -366,28 +366,32 @@ void SubTasksDone::clear() {
     _tasks[i] = 0;
   }
   _threads_completed = 0;
-#ifdef ASSERT
-  _claimed = 0;
-#endif
 }
 
-bool SubTasksDone::try_claim_task(uint t) {
-  assert(t < _n_tasks, "bad task id.");
-  uint old = _tasks[t];
-  if (old == 0) {
-    old = Atomic::cmpxchg(&_tasks[t], 0u, 1u);
-  }
-  bool res = old == 0;
+void SubTasksDone::all_tasks_completed_impl(uint n_threads,
+                                            uint skipped[],
+                                            size_t skipped_size) {
 #ifdef ASSERT
-  if (res) {
-    assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
-    Atomic::inc(&_claimed);
-  }
+  // all non-skipped tasks are claimed
+  for (uint i = 0; i < _n_tasks; ++i) {
+    if (_tasks[i] == 0) {
+      auto is_skipped = false;
+      for (size_t j = 0; j < skipped_size; ++j) {
+        if (i == skipped[j]) {
+          is_skipped = true;
+          break;
+        }
+      }
+      assert(is_skipped, "%d not claimed.", i);
+    }
+  }
+  // all skipped tasks are *not* claimed
+  for (size_t i = 0; i < skipped_size; ++i) {
+    auto task_index = skipped[i];
+    assert(task_index < _n_tasks, "Array in range.");
+    assert(_tasks[task_index] == 0, "%d is both claimed and skipped.", task_index);
+  }
 #endif
-  return res;
-}
-
-void SubTasksDone::all_tasks_completed(uint n_threads) {
   uint observed = _threads_completed;
   uint old;
   do {
@@ -401,6 +405,16 @@ void SubTasksDone::all_tasks_completed(uint n_threads) {
   }
 }
 
+bool SubTasksDone::try_claim_task(uint t) {
+  assert(t < _n_tasks, "bad task id.");
+  uint old = _tasks[t];
+  if (old == 0) {
+    old = Atomic::cmpxchg(&_tasks[t], 0u, 1u);
+  }
+  bool res = old == 0;
+  return res;
+}
+
 SubTasksDone::~SubTasksDone() {
   FREE_C_HEAP_ARRAY(uint, _tasks);
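
Note that both verification loops sit under `#ifdef ASSERT`, so release builds see no extra work at the join point; debug builds pay a nested scan of `_n_tasks` by `skipped_size` entries, negligible for the handful of root-processing sub-tasks involved. Moving `try_claim_task()` below the new code is pure code motion; its claiming logic is unchanged apart from dropping the old `_claimed` bookkeeping.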

src/hotspot/share/gc/shared/workgroup.hpp

@@ -26,6 +26,8 @@
 #define SHARE_GC_SHARED_WORKGROUP_HPP
 
 #include "memory/allocation.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "metaprogramming/logical.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/thread.hpp"
 #include "gc/shared/gcId.hpp"
@@ -303,13 +305,12 @@ class SubTasksDone: public CHeapObj<mtInternal> {
   volatile uint* _tasks;
   uint _n_tasks;
   volatile uint _threads_completed;
-#ifdef ASSERT
-  volatile uint _claimed;
-#endif
 
   // Set all tasks to unclaimed.
   void clear();
 
+  void all_tasks_completed_impl(uint n_threads, uint skipped[], size_t skipped_size);
+
   NONCOPYABLE(SubTasksDone);
 
 public:
@@ -326,13 +327,25 @@
   // to be within the range of "this".
   bool try_claim_task(uint t);
 
-  // The calling thread asserts that it has attempted to claim all the
-  // tasks that it will try to claim. Every thread in the parallel task
+  // The calling thread asserts that it has attempted to claim all the tasks
+  // that it will try to claim. Tasks that are meant to be skipped must be
+  // explicitly passed as extra arguments. Every thread in the parallel task
   // must execute this. (When the last thread does so, the task array is
   // cleared.)
   //
   // n_threads - Number of threads executing the sub-tasks.
-  void all_tasks_completed(uint n_threads);
+  void all_tasks_completed(uint n_threads) {
+    all_tasks_completed_impl(n_threads, nullptr, 0);
+  }
+
+  // Augmented by variadic args, each for a skipped task.
+  template<typename T0, typename... Ts,
+           ENABLE_IF(Conjunction<std::is_same<T0, Ts>...>::value)>
+  void all_tasks_completed(uint n_threads, T0 first_skipped, Ts... more_skipped) {
+    static_assert(std::is_convertible<T0, uint>::value, "not convertible");
+    uint skipped[] = { static_cast<uint>(first_skipped), static_cast<uint>(more_skipped)... };
+    all_tasks_completed_impl(n_threads, skipped, ARRAY_SIZE(skipped));
+  }
 
   // Destructor.
   ~SubTasksDone();
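
To make the new call protocol concrete, here is a hypothetical caller of the variadic overload; `MY_TASK_A`, `MY_TASK_B`, and `worker_body` are invented names, and only the SubTasksDone calls mirror the API above:

// Sketch of a caller; every worker executes this body.
void worker_body(SubTasksDone& subtasks, uint n_workers) {
  if (subtasks.try_claim_task(MY_TASK_A)) {
    // MY_TASK_A runs exactly once across all workers.
  }
  // MY_TASK_B is never attempted on this path, so it must be declared
  // skipped; otherwise the ASSERT-build check in all_tasks_completed_impl()
  // reports it as "not claimed".
  subtasks.all_tasks_completed(n_workers, MY_TASK_B);
}

Passing plain enum values works because the overload requires all skipped arguments to share one type (the Conjunction/ENABLE_IF constraint) and converts each to uint with static_cast.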

src/hotspot/share/runtime/safepoint.cpp

@@ -546,10 +546,12 @@
                    Universe::heap()->uses_stack_watermark_barrier()) {}
 
   void work(uint worker_id) {
-    if (_do_lazy_roots && _subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_LAZY_ROOT_PROCESSING)) {
-      Tracer t("lazy partial thread root processing");
-      ParallelSPCleanupThreadClosure cl;
-      Threads::threads_do(&cl);
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_LAZY_ROOT_PROCESSING)) {
+      if (_do_lazy_roots) {
+        Tracer t("lazy partial thread root processing");
+        ParallelSPCleanupThreadClosure cl;
+        Threads::threads_do(&cl);
+      }
     }
 
     if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES)) {
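
The safepoint-cleanup change applies the same claim-first pattern: `SAFEPOINT_CLEANUP_LAZY_ROOT_PROCESSING` is claimed unconditionally and the `_do_lazy_roots` test moves inside the claimed block, so the sub-task is accounted for even when lazy root processing is not done.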