8238220: Rename OWSTTaskTerminator to TaskTerminator
Reviewed-by: sjohanss, sangheki
parent 76675e93cf
commit 77ad678fce
src/hotspot/share/gc
@@ -76,10 +76,10 @@
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/locationPrinter.inline.hpp"
 #include "gc/shared/oopStorageParState.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/weakProcessor.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -1133,7 +1133,7 @@ void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_tr
   print_heap_after_gc();
   print_heap_regions();
 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif
 }

@@ -3141,7 +3141,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
   verify_after_young_collection(verify_type);

 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif

   gc_epilogue(false);
@@ -3477,14 +3477,14 @@ class G1STWRefProcTaskProxy: public AbstractGangTask {
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _pss;
   RefToScanQueueSet* _task_queues;
-  OWSTTaskTerminator* _terminator;
+  TaskTerminator* _terminator;

 public:
   G1STWRefProcTaskProxy(ProcessTask& proc_task,
                         G1CollectedHeap* g1h,
                         G1ParScanThreadStateSet* per_thread_states,
                         RefToScanQueueSet *task_queues,
-                        OWSTTaskTerminator* terminator) :
+                        TaskTerminator* terminator) :
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task),
     _g1h(g1h),
@@ -3528,7 +3528,7 @@ void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers
   assert(_workers->active_workers() >= ergo_workers,
          "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
          ergo_workers, _workers->active_workers());
-  OWSTTaskTerminator terminator(ergo_workers, _queues);
+  TaskTerminator terminator(ergo_workers, _queues);
   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);

   _workers->run_task(&proc_task_proxy, ergo_workers);
@@ -3815,7 +3815,7 @@ protected:
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _per_thread_states;
   RefToScanQueueSet* _task_queues;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
   uint _num_workers;

   void evacuate_live_objects(G1ParScanThreadState* pss,
@@ -1482,18 +1482,18 @@ protected:
   G1CollectedHeap* _g1h;
   G1ParScanThreadState* _par_scan_state;
   RefToScanQueueSet* _queues;
-  OWSTTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
   G1GCPhaseTimes::GCParPhases _phase;

   G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
   RefToScanQueueSet* queues() { return _queues; }
-  OWSTTaskTerminator* terminator() { return _terminator; }
+  TaskTerminator* terminator() { return _terminator; }

 public:
   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                 G1ParScanThreadState* par_scan_state,
                                 RefToScanQueueSet* queues,
-                                OWSTTaskTerminator* terminator,
+                                TaskTerminator* terminator,
                                 G1GCPhaseTimes::GCParPhases phase)
     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
       _g1h(g1h), _par_scan_state(par_scan_state),
@@ -46,10 +46,10 @@
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/gcVMOperations.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/weakProcessor.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -30,7 +30,7 @@
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1RegionMarkStatsCache.hpp"
 #include "gc/g1/heapRegionSet.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -329,7 +329,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
   G1CMTask** _tasks; // Task queue array (max_worker_id length)

   G1CMTaskQueueSet* _task_queues; // Task queue set
-  OWSTTaskTerminator _terminator; // For termination
+  TaskTerminator _terminator;     // For termination

   // Two sync barriers that are used to synchronize tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
@@ -418,7 +418,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
   HeapWord* finger() { return _finger; }
   bool concurrent() { return _concurrent; }
   uint active_tasks() { return _num_active_tasks; }
-  OWSTTaskTerminator* terminator() { return &_terminator; }
+  TaskTerminator* terminator() { return &_terminator; }

   // Claims the next available region to be scanned by a marking
   // task/thread. It might return NULL if the next region is empty or
@@ -36,7 +36,7 @@

 class G1FullGCMarkTask : public G1FullGCTask {
   G1RootProcessor _root_processor;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;

 public:
   G1FullGCMarkTask(G1FullCollector* collector);
@@ -25,8 +25,8 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "gc/g1/g1FullGCMarker.inline.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "memory/iterator.inline.hpp"
@@ -50,7 +50,7 @@ G1FullGCMarker::~G1FullGCMarker() {

 void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks,
                                       ObjArrayTaskQueueSet* array_stacks,
-                                      OWSTTaskTerminator* terminator) {
+                                      TaskTerminator* terminator) {
   do {
     drain_stack();
     ObjArrayTask steal_array;
@@ -87,7 +87,7 @@ public:
   inline void drain_stack();
   void complete_marking(OopQueueSet* oop_stacks,
                         ObjArrayTaskQueueSet* array_stacks,
-                        OWSTTaskTerminator* terminator);
+                        TaskTerminator* terminator);

   // Closure getters
   CLDToOopClosure* cld_closure() { return &_cld_closure; }
@@ -61,7 +61,7 @@ private:
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask& _proc_task;
   G1FullCollector* _collector;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;

 public:
   G1RefProcTaskProxy(ProcessTask& proc_task,
@@ -49,11 +49,11 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 #include "gc/shared/spaceDecorator.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workerPolicy.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -1970,7 +1970,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
                        collection_exit.ticks());

 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif

   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
@@ -2150,7 +2150,7 @@ static void mark_from_roots_work(ParallelRootType::Value root_type, uint worker_
   cm->follow_marking_stacks();
 }

-static void steal_marking_work(OWSTTaskTerminator& terminator, uint worker_id) {
+static void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   ParCompactionManager* cm =
@@ -2174,7 +2174,7 @@ class MarkFromRootsTask : public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
   SequentialSubTasksDone _subtasks;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
   uint _active_workers;

 public:
@@ -2207,7 +2207,7 @@ class PCRefProcTask : public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask& _task;
   uint _ergo_workers;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;

 public:
   PCRefProcTask(ProcessTask& task, uint ergo_workers) :
@@ -2587,7 +2587,7 @@ void PSParallelCompact::write_block_fill_histogram()
 }
 #endif // #ifdef ASSERT

-static void compaction_with_stealing_work(OWSTTaskTerminator* terminator, uint worker_id) {
+static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   ParCompactionManager* cm =
@@ -2623,7 +2623,7 @@ static void compaction_with_stealing_work(OWSTTaskTerminator* terminator, uint w
 class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   TaskQueue& _tq;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
   uint _active_workers;

 public:
@@ -43,12 +43,12 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 #include "gc/shared/scavengableNMethods.hpp"
 #include "gc/shared/spaceDecorator.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workerPolicy.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -139,7 +139,7 @@ static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_i
   pm->drain_stacks(false);
 }

-static void steal_work(OWSTTaskTerminator& terminator, uint worker_id) {
+static void steal_work(TaskTerminator& terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

   PSPromotionManager* pm =
@@ -219,7 +219,7 @@ class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {

 class PSRefProcTask : public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
   ProcessTask& _task;
   uint _active_workers;

@@ -315,7 +315,7 @@ class ScavengeRootsTask : public AbstractGangTask {
   HeapWord* _gen_top;
   uint _active_workers;
   bool _is_empty;
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;

 public:
   ScavengeRootsTask(PSOldGen* old_gen,
@@ -732,7 +732,7 @@ bool PSScavenge::invoke_no_policy() {
                     scavenge_exit.ticks());

 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif

   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
@@ -678,7 +678,7 @@ void GenCollectedHeap::do_collection(bool full,
   }

 #ifdef TRACESPINNING
-  OWSTTaskTerminator::print_termination_counts();
+  TaskTerminator::print_termination_counts();
 #endif
 }

src/hotspot/share/gc/shared/owstTaskTerminator.cpp → src/hotspot/share/gc/shared/taskTerminator.cpp
@@ -25,26 +25,26 @@

 #include "precompiled.hpp"

-#include "gc/shared/owstTaskTerminator.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "logging/log.hpp"

 #ifdef TRACESPINNING
-uint OWSTTaskTerminator::_total_yields = 0;
-uint OWSTTaskTerminator::_total_spins = 0;
-uint OWSTTaskTerminator::_total_peeks = 0;
+uint TaskTerminator::_total_yields = 0;
+uint TaskTerminator::_total_spins = 0;
+uint TaskTerminator::_total_peeks = 0;
 #endif

-OWSTTaskTerminator::OWSTTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
+TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
   _n_threads(n_threads),
   _queue_set(queue_set),
   _offered_termination(0),
   _spin_master(NULL) {

-  _blocker = new Monitor(Mutex::leaf, "OWSTTaskTerminator", false, Monitor::_safepoint_check_never);
+  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
 }

-OWSTTaskTerminator::~OWSTTaskTerminator() {
+TaskTerminator::~TaskTerminator() {
   assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
   assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );

@@ -54,24 +54,24 @@ OWSTTaskTerminator::~OWSTTaskTerminator() {
 }

 #ifdef ASSERT
-bool OWSTTaskTerminator::peek_in_queue_set() {
+bool TaskTerminator::peek_in_queue_set() {
   return _queue_set->peek();
 }
 #endif

-void OWSTTaskTerminator::yield() {
+void TaskTerminator::yield() {
   assert(_offered_termination <= _n_threads, "Invariant");
   os::naked_yield();
 }

 #ifdef TRACESPINNING
-void OWSTTaskTerminator::print_termination_counts() {
+void TaskTerminator::print_termination_counts() {
   log_trace(gc, task)("TaskTerminator Yields: %u Spins: %u Peeks: %u",
                       total_yields(), total_spins(), total_peeks());
 }
 #endif

-void OWSTTaskTerminator::reset_for_reuse() {
+void TaskTerminator::reset_for_reuse() {
   if (_offered_termination != 0) {
     assert(_offered_termination == _n_threads,
            "Terminator may still be in use");
@@ -79,20 +79,20 @@ void OWSTTaskTerminator::reset_for_reuse() {
   }
 }

-void OWSTTaskTerminator::reset_for_reuse(uint n_threads) {
+void TaskTerminator::reset_for_reuse(uint n_threads) {
   reset_for_reuse();
   _n_threads = n_threads;
 }

-bool OWSTTaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
+bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
   return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
 }

-size_t OWSTTaskTerminator::tasks_in_queue_set() const {
+size_t TaskTerminator::tasks_in_queue_set() const {
   return _queue_set->tasks();
 }

-bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
+bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   assert(_n_threads > 0, "Initialization is incorrect");
   assert(_offered_termination < _n_threads, "Invariant");
   assert(_blocker != NULL, "Invariant");
@@ -155,7 +155,7 @@ bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   }
 }

-bool OWSTTaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
+bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
   uint yield_count = 0;
   // Number of hard spin loops done since last yield
   uint hard_spin_count = 0;
@@ -209,7 +209,7 @@ bool OWSTTaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
 #endif
       }
     } else {
-      log_develop_trace(gc, task)("OWSTTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
+      log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
                                   p2i(Thread::current()), yield_count);
       yield_count = 0;
src/hotspot/share/gc/shared/owstTaskTerminator.hpp → src/hotspot/share/gc/shared/taskTerminator.hpp
@@ -22,8 +22,8 @@
  * questions.
  *
  */
-#ifndef SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
-#define SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
+#ifndef SHARE_GC_SHARED_TASKTERMINATOR_HPP
+#define SHARE_GC_SHARED_TASKTERMINATOR_HPP

 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
@@ -36,10 +36,12 @@ class TaskQueueSetSuper;
 class TerminatorTerminator;

 /*
- * Provides a task termination protocol. OWST stands for Optimized Work Stealing Threads
+ * Provides a task termination protocol.
  *
- * This is an enhanced implementation of Google's work stealing task termination
- * protocol, which is described in the paper:
+ * This is an enhanced implementation of Google's OWST work stealing task termination
+ * protocol (OWST stands for Optimized Work Stealing Threads).
+ *
+ * It is described in the paper:
  * "Wessam Hassanein. 2016. Understanding and improving JVM GC work
  * stealing at the data center scale. In Proceedings of the 2016 ACM
  * SIGPLAN International Symposium on Memory Management (ISMM 2016). ACM,
@@ -50,7 +52,7 @@ class TerminatorTerminator;
  * The intention of above enhancement is to reduce spin-master's latency on detecting new tasks
  * for stealing and termination condition.
  */
-class OWSTTaskTerminator : public CHeapObj<mtGC> {
+class TaskTerminator : public CHeapObj<mtGC> {
   uint _n_threads;
   TaskQueueSetSuper* _queue_set;

@@ -81,11 +83,11 @@ class OWSTTaskTerminator : public CHeapObj<mtGC> {
   // Return true if termination condition is detected, otherwise return false
   bool do_spin_master_work(TerminatorTerminator* terminator);

-  NONCOPYABLE(OWSTTaskTerminator);
+  NONCOPYABLE(TaskTerminator);

 public:
-  OWSTTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
-  ~OWSTTaskTerminator();
+  TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
+  ~TaskTerminator();

   // The current thread has no work, and is ready to terminate if everyone
   // else is. If returns "true", all threads are terminated. If returns
@@ -117,5 +119,4 @@ public:
 #endif
 };

-
-#endif // SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
+#endif // SHARE_GC_SHARED_TASKTERMINATOR_HPP
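Note: the renamed class keeps the contract described by the protocol comment and the offer_termination() documentation above. As a rough sketch (not part of this change) of the worker-side loop it coordinates: example_worker_loop and try_to_steal_or_process_one_task below are hypothetical stand-ins for a GC worker's real draining and stealing code, and passing NULL assumes no custom TerminatorTerminator is needed.

  #include "gc/shared/taskTerminator.hpp"

  // Hypothetical helper: drains local work or steals one task; returns false when
  // nothing is currently available.
  static bool try_to_steal_or_process_one_task();

  // All n_threads workers of a parallel phase share one terminator that the
  // coordinator constructed as: TaskTerminator terminator(n_threads, queue_set);
  static void example_worker_loop(TaskTerminator& terminator) {
    while (true) {
      while (try_to_steal_or_process_one_task()) {
        // Keep working while local or stealable tasks remain.
      }
      // Out of work: offer to terminate. offer_termination() returns true only once
      // every participating worker has offered termination (or termination was
      // aborted); a false return means new work appeared, so resume stealing.
      if (terminator.offer_termination(NULL)) {
        return;
      }
    }
  }

The call sites touched by this commit follow the same shape, for example "TaskTerminator terminator(ergo_workers, _queues);" in G1STWRefProcTaskExecutor::execute above, and the header keeps reset_for_reuse(n_threads) for recycling one terminator across phases.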
@@ -24,7 +24,6 @@

 #include "precompiled.hpp"
 #include "gc/shared/taskqueue.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "oops/oop.inline.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
@@ -24,7 +24,8 @@

 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
 #define SHARE_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
-#include "gc/shared/owstTaskTerminator.hpp"
+
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/atomic.hpp"
@@ -340,7 +341,7 @@ public:

 class ShenandoahTaskTerminator : public StackObj {
 private:
-  OWSTTaskTerminator _terminator;
+  TaskTerminator _terminator;
 public:
   ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
