2008-06-05 15:57:56 -07:00
|
|
|
/*
|
2019-03-05 19:54:33 -05:00
|
|
|
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
|
2008-06-05 15:57:56 -07:00
|
|
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
|
|
*
|
|
|
|
* This code is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License version 2 only, as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* version 2 for more details (a copy is included in the LICENSE file that
|
|
|
|
* accompanied this code).
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License version
|
|
|
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
*
|
2010-05-27 19:08:38 -07:00
|
|
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
|
|
* or visit www.oracle.com if you need additional information or have any
|
|
|
|
* questions.
|
2008-06-05 15:57:56 -07:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2010-11-23 13:22:55 -08:00
|
|
|
#include "precompiled.hpp"
|
2018-08-18 13:59:25 -04:00
|
|
|
#include "gc/shared/satbMarkQueue.hpp"
|
2015-05-13 15:16:06 +02:00
|
|
|
#include "gc/shared/collectedHeap.hpp"
|
2018-08-14 00:15:56 -04:00
|
|
|
#include "logging/log.hpp"
|
2010-11-23 13:22:55 -08:00
|
|
|
#include "memory/allocation.inline.hpp"
|
6964458: Reimplement class meta-data storage to use native memory
Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Co-authored-by: Stefan Karlsson <stefan.karlsson@oracle.com>
Co-authored-by: Mikael Gerdin <mikael.gerdin@oracle.com>
Co-authored-by: Tom Rodriguez <tom.rodriguez@oracle.com>
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
2012-09-01 13:25:18 -04:00
|
|
|
#include "oops/oop.inline.hpp"
|
2019-06-26 13:18:38 -04:00
|
|
|
#include "runtime/atomic.hpp"
|
2010-11-23 13:22:55 -08:00
|
|
|
#include "runtime/mutexLocker.hpp"
|
2019-06-26 13:18:38 -04:00
|
|
|
#include "runtime/orderAccess.hpp"
|
2018-08-14 14:58:14 -04:00
|
|
|
#include "runtime/os.hpp"
|
2015-05-01 17:38:12 -04:00
|
|
|
#include "runtime/safepoint.hpp"
|
2010-11-23 13:22:55 -08:00
|
|
|
#include "runtime/thread.hpp"
|
2017-11-22 17:54:50 -08:00
|
|
|
#include "runtime/threadSMR.hpp"
|
2011-09-22 10:57:37 -07:00
|
|
|
#include "runtime/vmThread.hpp"
|
2019-06-26 13:18:38 -04:00
|
|
|
#include "utilities/globalCounter.inline.hpp"
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2019-03-22 15:42:43 -04:00
|
|
|
// Construct a queue belonging to the given qset.
SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset) :
  // SATB queues are only active during marking cycles. We create
  // them with their active field set to false. If a thread is
  // created during a cycle and its SATB queue needs to be activated
  // before the thread starts running, we'll need to set its active
  // field to true. This must be done in the collector-specific
  // BarrierSet thread attachment protocol.
  PtrQueue(qset, false /* active */)
{ }
|
|
|
|
|
2015-11-06 16:30:40 -05:00
|
|
|
// Push this queue's buffered entries to the qset, e.g. when the owning
// thread is detaching or the queue is being retired.
void SATBMarkQueue::flush() {
  // Filter now to possibly save work later. If filtering empties the
  // buffer then flush_impl can deallocate the buffer.
  filter();
  flush_impl();
}
|
|
|
|
|
2018-08-01 19:14:04 -04:00
|
|
|
// This method will first apply filtering to the buffer. If filtering
|
|
|
|
// retains a small enough collection in the buffer, we can continue to
|
|
|
|
// use the buffer as-is, instead of enqueueing and replacing it.
|
2012-01-10 18:58:13 -05:00
|
|
|
|
2019-05-21 19:19:44 -04:00
|
|
|
void SATBMarkQueue::handle_completed_buffer() {
|
2012-01-10 18:58:13 -05:00
|
|
|
// This method should only be called if there is a non-NULL buffer
|
|
|
|
// that is full.
|
2017-05-08 07:16:10 -04:00
|
|
|
assert(index() == 0, "pre-condition");
|
2012-01-10 18:58:13 -05:00
|
|
|
assert(_buf != NULL, "pre-condition");
|
|
|
|
|
|
|
|
filter();
|
|
|
|
|
2019-05-21 19:19:44 -04:00
|
|
|
size_t threshold = satb_qset()->buffer_enqueue_threshold();
|
2018-08-14 13:16:26 -04:00
|
|
|
// Ensure we'll enqueue completely full buffers.
|
|
|
|
assert(threshold > 0, "enqueue threshold = 0");
|
|
|
|
// Ensure we won't enqueue empty buffers.
|
|
|
|
assert(threshold <= capacity(),
|
|
|
|
"enqueue threshold " SIZE_FORMAT " exceeds capacity " SIZE_FORMAT,
|
|
|
|
threshold, capacity());
|
2019-05-21 19:19:44 -04:00
|
|
|
|
|
|
|
if (index() < threshold) {
|
|
|
|
// Buffer is sufficiently full; enqueue and allocate a new one.
|
|
|
|
enqueue_completed_buffer();
|
|
|
|
} // Else continue to accumulate in buffer.
|
2011-01-19 09:35:17 -05:00
|
|
|
}
|
|
|
|
|
2015-11-06 16:30:40 -05:00
|
|
|
// Apply cl to the entries in this queue's buffer, then empty the buffer.
// Must only be called at a safepoint.
void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(),
         "SATB queues must only be processed at safepoints");
  if (_buf == NULL) {
    return;                     // Nothing buffered for this thread.
  }
  // Live entries start at index(); size() is the number of entries.
  cl->do_buffer(&_buf[index()], size());
  reset();
}
|
|
|
|
|
2012-01-10 18:58:13 -05:00
|
|
|
#ifndef PRODUCT
// Helpful for debugging

// Print a one-line summary of a buffer: label, address, current fill
// index, and total capacity.
static void print_satb_buffer(const char* name,
                              void** buf,
                              size_t index,
                              size_t capacity) {
  tty->print_cr("  SATB BUFFER [%s] buf: " PTR_FORMAT " index: " SIZE_FORMAT
                " capacity: " SIZE_FORMAT,
                name, p2i(buf), index, capacity);
}

// Print this queue's buffer, labeled with the given name.
void SATBMarkQueue::print(const char* name) {
  print_satb_buffer(name, _buf, index(), capacity());
}

#endif // PRODUCT
|
|
|
|
|
2008-06-05 15:57:56 -07:00
|
|
|
SATBMarkQueueSet::SATBMarkQueueSet() :
  PtrQueueSet(),
  _list(),                          // Lock-free stack of completed buffers.
  _count_and_process_flag(0),       // Buffer count in upper bits, process flag in lsbit.
  _process_completed_buffers_threshold(SIZE_MAX), // Effectively "never" until initialize().
  _buffer_enqueue_threshold(0)      // Computed in initialize().
{}
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2019-06-26 13:18:38 -04:00
|
|
|
SATBMarkQueueSet::~SATBMarkQueueSet() {
  // Return any remaining completed buffers to the allocator.
  abandon_completed_buffers();
}
|
|
|
|
|
|
|
|
// _count_and_process_flag has flag in least significant bit, count in
// remaining bits.  _process_completed_buffers_threshold is scaled
// accordingly, with the lsbit set, so a _count_and_process_flag value
// is directly comparable with the recorded threshold value.  The
// process flag is set whenever the count exceeds the threshold, and
// remains set until the count is reduced to zero.

// Increment count. If count > threshold, set flag, else maintain flag.
static void increment_count(volatile size_t* cfptr, size_t threshold) {
  size_t old;
  size_t value = Atomic::load(cfptr);
  do {
    old = value;
    value += 2;                        // Count lives in the upper bits; +2 adds one.
    assert(value > old, "overflow");
    if (value > threshold) value |= 1; // Set the process flag; never cleared here.
    // CAS publishes the new value; on failure it returns the current
    // contents, which seed the next retry iteration.
    value = Atomic::cmpxchg(value, cfptr, old);
  } while (value != old);
}
|
|
|
|
|
|
|
|
// Decrement count. If count == 0, clear flag, else maintain flag.
static void decrement_count(volatile size_t* cfptr) {
  size_t old;
  size_t value = Atomic::load(cfptr);
  do {
    assert((value >> 1) != 0, "underflow"); // Count must be non-zero to decrement.
    old = value;
    value -= 2;                 // Subtract one from the count in the upper bits.
    if (value <= 1) value = 0;  // Count reached zero; clear the process flag too.
    // CAS publishes the new value; on failure it returns the current
    // contents, which seed the next retry iteration.
    value = Atomic::cmpxchg(value, cfptr, old);
  } while (value != old);
}
|
|
|
|
|
|
|
|
// Align a requested buffer-count threshold with the layout of
// _count_and_process_flag: shift the count into the upper bits and set
// the lsbit (process flag position) so increment_count's comparison is
// exact.  A count too large to represent saturates to SIZE_MAX.
static size_t scale_threshold(size_t value) {
  const size_t shifted = value << 1;
  const bool overflowed = ((shifted >> 1) != value);
  return (overflowed ? SIZE_MAX : shifted) | 1;
}
|
|
|
|
|
|
|
|
void SATBMarkQueueSet::initialize(BufferNode::Allocator* allocator,
|
2018-11-28 16:05:48 -05:00
|
|
|
size_t process_completed_buffers_threshold,
|
2019-03-05 19:54:33 -05:00
|
|
|
uint buffer_enqueue_threshold_percentage) {
|
2019-06-26 13:18:38 -04:00
|
|
|
PtrQueueSet::initialize(allocator);
|
|
|
|
_process_completed_buffers_threshold =
|
|
|
|
scale_threshold(process_completed_buffers_threshold);
|
2018-08-14 13:16:26 -04:00
|
|
|
assert(buffer_size() != 0, "buffer size not initialized");
|
|
|
|
// Minimum threshold of 1 ensures enqueuing of completely full buffers.
|
|
|
|
size_t size = buffer_size();
|
|
|
|
size_t enqueue_qty = (size * buffer_enqueue_threshold_percentage) / 100;
|
|
|
|
_buffer_enqueue_threshold = MAX2(size - enqueue_qty, (size_t)1);
|
2008-06-05 15:57:56 -07:00
|
|
|
}
|
|
|
|
|
2010-03-18 12:14:59 -04:00
|
|
|
#ifdef ASSERT
|
2014-01-10 09:54:25 +01:00
|
|
|
// Log the expected active state alongside the actual state of the qset
// and of every thread's queue, as an aid to diagnosing verification
// failures.
void SATBMarkQueueSet::dump_active_states(bool expected_active) {
  log_error(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
  log_error(gc, verify)("Actual SATB active states:");
  log_error(gc, verify)("  Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");

  // Log each thread's queue state.
  class DumpThreadStateClosure : public ThreadClosure {
    SATBMarkQueueSet* _qset;
  public:
    DumpThreadStateClosure(SATBMarkQueueSet* qset) : _qset(qset) {}
    virtual void do_thread(Thread* t) {
      SATBMarkQueue& queue = _qset->satb_queue_for_thread(t);
      log_error(gc, verify)("  Thread \"%s\" queue: %s",
                            t->name(),
                            queue.is_active() ? "ACTIVE" : "INACTIVE");
    }
  } closure(this);
  Threads::threads_do(&closure);
}
|
|
|
|
|
2014-01-10 09:54:25 +01:00
|
|
|
// Verify that the qset and every thread's queue are in the expected
// active state; on mismatch, dump all states and abort.
void SATBMarkQueueSet::verify_active_states(bool expected_active) {
  // Verify queue set state
  if (is_active() != expected_active) {
    dump_active_states(expected_active);
    fatal("SATB queue set has an unexpected active state");
  }

  // Verify thread queue states
  class VerifyThreadStatesClosure : public ThreadClosure {
    SATBMarkQueueSet* _qset;
    bool _expected_active;
  public:
    VerifyThreadStatesClosure(SATBMarkQueueSet* qset, bool expected_active) :
      _qset(qset), _expected_active(expected_active) {}
    virtual void do_thread(Thread* t) {
      if (_qset->satb_queue_for_thread(t).is_active() != _expected_active) {
        _qset->dump_active_states(_expected_active);
        fatal("Thread SATB queue has an unexpected active state");
      }
    }
  } closure(this, expected_active);
  Threads::threads_do(&closure);
}
|
2010-03-18 12:14:59 -04:00
|
|
|
#endif // ASSERT
|
|
|
|
|
2014-01-10 09:54:25 +01:00
|
|
|
// Set the active state of the qset and of every thread's queue.  Must
// be called at a safepoint; in debug builds the current state is first
// verified against expected_active.
void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
#ifdef ASSERT
  verify_active_states(expected_active);
#endif // ASSERT
  // Update the global state, synchronized with threads list management.
  {
    MutexLocker ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag);
    _all_active = active;
  }

  // Propagate the new state to each thread's queue.
  class SetThreadActiveClosure : public ThreadClosure {
    SATBMarkQueueSet* _qset;
    bool _active;
  public:
    SetThreadActiveClosure(SATBMarkQueueSet* qset, bool active) :
      _qset(qset), _active(active) {}
    virtual void do_thread(Thread* t) {
      _qset->satb_queue_for_thread(t).set_active(_active);
    }
  } closure(this, active);
  Threads::threads_do(&closure);
}
|
|
|
|
|
2015-05-01 17:38:12 -04:00
|
|
|
// Pop one completed buffer and apply cl to its entries, then return the
// buffer to the allocator.  Returns true if a buffer was processed,
// false if none was available.
bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl) {
  BufferNode* node = get_completed_buffer();
  if (node == NULL) {
    return false;               // No completed buffers pending.
  }
  void** buffer = BufferNode::make_buffer_from_node(node);
  size_t index = node->index();
  size_t capacity = buffer_size();
  assert(index <= capacity, "invariant");
  // Live entries occupy [index, capacity).
  cl->do_buffer(buffer + index, capacity - index);
  deallocate_buffer(node);
  return true;
}
|
|
|
|
|
2019-06-26 13:18:38 -04:00
|
|
|
// SATB buffer life-cycle - Per-thread queues obtain buffers from the
|
|
|
|
// qset's buffer allocator, fill them, and push them onto the qset's
|
|
|
|
// list. The GC concurrently pops buffers from the qset, processes
|
|
|
|
// them, and returns them to the buffer allocator for re-use. Both
|
|
|
|
// the allocator and the qset use lock-free stacks. The ABA problem
|
|
|
|
// is solved by having both allocation pops and GC pops performed
|
|
|
|
// within GlobalCounter critical sections, while the return of buffers
|
|
|
|
// to the allocator performs a GlobalCounter synchronize before
|
|
|
|
// pushing onto the allocator's list.
|
|
|
|
|
|
|
|
// Push a filled buffer onto the completed-buffer list, making it
// available to the GC for processing.
void SATBMarkQueueSet::enqueue_completed_buffer(BufferNode* node) {
  assert(node != NULL, "precondition");
  // Increment count and update flag appropriately.  Done before
  // pushing buffer so count is always at least the actual number in
  // the list, and decrement never underflows.
  increment_count(&_count_and_process_flag, _process_completed_buffers_threshold);
  _list.push(*node);
}
|
|
|
|
|
|
|
|
// Pop a completed buffer from the list, or return NULL if none is
// available.  The pop is performed inside a GlobalCounter critical
// section: buffers are returned to the allocator only after a
// GlobalCounter synchronize, which protects this lock-free pop from
// the ABA problem.
BufferNode* SATBMarkQueueSet::get_completed_buffer() {
  BufferNode* node;
  {
    GlobalCounter::CriticalSection cs(Thread::current());
    node = _list.pop();
  }
  if (node != NULL) {
    // Got a buffer so decrement count and update flag appropriately.
    decrement_count(&_count_and_process_flag);
  }
  return node;
}
|
|
|
|
|
2012-01-10 18:58:13 -05:00
|
|
|
#ifndef PRODUCT
// Helpful for debugging

// Scratch space for formatting per-buffer labels.
#define SATB_PRINTER_BUFFER_SIZE 256

// Print every completed buffer in the qset's list, then every thread's
// queue buffer.  Must be called at a safepoint so the thread list and
// queues are stable.
void SATBMarkQueueSet::print_all(const char* msg) {
  char buffer[SATB_PRINTER_BUFFER_SIZE];
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");

  tty->cr();
  tty->print_cr("SATB BUFFERS [%s]", msg);

  // Walk the completed-buffer list, labeling entries by position.
  BufferNode* nd = _list.top();
  int i = 0;
  while (nd != NULL) {
    void** buf = BufferNode::make_buffer_from_node(nd);
    os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
    print_satb_buffer(buffer, buf, nd->index(), buffer_size());
    nd = nd->next();
    i += 1;
  }

  // Print each thread's queue, labeled with the thread's name.
  class PrintThreadClosure : public ThreadClosure {
    SATBMarkQueueSet* _qset;
    char* _buffer;

  public:
    PrintThreadClosure(SATBMarkQueueSet* qset, char* buffer) :
      _qset(qset), _buffer(buffer) {}

    virtual void do_thread(Thread* t) {
      os::snprintf(_buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
      _qset->satb_queue_for_thread(t).print(_buffer);
    }
  } closure(this, buffer);
  Threads::threads_do(&closure);

  tty->cr();
}
#endif // PRODUCT
|
|
|
|
|
2019-06-26 13:18:38 -04:00
|
|
|
void SATBMarkQueueSet::abandon_completed_buffers() {
|
|
|
|
Atomic::store(size_t(0), &_count_and_process_flag);
|
|
|
|
BufferNode* buffers_to_delete = _list.pop_all();
|
|
|
|
while (buffers_to_delete != NULL) {
|
|
|
|
BufferNode* bn = buffers_to_delete;
|
|
|
|
buffers_to_delete = bn->next();
|
|
|
|
bn->set_next(NULL);
|
|
|
|
deallocate_buffer(bn);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-06-05 15:57:56 -07:00
|
|
|
// Discard all SATB state accumulated so far: the qset's completed
// buffers and the contents of every thread's queue.  Must be called at
// a safepoint.
void SATBMarkQueueSet::abandon_partial_marking() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  abandon_completed_buffers();

  // Reset each thread's queue, dropping any buffered entries.
  class AbandonThreadQueueClosure : public ThreadClosure {
    SATBMarkQueueSet* _qset;
  public:
    AbandonThreadQueueClosure(SATBMarkQueueSet* qset) : _qset(qset) {}
    virtual void do_thread(Thread* t) {
      _qset->satb_queue_for_thread(t).reset();
    }
  } closure(this);
  Threads::threads_do(&closure);
}
|