/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/workgroup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"

// Closure used for updating remembered sets and recording references that
// point into the collection set while the mutator is running.
// Assumed to be only executed concurrently with the mutator. Yields via
// SuspendibleThreadSet after every card.
class G1RefineCardConcurrentlyClosure: public G1CardTableEntryClosure {
public:
  bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
    G1CollectedHeap::heap()->rem_set()->refine_card_concurrently(card_ptr, worker_i);

    if (SuspendibleThreadSet::should_yield()) {
      // Caller will actually yield.
      return false;
    }
    // Otherwise, we finished successfully; return true.
    return true;
  }
};

G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, true /* active */)
{ }

G1DirtyCardQueue::~G1DirtyCardQueue() {
  flush();
}

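// Handle a full buffer: wrap it in a BufferNode and let the queue set decide
// whether to refine it right away on this thread or to enqueue it on the
// list of completed buffers for the refinement threads.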
void G1DirtyCardQueue::handle_completed_buffer() {
  assert(_buf != NULL, "precondition");
  BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  G1DirtyCardQueueSet* dcqs = dirty_card_qset();
  if (dcqs->process_or_enqueue_completed_buffer(node)) {
    reset(); // Buffer fully processed, reset index.
  } else {
    allocate_buffer(); // Buffer enqueued, get a new one.
  }
}

G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(),
  _cbl_mon(NULL),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
  _process_completed_buffers(false),
  _notify_when_complete(notify_when_complete),
  _max_completed_buffers(MaxCompletedBuffersUnlimited),
  _completed_buffers_padding(0),
  _free_ids(NULL),
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0)
{
  _all_active = true;
}

G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  abandon_completed_buffers();
  delete _free_ids;
}

// Determines how many mutator threads can process the buffers in parallel.
uint G1DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

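// Set up the shared monitor, the buffer allocator, and (optionally) the set
// of free parallel ids used when mutator threads refine buffers themselves.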
void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                     BufferNode::Allocator* allocator,
                                     bool init_free_ids) {
  PtrQueueSet::initialize(allocator);
  assert(_cbl_mon == NULL, "Init order issue?");
  _cbl_mon = cbl_mon;
  if (init_free_ids) {
    _free_ids = new G1FreeIdSet(0, num_par_ids());
  }
}

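// The given thread's dirty card queue buffer has filled up (its index has
// reached zero); delegate to that queue's zero-index handling.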
void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
  G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}

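// Add cbn to the tail of the shared list of completed buffers. Once the
// count exceeds the processing threshold, mark the list as ready for
// processing and notify any waiting refinement threads.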
void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!process_completed_buffers() &&
      (_n_completed_buffers > process_completed_buffers_threshold())) {
    set_process_completed_buffers(true);
    if (_notify_when_complete) {
      _cbl_mon->notify_all();
    }
  }
  assert_completed_buffers_list_len_correct_locked();
}

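// Remove and return the first completed buffer, or NULL if the list
// currently holds no more than stop_at buffers.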
BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    return NULL;
  }

  assert(_n_completed_buffers > 0, "invariant");
  assert(_completed_buffers_head != NULL, "invariant");
  assert(_completed_buffers_tail != NULL, "invariant");

  BufferNode* bn = _completed_buffers_head;
  _n_completed_buffers--;
  _completed_buffers_head = bn->next();
  if (_completed_buffers_head == NULL) {
    assert(_n_completed_buffers == 0, "invariant");
    _completed_buffers_tail = NULL;
    set_process_completed_buffers(false);
  }
  assert_completed_buffers_list_len_correct_locked();
  bn->set_next(NULL);
  return bn;
}

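// Discard all completed buffers: detach the whole list while holding the
// lock, then deallocate the nodes outside the critical section.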
void G1DirtyCardQueueSet::abandon_completed_buffers() {
  BufferNode* buffers_to_delete = NULL;
  {
    MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    buffers_to_delete = _completed_buffers_head;
    _completed_buffers_head = NULL;
    _completed_buffers_tail = NULL;
    _n_completed_buffers = 0;
    set_process_completed_buffers(false);
  }
  while (buffers_to_delete != NULL) {
    BufferNode* bn = buffers_to_delete;
    buffers_to_delete = bn->next();
    bn->set_next(NULL);
    deallocate_buffer(bn);
  }
}

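// Wake a waiting refinement thread if the number of completed buffers has
// risen above the processing threshold.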
void G1DirtyCardQueueSet::notify_if_necessary() {
  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers > process_completed_buffers_threshold()) {
    set_process_completed_buffers(true);
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}

#ifdef ASSERT
void G1DirtyCardQueueSet::assert_completed_buffers_list_len_correct_locked() {
  assert_lock_strong(_cbl_mon);
  size_t n = 0;
  for (BufferNode* bn = _completed_buffers_head; bn != NULL; bn = bn->next()) {
    ++n;
  }
  assert(n == _n_completed_buffers,
         "Completed buffer length is wrong: counted: " SIZE_FORMAT
         ", expected: " SIZE_FORMAT, n, _n_completed_buffers);
}
#endif // ASSERT

// Merge lists of buffers. The source queue set is emptied as a result.
// The two queue sets must use the same buffer allocator.
void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) {
  assert(allocator() == src->allocator(), "precondition");
  const G1RedirtyCardsBufferList from = src->take_all_completed_buffers();
  if (from._head == NULL) return;

  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = from._head;
    _completed_buffers_tail = from._tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    _completed_buffers_tail->set_next(from._head);
    _completed_buffers_tail = from._tail;
  }
  _n_completed_buffers += from._count;

  assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
         _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
         "Sanity");
  assert_completed_buffers_list_len_correct_locked();
}

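// Apply cl to the cards in node's buffer, starting from the buffer's saved
// index. Returns true if the entire buffer was processed; if the closure
// stops early, the node's index records how far processing got so the
// remainder can be handled later.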
bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                                                  BufferNode* node,
                                                  uint worker_i) {
  if (cl == NULL) return true;
  bool result = true;
  void** buf = BufferNode::make_buffer_from_node(node);
  size_t i = node->index();
  size_t limit = buffer_size();
  for ( ; i < limit; ++i) {
    CardTable::CardValue* card_ptr = static_cast<CardTable::CardValue*>(buf[i]);
    assert(card_ptr != NULL, "invariant");
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      result = false;           // Incomplete processing.
      break;
    }
  }
  assert(i <= buffer_size(), "invariant");
  node->set_index(i);
  return result;
}

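// In debug builds, check that processing fully consumed the node's buffer,
// i.e. its index has reached the buffer size.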
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
           _afc_index, _afc_size);                              \
  } while (0)
#endif // ASSERT

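// Decide what to do with a freshly completed buffer: a Java thread that
// finds too many buffers pending refines the buffer itself; otherwise the
// buffer is enqueued for the concurrent refinement threads. Returns true
// if the buffer was processed here, false if it was enqueued.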
bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // If the number of buffers exceeds the limit, make this Java
    // thread do the processing itself. We don't lock to access
    // buffer count or padding; it is fine to be imprecise here. The
    // add of padding could overflow, which is treated as unlimited.
    size_t max_buffers = max_completed_buffers();
    size_t limit = max_buffers + completed_buffers_padding();
    if ((completed_buffers_num() > limit) && (limit >= max_buffers)) {
      if (mut_process_buffer(node)) {
        return true;
      }
    }
  }
  enqueue_completed_buffer(node);
  return false;
}

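// Refine a buffer directly on a mutator thread, using a temporarily claimed
// parallel worker id. Returns true only if the whole buffer was consumed.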
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
  G1RefineCardConcurrentlyClosure cl;
  bool result = apply_closure_to_buffer(&cl, node, worker_i);
  _free_ids->release_par_id(worker_i); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return result;
}

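// Refine one completed buffer on a concurrent refinement thread, leaving at
// least stop_at buffers on the list. Returns false if no buffer was
// available to process.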
bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure cl;
  return apply_closure_to_completed_buffer(&cl, worker_i, stop_at, false);
}

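// Apply cl to one completed buffer as part of a GC pause; must be called at
// a safepoint. Returns false if no completed buffers remain.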
bool G1DirtyCardQueueSet::apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint();
  return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}

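// Fetch a completed buffer (leaving at least stop_at buffers behind) and
// apply cl to it. A fully consumed buffer is deallocated; a partially
// processed one is returned to the list, which is only allowed outside of
// a pause. Returns false if no buffer was available.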
bool G1DirtyCardQueueSet::apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                                            uint worker_i,
                                                            size_t stop_at,
                                                            bool during_pause) {
  assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
  BufferNode* nd = get_completed_buffer(stop_at);
  if (nd == NULL) {
    return false;
  } else {
    if (apply_closure_to_buffer(cl, nd, worker_i)) {
      assert_fully_consumed(nd, buffer_size());
      // Done with fully processed buffer.
      deallocate_buffer(nd);
      Atomic::inc(&_processed_buffers_rs_thread);
    } else {
      // Return partially processed buffer to the queue.
      guarantee(!during_pause, "Should never stop early");
      enqueue_completed_buffer(nd);
    }
    return true;
  }
}

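// Discard all outstanding dirty card logs: the completed buffers, every
// thread's local queue, and the shared queue. Only used at a safepoint.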
void G1DirtyCardQueueSet::abandon_logs() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  abandon_completed_buffers();

  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  struct AbandonThreadLogClosure : public ThreadClosure {
    virtual void do_thread(Thread* t) {
      G1ThreadLocalData::dirty_card_queue(t).reset();
    }
  } closure;
  Threads::threads_do(&closure);

  G1BarrierSet::shared_dirty_card_queue().reset();
}

void G1DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to the
  // global list of logs. Temporarily turn off the limit on the number of
  // outstanding buffers.
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  size_t old_limit = max_completed_buffers();
  set_max_completed_buffers(MaxCompletedBuffersUnlimited);

  struct ConcatenateThreadLogClosure : public ThreadClosure {
    virtual void do_thread(Thread* t) {
      G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(t);
      if (!dcq.is_empty()) {
        dcq.flush();
      }
    }
  } closure;
  Threads::threads_do(&closure);

  G1BarrierSet::shared_dirty_card_queue().flush();
  set_max_completed_buffers(old_limit);
}