2008-06-05 15:57:56 -07:00
|
|
|
/*
|
2019-01-25 00:27:51 -05:00
|
|
|
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
|
2008-06-05 15:57:56 -07:00
|
|
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
|
|
*
|
|
|
|
* This code is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License version 2 only, as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* version 2 for more details (a copy is included in the LICENSE file that
|
|
|
|
* accompanied this code).
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License version
|
|
|
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
*
|
2010-05-27 19:08:38 -07:00
|
|
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
|
|
* or visit www.oracle.com if you need additional information or have any
|
|
|
|
* questions.
|
2008-06-05 15:57:56 -07:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2010-11-23 13:22:55 -08:00
|
|
|
#include "precompiled.hpp"
|
2015-05-13 15:16:06 +02:00
|
|
|
#include "gc/g1/dirtyCardQueue.hpp"
|
|
|
|
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
2019-01-25 00:27:51 -05:00
|
|
|
#include "gc/g1/g1FreeIdSet.hpp"
|
2017-07-12 12:26:57 +02:00
|
|
|
#include "gc/g1/g1RemSet.hpp"
|
2018-04-12 08:25:56 +02:00
|
|
|
#include "gc/g1/g1ThreadLocalData.hpp"
|
2015-05-13 15:16:06 +02:00
|
|
|
#include "gc/g1/heapRegionRemSet.hpp"
|
2018-11-13 22:08:44 -08:00
|
|
|
#include "gc/shared/suspendibleThreadSet.hpp"
|
2015-05-13 15:16:06 +02:00
|
|
|
#include "gc/shared/workgroup.hpp"
|
2016-08-21 20:56:37 -04:00
|
|
|
#include "runtime/atomic.hpp"
|
2018-11-28 16:05:48 -05:00
|
|
|
#include "runtime/flags/flagSetting.hpp"
|
2010-11-23 13:22:55 -08:00
|
|
|
#include "runtime/mutexLocker.hpp"
|
|
|
|
#include "runtime/safepoint.hpp"
|
2012-11-27 14:20:21 +01:00
|
|
|
#include "runtime/thread.inline.hpp"
|
2017-11-22 17:54:50 -08:00
|
|
|
#include "runtime/threadSMR.hpp"
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2017-07-12 12:26:57 +02:00
|
|
|
// Closure used for updating remembered sets and recording references that
|
|
|
|
// point into the collection set while the mutator is running.
|
|
|
|
// Assumed to be only executed concurrently with the mutator. Yields via
|
|
|
|
// SuspendibleThreadSet after every card.
|
|
|
|
class G1RefineCardConcurrentlyClosure: public CardTableEntryClosure {
|
|
|
|
public:
|
|
|
|
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
|
|
|
|
G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);
|
|
|
|
|
|
|
|
if (SuspendibleThreadSet::should_yield()) {
|
|
|
|
// Caller will actually yield.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// Otherwise, we finished successfully; return true.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2015-11-04 13:09:57 -05:00
|
|
|
// Construct a dirty card queue belonging to the given qset.
DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }
|
|
|
|
|
|
|
|
DirtyCardQueue::~DirtyCardQueue() {
  // Permanent queues are flushed/cleaned up elsewhere; only a
  // non-permanent queue pushes its residual entries on destruction.
  if (is_permanent()) {
    return;
  }
  flush();
}
|
|
|
|
|
2009-12-16 15:12:51 -08:00
|
|
|
// Construct the dirty card queue set.  Most infrastructure is wired up
// later via initialize().
DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),   // created in initialize() when init_free_ids is set
  _processed_buffers_mut(0),
  _processed_buffers_rs_thread(0),
  _cur_par_buffer_node(NULL)
{
  // Dirty card queues are always active.
  _all_active = true;
}
|
|
|
|
|
2019-01-25 00:27:51 -05:00
|
|
|
DirtyCardQueueSet::~DirtyCardQueueSet() {
  // _free_ids is only allocated when initialize() was asked for it;
  // deleting NULL is harmless.
  delete _free_ids;
}
|
|
|
|
|
2009-05-18 11:52:46 -07:00
|
|
|
// Determines how many mutator threads can process the buffers in parallel.
|
2014-04-03 17:49:31 +04:00
|
|
|
uint DirtyCardQueueSet::num_par_ids() {
|
2016-07-26 11:04:20 +02:00
|
|
|
return (uint)os::initial_active_processor_count();
|
2008-06-05 15:57:56 -07:00
|
|
|
}
|
|
|
|
|
2017-07-12 12:26:57 +02:00
|
|
|
// One-time setup: connect the set to its completed-buffer monitor and
// buffer allocator, give the shared queue its lock, and optionally
// create the free-id set used for mutator-side buffer processing.
void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                   BufferNode::Allocator* allocator,
                                   Mutex* lock,
                                   bool init_free_ids) {
  PtrQueueSet::initialize(cbl_mon, allocator);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    // ids in [0, num_par_ids()) are claimable by mut_process_buffer().
    _free_ids = new G1FreeIdSet(0, num_par_ids());
  }
}
|
|
|
|
|
|
|
|
// Handle a full (index zero) dirty card queue for the given thread by
// delegating to the thread's own queue.
void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  DirtyCardQueue& queue = G1ThreadLocalData::dirty_card_queue(t);
  queue.handle_zero_index();
}
|
|
|
|
|
2016-03-16 00:28:33 -04:00
|
|
|
// Apply cl to the unprocessed entries of node's buffer, in order.
// Stops early if cl->do_card_ptr returns false; in that case the return
// value is false, otherwise true.  If consume is true, the node's index
// is advanced to record how far processing got (it ends up pointing at
// the first unprocessed entry, or at the end when fully processed).
bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                                BufferNode* node,
                                                bool consume,
                                                uint worker_i) {
  if (cl == NULL) return true;
  void** entries = BufferNode::make_buffer_from_node(node);
  size_t limit = buffer_size();
  size_t cur = node->index();
  bool fully_processed = true;
  while (cur < limit) {
    jbyte* card_ptr = static_cast<jbyte*>(entries[cur]);
    assert(card_ptr != NULL, "invariant");
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      // Closure requested early termination; cur still names the
      // unprocessed entry.
      fully_processed = false;
      break;
    }
    ++cur;
  }
  if (consume) {
    assert(cur <= buffer_size(), "invariant");
    node->set_index(cur);
  }
  return fully_processed;
}
|
|
|
|
|
2016-03-25 15:50:31 -04:00
|
|
|
// Debug-only check that a buffer claimed to be fully processed really
// had its index advanced to buffer_size.  Expands to nothing in
// product (non-ASSERT) builds.
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size) \
  do { \
    size_t _afc_index = (node)->index(); \
    size_t _afc_size = (buffer_size); \
    assert(_afc_index == _afc_size, \
           "Buffer was not fully consumed as claimed: index: " \
           SIZE_FORMAT ", size: " SIZE_FORMAT, \
           _afc_index, _afc_size); \
  } while (0)
#endif // ASSERT
|
|
|
|
|
2016-03-10 16:21:46 -05:00
|
|
|
// Refine a completed buffer on behalf of a mutator thread.  Returns
// true if the buffer was fully consumed, false if refinement stopped
// early (e.g. due to a yield request).
bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  // Borrow a worker id for the duration of the processing.
  uint par_id = _free_ids->claim_par_id();
  G1RefineCardConcurrentlyClosure refine_cl;
  bool fully_consumed = apply_closure_to_buffer(&refine_cl, node, true, par_id);
  _free_ids->release_par_id(par_id);

  if (fully_consumed) {
    assert_fully_consumed(node, buffer_size());
    Atomic::inc(&_processed_buffers_mut);
  }
  return fully_consumed;
}
|
|
|
|
|
2017-07-12 12:26:57 +02:00
|
|
|
// Refine one completed buffer concurrently (outside a GC pause),
// leaving at least stop_at buffers on the completed list.  Returns
// false when no buffer was available to process.
bool DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_i, size_t stop_at) {
  G1RefineCardConcurrentlyClosure refine_cl;
  return apply_closure_to_completed_buffer(&refine_cl, worker_i, stop_at, false /* during_pause */);
}
|
|
|
|
|
|
|
|
// Apply cl to one completed buffer during a GC pause.  Must be called
// at a safepoint; drains down to an empty completed list (stop_at 0).
// Returns false when no buffer was available.
bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
  assert_at_safepoint();
  return apply_closure_to_completed_buffer(cl, worker_i, 0 /* stop_at */, true /* during_pause */);
}
|
|
|
|
|
2016-02-17 16:00:27 -05:00
|
|
|
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
|
|
|
|
uint worker_i,
|
2016-02-26 14:02:39 -05:00
|
|
|
size_t stop_at,
|
2016-02-17 16:00:27 -05:00
|
|
|
bool during_pause) {
|
|
|
|
assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
|
|
|
|
BufferNode* nd = get_completed_buffer(stop_at);
|
|
|
|
if (nd == NULL) {
|
|
|
|
return false;
|
|
|
|
} else {
|
2016-03-16 00:28:33 -04:00
|
|
|
if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
|
2016-03-25 15:50:31 -04:00
|
|
|
assert_fully_consumed(nd, buffer_size());
|
2016-02-17 16:00:27 -05:00
|
|
|
// Done with fully processed buffer.
|
2016-03-10 16:21:46 -05:00
|
|
|
deallocate_buffer(nd);
|
2016-02-17 16:00:27 -05:00
|
|
|
Atomic::inc(&_processed_buffers_rs_thread);
|
2008-06-05 15:57:56 -07:00
|
|
|
} else {
|
2016-02-17 16:00:27 -05:00
|
|
|
// Return partially processed buffer to the queue.
|
2016-03-25 15:50:31 -04:00
|
|
|
guarantee(!during_pause, "Should never stop early");
|
2018-12-26 19:24:00 -05:00
|
|
|
enqueue_completed_buffer(nd);
|
2008-06-05 15:57:56 -07:00
|
|
|
}
|
2016-03-25 15:50:31 -04:00
|
|
|
return true;
|
2008-06-05 15:57:56 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-16 16:46:58 +02:00
|
|
|
// Apply cl to all buffers reachable from _cur_par_buffer_node.  Each
// buffer is claimed by exactly one caller via CAS on the shared cursor,
// so this is safe to run from multiple workers at once.  Buffers are
// not consumed: their indices are left unchanged (consume == false).
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
  BufferNode* nd = _cur_par_buffer_node;
  while (nd != NULL) {
    BufferNode* next = nd->next();
    // Attempt to claim nd by advancing the shared cursor past it.
    BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
    if (actual == nd) {
      // We claimed nd; process it, expecting full completion.
      bool b = apply_closure_to_buffer(cl, nd, false);
      guarantee(b, "Should not stop early.");
      nd = next;
    } else {
      // Another worker claimed nd first; continue from the cursor
      // value it installed.
      nd = actual;
    }
  }
}
|
|
|
|
|
2010-08-02 12:51:43 -07:00
|
|
|
void DirtyCardQueueSet::abandon_logs() {
|
|
|
|
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
|
2018-12-26 19:24:00 -05:00
|
|
|
abandon_completed_buffers();
|
2008-06-05 15:57:56 -07:00
|
|
|
// Since abandon is done only at safepoints, we can safely manipulate
|
|
|
|
// these queues.
|
2017-11-22 17:54:50 -08:00
|
|
|
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
|
2018-04-12 08:25:56 +02:00
|
|
|
G1ThreadLocalData::dirty_card_queue(t).reset();
|
2008-06-05 15:57:56 -07:00
|
|
|
}
|
|
|
|
shared_dirty_card_queue()->reset();
|
|
|
|
}
|
|
|
|
|
2016-03-10 16:21:46 -05:00
|
|
|
// Push the contents of a single queue onto the completed-buffer list,
// skipping the flush entirely when there is nothing to move.
void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
  if (dcq.is_empty()) {
    return;
  }
  dcq.flush();
}
|
2008-06-05 15:57:56 -07:00
|
|
|
|
|
|
|
void DirtyCardQueueSet::concatenate_logs() {
|
|
|
|
// Iterate over all the threads, if we find a partial log add it to
|
|
|
|
// the global list of logs. Temporarily turn off the limit on the number
|
|
|
|
// of outstanding buffers.
|
|
|
|
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
|
2018-12-26 19:24:00 -05:00
|
|
|
size_t old_limit = max_completed_buffers();
|
|
|
|
set_max_completed_buffers(MaxCompletedBuffersUnlimited);
|
2017-11-22 17:54:50 -08:00
|
|
|
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
|
2018-04-12 08:25:56 +02:00
|
|
|
concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
|
2008-06-05 15:57:56 -07:00
|
|
|
}
|
2016-03-10 16:21:46 -05:00
|
|
|
concatenate_log(_shared_dirty_card_queue);
|
2018-12-26 19:24:00 -05:00
|
|
|
set_max_completed_buffers(old_limit);
|
2008-06-05 15:57:56 -07:00
|
|
|
}
|