2008-06-05 15:57:56 -07:00
|
|
|
/*
|
2018-04-12 08:25:30 +02:00
|
|
|
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
|
2008-06-05 15:57:56 -07:00
|
|
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
|
|
*
|
|
|
|
* This code is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License version 2 only, as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* version 2 for more details (a copy is included in the LICENSE file that
|
|
|
|
* accompanied this code).
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License version
|
|
|
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
*
|
2010-05-27 19:08:38 -07:00
|
|
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
|
|
* or visit www.oracle.com if you need additional information or have any
|
|
|
|
* questions.
|
2008-06-05 15:57:56 -07:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2010-11-23 13:22:55 -08:00
|
|
|
#include "precompiled.hpp"
|
2018-04-12 08:25:30 +02:00
|
|
|
#include "gc/g1/g1BarrierSet.hpp"
|
2017-11-06 14:24:31 +01:00
|
|
|
#include "gc/g1/g1ConcurrentRefine.hpp"
|
|
|
|
#include "gc/g1/g1ConcurrentRefineThread.hpp"
|
2016-05-02 12:07:58 -04:00
|
|
|
#include "logging/log.hpp"
|
2017-11-28 21:43:45 +01:00
|
|
|
#include "memory/allocation.inline.hpp"
|
2013-12-09 08:20:45 +01:00
|
|
|
#include "runtime/java.hpp"
|
2016-05-02 12:07:58 -04:00
|
|
|
#include "runtime/thread.hpp"
|
2016-04-18 14:52:31 -04:00
|
|
|
#include "utilities/debug.hpp"
|
|
|
|
#include "utilities/globalDefinitions.hpp"
|
|
|
|
#include "utilities/pair.hpp"
|
|
|
|
#include <math.h>
|
2008-06-05 15:57:56 -07:00
|
|
|
|
2017-11-23 15:51:06 +01:00
|
|
|
// Attempt to construct a refinement thread for the given worker id. During VM
// initialization creation is always attempted; afterwards the
// InjectGCWorkerCreationFailure flag may suppress creation for testing.
// Returns NULL (after logging a warning) when either the thread object or its
// underlying OS thread could not be created.
G1ConcurrentRefineThread* G1ConcurrentRefineThreadControl::create_refinement_thread(uint worker_id, bool initializing) {
  G1ConcurrentRefineThread* thread = NULL;
  bool inject_failure = !initializing && InjectGCWorkerCreationFailure;
  if (!inject_failure) {
    thread = new G1ConcurrentRefineThread(_cr, worker_id);
  }
  bool failed = (thread == NULL) || (thread->osthread() == NULL);
  if (failed) {
    const char* reason = (thread == NULL) ? "memory" : "OS threads";
    log_warning(gc)("Failed to create refinement thread %u, no more %s",
                    worker_id, reason);
  }
  return thread;
}
|
|
|
|
|
|
|
|
// Construct an empty thread control; initialize() must be called before use.
G1ConcurrentRefineThreadControl::G1ConcurrentRefineThreadControl() :
  _cr(NULL), _threads(NULL), _num_max_threads(0) {}
|
|
|
|
|
|
|
|
// Destroy all created refinement threads and release the holder array.
G1ConcurrentRefineThreadControl::~G1ConcurrentRefineThreadControl() {
  for (uint worker = 0; worker < _num_max_threads; ++worker) {
    // Slots for threads that were never created hold NULL.
    if (_threads[worker] != NULL) {
      delete _threads[worker];
    }
  }
  FREE_C_HEAP_ARRAY(G1ConcurrentRefineThread*, _threads);
}
|
|
|
|
|
|
|
|
// Allocate the thread holder array and eagerly create refinement threads.
// With UseDynamicNumberOfGCThreads only worker 0 is created up front; the
// remaining slots stay NULL and are filled lazily by maybe_activate_next().
// Returns JNI_OK on success, or JNI_ENOMEM (after initiating VM shutdown)
// on allocation failure.
jint G1ConcurrentRefineThreadControl::initialize(G1ConcurrentRefine* cr, uint num_max_threads) {
  assert(cr != NULL, "G1ConcurrentRefine must not be NULL");
  _cr = cr;
  _num_max_threads = num_max_threads;

  _threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, num_max_threads, mtGC);
  if (_threads == NULL) {
    vm_shutdown_during_initialization("Could not allocate thread holder array.");
    return JNI_ENOMEM;
  }

  for (uint worker = 0; worker < num_max_threads; worker++) {
    // Always start the first thread; defer the rest when dynamic GC thread
    // counts are in use.
    bool defer_creation = UseDynamicNumberOfGCThreads && (worker != 0);
    if (defer_creation) {
      _threads[worker] = NULL;
      continue;
    }
    _threads[worker] = create_refinement_thread(worker, true);
    if (_threads[worker] == NULL) {
      vm_shutdown_during_initialization("Could not allocate refinement threads.");
      return JNI_ENOMEM;
    }
  }
  return JNI_OK;
}
|
|
|
|
|
|
|
|
// Wake up (creating on demand if necessary) the refinement thread following
// cur_worker_id. Does nothing when cur_worker_id is already the last
// configured worker, or when the successor could not be created.
void G1ConcurrentRefineThreadControl::maybe_activate_next(uint cur_worker_id) {
  assert(cur_worker_id < _num_max_threads,
         "Activating another thread from %u not allowed since there can be at most %u",
         cur_worker_id, _num_max_threads);
  uint next_id = cur_worker_id + 1;
  if (next_id == _num_max_threads) {
    // Already the last thread, there is no more thread to activate.
    return;
  }

  if (_threads[next_id] == NULL) {
    // The successor was deferred at initialization; create it now. Creation
    // may fail, leaving the slot NULL.
    _threads[next_id] = create_refinement_thread(next_id, false);
  }
  G1ConcurrentRefineThread* successor = _threads[next_id];
  if (successor != NULL && !successor->is_active()) {
    successor->activate();
  }
}
|
|
|
|
|
|
|
|
// Print each created refinement thread, one per line, to the given stream.
void G1ConcurrentRefineThreadControl::print_on(outputStream* st) const {
  for (uint worker = 0; worker < _num_max_threads; worker++) {
    G1ConcurrentRefineThread* t = _threads[worker];
    if (t == NULL) {
      continue; // Thread not (yet) created.
    }
    t->print_on(st);
    st->cr();
  }
}
|
|
|
|
|
|
|
|
// Apply the closure to every refinement thread that has been created.
void G1ConcurrentRefineThreadControl::worker_threads_do(ThreadClosure* tc) {
  for (uint worker = 0; worker < _num_max_threads; worker++) {
    G1ConcurrentRefineThread* t = _threads[worker];
    if (t != NULL) {
      tc->do_thread(t);
    }
  }
}
|
|
|
|
|
|
|
|
void G1ConcurrentRefineThreadControl::stop() {
|
|
|
|
for (uint i = 0; i < _num_max_threads; i++) {
|
|
|
|
if (_threads[i] != NULL) {
|
|
|
|
_threads[i]->stop();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-18 14:52:31 -04:00
|
|
|
// Arbitrary but large limits, to simplify some of the zone calculations.
// The general idea is to allow expressions like
//   MIN2(x OP y, max_XXX_zone)
// without needing to check for overflow in "x OP y", because the
// ranges for x and y have been restricted.
// A zone bound must fit in at most half the bits of size_t, so sums and
// doublings of in-range values cannot overflow size_t arithmetic.
STATIC_ASSERT(sizeof(LP64_ONLY(jint) NOT_LP64(jshort)) <= (sizeof(size_t)/2));
const size_t max_yellow_zone = LP64_ONLY(max_jint) NOT_LP64(max_jshort);
// Green is capped at half of yellow so that "green * 2" (used when deriving
// the yellow zone) stays within max_yellow_zone.
const size_t max_green_zone = max_yellow_zone / 2;
const size_t max_red_zone = INT_MAX; // For dcqs.set_max_completed_buffers.
STATIC_ASSERT(max_yellow_zone <= max_red_zone);
|
|
|
|
|
|
|
|
// Range check assertions for green zone values.
// The azc_* locals ensure each macro argument is evaluated exactly once;
// the do/while(0) wrapper makes the macro a single statement.
#define assert_zone_constraints_g(green)                          \
  do {                                                            \
    size_t azc_g_green = (green);                                 \
    assert(azc_g_green <= max_green_zone,                         \
           "green exceeds max: " SIZE_FORMAT, azc_g_green);       \
  } while (0)

// Range check assertions for green and yellow zone values.
// Checks green via assert_zone_constraints_g, then yellow's upper bound and
// the green <= yellow ordering.
#define assert_zone_constraints_gy(green, yellow)                       \
  do {                                                                  \
    size_t azc_gy_green = (green);                                      \
    size_t azc_gy_yellow = (yellow);                                    \
    assert_zone_constraints_g(azc_gy_green);                            \
    assert(azc_gy_yellow <= max_yellow_zone,                            \
           "yellow exceeds max: " SIZE_FORMAT, azc_gy_yellow);          \
    assert(azc_gy_green <= azc_gy_yellow,                               \
           "green (" SIZE_FORMAT ") exceeds yellow (" SIZE_FORMAT ")",  \
           azc_gy_green, azc_gy_yellow);                                \
  } while (0)

// Range check assertions for green, yellow, and red zone values.
// Checks green/yellow via assert_zone_constraints_gy, then red's upper bound
// and the yellow <= red ordering; together: green <= yellow <= red.
#define assert_zone_constraints_gyr(green, yellow, red)                 \
  do {                                                                  \
    size_t azc_gyr_green = (green);                                     \
    size_t azc_gyr_yellow = (yellow);                                   \
    size_t azc_gyr_red = (red);                                         \
    assert_zone_constraints_gy(azc_gyr_green, azc_gyr_yellow);          \
    assert(azc_gyr_red <= max_red_zone,                                 \
           "red exceeds max: " SIZE_FORMAT, azc_gyr_red);               \
    assert(azc_gyr_yellow <= azc_gyr_red,                               \
           "yellow (" SIZE_FORMAT ") exceeds red (" SIZE_FORMAT ")",    \
           azc_gyr_yellow, azc_gyr_red);                                \
  } while (0)

// Logging tag sequence for refinement control updates.
#define CTRL_TAGS gc, ergo, refine

// For logging zone values, ensuring consistency of level and tags.
#define LOG_ZONES(...) log_debug( CTRL_TAGS )(__VA_ARGS__)
|
|
|
|
|
|
|
|
// Package for pair of refinement thread activation and deactivation
|
|
|
|
// thresholds. The activation and deactivation levels are resp. the first
|
|
|
|
// and second values of the pair.
|
|
|
|
typedef Pair<size_t, size_t> Thresholds;
|
|
|
|
inline size_t activation_level(const Thresholds& t) { return t.first; }
|
|
|
|
inline size_t deactivation_level(const Thresholds& t) { return t.second; }
|
|
|
|
|
|
|
|
static Thresholds calc_thresholds(size_t green_zone,
|
|
|
|
size_t yellow_zone,
|
|
|
|
uint worker_i) {
|
|
|
|
double yellow_size = yellow_zone - green_zone;
|
2017-11-23 15:51:06 +01:00
|
|
|
double step = yellow_size / G1ConcurrentRefine::max_num_threads();
|
2016-04-18 14:52:31 -04:00
|
|
|
if (worker_i == 0) {
|
|
|
|
// Potentially activate worker 0 more aggressively, to keep
|
|
|
|
// available buffers near green_zone value. When yellow_size is
|
|
|
|
// large we don't want to allow a full step to accumulate before
|
|
|
|
// doing any processing, as that might lead to significantly more
|
|
|
|
// than green_zone buffers to be processed by update_rs.
|
|
|
|
step = MIN2(step, ParallelGCThreads / 2.0);
|
|
|
|
}
|
|
|
|
size_t activate_offset = static_cast<size_t>(ceil(step * (worker_i + 1)));
|
|
|
|
size_t deactivate_offset = static_cast<size_t>(floor(step * worker_i));
|
|
|
|
return Thresholds(green_zone + activate_offset,
|
|
|
|
green_zone + deactivate_offset);
|
|
|
|
}
|
|
|
|
|
2017-11-06 14:24:31 +01:00
|
|
|
// Construct with the given initial zone sizes; the zones must already
// satisfy green <= yellow <= red (checked below). The thread control is
// default-constructed here; initialize() creates the worker threads.
G1ConcurrentRefine::G1ConcurrentRefine(size_t green_zone,
                                       size_t yellow_zone,
                                       size_t red_zone,
                                       size_t min_yellow_zone_size) :
  _thread_control(),
  _green_zone(green_zone),
  _yellow_zone(yellow_zone),
  _red_zone(red_zone),
  _min_yellow_zone_size(min_yellow_zone_size)
{
  // Validate ordering and upper bounds of the initial zone values.
  assert_zone_constraints_gyr(green_zone, yellow_zone, red_zone);
}
|
|
|
|
|
2017-11-23 15:51:06 +01:00
|
|
|
// Create the refinement worker threads; returns a JNI error code
// (JNI_OK on success) from the thread control's initialization.
jint G1ConcurrentRefine::initialize() {
  uint num_threads = max_num_threads();
  return _thread_control.initialize(this, num_threads);
}
|
|
|
|
|
2016-04-18 14:52:31 -04:00
|
|
|
// Minimum size of the yellow zone: one threshold step per refinement
// thread, capped at max_yellow_zone.
static size_t calc_min_yellow_zone_size() {
  size_t step = G1ConcRefinementThresholdStep;
  uint n_workers = G1ConcurrentRefine::max_num_threads();
  // Detect overflow of step * n_workers without performing the multiply.
  bool would_overflow = (max_yellow_zone / step) < n_workers;
  return would_overflow ? max_yellow_zone : (step * n_workers);
}
|
|
|
|
|
|
|
|
// Initial green zone: the G1ConcRefinementGreenZone flag if set on the
// command line, otherwise ParallelGCThreads; capped at max_green_zone.
static size_t calc_init_green_zone() {
  size_t green = FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)
    ? ParallelGCThreads
    : G1ConcRefinementGreenZone;
  return MIN2(green, max_green_zone);
}
|
2009-12-16 15:12:51 -08:00
|
|
|
|
2016-04-18 14:52:31 -04:00
|
|
|
// Initial yellow zone. The yellow zone's size defaults to twice the green
// zone; a user-specified G1ConcRefinementYellowZone instead contributes
// (config - green) when it exceeds green. The size is clamped to
// [min_size, max_yellow_zone], and green + size is capped at
// max_yellow_zone.
static size_t calc_init_yellow_zone(size_t green, size_t min_size) {
  size_t size;
  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
    size = green * 2;
  } else {
    size_t configured = G1ConcRefinementYellowZone;
    size = (green < configured) ? (configured - green) : 0;
  }
  size = MIN2(MAX2(size, min_size), max_yellow_zone);
  return MIN2(green + size, max_yellow_zone);
}
|
2009-12-16 15:12:51 -08:00
|
|
|
|
2016-04-18 14:52:31 -04:00
|
|
|
// Initial red zone. By default the red zone extends past yellow by the
// yellow zone's size; a user-specified G1ConcRefinementRedZone above the
// yellow zone may enlarge that extension. Capped at max_red_zone.
static size_t calc_init_red_zone(size_t green, size_t yellow) {
  size_t size = yellow - green;
  bool user_specified = !FLAG_IS_DEFAULT(G1ConcRefinementRedZone);
  if (user_specified) {
    size_t configured = G1ConcRefinementRedZone;
    if (yellow < configured) {
      size = MAX2(size, configured - yellow);
    }
  }
  return MIN2(yellow + size, max_red_zone);
}
|
2013-05-09 11:16:39 -07:00
|
|
|
|
2017-11-06 14:24:31 +01:00
|
|
|
// Factory: compute the initial refinement zones, then create and initialize
// a G1ConcurrentRefine instance. On failure *ecode is set to an error value
// (and VM shutdown is initiated); on success *ecode holds the result of
// initialize() and the new instance is returned.
G1ConcurrentRefine* G1ConcurrentRefine::create(jint* ecode) {
  size_t min_yellow_zone_size = calc_min_yellow_zone_size();
  size_t green_zone = calc_init_green_zone();
  size_t yellow_zone = calc_init_yellow_zone(green_zone, min_yellow_zone_size);
  size_t red_zone = calc_init_red_zone(green_zone, yellow_zone);

  LOG_ZONES("Initial Refinement Zones: "
            "green: " SIZE_FORMAT ", "
            "yellow: " SIZE_FORMAT ", "
            "red: " SIZE_FORMAT ", "
            "min yellow size: " SIZE_FORMAT,
            green_zone, yellow_zone, red_zone, min_yellow_zone_size);

  G1ConcurrentRefine* result = new G1ConcurrentRefine(green_zone,
                                                      yellow_zone,
                                                      red_zone,
                                                      min_yellow_zone_size);
  if (result == NULL) {
    *ecode = JNI_ENOMEM;
    vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
    return NULL;
  }

  *ecode = result->initialize();
  return result;
}
|
|
|
|
|
2017-11-06 14:24:31 +01:00
|
|
|
// Request all refinement worker threads to terminate.
void G1ConcurrentRefine::stop() {
  _thread_control.stop();
}
|
|
|
|
|
2017-11-06 14:24:31 +01:00
|
|
|
// Nothing to do here: worker thread cleanup is handled by the
// _thread_control member's destructor.
G1ConcurrentRefine::~G1ConcurrentRefine() {
}
|
|
|
|
|
2017-11-06 14:24:31 +01:00
|
|
|
// Apply tc to each created refinement worker thread.
void G1ConcurrentRefine::threads_do(ThreadClosure *tc) {
  _thread_control.worker_threads_do(tc);
}
|
|
|
|
|
2017-11-23 15:51:06 +01:00
|
|
|
// Maximum number of refinement threads, as configured by the
// G1ConcRefinementThreads command-line flag.
uint G1ConcurrentRefine::max_num_threads() {
  return G1ConcRefinementThreads;
}
|
2009-10-02 16:12:07 -04:00
|
|
|
|
2017-11-06 14:25:18 +01:00
|
|
|
// Print one line per created refinement thread to st.
void G1ConcurrentRefine::print_threads_on(outputStream* st) const {
  _thread_control.print_on(st);
}
|
2016-03-11 11:22:56 +01:00
|
|
|
|
2016-04-18 14:52:31 -04:00
|
|
|
// Adjust green zone based on whether we're meeting the time goal.
// Limit to max_green_zone.
static size_t calc_new_green_zone(size_t green,
                                  double update_rs_time,
                                  size_t update_rs_processed_buffers,
                                  double goal_ms) {
  const double grow_factor = 1.1;
  const double shrink_factor = 0.9;
  if ((update_rs_time > goal_ms) && (green > 0)) {
    // Over the time goal: shrink the green zone.
    green = static_cast<size_t>(green * shrink_factor);
  } else if ((update_rs_time < goal_ms) &&
             (update_rs_processed_buffers > green)) {
    // Under the goal and processing more than green buffers: grow the
    // zone by at least one buffer, capped at max_green_zone.
    double grown = MAX2(green * grow_factor, green + 1.0);
    green = MIN2(static_cast<size_t>(grown), max_green_zone);
  }
  return green;
}
|
|
|
|
|
|
|
|
// Yellow zone follows the green zone: its size is twice green (but at
// least min_yellow_size), with the resulting boundary capped at
// max_yellow_zone.
static size_t calc_new_yellow_zone(size_t green, size_t min_yellow_size) {
  size_t yellow_size = MAX2(green * 2, min_yellow_size);
  return MIN2(green + yellow_size, max_yellow_zone);
}
|
|
|
|
|
|
|
|
// Red zone extends past yellow by the yellow zone's size, capped at
// max_red_zone.
static size_t calc_new_red_zone(size_t green, size_t yellow) {
  size_t yellow_size = yellow - green;
  return MIN2(yellow + yellow_size, max_red_zone);
}
|
|
|
|
|
2017-11-06 14:24:31 +01:00
|
|
|
// Recompute the green/yellow/red zone boundaries from the most recent
// update-RS phase statistics. The green zone adapts toward the time goal;
// yellow and red are then derived from the new green zone.
void G1ConcurrentRefine::update_zones(double update_rs_time,
                                      size_t update_rs_processed_buffers,
                                      double goal_ms) {
  log_trace( CTRL_TAGS )("Updating Refinement Zones: "
                         "update_rs time: %.3fms, "
                         "update_rs buffers: " SIZE_FORMAT ", "
                         "update_rs goal time: %.3fms",
                         update_rs_time,
                         update_rs_processed_buffers,
                         goal_ms);

  // Green first; the other two zones are functions of the new green value.
  _green_zone = calc_new_green_zone(_green_zone,
                                    update_rs_time,
                                    update_rs_processed_buffers,
                                    goal_ms);
  _yellow_zone = calc_new_yellow_zone(_green_zone, _min_yellow_zone_size);
  _red_zone = calc_new_red_zone(_green_zone, _yellow_zone);

  // The calc_new_* helpers cap their results, so ordering must still hold.
  assert_zone_constraints_gyr(_green_zone, _yellow_zone, _red_zone);
  LOG_ZONES("Updated Refinement Zones: "
            "green: " SIZE_FORMAT ", "
            "yellow: " SIZE_FORMAT ", "
            "red: " SIZE_FORMAT,
            _green_zone, _yellow_zone, _red_zone);
}
|
|
|
|
|
2017-11-06 14:24:31 +01:00
|
|
|
// Adjust refinement parameters after a GC pause: update the zones (when
// adaptive refinement is enabled), reconfigure the dirty card queue set's
// notification threshold and maximum buffer count, and set or clear the
// post-pause padding that dampens refinement while the queue is backed up.
void G1ConcurrentRefine::adjust(double update_rs_time,
                                size_t update_rs_processed_buffers,
                                double goal_ms) {
  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();

  if (G1UseAdaptiveConcRefinement) {
    update_zones(update_rs_time, update_rs_processed_buffers, goal_ms);

    // Change the barrier params
    if (max_num_threads() == 0) {
      // Disable dcqs notification when there are no threads to notify.
      dcqs.set_process_completed_buffers_threshold(DirtyCardQueueSet::ProcessCompletedBuffersThresholdNever);
    } else {
      // Worker 0 is the primary; wakeup is via dcqs notification.
      STATIC_ASSERT(max_yellow_zone <= INT_MAX);
      size_t activate = activation_threshold(0);
      dcqs.set_process_completed_buffers_threshold(activate);
    }
    dcqs.set_max_completed_buffers(red_zone());
  }

  size_t curr_queue_size = dcqs.completed_buffers_num();
  // When the queue is at or above the yellow zone, record its current size
  // as padding; do_refinement_step() clears the padding once the backlog
  // drops back into the yellow zone.
  if ((dcqs.max_completed_buffers() > 0) &&
      (curr_queue_size >= yellow_zone())) {
    dcqs.set_completed_buffers_padding(curr_queue_size);
  } else {
    dcqs.set_completed_buffers_padding(0);
  }
  dcqs.notify_if_necessary();
}
|
2017-11-23 15:51:06 +01:00
|
|
|
|
|
|
|
// Number of completed buffers at which the given worker should be
// activated, derived from the current green/yellow zone values.
size_t G1ConcurrentRefine::activation_threshold(uint worker_id) const {
  return activation_level(calc_thresholds(_green_zone, _yellow_zone, worker_id));
}
|
|
|
|
|
|
|
|
// Number of completed buffers below which the given worker should
// deactivate, derived from the current green/yellow zone values.
size_t G1ConcurrentRefine::deactivation_threshold(uint worker_id) const {
  return deactivation_level(calc_thresholds(_green_zone, _yellow_zone, worker_id));
}
|
|
|
|
|
|
|
|
// Offset added to a refinement worker's id when claiming buffers, so that
// refinement worker ids do not collide with the DCQS's parallel ids.
uint G1ConcurrentRefine::worker_id_offset() {
  return DirtyCardQueueSet::num_par_ids();
}
|
|
|
|
|
|
|
|
// Wake the worker following worker_id when the current number of completed
// buffers exceeds that worker's activation threshold.
void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
  size_t next_threshold = activation_threshold(worker_id + 1);
  if (num_cur_buffers > next_threshold) {
    _thread_control.maybe_activate_next(worker_id);
  }
}
|
|
|
|
|
|
|
|
// Perform one unit of refinement work for the given worker: clear leftover
// post-pause padding when the backlog has subsided, possibly wake the next
// worker, then refine one completed buffer if enough remain. Returns the
// result of the buffer refinement attempt.
bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();

  size_t num_buffers = dcqs.completed_buffers_num();
  // If the number of the buffers falls down into the yellow zone,
  // that means that the transition period after the evacuation pause has ended.
  // Since the value written to the DCQS is the same for all threads, there is no
  // need to synchronize.
  bool transition_ended = num_buffers <= yellow_zone();
  if (transition_ended && (dcqs.completed_buffers_padding() > 0)) {
    dcqs.set_completed_buffers_padding(0);
  }

  maybe_activate_more_threads(worker_id, num_buffers);

  // Process the next buffer, if there are enough left.
  uint refinement_id = worker_id + worker_id_offset();
  return dcqs.refine_completed_buffer_concurrently(refinement_id,
                                                   deactivation_threshold(worker_id));
}
|