8237261: Concurrent refinement activation threshold not updated for card counts

Fix special-case threshold calculation for primary refinement thread

Reviewed-by: tschatzl, sjohanss
Kim Barrett 2020-01-20 14:31:20 -05:00
parent b936939454
commit f779e49590

src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,6 +190,10 @@ STATIC_ASSERT(max_yellow_zone <= max_red_zone);
 // For logging zone values, ensuring consistency of level and tags.
 #define LOG_ZONES(...) log_debug( CTRL_TAGS )(__VA_ARGS__)
 
+static size_t buffers_to_cards(size_t value) {
+  return value * G1UpdateBufferSize;
+}
+
 // Package for pair of refinement thread activation and deactivation
 // thresholds.  The activation and deactivation levels are resp. the first
 // and second values of the pair.
@@ -207,8 +211,9 @@ static Thresholds calc_thresholds(size_t green_zone,
     // available buffers near green_zone value.  When yellow_size is
     // large we don't want to allow a full step to accumulate before
     // doing any processing, as that might lead to significantly more
-    // than green_zone buffers to be processed during scanning.
-    step = MIN2(step, ParallelGCThreads / 2.0);
+    // than green_zone buffers to be processed during pause.  So limit
+    // to an extra half buffer per pause-time processing thread.
+    step = MIN2(step, buffers_to_cards(ParallelGCThreads) / 2.0);
   }
   size_t activate_offset = static_cast<size_t>(ceil(step * (worker_id + 1)));
   size_t deactivate_offset = static_cast<size_t>(floor(step * worker_id));
@@ -233,10 +238,6 @@ jint G1ConcurrentRefine::initialize() {
   return _thread_control.initialize(this, max_num_threads());
 }
 
-static size_t buffers_to_cards(size_t value) {
-  return value * G1UpdateBufferSize;
-}
-
 static size_t calc_min_yellow_zone_size() {
   size_t step = buffers_to_cards(G1ConcRefinementThresholdStep);
   uint n_workers = G1ConcurrentRefine::max_num_threads();
@@ -443,8 +444,8 @@ uint G1ConcurrentRefine::worker_id_offset() {
   return G1DirtyCardQueueSet::num_par_ids();
 }
 
-void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
-  if (num_cur_buffers > activation_threshold(worker_id + 1)) {
+void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_cards) {
+  if (num_cur_cards > activation_threshold(worker_id + 1)) {
     _thread_control.maybe_activate_next(worker_id);
   }
 }
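For illustration only, here is a minimal standalone sketch (not the JDK sources) of the corrected threshold calculation. The constants kCardsPerBuffer, kPauseTimeThreads and kRefinementThreads are stand-ins for G1UpdateBufferSize, ParallelGCThreads and the maximum number of refinement threads, and calc_thresholds below is simplified from the real code; the point it demonstrates is that the special-case cap for the primary (worker 0) thread is now expressed in card counts rather than buffer counts.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <utility>

// Stand-in values; in HotSpot these come from G1UpdateBufferSize,
// ParallelGCThreads and the configured number of refinement threads.
static const size_t kCardsPerBuffer     = 256;
static const size_t kPauseTimeThreads   = 8;
static const unsigned kRefinementThreads = 4;

static size_t buffers_to_cards(size_t value) {
  return value * kCardsPerBuffer;
}

// Returns (activation, deactivation) thresholds, both in card counts.
static std::pair<size_t, size_t> calc_thresholds(size_t green_zone,
                                                 size_t yellow_zone,
                                                 unsigned worker_id) {
  double yellow_size = double(yellow_zone - green_zone);
  double step = yellow_size / kRefinementThreads;
  if (worker_id == 0) {
    // Special case for the primary thread: since the offsets are in card
    // counts, the cap must also be in card counts -- half a buffer's worth
    // of cards per pause-time thread, not half a buffer count.
    step = std::min(step, buffers_to_cards(kPauseTimeThreads) / 2.0);
  }
  size_t activate_offset   = static_cast<size_t>(std::ceil(step * (worker_id + 1)));
  size_t deactivate_offset = static_cast<size_t>(std::floor(step * worker_id));
  return { green_zone + activate_offset, green_zone + deactivate_offset };
}

int main() {
  size_t green  = buffers_to_cards(16);   // example green zone, in cards
  size_t yellow = buffers_to_cards(64);   // example yellow zone, in cards
  std::pair<size_t, size_t> t = calc_thresholds(green, yellow, 0);
  std::printf("worker 0: activate above %zu cards, deactivate below %zu cards\n",
              t.first, t.second);
  return 0;
}

Capping the step this way keeps the extra work taken on by the primary refinement thread to roughly half a buffer of cards per pause-time processing thread, matching the intent described in the updated comment.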