8306541: Refactor collection set candidate handling to prepare for JDK-8140326

Reviewed-by: iwalulya, ayang
Thomas Schatzl 2023-05-12 15:07:48 +00:00
parent 4b0f4213a5
commit e512a20679
25 changed files with 809 additions and 501 deletions

View File

@ -2621,8 +2621,8 @@ void G1CollectedHeap::set_humongous_stats(uint num_humongous_total, uint num_hum
}
bool G1CollectedHeap::should_sample_collection_set_candidates() const {
G1CollectionSetCandidates* candidates = G1CollectedHeap::heap()->collection_set()->candidates();
return candidates != nullptr && candidates->num_remaining() > 0;
const G1CollectionSetCandidates* candidates = collection_set()->candidates();
return !candidates->is_empty();
}
void G1CollectedHeap::set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats) {

View File

@ -918,6 +918,8 @@ public:
const G1CollectionSet* collection_set() const { return &_collection_set; }
G1CollectionSet* collection_set() { return &_collection_set; }
inline bool is_collection_set_candidate(const HeapRegion* r) const;
SoftRefPolicy* soft_ref_policy() override;
void initialize_serviceability() override;

View File

@ -282,4 +282,9 @@ inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
}
}
inline bool G1CollectedHeap::is_collection_set_candidate(const HeapRegion* r) const {
const G1CollectionSetCandidates* candidates = collection_set()->candidates();
return candidates->contains(r);
}
#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

View File

@ -50,22 +50,21 @@ G1GCPhaseTimes* G1CollectionSet::phase_times() {
G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
_g1h(g1h),
_policy(policy),
_candidates(nullptr),
_eden_region_length(0),
_survivor_region_length(0),
_old_region_length(0),
_candidates(),
_collection_set_regions(nullptr),
_collection_set_cur_length(0),
_collection_set_max_length(0),
_num_optional_regions(0),
_eden_region_length(0),
_survivor_region_length(0),
_initial_old_region_length(0),
_optional_old_regions(),
_inc_build_state(Inactive),
_inc_part_start(0) {
}
G1CollectionSet::~G1CollectionSet() {
FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
free_optional_regions();
clear_candidates();
abandon_all_candidates();
}
void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
@ -75,29 +74,27 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
_eden_region_length = eden_cset_region_length;
_survivor_region_length = survivor_cset_region_length;
assert((size_t) young_region_length() == _collection_set_cur_length,
assert((size_t)young_region_length() == _collection_set_cur_length,
"Young region length %u should match collection set length %u", young_region_length(), _collection_set_cur_length);
_old_region_length = 0;
free_optional_regions();
_initial_old_region_length = 0;
_optional_old_regions.clear();
}
void G1CollectionSet::initialize(uint max_region_length) {
guarantee(_collection_set_regions == nullptr, "Must only initialize once.");
_collection_set_max_length = max_region_length;
_collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
_candidates.initialize(max_region_length);
}
void G1CollectionSet::free_optional_regions() {
_num_optional_regions = 0;
void G1CollectionSet::abandon_all_candidates() {
_candidates.clear();
_initial_old_region_length = 0;
_optional_old_regions.clear();
}
void G1CollectionSet::clear_candidates() {
delete _candidates;
_candidates = nullptr;
}
// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
assert_at_safepoint_on_vm_thread();
@ -110,21 +107,11 @@ void G1CollectionSet::add_old_region(HeapRegion* hr) {
assert(_collection_set_cur_length < _collection_set_max_length, "Collection set now larger than maximum size.");
_collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
_old_region_length++;
_initial_old_region_length++;
_g1h->old_set_remove(hr);
}
void G1CollectionSet::add_optional_region(HeapRegion* hr) {
assert(hr->is_old(), "the region should be old");
assert(!hr->in_collection_set(), "should not already be in the CSet");
_g1h->register_optional_region_with_region_attr(hr);
hr->set_index_in_opt_cset(_num_optional_regions++);
}
void G1CollectionSet::start_incremental_building() {
assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
assert(_inc_build_state == Inactive, "Precondition");
@ -165,8 +152,7 @@ void G1CollectionSet::par_iterate(HeapRegionClosure* cl,
void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
assert_at_safepoint();
for (uint i = 0; i < _num_optional_regions; i++) {
HeapRegion* r = _candidates->at(i);
for (HeapRegion* r : _optional_old_regions) {
bool result = cl->do_heap_region(r);
guarantee(!result, "Must not cancel iteration");
}
@ -337,24 +323,22 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
if (collector_state()->in_mixed_phase()) {
candidates()->verify();
uint num_initial_old_regions;
uint num_optional_old_regions;
G1CollectionCandidateRegionList initial_old_regions;
assert(_optional_old_regions.length() == 0, "must be");
_policy->calculate_old_collection_set_regions(candidates(),
time_remaining_ms,
num_initial_old_regions,
num_optional_old_regions);
_policy->select_candidates_from_marking(&candidates()->marking_regions(),
time_remaining_ms,
&initial_old_regions,
&_optional_old_regions);
// Prepare initial old regions.
move_candidates_to_collection_set(num_initial_old_regions);
// Prepare optional old regions for evacuation.
uint candidate_idx = candidates()->cur_idx();
for (uint i = 0; i < num_optional_old_regions; i++) {
add_optional_region(candidates()->at(candidate_idx + i));
}
// Move initially selected old regions to collection set directly.
move_candidates_to_collection_set(&initial_old_regions);
// Only prepare selected optional regions for now.
prepare_optional_regions(&_optional_old_regions);
candidates()->verify();
} else {
log_debug(gc, ergo, cset)("No candidates to reclaim.");
}
stop_incremental_building();
@ -365,21 +349,24 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}
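The rewritten finalize_old_part() above no longer works with counts and indices into the candidate array; select_candidates_from_marking() now fills two explicit region lists, one mandatory and one optional. A minimal standalone sketch of that partitioning idea, using plain std::vector and an illustrative Region struct instead of the HotSpot types (all names below are assumptions, not the actual G1 API):

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  struct Region { double predicted_time_ms; };  // illustrative stand-in for HeapRegion*

  // Sketch of the split: walk the efficiency-sorted marking candidates once; the
  // first regions become mandatory, later ones become optional, up to a cap.
  void split_candidates(const std::vector<Region>& marking_candidates,
                        double time_remaining_ms,
                        std::size_t min_initial,
                        std::size_t max_total,
                        std::vector<Region>& initial_regions,
                        std::vector<Region>& optional_regions) {
    for (const Region& r : marking_candidates) {
      if (initial_regions.size() + optional_regions.size() >= max_total) {
        break;                                    // collection set is full
      }
      time_remaining_ms = std::max(time_remaining_ms - r.predicted_time_ms, 0.0);
      if (initial_regions.size() < min_initial || time_remaining_ms > 0.0) {
        initial_regions.push_back(r);             // evacuated unconditionally this pause
      } else {
        optional_regions.push_back(r);            // evacuated only if time is left over
      }
    }
  }

In the patch itself, the mandatory list is what move_candidates_to_collection_set() consumes immediately, while the optional list is only prepared and may be evacuated later by finalize_optional_for_evacuation().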
void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) {
if (num_old_candidate_regions == 0) {
return;
}
uint candidate_idx = candidates()->cur_idx();
for (uint i = 0; i < num_old_candidate_regions; i++) {
HeapRegion* r = candidates()->at(candidate_idx + i);
// This potentially optional candidate region is going to be an actual collection
// set region. Clear cset marker.
void G1CollectionSet::move_candidates_to_collection_set(G1CollectionCandidateRegionList* regions) {
for (HeapRegion* r : *regions) {
_g1h->clear_region_attr(r);
add_old_region(r);
}
candidates()->remove(num_old_candidate_regions);
candidates()->remove(regions);
}
candidates()->verify();
void G1CollectionSet::prepare_optional_regions(G1CollectionCandidateRegionList* regions){
uint cur_index = 0;
for (HeapRegion* r : *regions) {
assert(r->is_old(), "the region should be old");
assert(!r->in_collection_set(), "should not already be in the CSet");
_g1h->register_optional_region_with_region_attr(r);
r->set_index_in_opt_cset(cur_index++);
}
}
void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
@ -390,26 +377,24 @@ void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_m
bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
update_incremental_marker();
uint num_selected_regions;
_policy->calculate_optional_collection_set_regions(candidates(),
_num_optional_regions,
G1CollectionCandidateRegionList selected_regions;
_policy->calculate_optional_collection_set_regions(&_optional_old_regions,
remaining_pause_time,
num_selected_regions);
&selected_regions);
move_candidates_to_collection_set(num_selected_regions);
move_candidates_to_collection_set(&selected_regions);
_num_optional_regions -= num_selected_regions;
_optional_old_regions.remove_prefix(&selected_regions);
stop_incremental_building();
_g1h->verify_region_attr_remset_is_tracked();
return num_selected_regions > 0;
return selected_regions.length() > 0;
}
void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
for (uint i = 0; i < _num_optional_regions; i++) {
HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
for (HeapRegion* r : _optional_old_regions) {
pss->record_unused_optional_region(r);
// Clear collection set marker and make sure that the remembered set information
// is correct as we still need it later.
@ -417,7 +402,7 @@ void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* p
_g1h->register_region_with_region_attr(r);
r->clear_index_in_opt_cset();
}
free_optional_regions();
_optional_old_regions.clear();
_g1h->verify_region_attr_remset_is_tracked();
}

View File

@ -25,11 +25,11 @@
#ifndef SHARE_GC_G1_G1COLLECTIONSET_HPP
#define SHARE_GC_G1_G1COLLECTIONSET_HPP
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
class G1CollectedHeap;
class G1CollectionSetCandidates;
class G1CollectorState;
class G1GCPhaseTimes;
class G1ParScanThreadStateSet;
@ -133,12 +133,8 @@ class G1CollectionSet {
G1CollectedHeap* _g1h;
G1Policy* _policy;
// All old gen collection set candidate regions for the current mixed phase.
G1CollectionSetCandidates* _candidates;
uint _eden_region_length;
uint _survivor_region_length;
uint _old_region_length;
// All old gen collection set candidate regions.
G1CollectionSetCandidates _candidates;
// The actual collection set as a set of region indices.
// All entries in _collection_set_regions below _collection_set_cur_length are
@ -150,11 +146,13 @@ class G1CollectionSet {
volatile uint _collection_set_cur_length;
uint _collection_set_max_length;
uint _eden_region_length;
uint _survivor_region_length;
uint _initial_old_region_length;
// When doing mixed collections we can add old regions to the collection set, which
// will be collected only if there is enough time. We call these optional regions.
// This member records the current number of regions that are of that type that
// correspond to the first x entries in the collection set candidates.
uint _num_optional_regions;
// will be collected only if there is enough time. We call these optional (old) regions.
G1CollectionCandidateRegionList _optional_old_regions;
enum CSetBuildType {
Active, // We are actively building the collection set
@ -172,14 +170,13 @@ class G1CollectionSet {
// Update the incremental collection set information when adding a region.
void add_young_region_common(HeapRegion* hr);
// Add old region "hr" to the collection set.
// Add the given old region to the head of the current collection set.
void add_old_region(HeapRegion* hr);
void free_optional_regions();
// Add old region "hr" to optional collection set.
void add_optional_region(HeapRegion* hr);
void move_candidates_to_collection_set(uint num_regions);
void move_candidates_to_collection_set(G1CollectionCandidateRegionList* regions);
// Prepares old regions in the given set for optional collection later. Does not
// add the region to collection set yet.
void prepare_optional_regions(G1CollectionCandidateRegionList* regions);
// Finalize the young part of the initial collection set. Relabel survivor regions
// as Eden and calculate a prediction on how long the evacuation of all young regions
@ -208,26 +205,25 @@ public:
// Initializes the collection set giving the maximum possible length of the collection set.
void initialize(uint max_region_length);
void clear_candidates();
void abandon_all_candidates();
void set_candidates(G1CollectionSetCandidates* candidates) {
assert(_candidates == nullptr, "Trying to replace collection set candidates.");
_candidates = candidates;
}
G1CollectionSetCandidates* candidates() { return _candidates; }
G1CollectionSetCandidates* candidates() { return &_candidates; }
const G1CollectionSetCandidates* candidates() const { return &_candidates; }
void init_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length);
uint region_length() const { return young_region_length() +
old_region_length(); }
initial_old_region_length(); }
uint young_region_length() const { return eden_region_length() +
survivor_region_length(); }
uint eden_region_length() const { return _eden_region_length; }
uint eden_region_length() const { return _eden_region_length; }
uint survivor_region_length() const { return _survivor_region_length; }
uint old_region_length() const { return _old_region_length; }
uint optional_region_length() const { return _num_optional_regions; }
uint initial_old_region_length() const { return _initial_old_region_length; }
uint optional_region_length() const { return _optional_old_regions.length(); }
bool only_contains_young_regions() const { return (initial_old_region_length() + optional_region_length()) == 0; }
// Reset the contents of the collection set.
void clear();

View File

@ -26,78 +26,245 @@
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/growableArray.hpp"
void G1CollectionSetCandidates::remove(uint num_regions) {
assert(num_regions <= num_remaining(), "Trying to remove more regions (%u) than available (%u)", num_regions, num_remaining());
for (uint i = 0; i < num_regions; i++) {
_remaining_reclaimable_bytes -= at(_front_idx)->reclaimable_bytes();
_front_idx++;
}
G1CollectionCandidateList::G1CollectionCandidateList() : _candidates(2, mtGC) { }
void G1CollectionCandidateList::set(G1CollectionCandidateList::CandidateInfo* candidate_infos, uint num_infos) {
assert(_candidates.is_empty(), "must be");
GrowableArrayFromArray<G1CollectionCandidateList::CandidateInfo> a(candidate_infos, (int)num_infos);
_candidates.appendAll(&a);
}
void G1CollectionSetCandidates::remove_from_end(uint num_remove, size_t wasted) {
assert(num_remove <= num_remaining(), "trying to remove more regions than remaining");
void G1CollectionCandidateList::remove(G1CollectionCandidateRegionList* other) {
guarantee((uint)_candidates.length() >= other->length(), "must be");
#ifdef ASSERT
size_t reclaimable = 0;
for (uint i = 0; i < num_remove; i++) {
uint cur_idx = _num_regions - i - 1;
reclaimable += at(cur_idx)->reclaimable_bytes();
// Make sure we crash if we access it.
_regions[cur_idx] = nullptr;
if (other->length() == 0) {
// Nothing to remove or nothing in the original set.
return;
}
assert(reclaimable == wasted, "Recalculated reclaimable inconsistent");
#endif
_num_regions -= num_remove;
_remaining_reclaimable_bytes -= wasted;
}
// Create a list from scratch, copying over the elements from the candidate
// list not in the other list. Finally deallocate and overwrite the old list.
int new_length = _candidates.length() - other->length();
GrowableArray<CandidateInfo> new_list(new_length, mtGC);
void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
for (uint i = _front_idx; i < _num_regions; i++) {
HeapRegion* r = _regions[i];
if (cl->do_heap_region(r)) {
cl->set_incomplete();
break;
uint other_idx = 0;
for (uint candidate_idx = 0; candidate_idx < (uint)_candidates.length(); candidate_idx++) {
if ((other_idx == other->length()) || _candidates.at(candidate_idx)._r != other->at(other_idx)) {
new_list.append(_candidates.at(candidate_idx));
} else {
other_idx++;
}
}
_candidates.swap(&new_list);
verify();
assert(_candidates.length() == new_length, "must be %u %u", _candidates.length(), new_length);
}
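The loop above removes an ordered sub-list in a single pass: because the other list preserves the relative gc-efficiency order of this list, one index into each list is enough. A self-contained sketch of the same skip-merge, with ints standing in for CandidateInfo entries (illustrative, not the HotSpot code):

  #include <cstddef>
  #include <vector>

  // Remove every element of `to_remove` from `list`, assuming `to_remove` is a
  // subsequence of `list` in the same relative order. One pass, no lookups.
  std::vector<int> remove_ordered_subsequence(const std::vector<int>& list,
                                              const std::vector<int>& to_remove) {
    std::vector<int> result;
    result.reserve(list.size() - to_remove.size());
    std::size_t skip_idx = 0;
    for (std::size_t i = 0; i < list.size(); i++) {
      if (skip_idx < to_remove.size() && list[i] == to_remove[skip_idx]) {
        skip_idx++;                 // this element is being removed, skip it
      } else {
        result.push_back(list[i]);  // keep, preserving the original order
      }
    }
    return result;
  }

Removing "A G H" from "A B G H" this way leaves "B"; an out-of-order list such as "A H G" would violate the precondition, matching the comment in the header file.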
void G1CollectionSetCandidates::iterate_backwards(HeapRegionClosure* cl) {
for (uint i = _num_regions; i > _front_idx; i--) {
HeapRegion* r = _regions[i - 1];
if (cl->do_heap_region(r)) {
cl->set_incomplete();
break;
}
}
void G1CollectionCandidateList::clear() {
_candidates.clear();
}
#ifndef PRODUCT
void G1CollectionSetCandidates::verify() const {
guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions);
uint idx = _front_idx;
size_t sum_of_reclaimable_bytes = 0;
HeapRegion *prev = nullptr;
for (; idx < _num_regions; idx++) {
HeapRegion *cur = _regions[idx];
guarantee(cur != nullptr, "Regions after _front_idx %u cannot be NULL but %u is", _front_idx, idx);
// Currently the decision whether young gc moves region contents is determined
// at region allocation time. It is not possible that a region becomes non-movable
// at a later point, which means below condition always holds true.
guarantee(G1CollectionSetChooser::should_add(cur),
"Region %u should be eligible for addition.", cur->hrm_index());
if (prev != nullptr) {
guarantee(prev->gc_efficiency() >= cur->gc_efficiency(),
"GC efficiency for region %u: %1.4f smaller than for region %u: %1.4f",
prev->hrm_index(), prev->gc_efficiency(), cur->hrm_index(), cur->gc_efficiency());
}
sum_of_reclaimable_bytes += cur->reclaimable_bytes();
prev = cur;
void G1CollectionCandidateList::verify() {
CandidateInfo* prev = nullptr;
for (uint i = 0; i < (uint)_candidates.length(); i++) {
CandidateInfo& ci = _candidates.at(i);
assert(prev == nullptr || prev->_gc_efficiency >= ci._gc_efficiency,
"Stored gc efficiency must be descending from region %u to %u",
prev->_r->hrm_index(), ci._r->hrm_index());
prev = &ci;
assert(ci._r->rem_set()->is_tracked(), "remset for region %u must be tracked", ci._r->hrm_index());
}
guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
"Inconsistent remaining_reclaimable bytes, remaining " SIZE_FORMAT " calculated " SIZE_FORMAT,
_remaining_reclaimable_bytes, sum_of_reclaimable_bytes);
}
#endif // !PRODUCT
#endif
int G1CollectionCandidateList::compare(CandidateInfo* ci1, CandidateInfo* ci2) {
// Make sure that null entries are moved to the end.
if (ci1->_r == nullptr) {
if (ci2->_r == nullptr) {
return 0;
} else {
return 1;
}
} else if (ci2->_r == nullptr) {
return -1;
}
double gc_eff1 = ci1->_gc_efficiency;
double gc_eff2 = ci2->_gc_efficiency;
if (gc_eff1 > gc_eff2) {
return -1;
} if (gc_eff1 < gc_eff2) {
return 1;
} else {
return 0;
}
}
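compare() orders candidates by decreasing gc efficiency, with null entries pushed to the end so that unused slots of the build array sort last. A small hedged example of the descending-order contract using plain qsort and an illustrative Info struct (null handling omitted for brevity; this is not the HotSpot code):

  #include <cstdlib>

  struct Info { int region; double gc_efficiency; };

  // qsort comparator: higher gc efficiency sorts first, mirroring the contract of
  // G1CollectionCandidateList::compare above.
  static int compare_info(const void* a, const void* b) {
    double ea = static_cast<const Info*>(a)->gc_efficiency;
    double eb = static_cast<const Info*>(b)->gc_efficiency;
    if (ea > eb) return -1;
    if (ea < eb) return 1;
    return 0;
  }

  int main() {
    Info infos[] = { {1, 0.2}, {2, 0.9}, {3, 0.5} };
    qsort(infos, 3, sizeof(Info), compare_info);
    // infos is now {2, 0.9}, {3, 0.5}, {1, 0.2}; a list built from this array via
    // set() keeps that decreasing-efficiency order.
    return 0;
  }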
G1CollectionCandidateRegionList::G1CollectionCandidateRegionList() : _regions(2, mtGC) { }
void G1CollectionCandidateRegionList::append(HeapRegion* r) {
assert(!_regions.contains(r), "must be");
_regions.append(r);
}
void G1CollectionCandidateRegionList::remove_prefix(G1CollectionCandidateRegionList* other) {
#ifdef ASSERT
// Check that the given list is a prefix of this list.
int i = 0;
for (HeapRegion* r : *other) {
assert(_regions.at(i) == r, "must be in order, but element %d is not", i);
i++;
}
#endif
if (other->length() == 0) {
return;
}
_regions.remove_till(other->length());
}
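remove_prefix() only checks (in debug builds) that the passed list really is a prefix and then drops that many leading elements. A short sketch of the same contract with std::vector instead of GrowableArray (names are illustrative):

  #include <cassert>
  #include <cstddef>
  #include <vector>

  // Drop the leading elements of `list` that are given, in order, in `prefix`.
  // The caller guarantees `prefix` is an exact prefix of `list`.
  void remove_prefix(std::vector<int>& list, const std::vector<int>& prefix) {
    for (std::size_t i = 0; i < prefix.size(); i++) {
      assert(list[i] == prefix[i] && "prefix must match the front of the list");
    }
    list.erase(list.begin(), list.begin() + (std::ptrdiff_t)prefix.size());
  }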
HeapRegion* G1CollectionCandidateRegionList::at(uint index) {
return _regions.at(index);
}
void G1CollectionCandidateRegionList::clear() {
_regions.clear();
}
G1CollectionSetCandidates::G1CollectionSetCandidates() :
_marking_regions(),
_contains_map(nullptr),
_max_regions(0),
_last_marking_candidates_length(0)
{ }
G1CollectionSetCandidates::~G1CollectionSetCandidates() {
FREE_C_HEAP_ARRAY(CandidateOrigin, _contains_map);
}
bool G1CollectionSetCandidates::is_from_marking(HeapRegion* r) const {
assert(contains(r), "must be");
return _contains_map[r->hrm_index()] == CandidateOrigin::Marking;
}
void G1CollectionSetCandidates::initialize(uint max_regions) {
assert(_contains_map == nullptr, "already initialized");
_max_regions = max_regions;
_contains_map = NEW_C_HEAP_ARRAY(CandidateOrigin, max_regions, mtGC);
clear();
}
void G1CollectionSetCandidates::clear() {
_marking_regions.clear();
for (uint i = 0; i < _max_regions; i++) {
_contains_map[i] = CandidateOrigin::Invalid;
}
_last_marking_candidates_length = 0;
}
void G1CollectionSetCandidates::set_candidates_from_marking(G1CollectionCandidateList::CandidateInfo* candidate_infos,
uint num_infos) {
assert(_marking_regions.length() == 0, "must be empty before adding new ones");
verify();
_marking_regions.set(candidate_infos, num_infos);
for (uint i = 0; i < num_infos; i++) {
HeapRegion* r = candidate_infos[i]._r;
assert(!contains(r), "must not contain region %u", r->hrm_index());
_contains_map[r->hrm_index()] = CandidateOrigin::Marking;
}
_last_marking_candidates_length = num_infos;
verify();
}
void G1CollectionSetCandidates::remove(G1CollectionCandidateRegionList* other) {
_marking_regions.remove(other);
for (HeapRegion* r : *other) {
assert(contains(r), "must contain region %u", r->hrm_index());
_contains_map[r->hrm_index()] = CandidateOrigin::Invalid;
}
verify();
}
bool G1CollectionSetCandidates::is_empty() const {
return length() == 0;
}
bool G1CollectionSetCandidates::has_more_marking_candidates() const {
return _marking_regions.length() != 0;
}
#ifndef PRODUCT
void G1CollectionSetCandidates::verify_helper(G1CollectionCandidateList* list, uint& from_marking, CandidateOrigin* verify_map) {
list->verify();
for (uint i = 0; i < (uint)list->length(); i++) {
HeapRegion* r = list->at(i)._r;
if (is_from_marking(r)) {
from_marking++;
}
const uint hrm_index = r->hrm_index();
assert(_contains_map[hrm_index] == CandidateOrigin::Marking,
"must be %u is %u", hrm_index, (uint)_contains_map[hrm_index]);
assert(verify_map[hrm_index] == CandidateOrigin::Invalid, "already added");
verify_map[hrm_index] = CandidateOrigin::Verify;
}
}
void G1CollectionSetCandidates::verify() {
uint from_marking = 0;
CandidateOrigin* verify_map = NEW_C_HEAP_ARRAY(CandidateOrigin, _max_regions, mtGC);
for (uint i = 0; i < _max_regions; i++) {
verify_map[i] = CandidateOrigin::Invalid;
}
verify_helper(&_marking_regions, from_marking, verify_map);
assert(from_marking == marking_regions_length(), "must be");
// Check whether the _contains_map is consistent with the list.
for (uint i = 0; i < _max_regions; i++) {
assert(_contains_map[i] == verify_map[i] ||
(_contains_map[i] != CandidateOrigin::Invalid && verify_map[i] == CandidateOrigin::Verify),
"Candidate origin does not match for region %u, is %u but should be %u",
i,
static_cast<std::underlying_type<CandidateOrigin>::type>(_contains_map[i]),
static_cast<std::underlying_type<CandidateOrigin>::type>(verify_map[i]));
}
FREE_C_HEAP_ARRAY(CandidateOrigin, verify_map);
}
#endif
bool G1CollectionSetCandidates::contains(const HeapRegion* r) const {
const uint index = r->hrm_index();
assert(index < _max_regions, "must be");
return _contains_map[index] != CandidateOrigin::Invalid;
}
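contains() is O(1): candidate membership is tracked in a side array indexed by the region number (hrm_index), which also records where each candidate came from. A minimal model of that scheme (types and names are illustrative, not the HotSpot ones):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  enum class Origin : uint8_t { Invalid, Marking };

  // Membership map keyed by region index: one byte per heap region, giving O(1)
  // contains() and remembering where a candidate came from.
  class CandidateMap {
    std::vector<Origin> _map;
  public:
    explicit CandidateMap(std::size_t max_regions) : _map(max_regions, Origin::Invalid) { }

    void add_from_marking(std::size_t region_index) { _map[region_index] = Origin::Marking; }
    void remove(std::size_t region_index)           { _map[region_index] = Origin::Invalid; }
    bool contains(std::size_t region_index) const   { return _map[region_index] != Origin::Invalid; }
  };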
const char* G1CollectionSetCandidates::get_short_type_str(const HeapRegion* r) const {
static const char* type_strings[] = {
"Ci", // Invalid
"Cm", // Marking
"Cv" // Verification
};
uint8_t kind = static_cast<std::underlying_type<CandidateOrigin>::type>(_contains_map[r->hrm_index()]);
return type_strings[kind];
}

View File

@ -29,72 +29,200 @@
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/growableArray.hpp"
class G1CollectionCandidateList;
class G1CollectionSetCandidates;
class HeapRegion;
class HeapRegionClosure;
// Set of collection set candidates, i.e. all old gen regions we consider worth
// collecting in the remainder of the current mixed phase. Regions are sorted by decreasing
// gc efficiency.
// Maintains a cursor into the list that specifies the next collection set candidate
// to put into the current collection set.
class G1CollectionSetCandidates : public CHeapObj<mtGC> {
HeapRegion** _regions;
uint _num_regions; // Total number of regions in the collection set candidate set.
using G1CollectionCandidateRegionListIterator = GrowableArrayIterator<HeapRegion*>;
// The sum of bytes that can be reclaimed in the remaining set of collection
// set candidates.
size_t _remaining_reclaimable_bytes;
// The index of the next candidate old region to be considered for
// addition to the current collection set.
uint _front_idx;
// A set of HeapRegion*, a thin wrapper around GrowableArray.
class G1CollectionCandidateRegionList {
GrowableArray<HeapRegion*> _regions;
public:
G1CollectionSetCandidates(HeapRegion** regions, uint num_regions, size_t remaining_reclaimable_bytes) :
_regions(regions),
_num_regions(num_regions),
_remaining_reclaimable_bytes(remaining_reclaimable_bytes),
_front_idx(0) { }
G1CollectionCandidateRegionList();
~G1CollectionSetCandidates() {
FREE_C_HEAP_ARRAY(HeapRegion*, _regions);
// Append a HeapRegion to the end of this list. The region must not be in the list
// already.
void append(HeapRegion* r);
// Remove the given list of HeapRegion* from this list. The given list must be a prefix
// of this list.
void remove_prefix(G1CollectionCandidateRegionList* list);
// Empty contents of the list.
void clear();
HeapRegion* at(uint index);
uint length() const { return (uint)_regions.length(); }
G1CollectionCandidateRegionListIterator begin() const { return _regions.begin(); }
G1CollectionCandidateRegionListIterator end() const { return _regions.end(); }
};
class G1CollectionCandidateListIterator : public StackObj {
G1CollectionCandidateList* _which;
uint _position;
public:
G1CollectionCandidateListIterator(G1CollectionCandidateList* which, uint position);
G1CollectionCandidateListIterator& operator++();
HeapRegion* operator*();
bool operator==(const G1CollectionCandidateListIterator& rhs);
bool operator!=(const G1CollectionCandidateListIterator& rhs);
};
// List of collection set candidates (regions with their efficiency) ordered by
// decreasing gc efficiency.
class G1CollectionCandidateList : public CHeapObj<mtGC> {
friend class G1CollectionCandidateListIterator;
public:
struct CandidateInfo {
HeapRegion* _r;
double _gc_efficiency;
CandidateInfo() : CandidateInfo(nullptr, 0.0) { }
CandidateInfo(HeapRegion* r, double gc_efficiency) : _r(r), _gc_efficiency(gc_efficiency) { }
};
private:
GrowableArray<CandidateInfo> _candidates;
public:
G1CollectionCandidateList();
// Put the given set of candidates into this list, preserving the efficiency ordering.
void set(CandidateInfo* candidate_infos, uint num_infos);
// Removes any HeapRegions stored in this list also in the other list. The other
// list may only contain regions in this list, sorted by gc efficiency. It need
// not be a prefix of this list.
// E.g. if this list is "A B G H", the other list may be "A G H", but not "F" (not in
// this list) or "A H G" (wrong order).
void remove(G1CollectionCandidateRegionList* other);
void clear();
CandidateInfo& at(uint position) { return _candidates.at(position); }
uint length() const { return (uint)_candidates.length(); }
void verify() PRODUCT_RETURN;
// Comparison function to order regions in decreasing GC efficiency order. This
// will cause regions with a lot of live objects and large remembered sets to end
// up at the end of the list.
static int compare(CandidateInfo* ci1, CandidateInfo* ci2);
G1CollectionCandidateListIterator begin() {
return G1CollectionCandidateListIterator(this, 0);
}
// Returns the total number of collection set candidate old regions added.
uint num_regions() { return _num_regions; }
G1CollectionCandidateListIterator end() {
return G1CollectionCandidateListIterator(this, length());
}
};
uint cur_idx() const { return _front_idx; }
// Iterator for G1CollectionSetCandidates.
class G1CollectionSetCandidatesIterator : public StackObj {
G1CollectionSetCandidates* _which;
uint _marking_position;
HeapRegion* at(uint idx) const {
HeapRegion* res = nullptr;
if (idx < _num_regions) {
res = _regions[idx];
assert(res != nullptr, "Unexpected null HeapRegion at index %u", idx);
}
return res;
public:
G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint marking_position);
G1CollectionSetCandidatesIterator& operator++();
HeapRegion* operator*();
bool operator==(const G1CollectionSetCandidatesIterator& rhs);
bool operator!=(const G1CollectionSetCandidatesIterator& rhs);
};
// Tracks all collection set candidates, i.e. regions that could/should be evacuated soon.
//
// These candidate regions are tracked in a list of regions, sorted by decreasing
// "gc efficiency".
//
// Currently there is only one type of such regions:
//
// * marking_regions: the set of regions selected by concurrent marking to be
// evacuated to keep overall heap occupancy stable.
// They are guaranteed to be evacuated and cleared out during
// the mixed phase.
//
class G1CollectionSetCandidates : public CHeapObj<mtGC> {
friend class G1CollectionSetCandidatesIterator;
enum class CandidateOrigin : uint8_t {
Invalid,
Marking, // This region has been determined as candidate by concurrent marking.
Verify // Special value for verification.
};
G1CollectionCandidateList _marking_regions;
CandidateOrigin* _contains_map;
uint _max_regions;
// The number of regions from the last merge of candidates from the marking.
uint _last_marking_candidates_length;
bool is_from_marking(HeapRegion* r) const;
public:
G1CollectionSetCandidates();
~G1CollectionSetCandidates();
G1CollectionCandidateList& marking_regions() { return _marking_regions; }
void initialize(uint max_regions);
void clear();
// Merge collection set candidates from marking into the current marking list
// (which needs to be empty).
void set_candidates_from_marking(G1CollectionCandidateList::CandidateInfo* candidate_infos,
uint num_infos);
// The most recent length of the list that had been merged last via
// set_candidates_from_marking(). Used for calculating minimum collection set
// regions.
uint last_marking_candidates_length() const { return _last_marking_candidates_length; }
// Remove the given regions from the candidates. All given regions must be part
// of the candidates.
void remove(G1CollectionCandidateRegionList* other);
bool contains(const HeapRegion* r) const;
const char* get_short_type_str(const HeapRegion* r) const;
bool is_empty() const;
bool has_more_marking_candidates() const;
uint marking_regions_length() const { return _marking_regions.length(); }
private:
void verify_helper(G1CollectionCandidateList* list, uint& from_marking, CandidateOrigin* verify_map) PRODUCT_RETURN;
public:
void verify() PRODUCT_RETURN;
uint length() const { return marking_regions_length(); }
// Iteration
G1CollectionSetCandidatesIterator begin() {
return G1CollectionSetCandidatesIterator(this, 0);
}
// Remove num_regions from the front of the collection set candidate list.
void remove(uint num_regions);
// Remove num_remove regions from the back of the collection set candidate list.
void remove_from_end(uint num_remove, size_t wasted);
// Iterate over all remaining collection set candidate regions.
void iterate(HeapRegionClosure* cl);
// Iterate over all remaining collection set candidate regions from the end
// to the beginning of the set.
void iterate_backwards(HeapRegionClosure* cl);
// Return the number of candidate regions remaining.
uint num_remaining() { return _num_regions - _front_idx; }
bool is_empty() { return num_remaining() == 0; }
// Return the amount of reclaimable bytes that may be collected by the remaining
// candidate regions.
size_t remaining_reclaimable_bytes() { return _remaining_reclaimable_bytes; }
void verify() const PRODUCT_RETURN;
G1CollectionSetCandidatesIterator end() {
return G1CollectionSetCandidatesIterator(this, marking_regions_length());
}
};
#endif /* SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_HPP */

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_INLINE_HPP
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "utilities/growableArray.hpp"
inline G1CollectionCandidateListIterator::G1CollectionCandidateListIterator(G1CollectionCandidateList* which, uint position) :
_which(which), _position(position) { }
inline G1CollectionCandidateListIterator& G1CollectionCandidateListIterator::operator++() {
assert(_position < _which->length(), "must be");
_position++;
return *this;
}
inline HeapRegion* G1CollectionCandidateListIterator::operator*() {
return _which->_candidates.at(_position)._r;
}
inline bool G1CollectionCandidateListIterator::operator==(const G1CollectionCandidateListIterator& rhs) {
assert(_which == rhs._which, "iterator belongs to different array");
return _position == rhs._position;
}
inline bool G1CollectionCandidateListIterator::operator!=(const G1CollectionCandidateListIterator& rhs) {
return !(*this == rhs);
}
inline G1CollectionSetCandidatesIterator::G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint marking_position) :
_which(which), _marking_position(marking_position) {
}
inline G1CollectionSetCandidatesIterator& G1CollectionSetCandidatesIterator::operator++() {
assert(_marking_position < _which->_marking_regions.length(),
"must not be at end already");
_marking_position++;
return *this;
}
inline HeapRegion* G1CollectionSetCandidatesIterator::operator*() {
return _which->_marking_regions.at(_marking_position)._r;
}
inline bool G1CollectionSetCandidatesIterator::operator==(const G1CollectionSetCandidatesIterator& rhs) {
assert(_which == rhs._which, "iterator belongs to different array");
return _marking_position == rhs._marking_position;
}
inline bool G1CollectionSetCandidatesIterator::operator!=(const G1CollectionSetCandidatesIterator& rhs) {
return !(*this == rhs);
}
#endif /* SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_INLINE_HPP */
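These iterators only need prefix operator++, operator*, operator!= and matching begin()/end() on the container for range-based for to work, which is what code elsewhere in this change relies on, e.g. for (HeapRegion* r : *candidates()) in G1Policy. A self-contained model of the same pattern:

  #include <cstddef>
  #include <iostream>
  #include <vector>

  // Minimal container + iterator pair following the same shape as
  // G1CollectionCandidateList / G1CollectionCandidateListIterator.
  class IntList;

  class IntListIterator {
    const IntList* _which;
    std::size_t _position;
  public:
    IntListIterator(const IntList* which, std::size_t position) : _which(which), _position(position) { }
    IntListIterator& operator++() { _position++; return *this; }
    int operator*() const;
    bool operator!=(const IntListIterator& rhs) const { return _position != rhs._position; }
  };

  class IntList {
    std::vector<int> _values;
  public:
    void append(int v) { _values.push_back(v); }
    int at(std::size_t i) const { return _values[i]; }
    std::size_t length() const { return _values.size(); }
    IntListIterator begin() const { return IntListIterator(this, 0); }
    IntListIterator end() const { return IntListIterator(this, length()); }
  };

  int IntListIterator::operator*() const { return _which->at(_position); }

  int main() {
    IntList list;
    list.append(1);
    list.append(2);
    for (int v : list) {  // range-based for needs only ++, *, != and begin()/end()
      std::cout << v << "\n";
    }
    return 0;
  }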

View File

@ -31,37 +31,6 @@
#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"
// Order regions according to GC efficiency. This will cause regions with a lot
// of live objects and large remembered sets to end up at the end of the array.
// Given that we might skip collecting the last few old regions, if after a few
// mixed GCs the remaining have reclaimable bytes under a certain threshold, the
// hope is that the ones we'll skip are ones with both large remembered sets and
// a lot of live objects, not the ones with just a lot of live objects if we
// ordered according to the amount of reclaimable bytes per region.
static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
// Make sure that null entries are moved to the end.
if (hr1 == nullptr) {
if (hr2 == nullptr) {
return 0;
} else {
return 1;
}
} else if (hr2 == nullptr) {
return -1;
}
double gc_eff1 = hr1->gc_efficiency();
double gc_eff2 = hr2->gc_efficiency();
if (gc_eff1 > gc_eff2) {
return -1;
} if (gc_eff1 < gc_eff2) {
return 1;
} else {
return 0;
}
}
// Determine collection set candidates: For all regions determine whether they
// should be a collection set candidates, calculate their efficiency, sort and
// return them as G1CollectionSetCandidates instance.
@ -71,6 +40,8 @@ static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
// owner of this object.
class G1BuildCandidateRegionsTask : public WorkerTask {
using CandidateInfo = G1CollectionCandidateList::CandidateInfo;
// Work area for building the set of collection set candidates. Contains references
// to heap regions with their GC efficiencies calculated. To reduce contention
// on claiming array elements, worker threads claim parts of this array in chunks;
@ -82,7 +53,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
uint const _max_size;
uint const _chunk_size;
HeapRegion** _data;
CandidateInfo* _data;
uint volatile _cur_claim_idx;
@ -99,15 +70,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
G1BuildCandidateArray(uint max_num_regions, uint chunk_size, uint num_workers) :
_max_size(required_array_size(max_num_regions, chunk_size, num_workers)),
_chunk_size(chunk_size),
_data(NEW_C_HEAP_ARRAY(HeapRegion*, _max_size, mtGC)),
_data(NEW_C_HEAP_ARRAY(CandidateInfo, _max_size, mtGC)),
_cur_claim_idx(0) {
for (uint i = 0; i < _max_size; i++) {
_data[i] = nullptr;
_data[i] = CandidateInfo();
}
}
~G1BuildCandidateArray() {
FREE_C_HEAP_ARRAY(HeapRegion*, _data);
FREE_C_HEAP_ARRAY(CandidateInfo, _data);
}
// Claim a new chunk, returning its bounds [from, to[.
@ -123,25 +94,24 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// Set element in array.
void set(uint idx, HeapRegion* hr) {
assert(idx < _max_size, "Index %u out of bounds %u", idx, _max_size);
assert(_data[idx] == nullptr, "Value must not have been set.");
_data[idx] = hr;
assert(_data[idx]._r == nullptr, "Value must not have been set.");
_data[idx] = CandidateInfo(hr, hr->calc_gc_efficiency());
}
void sort_and_copy_into(HeapRegion** dest, uint num_regions) {
void sort_by_efficiency() {
if (_cur_claim_idx == 0) {
return;
}
for (uint i = _cur_claim_idx; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
assert(_data[i]._r == nullptr, "must be");
}
QuickSort::sort(_data, _cur_claim_idx, order_regions, true);
for (uint i = num_regions; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
for (uint i = 0; i < num_regions; i++) {
dest[i] = _data[i];
qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)G1CollectionCandidateList::compare);
for (uint i = _cur_claim_idx; i < _max_size; i++) {
assert(_data[i]._r == nullptr, "must be");
}
}
CandidateInfo* array() const { return _data; }
};
// Per-region closure. In addition to determining whether a region should be
@ -154,7 +124,6 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
uint _cur_chunk_end;
uint _regions_added;
size_t _reclaimable_bytes_added;
void add_region(HeapRegion* hr) {
if (_cur_chunk_idx == _cur_chunk_end) {
@ -162,13 +131,11 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}
assert(_cur_chunk_idx < _cur_chunk_end, "Must be");
hr->calc_gc_efficiency();
_array->set(_cur_chunk_idx, hr);
_cur_chunk_idx++;
_regions_added++;
_reclaimable_bytes_added += hr->reclaimable_bytes();
}
bool should_add(HeapRegion* hr) { return G1CollectionSetChooser::should_add(hr); }
@ -178,8 +145,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
_array(array),
_cur_chunk_idx(0),
_cur_chunk_end(0),
_regions_added(0),
_reclaimable_bytes_added(0) { }
_regions_added(0) { }
bool do_heap_region(HeapRegion* r) {
// We will skip any region that's currently used as an old GC
@ -188,8 +154,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
if (should_add(r) && !G1CollectedHeap::heap()->is_old_gc_alloc_region(r)) {
add_region(r);
} else if (r->is_old()) {
// Keep remembered sets for humongous regions, otherwise clean out remembered
// sets for old regions.
// Keep remembered sets for humongous regions, otherwise clean them out.
r->rem_set()->clear(true /* only_cardset */);
} else {
assert(!r->is_old() || !r->rem_set()->is_tracked(),
@ -200,48 +165,84 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}
uint regions_added() const { return _regions_added; }
size_t reclaimable_bytes_added() const { return _reclaimable_bytes_added; }
};
G1CollectedHeap* _g1h;
HeapRegionClaimer _hrclaimer;
uint volatile _num_regions_added;
size_t volatile _reclaimable_bytes_added;
G1BuildCandidateArray _result;
void update_totals(uint num_regions, size_t reclaimable_bytes) {
void update_totals(uint num_regions) {
if (num_regions > 0) {
assert(reclaimable_bytes > 0, "invariant");
Atomic::add(&_num_regions_added, num_regions);
Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes);
} else {
assert(reclaimable_bytes == 0, "invariant");
}
}
// Early prune (remove) regions meeting the G1HeapWastePercent criteria. That
// is, either until only the minimum amount of old collection set regions are
// available (for forward progress in evacuation) or the waste accumulated by the
// removed regions is above the maximum allowed waste.
// Updates number of candidates and reclaimable bytes given.
void prune(CandidateInfo* data) {
G1Policy* p = G1CollectedHeap::heap()->policy();
uint num_candidates = Atomic::load(&_num_regions_added);
uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates);
uint num_pruned = 0;
size_t wasted_bytes = 0;
if (min_old_cset_length >= num_candidates) {
// We take all of the candidate regions to provide some forward progress.
return;
}
size_t allowed_waste = p->allowed_waste_in_collection_set();
uint max_to_prune = num_candidates - min_old_cset_length;
while (true) {
HeapRegion* r = data[num_candidates - num_pruned - 1]._r;
size_t const reclaimable = r->reclaimable_bytes();
if (num_pruned >= max_to_prune ||
wasted_bytes + reclaimable > allowed_waste) {
break;
}
r->rem_set()->clear(true /* cardset_only */);
wasted_bytes += reclaimable;
num_pruned++;
}
log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving " SIZE_FORMAT " bytes waste (allowed " SIZE_FORMAT ")",
num_pruned,
num_candidates,
wasted_bytes,
allowed_waste);
Atomic::sub(&_num_regions_added, num_pruned, memory_order_relaxed);
}
public:
G1BuildCandidateRegionsTask(uint max_num_regions, uint chunk_size, uint num_workers) :
WorkerTask("G1 Build Candidate Regions"),
_g1h(G1CollectedHeap::heap()),
_hrclaimer(num_workers),
_num_regions_added(0),
_reclaimable_bytes_added(0),
_result(max_num_regions, chunk_size, num_workers) { }
void work(uint worker_id) {
G1BuildCandidateRegionsClosure cl(&_result);
_g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
update_totals(cl.regions_added(), cl.reclaimable_bytes_added());
update_totals(cl.regions_added());
}
G1CollectionSetCandidates* get_sorted_candidates() {
HeapRegion** regions = NEW_C_HEAP_ARRAY(HeapRegion*, _num_regions_added, mtGC);
_result.sort_and_copy_into(regions, _num_regions_added);
return new G1CollectionSetCandidates(regions,
_num_regions_added,
_reclaimable_bytes_added);
void sort_and_prune_into(G1CollectionSetCandidates* candidates) {
_result.sort_by_efficiency();
prune(_result.array());
candidates->set_candidates_from_marking(_result.array(),
_num_regions_added);
}
};
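The prune() step above walks the efficiency-sorted candidate array from the back, i.e. from the least efficient regions, and drops regions until either only the minimum needed for forward progress would remain or the accumulated reclaimable bytes would exceed the allowed waste. A standalone sketch of that loop (illustrative types and parameters, not the HotSpot code):

  #include <cstddef>
  #include <vector>

  struct Candidate { std::size_t reclaimable_bytes; };

  // How many candidates to drop from the tail of an array sorted by decreasing gc
  // efficiency, bounded by a minimum kept count and an allowed amount of wasted
  // (uncollected) reclaimable bytes.
  std::size_t num_to_prune(const std::vector<Candidate>& sorted_candidates,
                           std::size_t min_kept,
                           std::size_t allowed_waste_bytes) {
    if (min_kept >= sorted_candidates.size()) {
      return 0;                                   // keep everything for forward progress
    }
    std::size_t max_to_prune = sorted_candidates.size() - min_kept;
    std::size_t pruned = 0;
    std::size_t wasted = 0;
    while (pruned < max_to_prune) {
      const Candidate& c = sorted_candidates[sorted_candidates.size() - pruned - 1];
      if (wasted + c.reclaimable_bytes > allowed_waste_bytes) {
        break;                                    // pruning this one would exceed allowed waste
      }
      wasted += c.reclaimable_bytes;
      pruned++;
    }
    return pruned;
  }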
@ -257,69 +258,13 @@ bool G1CollectionSetChooser::should_add(HeapRegion* hr) {
hr->rem_set()->is_complete();
}
// Closure implementing early pruning (removal) of regions meeting the
// G1HeapWastePercent criteria. That is, either until _max_pruned regions were
// removed (for forward progress in evacuation) or the waste accumulated by the
// removed regions is above max_wasted.
class G1PruneRegionClosure : public HeapRegionClosure {
uint _num_pruned;
size_t _cur_wasted;
uint const _max_pruned;
size_t const _max_wasted;
public:
G1PruneRegionClosure(uint max_pruned, size_t max_wasted) :
_num_pruned(0), _cur_wasted(0), _max_pruned(max_pruned), _max_wasted(max_wasted) { }
virtual bool do_heap_region(HeapRegion* r) {
size_t const reclaimable = r->reclaimable_bytes();
if (_num_pruned >= _max_pruned ||
_cur_wasted + reclaimable > _max_wasted) {
return true;
}
r->rem_set()->clear(true /* cardset_only */);
_cur_wasted += reclaimable;
_num_pruned++;
return false;
}
uint num_pruned() const { return _num_pruned; }
size_t wasted() const { return _cur_wasted; }
};
void G1CollectionSetChooser::prune(G1CollectionSetCandidates* candidates) {
G1Policy* p = G1CollectedHeap::heap()->policy();
uint min_old_cset_length = p->calc_min_old_cset_length(candidates->num_regions());
uint num_candidates = candidates->num_regions();
if (min_old_cset_length < num_candidates) {
size_t allowed_waste = p->allowed_waste_in_collection_set();
G1PruneRegionClosure prune_cl(num_candidates - min_old_cset_length,
allowed_waste);
candidates->iterate_backwards(&prune_cl);
log_debug(gc, ergo, cset)("Pruned %u regions out of %u, leaving " SIZE_FORMAT " bytes waste (allowed " SIZE_FORMAT ")",
prune_cl.num_pruned(),
candidates->num_regions(),
prune_cl.wasted(),
allowed_waste);
candidates->remove_from_end(prune_cl.num_pruned(), prune_cl.wasted());
}
}
G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkerThreads* workers, uint max_num_regions) {
void G1CollectionSetChooser::build(WorkerThreads* workers, uint max_num_regions, G1CollectionSetCandidates* candidates) {
uint num_workers = workers->active_workers();
uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions);
G1BuildCandidateRegionsTask cl(max_num_regions, chunk_size, num_workers);
workers->run_task(&cl, num_workers);
G1CollectionSetCandidates* result = cl.get_sorted_candidates();
prune(result);
result->verify();
return result;
cl.sort_and_prune_into(candidates);
candidates->verify();
}

View File

@ -38,12 +38,7 @@ class WorkerThreads;
class G1CollectionSetChooser : public AllStatic {
static uint calculate_work_chunk_size(uint num_workers, uint num_regions);
// Remove regions in the collection set candidates as long as the G1HeapWastePercent
// criteria is met. Keep at least the minimum amount of old regions to guarantee
// some progress.
static void prune(G1CollectionSetCandidates* candidates);
public:
static size_t mixed_gc_live_threshold_bytes() {
return HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
}
@ -60,7 +55,7 @@ public:
// Build and return set of collection set candidates sorted by decreasing gc
// efficiency.
static G1CollectionSetCandidates* build(WorkerThreads* workers, uint max_num_regions);
static void build(WorkerThreads* workers, uint max_num_regions, G1CollectionSetCandidates* candidates);
};
#endif // SHARE_GC_G1_G1COLLECTIONSETCHOOSER_HPP

View File

@ -2978,7 +2978,7 @@ bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) {
size_t capacity_bytes = r->capacity();
size_t used_bytes = r->used();
size_t live_bytes = r->live_bytes();
double gc_eff = r->gc_efficiency();
double gc_eff = r->calc_gc_efficiency();
size_t remset_bytes = r->rem_set()->mem_size();
size_t code_roots_bytes = r->rem_set()->code_roots_mem_size();
const char* remset_type = r->rem_set()->get_short_state_str();

View File

@ -433,6 +433,8 @@ void G1HeapVerifier::verify_region_sets() {
VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
_g1h->heap_region_iterate(&cl);
cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
_g1h->collection_set()->candidates()->verify();
}
void G1HeapVerifier::prepare_for_verify() {

View File

@ -59,8 +59,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
PreservedMarks* preserved_marks,
uint worker_id,
uint num_workers,
size_t young_cset_length,
size_t optional_cset_length,
G1CollectionSet* collection_set,
G1EvacFailureRegions* evac_failure_regions)
: _g1h(g1h),
_task_queue(g1h->task_queue(worker_id)),
@ -78,12 +77,12 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_trim_ticks(),
_surviving_young_words_base(nullptr),
_surviving_young_words(nullptr),
_surviving_words_length(young_cset_length + 1),
_surviving_words_length(collection_set->young_region_length() + 1),
_old_gen_is_full(false),
_partial_objarray_chunk_size(ParGCArrayScanChunk),
_partial_array_stepper(num_workers),
_string_dedup_requests(),
_max_num_optional_regions(optional_cset_length),
_max_num_optional_regions(collection_set->optional_region_length()),
_numa(g1h->numa()),
_obj_alloc_stat(nullptr),
EVAC_FAILURE_INJECTOR_ONLY(_evac_failure_inject_counter(0) COMMA)
@ -104,7 +103,9 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_plab_allocator = new G1PLABAllocator(_g1h->allocator());
_closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
_closures = G1EvacuationRootClosures::create_root_closures(_g1h,
this,
collection_set->only_contains_young_regions());
_oops_into_optional_regions = new G1OopStarChunkedList[_max_num_optional_regions];
@ -569,8 +570,7 @@ G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id)
_preserved_marks_set.get(worker_id),
worker_id,
_num_workers,
_young_cset_length,
_optional_cset_length,
_collection_set,
_evac_failure_regions);
}
return _states[worker_id];
@ -690,16 +690,14 @@ void G1ParScanThreadState::update_numa_stats(uint node_index) {
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
uint num_workers,
size_t young_cset_length,
size_t optional_cset_length,
G1CollectionSet* collection_set,
G1EvacFailureRegions* evac_failure_regions) :
_g1h(g1h),
_collection_set(collection_set),
_rdcqs(G1BarrierSet::dirty_card_queue_set().allocator()),
_preserved_marks_set(true /* in_c_heap */),
_states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, num_workers, mtGC)),
_surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length + 1, mtGC)),
_young_cset_length(young_cset_length),
_optional_cset_length(optional_cset_length),
_surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, collection_set->young_region_length() + 1, mtGC)),
_num_workers(num_workers),
_flushed(false),
_evac_failure_regions(evac_failure_regions) {
@ -707,7 +705,7 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
for (uint i = 0; i < num_workers; ++i) {
_states[i] = nullptr;
}
memset(_surviving_young_words_total, 0, (young_cset_length + 1) * sizeof(size_t));
memset(_surviving_young_words_total, 0, (collection_set->young_region_length() + 1) * sizeof(size_t));
}
G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {

View File

@ -41,6 +41,7 @@
#include "utilities/ticks.hpp"
class G1CardTable;
class G1CollectionSet;
class G1EvacFailureRegions;
class G1EvacuationRootClosures;
class G1OopStarChunkedList;
@ -117,8 +118,7 @@ public:
PreservedMarks* preserved_marks,
uint worker_id,
uint num_workers,
size_t young_cset_length,
size_t optional_cset_length,
G1CollectionSet* collection_set,
G1EvacFailureRegions* evac_failure_regions);
virtual ~G1ParScanThreadState();
@ -231,12 +231,11 @@ public:
class G1ParScanThreadStateSet : public StackObj {
G1CollectedHeap* _g1h;
G1CollectionSet* _collection_set;
G1RedirtyCardsQueueSet _rdcqs;
PreservedMarksSet _preserved_marks_set;
G1ParScanThreadState** _states;
size_t* _surviving_young_words_total;
size_t _young_cset_length;
size_t _optional_cset_length;
uint _num_workers;
bool _flushed;
G1EvacFailureRegions* _evac_failure_regions;
@ -244,8 +243,7 @@ class G1ParScanThreadStateSet : public StackObj {
public:
G1ParScanThreadStateSet(G1CollectedHeap* g1h,
uint num_workers,
size_t young_cset_length,
size_t optional_cset_length,
G1CollectionSet* collection_set,
G1EvacFailureRegions* evac_failure_regions);
~G1ParScanThreadStateSet();

View File

@ -28,7 +28,7 @@
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
@ -485,21 +485,20 @@ uint G1Policy::calculate_desired_eden_length_before_young_only(double base_time_
uint G1Policy::calculate_desired_eden_length_before_mixed(double base_time_ms,
uint min_eden_length,
uint max_eden_length) const {
G1CollectionSetCandidates* candidates = _collection_set->candidates();
uint min_old_regions_end = MIN2(candidates->cur_idx() + calc_min_old_cset_length(candidates->num_regions()),
candidates->num_regions());
uint min_marking_candidates = MIN2(calc_min_old_cset_length(candidates()->last_marking_candidates_length()),
candidates()->marking_regions_length());
double predicted_region_evac_time_ms = base_time_ms;
for (uint i = candidates->cur_idx(); i < min_old_regions_end; i++) {
HeapRegion* r = candidates->at(i);
for (HeapRegion* r : candidates()->marking_regions()) {
if (min_marking_candidates == 0) {
break;
}
predicted_region_evac_time_ms += predict_region_total_time_ms(r, false /* for_young_only_phase */);
min_marking_candidates--;
}
uint desired_eden_length_by_min_cset_length =
calculate_desired_eden_length_before_young_only(predicted_region_evac_time_ms,
min_eden_length,
max_eden_length);
return desired_eden_length_by_min_cset_length;
return calculate_desired_eden_length_before_young_only(predicted_region_evac_time_ms,
min_eden_length,
max_eden_length);
}
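calculate_desired_eden_length_before_mixed() charges the predicted evacuation time of the minimum number of marking candidates up front and then sizes eden from the remaining pause-time budget. A much-simplified standalone model of that arithmetic (all names and the linear eden model are assumptions, not the G1Policy API):

  #include <cstddef>
  #include <vector>

  // Charge the mandatory old candidates against the pause target first, then size
  // eden from whatever budget is left.
  std::size_t desired_eden_regions(double pause_target_ms,
                                   double base_time_ms,
                                   double predicted_time_per_eden_region_ms,
                                   const std::vector<double>& old_region_times_ms,
                                   std::size_t min_marking_candidates) {
    double predicted_ms = base_time_ms;
    std::size_t charged = 0;
    for (double t : old_region_times_ms) {
      if (charged == min_marking_candidates) {
        break;
      }
      predicted_ms += t;          // mandatory old regions eat into the budget first
      charged++;
    }
    double budget_ms = pause_target_ms - predicted_ms;
    if (budget_ms <= 0.0 || predicted_time_per_eden_region_ms <= 0.0) {
      return 0;
    }
    return (std::size_t)(budget_ms / predicted_time_per_eden_region_ms);
  }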
double G1Policy::predict_survivor_regions_evac_time() const {
@ -537,7 +536,7 @@ void G1Policy::record_full_collection_start() {
// Release the future to-space so that it is available for compaction into.
collector_state()->set_in_young_only_phase(false);
collector_state()->set_in_full_gc(true);
_collection_set->clear_candidates();
_collection_set->abandon_all_candidates();
_pending_cards_at_gc_start = 0;
}
@ -665,6 +664,10 @@ void G1Policy::record_concurrent_mark_cleanup_start() {
_mark_cleanup_start_sec = os::elapsedTime();
}
G1CollectionSetCandidates* G1Policy::candidates() const {
return _collection_set->candidates();
}
double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
return phase_times()->average_time_ms(phase);
}
@ -797,7 +800,9 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
if (!next_gc_should_be_mixed("do not continue mixed GCs")) {
collector_state()->set_in_young_only_phase(true);
clear_collection_set_candidates();
assert(!candidates()->has_more_marking_candidates(),
"only end mixed if all candidates from marking were processed");
maybe_start_marking();
}
} else {
@ -858,9 +863,9 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
_collection_set->young_region_length());
}
if (_collection_set->old_region_length() > 0) {
if (_collection_set->initial_old_region_length() > 0) {
_analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
_collection_set->old_region_length());
_collection_set->initial_old_region_length());
}
_analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
@ -1212,7 +1217,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
// active. The following remark might change the "evacuation efficiency" of
// the regions in this set, leading to failing asserts later.
// Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
clear_collection_set_candidates();
abandon_collection_set_candidates();
abort_time_to_mixed_tracking();
initiate_conc_mark();
log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
@ -1244,8 +1249,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_sets) {
bool mixed_gc_pending = false;
if (has_rebuilt_remembered_sets) {
G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions());
_collection_set->set_candidates(candidates);
G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions(), candidates());
mixed_gc_pending = next_gc_should_be_mixed("request young-only gcs");
}
@ -1255,7 +1259,6 @@ void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_se
}
if (!mixed_gc_pending) {
clear_collection_set_candidates();
abort_time_to_mixed_tracking();
}
collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
@ -1269,26 +1272,13 @@ void G1Policy::record_concurrent_mark_cleanup_end(bool has_rebuilt_remembered_se
record_pause(G1GCPauseType::Cleanup, _mark_cleanup_start_sec, end_sec);
}
double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
return percent_of(reclaimable_bytes, _g1h->capacity());
}
class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
virtual bool do_heap_region(HeapRegion* r) {
r->rem_set()->clear_locked(true /* only_cardset */);
return false;
}
};
void G1Policy::clear_collection_set_candidates() {
if (_collection_set->candidates() == nullptr) {
return;
}
void G1Policy::abandon_collection_set_candidates() {
// Clear remembered sets of remaining candidate regions and the actual candidate
// set.
G1ClearCollectionSetCandidateRemSets cl;
_collection_set->candidates()->iterate(&cl);
_collection_set->clear_candidates();
for (HeapRegion* r : *candidates()) {
r->rem_set()->clear_locked(true /* only_cardset */);
}
_collection_set->abandon_all_candidates();
}
void G1Policy::maybe_start_marking() {
@ -1371,9 +1361,7 @@ void G1Policy::abort_time_to_mixed_tracking() {
}
bool G1Policy::next_gc_should_be_mixed(const char* no_candidates_str) const {
G1CollectionSetCandidates* candidates = _collection_set->candidates();
if (candidates == nullptr || candidates->is_empty()) {
if (!candidates()->has_more_marking_candidates()) {
if (no_candidates_str != nullptr) {
log_debug(gc, ergo)("%s (candidate old regions not available)", no_candidates_str);
}
@ -1414,48 +1402,52 @@ uint G1Policy::calc_max_old_cset_length() const {
return (uint)ceil(result);
}
void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
double time_remaining_ms,
uint& num_initial_regions,
uint& num_optional_regions) {
assert(candidates != nullptr, "Must be");
static void print_finish_message(const char* reason, bool from_marking) {
log_debug(gc, ergo, cset)("Finish adding %s candidates to collection set (%s).",
from_marking ? "marking" : "retained", reason);
}
double G1Policy::select_candidates_from_marking(G1CollectionCandidateList* marking_list,
double time_remaining_ms,
G1CollectionCandidateRegionList* initial_old_regions,
G1CollectionCandidateRegionList* optional_old_regions) {
assert(marking_list != nullptr, "must be");
num_initial_regions = 0;
num_optional_regions = 0;
uint num_expensive_regions = 0;
uint num_initial_regions_selected = 0;
uint num_optional_regions_selected = 0;
double predicted_initial_time_ms = 0.0;
double predicted_optional_time_ms = 0.0;
double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();
const uint min_old_cset_length = calc_min_old_cset_length(candidates->num_regions());
const uint min_old_cset_length = calc_min_old_cset_length(candidates()->last_marking_candidates_length());
const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
bool check_time_remaining = use_adaptive_young_list_length();
uint candidate_idx = candidates->cur_idx();
log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, "
log_debug(gc, ergo, cset)("Start adding marking candidates to collection set. "
"Min %u regions, max %u regions, "
"time remaining %1.2fms, optional threshold %1.2fms",
min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
HeapRegion* hr = candidates->at(candidate_idx);
while (hr != nullptr) {
if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
G1CollectionCandidateListIterator iter = marking_list->begin();
for (; iter != marking_list->end(); ++iter) {
if (num_initial_regions_selected + num_optional_regions_selected >= max_old_cset_length) {
// Added maximum number of old regions to the CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
"Initial %u regions, optional %u regions",
num_initial_regions, num_optional_regions);
print_finish_message("Maximum number of regions reached", true);
break;
}
HeapRegion* hr = *iter;
double predicted_time_ms = predict_region_total_time_ms(hr, false);
time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
// Add regions to old set until we reach the minimum amount
if (num_initial_regions < min_old_cset_length) {
if (initial_old_regions->length() < min_old_cset_length) {
initial_old_regions->append(hr);
num_initial_regions_selected++;
predicted_initial_time_ms += predicted_time_ms;
num_initial_regions++;
// Record the number of regions added with no time remaining
if (time_remaining_ms == 0.0) {
num_expensive_regions++;
@ -1463,53 +1455,54 @@ void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* c
} else if (!check_time_remaining) {
// In the non-auto-tuning case, we'll finish adding regions
// to the CSet if we reach the minimum.
log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
print_finish_message("Region amount reached min", true);
break;
} else {
// Keep adding regions to old set until we reach the optional threshold
if (time_remaining_ms > optional_threshold_ms) {
predicted_initial_time_ms += predicted_time_ms;
num_initial_regions++;
initial_old_regions->append(hr);
num_initial_regions_selected++;
} else if (time_remaining_ms > 0) {
// Keep adding optional regions until time is up.
assert(num_optional_regions < max_optional_regions, "Should not be possible.");
assert(optional_old_regions->length() < max_optional_regions, "Should not be possible.");
predicted_optional_time_ms += predicted_time_ms;
num_optional_regions++;
optional_old_regions->append(hr);
num_optional_regions_selected++;
} else {
log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high).");
print_finish_message("Predicted time too high", true);
break;
}
}
hr = candidates->at(++candidate_idx);
}
if (hr == nullptr) {
log_debug(gc, ergo, cset)("Old candidate collection set empty.");
if (iter == marking_list->end()) {
log_debug(gc, ergo, cset)("Marking candidates exhausted.");
}
if (num_expensive_regions > 0) {
log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.",
log_debug(gc, ergo, cset)("Added %u marking candidates to collection set although the predicted time was too high.",
num_expensive_regions);
}
log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
"predicted initial time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
num_initial_regions, num_optional_regions,
log_debug(gc, ergo, cset)("Finish adding marking candidates to collection set. Initial: %u, optional: %u, "
"predicted initial time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2fms",
num_initial_regions_selected, num_optional_regions_selected,
predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
assert(initial_old_regions->length() == num_initial_regions_selected, "must be");
assert(optional_old_regions->length() == num_optional_regions_selected, "must be");
return time_remaining_ms;
}
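For orientation only, and not part of this change: a minimal self-contained C++ sketch of the same three-tier policy above — take regions up to the minimum unconditionally, keep adding initial regions while the remaining budget stays above the optional threshold, then add optional regions until time runs out — using a hypothetical constant per-region prediction in place of G1's cost models and hypothetical limits.

// Illustrative sketch only; all values are assumptions, not G1 defaults.
#include <algorithm>
#include <cstdio>

int main() {
  const double per_region_ms      = 3.0;   // assumed constant cost per candidate
  double time_remaining_ms        = 20.0;  // assumed pause-time budget left
  const double optional_threshold = time_remaining_ms * 0.2;  // cf. optional_prediction_fraction()
  const unsigned min_regions = 2, max_regions = 8, num_candidates = 10;

  unsigned initial = 0, optional = 0;
  for (unsigned i = 0; i < num_candidates; i++) {
    if (initial + optional >= max_regions) break;           // maximum number of regions reached
    time_remaining_ms = std::max(time_remaining_ms - per_region_ms, 0.0);
    if (initial < min_regions) {
      initial++;                                             // always take the minimum amount
    } else if (time_remaining_ms > optional_threshold) {
      initial++;                                             // budget still above the optional threshold
    } else if (time_remaining_ms > 0.0) {
      optional++;                                            // defer to optional evacuation
    } else {
      break;                                                 // predicted time too high
    }
  }
  printf("initial=%u optional=%u remaining=%.1fms\n", initial, optional, time_remaining_ms);
  return 0;
}

Run standalone with the numbers above, the sketch splits the ten candidates into initial=5 and optional=1 before the budget is exhausted, which is the kind of split the log_debug lines in the real code report.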
void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
uint const max_optional_regions,
void G1Policy::calculate_optional_collection_set_regions(G1CollectionCandidateRegionList* optional_regions,
double time_remaining_ms,
uint& num_optional_regions) {
assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");
G1CollectionCandidateRegionList* selected_regions) {
assert(_collection_set->optional_region_length() > 0,
"Should only be called when there are optional regions");
num_optional_regions = 0;
double total_prediction_ms = 0.0;
uint candidate_idx = candidates->cur_idx();
HeapRegion* r = candidates->at(candidate_idx);
while (num_optional_regions < max_optional_regions) {
assert(r != nullptr, "Region must exist");
for (HeapRegion* r : *optional_regions) {
double prediction_ms = predict_region_total_time_ms(r, false);
if (prediction_ms > time_remaining_ms) {
@ -1521,12 +1514,12 @@ void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidat
total_prediction_ms += prediction_ms;
time_remaining_ms -= prediction_ms;
num_optional_regions++;
r = candidates->at(++candidate_idx);
selected_regions->append(r);
}
log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Total predicted time: %.3fms",
num_optional_regions, max_optional_regions, total_prediction_ms);
selected_regions->length(), optional_regions->length(), total_prediction_ms);
}
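For intuition: with 10 ms remaining and optional regions predicted at 4 ms, 3 ms and 5 ms, the loop above appends the first two to the selected list (7 ms total prediction) and stops at the third, because its prediction no longer fits the roughly 3 ms still available.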
void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
View File
@ -46,8 +46,10 @@
class HeapRegion;
class G1CollectionSet;
class G1CollectionCandidateList;
class G1CollectionSetCandidates;
class G1CollectionSetChooser;
class G1CollectionCandidateRegionList;
class G1IHOPControl;
class G1Analytics;
class G1SurvivorRegions;
@ -181,6 +183,7 @@ public:
private:
G1CollectionSet* _collection_set;
G1CollectionSetCandidates* candidates() const;
double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
double other_time_ms(double pause_time_ms) const;
@ -265,13 +268,8 @@ public:
// during a mixed GC.
uint calc_max_old_cset_length() const;
// Returns the given amount of reclaimable bytes (that represents
// the amount of reclaimable space still to be collected) as a
// percentage of the current heap capacity.
double reclaimable_bytes_percent(size_t reclaimable_bytes) const;
private:
void clear_collection_set_candidates();
void abandon_collection_set_candidates();
// Sets up marking if proper conditions are met.
void maybe_start_marking();
// Manage time-to-mixed tracking.
@ -340,20 +338,20 @@ public:
// Amount of allowed waste in bytes in the collection set.
size_t allowed_waste_in_collection_set() const;
// Calculate and return the number of initial and optional old gen regions from
// the given collection set candidates and the remaining time.
void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
double time_remaining_ms,
uint& num_initial_regions,
uint& num_optional_regions);
// Calculate and fill in the initial and optional old gen candidate regions from
// the given candidate list and the remaining time.
// Returns the remaining time.
double select_candidates_from_marking(G1CollectionCandidateList* marking_list,
double time_remaining_ms,
G1CollectionCandidateRegionList* initial_old_regions,
G1CollectionCandidateRegionList* optional_old_regions);
// Calculate the number of optional regions from the given collection set candidates,
// the remaining time and the maximum number of these regions and return the number
// of actually selected regions in num_optional_regions.
void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
uint const max_optional_regions,
void calculate_optional_collection_set_regions(G1CollectionCandidateRegionList* optional_old_regions,
double time_remaining_ms,
uint& num_optional_regions);
G1CollectionCandidateRegionList* selected);
private:
@ -422,12 +420,12 @@ private:
// Fraction used when predicting how many optional regions to include in
// the CSet. This fraction of the available time is used for optional regions,
// the rest is used to add old regions to the normal CSet.
double optional_prediction_fraction() { return 0.2; }
double optional_prediction_fraction() const { return 0.2; }
public:
// Fraction used when evacuating the optional regions. This fraction of the
// remaining time is used to choose what regions to include in the evacuation.
double optional_evacuation_fraction() { return 0.75; }
double optional_evacuation_fraction() const { return 0.75; }
uint tenuring_threshold() const { return _tenuring_threshold; }
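For scale, using the two constants above: with 100 ms of pause budget remaining at the start of selection, marking candidates begin to be classified as optional once the budget drops below 100 ms * 0.2 = 20 ms, and an optional evacuation round may then plan with up to 0.75 of whatever time is still left at that point.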
View File
@ -69,7 +69,9 @@ public:
CodeBlobClosure* weak_codeblobs() { return &_weak._codeblobs; }
};
G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1CollectedHeap* g1h,
G1ParScanThreadState* pss,
bool process_only_dirty_klasses) {
G1EvacuationRootClosures* res = nullptr;
if (g1h->collector_state()->in_concurrent_start_gc()) {
if (ClassUnloadingWithConcurrentMark) {
@ -78,7 +80,7 @@ G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParSc
res = new G1ConcurrentStartMarkClosures<true>(g1h, pss);
}
} else {
res = new G1EvacuationClosures(g1h, pss, g1h->collector_state()->in_young_only_phase());
res = new G1EvacuationClosures(g1h, pss, process_only_dirty_klasses);
}
return res;
}
View File
@ -49,7 +49,9 @@ public:
// Applied to code blobs treated as weak roots.
virtual CodeBlobClosure* weak_codeblobs() = 0;
static G1EvacuationRootClosures* create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h);
static G1EvacuationRootClosures* create_root_closures(G1CollectedHeap* g1h,
G1ParScanThreadState* pss,
bool process_only_dirty_klasses);
};
#endif // SHARE_GC_G1_G1ROOTCLOSURES_HPP
View File
@ -268,7 +268,7 @@ void G1YoungCollector::calculate_collection_set(G1EvacInfo* evacuation_info, dou
collection_set()->finalize_initial_collection_set(target_pause_time_ms, survivor_regions());
evacuation_info->set_collection_set_regions(collection_set()->region_length() +
collection_set()->optional_region_length());
collection_set()->optional_region_length());
concurrent_mark()->verify_no_collection_set_oops();
@ -1014,7 +1014,7 @@ void G1YoungCollector::collect() {
G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
// JStat/MXBeans
G1YoungGCMonitoringScope ms(monitoring_support(),
collector_state()->in_mixed_phase() /* all_memory_pools_affected */);
!collection_set()->candidates()->is_empty() /* all_memory_pools_affected */);
// Create the heap printer before internal pause timing to have
// heap information printed as last part of detailed GC log.
G1HeapPrinterMark hpm(_g1h);
@ -1043,8 +1043,7 @@ void G1YoungCollector::collect() {
G1ParScanThreadStateSet per_thread_states(_g1h,
workers()->active_workers(),
collection_set()->young_region_length(),
collection_set()->optional_region_length(),
collection_set(),
&_evac_failure_regions);
bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0;
View File
@ -28,7 +28,7 @@
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1EvacInfo.hpp"
@ -81,21 +81,14 @@ public:
}
void do_work(uint worker_id) override {
class G1SampleCollectionSetCandidatesClosure : public HeapRegionClosure {
public:
G1MonotonicArenaMemoryStats _total;
bool do_heap_region(HeapRegion* r) override {
_total.add(r->rem_set()->card_set_memory_stats());
return false;
}
} cl;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->collection_set()->candidates()->iterate(&cl);
g1h->set_collection_set_candidates_stats(cl._total);
G1MonotonicArenaMemoryStats _total;
G1CollectionSetCandidates* candidates = g1h->collection_set()->candidates();
for (HeapRegion* r : *candidates) {
_total.add(r->rem_set()->card_set_memory_stats());
}
g1h->set_collection_set_candidates_stats(_total);
}
};
@ -357,7 +350,6 @@ class G1PostEvacuateCollectionSetCleanupTask2::ClearRetainedRegionBitmaps : publ
};
public:
ClearRetainedRegionBitmaps(G1EvacFailureRegions* evac_failure_regions) :
G1AbstractSubTask(G1GCPhaseTimes::ClearRetainedRegionBitmaps),
_evac_failure_regions(evac_failure_regions),
View File
@ -69,8 +69,8 @@ class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask {
class ClearRetainedRegionBitmaps;
class RedirtyLoggedCardsTask;
class RestorePreservedMarksTask;
class ResizeTLABsTask;
class FreeCollectionSetTask;
class ResizeTLABsTask;
public:
G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
View File
@ -28,6 +28,7 @@
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
@ -127,8 +128,6 @@ void HeapRegion::hr_clear(bool clear_space) {
init_top_at_mark_start();
if (clear_space) clear(SpaceDecorator::Mangle);
_gc_efficiency = -1.0;
}
void HeapRegion::clear_cardtable() {
@ -136,7 +135,7 @@ void HeapRegion::clear_cardtable() {
ct->clear_MemRegion(MemRegion(bottom(), end()));
}
void HeapRegion::calc_gc_efficiency() {
double HeapRegion::calc_gc_efficiency() {
// GC efficiency is the ratio of how much space would be
// reclaimed over how long we predict it would take to reclaim it.
G1Policy* policy = G1CollectedHeap::heap()->policy();
@ -145,7 +144,7 @@ void HeapRegion::calc_gc_efficiency() {
// a mixed gc because the region will only be evacuated during a
// mixed gc.
double region_elapsed_time_ms = policy->predict_region_total_time_ms(this, false /* for_young_only_phase */);
_gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
return (double)reclaimable_bytes() / region_elapsed_time_ms;
}
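Worked example of the returned ratio: a region with 8 MB of reclaimable space and a predicted total evacuation time of 4 ms now reports an efficiency of 8 MB / 4 ms = 2 MB per ms; callers obtain the value from the call instead of reading the removed _gc_efficiency field.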
void HeapRegion::set_free() {
@ -233,7 +232,8 @@ HeapRegion::HeapRegion(uint hrm_index,
_parsable_bottom(nullptr),
_garbage_bytes(0),
_young_index_in_cset(-1),
_surv_rate_group(nullptr), _age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0),
_surv_rate_group(nullptr),
_age_index(G1SurvRateGroup::InvalidAgeIndex),
_node_index(G1NUMA::UnknownNodeIndex)
{
assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
@ -263,7 +263,7 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
used());
}
void HeapRegion::note_evacuation_failure(bool during_concurrent_start) {
void HeapRegion::note_evacuation_failure(bool during_concurrent_start) {
// PB must be bottom - we only evacuate old gen regions after scrubbing, and
// young gen regions never have their PB set to anything other than bottom.
assert(parsable_bottom_acquire() == bottom(), "must be");
@ -429,6 +429,9 @@ void HeapRegion::print_on(outputStream* st) const {
st->print("|%2s", get_short_type_str());
if (in_collection_set()) {
st->print("|CS");
} else if (is_collection_set_candidate()) {
G1CollectionSetCandidates* candidates = G1CollectedHeap::heap()->collection_set()->candidates();
st->print("|%s", candidates->get_short_type_str(this));
} else {
st->print("| ");
}
View File
@ -250,11 +250,7 @@ private:
G1SurvRateGroup* _surv_rate_group;
int _age_index;
// Cached attributes used in the collection set policy information
// The calculated GC efficiency of the region.
double _gc_efficiency;
// NUMA node.
uint _node_index;
void report_region_type_change(G1HeapRegionTraceType::Type to);
@ -341,10 +337,12 @@ public:
// since it will also be reclaimed if we collect the region.
size_t reclaimable_bytes() {
size_t known_live_bytes = live_bytes();
assert(known_live_bytes <= capacity(), "sanity");
assert(known_live_bytes <= capacity(), "sanity %u %zu %zu %zu", hrm_index(), known_live_bytes, used(), garbage_bytes());
return capacity() - known_live_bytes;
}
inline bool is_collection_set_candidate() const;
// Get the start of the unmarked area in this region.
HeapWord* top_at_mark_start() const;
void set_top_at_mark_start(HeapWord* value);
@ -378,7 +376,7 @@ public:
// This set only includes old regions - humongous regions only
// contain a single object which is either dead or live, and young regions are never even
// considered during concurrent scrub.
bool needs_scrubbing() const { return is_old(); }
bool needs_scrubbing() const;
// Same question as above, during full gc. Full gc needs to scrub any region that
// might be skipped for compaction. This includes young generation regions as the
// region relabeling to old happens later than scrubbing.
@ -440,6 +438,8 @@ public:
inline bool in_collection_set() const;
inline const char* collection_set_candidate_short_type_str() const;
void prepare_remset_for_scan();
// Methods used by the HeapRegionSetBase class and subclasses.
@ -501,8 +501,7 @@ public:
void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }
void calc_gc_efficiency(void);
double gc_efficiency() const { return _gc_efficiency;}
double calc_gc_efficiency();
uint young_index_in_cset() const { return _young_index_in_cset; }
void clear_young_index_in_cset() { _young_index_in_cset = 0; }
View File
@ -162,6 +162,10 @@ inline HeapWord* HeapRegion::next_live_in_unparsable(const HeapWord* p, HeapWord
return next_live_in_unparsable(bitmap, p, limit);
}
inline bool HeapRegion::is_collection_set_candidate() const {
return G1CollectedHeap::heap()->is_collection_set_candidate(this);
}
inline size_t HeapRegion::block_size(const HeapWord* p) const {
return block_size(p, parsable_bottom());
}
@ -290,14 +294,18 @@ inline void HeapRegion::reset_parsable_bottom() {
}
inline void HeapRegion::note_start_of_marking() {
set_top_at_mark_start(top());
_gc_efficiency = -1.0;
assert(top_at_mark_start() == bottom(), "CA region's TAMS must always be at bottom");
if (is_old_or_humongous()) {
set_top_at_mark_start(top());
}
}
inline void HeapRegion::note_end_of_marking(size_t marked_bytes) {
assert_at_safepoint();
_garbage_bytes = byte_size(bottom(), top_at_mark_start()) - marked_bytes;
if (top_at_mark_start() != bottom()) {
_garbage_bytes = byte_size(bottom(), top_at_mark_start()) - marked_bytes;
}
if (needs_scrubbing()) {
_parsable_bottom = top_at_mark_start();
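For intuition: if top_at_mark_start() sits 1 MB above bottom() and marking found 600 KB of live data below it, the region records 1 MB - 600 KB = 400 KB of garbage; in the guarded case where TAMS stayed at bottom, _garbage_bytes is simply left untouched.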
@ -325,6 +333,10 @@ inline void HeapRegion::reset_top_at_mark_start() {
set_top_at_mark_start(bottom());
}
inline bool HeapRegion::needs_scrubbing() const {
return is_old();
}
inline bool HeapRegion::in_collection_set() const {
return G1CollectedHeap::heap()->is_in_cset(this);
}
View File
@ -341,6 +341,14 @@ public:
template<typename E>
const GrowableArrayView<E> GrowableArrayView<E>::EMPTY(nullptr, 0, 0);
template <typename E>
class GrowableArrayFromArray : public GrowableArrayView<E> {
public:
GrowableArrayFromArray<E>(E* data, int len) :
GrowableArrayView<E>(data, len, len) {}
};
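A hypothetical usage sketch, not part of this patch and assuming GrowableArrayView's usual length() and at() accessors: the new adapter turns an existing C array into a non-owning, fixed-size view without copying, so code expecting a view can be handed stack or arena memory.

// Hypothetical illustration only; the wrapped storage is neither owned nor resized.
#include "utilities/growableArray.hpp"

static int sum_elements(int* data, int len) {
  GrowableArrayFromArray<int> view(data, len);   // length == capacity == len
  int sum = 0;
  for (int i = 0; i < view.length(); i++) {
    sum += view.at(i);
  }
  return sum;
}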
// GrowableArrayWithAllocator extends the "view" with
// the capability to grow and deallocate the data array.
//