/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTIONSET_HPP
#define SHARE_GC_G1_G1COLLECTIONSET_HPP

#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

class G1CollectedHeap;
class G1CollectionSetCandidates;
class G1CollectorState;
class G1GCPhaseTimes;
class G1ParScanThreadStateSet;
class G1Policy;
class G1SurvivorRegions;
class HeapRegion;
class HeapRegionClosure;

class G1CollectionSet {
  G1CollectedHeap* _g1h;
  G1Policy* _policy;

  // All old gen collection set candidate regions for the current mixed GC phase.
  G1CollectionSetCandidates* _candidates;

  uint _eden_region_length;
  uint _survivor_region_length;
  uint _old_region_length;

  // The actual collection set as a set of region indices.
  // All entries in _collection_set_regions below _collection_set_cur_length are
  // assumed to be valid entries.
  // We assume that at any time there is at most one writer and (one or more)
  // concurrent readers. This means a storestore barrier on the writer side and a
  // loadload barrier on the reader side are sufficient.
  uint* _collection_set_regions;
  volatile size_t _collection_set_cur_length;
  size_t _collection_set_max_length;
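
  // Illustrative sketch of the publication protocol described above, assuming
  // HotSpot's OrderAccess barriers (see g1CollectionSet.cpp for the actual code):
  //
  //   Writer:  _collection_set_regions[_collection_set_cur_length] = region_index;
  //            OrderAccess::storestore();   // make the entry visible before the new length
  //            _collection_set_cur_length += 1;
  //
  //   Reader:  size_t len = _collection_set_cur_length;
  //            OrderAccess::loadload();     // read the length before reading the entries
  //            // entries [0, len) can now be read safely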

  // When doing mixed collections we can add old regions to the collection set, which
  // can be collected if there is enough time. We call these optional regions and the
  // pointers to these regions are stored in the array below.
  HeapRegion** _optional_regions;
  uint _optional_region_length;
  uint _optional_region_max_length;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_part() when adding old regions
  // (if any) to the collection set.
  size_t _bytes_used_before;

  size_t _recorded_rs_lengths;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_build_state;

  // The number of bytes in the incrementally built collection set.
  // Used to set _bytes_used_before at the start of an evacuation pause.
  size_t _inc_bytes_used_before;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate the changes in this field and add them to
  // _inc_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_recorded_rs_lengths_diffs;
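
  // Illustrative sketch: a refinement-side sample conceptually folds the change in a
  // region's RSet length into this diff field,
  //   _inc_recorded_rs_lengths_diffs += (ssize_t)new_rs_length - (ssize_t)old_rs_length;
  // and finalize_incremental_building() then adds the accumulated difference to
  // _inc_recorded_rs_lengths and resets the diff to zero (see g1CollectionSet.cpp).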

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_predicted_elapsed_time_ms;

  // See the comment for _inc_recorded_rs_lengths_diffs.
  double _inc_predicted_elapsed_time_ms_diffs;

  G1CollectorState* collector_state();
  G1GCPhaseTimes* phase_times();

  void verify_young_cset_indices() const NOT_DEBUG_RETURN;
  void add_as_optional(HeapRegion* hr);
  void add_as_old(HeapRegion* hr);
  bool optional_is_full();

public:
  G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
  ~G1CollectionSet();

  // Initializes the collection set, given the maximum possible length of the collection set.
  void initialize(uint max_region_length);
  void initialize_optional(uint max_length);
  void free_optional_regions();

  void clear_candidates();

  void set_candidates(G1CollectionSetCandidates* candidates) {
    assert(_candidates == NULL, "Trying to replace collection set candidates.");
    _candidates = candidates;
  }
  G1CollectionSetCandidates* candidates() { return _candidates; }

  void init_region_lengths(uint eden_cset_region_length,
                           uint survivor_cset_region_length);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint region_length() const       { return young_region_length() +
                                            old_region_length(); }
  uint young_region_length() const { return eden_region_length() +
                                            survivor_region_length(); }

  uint eden_region_length() const     { return _eden_region_length;     }
  uint survivor_region_length() const { return _survivor_region_length; }
  uint old_region_length() const      { return _old_region_length;      }
  uint optional_region_length() const { return _optional_region_length; }

  // Incremental collection set support

  // Initialize incremental collection set info.
  void start_incremental_building();

  // Perform any final calculations on the incremental collection set fields
  // before we can use them.
  void finalize_incremental_building();

  // Reset the contents of the collection set.
  void clear();

  // Iterate over the collection set, applying the given HeapRegionClosure to all regions.
  // Iteration may be aborted via the return value of the called closure method.
  void iterate(HeapRegionClosure* cl) const;

  // Iterate over the collection set, applying the given HeapRegionClosure to all regions,
  // trying to evenly spread out the starting positions of the total_workers workers
  // based on the caller's worker_id.
  void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
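
  // A possible starting-position scheme (sketch; see g1CollectionSet.cpp for the actual code):
  //   size_t start = ((size_t)worker_id * length) / total_workers;
  // with each worker walking the region indices from its start and wrapping around at the
  // end, so that the total_workers starting offsets are spread evenly over the collection set.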

  // Stop adding regions to the incremental collection set.
  void stop_incremental_building() { _inc_build_state = Inactive; }

  size_t recorded_rs_lengths() { return _recorded_rs_lengths; }

  size_t bytes_used_before() const {
    return _bytes_used_before;
  }

  void reset_bytes_used_before() {
    _bytes_used_before = 0;
  }

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set".
  double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
  void finalize_old_part(double time_remaining_ms);

  // Add old region "hr" to the collection set.
  void add_old_region(HeapRegion* hr);

  // Add old region "hr" to the optional collection set.
  void add_optional_region(HeapRegion* hr);

  // Update the aggregated information for the incrementally built collection set
  // with the new remembered set length for region hr.
  void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);

  // Add eden region to the collection set.
  void add_eden_region(HeapRegion* hr);

  // Add survivor region to the collection set.
  void add_survivor_regions(HeapRegion* hr);

#ifndef PRODUCT
  bool verify_young_ages();

  void print(outputStream* st);
#endif // !PRODUCT

  double predict_region_elapsed_time_ms(HeapRegion* hr);

  void clear_optional_region(const HeapRegion* hr);

  HeapRegion* optional_region_at(uint i) const {
    assert(_optional_regions != NULL, "Not yet initialized");
    assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
    return _optional_regions[i];
  }

  HeapRegion* remove_last_optional_region() {
    assert(_optional_regions != NULL, "Not yet initialized");
    assert(_optional_region_length != 0, "No region to remove");
    _optional_region_length--;
    HeapRegion* removed = _optional_regions[_optional_region_length];
    _optional_regions[_optional_region_length] = NULL;
    return removed;
  }

private:
  // Update the incremental collection set information when adding a region.
  void add_young_region_common(HeapRegion* hr);
};

// Helper class to manage the optional regions in a Mixed collection.
class G1OptionalCSet : public StackObj {
private:
  G1CollectionSet* _cset;
  G1ParScanThreadStateSet* _pset;
  uint _current_index;
  uint _current_limit;
  bool _prepare_failed;
  bool _evacuation_failed;

  void prepare_to_evacuate_optional_region(HeapRegion* hr);

public:
  static const uint InvalidCSetIndex = UINT_MAX;

  G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
    _cset(cset),
    _pset(pset),
    _current_index(0),
    _current_limit(0),
    _prepare_failed(false),
    _evacuation_failed(false) { }
  // The destructor returns regions to the collection set candidates set and
  // frees the optional structure in the collection set.
  ~G1OptionalCSet();

  uint current_index() { return _current_index; }
  uint current_limit() { return _current_limit; }

  uint size();
  bool is_empty();

  HeapRegion* region_at(uint index);

  // Prepare a set of regions for optional evacuation.
  void prepare_evacuation(double time_left_ms);
  bool prepare_failed();

  // Complete the evacuation of the previously prepared regions by updating
  // their state and checking for failures.
  void complete_evacuation();
  bool evacuation_failed();
};

#endif // SHARE_GC_G1_G1COLLECTIONSET_HPP