8213890: Implementation of JEP 344: Abortable Mixed Collections for G1

Co-authored-by: Erik Helin <erik.helin@oracle.com>
Reviewed-by: tschatzl, kbarrett

parent 0874f1945e
commit 37f135132e

Changed files under src/hotspot/share/gc:

g1/g1CollectedHeap.cpp, g1/g1CollectedHeap.hpp, g1/g1CollectionSet.cpp, g1/g1CollectionSet.hpp,
g1/g1GCPhaseTimes.cpp, g1/g1GCPhaseTimes.hpp, g1/g1InCSetState.hpp, g1/g1OopClosures.hpp,
g1/g1OopClosures.inline.hpp, g1/g1OopStarChunkedList.cpp, g1/g1OopStarChunkedList.hpp,
g1/g1OopStarChunkedList.inline.hpp, g1/g1ParScanThreadState.cpp, g1/g1ParScanThreadState.hpp,
g1/g1ParScanThreadState.inline.hpp, g1/g1Policy.hpp, g1/g1RemSet.cpp, g1/g1RemSet.hpp,
g1/heapRegion.cpp, g1/heapRegion.hpp, plus files under shared/.
@@ -3002,11 +3002,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

// Initialize the GC alloc regions.
_allocator->init_gc_alloc_regions(evacuation_info);

G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
G1ParScanThreadStateSet per_thread_states(this,
workers()->active_workers(),
collection_set()->young_region_length(),
collection_set()->optional_region_length());
pre_evacuate_collection_set();

// Actually do the work...
evacuate_collection_set(&per_thread_states);
evacuate_optional_collection_set(&per_thread_states);

post_evacuate_collection_set(evacuation_info, &per_thread_states);
@@ -3197,11 +3201,11 @@ void G1ParEvacuateFollowersClosure::do_void() {
EventGCPhaseParallel event;
G1ParScanThreadState* const pss = par_scan_state();
pss->trim_queue();
event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ObjCopy));
event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
do {
EventGCPhaseParallel event;
pss->steal_and_trim_queue(queues());
event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ObjCopy));
event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
} while (!offer_termination());
}
@@ -3256,7 +3260,7 @@ public:
size_t evac_term_attempts = 0;
{
double start = os::elapsedTime();
G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::ObjCopy);
evac.do_void();

evac_term_attempts = evac.term_attempts();
@@ -3547,7 +3551,7 @@ public:
G1CopyingKeepAliveClosure keep_alive(_g1h, pss);

// Complete GC closure
G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator, G1GCPhaseTimes::ObjCopy);

// Call the reference processing task's work routine.
_proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
@@ -3719,6 +3723,145 @@ void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_threa
phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
}

class G1EvacuateOptionalRegionTask : public AbstractGangTask {
G1CollectedHeap* _g1h;
G1ParScanThreadStateSet* _per_thread_states;
G1OptionalCSet* _optional;
RefToScanQueueSet* _queues;
ParallelTaskTerminator _terminator;

Tickspan trim_ticks(G1ParScanThreadState* pss) {
Tickspan copy_time = pss->trim_ticks();
pss->reset_trim_ticks();
return copy_time;
}

void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
G1EvacuationRootClosures* root_cls = pss->closures();
G1ScanObjsDuringScanRSClosure obj_cl(_g1h, pss);

size_t scanned = 0;
size_t claimed = 0;
size_t skipped = 0;
size_t used_memory = 0;

Ticks start = Ticks::now();
Tickspan copy_time;

for (uint i = _optional->current_index(); i < _optional->current_limit(); i++) {
HeapRegion* hr = _optional->region_at(i);
G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
copy_time += trim_ticks(pss);

G1ScanRSForRegionClosure scan_rs_cl(_g1h->g1_rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
scan_rs_cl.do_heap_region(hr);
copy_time += trim_ticks(pss);
scanned += scan_rs_cl.cards_scanned();
claimed += scan_rs_cl.cards_claimed();
skipped += scan_rs_cl.cards_skipped();

// Chunk lists for this region is no longer needed.
used_memory += pss->oops_into_optional_region(hr)->used_memory();
}

Tickspan scan_time = (Ticks::now() - start) - copy_time;
G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());

p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, skipped, G1GCPhaseTimes::OptCSetSkippedCards);
p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
}

void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
Ticks start = Ticks::now();
G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
cl.do_void();

Tickspan evac_time = (Ticks::now() - start);
G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
}

public:
G1EvacuateOptionalRegionTask(G1CollectedHeap* g1h,
G1ParScanThreadStateSet* per_thread_states,
G1OptionalCSet* cset,
RefToScanQueueSet* queues,
uint n_workers) :
AbstractGangTask("G1 Evacuation Optional Region Task"),
_g1h(g1h),
_per_thread_states(per_thread_states),
_optional(cset),
_queues(queues),
_terminator(n_workers, _queues) {
}

void work(uint worker_id) {
ResourceMark rm;
HandleMark hm;

G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
pss->set_ref_discoverer(_g1h->ref_processor_stw());

scan_roots(pss, worker_id);
evacuate_live_objects(pss, worker_id);
}
};

void G1CollectedHeap::evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset) {
class G1MarkScope : public MarkScope {};
G1MarkScope code_mark_scope;

G1EvacuateOptionalRegionTask task(this, per_thread_states, ocset, _task_queues, workers()->active_workers());
workers()->run_task(&task);
}

void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
G1OptionalCSet optional_cset(&_collection_set, per_thread_states);
if (optional_cset.is_empty()) {
return;
}

if (evacuation_failed()) {
return;
}

G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
const double gc_start_time_ms = phase_times->cur_collection_start_sec() * 1000.0;

double start_time_sec = os::elapsedTime();

do {
double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
double time_left_ms = MaxGCPauseMillis - time_used_ms;

if (time_left_ms < 0) {
log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
break;
}

optional_cset.prepare_evacuation(time_left_ms * _g1_policy->optional_evacuation_fraction());
if (optional_cset.prepare_failed()) {
log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
break;
}

evacuate_optional_regions(per_thread_states, &optional_cset);

optional_cset.complete_evacuation();
if (optional_cset.evacuation_failed()) {
break;
}
} while (!optional_cset.is_empty());

phase_times->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
}

void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
// Also cleans the card table from temporary duplicate detection information used
// during UpdateRS/ScanRS.
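The do/while loop in evacuate_optional_collection_set() above is the core of the abort mechanism: after the mandatory collection set has been evacuated, batches of optional regions are prepared and evacuated only while the pause-time budget (MaxGCPauseMillis, scaled by optional_evacuation_fraction()) still allows it. A minimal standalone sketch of that time-budget loop, assuming simplified stand-in types rather than the HotSpot classes (the 200 ms target and 0.75 fraction below are illustrative):

```cpp
#include <chrono>
#include <cstdio>
#include <deque>

// Illustrative stand-ins, not the real HotSpot types.
struct OptionalRegion { double predicted_ms; };

struct OptionalSet {
  std::deque<OptionalRegion> regions;   // regions not yet evacuated
  std::deque<OptionalRegion> prepared;  // regions selected for the next batch

  bool empty() const { return regions.empty() && prepared.empty(); }

  // Select regions whose summed prediction fits into the given budget.
  bool prepare(double budget_ms) {
    double predicted = 0.0;
    while (!regions.empty() && predicted + regions.front().predicted_ms <= budget_ms) {
      predicted += regions.front().predicted_ms;
      prepared.push_back(regions.front());
      regions.pop_front();
    }
    return !prepared.empty();  // mirrors !prepare_failed()
  }

  void evacuate() { prepared.clear(); }  // stand-in for the parallel evacuation task
};

int main() {
  using Clock = std::chrono::steady_clock;
  const double pause_target_ms = 200.0;     // stand-in for MaxGCPauseMillis
  const double evacuation_fraction = 0.75;  // stand-in for optional_evacuation_fraction()

  OptionalSet cset;
  for (int i = 0; i < 10; i++) cset.regions.push_back({12.5});

  Clock::time_point pause_start = Clock::now();
  while (!cset.empty()) {
    double used_ms = std::chrono::duration<double, std::milli>(Clock::now() - pause_start).count();
    double left_ms = pause_target_ms - used_ms;
    if (left_ms < 0.0) break;                                 // pause budget exceeded, abort
    if (!cset.prepare(left_ms * evacuation_fraction)) break;  // nothing fits, abort
    cset.evacuate();                                          // evacuate the prepared batch
  }
  std::printf("optional regions left unevacuated: %zu\n", cset.regions.size());
  return 0;
}
```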
@@ -35,6 +35,7 @@
#include "gc/g1/g1EdenRegions.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HRPrinter.hpp"
@@ -567,6 +568,9 @@ public:
void register_old_region_with_cset(HeapRegion* r) {
_in_cset_fast_test.set_in_old(r->hrm_index());
}
void register_optional_region_with_cset(HeapRegion* r) {
_in_cset_fast_test.set_optional(r->hrm_index());
}
void clear_in_cset(const HeapRegion* hr) {
_in_cset_fast_test.clear(hr);
}
@@ -723,6 +727,8 @@ private:

// Actually do the work of evacuating the collection set.
void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);

void pre_evacuate_collection_set();
void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
@@ -1405,6 +1411,7 @@ protected:
G1ParScanThreadState* _par_scan_state;
RefToScanQueueSet* _queues;
ParallelTaskTerminator* _terminator;
G1GCPhaseTimes::GCParPhases _phase;

G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
RefToScanQueueSet* queues() { return _queues; }
@@ -1414,10 +1421,11 @@ public:
G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
G1ParScanThreadState* par_scan_state,
RefToScanQueueSet* queues,
ParallelTaskTerminator* terminator)
ParallelTaskTerminator* terminator,
G1GCPhaseTimes::GCParPhases phase)
: _start_term(0.0), _term_time(0.0), _term_attempts(0),
_g1h(g1h), _par_scan_state(par_scan_state),
_queues(queues), _terminator(terminator) {}
_queues(queues), _terminator(terminator), _phase(phase) {}

void do_void();
@ -23,15 +23,17 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1CollectionSet.hpp"
|
||||
#include "gc/g1/g1CollectorState.hpp"
|
||||
#include "gc/g1/g1ParScanThreadState.hpp"
|
||||
#include "gc/g1/g1Policy.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "gc/g1/heapRegionRemSet.hpp"
|
||||
#include "gc/g1/heapRegionSet.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/quickSort.hpp"
|
||||
|
||||
G1CollectorState* G1CollectionSet::collector_state() {
|
||||
@ -60,6 +62,9 @@ G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
|
||||
_collection_set_regions(NULL),
|
||||
_collection_set_cur_length(0),
|
||||
_collection_set_max_length(0),
|
||||
_optional_regions(NULL),
|
||||
_optional_region_length(0),
|
||||
_optional_region_max_length(0),
|
||||
_bytes_used_before(0),
|
||||
_recorded_rs_lengths(0),
|
||||
_inc_build_state(Inactive),
|
||||
@ -74,6 +79,7 @@ G1CollectionSet::~G1CollectionSet() {
|
||||
if (_collection_set_regions != NULL) {
|
||||
FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
|
||||
}
|
||||
free_optional_regions();
|
||||
delete _cset_chooser;
|
||||
}
|
||||
|
||||
@ -88,6 +94,7 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
|
||||
"Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
|
||||
|
||||
_old_region_length = 0;
|
||||
_optional_region_length = 0;
|
||||
}
|
||||
|
||||
void G1CollectionSet::initialize(uint max_region_length) {
|
||||
@ -96,6 +103,23 @@ void G1CollectionSet::initialize(uint max_region_length) {
|
||||
_collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
|
||||
}
|
||||
|
||||
void G1CollectionSet::initialize_optional(uint max_length) {
|
||||
assert(_optional_regions == NULL, "Already initialized");
|
||||
assert(_optional_region_length == 0, "Already initialized");
|
||||
assert(_optional_region_max_length == 0, "Already initialized");
|
||||
_optional_region_max_length = max_length;
|
||||
_optional_regions = NEW_C_HEAP_ARRAY(HeapRegion*, _optional_region_max_length, mtGC);
|
||||
}
|
||||
|
||||
void G1CollectionSet::free_optional_regions() {
|
||||
_optional_region_length = 0;
|
||||
_optional_region_max_length = 0;
|
||||
if (_optional_regions != NULL) {
|
||||
FREE_C_HEAP_ARRAY(HeapRegion*, _optional_regions);
|
||||
_optional_regions = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
|
||||
_recorded_rs_lengths = rs_lengths;
|
||||
}
|
||||
@ -104,7 +128,8 @@ void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
|
||||
void G1CollectionSet::add_old_region(HeapRegion* hr) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
|
||||
assert(_inc_build_state == Active, "Precondition");
|
||||
assert(_inc_build_state == Active || hr->index_in_opt_cset() != G1OptionalCSet::InvalidCSetIndex,
|
||||
"Precondition, actively building cset or adding optional later on");
|
||||
assert(hr->is_old(), "the region should be old");
|
||||
|
||||
assert(!hr->in_collection_set(), "should not already be in the CSet");
|
||||
@ -117,6 +142,22 @@ void G1CollectionSet::add_old_region(HeapRegion* hr) {
|
||||
size_t rs_length = hr->rem_set()->occupied();
|
||||
_recorded_rs_lengths += rs_length;
|
||||
_old_region_length += 1;
|
||||
|
||||
log_trace(gc, cset)("Added old region %d to collection set", hr->hrm_index());
|
||||
}
|
||||
|
||||
void G1CollectionSet::add_optional_region(HeapRegion* hr) {
|
||||
assert(!optional_is_full(), "Precondition, must have room left for this region");
|
||||
assert(hr->is_old(), "the region should be old");
|
||||
assert(!hr->in_collection_set(), "should not already be in the CSet");
|
||||
|
||||
_g1h->register_optional_region_with_cset(hr);
|
||||
|
||||
_optional_regions[_optional_region_length] = hr;
|
||||
uint index = _optional_region_length++;
|
||||
hr->set_index_in_opt_cset(index);
|
||||
|
||||
log_trace(gc, cset)("Added region %d to optional collection set (%u)", hr->hrm_index(), _optional_region_length);
|
||||
}
|
||||
|
||||
// Initialize the per-collection-set information
|
||||
@ -168,6 +209,7 @@ void G1CollectionSet::finalize_incremental_building() {
|
||||
void G1CollectionSet::clear() {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
_collection_set_cur_length = 0;
|
||||
_optional_region_length = 0;
|
||||
}
|
||||
|
||||
void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
|
||||
@ -396,6 +438,30 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
|
||||
return time_remaining_ms;
|
||||
}
|
||||
|
||||
void G1CollectionSet::add_as_old(HeapRegion* hr) {
|
||||
cset_chooser()->pop(); // already have region via peek()
|
||||
_g1h->old_set_remove(hr);
|
||||
add_old_region(hr);
|
||||
}
|
||||
|
||||
void G1CollectionSet::add_as_optional(HeapRegion* hr) {
|
||||
assert(_optional_regions != NULL, "Must not be called before array is allocated");
|
||||
cset_chooser()->pop(); // already have region via peek()
|
||||
_g1h->old_set_remove(hr);
|
||||
add_optional_region(hr);
|
||||
}
|
||||
|
||||
bool G1CollectionSet::optional_is_full() {
|
||||
assert(_optional_region_length <= _optional_region_max_length, "Invariant");
|
||||
return _optional_region_length == _optional_region_max_length;
|
||||
}
|
||||
|
||||
void G1CollectionSet::clear_optional_region(const HeapRegion* hr) {
|
||||
assert(_optional_regions != NULL, "Must not be called before array is allocated");
|
||||
uint index = hr->index_in_opt_cset();
|
||||
_optional_regions[index] = NULL;
|
||||
}
|
||||
|
||||
static int compare_region_idx(const uint a, const uint b) {
|
||||
if (a > b) {
|
||||
return 1;
|
||||
@ -409,21 +475,28 @@ static int compare_region_idx(const uint a, const uint b) {
|
||||
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
|
||||
double non_young_start_time_sec = os::elapsedTime();
|
||||
double predicted_old_time_ms = 0.0;
|
||||
double predicted_optional_time_ms = 0.0;
|
||||
double optional_threshold_ms = time_remaining_ms * _policy->optional_prediction_fraction();
|
||||
uint expensive_region_num = 0;
|
||||
|
||||
if (collector_state()->in_mixed_phase()) {
|
||||
cset_chooser()->verify();
|
||||
const uint min_old_cset_length = _policy->calc_min_old_cset_length();
|
||||
const uint max_old_cset_length = _policy->calc_max_old_cset_length();
|
||||
|
||||
uint expensive_region_num = 0;
|
||||
const uint max_old_cset_length = MAX2(min_old_cset_length, _policy->calc_max_old_cset_length());
|
||||
bool check_time_remaining = _policy->adaptive_young_list_length();
|
||||
|
||||
initialize_optional(max_old_cset_length - min_old_cset_length);
|
||||
log_debug(gc, ergo, cset)("Start adding old regions for mixed gc. min %u regions, max %u regions, "
|
||||
"time remaining %1.2fms, optional threshold %1.2fms",
|
||||
min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
|
||||
|
||||
HeapRegion* hr = cset_chooser()->peek();
|
||||
while (hr != NULL) {
|
||||
if (old_region_length() >= max_old_cset_length) {
|
||||
if (old_region_length() + optional_region_length() >= max_old_cset_length) {
|
||||
// Added maximum number of old regions to the CSet.
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
|
||||
old_region_length(), max_old_cset_length);
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). "
|
||||
"old %u regions, optional %u regions",
|
||||
old_region_length(), optional_region_length());
|
||||
break;
|
||||
}
|
||||
|
||||
@ -437,69 +510,66 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
|
||||
// reclaimable space is at or below the waste threshold. Stop
|
||||
// adding old regions to the CSet.
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
|
||||
"old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
|
||||
old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
|
||||
"reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
|
||||
byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
|
||||
reclaimable_percent, G1HeapWastePercent);
|
||||
break;
|
||||
}
|
||||
|
||||
double predicted_time_ms = predict_region_elapsed_time_ms(hr);
|
||||
if (check_time_remaining) {
|
||||
if (predicted_time_ms > time_remaining_ms) {
|
||||
// Too expensive for the current CSet.
|
||||
|
||||
if (old_region_length() >= min_old_cset_length) {
|
||||
// We have added the minimum number of old regions to the CSet,
|
||||
// we are done with this CSet.
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
|
||||
"predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
|
||||
predicted_time_ms, time_remaining_ms, old_region_length(), min_old_cset_length);
|
||||
break;
|
||||
}
|
||||
|
||||
// We'll add it anyway given that we haven't reached the
|
||||
// minimum number of old regions.
|
||||
expensive_region_num += 1;
|
||||
time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
|
||||
// Add regions to old set until we reach minimum amount
|
||||
if (old_region_length() < min_old_cset_length) {
|
||||
predicted_old_time_ms += predicted_time_ms;
|
||||
add_as_old(hr);
|
||||
// Record the number of regions added when no time remaining
|
||||
if (time_remaining_ms == 0.0) {
|
||||
expensive_region_num++;
|
||||
}
|
||||
} else {
|
||||
if (old_region_length() >= min_old_cset_length) {
|
||||
// In the non-auto-tuning case, we'll finish adding regions
|
||||
// to the CSet if we reach the minimum.
|
||||
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
|
||||
old_region_length(), min_old_cset_length);
|
||||
// In the non-auto-tuning case, we'll finish adding regions
|
||||
// to the CSet if we reach the minimum.
|
||||
if (!check_time_remaining) {
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min).");
|
||||
break;
|
||||
}
|
||||
// Keep adding regions to old set until we reach optional threshold
|
||||
if (time_remaining_ms > optional_threshold_ms) {
|
||||
predicted_old_time_ms += predicted_time_ms;
|
||||
add_as_old(hr);
|
||||
} else if (time_remaining_ms > 0) {
|
||||
// Keep adding optional regions until time is up
|
||||
if (!optional_is_full()) {
|
||||
predicted_optional_time_ms += predicted_time_ms;
|
||||
add_as_optional(hr);
|
||||
} else {
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (optional set full).");
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high).");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// We will add this region to the CSet.
|
||||
time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
|
||||
predicted_old_time_ms += predicted_time_ms;
|
||||
cset_chooser()->pop(); // already have region via peek()
|
||||
_g1h->old_set_remove(hr);
|
||||
add_old_region(hr);
|
||||
|
||||
hr = cset_chooser()->peek();
|
||||
}
|
||||
if (hr == NULL) {
|
||||
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
|
||||
}
|
||||
|
||||
if (expensive_region_num > 0) {
|
||||
// We print the information once here at the end, predicated on
|
||||
// whether we added any apparently expensive regions or not, to
|
||||
// avoid generating output per region.
|
||||
log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
|
||||
"old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
|
||||
old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
|
||||
}
|
||||
|
||||
cset_chooser()->verify();
|
||||
}
|
||||
|
||||
stop_incremental_building();
|
||||
|
||||
log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
|
||||
old_region_length(), predicted_old_time_ms, time_remaining_ms);
|
||||
log_debug(gc, ergo, cset)("Finish choosing CSet regions old: %u, optional: %u, "
|
||||
"predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
|
||||
old_region_length(), optional_region_length(),
|
||||
predicted_old_time_ms, predicted_optional_time_ms, time_remaining_ms);
|
||||
if (expensive_region_num > 0) {
|
||||
log_debug(gc, ergo, cset)("CSet contains %u old regions that were added although the predicted time was too high.",
|
||||
expensive_region_num);
|
||||
}
|
||||
|
||||
double non_young_end_time_sec = os::elapsedTime();
|
||||
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
|
||||
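The rewritten loop above splits the cset-chooser candidates into mandatory old regions and optional regions based on the remaining time budget: the minimum old count is always added, regions stay mandatory while the remaining time is above the optional threshold, and after that they are added as optional only while some time remains and the optional array has room. A compact standalone sketch of that selection policy, with simplified stand-ins and several details omitted (the reclaimable-percentage check against G1HeapWastePercent, the non-adaptive young-list case, and the expensive-region accounting):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

struct Candidate { double predicted_ms; };
struct Selection { std::vector<Candidate> old_set, optional_set; };

// Mirrors the shape of the new finalize_old_part() policy: fill the minimum as old,
// keep adding old while above the optional threshold, then add optional while any
// time remains and the optional array still has room.
Selection select(std::vector<Candidate> candidates,
                 double time_remaining_ms,
                 double optional_prediction_fraction,  // stand-in for the G1 policy fraction
                 size_t min_old, size_t max_old) {
  Selection s;
  const double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction;
  for (const Candidate& c : candidates) {
    if (s.old_set.size() + s.optional_set.size() >= max_old) break;
    time_remaining_ms = std::max(time_remaining_ms - c.predicted_ms, 0.0);
    if (s.old_set.size() < min_old) {
      s.old_set.push_back(c);             // mandatory minimum, even if expensive
    } else if (time_remaining_ms > optional_threshold_ms) {
      s.old_set.push_back(c);             // comfortably within budget: still mandatory
    } else if (time_remaining_ms > 0.0 && s.optional_set.size() < max_old - min_old) {
      s.optional_set.push_back(c);        // tight budget: evacuate only if time allows
    } else {
      break;                              // budget exhausted or optional array full
    }
  }
  return s;
}

int main() {
  std::vector<Candidate> cands(20, Candidate{5.0});
  Selection s = select(cands, 60.0, 0.2, 4, 16);
  std::printf("old: %zu, optional: %zu\n", s.old_set.size(), s.optional_set.size());
  return 0;
}
```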
@ -507,6 +577,86 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
|
||||
QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
|
||||
}
|
||||
|
||||
HeapRegion* G1OptionalCSet::region_at(uint index) {
|
||||
return _cset->optional_region_at(index);
|
||||
}
|
||||
|
||||
void G1OptionalCSet::prepare_evacuation(double time_limit) {
|
||||
assert(_current_index == _current_limit, "Before prepare no regions should be ready for evac");
|
||||
|
||||
uint prepared_regions = 0;
|
||||
double prediction_ms = 0;
|
||||
|
||||
_prepare_failed = true;
|
||||
for (uint i = _current_index; i < _cset->optional_region_length(); i++) {
|
||||
HeapRegion* hr = region_at(i);
|
||||
prediction_ms += _cset->predict_region_elapsed_time_ms(hr);
|
||||
if (prediction_ms > time_limit) {
|
||||
log_debug(gc, cset)("Prepared %u regions for optional evacuation. Predicted time: %.3fms", prepared_regions, prediction_ms);
|
||||
return;
|
||||
}
|
||||
|
||||
// This region will be included in the next optional evacuation.
|
||||
prepare_to_evacuate_optional_region(hr);
|
||||
prepared_regions++;
|
||||
_current_limit++;
|
||||
_prepare_failed = false;
|
||||
}
|
||||
|
||||
log_debug(gc, cset)("Prepared all %u regions for optional evacuation. Predicted time: %.3fms",
|
||||
prepared_regions, prediction_ms);
|
||||
}
|
||||
|
||||
bool G1OptionalCSet::prepare_failed() {
|
||||
return _prepare_failed;
|
||||
}
|
||||
|
||||
void G1OptionalCSet::complete_evacuation() {
|
||||
_evacuation_failed = false;
|
||||
for (uint i = _current_index; i < _current_limit; i++) {
|
||||
HeapRegion* hr = region_at(i);
|
||||
_cset->clear_optional_region(hr);
|
||||
if (hr->evacuation_failed()){
|
||||
_evacuation_failed = true;
|
||||
}
|
||||
}
|
||||
_current_index = _current_limit;
|
||||
}
|
||||
|
||||
bool G1OptionalCSet::evacuation_failed() {
|
||||
return _evacuation_failed;
|
||||
}
|
||||
|
||||
G1OptionalCSet::~G1OptionalCSet() {
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
while (!is_empty()) {
|
||||
// We want to return regions not evacuated to the
|
||||
// chooser in reverse order to maintain the old order.
|
||||
HeapRegion* hr = _cset->remove_last_optional_region();
|
||||
assert(hr != NULL, "Should be valid region left");
|
||||
_pset->record_unused_optional_region(hr);
|
||||
g1h->old_set_add(hr);
|
||||
g1h->clear_in_cset(hr);
|
||||
hr->set_index_in_opt_cset(InvalidCSetIndex);
|
||||
_cset->cset_chooser()->push(hr);
|
||||
}
|
||||
_cset->free_optional_regions();
|
||||
}
|
||||
|
||||
uint G1OptionalCSet::size() {
|
||||
return _cset->optional_region_length() - _current_index;
|
||||
}
|
||||
|
||||
bool G1OptionalCSet::is_empty() {
|
||||
return size() == 0;
|
||||
}
|
||||
|
||||
void G1OptionalCSet::prepare_to_evacuate_optional_region(HeapRegion* hr) {
|
||||
log_trace(gc, cset)("Adding region %u for optional evacuation", hr->hrm_index());
|
||||
G1CollectedHeap::heap()->clear_in_cset(hr);
|
||||
_cset->add_old_region(hr);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
|
||||
private:
|
||||
|
@ -32,6 +32,7 @@
|
||||
class G1CollectedHeap;
|
||||
class G1CollectorState;
|
||||
class G1GCPhaseTimes;
|
||||
class G1ParScanThreadStateSet;
|
||||
class G1Policy;
|
||||
class G1SurvivorRegions;
|
||||
class HeapRegion;
|
||||
@ -56,6 +57,13 @@ class G1CollectionSet {
|
||||
volatile size_t _collection_set_cur_length;
|
||||
size_t _collection_set_max_length;
|
||||
|
||||
// When doing mixed collections we can add old regions to the collection, which
|
||||
// can be collected if there is enough time. We call these optional regions and
|
||||
// the pointer to these regions are stored in the array below.
|
||||
HeapRegion** _optional_regions;
|
||||
uint _optional_region_length;
|
||||
uint _optional_region_max_length;
|
||||
|
||||
// The number of bytes in the collection set before the pause. Set from
|
||||
// the incrementally built collection set at the start of an evacuation
|
||||
// pause, and incremented in finalize_old_part() when adding old regions
|
||||
@ -106,15 +114,19 @@ class G1CollectionSet {
|
||||
G1CollectorState* collector_state();
|
||||
G1GCPhaseTimes* phase_times();
|
||||
|
||||
double predict_region_elapsed_time_ms(HeapRegion* hr);
|
||||
|
||||
void verify_young_cset_indices() const NOT_DEBUG_RETURN;
|
||||
void add_as_optional(HeapRegion* hr);
|
||||
void add_as_old(HeapRegion* hr);
|
||||
bool optional_is_full();
|
||||
|
||||
public:
|
||||
G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
|
||||
~G1CollectionSet();
|
||||
|
||||
// Initializes the collection set giving the maximum possible length of the collection set.
|
||||
void initialize(uint max_region_length);
|
||||
void initialize_optional(uint max_length);
|
||||
void free_optional_regions();
|
||||
|
||||
CollectionSetChooser* cset_chooser();
|
||||
|
||||
@ -131,6 +143,7 @@ public:
|
||||
uint eden_region_length() const { return _eden_region_length; }
|
||||
uint survivor_region_length() const { return _survivor_region_length; }
|
||||
uint old_region_length() const { return _old_region_length; }
|
||||
uint optional_region_length() const { return _optional_region_length; }
|
||||
|
||||
// Incremental collection set support
|
||||
|
||||
@@ -175,6 +188,9 @@ public:
// Add old region "hr" to the collection set.
void add_old_region(HeapRegion* hr);

// Add old region "hr" to optional collection set.
void add_optional_region(HeapRegion* hr);

// Update information about hr in the aggregated information for
// the incrementally built collection set.
void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
@@ -191,10 +207,73 @@ public:
void print(outputStream* st);
#endif // !PRODUCT

double predict_region_elapsed_time_ms(HeapRegion* hr);

void clear_optional_region(const HeapRegion* hr);

HeapRegion* optional_region_at(uint i) const {
assert(_optional_regions != NULL, "Not yet initialized");
assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
return _optional_regions[i];
}

HeapRegion* remove_last_optional_region() {
assert(_optional_regions != NULL, "Not yet initialized");
assert(_optional_region_length != 0, "No region to remove");
_optional_region_length--;
HeapRegion* removed = _optional_regions[_optional_region_length];
_optional_regions[_optional_region_length] = NULL;
return removed;
}

private:
// Update the incremental collection set information when adding a region.
void add_young_region_common(HeapRegion* hr);
};

// Helper class to manage the optional regions in a Mixed collection.
class G1OptionalCSet : public StackObj {
private:
G1CollectionSet* _cset;
G1ParScanThreadStateSet* _pset;
uint _current_index;
uint _current_limit;
bool _prepare_failed;
bool _evacuation_failed;

void prepare_to_evacuate_optional_region(HeapRegion* hr);

public:
static const uint InvalidCSetIndex = UINT_MAX;

G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
_cset(cset),
_pset(pset),
_current_index(0),
_current_limit(0),
_prepare_failed(false),
_evacuation_failed(false) { }
// The destructor returns regions to the cset-chooser and
// frees the optional structure in the cset.
~G1OptionalCSet();

uint current_index() { return _current_index; }
uint current_limit() { return _current_limit; }

uint size();
bool is_empty();

HeapRegion* region_at(uint index);

// Prepare a set of regions for optional evacuation.
void prepare_evacuation(double time_left_ms);
bool prepare_failed();

// Complete the evacuation of the previously prepared
// regions by updating their state and check for failures.
void complete_evacuation();
bool evacuation_failed();
};

#endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
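_current_index and _current_limit form a sliding window over the optional-region array: prepare_evacuation() advances the limit over the regions selected for the next batch, and complete_evacuation() advances the index past them once the batch is done. A standalone sketch of that window bookkeeping, using illustrative types rather than the HotSpot classes:

```cpp
#include <cassert>
#include <cstdio>
#include <utility>
#include <vector>

// Illustrative stand-in for the optional-region window kept by G1OptionalCSet.
class OptionalWindow {
  std::vector<double> _predicted_ms;  // one entry per optional region
  unsigned _current_index = 0;        // first region not yet evacuated
  unsigned _current_limit = 0;        // one past the last region prepared for the next batch

public:
  explicit OptionalWindow(std::vector<double> predictions)
    : _predicted_ms(std::move(predictions)) {}

  unsigned size() const { return (unsigned)_predicted_ms.size() - _current_index; }
  bool is_empty() const { return size() == 0; }

  // Advance the limit while the summed prediction still fits the budget.
  // Returns false if not even one region fits (mirrors prepare_failed()).
  bool prepare(double budget_ms) {
    assert(_current_index == _current_limit && "previous batch must be completed first");
    double predicted = 0.0;
    while (_current_limit < _predicted_ms.size()) {
      if (predicted + _predicted_ms[_current_limit] > budget_ms) break;
      predicted += _predicted_ms[_current_limit];
      _current_limit++;
    }
    return _current_limit != _current_index;
  }

  // The batch [_current_index, _current_limit) has been evacuated; slide the index forward.
  void complete() { _current_index = _current_limit; }
};

int main() {
  OptionalWindow w({3.0, 3.0, 3.0, 3.0});
  while (!w.is_empty() && w.prepare(6.0)) {  // 6 ms budget per batch: two regions at a time
    w.complete();
  }
  std::printf("regions left: %u\n", w.size());
  return 0;
}
```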
@ -73,11 +73,13 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
|
||||
_gc_par_phases[ScanHCC] = NULL;
|
||||
}
|
||||
_gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
|
||||
_gc_par_phases[OptScanRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan RS (ms):");
|
||||
_gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
|
||||
#if INCLUDE_AOT
|
||||
_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scanning (ms):");
|
||||
#endif
|
||||
_gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
|
||||
_gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
|
||||
_gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
|
||||
_gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
|
||||
_gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
|
||||
@ -90,6 +92,15 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
|
||||
_scan_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
|
||||
_gc_par_phases[ScanRS]->link_thread_work_items(_scan_rs_skipped_cards, ScanRSSkippedCards);
|
||||
|
||||
_opt_cset_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
|
||||
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_scanned_cards, OptCSetScannedCards);
|
||||
_opt_cset_claimed_cards = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Cards:");
|
||||
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_claimed_cards, OptCSetClaimedCards);
|
||||
_opt_cset_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
|
||||
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_skipped_cards, OptCSetSkippedCards);
|
||||
_opt_cset_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
|
||||
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_used_memory, OptCSetUsedMemory);
|
||||
|
||||
_update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:");
|
||||
_gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers, UpdateRSProcessedBuffers);
|
||||
_update_rs_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
|
||||
@ -120,6 +131,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
|
||||
|
||||
void G1GCPhaseTimes::reset() {
|
||||
_cur_collection_par_time_ms = 0.0;
|
||||
_cur_optional_evac_ms = 0.0;
|
||||
_cur_collection_code_root_fixup_time_ms = 0.0;
|
||||
_cur_strong_code_root_purge_time_ms = 0.0;
|
||||
_cur_evac_fail_recalc_used = 0.0;
|
||||
@ -227,11 +239,11 @@ void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs
|
||||
_gc_par_phases[phase]->add(worker_i, secs);
|
||||
}
|
||||
|
||||
void G1GCPhaseTimes::record_or_add_objcopy_time_secs(uint worker_i, double secs) {
|
||||
if (_gc_par_phases[ObjCopy]->get(worker_i) == _gc_par_phases[ObjCopy]->uninitialized()) {
|
||||
record_time_secs(ObjCopy, worker_i, secs);
|
||||
void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_i, double secs) {
|
||||
if (_gc_par_phases[phase]->get(worker_i) == _gc_par_phases[phase]->uninitialized()) {
|
||||
record_time_secs(phase, worker_i, secs);
|
||||
} else {
|
||||
add_time_secs(ObjCopy, worker_i, secs);
|
||||
add_time_secs(phase, worker_i, secs);
|
||||
}
|
||||
}
|
||||
|
||||
@ -239,6 +251,10 @@ void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, s
|
||||
_gc_par_phases[phase]->set_thread_work_item(worker_i, count, index);
|
||||
}
|
||||
|
||||
void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index) {
|
||||
_gc_par_phases[phase]->set_or_add_thread_work_item(worker_i, count, index);
|
||||
}
|
||||
|
||||
// return the average time for a phase in milliseconds
|
||||
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
|
||||
return _gc_par_phases[phase]->average() * 1000.0;
|
||||
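The record_or_add_* variants above exist because, with abortable mixed collections, the same worker may report into one phase several times during a single pause (for example ObjCopy from the mandatory evacuation and again from queue trimming, or OptScanRS once per optional batch): the first report initializes the per-worker slot and later reports accumulate into it. A simplified standalone sketch of that set-or-accumulate pattern (the real WorkerDataArray does considerably more):

```cpp
#include <cstdio>
#include <vector>

// Simplified stand-in for WorkerDataArray<double>: one slot per GC worker.
class PhaseTimes {
  static constexpr double uninitialized = -1.0;
  std::vector<double> _secs;

public:
  explicit PhaseTimes(unsigned max_workers) : _secs(max_workers, uninitialized) {}

  void record_time_secs(unsigned worker, double secs) { _secs[worker] = secs; }
  void add_time_secs(unsigned worker, double secs) { _secs[worker] += secs; }

  // First report for this worker sets the value, later reports accumulate.
  void record_or_add_time_secs(unsigned worker, double secs) {
    if (_secs[worker] == uninitialized) {
      record_time_secs(worker, secs);
    } else {
      add_time_secs(worker, secs);
    }
  }

  double get(unsigned worker) const { return _secs[worker]; }
};

int main() {
  PhaseTimes obj_copy(4);
  obj_copy.record_or_add_time_secs(0, 0.012);  // mandatory evacuation
  obj_copy.record_or_add_time_secs(0, 0.003);  // trim work during optional evacuation
  std::printf("worker 0 ObjCopy: %.3fs\n", obj_copy.get(0));  // 0.015
  return 0;
}
```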
@ -348,6 +364,16 @@ double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
|
||||
return sum_ms;
|
||||
}
|
||||
|
||||
double G1GCPhaseTimes::print_evacuate_optional_collection_set() const {
|
||||
const double sum_ms = _cur_optional_evac_ms;
|
||||
if (sum_ms > 0) {
|
||||
info_time("Evacuate Optional Collection Set", sum_ms);
|
||||
debug_phase(_gc_par_phases[OptScanRS]);
|
||||
debug_phase(_gc_par_phases[OptObjCopy]);
|
||||
}
|
||||
return sum_ms;
|
||||
}
|
||||
|
||||
double G1GCPhaseTimes::print_evacuate_collection_set() const {
|
||||
const double sum_ms = _cur_collection_par_time_ms;
|
||||
|
||||
@ -457,6 +483,7 @@ void G1GCPhaseTimes::print() {
|
||||
double accounted_ms = 0.0;
|
||||
accounted_ms += print_pre_evacuate_collection_set();
|
||||
accounted_ms += print_evacuate_collection_set();
|
||||
accounted_ms += print_evacuate_optional_collection_set();
|
||||
accounted_ms += print_post_evacuate_collection_set();
|
||||
print_other(accounted_ms);
|
||||
|
||||
@ -485,11 +512,13 @@ const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
|
||||
"UpdateRS",
|
||||
"ScanHCC",
|
||||
"ScanRS",
|
||||
"OptScanRS",
|
||||
"CodeRoots",
|
||||
#if INCLUDE_AOT
|
||||
"AOTCodeRoots",
|
||||
#endif
|
||||
"ObjCopy",
|
||||
"OptObjCopy",
|
||||
"Termination",
|
||||
"Other",
|
||||
"GCWorkerTotal",
|
||||
@ -561,7 +590,7 @@ G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() {
|
||||
_trim_tracker.stop();
|
||||
// Exclude trim time by increasing the start time.
|
||||
_start_time += _trim_time;
|
||||
_phase_times->record_or_add_objcopy_time_secs(_worker_id, _trim_time.seconds());
|
||||
_phase_times->record_or_add_time_secs(G1GCPhaseTimes::ObjCopy, _worker_id, _trim_time.seconds());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -63,11 +63,13 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
UpdateRS,
|
||||
ScanHCC,
|
||||
ScanRS,
|
||||
OptScanRS,
|
||||
CodeRoots,
|
||||
#if INCLUDE_AOT
|
||||
AOTCodeRoots,
|
||||
#endif
|
||||
ObjCopy,
|
||||
OptObjCopy,
|
||||
Termination,
|
||||
Other,
|
||||
GCWorkerTotal,
|
||||
@ -92,6 +94,13 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
UpdateRSSkippedCards
|
||||
};
|
||||
|
||||
enum GCOptCSetWorkItems {
|
||||
OptCSetScannedCards,
|
||||
OptCSetClaimedCards,
|
||||
OptCSetSkippedCards,
|
||||
OptCSetUsedMemory
|
||||
};
|
||||
|
||||
private:
|
||||
// Markers for grouping the phases in the GCPhases enum above
|
||||
static const int GCMainParPhasesLast = GCWorkerEnd;
|
||||
@ -108,11 +117,17 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
WorkerDataArray<size_t>* _scan_rs_claimed_cards;
|
||||
WorkerDataArray<size_t>* _scan_rs_skipped_cards;
|
||||
|
||||
WorkerDataArray<size_t>* _opt_cset_scanned_cards;
|
||||
WorkerDataArray<size_t>* _opt_cset_claimed_cards;
|
||||
WorkerDataArray<size_t>* _opt_cset_skipped_cards;
|
||||
WorkerDataArray<size_t>* _opt_cset_used_memory;
|
||||
|
||||
WorkerDataArray<size_t>* _termination_attempts;
|
||||
|
||||
WorkerDataArray<size_t>* _redirtied_cards;
|
||||
|
||||
double _cur_collection_par_time_ms;
|
||||
double _cur_optional_evac_ms;
|
||||
double _cur_collection_code_root_fixup_time_ms;
|
||||
double _cur_strong_code_root_purge_time_ms;
|
||||
|
||||
@ -184,6 +199,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
|
||||
double print_pre_evacuate_collection_set() const;
|
||||
double print_evacuate_collection_set() const;
|
||||
double print_evacuate_optional_collection_set() const;
|
||||
double print_post_evacuate_collection_set() const;
|
||||
void print_other(double accounted_ms) const;
|
||||
|
||||
@ -199,10 +215,12 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
// add a number of seconds to a phase
|
||||
void add_time_secs(GCParPhases phase, uint worker_i, double secs);
|
||||
|
||||
void record_or_add_objcopy_time_secs(uint worker_i, double secs);
|
||||
void record_or_add_time_secs(GCParPhases phase, uint worker_i, double secs);
|
||||
|
||||
void record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
|
||||
|
||||
void record_or_add_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
|
||||
|
||||
// return the average time for a phase in milliseconds
|
||||
double average_time_ms(GCParPhases phase);
|
||||
|
||||
@ -234,6 +252,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
_cur_collection_par_time_ms = ms;
|
||||
}
|
||||
|
||||
void record_optional_evacuation(double ms) {
|
||||
_cur_optional_evac_ms = ms;
|
||||
}
|
||||
|
||||
void record_code_root_fixup_time(double ms) {
|
||||
_cur_collection_code_root_fixup_time_ms = ms;
|
||||
}
|
||||
|
@@ -56,7 +56,8 @@ struct InCSetState {
// makes getting the next generation fast by a simple increment. They are also
// used to index into arrays.
// The negative values are used for objects requiring various special cases,
// for example eager reclamation of humongous objects.
// for example eager reclamation of humongous objects or optional regions.
Optional = -2, // The region is optional
Humongous = -1, // The region is humongous
NotInCSet = 0, // The region is not in the collection set.
Young = 1, // The region is in the collection set and a young region.
@@ -78,10 +79,11 @@ struct InCSetState {
bool is_humongous() const { return _value == Humongous; }
bool is_young() const { return _value == Young; }
bool is_old() const { return _value == Old; }
bool is_optional() const { return _value == Optional; }

#ifdef ASSERT
bool is_default() const { return _value == NotInCSet; }
bool is_valid() const { return (_value >= Humongous) && (_value < Num); }
bool is_valid() const { return (_value >= Optional) && (_value < Num); }
bool is_valid_gen() const { return (_value >= Young && _value <= Old); }
#endif
};
@@ -101,6 +103,12 @@ class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<InCSet
protected:
InCSetState default_value() const { return InCSetState::NotInCSet; }
public:
void set_optional(uintptr_t index) {
assert(get_by_index(index).is_default(),
"State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
set_by_index(index, InCSetState::Optional);
}

void set_humongous(uintptr_t index) {
assert(get_by_index(index).is_default(),
"State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
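The in-cset fast test is a per-region state table (a G1BiasedMappedArray<InCSetState> in HotSpot) that the copy closures consult on every reference; the new Optional = -2 value lets them cheaply decide to defer a reference instead of copying it or treating it as humongous. A minimal standalone sketch of such a table and the resulting dispatch, with stand-in types and a plain region index instead of the biased, address-derived index used above:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the InCSetState values above; the table is indexed by heap-region index.
enum class RegionState : int8_t { Optional = -2, Humongous = -1, NotInCSet = 0, Young = 1, Old = 2 };

class InCSetFastTest {
  std::vector<RegionState> _states;
public:
  explicit InCSetFastTest(size_t num_regions) : _states(num_regions, RegionState::NotInCSet) {}
  void set_optional(size_t region)  { _states[region] = RegionState::Optional; }
  void set_humongous(size_t region) { _states[region] = RegionState::Humongous; }
  void set_in_young(size_t region)  { _states[region] = RegionState::Young; }
  RegionState at(size_t region) const { return _states[region]; }
};

// Shape of the dispatch done by the scan closures for a reference into `region`.
void handle_reference(const InCSetFastTest& table, size_t region) {
  switch (table.at(region)) {
    case RegionState::Young:
    case RegionState::Old:       std::puts("copy object now");                           break;
    case RegionState::Optional:  std::puts("remember reference for optional evacuation"); break;
    case RegionState::Humongous: std::puts("mark humongous object live");                 break;
    default:                     std::puts("outside the collection set");                 break;
  }
}

int main() {
  InCSetFastTest table(8);
  table.set_in_young(1);
  table.set_optional(5);
  handle_reference(table, 1);
  handle_reference(table, 5);
  handle_reference(table, 7);
  return 0;
}
```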
@@ -83,6 +83,16 @@
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Used during Optional RS scanning to make sure we trim the queues in a timely manner.
class G1ScanRSForOptionalClosure : public OopClosure {
G1ScanObjsDuringScanRSClosure* _scan_cl;
public:
G1ScanRSForOptionalClosure(G1ScanObjsDuringScanRSClosure* cl) : _scan_cl(cl) { }

template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

// This closure is applied to the fields of the objects that have just been copied during evacuation.
class G1ScanEvacuatedObjClosure : public G1ScanClosureBase {
@@ -64,6 +64,8 @@ template <class T>
inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
if (state.is_humongous()) {
_g1h->set_humongous_is_live(obj);
} else if (state.is_optional()) {
_par_scan_state->remember_reference_into_optional_region(p);
}
}

@@ -195,6 +197,12 @@ inline void G1ScanObjsDuringScanRSClosure::do_oop_work(T* p) {
}
}

template <class T>
inline void G1ScanRSForOptionalClosure::do_oop_work(T* p) {
_scan_cl->do_oop_work(p);
_scan_cl->trim_queue_partially();
}

void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
if (_g1h->heap_region_containing(new_obj)->is_young()) {
_scanned_cld->record_modified_oops();
@@ -243,6 +251,8 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
} else {
if (state.is_humongous()) {
_g1h->set_humongous_is_live(obj);
} else if (state.is_optional()) {
_par_scan_state->remember_root_into_optional_region(p);
}

// The object is not in collection set. If we're a root scanning
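Rather than copying an object that lives in an optional region immediately, the closures above record the location of the referencing slot (the T* p) in a per-region buffer owned by the worker's G1ParScanThreadState; if the region is later chosen for optional evacuation, those recorded slots are replayed as roots. A hedged, simplified sketch of that deferral idea in plain standard C++ (not the HotSpot types):

```cpp
#include <cstdio>
#include <unordered_map>
#include <vector>

using Obj = long;  // stand-in for a heap object

// Per-worker buffers of "references pointing into optional region R",
// analogous to G1ParScanThreadState::_oops_into_optional_regions.
class DeferredRefs {
  std::unordered_map<unsigned, std::vector<Obj**>> _per_region;
public:
  void remember(unsigned region, Obj** slot) { _per_region[region].push_back(slot); }

  // When `region` is chosen for optional evacuation, treat every remembered
  // slot as a root: here we just "evacuate" by redirecting it.
  void evacuate_region(unsigned region, Obj* new_location) {
    for (Obj** slot : _per_region[region]) {
      *slot = new_location;  // stand-in for copy-to-survivor plus pointer update
    }
    _per_region.erase(region);
  }
};

int main() {
  Obj old_obj = 41, new_obj = 42;
  Obj* field = &old_obj;             // a reference field pointing into optional region 5
  DeferredRefs refs;
  refs.remember(5, &field);          // what remember_reference_into_optional_region models
  refs.evacuate_region(5, &new_obj); // later, if the time budget allowed it
  std::printf("field now points at %ld\n", *field);  // 42
  return 0;
}
```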
src/hotspot/share/gc/g1/g1OopStarChunkedList.cpp (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1OopStarChunkedList.inline.hpp"
|
||||
|
||||
G1OopStarChunkedList::~G1OopStarChunkedList() {
|
||||
delete_list(_roots);
|
||||
delete_list(_croots);
|
||||
delete_list(_oops);
|
||||
delete_list(_coops);
|
||||
}
|
||||
|
||||
void G1OopStarChunkedList::oops_do(OopClosure* obj_cl, OopClosure* root_cl) {
|
||||
chunks_do(_roots, root_cl);
|
||||
chunks_do(_croots, root_cl);
|
||||
chunks_do(_oops, obj_cl);
|
||||
chunks_do(_coops, obj_cl);
|
||||
}
|
src/hotspot/share/gc/g1/g1OopStarChunkedList.hpp (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_HPP
#define SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_HPP

#include "oops/oopsHierarchy.hpp"
#include "utilities/chunkedList.hpp"

class OopClosure;

class G1OopStarChunkedList : public CHeapObj<mtGC> {
size_t _used_memory;

ChunkedList<oop*, mtGC>* _roots;
ChunkedList<narrowOop*, mtGC>* _croots;
ChunkedList<oop*, mtGC>* _oops;
ChunkedList<narrowOop*, mtGC>* _coops;

template <typename T> void delete_list(ChunkedList<T*, mtGC>* c);

template <typename T>
void chunks_do(ChunkedList<T*, mtGC>* head,
OopClosure* cl);

template <typename T>
inline void push(ChunkedList<T*, mtGC>** field, T* p);

public:
G1OopStarChunkedList() : _used_memory(0), _roots(NULL), _croots(NULL), _oops(NULL), _coops(NULL) {}
~G1OopStarChunkedList();

size_t used_memory() { return _used_memory; }

void oops_do(OopClosure* obj_cl, OopClosure* root_cl);

inline void push_oop(oop* p);
inline void push_oop(narrowOop* p);
inline void push_root(oop* p);
inline void push_root(narrowOop* p);
};

#endif // SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_HPP
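G1OopStarChunkedList keeps four chunked, append-only lists (oop* and narrowOop*, each split into root slots and ordinary reference slots) and tracks how much memory its chunks occupy, which is what feeds the OptCSetUsedMemory work item. A sketch of the underlying chunked-list idea, assuming a simple fixed-size chunk instead of HotSpot's ChunkedList<T*, mtGC>:

```cpp
#include <cstddef>
#include <cstdio>

// A minimal singly linked list of fixed-size chunks, similar in spirit to the
// ChunkedList instances used by G1OopStarChunkedList.
template <typename T, size_t N = 32>
class SimpleChunkedList {
  struct Chunk {
    T* slots[N];
    size_t size = 0;
    Chunk* next = nullptr;
  };
  Chunk* _head = nullptr;
  size_t _used_memory = 0;  // mirrors the used_memory() bookkeeping

public:
  ~SimpleChunkedList() {
    while (_head != nullptr) { Chunk* next = _head->next; delete _head; _head = next; }
  }

  void push(T* p) {
    if (_head == nullptr || _head->size == N) {  // start a new chunk when the current one is full
      Chunk* c = new Chunk();
      c->next = _head;
      _head = c;
      _used_memory += sizeof(Chunk);
    }
    _head->slots[_head->size++] = p;
  }

  size_t used_memory() const { return _used_memory; }

  template <typename Closure>
  void chunks_do(Closure cl) const {
    for (Chunk* c = _head; c != nullptr; c = c->next) {
      for (size_t i = 0; i < c->size; i++) cl(c->slots[i]);
    }
  }
};

int main() {
  long a = 1, b = 2;
  SimpleChunkedList<long> list;
  list.push(&a);
  list.push(&b);
  list.chunks_do([](long* p) { std::printf("%ld\n", *p); });
  std::printf("used memory: %zu bytes\n", list.used_memory());
  return 0;
}
```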
|
src/hotspot/share/gc/g1/g1OopStarChunkedList.inline.hpp (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_INLINE_HPP
|
||||
#define SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_INLINE_HPP
|
||||
|
||||
#include "gc/g1/g1OopStarChunkedList.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
|
||||
template <typename T>
|
||||
inline void G1OopStarChunkedList::push(ChunkedList<T*, mtGC>** field, T* p) {
|
||||
ChunkedList<T*, mtGC>* list = *field;
|
||||
if (list == NULL) {
|
||||
*field = new ChunkedList<T*, mtGC>();
|
||||
_used_memory += sizeof(ChunkedList<T*, mtGC>);
|
||||
} else if (list->is_full()) {
|
||||
ChunkedList<T*, mtGC>* next = new ChunkedList<T*, mtGC>();
|
||||
next->set_next_used(list);
|
||||
*field = next;
|
||||
_used_memory += sizeof(ChunkedList<T*, mtGC>);
|
||||
}
|
||||
|
||||
(*field)->push(p);
|
||||
}
|
||||
|
||||
inline void G1OopStarChunkedList::push_root(narrowOop* p) {
|
||||
push(&_croots, p);
|
||||
}
|
||||
|
||||
inline void G1OopStarChunkedList::push_root(oop* p) {
|
||||
push(&_roots, p);
|
||||
}
|
||||
|
||||
inline void G1OopStarChunkedList::push_oop(narrowOop* p) {
|
||||
push(&_coops, p);
|
||||
}
|
||||
|
||||
inline void G1OopStarChunkedList::push_oop(oop* p) {
|
||||
push(&_oops, p);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void G1OopStarChunkedList::delete_list(ChunkedList<T*, mtGC>* c) {
|
||||
while (c != NULL) {
|
||||
ChunkedList<T*, mtGC>* next = c->next_used();
|
||||
delete c;
|
||||
c = next;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void G1OopStarChunkedList::chunks_do(ChunkedList<T*, mtGC>* head, OopClosure* cl) {
|
||||
for (ChunkedList<T*, mtGC>* c = head; c != NULL; c = c->next_used()) {
|
||||
for (size_t i = 0; i < c->size(); i++) {
|
||||
T* p = c->at(i);
|
||||
cl->do_oop(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_INLINE_HPP
|
@ -37,7 +37,10 @@
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length)
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                           uint worker_id,
                                           size_t young_cset_length,
                                           size_t optional_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
@ -51,7 +54,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id,
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _old_gen_is_full(false)
    _old_gen_is_full(false),
    _num_optional_regions(optional_cset_length)
{
  // we allocate G1YoungSurvRateNumRegions plus one entries, since
  // we "sacrifice" entry 0 to keep track of surviving bytes for
@ -78,6 +82,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id,
  _dest[InCSetState::Old] = InCSetState::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);

  _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
}

// Pass locally gathered statistics to global state.
@ -97,6 +103,7 @@ G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
}

void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
@ -324,7 +331,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] = new G1ParScanThreadState(_g1h, worker_id, _young_cset_length);
    _states[worker_id] =
      new G1ParScanThreadState(_g1h, worker_id, _young_cset_length, _optional_cset_length);
  }
  return _states[worker_id];
}
@ -351,6 +359,19 @@ void G1ParScanThreadStateSet::flush() {
  _flushed = true;
}

void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
    _g1h->g1_policy()->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
  }
}

oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

@ -381,11 +402,15 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
    return forward_ptr;
  }
}
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                 uint n_workers,
                                                 size_t young_cset_length,
                                                 size_t optional_cset_length) :
  _g1h(g1h),
  _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
  _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
  _young_cset_length(young_cset_length),
  _optional_cset_length(optional_cset_length),
  _n_workers(n_workers),
  _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
@ -37,6 +37,7 @@
#include "oops/oop.hpp"
#include "utilities/ticks.hpp"

class G1OopStarChunkedList;
class G1PLABAllocator;
class G1EvacuationRootClosures;
class HeapRegion;
@ -87,8 +88,14 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
    return _dest[original.value()];
  }

  size_t _num_optional_regions;
  G1OopStarChunkedList* _oops_into_optional_regions;

public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
  G1ParScanThreadState(G1CollectedHeap* g1h,
                       uint worker_id,
                       size_t young_cset_length,
                       size_t optional_cset_length);
  virtual ~G1ParScanThreadState();

  void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
@ -206,6 +213,13 @@ public:

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(oop obj, markOop m);

  template <typename T>
  inline void remember_root_into_optional_region(T* p);
  template <typename T>
  inline void remember_reference_into_optional_region(T* p);

  inline G1OopStarChunkedList* oops_into_optional_region(const HeapRegion* hr);
};

class G1ParScanThreadStateSet : public StackObj {
@ -213,14 +227,19 @@ class G1ParScanThreadStateSet : public StackObj {
  G1ParScanThreadState** _states;
  size_t* _surviving_young_words_total;
  size_t _young_cset_length;
  size_t _optional_cset_length;
  uint _n_workers;
  bool _flushed;

public:
  G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length);
  G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                          uint n_workers,
                          size_t young_cset_length,
                          size_t optional_cset_length);
  ~G1ParScanThreadStateSet();

  void flush();
  void record_unused_optional_region(HeapRegion* hr);

  G1ParScanThreadState* state_for_worker(uint worker_id);

@ -26,6 +26,7 @@
#define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1OopStarChunkedList.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "oops/access.inline.hpp"
@ -203,4 +204,23 @@ inline void G1ParScanThreadState::reset_trim_ticks() {
  _trim_ticks = Tickspan();
}

template <typename T>
inline void G1ParScanThreadState::remember_root_into_optional_region(T* p) {
  oop o = RawAccess<IS_NOT_NULL>::oop_load(p);
  uint index = _g1h->heap_region_containing(o)->index_in_opt_cset();
  _oops_into_optional_regions[index].push_root(p);
}

template <typename T>
inline void G1ParScanThreadState::remember_reference_into_optional_region(T* p) {
  oop o = RawAccess<IS_NOT_NULL>::oop_load(p);
  uint index = _g1h->heap_region_containing(o)->index_in_opt_cset();
  _oops_into_optional_regions[index].push_oop(p);
  DEBUG_ONLY(verify_ref(p);)
}

G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const HeapRegion* hr) {
  return &_oops_into_optional_regions[hr->index_in_opt_cset()];
}

#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
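The inline functions above defer work on references that point into optional regions: instead of copying the referent, the field address is buffered on the chunked list that belongs to that region's slot (index_in_opt_cset) in the per-thread array, to be processed only if the region is actually evacuated later. A standalone sketch of that data flow, with hypothetical names rather than the HotSpot types, might look like:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Region {
  uint32_t index_in_opt_cset;   // slot in the optional collection set
  bool     is_optional;
};

struct DeferredRefs {
  std::vector<void**> refs;     // field addresses to revisit later
  void push(void** p) { refs.push_back(p); }
};

struct WorkerState {
  std::vector<DeferredRefs> per_optional_region;  // one buffer per optional region

  // Mirrors the deferral idea: index by the region the referent lives in,
  // then remember the field address instead of evacuating the object now.
  void remember_reference(void** p, const Region& region_of_referent) {
    per_optional_region[region_of_referent.index_in_opt_cset].push(p);
  }
};

int main() {
  WorkerState worker;
  worker.per_optional_region.resize(3);   // three optional regions in this example
  Region r{1, true};
  static void* field = nullptr;           // a field that points into region r
  worker.remember_reference(&field, r);
  return worker.per_optional_region[1].refs.size() == 1 ? 0 : 1;
}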
@ -401,6 +401,14 @@ private:

  size_t desired_survivor_size() const;
public:
  // Fraction used when predicting how many optional regions to include in
  // the CSet. This fraction of the available time is used for optional regions,
  // the rest is used to add old regions to the normal CSet.
  double optional_prediction_fraction() { return 0.2; }
  // Fraction used when evacuating the optional regions. This fraction of the
  // remaining time is used to choose what regions to include in the evacuation.
  double optional_evacuation_fraction() { return 0.75; }

  uint tenuring_threshold() const { return _tenuring_threshold; }

  uint max_survivor_regions() {
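As a worked example of how these two fractions split the time budget (illustrative numbers only, not the actual policy code):

#include <cstdio>

int main() {
  // Suppose 80 ms of the pause budget is available for adding old regions.
  const double time_for_old_regions_ms = 80.0;

  // 20% of that time is reserved when predicting how many optional regions
  // to include; the rest is spent adding old regions to the normal CSet.
  const double optional_budget_ms   = time_for_old_regions_ms * 0.2;  // 16 ms
  const double normal_old_budget_ms = time_for_old_regions_ms * 0.8;  // 64 ms

  // At evacuation time, 75% of whatever pause time remains is used when
  // choosing which of the optional regions to actually evacuate.
  const double remaining_pause_ms      = 40.0;                        // example
  const double optional_evac_budget_ms = remaining_pause_ms * 0.75;   // 30 ms

  std::printf("optional prediction: %.1f ms, normal old: %.1f ms, optional evacuation: %.1f ms\n",
              optional_budget_ms, normal_old_budget_ms, optional_evac_budget_ms);
  return 0;
}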
@ -311,12 +311,14 @@ void G1RemSet::initialize(size_t capacity, uint max_regions) {
G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                                                   G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                                                   G1ParScanThreadState* pss,
                                                   G1GCPhaseTimes::GCParPhases phase,
                                                   uint worker_i) :
  _g1h(G1CollectedHeap::heap()),
  _ct(_g1h->card_table()),
  _pss(pss),
  _scan_objs_on_card_cl(scan_obj_on_card),
  _scan_state(scan_state),
  _phase(phase),
  _worker_i(worker_i),
  _cards_scanned(0),
  _cards_claimed(0),
@ -402,7 +404,7 @@ void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {

    scan_card(mr, region_idx_for_card);
  }
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ScanRS));
  event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(_phase));
}

void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
@ -437,7 +439,7 @@ bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {

void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, worker_i);
  G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, G1GCPhaseTimes::ScanRS, worker_i);
  _g1h->collection_set_iterate_from(&cl, worker_i);

  G1GCPhaseTimes* p = _g1p->phase_times();
@ -28,6 +28,7 @@
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1RemSetSummary.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
@ -138,6 +139,8 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {

  G1RemSetScanState* _scan_state;

  G1GCPhaseTimes::GCParPhases _phase;

  uint _worker_i;

  size_t _cards_scanned;
@ -159,6 +162,7 @@ public:
  G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
                           G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
                           G1ParScanThreadState* pss,
                           G1GCPhaseTimes::GCParPhases phase,
                           uint worker_i);

  bool do_heap_region(HeapRegion* r);
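Carrying a G1GCPhaseTimes::GCParPhases value in the closure means the same remembered-set scanning code can report its timing under whichever phase its caller names; the normal pass above passes G1GCPhaseTimes::ScanRS, and a separate pass can pass a different value. A minimal sketch of this parameterization idea (illustrative enum and names, not the HotSpot ones):

#include <cstdio>

enum class ParPhase { ScanRS, OptScanRS };

const char* phase_name(ParPhase p) {
  return p == ParPhase::ScanRS ? "ScanRS" : "OptScanRS";
}

struct ScanClosure {
  ParPhase _phase;                       // remembered at construction time
  explicit ScanClosure(ParPhase phase) : _phase(phase) {}

  void scan_region(int region_index) {
    // ... actual scanning work would happen here ...
    std::printf("region %d reported under %s\n", region_index, phase_name(_phase));
  }
};

int main() {
  ScanClosure initial_pass(ParPhase::ScanRS);     // the normal collection set
  ScanClosure optional_pass(ParPhase::OptScanRS); // a later, abortable pass
  initial_pass.scan_region(7);
  optional_pass.scan_region(42);
  return 0;
}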
@ -26,6 +26,7 @@
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
@ -240,7 +241,8 @@ HeapRegion::HeapRegion(uint hrm_index,
    _containing_set(NULL),
#endif
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _index_in_opt_cset(G1OptionalCSet::InvalidCSetIndex), _young_index_in_cset(-1),
    _surv_rate_group(NULL), _age_index(-1),
    _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
    _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
{
@ -250,6 +250,9 @@ class HeapRegion: public G1ContiguousSpace {
  // The calculated GC efficiency of the region.
  double _gc_efficiency;

  // The index in the optional regions array, if this region
  // is considered optional during a mixed collection.
  uint _index_in_opt_cset;
  int _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;
@ -546,6 +549,9 @@ class HeapRegion: public G1ContiguousSpace {
  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency; }

  uint index_in_opt_cset() const { return _index_in_opt_cset; }
  void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }

  int young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
@ -34,7 +34,7 @@ template <class T>
class WorkerDataArray : public CHeapObj<mtGC> {
  friend class WDAPrinter;
public:
  static const uint MaxThreadWorkItems = 3;
  static const uint MaxThreadWorkItems = 4;
private:
  T* _data;
  uint _length;
@ -49,6 +49,8 @@ private:
  void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items, uint index = 0);
  void set_thread_work_item(uint worker_i, size_t value, uint index = 0);
  void add_thread_work_item(uint worker_i, size_t value, uint index = 0);
  void set_or_add_thread_work_item(uint worker_i, size_t value, uint index = 0);

  WorkerDataArray<size_t>* thread_work_items(uint index = 0) const {
    assert(index < MaxThreadWorkItems, "Tried to access thread work item %u max %u", index, MaxThreadWorkItems);
    return _thread_work_items[index];
@ -80,6 +80,17 @@ void WorkerDataArray<T>::add_thread_work_item(uint worker_i, size_t value, uint
  _thread_work_items[index]->add(worker_i, value);
}

template <typename T>
void WorkerDataArray<T>::set_or_add_thread_work_item(uint worker_i, size_t value, uint index) {
  assert(index < MaxThreadWorkItems, "Tried to access thread work item %u (max %u)", index, MaxThreadWorkItems);
  assert(_thread_work_items[index] != NULL, "No sub count");
  if (_thread_work_items[index]->get(worker_i) == _thread_work_items[index]->uninitialized()) {
    _thread_work_items[index]->set(worker_i, value);
  } else {
    _thread_work_items[index]->add(worker_i, value);
  }
}

template <typename T>
void WorkerDataArray<T>::add(uint worker_i, T value) {
  assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
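set_or_add_thread_work_item initializes a worker's slot on its first report and accumulates on later reports, which lets more than one evacuation pass contribute to the same per-worker counter. A standalone sketch of that behavior (not the HotSpot class):

#include <cassert>
#include <cstddef>
#include <limits>
#include <vector>

struct PerWorkerCounter {
  // Stand-in for the "uninitialized" sentinel used by the real work items.
  static constexpr size_t uninitialized = std::numeric_limits<size_t>::max();
  std::vector<size_t> values;

  explicit PerWorkerCounter(size_t n_workers) : values(n_workers, uninitialized) {}

  void set_or_add(size_t worker, size_t value) {
    assert(worker < values.size());
    if (values[worker] == uninitialized) {
      values[worker] = value;      // first report from this worker
    } else {
      values[worker] += value;     // subsequent reports accumulate
    }
  }
};

int main() {
  PerWorkerCounter used_memory(4);
  used_memory.set_or_add(2, 1024);   // first pass
  used_memory.set_or_add(2, 512);    // a later pass adds to the same slot
  assert(used_memory.values[2] == 1536);
  return 0;
}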