Merge

commit 9fba6da283
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1322,234 +1322,240 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);

TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
TraceCollectorStats tcs(g1mm()->full_collection_counters());
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

double start = os::elapsedTime();
g1_policy()->record_full_collection_start();

// Note: When we have a more flexible GC logging framework that
// allows us to add optional attributes to a GC log record we
// could consider timing and reporting how long we wait in the
// following two methods.
wait_while_free_regions_coming();
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until
// they are done. By telling them to abort, they should complete
// early.
_cm->root_regions()->abort();
_cm->root_regions()->wait_until_scan_finished();
append_secondary_free_list_if_not_empty_with_lock();

gc_prologue(true);
increment_total_collections(true /* full gc */);
increment_old_marking_cycles_started();

size_t g1h_prev_used = used();
assert(used() == recalculate_used(), "Should be equal");

verify_before_gc();

pre_full_gc_dump();

COMPILER2_PRESENT(DerivedPointerTable::clear());

// Disable discovery and empty the discovered lists
// for the CM ref processor.
ref_processor_cm()->disable_discovery();
ref_processor_cm()->abandon_partial_discovery();
ref_processor_cm()->verify_no_references_recorded();

// Abandon current iterations of concurrent marking and concurrent
// refinement, if any are in progress. We have to do this before
// wait_until_scan_finished() below.
concurrent_mark()->abort();

// Make sure we'll choose a new allocation region afterwards.
release_mutator_alloc_region();
abandon_gc_alloc_regions();
g1_rem_set()->cleanupHRRS();

// We should call this after we retire any currently active alloc
// regions so that all the ALLOC / RETIRE events are generated
// before the start GC event.
_hr_printer.start_gc(true /* full */, (size_t) total_collections());

// We may have added regions to the current incremental collection
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
abandon_collection_set(g1_policy()->inc_cset_head());
g1_policy()->clear_incremental_cset();
g1_policy()->stop_incremental_cset_building();

tear_down_region_sets(false /* free_list_only */);
g1_policy()->set_gcs_are_young(true);

// See the comments in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() about
// how reference processing currently works in G1.

// Temporarily make discovery by the STW ref processor single threaded (non-MT).
ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);

// Temporarily clear the STW ref processor's _is_alive_non_header field.
ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);

ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ref_processor_stw()->setup_policy(do_clear_all_soft_refs);

// Do collection work
{
HandleMark hm; // Discard invalid handles created during gc
G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
}
TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
TraceCollectorStats tcs(g1mm()->full_collection_counters());
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());

assert(free_regions() == 0, "we should not have added any free regions");
rebuild_region_sets(false /* free_list_only */);
double start = os::elapsedTime();
g1_policy()->record_full_collection_start();

// Enqueue any discovered reference objects that have
// not been removed from the discovered lists.
ref_processor_stw()->enqueue_discovered_references();
// Note: When we have a more flexible GC logging framework that
// allows us to add optional attributes to a GC log record we
// could consider timing and reporting how long we wait in the
// following two methods.
wait_while_free_regions_coming();
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until
// they are done. By telling them to abort, they should complete
// early.
_cm->root_regions()->abort();
_cm->root_regions()->wait_until_scan_finished();
append_secondary_free_list_if_not_empty_with_lock();

COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
gc_prologue(true);
increment_total_collections(true /* full gc */);
increment_old_marking_cycles_started();

MemoryService::track_memory_usage();
assert(used() == recalculate_used(), "Should be equal");

verify_after_gc();
verify_before_gc();

assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
pre_full_gc_dump();

// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
COMPILER2_PRESENT(DerivedPointerTable::clear());

// Note: since we've just done a full GC, concurrent
// marking is no longer active. Therefore we need not
// re-enable reference discovery for the CM ref processor.
// That will be done at the start of the next marking cycle.
assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
ref_processor_cm()->verify_no_references_recorded();
// Disable discovery and empty the discovered lists
// for the CM ref processor.
ref_processor_cm()->disable_discovery();
ref_processor_cm()->abandon_partial_discovery();
ref_processor_cm()->verify_no_references_recorded();

reset_gc_time_stamp();
// Since everything potentially moved, we will clear all remembered
// sets, and clear all cards. Later we will rebuild remebered
// sets. We will also reset the GC time stamps of the regions.
clear_rsets_post_compaction();
check_gc_time_stamps();
// Abandon current iterations of concurrent marking and concurrent
// refinement, if any are in progress. We have to do this before
// wait_until_scan_finished() below.
concurrent_mark()->abort();

// Resize the heap if necessary.
resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
// Make sure we'll choose a new allocation region afterwards.
release_mutator_alloc_region();
abandon_gc_alloc_regions();
g1_rem_set()->cleanupHRRS();

if (_hr_printer.is_active()) {
// We should do this after we potentially resize the heap so
// that all the COMMIT / UNCOMMIT events are generated before
// the end GC event.
// We should call this after we retire any currently active alloc
// regions so that all the ALLOC / RETIRE events are generated
// before the start GC event.
_hr_printer.start_gc(true /* full */, (size_t) total_collections());

print_hrs_post_compaction();
_hr_printer.end_gc(true /* full */, (size_t) total_collections());
}
// We may have added regions to the current incremental collection
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
abandon_collection_set(g1_policy()->inc_cset_head());
g1_policy()->clear_incremental_cset();
g1_policy()->stop_incremental_cset_building();

if (_cg1r->use_cache()) {
_cg1r->clear_and_record_card_counts();
_cg1r->clear_hot_cache();
}
tear_down_region_sets(false /* free_list_only */);
g1_policy()->set_gcs_are_young(true);

// Rebuild remembered sets of all regions.
if (G1CollectedHeap::use_parallel_gc_threads()) {
uint n_workers =
AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(),
"If not dynamic should be using all the workers");
workers()->set_active_workers(n_workers);
// Set parallel threads in the heap (_n_par_threads) only
// before a parallel phase and always reset it to 0 after
// the phase so that the number of parallel threads does
// no get carried forward to a serial phase where there
// may be code that is "possibly_parallel".
set_par_threads(n_workers);
// See the comments in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() about
// how reference processing currently works in G1.

ParRebuildRSTask rebuild_rs_task(this);
assert(check_heap_region_claim_values(
HeapRegion::InitialClaimValue), "sanity check");
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"Unless dynamic should use total workers");
// Use the most recent number of active workers
assert(workers()->active_workers() > 0,
"Active workers not properly set");
set_par_threads(workers()->active_workers());
workers()->run_task(&rebuild_rs_task);
set_par_threads(0);
assert(check_heap_region_claim_values(
HeapRegion::RebuildRSClaimValue), "sanity check");
reset_heap_region_claim_values();
} else {
RebuildRSOutOfRegionClosure rebuild_rs(this);
heap_region_iterate(&rebuild_rs);
}
// Temporarily make discovery by the STW ref processor single threaded (non-MT).
ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);

if (G1Log::fine()) {
print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
}
// Temporarily clear the STW ref processor's _is_alive_non_header field.
ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);

if (true) { // FIXME
MetaspaceGC::compute_new_size();
}
ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ref_processor_stw()->setup_policy(do_clear_all_soft_refs);

// Start a new incremental collection set for the next pause
assert(g1_policy()->collection_set() == NULL, "must be");
g1_policy()->start_incremental_cset_building();
// Do collection work
{
HandleMark hm; // Discard invalid handles created during gc
G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
}

// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the next
// evacuation pause.
clear_cset_fast_test();
assert(free_regions() == 0, "we should not have added any free regions");
rebuild_region_sets(false /* free_list_only */);

init_mutator_alloc_region();
// Enqueue any discovered reference objects that have
// not been removed from the discovered lists.
ref_processor_stw()->enqueue_discovered_references();

double end = os::elapsedTime();
g1_policy()->record_full_collection_end();
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

MemoryService::track_memory_usage();

verify_after_gc();

assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();

// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();

// Note: since we've just done a full GC, concurrent
// marking is no longer active. Therefore we need not
// re-enable reference discovery for the CM ref processor.
// That will be done at the start of the next marking cycle.
assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
ref_processor_cm()->verify_no_references_recorded();

reset_gc_time_stamp();
// Since everything potentially moved, we will clear all remembered
// sets, and clear all cards. Later we will rebuild remebered
// sets. We will also reset the GC time stamps of the regions.
clear_rsets_post_compaction();
check_gc_time_stamps();

// Resize the heap if necessary.
resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);

if (_hr_printer.is_active()) {
// We should do this after we potentially resize the heap so
// that all the COMMIT / UNCOMMIT events are generated before
// the end GC event.

print_hrs_post_compaction();
_hr_printer.end_gc(true /* full */, (size_t) total_collections());
}

if (_cg1r->use_cache()) {
_cg1r->clear_and_record_card_counts();
_cg1r->clear_hot_cache();
}

// Rebuild remembered sets of all regions.
if (G1CollectedHeap::use_parallel_gc_threads()) {
uint n_workers =
AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(),
"If not dynamic should be using all the workers");
workers()->set_active_workers(n_workers);
// Set parallel threads in the heap (_n_par_threads) only
// before a parallel phase and always reset it to 0 after
// the phase so that the number of parallel threads does
// no get carried forward to a serial phase where there
// may be code that is "possibly_parallel".
set_par_threads(n_workers);

ParRebuildRSTask rebuild_rs_task(this);
assert(check_heap_region_claim_values(
HeapRegion::InitialClaimValue), "sanity check");
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"Unless dynamic should use total workers");
// Use the most recent number of active workers
assert(workers()->active_workers() > 0,
"Active workers not properly set");
set_par_threads(workers()->active_workers());
workers()->run_task(&rebuild_rs_task);
set_par_threads(0);
assert(check_heap_region_claim_values(
HeapRegion::RebuildRSClaimValue), "sanity check");
reset_heap_region_claim_values();
} else {
RebuildRSOutOfRegionClosure rebuild_rs(this);
heap_region_iterate(&rebuild_rs);
}
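
The rebuild-of-remembered-sets block above sizes the worker gang with AdaptiveSizePolicy::calc_active_workers() and brackets the parallel phase with set_par_threads(n) / set_par_threads(0). A minimal sketch of that discipline, using hypothetical stand-in types rather than the real HotSpot classes:

    // Illustrative sketch only (not HotSpot code): clamp the active worker
    // count, publish it before the parallel phase, and reset the published
    // count to 0 afterwards so serial phases never see a stale value.
    #include <algorithm>
    #include <cstdio>

    struct WorkGang {
      unsigned total;
      unsigned active;
    };

    // Hypothetical analogue of AdaptiveSizePolicy::calc_active_workers():
    // never exceed the gang size, never drop below one worker.
    static unsigned calc_active_workers(unsigned total, unsigned prev_active,
                                        unsigned application_threads) {
      unsigned wanted = std::max(prev_active, application_threads / 2);
      return std::max(1u, std::min(total, wanted));
    }

    struct Heap {
      unsigned n_par_threads = 0;                 // analogue of _n_par_threads
      void set_par_threads(unsigned n) { n_par_threads = n; }
      void run_parallel_rebuild(WorkGang& gang) {
        std::printf("rebuilding remembered sets with %u of %u workers\n",
                    gang.active, gang.total);
      }
    };

    int main() {
      WorkGang gang{8, 4};
      Heap heap;

      unsigned n_workers = calc_active_workers(gang.total, gang.active, 6);
      gang.active = n_workers;

      heap.set_par_threads(n_workers);   // only valid for the parallel phase
      heap.run_parallel_rebuild(gang);
      heap.set_par_threads(0);           // reset so serial code is not "possibly_parallel"
      return 0;
    }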

if (true) { // FIXME
MetaspaceGC::compute_new_size();
}

#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
ParallelTaskTerminator::print_termination_counts();
#endif

gc_epilogue(true);
// Discard all rset updates
JavaThread::dirty_card_queue_set().abandon_logs();
assert(!G1DeferredRSUpdate
|| (G1DeferredRSUpdate &&
(dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");

// Discard all rset updates
JavaThread::dirty_card_queue_set().abandon_logs();
assert(!G1DeferredRSUpdate
|| (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
_young_list->reset_sampled_info();
// At this point there should be no regions in the
// entire heap tagged as young.
assert(check_young_list_empty(true /* check_heap */),
"young list should be empty at this point");

_young_list->reset_sampled_info();
// At this point there should be no regions in the
// entire heap tagged as young.
assert( check_young_list_empty(true /* check_heap */),
"young list should be empty at this point");
// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);

// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);
_hrs.verify_optional();
verify_region_sets_optional();

_hrs.verify_optional();
verify_region_sets_optional();
// Start a new incremental collection set for the next pause
assert(g1_policy()->collection_set() == NULL, "must be");
g1_policy()->start_incremental_cset_building();

// Clear the _cset_fast_test bitmap in anticipation of adding
// regions to the incremental collection set for the next
// evacuation pause.
clear_cset_fast_test();

init_mutator_alloc_region();

double end = os::elapsedTime();
g1_policy()->record_full_collection_end();

if (G1Log::fine()) {
g1_policy()->print_heap_transition();
}

// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();

gc_epilogue(true);
}

if (G1Log::finer()) {
g1_policy()->print_detailed_heap_transition();
}

print_heap_after_gc();

// We must call G1MonitoringSupport::update_sizes() in the same scoping level
// as an active TraceMemoryManagerStats object (i.e. before the destructor for the
// TraceMemoryManagerStats is called) so that the G1 memory pools are updated
// before any GC notifications are raised.
g1mm()->update_sizes();
post_full_gc_dump();
}

post_full_gc_dump();

return true;
}
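
The comment near the end of the hunk pins down a scoping requirement: g1mm()->update_sizes() must run while the TraceMemoryManagerStats object is still alive, because that object's destructor is what raises the GC notification. A minimal sketch of that RAII ordering, with hypothetical stand-in types:

    // Illustrative sketch only: the stats object's destructor publishes the
    // GC notification, so pool sizes have to be refreshed before the closing
    // brace of its scope. Types here are stand-ins, not HotSpot classes.
    #include <cstdio>

    struct MemoryPools {
      long used_bytes = 0;
      void update_sizes(long v) { used_bytes = v; }
    };

    struct TraceMemoryManagerStats {            // stand-in for the real tracer
      MemoryPools& pools;
      explicit TraceMemoryManagerStats(MemoryPools& p) : pools(p) {}
      ~TraceMemoryManagerStats() {              // notification raised on scope exit
        std::printf("GC notification: pools report %ld bytes used\n", pools.used_bytes);
      }
    };

    int main() {
      MemoryPools pools;
      {
        TraceMemoryManagerStats tms(pools);
        // ... full collection work would happen here ...
        pools.update_sizes(42 * 1024 * 1024);   // must precede ~TraceMemoryManagerStats()
      }                                          // destructor fires: sees updated sizes
      return 0;
    }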

@@ -3838,7 +3844,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
double sample_start_time_sec = os::elapsedTime();
size_t start_used_bytes = used();

#if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
@@ -3846,8 +3851,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE

g1_policy()->record_collection_pause_start(sample_start_time_sec,
start_used_bytes);
g1_policy()->record_collection_pause_start(sample_start_time_sec);

double scan_wait_start = os::elapsedTime();
// We have to wait until the CM threads finish scanning the
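
This hunk shows the call-site effect of the policy API change: the caller no longer passes start_used_bytes, because the policy now snapshots heap usage itself in record_heap_size_info_at_start(). A rough sketch of the new shape, with hypothetical stand-in types:

    // Illustrative sketch only, not HotSpot code.
    #include <cstddef>
    #include <cstdio>

    struct Heap { size_t used() const { return 128 * 1024 * 1024; } };

    struct Policy {
      Heap* heap;
      size_t used_at_start = 0;

      // New style: one argument; the policy records heap sizes on its own.
      void record_collection_pause_start(double start_time_sec) {
        record_heap_size_info_at_start();
        std::printf("pause started at %.3f s, %zu bytes used\n",
                    start_time_sec, used_at_start);
      }

      void record_heap_size_info_at_start() { used_at_start = heap->used(); }
    };

    int main() {
      Heap heap;
      Policy policy{&heap};
      // Old call shape: policy.record_collection_pause_start(t, heap.used());
      policy.record_collection_pause_start(0.125);
      return 0;
    }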

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -406,7 +406,6 @@ void G1CollectorPolicy::init() {
}
_free_regions_at_end_of_collection = _g1->free_regions();
update_young_list_target_length();
_prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

// We may immediately start allocating regions and placing them on the
// collection set list. Initialize the per-collection set info
@@ -746,6 +745,7 @@ G1CollectorPolicy::verify_young_ages(HeapRegion* head,

void G1CollectorPolicy::record_full_collection_start() {
_full_collection_start_sec = os::elapsedTime();
record_heap_size_info_at_start();
// Release the future to-space so that it is available for compaction into.
_g1->set_full_collection();
}
@@ -788,8 +788,7 @@ void G1CollectorPolicy::record_stop_world_start() {
_stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
size_t start_used) {
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
// We only need to do this here as the policy will only be applied
// to the GC we're about to start. so, no point is calculating this
// every time we calculate / recalculate the target young length.
@@ -803,19 +802,14 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
_trace_gen0_time_data.record_start_collection(s_w_t_ms);
_stop_world_start = 0.0;

record_heap_size_info_at_start();

phase_times()->record_cur_collection_start_sec(start_time_sec);
_cur_collection_pause_used_at_start_bytes = start_used;
_cur_collection_pause_used_regions_at_start = _g1->used_regions();
_pending_cards = _g1->pending_card_num();

_collection_set_bytes_used_before = 0;
_bytes_copied_during_gc = 0;

YoungList* young_list = _g1->young_list();
_eden_bytes_before_gc = young_list->eden_used_bytes();
_survivor_bytes_before_gc = young_list->survivor_used_bytes();
_capacity_before_gc = _g1->capacity();

_last_gc_was_young = false;

// do that for any other surv rate groups
@@ -1153,6 +1147,21 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
byte_size_in_proper_unit((double)(bytes)), \
proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start() {
YoungList* young_list = _g1->young_list();
_eden_bytes_before_gc = young_list->eden_used_bytes();
_survivor_bytes_before_gc = young_list->survivor_used_bytes();
_capacity_before_gc = _g1->capacity();

_cur_collection_pause_used_at_start_bytes = _g1->used();
_cur_collection_pause_used_regions_at_start = _g1->used_regions();

size_t eden_capacity_before_gc =
(_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;

_prev_eden_capacity = eden_capacity_before_gc;
}

void G1CollectorPolicy::print_heap_transition() {
_g1->print_size_transition(gclog_or_tty,
_cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
@@ -1183,8 +1192,6 @@ void G1CollectorPolicy::print_detailed_heap_transition() {
EXT_SIZE_PARAMS(_capacity_before_gc),
EXT_SIZE_PARAMS(used),
EXT_SIZE_PARAMS(capacity));

_prev_eden_capacity = eden_capacity;
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
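
The new record_heap_size_info_at_start() derives the eden capacity as the young-list target size in bytes minus the survivor bytes. A small worked example of that arithmetic, with made-up numbers:

    // Illustrative arithmetic only: the values below are invented; only the
    // formula mirrors the hunk above.
    #include <cstdio>

    int main() {
      const size_t region_size_bytes        = 1 * 1024 * 1024; // stand-in for HeapRegion::GrainBytes
      const size_t young_list_target_length = 64;              // regions the policy is aiming for
      const size_t survivor_bytes_before_gc = 8 * 1024 * 1024; // survivor space counts against the target

      // eden capacity = (target young size in bytes) - survivor bytes
      size_t eden_capacity_before_gc =
          (young_list_target_length * region_size_bytes) - survivor_bytes_before_gc;

      std::printf("eden capacity before GC: %zu MB\n",
                  eden_capacity_before_gc / (1024 * 1024));    // 64 MB - 8 MB = 56 MB
      return 0;
    }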

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -671,34 +671,36 @@ public:

bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

// Update the heuristic info to record a collection pause of the given
// start time, where the given number of bytes were used at the start.
// This may involve changing the desired size of a collection set.
// Record the start and end of an evacuation pause.
void record_collection_pause_start(double start_time_sec);
void record_collection_pause_end(double pause_time_ms);

void record_stop_world_start();

void record_collection_pause_start(double start_time_sec, size_t start_used);
// Record the start and end of a full collection.
void record_full_collection_start();
void record_full_collection_end();

// Must currently be called while the world is stopped.
void record_concurrent_mark_init_end(double
mark_init_elapsed_time_ms);
void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

// Record start and end of remark.
void record_concurrent_mark_remark_start();
void record_concurrent_mark_remark_end();

// Record start, end, and completion of cleanup.
void record_concurrent_mark_cleanup_start();
void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
void record_concurrent_mark_cleanup_completed();

void record_concurrent_pause();
// Records the information about the heap size for reporting in
// print_detailed_heap_transition
void record_heap_size_info_at_start();

void record_collection_pause_end(double pause_time);
// Print heap sizing transition (with less and more detail).
void print_heap_transition();
void print_detailed_heap_transition();

// Record the fact that a full collection occurred.
void record_full_collection_start();
void record_full_collection_end();
void record_stop_world_start();
void record_concurrent_pause();

// Record how much space we copied during a GC. This is typically
// called when a GC alloc region is being retired.
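
The regrouped declarations suggest the order in which a caller drives these hooks: a stop-the-world request, then the pause start/end pair (or the full-collection pair), with heap-size recording handled inside the policy. A sketch of that call order, using a hypothetical stand-in class:

    // Illustrative sketch only: a stand-in Policy exposing the same hook
    // names, exercised in the order implied by the declarations above.
    #include <cstdio>

    struct Policy {
      void record_stop_world_start()                    { std::printf("stop-the-world requested\n"); }
      void record_collection_pause_start(double t_sec)  { std::printf("pause start at %.3f s\n", t_sec); }
      void record_collection_pause_end(double pause_ms) { std::printf("pause end, %.1f ms\n", pause_ms); }
      void record_full_collection_start()               { std::printf("full GC start\n"); }
      void record_full_collection_end()                 { std::printf("full GC end\n"); }
    };

    int main() {
      Policy policy;

      // Evacuation pause: stop-world bracket around the start/end hooks.
      policy.record_stop_world_start();
      policy.record_collection_pause_start(1.250);
      // ... evacuation work ...
      policy.record_collection_pause_end(23.5);

      // Full collection: its own start/end pair.
      policy.record_full_collection_start();
      // ... mark-sweep-compact work ...
      policy.record_full_collection_end();
      return 0;
    }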