commit 01ba4cd795
Merge
@@ -1816,9 +1816,7 @@ void ConcurrentMark::cleanup() {
   // this will also free any regions totally full of garbage objects,
   // and sort the regions.
-  g1h->g1_policy()->record_concurrent_mark_cleanup_end(
-                        g1_par_note_end_task.freed_bytes(),
-                        g1_par_note_end_task.max_live_bytes());
+  g1h->g1_policy()->record_concurrent_mark_cleanup_end();
 
   // Statistics.
   double end = os::elapsedTime();
@@ -215,19 +215,19 @@ void ConcurrentMarkThread::run() {
           gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
         }
 
-        // Now do the remainder of the cleanup operation.
+        // Now do the concurrent cleanup operation.
         _cm->completeCleanup();
-        // Notify anyone who's waiting that there are no more free
-        // regions coming. We have to do this before we join the STS,
-        // otherwise we might deadlock: a GC worker could be blocked
-        // waiting for the notification whereas this thread will be
-        // blocked for the pause to finish while it's trying to join
-        // the STS, which is conditional on the GC workers finishing.
-        g1h->reset_free_regions_coming();
 
-        _sts.join();
-        g1_policy->record_concurrent_mark_cleanup_completed();
-        _sts.leave();
+        // Notify anyone who's waiting that there are no more free
+        // regions coming. We have to do this before we join the STS
+        // (in fact, we should not attempt to join the STS in the
+        // interval between finishing the cleanup pause and clearing
+        // the free_regions_coming flag) otherwise we might deadlock:
+        // a GC worker could be blocked waiting for the notification
+        // whereas this thread will be blocked for the pause to finish
+        // while it's trying to join the STS, which is conditional on
+        // the GC workers finishing.
+        g1h->reset_free_regions_coming();
 
         double cleanup_end_sec = os::elapsedTime();
         if (PrintGC) {
@@ -240,6 +240,36 @@ void ConcurrentMarkThread::run() {
       guarantee(cm()->cleanup_list_is_empty(),
                 "at this point there should be no regions on the cleanup list");
 
+      // There is a tricky race before recording that the concurrent
+      // cleanup has completed and a potential Full GC starting around
+      // the same time. We want to make sure that the Full GC calls
+      // abort() on concurrent mark after
+      // record_concurrent_mark_cleanup_completed(), since abort() is
+      // the method that will reset the concurrent mark state. If we
+      // end up calling record_concurrent_mark_cleanup_completed()
+      // after abort() then we might incorrectly undo some of the work
+      // abort() did. Checking the has_aborted() flag after joining
+      // the STS allows the correct ordering of the two methods. There
+      // are two scenarios:
+      //
+      // a) If we reach here before the Full GC, the fact that we have
+      // joined the STS means that the Full GC cannot start until we
+      // leave the STS, so record_concurrent_mark_cleanup_completed()
+      // will complete before abort() is called.
+      //
+      // b) If we reach here during the Full GC, we'll be held up from
+      // joining the STS until the Full GC is done, which means that
+      // abort() will have completed and has_aborted() will return
+      // true to prevent us from calling
+      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
+      // not needed any more as the concurrent mark state has been
+      // already reset).
+      _sts.join();
+      if (!cm()->has_aborted()) {
+        g1_policy->record_concurrent_mark_cleanup_completed();
+      }
+      _sts.leave();
+
       if (cm()->has_aborted()) {
         if (PrintGC) {
           gclog_or_tty->date_stamp(PrintGCDateStamps);
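Note: the ordering argument in the comment above follows a general pattern: enter the mutual-exclusion region first, then re-check the abort flag before publishing completion. A minimal standalone sketch of the idiom in plain C++ (std::mutex and std::atomic stand in for the suspendible thread set and the concurrent-mark state; the names are illustrative, not the HotSpot API):

    #include <atomic>
    #include <mutex>

    std::mutex sts;                     // "joining the STS" excludes the Full GC
    std::atomic<bool> aborted(false);   // set by the Full GC's abort()
    std::atomic<bool> completed(false); // must never be set after abort()

    void record_completed() { completed.store(true); }

    void cleanup_thread() {
      std::lock_guard<std::mutex> join(sts); // scenario (a): Full GC must now wait
      if (!aborted.load()) {                 // scenario (b): abort() already ran
        record_completed();
      }
    }                                        // leaving the "STS" unblocks the Full GC

Either the cleanup thread enters the critical section first, so the publish happens strictly before abort(), or the Full GC does, so the flag check suppresses the publish.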
@@ -248,7 +278,7 @@ void ConcurrentMarkThread::run() {
         }
       }
 
-      // we now want to allow clearing of the marking bitmap to be
+      // We now want to allow clearing of the marking bitmap to be
       // suspended by a collection pause.
       _sts.join();
       _cm->clearNextBitmap();
@@ -2011,8 +2011,6 @@ jint G1CollectedHeap::initialize() {
   // Perform any initialization actions delegated to the policy.
   g1_policy()->init();
 
-  g1_policy()->note_start_of_mark_thread();
-
   _refine_cte_cl =
     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
                                     g1_rem_set(),
@@ -3960,9 +3958,6 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
         // _next_top_at_mark_start == top, _next_marked_bytes == 0
         // _next_marked_bytes == next_marked_bytes.
       }
-
-      // Now make sure the region has the right index in the sorted array.
-      g1_policy()->note_change_in_marked_bytes(cur);
     }
     cur = cur->next_in_collection_set();
   }
@@ -5073,7 +5068,7 @@ public:
 
     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
     for (int idx = i; idx < limit; idx += stride) {
-      DiscoveredList& ref_list = rp->discovered_soft_refs()[idx];
+      DiscoveredList& ref_list = rp->discovered_refs()[idx];
 
       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
      while (iter.has_next()) {
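Note: the surrounding loop is the usual strided partitioning of parallel work: worker i of stride workers claims lists i, i+stride, i+2*stride, ..., so all limit lists are covered exactly once with no coordination. A hedged sketch of the same scheme outside HotSpot (generic worker function, illustrative names):

    #include <vector>

    // Worker `i` of `stride` touches every stride-th list; across all
    // workers, indices [0, limit) are each processed exactly once.
    void process_my_lists(std::vector<int>& lists, int i, int stride) {
      int limit = (int) lists.size();
      for (int idx = i; idx < limit; idx += stride) {
        lists[idx] += 1;  // stand-in for scanning discovered_refs()[idx]
      }
    }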
@@ -225,16 +225,12 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
   _recent_avg_pause_time_ratio(0.0),
-  _num_markings(0),
-  _n_marks(0),
-  _n_pauses_at_mark_end(0),
 
   _all_full_gc_times_ms(new NumberSeq()),
 
   // G1PausesBtwnConcMark defaults to -1
   // so the hack is to do the cast QQQ FIXME
   _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
-  _n_marks_since_last_pause(0),
   _initiate_conc_mark_if_possible(false),
   _during_initial_mark_pause(false),
   _should_revert_to_full_young_gcs(false),
@@ -440,6 +436,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _reserve_regions = 0;
 
   initialize_all();
+  _collectionSetChooser = new CollectionSetChooser();
 }
 
 // Increment "i", mod "len"
@@ -921,6 +918,7 @@ void G1CollectorPolicy::record_full_collection_end() {
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
+  _collectionSetChooser->updateAfterFullCollection();
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
@@ -1029,39 +1027,7 @@ void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
   _mark_cleanup_start_sec = os::elapsedTime();
 }
 
-void
-G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                      size_t max_live_bytes) {
-  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
-  record_concurrent_mark_cleanup_end_work2();
-}
-
-void
-G1CollectorPolicy::
-record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
-                                         size_t max_live_bytes) {
-  if (_n_marks < 2) {
-    _n_marks++;
-  }
-}
-
-// The important thing about this is that it includes "os::elapsedTime".
-void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
-  double end_time_sec = os::elapsedTime();
-  double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
-  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
-  _cur_mark_stop_world_time_ms += elapsed_time_ms;
-  _prev_collection_pause_end_ms += elapsed_time_ms;
-
-  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
-
-  _num_markings++;
-  _n_pauses_at_mark_end = _n_pauses;
-  _n_marks_since_last_pause++;
-}
-
-void
-G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
+void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   _should_revert_to_full_young_gcs = false;
   _last_full_young_gc = true;
   _in_marking_window = false;
@@ -1501,11 +1467,9 @@ void G1CollectorPolicy::record_collection_pause_end() {
     summary->record_other_time_ms(other_time_ms);
   }
   for (int i = 0; i < _aux_num; ++i)
-    if (_cur_aux_times_set[i])
+    if (_cur_aux_times_set[i]) {
       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
-
-  // Reset marks-between-pauses counter.
-  _n_marks_since_last_pause = 0;
+    }
 
   // Update the efficiency-since-mark vars.
   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
@@ -1729,6 +1693,8 @@ void G1CollectorPolicy::record_collection_pause_end() {
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>
+
+  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
 
 #define EXT_SIZE_FORMAT "%d%s"
@@ -2156,10 +2122,6 @@ size_t G1CollectorPolicy::expansion_amount() {
   }
 }
 
-void G1CollectorPolicy::note_start_of_mark_thread() {
-  _mark_thread_startup_sec = os::elapsedTime();
-}
-
 class CountCSClosure: public HeapRegionClosure {
   G1CollectorPolicy* _g1_policy;
 public:
@@ -2446,7 +2408,7 @@ public:
   }
 };
 
-bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
+bool G1CollectorPolicy::assertMarkedBytesDataOK() {
   HRSortIndexIsOKClosure cl(_collectionSetChooser);
   _g1->heap_region_iterate(&cl);
   return true;
@@ -2532,12 +2494,6 @@ G1CollectorPolicy::decide_on_conc_mark_initiation() {
   }
 }
 
-void
-G1CollectorPolicy_BestRegionsFirst::
-record_collection_pause_start(double start_time_sec, size_t start_used) {
-  G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
-}
-
 class KnownGarbageClosure: public HeapRegionClosure {
   CollectionSetChooser* _hrSorted;
 
@@ -2645,20 +2601,20 @@ public:
 };
 
 void
-G1CollectorPolicy_BestRegionsFirst::
-record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                   size_t max_live_bytes) {
-  double start;
-  if (G1PrintParCleanupStats) start = os::elapsedTime();
-  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
+G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
+  double start_sec;
+  if (G1PrintParCleanupStats) {
+    start_sec = os::elapsedTime();
+  }
 
   _collectionSetChooser->clearMarkedHeapRegions();
-  double clear_marked_end;
+  double clear_marked_end_sec;
   if (G1PrintParCleanupStats) {
-    clear_marked_end = os::elapsedTime();
-    gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
-                           (clear_marked_end - start)*1000.0);
+    clear_marked_end_sec = os::elapsedTime();
+    gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
+                           (clear_marked_end_sec - start_sec) * 1000.0);
   }
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     const size_t OverpartitionFactor = 4;
     const size_t MinWorkUnit = 8;
@@ -2677,27 +2633,25 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
     _g1->heap_region_iterate(&knownGarbagecl);
   }
-  double known_garbage_end;
+  double known_garbage_end_sec;
   if (G1PrintParCleanupStats) {
-    known_garbage_end = os::elapsedTime();
+    known_garbage_end_sec = os::elapsedTime();
     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
-                           (known_garbage_end - clear_marked_end)*1000.0);
-  }
-
-  _collectionSetChooser->sortMarkedHeapRegions();
-  double sort_end;
-  if (G1PrintParCleanupStats) {
-    sort_end = os::elapsedTime();
-    gclog_or_tty->print_cr("  sorting: %8.3f ms.",
-                           (sort_end - known_garbage_end)*1000.0);
+                           (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
   }
 
-  record_concurrent_mark_cleanup_end_work2();
-  double work2_end;
+  _collectionSetChooser->sortMarkedHeapRegions();
+  double end_sec = os::elapsedTime();
   if (G1PrintParCleanupStats) {
-    work2_end = os::elapsedTime();
-    gclog_or_tty->print_cr("  work2: %8.3f ms.",
-                           (work2_end - sort_end)*1000.0);
+    gclog_or_tty->print_cr("  sorting: %8.3f ms.",
+                           (end_sec - known_garbage_end_sec) * 1000.0);
   }
+
+  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
+  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
+  _cur_mark_stop_world_time_ms += elapsed_time_ms;
+  _prev_collection_pause_end_ms += elapsed_time_ms;
+  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
 }
 
 // Add the heap region at the head of the non-incremental collection set
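Note: the tail of the rewritten method folds the old record_concurrent_mark_cleanup_end_work2() bookkeeping inline: one end-of-phase timestamp yields elapsed_time_ms, which feeds the cleanup-time history, two running stop-world accumulators, and the MMU (minimum mutator utilization) tracker. A minimal sketch of that accumulation shape (plain C++ stand-ins, not HotSpot's NumberSeq or G1MMUTracker):

    #include <vector>

    struct CleanupBookkeeping {
      std::vector<double> cleanup_times_ms; // like _concurrent_mark_cleanup_times_ms
      double mark_stop_world_ms;            // like _cur_mark_stop_world_time_ms
      double prev_pause_end_ms;             // like _prev_collection_pause_end_ms

      CleanupBookkeeping() : mark_stop_world_ms(0.0), prev_pause_end_ms(0.0) {}

      void record(double start_sec, double end_sec) {
        double elapsed_ms = (end_sec - start_sec) * 1000.0;
        cleanup_times_ms.push_back(elapsed_ms); // per-cleanup history
        mark_stop_world_ms += elapsed_ms;       // running totals
        prev_pause_end_ms  += elapsed_ms;
        // the real code also calls _mmu_tracker->add_pause(start, end, true)
      }
    };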
@@ -2912,9 +2866,7 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream
 }
 #endif // !PRODUCT
 
-void
-G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
-                                                  double target_pause_time_ms) {
+void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
@@ -3115,14 +3067,3 @@ G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
 }
-
-void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
-  G1CollectorPolicy::record_full_collection_end();
-  _collectionSetChooser->updateAfterFullCollection();
-}
-
-void G1CollectorPolicy_BestRegionsFirst::
-record_collection_pause_end() {
-  G1CollectorPolicy::record_collection_pause_end();
-  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
-}
@@ -84,7 +84,7 @@ public:
 };
 
 class G1CollectorPolicy: public CollectorPolicy {
-protected:
+private:
   // The number of pauses during the execution.
   long _n_pauses;
 
@@ -106,10 +106,7 @@ protected:
     initialize_perm_generation(PermGen::MarkSweepCompact);
   }
 
-  virtual size_t default_init_heap_size() {
-    // Pick some reasonable default.
-    return 8*M;
-  }
+  CollectionSetChooser* _collectionSetChooser;
 
   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
@@ -316,7 +313,6 @@ private:
                                      double update_rs_processed_buffers,
                                      double goal_ms);
 
-protected:
   double _pause_time_target_ms;
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
@@ -554,7 +550,7 @@ public:
     return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
   }
 
-protected:
+private:
   void print_stats(int level, const char* str, double value);
   void print_stats(int level, const char* str, int value);
 
@@ -588,10 +584,6 @@ protected:
   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
 
-  // We track markings.
-  int _num_markings;
-  double _mark_thread_startup_sec; // Time at startup of marking thread
-
   // Add a new GC of the given duration and end time to the record.
   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
 
@@ -664,12 +656,6 @@ protected:
   // young list/collection set).
   size_t _inc_cset_predicted_bytes_to_copy;
 
-  // Info about marking.
-  int _n_marks; // Sticky at 2, so we know when we've done at least 2.
-
-  // The number of collection pauses at the end of the last mark.
-  size_t _n_pauses_at_mark_end;
-
   // Stash a pointer to the g1 heap.
   G1CollectedHeap* _g1;
 
@@ -737,8 +723,6 @@ protected:
   // Number of pauses between concurrent marking.
   size_t _pauses_btwn_concurrent_mark;
 
-  size_t _n_marks_since_last_pause;
-
   // At the end of a pause we check the heap occupancy and we decide
   // whether we will start a marking cycle during the next pause. If
   // we decide that we want to do that, we will set this parameter to
@@ -810,6 +794,11 @@ protected:
   bool predict_will_fit(size_t young_length, double base_time_ms,
                         size_t base_free_regions, double target_pause_time_ms);
 
+  // Count the number of bytes used in the CS.
+  void count_CS_bytes_used();
+
+  void update_young_list_size_using_newratio(size_t number_of_heap_regions);
+
 public:
 
   G1CollectorPolicy();
@@ -836,22 +825,9 @@ public:
   // This should be called after the heap is resized.
   void record_new_heap_size(size_t new_number_of_regions);
 
-protected:
-
-  // Count the number of bytes used in the CS.
-  void count_CS_bytes_used();
-
-  // Together these do the base cleanup-recording work. Subclasses might
-  // want to put something between them.
-  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
-                                                size_t max_live_bytes);
-  void record_concurrent_mark_cleanup_end_work2();
-
-  void update_young_list_size_using_newratio(size_t number_of_heap_regions);
-
 public:
 
-  virtual void init();
+  void init();
 
   // Create jstat counters for the policy.
   virtual void initialize_gc_policy_counters();
@@ -876,10 +852,9 @@ public:
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.
 
-  virtual void record_stop_world_start();
+  void record_stop_world_start();
 
-  virtual void record_collection_pause_start(double start_time_sec,
-                                             size_t start_used);
+  void record_collection_pause_start(double start_time_sec, size_t start_used);
 
   // Must currently be called while the world is stopped.
   void record_concurrent_mark_init_end(double
@@ -887,23 +862,22 @@ public:
 
   void record_mark_closure_time(double mark_closure_time_ms);
 
-  virtual void record_concurrent_mark_remark_start();
-  virtual void record_concurrent_mark_remark_end();
+  void record_concurrent_mark_remark_start();
+  void record_concurrent_mark_remark_end();
 
-  virtual void record_concurrent_mark_cleanup_start();
-  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                  size_t max_live_bytes);
-  virtual void record_concurrent_mark_cleanup_completed();
+  void record_concurrent_mark_cleanup_start();
+  void record_concurrent_mark_cleanup_end();
+  void record_concurrent_mark_cleanup_completed();
 
-  virtual void record_concurrent_pause();
-  virtual void record_concurrent_pause_end();
+  void record_concurrent_pause();
+  void record_concurrent_pause_end();
 
-  virtual void record_collection_pause_end();
+  void record_collection_pause_end();
   void print_heap_transition();
 
   // Record the fact that a full collection occurred.
-  virtual void record_full_collection_start();
-  virtual void record_full_collection_end();
+  void record_full_collection_start();
+  void record_full_collection_end();
 
   void record_gc_worker_start_time(int worker_i, double ms) {
     _par_last_gc_worker_start_times_ms[worker_i] = ms;
@@ -1022,7 +996,7 @@ public:
   // Choose a new collection set. Marks the chosen regions as being
   // "in_collection_set", and links them together. The head and number of
   // the collection set are available via access methods.
-  virtual void choose_collection_set(double target_pause_time_ms) = 0;
+  void choose_collection_set(double target_pause_time_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -1107,19 +1081,12 @@ public:
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  virtual size_t expansion_amount();
+  size_t expansion_amount();
 
-  // note start of mark thread
-  void note_start_of_mark_thread();
-
-  // The marked bytes of the "r" has changed; reclassify it's desirability
-  // for marking. Also asserts that "r" is eligible for a CS.
-  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
-
 #ifndef PRODUCT
   // Check any appropriate marked bytes info, asserting false if
   // something's wrong, else returning "true".
-  virtual bool assertMarkedBytesDataOK() = 0;
+  bool assertMarkedBytesDataOK();
 #endif
 
   // Print tracing information.
@@ -1182,10 +1149,10 @@ public:
     return ret;
   }
 
+private:
   //
   // Survivor regions policy.
   //
-protected:
 
   // Current tenuring threshold, set to 0 if the collector reaches the
   // maximum amount of suvivors regions.
@@ -1265,51 +1232,6 @@ public:
 
 };
 
-// This encapsulates a particular strategy for a g1 Collector.
-//
-// Start a concurrent mark when our heap size is n bytes
-// greater then our heap size was at the last concurrent
-// mark. Where n is a function of the CMSTriggerRatio
-// and the MinHeapFreeRatio.
-//
-// Start a g1 collection pause when we have allocated the
-// average number of bytes currently being freed in
-// a collection, but only if it is at least one region
-// full
-//
-// Resize Heap based on desired
-// allocation space, where desired allocation space is
-// a function of survival rate and desired future to size.
-//
-// Choose collection set by first picking all older regions
-// which have a survival rate which beats our projected young
-// survival rate. Then fill out the number of needed regions
-// with young regions.
-
-class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
-  CollectionSetChooser* _collectionSetChooser;
-
-  virtual void choose_collection_set(double target_pause_time_ms);
-  virtual void record_collection_pause_start(double start_time_sec,
-                                             size_t start_used);
-  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
-                                                  size_t max_live_bytes);
-  virtual void record_full_collection_end();
-
-public:
-  G1CollectorPolicy_BestRegionsFirst() {
-    _collectionSetChooser = new CollectionSetChooser();
-  }
-  void record_collection_pause_end();
-  // This is not needed any more, after the CSet choosing code was
-  // changed to use the pause prediction work. But let's leave the
-  // hook in just in case.
-  void note_change_in_marked_bytes(HeapRegion* r) { }
-#ifndef PRODUCT
-  bool assertMarkedBytesDataOK();
-#endif
-};
-
 // This should move to some place more general...
 
 // If we have "n" measurements, and we've kept track of their "sum" and the
@@ -105,19 +105,22 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
   _discovery_is_mt = mt_discovery;
   _num_q = MAX2(1, mt_processing_degree);
   _max_num_q = MAX2(_num_q, mt_discovery_degree);
-  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList,
+  _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList,
             _max_num_q * number_of_subclasses_of_ref());
-  if (_discoveredSoftRefs == NULL) {
+  if (_discovered_refs == NULL) {
     vm_exit_during_initialization("Could not allocated RefProc Array");
   }
+  _discoveredSoftRefs    = &_discovered_refs[0];
   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
-  // Initialized all entries to NULL
+
+  // Initialize all entries to NULL
   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    _discoveredSoftRefs[i].set_head(NULL);
-    _discoveredSoftRefs[i].set_length(0);
+    _discovered_refs[i].set_head(NULL);
+    _discovered_refs[i].set_length(0);
   }
 
   // If we do barriers, cache a copy of the barrier set.
   if (discovered_list_needs_barrier) {
     _bs = Universe::heap()->barrier_set();
@@ -129,7 +132,7 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    guarantee(_discoveredSoftRefs[i].is_empty(),
+    guarantee(_discovered_refs[i].is_empty(),
               "Found non-empty discovered list");
   }
 }
@@ -138,9 +141,9 @@ void ReferenceProcessor::verify_no_references_recorded() {
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (UseCompressedOops) {
-      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
+      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
     } else {
-      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
+      f->do_oop((oop*)_discovered_refs[i].adr_head());
     }
   }
 }
@@ -423,15 +426,15 @@ void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr
                                                      AbstractRefProcTaskExecutor* task_executor) {
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
-    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
+    RefProcEnqueueTask tsk(*this, _discovered_refs,
                            pending_list_addr, _max_num_q);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
     for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
-      _discoveredSoftRefs[i].set_head(NULL);
-      _discoveredSoftRefs[i].set_length(0);
+      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
+      _discovered_refs[i].set_head(NULL);
+      _discovered_refs[i].set_length(0);
     }
   }
 }
@@ -691,7 +694,7 @@ void ReferenceProcessor::abandon_partial_discovery() {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
     }
-    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
+    abandon_partial_discovered_list(_discovered_refs[i]);
   }
 }
 
@@ -952,7 +955,7 @@ void ReferenceProcessor::clean_up_discovered_references() {
         "\nScrubbing %s discovered list of Null referents",
         list_name(i));
     }
-    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
+    clean_up_discovered_reflist(_discovered_refs[i]);
   }
 }
 
@@ -1402,7 +1405,7 @@ void ReferenceProcessor::verify_ok_to_handle_reflists() {
 void ReferenceProcessor::clear_discovered_references() {
   guarantee(!_discovering_refs, "Discovering refs?");
   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    clear_discovered_references(_discoveredSoftRefs[i]);
+    clear_discovered_references(_discovered_refs[i]);
   }
 }
 
@@ -255,7 +255,11 @@ class ReferenceProcessor : public CHeapObj {
   int _num_q;
   // The maximum MT'ness degree of the queues below
   int _max_num_q;
-  // Arrays of lists of oops, one per thread
+
+  // Master array of discovered oops
+  DiscoveredList* _discovered_refs;
+
+  // Arrays of lists of oops, one per thread (pointers into master array above)
   DiscoveredList* _discoveredSoftRefs;
   DiscoveredList* _discoveredWeakRefs;
   DiscoveredList* _discoveredFinalRefs;
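Note: after this change there is a single flat allocation of _max_num_q * number_of_subclasses_of_ref() lists, and the per-kind pointers are just sections of it, which is what lets the loops above walk one master array. A small sketch of the carving (plain C++, illustrative stub types, not the HotSpot classes):

    #include <cstddef>

    struct ListStub { void* head; size_t length; };  // stand-in for DiscoveredList

    struct RefListsStub {
      static const int kKinds = 4;  // soft, weak, final, phantom
      ListStub* master;             // like _discovered_refs
      ListStub* soft;               // sections of `master`, one per kind
      ListStub* weak;
      ListStub* final_refs;
      ListStub* phantom;

      explicit RefListsStub(int max_num_q) {
        master     = new ListStub[max_num_q * kKinds]();
        soft       = &master[0];              // same carving as the patch
        weak       = &soft[max_num_q];
        final_refs = &weak[max_num_q];
        phantom    = &final_refs[max_num_q];
      }
      ~RefListsStub() { delete[] master; }
    };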
@@ -267,7 +271,8 @@ class ReferenceProcessor : public CHeapObj {
   int num_q()                      { return _num_q; }
   int max_num_q()                  { return _max_num_q; }
   void set_active_mt_degree(int v) { _num_q = v; }
-  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
+
+  DiscoveredList* discovered_refs() { return _discovered_refs; }
 
   ReferencePolicy* setup_policy(bool always_clear) {
     _current_soft_ref_policy = always_clear ?
@@ -411,6 +416,7 @@ class ReferenceProcessor : public CHeapObj {
   // constructor
   ReferenceProcessor():
     _span((HeapWord*)NULL, (HeapWord*)NULL),
+    _discovered_refs(NULL),
     _discoveredSoftRefs(NULL), _discoveredWeakRefs(NULL),
     _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
     _discovering_refs(false),
@@ -893,7 +893,7 @@ jint Universe::initialize_heap() {
 
   } else if (UseG1GC) {
 #ifndef SERIALGC
-    G1CollectorPolicy* g1p = new G1CollectorPolicy_BestRegionsFirst();
+    G1CollectorPolicy* g1p = new G1CollectorPolicy();
     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
     Universe::_collectedHeap = g1h;
 #else // SERIALGC