8262068: Improve G1 Full GC by skipping compaction for regions with high survival ratio

Co-authored-by: Shoubing Ma <mashoubing1@huawei.com>
Reviewed-by: sjohanss, ayang

commit be0d46c142 (parent f71be8b5d7)
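
In outline: during the prepare phase of a full collection, G1 now skips compaction
for any region whose live data exceeds a threshold derived from MarkSweepDeadRatio,
treating that region as pinned for the rest of the cycle. The sketch below condenses
the policy from the hunks that follow (G1FullGCScope::region_compaction_threshold()
and G1CalculatePointersClosure::should_compact()); it is a reading aid, not code
lifted verbatim from the patch:

    // Threshold is a number of live words per region. A maximally compacting
    // collection sets it to a full region, so no region can exceed it.
    size_t threshold = do_maximum_compaction
        ? HeapRegion::GrainWords
        : (1 - MarkSweepDeadRatio / 100.0) * HeapRegion::GrainWords;
    // A non-pinned region is compacted only if its live data fits under the
    // threshold; otherwise it is force-pinned and left in place.
    bool compact = !hr->is_pinned() && live_words(hr->hrm_index()) <= threshold;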

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -1088,7 +1088,8 @@ void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_tr
 }
 
 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
-                                         bool clear_all_soft_refs) {
+                                         bool clear_all_soft_refs,
+                                         bool do_maximum_compaction) {
   assert_at_safepoint_on_vm_thread();
 
   if (GCLocker::check_active_before_gc()) {
@@ -1099,7 +1100,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
       soft_ref_policy()->should_clear_all_soft_refs();
 
-  G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
+  G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs, do_maximum_compaction);
   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
 
   collector.prepare_collection();
@@ -1114,8 +1115,12 @@ void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
   // Currently, there is no facility in the do_full_collection(bool) API to notify
   // the caller that the collection did not succeed (e.g., because it was locked
   // out by the GC locker). So, right now, we'll ignore the return value.
+  // When clear_all_soft_refs is set we want to do a maximum compaction
+  // not leaving any dead wood.
+  bool do_maximum_compaction = clear_all_soft_refs;
   bool dummy = do_full_collection(true,                /* explicit_gc */
-                                  clear_all_soft_refs);
+                                  clear_all_soft_refs,
+                                  do_maximum_compaction);
 }
 
 void G1CollectedHeap::resize_heap_if_necessary() {
@@ -1157,9 +1162,13 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
   }
 
   if (do_gc) {
+    // When clear_all_soft_refs is set we want to do a maximum compaction
+    // not leaving any dead wood.
+    bool do_maximum_compaction = clear_all_soft_refs;
     // Expansion didn't work, we'll try to do a Full GC.
     *gc_succeeded = do_full_collection(false, /* explicit_gc */
-                                       clear_all_soft_refs);
+                                       clear_all_soft_refs,
+                                       do_maximum_compaction);
   }
 
   return NULL;
@@ -2871,7 +2880,8 @@ bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_
   if (should_upgrade_to_full_gc(gc_cause())) {
     log_info(gc, ergo)("Attempting maximally compacting collection");
     bool result = do_full_collection(false /* explicit gc */,
-                                     true /* clear_all_soft_refs */);
+                                     true /* clear_all_soft_refs */,
+                                     false /* do_maximum_compaction */);
     // do_full_collection only fails if blocked by GC locker, but
     // we've already checked for that above.
     assert(result, "invariant");

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

@@ -499,10 +499,13 @@ private:
   //   otherwise it's for a failed allocation.
   // - if clear_all_soft_refs is true, all soft references should be
   //   cleared during the GC.
+  // - if do_maximum_compaction is true, full gc will do a maximally
+  //   compacting collection, leaving no dead wood.
   // - it returns false if it is unable to do the collection due to the
   //   GC locker being active, true otherwise.
   bool do_full_collection(bool explicit_gc,
-                          bool clear_all_soft_refs);
+                          bool clear_all_soft_refs,
+                          bool do_maximum_compaction);
 
   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
   virtual void do_full_collection(bool clear_all_soft_refs);
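
Across this patch the new argument is threaded through three kinds of call sites:
the two soft-reference-clearing paths in g1CollectedHeap.cpp derive
do_maximum_compaction from clear_all_soft_refs, while VM_G1CollectFull
(g1VMOperations.cpp below) and the pause upgraded to a maximally compacting
collection both pass false, keeping skip-compaction enabled.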

src/hotspot/share/gc/g1/g1FullCollector.cpp

@@ -105,9 +105,12 @@ uint G1FullCollector::calc_active_workers() {
   return worker_count;
 }
 
-G1FullCollector::G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs) :
+G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
+                                 bool explicit_gc,
+                                 bool clear_soft_refs,
+                                 bool do_maximum_compaction) :
     _heap(heap),
-    _scope(heap->g1mm(), explicit_gc, clear_soft_refs),
+    _scope(heap->g1mm(), explicit_gc, clear_soft_refs, do_maximum_compaction),
     _num_workers(calc_active_workers()),
     _oop_queue_set(_num_workers),
     _array_queue_set(_num_workers),
@@ -225,15 +228,15 @@ void G1FullCollector::complete_collection() {
   _heap->print_heap_after_full_collection(scope()->heap_transition());
 }
 
-void G1FullCollector::update_attribute_table(HeapRegion* hr) {
+void G1FullCollector::update_attribute_table(HeapRegion* hr, bool force_pinned) {
   if (hr->is_free()) {
-    return;
-  }
-  if (hr->is_closed_archive()) {
+    _region_attr_table.set_invalid(hr->hrm_index());
+  } else if (hr->is_closed_archive()) {
     _region_attr_table.set_closed_archive(hr->hrm_index());
-  } else if (hr->is_pinned()) {
+  } else if (hr->is_pinned() || force_pinned) {
     _region_attr_table.set_pinned(hr->hrm_index());
   } else {
+    // Everything else is processed normally.
    _region_attr_table.set_normal(hr->hrm_index());
   }
 }

src/hotspot/share/gc/g1/g1FullCollector.hpp

@@ -78,7 +78,10 @@ class G1FullCollector : StackObj {
   G1FullGCHeapRegionAttr _region_attr_table;
 
 public:
-  G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs);
+  G1FullCollector(G1CollectedHeap* heap,
+                  bool explicit_gc,
+                  bool clear_soft_refs,
+                  bool do_maximum_compaction);
   ~G1FullCollector();
 
   void prepare_collection();
@@ -95,11 +98,16 @@ public:
   G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
   G1CMBitMap* mark_bitmap();
   ReferenceProcessor* reference_processor();
+  size_t live_words(uint region_index) {
+    assert(region_index < _heap->max_regions(), "sanity");
+    return _live_stats[region_index]._live_words;
+  }
 
-  void update_attribute_table(HeapRegion* hr);
+  void update_attribute_table(HeapRegion* hr, bool force_pinned = false);
 
   inline bool is_in_pinned_or_closed(oop obj) const;
   inline bool is_in_pinned(oop obj) const;
+  inline bool is_in_pinned(uint region_index) const;
   inline bool is_in_closed(oop obj) const;
 
 private:

src/hotspot/share/gc/g1/g1FullCollector.inline.hpp

@@ -37,6 +37,10 @@ bool G1FullCollector::is_in_pinned(oop obj) const {
   return _region_attr_table.is_pinned(cast_from_oop<HeapWord*>(obj));
 }
 
+bool G1FullCollector::is_in_pinned(uint region_index) const {
+  return _region_attr_table.is_pinned(region_index);
+}
+
 bool G1FullCollector::is_in_closed(oop obj) const {
   return _region_attr_table.is_closed_archive(cast_from_oop<HeapWord*>(obj));
 }

src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp

@@ -26,6 +26,7 @@
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 #include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullCollector.inline.hpp"
 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 #include "gc/g1/g1FullGCCompactTask.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
@@ -35,18 +36,21 @@
 #include "utilities/ticks.hpp"
 
 class G1ResetPinnedClosure : public HeapRegionClosure {
-  G1CMBitMap* _bitmap;
+  G1FullCollector* _collector;
 
 public:
-  G1ResetPinnedClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) { }
+  G1ResetPinnedClosure(G1FullCollector* collector) : _collector(collector) { }
 
   bool do_heap_region(HeapRegion* r) {
-    if (!r->is_pinned()) {
+    uint region_index = r->hrm_index();
+    if (!_collector->is_in_pinned(region_index)) {
       return false;
     }
-    assert(!r->is_starts_humongous() || _bitmap->is_marked(cast_to_oop(r->bottom())),
+    assert(_collector->live_words(region_index) > _collector->scope()->region_compaction_threshold() ||
+           !r->is_starts_humongous() ||
+           _collector->mark_bitmap()->is_marked(cast_to_oop(r->bottom())),
           "must be, otherwise reclaimed earlier");
-    r->reset_pinned_after_full_gc();
+    r->reset_not_compacted_after_full_gc();
    return false;
   }
 };
@@ -91,7 +95,7 @@ void G1FullGCCompactTask::work(uint worker_id) {
     compact_region(*it);
   }
 
-  G1ResetPinnedClosure hc(collector()->mark_bitmap());
+  G1ResetPinnedClosure hc(collector());
   G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&hc, &_claimer, worker_id);
   log_task("Compaction task", worker_id, start);
 }

src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp

@@ -31,6 +31,11 @@
 // fast access during the full collection. In particular some parts of the region
 // type information is encoded in these per-region bytes.
 // Value encoding has been specifically chosen to make required accesses fast.
+// In particular, the table collects whether a region should be considered pinned
+// during full gc (only). There are two reasons a region is pinned (and excluded
+// from compaction):
+// (1) the HeapRegion itself has been pinned at the start of Full GC.
+// (2) the occupancy of the region is too high to be considered eligible for compaction.
 class G1FullGCHeapRegionAttr : public G1BiasedMappedArray<uint8_t> {
   static const uint8_t Normal = 0;        // Other kind of region
   static const uint8_t Pinned = 1;        // Region is a pinned (non-Closed Archive) region
@@ -48,6 +53,8 @@ protected:
   uint8_t default_value() const { return Invalid; }
 
 public:
+  void set_invalid(uint idx) { set_by_index(idx, Invalid); }
+
   void set_closed_archive(uint idx) { set_by_index(idx, ClosedArchive); }
 
   bool is_closed_archive(HeapWord* obj) const {
@@ -67,6 +74,10 @@ public:
     return get_by_address(obj) == Pinned;
   }
 
+  bool is_pinned(uint idx) const {
+    return get_by_index(idx) == Pinned;
+  }
+
   void set_normal(uint idx) { set_by_index(idx, Normal); }
 
   bool is_normal(HeapWord* obj) const {
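
Two small additions to this table support per-region rather than per-object
queries: set_invalid() lets update_attribute_table() record free regions
explicitly instead of returning early, and the index-based is_pinned(uint)
overload serves G1ResetPinnedClosure above, which walks HeapRegions by
hrm_index() rather than by an object address inside the region.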

src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp

@@ -40,7 +40,11 @@
 #include "utilities/ticks.hpp"
 
 bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
-  if (hr->is_pinned()) {
+  bool force_pinned = false;
+  if (should_compact(hr)) {
+    assert(!hr->is_humongous(), "moving humongous objects not supported.");
+    prepare_for_compaction(hr);
+  } else {
     // There is no need to iterate and forward objects in pinned regions ie.
     // prepare them for compaction. The adjust pointers phase will skip
     // work for them.
@@ -54,20 +58,23 @@ bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion*
       if (is_empty) {
         free_open_archive_region(hr);
       }
+    } else if (hr->is_closed_archive()) {
+      // nothing to do with closed archive region
     } else {
-      // There are no other pinned regions than humongous or all kinds of archive regions
-      // at this time.
-      assert(hr->is_closed_archive(), "Only closed archive regions can also be pinned.");
+      assert(MarkSweepDeadRatio > 0,
+             "should not trigger skipping compaction when MarkSweepDeadRatio == 0");
+
+      // Force regions with a high live ratio to be pinned, as we need to skip
+      // them in the later compaction step.
+      force_pinned = true;
+      log_debug(gc, phases)("Phase 2: skip compaction region index: %u, live words: " SIZE_FORMAT,
+                            hr->hrm_index(), _collector->live_words(hr->hrm_index()));
     }
-  } else {
-    assert(!hr->is_humongous(), "moving humongous objects not supported.");
-    prepare_for_compaction(hr);
   }
 
   // Reset data structures not valid after Full GC.
   reset_region_metadata(hr);
-  _collector->update_attribute_table(hr);
+  _collector->update_attribute_table(hr, force_pinned);
 
   return false;
 }
@@ -140,6 +147,16 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::free_open_archive_region(H
   dummy_free_list.remove_all();
 }
 
+bool G1FullGCPrepareTask::G1CalculatePointersClosure::should_compact(HeapRegion* hr) {
+  if (hr->is_pinned()) {
+    return false;
+  }
+  size_t live_words = _collector->live_words(hr->hrm_index());
+  size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
+  // High live ratio regions will not be compacted.
+  return live_words <= live_words_threshold;
+}
+
 void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
   hr->rem_set()->clear();
   hr->clear_cardtable();
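
The assert(MarkSweepDeadRatio > 0) in the skip branch above follows from the
threshold arithmetic: with MarkSweepDeadRatio == 0 the threshold computed in
g1FullGCScope.cpp equals HeapRegion::GrainWords, and since a region can never
hold more live words than a full region, should_compact() returns true for
every non-pinned region and the skip branch is unreachable.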

src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp

@@ -58,6 +58,7 @@ protected:
     G1FullGCCompactionPoint* _cp;
     bool _regions_freed;
 
+    bool should_compact(HeapRegion* hr);
    virtual void prepare_for_compaction(HeapRegion* hr);
    void prepare_for_compaction_work(G1FullGCCompactionPoint* cp, HeapRegion* hr);
    void free_humongous_region(HeapRegion* hr);

src/hotspot/share/gc/g1/g1FullGCScope.cpp

@@ -25,7 +25,10 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1FullGCScope.hpp"
 
-G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support, bool explicit_gc, bool clear_soft) :
+G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support,
+                             bool explicit_gc,
+                             bool clear_soft,
+                             bool do_maximum_compaction) :
     _rm(),
     _explicit_gc(explicit_gc),
     _g1h(G1CollectedHeap::heap()),
@@ -37,7 +40,10 @@ G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support, bool expli
     _cpu_time(),
     _soft_refs(clear_soft, _g1h->soft_ref_policy()),
     _monitoring_scope(monitoring_support, true /* full_gc */, true /* all_memory_pools_affected */),
-    _heap_transition(_g1h) {
+    _heap_transition(_g1h),
+    _region_compaction_threshold(do_maximum_compaction ?
+                                 HeapRegion::GrainWords :
+                                 (1 - MarkSweepDeadRatio / 100.0) * HeapRegion::GrainWords) {
   _timer.register_gc_start();
   _tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start());
   _g1h->pre_full_gc_dump(&_timer);
@@ -75,3 +81,7 @@ G1FullGCTracer* G1FullGCScope::tracer() {
 G1HeapTransition* G1FullGCScope::heap_transition() {
   return &_heap_transition;
 }
+
+size_t G1FullGCScope::region_compaction_threshold() {
+  return _region_compaction_threshold;
+}
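
As a worked example (assuming 64-bit HeapWords, so HeapRegion::GrainWords is
131072 for the 1 MB regions the new test configures):

    // -XX:MarkSweepDeadRatio=3, -XX:G1HeapRegionSize=1m
    threshold = (1 - 3 / 100.0) * 131072 = 127139 live words  (~993 KB)

A region is therefore left uncompacted once roughly 97% or more of it is live,
while do_maximum_compaction raises the threshold to the full 131072 words so
that no region is ever skipped.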

src/hotspot/share/gc/g1/g1FullGCScope.hpp

@@ -53,9 +53,13 @@ class G1FullGCScope : public StackObj {
   ClearedAllSoftRefs _soft_refs;
   G1MonitoringScope _monitoring_scope;
   G1HeapTransition _heap_transition;
+  size_t _region_compaction_threshold;
 
 public:
-  G1FullGCScope(G1MonitoringSupport* monitoring_support, bool explicit_gc, bool clear_soft);
+  G1FullGCScope(G1MonitoringSupport* monitoring_support,
+                bool explicit_gc,
+                bool clear_soft,
+                bool do_maximal_compaction);
   ~G1FullGCScope();
 
   bool is_explicit_gc();
@@ -64,6 +68,7 @@ public:
   STWGCTimer* timer();
   G1FullGCTracer* tracer();
   G1HeapTransition* heap_transition();
+  size_t region_compaction_threshold();
 };
 
 #endif // SHARE_GC_G1_G1FULLGCSCOPE_HPP

src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp

@@ -46,8 +46,8 @@ G1RegionMarkStatsCache::~G1RegionMarkStatsCache() {
 }
 
 void G1RegionMarkStatsCache::add_live_words(oop obj) {
-  uint hr_index = G1CollectedHeap::heap()->addr_to_region(cast_from_oop<HeapWord*>(obj));
-  add_live_words(hr_index, (size_t) obj->size());
+  uint region_index = G1CollectedHeap::heap()->addr_to_region(cast_from_oop<HeapWord*>(obj));
+  add_live_words(region_index, (size_t) obj->size());
 }
 
 // Evict all remaining statistics, returning cache hits and misses.

src/hotspot/share/gc/g1/g1VMOperations.cpp

@@ -39,7 +39,9 @@
 void VM_G1CollectFull::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
-  _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */, false /* clear_all_soft_refs */);
+  _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */,
+                                          false /* clear_all_soft_refs */,
+                                          false /* do_maximum_compaction */);
 }
 
 VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,

src/hotspot/share/gc/g1/heapRegion.hpp

@@ -171,7 +171,7 @@ public:
   // Update heap region that has been compacted to be consistent after Full GC.
   void reset_compacted_after_full_gc();
   // Update pinned heap region (not compacted) to be consistent after Full GC.
-  void reset_pinned_after_full_gc();
+  void reset_not_compacted_after_full_gc();
 
   // All allocated blocks are occupied by objects in a HeapRegion
   bool block_is_obj(const HeapWord* p) const;

src/hotspot/share/gc/g1/heapRegion.inline.hpp

@@ -198,9 +198,8 @@ inline void HeapRegion::reset_compacted_after_full_gc() {
   reset_after_full_gc_common();
 }
 
-inline void HeapRegion::reset_pinned_after_full_gc() {
+inline void HeapRegion::reset_not_compacted_after_full_gc() {
   assert(!is_free(), "should not have compacted free region");
-  assert(is_pinned(), "must be");
 
   assert(compaction_top() == bottom(),
          "region %u compaction_top " PTR_FORMAT " must not be different from bottom " PTR_FORMAT,

src/hotspot/share/gc/shared/gc_globals.hpp

@@ -671,7 +671,11 @@
           "Par compact uses a variable scale based on the density of the "  \
           "generation and treats this as the maximum value when the heap "  \
           "is either completely full or completely empty. Par compact "     \
-          "also has a smaller default value; see arguments.cpp.")           \
+          "also has a smaller default value; see arguments.cpp. "           \
+          "G1 full gc treats this as an allowed garbage threshold to skip " \
+          "compaction of heap regions, i.e. if a heap region has less "     \
+          "garbage than this value, then the region will not be compacted " \
+          "during G1 full GC.")                                             \
          range(0, 100)                                                      \
                                                                             \
  product(uint, MarkSweepAlwaysCompactCount, 4,                              \

test/hotspot/jtreg/gc/g1/TestG1SkipCompaction.java (new file, 87 lines)

@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * @test TestG1SkipCompaction
+ * @summary Test for JDK-8262068 Improve G1 Full GC by skipping compaction
+ *          for regions with high survival ratio.
+ * @requires vm.gc.G1
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run main/othervm -Xms256m -Xmx256m TestG1SkipCompaction
+ */
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestG1SkipCompaction {
+    public static void runTest() throws Exception {
+        final String[] arguments = {
+            "-XX:+UseG1GC",
+            "-XX:MarkSweepDeadRatio=3",
+            "-Xmx8m",
+            "-Xms8M",
+            "-Xlog:gc+phases=debug",
+            "-XX:G1HeapRegionSize=1m",
+            GCTest.class.getName()
+        };
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(arguments);
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        System.out.println(output.getStdout());
+
+        String pattern = ".*skip compaction region.*";
+        Pattern r = Pattern.compile(pattern);
+        Matcher m = r.matcher(output.getStdout());
+
+        if (!m.find()) {
+            throw new RuntimeException("Could not find any skipped compaction region output");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        runTest();
+    }
+
+    static class GCTest {
+        public static List<char[]> memory;
+        public static void main(String[] args) throws Exception {
+            memory = new ArrayList<>();
+            try {
+                while (true) {
+                    memory.add(new char[8 * 1024]);
+                    System.gc();
+                }
+            } catch (OutOfMemoryError e) {
+                memory = null;
+                System.gc();
+            }
+        }
+    }
+}
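
The test fills an 8 MB heap with surviving 16 KB char arrays while repeatedly
invoking System.gc(), so full collections see regions packed above the 97% live
threshold implied by -XX:MarkSweepDeadRatio=3. It then requires that the
-Xlog:gc+phases=debug output contains at least one line emitted by the
log_debug call added in g1FullGCPrepareTask.cpp, of the form (index and word
count illustrative only):

    Phase 2: skip compaction region index: 12, live words: 130450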
Loading…
Reference in New Issue
Block a user