8265681: G1: general cleanup for G1FullGCHeapRegionAttr

Reviewed-by: tschatzl, sjohanss
Author: Albert Mingkun Yang
Date:   2021-04-26 06:32:34 +00:00
parent 578a0b3c3d
commit 56fbef0fc0
10 changed files with 66 additions and 48 deletions

@@ -160,7 +160,7 @@ public:
   bool do_heap_region(HeapRegion* hr) {
     G1CollectedHeap::heap()->prepare_region_for_full_compaction(hr);
-    _collector->update_attribute_table(hr);
+    _collector->before_marking_update_attribute_table(hr);
     return false;
   }
 };
@@ -229,16 +229,17 @@ void G1FullCollector::complete_collection() {
   _heap->print_heap_after_full_collection(scope()->heap_transition());
 }

-void G1FullCollector::update_attribute_table(HeapRegion* hr, bool force_not_compacted) {
+void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
   if (hr->is_free()) {
-    _region_attr_table.set_invalid(hr->hrm_index());
+    // Set as Invalid by default.
+    _region_attr_table.verify_is_invalid(hr->hrm_index());
   } else if (hr->is_closed_archive()) {
     _region_attr_table.set_skip_marking(hr->hrm_index());
-  } else if (hr->is_pinned() || force_not_compacted) {
-    _region_attr_table.set_not_compacted(hr->hrm_index());
+  } else if (hr->is_pinned()) {
+    _region_attr_table.set_skip_compacting(hr->hrm_index());
   } else {
-    // Everything else is processed normally.
-    _region_attr_table.set_compacted(hr->hrm_index());
+    // Everything else should be compacted.
+    _region_attr_table.set_compacting(hr->hrm_index());
   }
 }
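
The two hunks above define the attribute lifecycle this cleanup is named for: before marking, every non-free region is classified as Compacting, SkipCompacting, or SkipMarking, and a Compacting region may later be downgraded to SkipCompacting (see the prepare-task hunks below). The following self-contained C++ sketch models that lifecycle; the attribute names mirror the patch, but the table type and helper signatures here are illustrative stand-ins, not the JDK code.

#include <cassert>
#include <cstdint>
#include <vector>

enum class RegionAttr : uint8_t {
  Compacting     = 0,   // region takes part in compaction
  SkipCompacting = 1,   // live but left in place (e.g. pinned)
  SkipMarking    = 2,   // not even marked through (closed archive)
  Invalid        = 255  // free region, no attribute assigned
};

class AttrTable {
  std::vector<RegionAttr> _attr;
public:
  explicit AttrTable(size_t regions) : _attr(regions, RegionAttr::Invalid) {}

  // Mirrors before_marking_update_attribute_table(): free regions must still
  // be Invalid; closed-archive regions skip marking; pinned regions skip
  // compaction; everything else compacts.
  void before_marking(size_t idx, bool free, bool closed_archive, bool pinned) {
    if (free) {
      assert(_attr[idx] == RegionAttr::Invalid && "invariant");
    } else if (closed_archive) {
      _attr[idx] = RegionAttr::SkipMarking;
    } else if (pinned) {
      _attr[idx] = RegionAttr::SkipCompacting;
    } else {
      _attr[idx] = RegionAttr::Compacting;
    }
  }

  // Mirrors set_invalid(), used when a pinned region is freed during prepare.
  void set_invalid(size_t idx) { _attr[idx] = RegionAttr::Invalid; }

  // Mirrors update_from_compacting_to_skip_compacting(): only a region
  // currently marked Compacting may be downgraded after marking.
  void compacting_to_skip_compacting(size_t idx) {
    assert(_attr[idx] == RegionAttr::Compacting && "invariant");
    _attr[idx] = RegionAttr::SkipCompacting;
  }
};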

@@ -103,12 +103,15 @@ public:
     return _live_stats[region_index]._live_words;
   }

-  void update_attribute_table(HeapRegion* hr, bool force_not_compacted = false);
+  void before_marking_update_attribute_table(HeapRegion* hr);

-  inline bool is_compacted(oop obj) const;
-  inline bool is_compacted_or_skip_marking(uint region_index) const;
+  inline bool is_compacting(oop obj) const;
+  inline bool is_skip_compacting(uint region_index) const;
   inline bool is_skip_marking(oop obj) const;

+  inline void set_invalid(uint region_idx);
+  inline void update_from_compacting_to_skip_compacting(uint region_idx);
+
 private:
   void phase1_mark_live_objects();
   void phase2_prepare_compaction();

@ -30,17 +30,26 @@
#include "oops/oopsHierarchy.hpp"
bool G1FullCollector::is_compacted(oop obj) const {
return _region_attr_table.is_compacted(cast_from_oop<HeapWord*>(obj));
bool G1FullCollector::is_compacting(oop obj) const {
return _region_attr_table.is_compacting(cast_from_oop<HeapWord *>(obj));
}
bool G1FullCollector::is_compacted_or_skip_marking(uint region_index) const {
return _region_attr_table.is_compacted_or_skip_marking(region_index);
bool G1FullCollector::is_skip_compacting(uint region_index) const {
return _region_attr_table.is_skip_compacting(region_index);
}
bool G1FullCollector::is_skip_marking(oop obj) const {
return _region_attr_table.is_skip_marking(cast_from_oop<HeapWord*>(obj));
}
void G1FullCollector::set_invalid(uint region_idx) {
_region_attr_table.set_invalid(region_idx);
}
void G1FullCollector::update_from_compacting_to_skip_compacting(uint region_idx) {
_region_attr_table.verify_is_compacting(region_idx);
_region_attr_table.set_skip_compacting(region_idx);
}
#endif // SHARE_GC_G1_G1FULLCOLLECTOR_INLINE_HPP

@@ -35,24 +35,25 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/ticks.hpp"

-// Do work for all not-compacted regions.
-class G1ResetNotCompactedClosure : public HeapRegionClosure {
+// Do work for all skip-compacting regions.
+class G1ResetSkipCompactingClosure : public HeapRegionClosure {
   G1FullCollector* _collector;

 public:
-  G1ResetNotCompactedClosure(G1FullCollector* collector) : _collector(collector) { }
+  G1ResetSkipCompactingClosure(G1FullCollector* collector) : _collector(collector) { }

   bool do_heap_region(HeapRegion* r) {
     uint region_index = r->hrm_index();
-    // There is nothing to do for compacted or skip marking regions.
-    if (_collector->is_compacted_or_skip_marking(region_index)) {
+    // Only for skip-compaction regions; early return otherwise.
+    if (!_collector->is_skip_compacting(region_index)) {
       return false;
     }
     assert(_collector->live_words(region_index) > _collector->scope()->region_compaction_threshold() ||
-          !r->is_starts_humongous() ||
-          _collector->mark_bitmap()->is_marked(cast_to_oop(r->bottom())),
-          "must be, otherwise reclaimed earlier");
-    r->reset_not_compacted_after_full_gc();
+           !r->is_starts_humongous() ||
+           _collector->mark_bitmap()->is_marked(cast_to_oop(r->bottom())),
+           "must be, otherwise reclaimed earlier");
+    r->reset_skip_compacting_after_full_gc();
     return false;
   }
 };
@@ -97,7 +98,7 @@ void G1FullGCCompactTask::work(uint worker_id) {
     compact_region(*it);
   }

-  G1ResetNotCompactedClosure hc(collector());
+  G1ResetSkipCompactingClosure hc(collector());
   G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&hc, &_claimer, worker_id);
   log_task("Compaction task", worker_id, start);
 }
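
The renamed closure follows the usual HeapRegionClosure protocol: it is applied to every region, returns false so the iteration continues, and early-returns for regions that are not skip-compacting. Below is a compilable, simplified model of that pattern; RegionClosure, Region, and the vector-backed attribute table are stand-ins for the JDK types, not the real API.

#include <cstdint>
#include <vector>

enum class RegionAttr : uint8_t { Compacting, SkipCompacting, SkipMarking, Invalid };

struct Region {
  uint32_t index;
  bool was_reset = false;  // placeholder for the real metadata reset
};

struct RegionClosure {
  virtual ~RegionClosure() = default;
  virtual bool do_region(Region& r) = 0;  // returning true would stop iteration
};

class ResetSkipCompacting : public RegionClosure {
  const std::vector<RegionAttr>& _attr;  // stand-in for the attribute table
public:
  explicit ResetSkipCompacting(const std::vector<RegionAttr>& attr) : _attr(attr) {}

  bool do_region(Region& r) override {
    if (_attr[r.index] != RegionAttr::SkipCompacting) {
      return false;  // nothing to do for compacting or skip-marking regions
    }
    r.was_reset = true;  // stands in for reset_skip_compacting_after_full_gc()
    return false;        // keep iterating over the remaining regions
  }
};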

@@ -28,18 +28,18 @@
 #include "gc/g1/g1BiasedArray.hpp"

 // This table is used to store attribute values of all HeapRegions that need
-// fast access during the full collection. In particular some parts of the region
-// type information is encoded in these per-region bytes.
-// Value encoding has been specifically chosen to make required accesses fast.
-// In particular, the table collects whether a region should be compacted, not
-// compacted, or marking (liveness analysis) completely skipped.
+// fast access during the full collection. In particular some parts of the
+// region type information is encoded in these per-region bytes. Value encoding
+// has been specifically chosen to make required accesses fast. In particular,
+// the table specifies whether a Full GC cycle should be compacting, skip
+// compacting, or skip marking (liveness analysis) a region.
 //
 // Reasons for not compacting a region:
 // (1) the HeapRegion itself has been pinned at the start of Full GC.
 // (2) the occupancy of the region is too high to be considered eligible for compaction.
 //
 // The only examples for skipping marking for regions are Closed Archive regions.
 class G1FullGCHeapRegionAttr : public G1BiasedMappedArray<uint8_t> {
-  static const uint8_t Compacted = 0;       // Region will be compacted.
-  static const uint8_t NotCompacted = 1;    // Region should not be compacted, but otherwise handled as usual.
+  static const uint8_t Compacting = 0;      // Region will be compacted.
+  static const uint8_t SkipCompacting = 1;  // Region should not be compacted, but otherwise handled as usual.
   static const uint8_t SkipMarking = 2;     // Region contents are not even marked through, but contain live objects.

   static const uint8_t Invalid = 255;
@@ -53,23 +53,28 @@ protected:
 public:
   void set_invalid(uint idx) { set_by_index(idx, Invalid); }
-  void set_compacted(uint idx) { set_by_index(idx, Compacted); }
+  void set_compacting(uint idx) { set_by_index(idx, Compacting); }
   void set_skip_marking(uint idx) { set_by_index(idx, SkipMarking); }
-  void set_not_compacted(uint idx) { set_by_index(idx, NotCompacted); }
+  void set_skip_compacting(uint idx) { set_by_index(idx, SkipCompacting); }

   bool is_skip_marking(HeapWord* obj) const {
     assert(!is_invalid(obj), "not initialized yet");
     return get_by_address(obj) == SkipMarking;
   }

-  bool is_compacted(HeapWord* obj) const {
+  bool is_compacting(HeapWord* obj) const {
     assert(!is_invalid(obj), "not initialized yet");
-    return get_by_address(obj) == Compacted;
+    return get_by_address(obj) == Compacting;
   }

-  bool is_compacted_or_skip_marking(uint idx) const {
-    return get_by_index(idx) != NotCompacted;
+  bool is_skip_compacting(uint idx) const {
+    return get_by_index(idx) == SkipCompacting;
   }
+
+  void verify_is_compacting(uint idx) { assert(get_by_index(idx) == Compacting, "invariant"); }
+
+  void verify_is_invalid(uint idx) { assert(get_by_index(idx) == Invalid, "invariant"); }
 };

 #endif // SHARE_GC_G1_G1FULLGCHEAPREGIONATTR_HPP
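
The class above derives from G1BiasedMappedArray<uint8_t>, which is what makes both by-index and by-address lookups cheap: one byte per region, with address translation reduced to a subtraction and a shift. A rough, self-contained sketch of that idea follows; it is not the JDK's G1BiasedArray implementation, and the names and layout are assumptions.

#include <cstddef>
#include <cstdint>
#include <vector>

class BiasedByteTable {
  const uintptr_t _heap_base;     // lowest covered heap address
  const unsigned  _region_shift;  // log2(region size in bytes)
  std::vector<uint8_t> _bytes;    // one attribute byte per region
public:
  BiasedByteTable(uintptr_t heap_base, unsigned region_shift, size_t regions)
    : _heap_base(heap_base), _region_shift(region_shift),
      _bytes(regions, 255) {}  // 255 mirrors the Invalid default

  // Why predicates like is_compacting(HeapWord*) can take a raw object
  // address: the region index is derived directly from the address.
  size_t index_of(const void* addr) const {
    return (reinterpret_cast<uintptr_t>(addr) - _heap_base) >> _region_shift;
  }

  uint8_t get_by_address(const void* addr) const { return _bytes[index_of(addr)]; }
  uint8_t get_by_index(size_t idx) const { return _bytes[idx]; }
  void set_by_index(size_t idx, uint8_t value) { _bytes[idx] = value; }
};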

@@ -57,7 +57,7 @@ inline bool G1FullGCMarker::mark_object(oop obj) {
   if (obj->mark_must_be_preserved(mark) &&
       // It is not necessary to preserve marks for objects in regions we do not
       // compact because we do not change their headers (i.e. forward them).
-      _collector->is_compacted(obj)) {
+      _collector->is_compacting(obj)) {
     preserved_stack()->push(obj, mark);
   }
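
The comment in this hunk carries the key invariant: compaction overwrites an object's header with a forwarding pointer, so a header that cannot be recomputed must be saved first, but only for objects that will actually move. A simplified sketch of that rule, with all types as hypothetical stand-ins for the JDK's oop and markWord machinery:

#include <utility>
#include <vector>

struct Obj { /* stand-in for an oop */ };
struct MarkWord {
  bool must_be_preserved;  // e.g. carries a hash code or lock state
};

// Saved (object, header) pairs, restored after compaction has moved everything.
class PreservedStack {
  std::vector<std::pair<Obj*, MarkWord>> _entries;
public:
  void push(Obj* obj, MarkWord mark) { _entries.push_back({obj, mark}); }
};

// Mirrors the shape of the condition in mark_object(): preserve only when the
// header is irreplaceable AND the object lives in a compacting region.
void maybe_preserve(PreservedStack& stack, Obj* obj, MarkWord mark, bool is_compacting) {
  if (mark.must_be_preserved && is_compacting) {
    stack.push(obj, mark);
  }
}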

@@ -70,8 +70,8 @@ template <class T> inline void G1AdjustClosure::adjust_pointer(T* p) {
   oop obj = CompressedOops::decode_not_null(heap_oop);
   assert(Universe::heap()->is_in(obj), "should be in heap");

-  if (!_collector->is_compacted(obj)) {
-    // We never forward objects in non-compacted regions so there is no need to
+  if (!_collector->is_compacting(obj)) {
+    // We never forward objects in non-compacting regions so there is no need to
     // process them further.
     return;
   }
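
This early return is the payoff of the attribute table during the adjust phase: only objects in compacting regions were forwarded, so references into skip-compacting or skip-marking regions are already correct. A self-contained sketch of that reasoning, with forwarding modeled as an explicit field and all names hypothetical:

#include <cassert>
#include <cstdint>

enum class RegionAttr : uint8_t { Compacting, SkipCompacting, SkipMarking };

struct Obj {
  RegionAttr region_attr;    // attribute of the region holding this object
  Obj* forwardee = nullptr;  // new location, set only for compacting regions
};

// Mirrors the shape of G1AdjustClosure::adjust_pointer(): references into
// non-compacting regions are left untouched because their objects never move.
void adjust_pointer(Obj** p) {
  Obj* obj = *p;
  if (obj == nullptr) {
    return;
  }
  if (obj->region_attr != RegionAttr::Compacting) {
    return;  // never forwarded, nothing to rewrite
  }
  assert(obj->forwardee != nullptr && "compacting objects must be forwarded");
  *p = obj->forwardee;  // point the reference at the object's new location
}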

@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
-#include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullCollector.inline.hpp"
 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 #include "gc/g1/g1FullGCMarker.hpp"
 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
@@ -48,6 +48,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::free_pinned_region(HeapRegion* hr) {
     _g1h->free_region(hr, nullptr);
   }
   prepare_for_compaction(hr);
+  _collector->set_invalid(hr->hrm_index());
 }

 bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
@@ -76,9 +77,8 @@ bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
     assert(MarkSweepDeadRatio > 0,
            "only skip compaction for other regions when MarkSweepDeadRatio > 0");

-    // Force the high live ratio region as not-compacting to skip these regions in the
-    // later compaction step.
-    force_not_compacted = true;
+    // Too many live objects; skip compacting it.
+    _collector->update_from_compacting_to_skip_compacting(hr->hrm_index());
     if (hr->is_young()) {
       // G1 updates the BOT for old region contents incrementally, but young regions
       // lack BOT information for performance reasons.
@@ -93,7 +93,6 @@ bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
   // Reset data structures not valid after Full GC.
   reset_region_metadata(hr);

-  _collector->update_attribute_table(hr, force_not_compacted);
-
   return false;
 }
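
Putting the three prepare-task hunks together with the AttrTable sketch shown earlier (after the complete_collection() hunk): the prepare phase performs exactly the two attribute transitions visible above, and the trailing per-region table update is gone because each transition now happens at its cause. The hypothetical prepare_region driver below reuses that sketch; it is not a JDK function.

// Continuing the AttrTable sketch from earlier in this page.
void prepare_region(AttrTable& table, size_t idx,
                    bool freed_pinned_region, bool too_many_live_words) {
  if (freed_pinned_region) {
    table.set_invalid(idx);  // mirrors _collector->set_invalid(hr->hrm_index())
  } else if (too_many_live_words) {
    // mirrors _collector->update_from_compacting_to_skip_compacting(...)
    table.compacting_to_skip_compacting(idx);
  }
}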

@@ -170,8 +170,8 @@ public:
   // Update heap region that has been compacted to be consistent after Full GC.
   void reset_compacted_after_full_gc();
-  // Update pinned heap region (not compacted) to be consistent after Full GC.
-  void reset_not_compacted_after_full_gc();
+  // Update skip-compacting heap region to be consistent after Full GC.
+  void reset_skip_compacting_after_full_gc();

   // All allocated blocks are occupied by objects in a HeapRegion
   bool block_is_obj(const HeapWord* p) const;

@@ -205,7 +205,7 @@ inline void HeapRegion::reset_compacted_after_full_gc() {
   reset_after_full_gc_common();
 }

-inline void HeapRegion::reset_not_compacted_after_full_gc() {
+inline void HeapRegion::reset_skip_compacting_after_full_gc() {
   assert(!is_free(), "should not have compacted free region");

   assert(compaction_top() == bottom(),