8304393: Provide method to iterate over regions of humongous object in G1

Reviewed-by: iwalulya, ayang
Thomas Schatzl 2023-03-20 16:25:53 +00:00
parent eb73fa833c
commit 4ed7350573
7 changed files with 45 additions and 52 deletions

@@ -1084,9 +1084,10 @@ public:
   inline HeapRegion* region_at(uint index) const;
   inline HeapRegion* region_at_or_null(uint index) const;
 
-  // Return the next region (by index) that is part of the same
-  // humongous object that hr is part of.
-  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
+  // Iterate over the regions of the humongous object starting at the given
+  // region and apply the given method with the signature f(HeapRegion*) on them.
+  template <typename Func>
+  void humongous_obj_regions_iterate(HeapRegion* start, const Func& f);
 
   // Calculate the region index of the given address. Given address must be
   // within the heap.

@@ -107,8 +107,15 @@ inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at
 // Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }
 
-inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
-  return _hrm.next_region_in_humongous(hr);
+template <typename Func>
+inline void G1CollectedHeap::humongous_obj_regions_iterate(HeapRegion* start, const Func& f) {
+  assert(start->is_starts_humongous(), "must be");
+  do {
+    HeapRegion* next = _hrm.next_region_in_humongous(start);
+    f(start);
+    start = next;
+  } while (start != nullptr);
 }
 
 inline uint G1CollectedHeap::addr_to_region(const void* addr) const {
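Note: the helper fetches the next region before invoking the callback, so the callback may tear down or free the region it is handed (the eager-reclaim caller further below relies on this). A minimal standalone sketch of the same pattern, where the Region struct and its fields are illustrative stand-ins rather than HotSpot types:

#include <cassert>
#include <cstdio>
#include <vector>

// Stand-in for HeapRegion: a humongous object covers one starts-humongous
// region followed by zero or more continues-humongous regions.
struct Region {
  unsigned index;
  bool starts_humongous;
  Region* next_in_humongous; // nullptr after the object's last region
};

// Same shape as G1CollectedHeap::humongous_obj_regions_iterate: read the
// next link before calling f, so f may safely invalidate the current region.
template <typename Func>
void humongous_obj_regions_iterate(Region* start, const Func& f) {
  assert(start->starts_humongous && "must start at the first region");
  do {
    Region* next = start->next_in_humongous;
    f(start);
    start = next;
  } while (next != nullptr ? (start = next, true) : false;
}

Correction to the sketch's loop, written out plainly:

template <typename Func>
void humongous_obj_regions_iterate_plain(Region* start, const Func& f) {
  assert(start->starts_humongous && "must start at the first region");
  do {
    Region* next = start->next_in_humongous;
    f(start);
    start = next;
  } while (start != nullptr);
}

int main() {
  std::vector<Region> rs = {{4, true, nullptr}, {5, false, nullptr}, {6, false, nullptr}};
  rs[0].next_in_humongous = &rs[1];
  rs[1].next_in_humongous = &rs[2];

  humongous_obj_regions_iterate_plain(&rs[0], [](Region* r) {
    std::printf("visiting region %u\n", r->index); // visits 4, 5, 6 in order
  });
  return 0;
}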

@@ -475,7 +475,8 @@ void G1ConcurrentMark::reset() {
   _root_regions.reset();
 }
 
-void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
+void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
+  uint region_idx = r->hrm_index();
   for (uint j = 0; j < _max_num_tasks; ++j) {
     _tasks[j]->clear_mark_stats_cache(region_idx);
   }
@@ -483,21 +484,9 @@ void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
   _region_mark_stats[region_idx].clear();
 }
 
-void G1ConcurrentMark::clear_statistics(HeapRegion* r) {
-  uint const region_idx = r->hrm_index();
-  if (r->is_humongous()) {
-    assert(r->is_starts_humongous(), "Got humongous continues region here");
-    uint const size_in_regions = (uint)_g1h->humongous_obj_size_in_regions(cast_to_oop(r->humongous_start_region()->bottom())->size());
-    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
-      clear_statistics_in_region(j);
-    }
-  } else {
-    clear_statistics_in_region(region_idx);
-  }
-}
-
 void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
   assert_at_safepoint();
 
   assert(r->is_starts_humongous(), "Got humongous continues region here");
   // Need to clear mark bit of the humongous object. Doing this unconditionally is fine.
   mark_bitmap()->clear(r->bottom());
@@ -507,7 +496,10 @@ void G1ConcurrentMark::humongous_object_eagerly_reclaimed(HeapRegion* r) {
   }
 
   // Clear any statistics about the region gathered so far.
-  clear_statistics(r);
+  _g1h->humongous_obj_regions_iterate(r,
+                                      [&] (HeapRegion* r) {
+                                        clear_statistics(r);
+                                      });
 }
 
 void G1ConcurrentMark::reset_marking_for_restart() {
@@ -1124,10 +1116,7 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
   // Distribute the given marked bytes across the humongous object starting
   // with hr and note end of marking for these regions.
   void distribute_marked_bytes(HeapRegion* hr, size_t marked_bytes) {
-    uint const region_idx = hr->hrm_index();
     size_t const obj_size_in_words = cast_to_oop(hr->bottom())->size();
-    uint const num_regions_in_humongous = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words);
 
     // "Distributing" zero words means that we only note end of marking for these
     // regions.
@@ -1135,18 +1124,19 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public WorkerTask {
            "Marked bytes should either be 0 or the same as humongous object (%zu) but is %zu",
            obj_size_in_words * HeapWordSize, marked_bytes);
 
-    for (uint i = region_idx; i < (region_idx + num_regions_in_humongous); i++) {
-      HeapRegion* const r = _g1h->region_at(i);
+    auto distribute_bytes = [&] (HeapRegion* r) {
       size_t const bytes_to_add = MIN2(HeapRegion::GrainBytes, marked_bytes);
       log_trace(gc, marking)("Adding %zu bytes to humongous region %u (%s)",
-                             bytes_to_add, i, r->get_type_str());
+                             bytes_to_add, r->hrm_index(), r->get_type_str());
       add_marked_bytes_and_note_end(r, bytes_to_add);
       marked_bytes -= bytes_to_add;
-    }
+    };
+    _g1h->humongous_obj_regions_iterate(hr, distribute_bytes);
 
     assert(marked_bytes == 0,
-           "%zu bytes left after distributing space across %u regions",
-           marked_bytes, num_regions_in_humongous);
+           "%zu bytes left after distributing space across %zu regions",
+           marked_bytes, G1CollectedHeap::humongous_obj_size_in_regions(obj_size_in_words));
   }
 
   void update_marked_bytes(HeapRegion* hr) {
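The distribution above is plain chunking: each visited region absorbs MIN2(HeapRegion::GrainBytes, marked_bytes), so every full region receives a whole grain and the object's last region the remainder, leaving exactly zero bytes as the final assert demands. A self-contained sketch of the arithmetic with made-up sizes (the 4 MB grain and 9 MB object are illustrative, not G1 constants):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_bytes = 4u * 1024 * 1024;  // stand-in for HeapRegion::GrainBytes
  size_t marked_bytes = 9u * 1024 * 1024;       // whole humongous object marked live
  const size_t num_regions = (marked_bytes + grain_bytes - 1) / grain_bytes; // 3 regions

  for (size_t i = 0; i < num_regions; i++) {
    size_t bytes_to_add = std::min(grain_bytes, marked_bytes); // MIN2 equivalent
    std::printf("region %zu gets %zu bytes\n", i, bytes_to_add); // 4 MB, 4 MB, 1 MB
    marked_bytes -= bytes_to_add;
  }
  // Mirrors the assert in distribute_marked_bytes: nothing may be left over.
  assert(marked_bytes == 0);
  return 0;
}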
@@ -1369,7 +1359,7 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask {
       _g1h->free_region(hr, _local_cleanup_list);
     }
     hr->clear_cardtable();
-    _g1h->concurrent_mark()->clear_statistics_in_region(hr->hrm_index());
+    _g1h->concurrent_mark()->clear_statistics(hr);
   }
 
   return false;

@@ -382,10 +382,6 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
   // After reclaiming empty regions, update heap sizes.
   void compute_new_sizes();
 
-  // Clear statistics gathered during the concurrent cycle for the given region after
-  // it has been reclaimed.
-  void clear_statistics(HeapRegion* r);
-
   // Resets all the marking data structures. Called when we have to restart
   // marking or when marking completes (via set_non_marking_state below).
   void reset_marking_for_restart();
@@ -481,7 +477,7 @@ public:
   // Clear statistics gathered during the concurrent cycle for the given region after
   // it has been reclaimed.
-  void clear_statistics_in_region(uint region_idx);
+  void clear_statistics(HeapRegion* r);
 
   // Notification for eagerly reclaimed regions to clean up.
   void humongous_object_eagerly_reclaimed(HeapRegion* r);
 
   // Manipulation of the global mark stack.

@@ -138,11 +138,11 @@ void G1FullGCCompactionPoint::add_humongous(HeapRegion* hr) {
   _collector->add_humongous_region(hr);
 
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  do {
-    add(hr);
-    _collector->update_from_skip_compacting_to_compacting(hr->hrm_index());
-    hr = g1h->next_region_in_humongous(hr);
-  } while (hr != nullptr);
+  g1h->humongous_obj_regions_iterate(hr,
+                                     [&] (HeapRegion* r) {
+                                       add(r);
+                                       _collector->update_from_skip_compacting_to_compacting(r->hrm_index());
+                                     });
 }
 
 uint G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) {

@@ -148,14 +148,12 @@ void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
   // cycle as e.g. remembered set entries will always be added.
   if (r->is_starts_humongous() && !g1h->is_potential_eager_reclaim_candidate(r)) {
     // Handle HC regions with the HS region.
-    uint const size_in_regions = (uint)g1h->humongous_obj_size_in_regions(cast_to_oop(r->bottom())->size());
-    uint const region_idx = r->hrm_index();
-    for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
-      HeapRegion* const cur = g1h->region_at(j);
-      assert(!cur->is_continues_humongous() || cur->rem_set()->is_empty(),
-             "Continues humongous region %u remset should be empty", j);
-      cur->rem_set()->clear_locked(true /* only_cardset */);
-    }
+    g1h->humongous_obj_regions_iterate(r,
+                                       [&] (HeapRegion* r) {
+                                         assert(!r->is_continues_humongous() || r->rem_set()->is_empty(),
+                                                "Continues humongous region %u remset should be empty", r->hrm_index());
+                                         r->rem_set()->clear_locked(true /* only_cardset */);
+                                       });
   }
 
   G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
   log_trace(gc, remset, tracking)("After rebuild region %u "

@@ -213,15 +213,16 @@ public:
                             region_index,
                             BOOL_TO_STR(cm->is_marked_in_bitmap(obj)));
     _humongous_objects_reclaimed++;
-    do {
-      HeapRegion* next = _g1h->next_region_in_humongous(r);
+
+    auto free_humongous_region = [&] (HeapRegion* r) {
       _freed_bytes += r->used();
       r->set_containing_set(nullptr);
       _humongous_regions_reclaimed++;
       _g1h->free_humongous_region(r, nullptr);
      _g1h->hr_printer()->cleanup(r);
-      r = next;
-    } while (r != nullptr);
+    };
+
+    _g1h->humongous_obj_regions_iterate(r, free_humongous_region);
 
     return false;
   }
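Note: like distribute_bytes earlier, free_humongous_region captures its accumulators by reference, so updates made inside the per-region callback land in the enclosing scope's counters. A self-contained sketch of that capture pattern, with invented names and sizes:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<size_t> region_used = {4096, 4096, 1024}; // fake per-region usage

  size_t freed_bytes = 0;
  unsigned regions_reclaimed = 0;

  // [&] captures freed_bytes and regions_reclaimed by reference, so the
  // lambda updates them much like a member function updating fields.
  auto free_region = [&](size_t used) {
    freed_bytes += used;
    regions_reclaimed++;
  };

  for (size_t used : region_used) {
    free_region(used);
  }
  std::printf("reclaimed %u regions, %zu bytes\n", regions_reclaimed, freed_bytes);
  return 0;
}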