8191565: Last-ditch Full GC should also move humongous objects
Reviewed-by: tschatzl, sjohanss
parent f629152021
commit 96889bf3e4

Changed files:

src/hotspot/share/gc/g1/
    g1CollectedHeap.cpp, g1CollectedHeap.hpp, g1FullCollector.cpp,
    g1FullCollector.hpp, g1FullCollector.inline.hpp, g1FullGCCompactTask.cpp,
    g1FullGCCompactTask.hpp, g1FullGCCompactionPoint.cpp,
    g1FullGCCompactionPoint.hpp, g1FullGCHeapRegionAttr.hpp,
    g1FullGCPrepareTask.inline.hpp, heapRegion.cpp, heapRegion.inline.hpp
src/hotspot/share/utilities/
    growableArray.hpp
test/hotspot/jtreg/gc/
    TestAllocHumongousFragment.java
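
Note (overview): this change makes a last-ditch, maximal-compaction full GC move live humongous objects instead of leaving them in place. Marking now records whether any live humongous regions exist; a new preparation step (phase2d_prepare_humongous_compaction) forwards each humongous object to a contiguous run of compaction-target regions at lower indices when one can be found; and a new compaction step (humongous_compaction) copies the object and rebuilds the "starts/continues humongous" metadata at the destination through the refactored G1CollectedHeap::set_humongous_metadata.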
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -193,80 +193,56 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size,
   return res;
 }
 
-HeapWord*
-G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
-                                                           uint num_regions,
-                                                           size_t word_size) {
-  assert(first_hr != NULL, "pre-condition");
-  assert(is_humongous(word_size), "word_size should be humongous");
-  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
-
-  // Index of last region in the series.
-  uint first = first_hr->hrm_index();
-  uint last = first + num_regions - 1;
-
-  // We need to initialize the region(s) we just discovered. This is
-  // a bit tricky given that it can happen concurrently with
-  // refinement threads refining cards on these regions and
-  // potentially wanting to refine the BOT as they are scanning
-  // those cards (this can happen shortly after a cleanup; see CR
-  // 6991377). So we have to set up the region(s) carefully and in
-  // a specific order.
-
-  // The word size sum of all the regions we will allocate.
-  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
+void G1CollectedHeap::set_humongous_metadata(HeapRegion* first_hr,
+                                             uint num_regions,
+                                             size_t word_size,
+                                             bool update_remsets) {
+  // Calculate the new top of the humongous object.
+  HeapWord* obj_top = first_hr->bottom() + word_size;
+  // The word size sum of all the regions used
+  size_t word_size_sum = num_regions * HeapRegion::GrainWords;
   assert(word_size <= word_size_sum, "sanity");
 
-  // The passed in hr will be the "starts humongous" region. The header
-  // of the new object will be placed at the bottom of this region.
-  HeapWord* new_obj = first_hr->bottom();
-  // This will be the new top of the new object.
-  HeapWord* obj_top = new_obj + word_size;
-
-  // First, we need to zero the header of the space that we will be
-  // allocating. When we update top further down, some refinement
-  // threads might try to scan the region. By zeroing the header we
-  // ensure that any thread that will try to scan the region will
-  // come across the zero klass word and bail out.
-  //
-  // NOTE: It would not have been correct to have used
-  // CollectedHeap::fill_with_object() and make the space look like
-  // an int array. The thread that is doing the allocation will
-  // later update the object header to a potentially different array
-  // type and, for a very short period of time, the klass and length
-  // fields will be inconsistent. This could cause a refinement
-  // thread to calculate the object size incorrectly.
-  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
-
-  // Next, pad out the unused tail of the last region with filler
-  // objects, for improved usage accounting.
-  // How many words we use for filler objects.
-  size_t word_fill_size = word_size_sum - word_size;
-
   // How many words memory we "waste" which cannot hold a filler object.
   size_t words_not_fillable = 0;
 
-  if (word_fill_size >= min_fill_size()) {
-    fill_with_objects(obj_top, word_fill_size);
-  } else if (word_fill_size > 0) {
+  // Pad out the unused tail of the last region with filler
+  // objects, for improved usage accounting.
+
+  // How many words can we use for filler objects.
+  size_t words_fillable = word_size_sum - word_size;
+
+  if (words_fillable >= G1CollectedHeap::min_fill_size()) {
+    G1CollectedHeap::fill_with_objects(obj_top, words_fillable);
+  } else {
     // We have space to fill, but we cannot fit an object there.
-    words_not_fillable = word_fill_size;
-    word_fill_size = 0;
+    words_not_fillable = words_fillable;
+    words_fillable = 0;
   }
 
   // We will set up the first region as "starts humongous". This
   // will also update the BOT covering all the regions to reflect
   // that there is a single object that starts at the bottom of the
   // first region.
-  first_hr->set_starts_humongous(obj_top, word_fill_size);
-  _policy->remset_tracker()->update_at_allocate(first_hr);
-  // Then, if there are any, we will set up the "continues
-  // humongous" regions.
-  HeapRegion* hr = NULL;
+  first_hr->hr_clear(false /* clear_space */);
+  first_hr->set_starts_humongous(obj_top, words_fillable);
+
+  if (update_remsets) {
+    _policy->remset_tracker()->update_at_allocate(first_hr);
+  }
+
+  // Indices of first and last regions in the series.
+  uint first = first_hr->hrm_index();
+  uint last = first + num_regions - 1;
+
+  HeapRegion* hr = nullptr;
   for (uint i = first + 1; i <= last; ++i) {
     hr = region_at(i);
     hr->hr_clear(false /* clear_space */);
     hr->set_continues_humongous(first_hr);
-    _policy->remset_tracker()->update_at_allocate(hr);
+    if (update_remsets) {
+      _policy->remset_tracker()->update_at_allocate(hr);
+    }
   }
 
   // Up to this point no concurrent thread would have been able to

@@ -297,11 +273,57 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
   assert(words_not_fillable == 0 ||
          first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
          "Miscalculation in humongous allocation");
+}
 
-  increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
+HeapWord*
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
+                                                           uint num_regions,
+                                                           size_t word_size) {
+  assert(first_hr != NULL, "pre-condition");
+  assert(is_humongous(word_size), "word_size should be humongous");
+  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
+
+  // Index of last region in the series.
+  uint first = first_hr->hrm_index();
+  uint last = first + num_regions - 1;
+
+  // We need to initialize the region(s) we just discovered. This is
+  // a bit tricky given that it can happen concurrently with
+  // refinement threads refining cards on these regions and
+  // potentially wanting to refine the BOT as they are scanning
+  // those cards (this can happen shortly after a cleanup; see CR
+  // 6991377). So we have to set up the region(s) carefully and in
+  // a specific order.
+
+  // The passed in hr will be the "starts humongous" region. The header
+  // of the new object will be placed at the bottom of this region.
+  HeapWord* new_obj = first_hr->bottom();
+
+  // First, we need to zero the header of the space that we will be
+  // allocating. When we update top further down, some refinement
+  // threads might try to scan the region. By zeroing the header we
+  // ensure that any thread that will try to scan the region will
+  // come across the zero klass word and bail out.
+  //
+  // NOTE: It would not have been correct to have used
+  // CollectedHeap::fill_with_object() and make the space look like
+  // an int array. The thread that is doing the allocation will
+  // later update the object header to a potentially different array
+  // type and, for a very short period of time, the klass and length
+  // fields will be inconsistent. This could cause a refinement
+  // thread to calculate the object size incorrectly.
+  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
+
+  // Next, update the metadata for the regions.
+  set_humongous_metadata(first_hr, num_regions, word_size, true);
+
+  HeapRegion* last_hr = region_at(last);
+  size_t used = byte_size(first_hr->bottom(), last_hr->top());
+
+  increase_used(used);
 
   for (uint i = first; i <= last; ++i) {
-    hr = region_at(i);
+    HeapRegion *hr = region_at(i);
     _humongous_set.add(hr);
     _hr_printer.alloc(hr);
   }
 
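
Note: the metadata setup that used to live inline in humongous_obj_allocate_initialize_regions is factored out into set_humongous_metadata so the full GC can reuse it when a humongous object is moved to a different set of regions. The update_remsets flag separates the two callers: the mutator allocation path passes true, while the full-GC path (compact_humongous_obj, further down) passes false and leaves remembered-set tracking untouched.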
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

@@ -606,6 +606,11 @@ public:
   // Register the given region to be part of the collection set.
   inline void register_humongous_candidate_region_with_region_attr(uint index);
 
+  void set_humongous_metadata(HeapRegion* first_hr,
+                              uint num_regions,
+                              size_t word_size,
+                              bool update_remsets);
+
   // We register a region with the fast "in collection set" test. We
   // simply set to true the array slot corresponding to this region.
   void register_young_region_with_region_attr(HeapRegion* r) {
src/hotspot/share/gc/g1/g1FullCollector.cpp

@@ -40,7 +40,7 @@
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/preservedMarks.hpp"
+#include "gc/shared/preservedMarks.inline.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "gc/shared/weakProcessor.inline.hpp"

@@ -119,12 +119,15 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap,
   _scope(heap->monitoring_support(), explicit_gc, clear_soft_refs, do_maximal_compaction, tracer),
   _num_workers(calc_active_workers()),
   _has_compaction_targets(false),
+  _has_humongous(false),
   _oop_queue_set(_num_workers),
   _array_queue_set(_num_workers),
   _preserved_marks_set(true),
   _serial_compaction_point(this),
+  _humongous_compaction_point(this),
   _is_alive(this, heap->concurrent_mark()->mark_bitmap()),
   _is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
+  _humongous_compaction_regions(8),
   _always_subject_to_discovery(),
   _is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery),
   _region_attr_table() {

@@ -155,6 +158,7 @@ G1FullCollector::~G1FullCollector() {
     delete _markers[i];
     delete _compaction_points[i];
   }
+
   FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
   FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
   FREE_C_HEAP_ARRAY(HeapWord*, _compaction_tops);

@@ -246,6 +250,8 @@ void G1FullCollector::complete_collection() {
   _heap->gc_epilogue(true);
 
+  _heap->verify_after_full_collection();
+
   _heap->print_heap_after_full_collection();
 }

@@ -343,6 +349,12 @@ void G1FullCollector::phase2_prepare_compaction() {
   // maximally compact the tail regions of the compaction queues serially.
   if (scope()->do_maximal_compaction() || !has_free_compaction_targets) {
     phase2c_prepare_serial_compaction();
+
+    if (scope()->do_maximal_compaction() &&
+        has_humongous() &&
+        serial_compaction_point()->has_regions()) {
+      phase2d_prepare_humongous_compaction();
+    }
   }
 }
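
Note: humongous compaction is deliberately gated. It runs only for maximal-compaction (last-ditch) full GCs, only when marking saw at least one live humongous region (has_humongous()), and only when serial compaction left the serial compaction point with regions, because phase2d_prepare_humongous_compaction starts its scan just past the last serial compaction target.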
@@ -418,6 +430,35 @@ void G1FullCollector::phase2c_prepare_serial_compaction() {
   serial_cp->update();
 }
 
+void G1FullCollector::phase2d_prepare_humongous_compaction() {
+  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
+  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
+  assert(serial_cp->has_regions(), "Sanity!");
+
+  uint last_serial_target = serial_cp->current_region()->hrm_index();
+  uint region_index = last_serial_target + 1;
+  uint max_reserved_regions = _heap->max_reserved_regions();
+
+  G1FullGCCompactionPoint* humongous_cp = humongous_compaction_point();
+
+  while (region_index < max_reserved_regions) {
+    HeapRegion* hr = _heap->region_at_or_null(region_index);
+
+    if (hr == nullptr) {
+      region_index++;
+      continue;
+    } else if (hr->is_starts_humongous()) {
+      uint num_regions = humongous_cp->forward_humongous(hr);
+      region_index += num_regions; // Skip over the continues humongous regions.
+      continue;
+    } else if (is_compaction_target(region_index)) {
+      // Add the region to the humongous compaction point.
+      humongous_cp->add(hr);
+    }
+    region_index++;
+  }
+}
+
 void G1FullCollector::phase3_adjust_pointers() {
   // Adjust the pointers to reflect the new locations
   GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

@@ -436,6 +477,11 @@ void G1FullCollector::phase4_do_compaction() {
   if (serial_compaction_point()->has_regions()) {
     task.serial_compaction();
   }
+
+  if (!_humongous_compaction_regions.is_empty()) {
+    assert(scope()->do_maximal_compaction(), "Only compact humongous during maximal compaction");
+    task.humongous_compaction();
+  }
 }
 
 void G1FullCollector::phase5_reset_metadata() {
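
Note: the preparation scan walks region indices upward from the region after the last serial compaction target. A "starts humongous" region gets a chance to be forwarded; forward_humongous returns the object's region count either way, so the scan can skip the "continues humongous" regions. Any other region that is itself a compaction target is added to the humongous compaction point as a destination candidate. The actual copying happens in phase 4, after phase 3 has adjusted all pointers to the new locations.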
src/hotspot/share/gc/g1/g1FullCollector.hpp

@@ -76,15 +76,18 @@ class G1FullCollector : StackObj {
   G1FullGCScope _scope;
   uint _num_workers;
   bool _has_compaction_targets;
+  bool _has_humongous;
   G1FullGCMarker** _markers;
   G1FullGCCompactionPoint** _compaction_points;
   OopQueueSet _oop_queue_set;
   ObjArrayTaskQueueSet _array_queue_set;
   PreservedMarksSet _preserved_marks_set;
   G1FullGCCompactionPoint _serial_compaction_point;
+  G1FullGCCompactionPoint _humongous_compaction_point;
   G1IsAliveClosure _is_alive;
   ReferenceProcessorIsAliveMutator _is_alive_mutator;
   G1RegionMarkStats* _live_stats;
+  GrowableArrayCHeap<HeapRegion*, mtGC> _humongous_compaction_regions;
 
   static uint calc_active_workers();

@@ -115,6 +118,7 @@ public:
   ObjArrayTaskQueueSet* array_queue_set() { return &_array_queue_set; }
   PreservedMarksSet* preserved_mark_set() { return &_preserved_marks_set; }
   G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
+  G1FullGCCompactionPoint* humongous_compaction_point() { return &_humongous_compaction_point; }
   G1CMBitMap* mark_bitmap();
   ReferenceProcessor* reference_processor();
   size_t live_words(uint region_index) const {

@@ -134,6 +138,7 @@ public:
   inline void set_free(uint region_idx);
   inline bool is_free(uint region_idx) const;
   inline void update_from_compacting_to_skip_compacting(uint region_idx);
+  inline void update_from_skip_compacting_to_compacting(uint region_idx);
 
   inline void set_compaction_top(HeapRegion* r, HeapWord* value);
   inline HeapWord* compaction_top(HeapRegion* r) const;

@@ -141,8 +146,14 @@ public:
   inline void set_has_compaction_targets();
   inline bool has_compaction_targets() const;
 
+  inline void add_humongous_region(HeapRegion* hr);
+  inline GrowableArrayCHeap<HeapRegion*, mtGC>& humongous_compaction_regions();
+
   uint truncate_parallel_cps();
 
+  inline void set_has_humongous();
+  inline bool has_humongous();
+
 private:
   void phase1_mark_live_objects();
   void phase2_prepare_compaction();

@@ -150,6 +161,7 @@ private:
   void phase2a_determine_worklists();
   bool phase2b_forward_oops();
   void phase2c_prepare_serial_compaction();
+  void phase2d_prepare_humongous_compaction();
 
   void phase3_adjust_pointers();
   void phase4_do_compaction();
src/hotspot/share/gc/g1/g1FullCollector.inline.hpp

@@ -61,6 +61,11 @@ void G1FullCollector::update_from_compacting_to_skip_compacting(uint region_idx) {
   _region_attr_table.set_skip_compacting(region_idx);
 }
 
+void G1FullCollector::update_from_skip_compacting_to_compacting(uint region_idx) {
+  DEBUG_ONLY(_region_attr_table.verify_is_skip_compacting(region_idx);)
+  _region_attr_table.set_compacting(region_idx);
+}
+
 void G1FullCollector::set_compaction_top(HeapRegion* r, HeapWord* value) {
   Atomic::store(&_compaction_tops[r->hrm_index()], value);
 }

@@ -79,5 +84,23 @@ bool G1FullCollector::has_compaction_targets() const {
   return _has_compaction_targets;
 }
 
+void G1FullCollector::set_has_humongous() {
+  if (!_has_humongous) {
+    _has_humongous = true;
+  }
+}
+
+bool G1FullCollector::has_humongous() {
+  return _has_humongous;
+}
+
+void G1FullCollector::add_humongous_region(HeapRegion* hr) {
+  _humongous_compaction_regions.append(hr);
+}
+
+GrowableArrayCHeap<HeapRegion*, mtGC>& G1FullCollector::humongous_compaction_regions() {
+  return _humongous_compaction_regions;
+}
+
 #endif // SHARE_GC_G1_G1FULLCOLLECTOR_INLINE_HPP
src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp

@@ -42,16 +42,7 @@ void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_bitmap(oop obj) {
 size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
   size_t size = obj->size();
   if (obj->is_forwarded()) {
-    HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
-
-    // copy object and reinit its mark
-    HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
-    assert(obj_addr != destination, "everything in this pass should be moving");
-    Copy::aligned_conjoint_words(obj_addr, destination, size);
-
-    // There is no need to transform stack chunks - marking already did that.
-    cast_to_oop(destination)->init_mark();
-    assert(cast_to_oop(destination)->klass() != NULL, "should have a class");
+    G1FullGCCompactTask::copy_object_to_new_location(obj);
   }
 
   // Clear the mark for the compacted object to allow reuse of the

@@ -60,6 +51,21 @@ size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
   return size;
 }
 
+void G1FullGCCompactTask::copy_object_to_new_location(oop obj) {
+  assert(obj->is_forwarded(), "Sanity!");
+  assert(obj->forwardee() != obj, "Object must have a new location");
+
+  size_t size = obj->size();
+  // Copy object and reinit its mark.
+  HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
+  HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
+  Copy::aligned_conjoint_words(obj_addr, destination, size);
+
+  // There is no need to transform stack chunks - marking already did that.
+  cast_to_oop(destination)->init_mark();
+  assert(cast_to_oop(destination)->klass() != nullptr, "should have a class");
+}
+
 void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
   assert(!hr->is_pinned(), "Should be no pinned region in compaction queue");
   assert(!hr->is_humongous(), "Should be no humongous regions in compaction queue");

@@ -98,3 +104,49 @@ void G1FullGCCompactTask::serial_compaction() {
     compact_region(*it);
   }
 }
+
+void G1FullGCCompactTask::humongous_compaction() {
+  GCTraceTime(Debug, gc, phases) tm("Phase 4: Humongous Compaction", collector()->scope()->timer());
+
+  for (HeapRegion* hr : collector()->humongous_compaction_regions()) {
+    assert(collector()->is_compaction_target(hr->hrm_index()), "Sanity");
+    compact_humongous_obj(hr);
+  }
+}
+
+void G1FullGCCompactTask::compact_humongous_obj(HeapRegion* src_hr) {
+  assert(src_hr->is_starts_humongous(), "Should be start region of the humongous object");
+
+  oop obj = cast_to_oop(src_hr->bottom());
+  size_t word_size = obj->size();
+
+  uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(word_size);
+  HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
+
+  assert(collector()->mark_bitmap()->is_marked(obj), "Should only compact marked objects");
+  collector()->mark_bitmap()->clear(obj);
+
+  copy_object_to_new_location(obj);
+
+  uint dest_start_idx = _g1h->addr_to_region(destination);
+  // Update the metadata for the destination regions.
+  _g1h->set_humongous_metadata(_g1h->region_at(dest_start_idx), num_regions, word_size, false);
+
+  // Free the source regions that do not overlap with the destination regions.
+  uint src_start_idx = src_hr->hrm_index();
+  free_non_overlapping_regions(src_start_idx, dest_start_idx, num_regions);
+}
+
+void G1FullGCCompactTask::free_non_overlapping_regions(uint src_start_idx, uint dest_start_idx, uint num_regions) {
+  uint dest_end_idx = dest_start_idx + num_regions - 1;
+  uint src_end_idx = src_start_idx + num_regions - 1;
+
+  uint non_overlapping_start = dest_end_idx < src_start_idx ?
+                               src_start_idx :
+                               dest_end_idx + 1;
+
+  for (uint i = non_overlapping_start; i <= src_end_idx; ++i) {
+    HeapRegion* hr = _g1h->region_at(i);
+    _g1h->free_humongous_region(hr, nullptr);
+  }
+}
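
Note: since destination candidates are always found at lower indices than the object being moved, the destination range of a moved humongous object is either entirely below the source range or overlaps its low end, and free_non_overlapping_regions frees only the source regions that the destination did not reuse. A standalone sketch of that index arithmetic, with illustrative values that are not part of the commit:

    #include <cassert>
    #include <cstdio>

    // Mirror of the index math in free_non_overlapping_regions (illustrative
    // only). The destination range is either disjoint from the source range
    // or overlaps its low end.
    unsigned first_region_to_free(unsigned src_start, unsigned dest_start,
                                  unsigned num_regions) {
      unsigned dest_end = dest_start + num_regions - 1;
      return dest_end < src_start ? src_start      // disjoint: free all source regions
                                  : dest_end + 1;  // overlap: keep [src_start, dest_end]
    }

    int main() {
      // Disjoint: src [10,13], dest [2,5]  -> free [10,13].
      assert(first_region_to_free(10, 2, 4) == 10);
      // Overlap:  src [10,13], dest [8,11] -> free only [12,13].
      assert(first_region_to_free(10, 8, 4) == 12);
      printf("index math checks out\n");
      return 0;
    }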
src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp

@@ -38,16 +38,24 @@ class G1FullCollector;
 class G1FullGCCompactTask : public G1FullGCTask {
   G1FullCollector* _collector;
   HeapRegionClaimer _claimer;
+  G1CollectedHeap* _g1h;
 
   void compact_region(HeapRegion* hr);
+  void compact_humongous_obj(HeapRegion* hr);
+  void free_non_overlapping_regions(uint src_start_idx, uint dest_start_idx, uint num_regions);
+
+  static void copy_object_to_new_location(oop obj);
 
 public:
   G1FullGCCompactTask(G1FullCollector* collector) :
     G1FullGCTask("G1 Compact Task", collector),
     _collector(collector),
-    _claimer(collector->workers()) { }
+    _claimer(collector->workers()),
+    _g1h(G1CollectedHeap::heap()) { }
 
   void work(uint worker_id);
   void serial_compaction();
+  void humongous_compaction();
 
   class G1CompactRegionClosure : public StackObj {
     G1CMBitMap* _bitmap;
src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp

@@ -26,6 +26,7 @@
 #include "gc/g1/g1FullCollector.inline.hpp"
 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 #include "gc/g1/heapRegion.hpp"
+#include "gc/shared/preservedMarks.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/debug.hpp"

@@ -130,3 +131,84 @@ void G1FullGCCompactionPoint::remove_at_or_above(uint bottom) {
   assert(start_index >= 0, "Should have at least one region");
   _compaction_regions->trunc_to(start_index);
 }
+
+void G1FullGCCompactionPoint::add_humongous(HeapRegion* hr) {
+  assert(hr->is_starts_humongous(), "Sanity!");
+
+  _collector->add_humongous_region(hr);
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  do {
+    add(hr);
+    _collector->update_from_skip_compacting_to_compacting(hr->hrm_index());
+    hr = g1h->next_region_in_humongous(hr);
+  } while (hr != nullptr);
+}
+
+uint G1FullGCCompactionPoint::forward_humongous(HeapRegion* hr) {
+  assert(hr->is_starts_humongous(), "Sanity!");
+
+  oop obj = cast_to_oop(hr->bottom());
+  size_t obj_size = obj->size();
+  uint num_regions = (uint)G1CollectedHeap::humongous_obj_size_in_regions(obj_size);
+
+  if (!has_regions()) {
+    return num_regions;
+  }
+
+  // Find contiguous compaction target regions for the humongous object.
+  uint range_begin = find_contiguous_before(hr, num_regions);
+
+  if (range_begin == UINT_MAX) {
+    // No contiguous compaction target regions found, so the object cannot be moved.
+    return num_regions;
+  }
+
+  // Preserve the mark for the humongous object as the region was initially not compacting.
+  _collector->marker(0)->preserved_stack()->push_if_necessary(obj, obj->mark());
+
+  HeapRegion* dest_hr = _compaction_regions->at(range_begin);
+  obj->forward_to(cast_to_oop(dest_hr->bottom()));
+  assert(obj->is_forwarded(), "Object must be forwarded!");
+
+  // Add the humongous object regions to the compaction point.
+  add_humongous(hr);
+
+  // Remove covered regions from compaction target candidates.
+  _compaction_regions->remove_range(range_begin, (range_begin + num_regions));
+
+  return num_regions;
+}
+
+uint G1FullGCCompactionPoint::find_contiguous_before(HeapRegion* hr, uint num_regions) {
+  assert(num_regions > 0, "Sanity!");
+  assert(has_regions(), "Sanity!");
+
+  if (num_regions == 1) {
+    // If only one region, return the first region.
+    return 0;
+  }
+
+  uint contiguous_region_count = 1;
+
+  uint range_end = 1;
+  uint range_limit = (uint)_compaction_regions->length();
+
+  for (; range_end < range_limit; range_end++) {
+    if (contiguous_region_count == num_regions) {
+      break;
+    }
+    // Check if the current region and the previous region are contiguous.
+    bool regions_are_contiguous = (_compaction_regions->at(range_end)->hrm_index() - _compaction_regions->at(range_end - 1)->hrm_index()) == 1;
+    contiguous_region_count = regions_are_contiguous ? contiguous_region_count + 1 : 1;
+  }
+
+  if (contiguous_region_count < num_regions &&
+      hr->hrm_index() - _compaction_regions->at(range_end - 1)->hrm_index() != 1) {
+    // We reached the end but the final region is not contiguous with the target region;
+    // no contiguous regions to move to.
+    return UINT_MAX;
+  }
+  // Return the index of the first region in the range of contiguous regions.
+  return range_end - contiguous_region_count;
+}
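
Note, with a worked example (hypothetical indices, not from the commit): suppose the candidate list holds regions with hrm indices {3, 4, 7, 8, 9} and a humongous object of 3 regions starts at region 20. find_contiguous_before scans adjacent pairs, recognizes the run {7, 8, 9}, and returns its start position 2 in the array. forward_humongous then preserves the object's mark word, forwards it to the bottom of region 7, appends the object's own regions {20, 21, 22} to the compaction point via add_humongous (flipping them from skip-compacting to compacting), and removes array entries [2, 5) so the run cannot be handed out twice. If no long-enough run exists and the last candidate is not adjacent to the object's first region, find_contiguous_before returns UINT_MAX and the object stays where it is; the adjacency exception appears to permit a partially overlapping slide toward lower addresses.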
src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp

@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/pair.hpp"
 
 class G1FullCollector;
 class HeapRegion;

@@ -43,6 +44,7 @@ class G1FullGCCompactionPoint : public CHeapObj<mtGC> {
   void initialize_values();
   void switch_region();
   HeapRegion* next_region();
+  uint find_contiguous_before(HeapRegion* hr, uint num_regions);
 
 public:
   G1FullGCCompactionPoint(G1FullCollector* collector);

@@ -53,7 +55,9 @@ public:
   void initialize(HeapRegion* hr);
   void update();
   void forward(oop object, size_t size);
+  uint forward_humongous(HeapRegion* hr);
   void add(HeapRegion* hr);
+  void add_humongous(HeapRegion* hr);
 
   void remove_at_or_above(uint bottom);
   HeapRegion* current_region();
src/hotspot/share/gc/g1/g1FullGCHeapRegionAttr.hpp

@@ -84,6 +84,8 @@ public:
 
   void verify_is_compacting(uint idx) { assert(get_by_index(idx) == Compacting, "invariant"); }
 
+  void verify_is_skip_compacting(uint idx) { assert(get_by_index(idx) == SkipCompacting, "invariant"); }
+
   void verify_is_invalid(uint idx) { assert(get_by_index(idx) == Invalid, "invariant"); }
 };
src/hotspot/share/gc/g1/g1FullGCPrepareTask.inline.hpp

@@ -89,6 +89,8 @@ inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) {
     bool is_empty = !_collector->mark_bitmap()->is_marked(obj);
     if (is_empty) {
       free_pinned_region<true>(hr);
+    } else {
+      _collector->set_has_humongous();
     }
   } else if (hr->is_open_archive()) {
     bool is_empty = _collector->live_words(hr->hrm_index()) == 0;
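
Note: during worklist determination a live humongous region stays skip-compacting; the closure only records that one exists. set_has_humongous feeds the gate in phase2_prepare_compaction, and forward_humongous later switches the object's regions to compacting if a destination is found.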
src/hotspot/share/gc/g1/heapRegion.cpp

@@ -116,9 +116,7 @@ void HeapRegion::unlink_from_list() {
 }
 
 void HeapRegion::hr_clear(bool clear_space) {
-  assert(_humongous_start_region == NULL,
-         "we should have already filtered out humongous regions");
-
   set_top(bottom());
   clear_young_index_in_cset();
   clear_index_in_opt_cset();
   uninstall_surv_rate_group();
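
Note: the assertion is removed because hr_clear is no longer unreachable for humongous regions; set_humongous_metadata now calls hr_clear(false /* clear_space */) on both the "starts humongous" region and each "continues humongous" region when rebuilding metadata at a moved object's destination.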
src/hotspot/share/gc/g1/heapRegion.inline.hpp

@@ -181,8 +181,6 @@ inline size_t HeapRegion::block_size(const HeapWord* p, HeapWord* const pb) const {
 }
 
 inline void HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) {
-  assert(!is_pinned(), "must be");
-
   set_top(new_top);
   // After a compaction the mark bitmap in a non-pinned regions is invalid.
   // But all objects are live, we get this by setting TAMS to bottom.
src/hotspot/share/utilities/growableArray.hpp

@@ -254,10 +254,18 @@ public:
 
   // Remove all elements up to the index (exclusive). The order is preserved.
   void remove_till(int idx) {
-    for (int i = 0, j = idx; j < length(); i++, j++) {
+    remove_range(0, idx);
+  }
+
+  // Remove all elements in the range [start - end). The order is preserved.
+  void remove_range(int start, int end) {
+    assert(0 <= start, "illegal index");
+    assert(start < end && end <= _len, "erase called with invalid range");
+
+    for (int i = start, j = end; j < length(); i++, j++) {
       at_put(i, at(j));
     }
-    trunc_to(length() - idx);
+    trunc_to(length() - (end - start));
   }
 
   // The order is changed.
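
Note: for illustration, a standalone mirror of the new remove_range (std::vector stands in for GrowableArray; the helper below is hypothetical and not part of the commit). It shows the same shift-then-truncate scheme and why element order is preserved:

    #include <cassert>
    #include <vector>

    // Remove elements in [start, end) while preserving order, mirroring
    // GrowableArray::remove_range above: shift the surviving tail left,
    // then truncate by the number of removed elements.
    template <typename T>
    void remove_range(std::vector<T>& v, std::size_t start, std::size_t end) {
      assert(start < end && end <= v.size());
      for (std::size_t i = start, j = end; j < v.size(); i++, j++) {
        v[i] = v[j];
      }
      v.resize(v.size() - (end - start));
    }

    int main() {
      std::vector<int> v{10, 20, 30, 40, 50};
      remove_range(v, 1, 3);                       // drops 20 and 30
      assert((v == std::vector<int>{10, 40, 50})); // order preserved
      return 0;
    }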
test/hotspot/jtreg/gc/TestAllocHumongousFragment.java

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -164,6 +165,18 @@
  *      TestAllocHumongousFragment
  */
 
+/*
+ * @test id=g1
+ * @summary Make sure G1 can recover from humongous allocation fragmentation
+ * @key randomness
+ * @requires vm.gc.G1
+ * @library /test/lib
+ *
+ * @run main/othervm -Xlog:gc+region=trace -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g
+ *      -XX:VerifyGCType=full -XX:+VerifyDuringGC -XX:+VerifyAfterGC
+ *      TestAllocHumongousFragment
+ */
+
 import java.util.*;
 import jdk.test.lib.Utils;
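
Note: the new id=g1 run adds a G1 variant of the test with full-GC verification enabled (-XX:VerifyGCType=full, -XX:+VerifyDuringGC, -XX:+VerifyAfterGC). With humongous objects now movable by the last-ditch full GC, the fragmentation-heavy allocation pattern is expected to fit back into the fixed 1g heap.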