8200545: Improve filter for enqueued deferred cards

Reviewed-by: kbarrett, sangheki
Author: Thomas Schatzl
Date: 2019-05-14 15:36:26 +02:00
parent 2a48a29c33
commit 3d149df158
24 changed files with 422 additions and 401 deletions

@@ -27,7 +27,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/g1EvacStats.hpp"
-#include "gc/g1/g1InCSetState.hpp"
+#include "gc/g1/g1HeapRegionAttr.hpp"

 class G1CollectedHeap;
@@ -249,14 +249,14 @@ public:
 class G1GCAllocRegion : public G1AllocRegion {
 protected:
   G1EvacStats* _stats;
-  InCSetState::in_cset_state_t _purpose;
+  G1HeapRegionAttr::region_type_t _purpose;

   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
   virtual size_t retire(bool fill_up);

-  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats, InCSetState::in_cset_state_t purpose)
+  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats, G1HeapRegionAttr::region_type_t purpose)
   : G1AllocRegion(name, bot_updates), _stats(stats), _purpose(purpose) {
     assert(stats != NULL, "Must pass non-NULL PLAB statistics");
   }
@@ -265,13 +265,13 @@ protected:
 class SurvivorGCAllocRegion : public G1GCAllocRegion {
 public:
   SurvivorGCAllocRegion(G1EvacStats* stats)
-  : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, InCSetState::Young) { }
+  : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, G1HeapRegionAttr::Young) { }
 };

 class OldGCAllocRegion : public G1GCAllocRegion {
 public:
   OldGCAllocRegion(G1EvacStats* stats)
-  : G1GCAllocRegion("Old GC Alloc Region", true /* bot_updates */, stats, InCSetState::Old) { }
+  : G1GCAllocRegion("Old GC Alloc Region", true /* bot_updates */, stats, G1HeapRegionAttr::Old) { }

   // This specialization of release() makes sure that the last card that has
   // been allocated into has been completely filled by a dummy object. This

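The release() note above relies on card-granularity arithmetic; a minimal sketch of the boundary computation, assuming an illustrative 512-byte card size (HotSpot takes the real value from the card table):

    #include <cstdint>

    // Assumed card size for illustration only.
    static const uintptr_t kCardSize = 512;

    // Round an address up to the next card boundary. Filling [top, boundary)
    // with a dummy object on release means refinement threads never parse
    // uninitialized memory when they later scan that last card.
    static uintptr_t next_card_boundary(uintptr_t addr) {
      return (addr + kCardSize - 1) & ~(kCardSize - 1);
    }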
@@ -39,8 +39,8 @@ G1Allocator::G1Allocator(G1CollectedHeap* heap) :
   _survivor_is_full(false),
   _old_is_full(false),
   _mutator_alloc_region(),
-  _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
-  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)),
+  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
+  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
   _retained_old_gc_alloc_region(NULL) {
 }
@@ -161,7 +161,7 @@ size_t G1Allocator::used_in_alloc_regions() {
 }

-HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
+HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                               size_t word_size) {
   size_t temp = 0;
   HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
@@ -171,14 +171,14 @@ HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
   return result;
 }

-HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
+HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                               size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_word_size) {
-  switch (dest.value()) {
-    case InCSetState::Young:
+  switch (dest.type()) {
+    case G1HeapRegionAttr::Young:
       return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
-    case InCSetState::Old:
+    case G1HeapRegionAttr::Old:
       return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
     default:
       ShouldNotReachHere();
@@ -246,22 +246,22 @@ uint G1PLABAllocator::calc_survivor_alignment_bytes() {
 G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
   _g1h(G1CollectedHeap::heap()),
   _allocator(allocator),
-  _surviving_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Young)),
-  _tenured_alloc_buffer(_g1h->desired_plab_sz(InCSetState::Old)),
+  _surviving_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Young)),
+  _tenured_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Old)),
   _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
-  for (uint state = 0; state < InCSetState::Num; state++) {
+  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
     _direct_allocated[state] = 0;
     _alloc_buffers[state] = NULL;
   }
-  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
-  _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
+  _alloc_buffers[G1HeapRegionAttr::Young] = &_surviving_alloc_buffer;
+  _alloc_buffers[G1HeapRegionAttr::Old] = &_tenured_alloc_buffer;
 }

 bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
   return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
 }

-HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
+HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                        size_t word_sz,
                                                        bool* plab_refill_failed) {
   size_t plab_word_size = _g1h->desired_plab_sz(dest);
@@ -300,17 +300,17 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
   // Try direct allocation.
   HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
   if (result != NULL) {
-    _direct_allocated[dest.value()] += word_sz;
+    _direct_allocated[dest.type()] += word_sz;
   }
   return result;
 }

-void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz) {
+void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz) {
   alloc_buffer(dest)->undo_allocation(obj, word_sz);
 }

 void G1PLABAllocator::flush_and_retire_stats() {
-  for (uint state = 0; state < InCSetState::Num; state++) {
+  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
     PLAB* const buf = _alloc_buffers[state];
     if (buf != NULL) {
       G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
@@ -323,7 +323,7 @@ void G1PLABAllocator::flush_and_retire_stats() {
 size_t G1PLABAllocator::waste() const {
   size_t result = 0;
-  for (uint state = 0; state < InCSetState::Num; state++) {
+  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
     PLAB * const buf = _alloc_buffers[state];
     if (buf != NULL) {
       result += buf->waste();
@@ -334,7 +334,7 @@ size_t G1PLABAllocator::waste() const {
 size_t G1PLABAllocator::undo_waste() const {
   size_t result = 0;
-  for (uint state = 0; state < InCSetState::Num; state++) {
+  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
     PLAB * const buf = _alloc_buffers[state];
     if (buf != NULL) {
       result += buf->undo_waste();

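The may_throw_away_buffer predicate above compares an allocation against ParallelGCBufferWastePct percent of the buffer using only integer multiplication; a self-contained sketch with assumed example numbers:

    #include <cstddef>
    #include <cstdio>

    // Same integer-percent trick as above: allocation_word_sz < buffer_size *
    // pct / 100, rewritten to avoid truncating integer division.
    static bool may_throw_away_buffer(size_t allocation_word_sz, size_t buffer_size, size_t waste_pct) {
      return allocation_word_sz * 100 < buffer_size * waste_pct;
    }

    int main() {
      // With a 1024-word buffer and a 10% threshold: 100 * 100 = 10000 is
      // less than 1024 * 10 = 10240, so a 100-word allocation is "small".
      printf("%d\n", may_throw_away_buffer(100, 1024, 10)); // 1
      printf("%d\n", may_throw_away_buffer(200, 1024, 10)); // 0
      return 0;
    }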
@@ -26,7 +26,7 @@
 #define SHARE_GC_G1_G1ALLOCATOR_HPP

 #include "gc/g1/g1AllocRegion.hpp"
-#include "gc/g1/g1InCSetState.hpp"
+#include "gc/g1/g1HeapRegionAttr.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/plab.hpp"
@@ -112,10 +112,10 @@ public:
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
-  HeapWord* par_allocate_during_gc(InCSetState dest,
+  HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
                                    size_t word_size);

-  HeapWord* par_allocate_during_gc(InCSetState dest,
+  HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
                                    size_t min_word_size,
                                    size_t desired_word_size,
                                    size_t* actual_word_size);
@@ -132,7 +132,7 @@ private:
   PLAB  _surviving_alloc_buffer;
   PLAB  _tenured_alloc_buffer;
-  PLAB* _alloc_buffers[InCSetState::Num];
+  PLAB* _alloc_buffers[G1HeapRegionAttr::Num];

   // The survivor alignment in effect in bytes.
   // == 0 : don't align survivors
@@ -142,10 +142,10 @@ private:
   const uint _survivor_alignment_bytes;

   // Number of words allocated directly (not counting PLAB allocation).
-  size_t _direct_allocated[InCSetState::Num];
+  size_t _direct_allocated[G1HeapRegionAttr::Num];

   void flush_and_retire_stats();
-  inline PLAB* alloc_buffer(InCSetState dest);
+  inline PLAB* alloc_buffer(G1HeapRegionAttr dest);

   // Calculate the survivor space object alignment in bytes. Returns that or 0 if
   // there are no restrictions on survivor alignment.
@@ -162,20 +162,20 @@ public:
   // allocating a new PLAB. Returns the address of the allocated memory, NULL if
   // not successful. Plab_refill_failed indicates whether an attempt to refill the
   // PLAB failed or not.
-  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
+  HeapWord* allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                         size_t word_sz,
                                         bool* plab_refill_failed);

   // Allocate word_sz words in the PLAB of dest. Returns the address of the
   // allocated memory, NULL if not successful.
-  inline HeapWord* plab_allocate(InCSetState dest,
+  inline HeapWord* plab_allocate(G1HeapRegionAttr dest,
                                  size_t word_sz);

-  inline HeapWord* allocate(InCSetState dest,
+  inline HeapWord* allocate(G1HeapRegionAttr dest,
                             size_t word_sz,
                             bool* refill_failed);

-  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz);
+  void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz);
 };

 // G1ArchiveRegionMap is a boolean array used to mark G1 regions as

@@ -63,15 +63,15 @@ inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
   return mutator_alloc_region()->attempt_allocation_force(word_size);
 }

-inline PLAB* G1PLABAllocator::alloc_buffer(InCSetState dest) {
+inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest) {
   assert(dest.is_valid(),
-         "Allocation buffer index out of bounds: " CSETSTATE_FORMAT, dest.value());
-  assert(_alloc_buffers[dest.value()] != NULL,
-         "Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value());
-  return _alloc_buffers[dest.value()];
+         "Allocation buffer index out of bounds: %s", dest.get_type_str());
+  assert(_alloc_buffers[dest.type()] != NULL,
+         "Allocation buffer is NULL: %s", dest.get_type_str());
+  return _alloc_buffers[dest.type()];
 }

-inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest,
+inline HeapWord* G1PLABAllocator::plab_allocate(G1HeapRegionAttr dest,
                                                 size_t word_sz) {
   PLAB* buffer = alloc_buffer(dest);
   if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
@@ -81,7 +81,7 @@ inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest,
   }
 }

-inline HeapWord* G1PLABAllocator::allocate(InCSetState dest,
+inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest,
                                            size_t word_sz,
                                            bool* refill_failed) {
   HeapWord* const obj = plab_allocate(dest, word_sz);

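plab_allocate above takes an aligned path only when survivors must be aligned and the destination is young; a hedged sketch of what carving an aligned block out of a buffer looks like (an illustrative helper, not PLAB's real code):

    #include <cstddef>
    #include <cstdint>

    // Pad the buffer's current top up to 'align' bytes, then carve out
    // 'size' bytes; returns NULL when the padded request no longer fits.
    static char* allocate_aligned(char** top, char* end, size_t size, size_t align) {
      uintptr_t t = reinterpret_cast<uintptr_t>(*top);
      uintptr_t aligned = (t + align - 1) & ~(uintptr_t)(align - 1);
      if (aligned + size > reinterpret_cast<uintptr_t>(end)) {
        return NULL;
      }
      *top = reinterpret_cast<char*>(aligned + size);
      return reinterpret_cast<char*>(aligned);
    }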
@@ -128,6 +128,11 @@ public:
     return biased_base()[biased_index];
   }

+  T* get_ref_by_index(uintptr_t index) const {
+    verify_index(index);
+    return &this->base()[index];
+  }
+
   // Return the index of the element of the given array that covers the given
   // word in the heap.
   idx_t get_index_by_address(HeapWord* value) const {

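The new get_ref_by_index above exists so callers can mutate one field of an element in place (see clear_humongous and set_has_remset in the new g1HeapRegionAttr.hpp further down); a minimal sketch of the copy-versus-reference distinction with a hypothetical MiniArray:

    #include <cassert>
    #include <cstddef>

    template <typename T>
    class MiniArray {
      T*     _base;
      size_t _len;
    public:
      MiniArray(T* base, size_t len) : _base(base), _len(len) {}
      // By-value accessor: writes to the returned copy are lost.
      T get_by_index(size_t i) const { assert(i < _len); return _base[i]; }
      // By-reference accessor: enables in-place updates of a single field,
      // which is what the two-field G1HeapRegionAttr entries need.
      T* get_ref_by_index(size_t i) const { assert(i < _len); return &_base[i]; }
    };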
@@ -1536,7 +1536,7 @@ G1CollectedHeap::G1CollectedHeap() :
   _ref_processor_cm(NULL),
   _is_alive_closure_cm(this),
   _is_subject_to_discovery_cm(this),
-  _in_cset_fast_test() {
+  _region_attr() {

   _verifier = new G1HeapVerifier(this);
@@ -1772,7 +1772,7 @@ jint G1CollectedHeap::initialize() {
     HeapWord* end = _hrm->reserved().end();
     size_t granularity = HeapRegion::GrainBytes;

-    _in_cset_fast_test.initialize(start, end, granularity);
+    _region_attr.initialize(start, end, granularity);
     _humongous_reclaim_candidates.initialize(start, end, granularity);
   }
@@ -2626,7 +2626,7 @@ bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const
          G1EagerReclaimHumongousObjects && rem_set->is_empty();
 }

-class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
+class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
  private:
   size_t _total_humongous;
   size_t _candidate_humongous;
@@ -2690,24 +2690,26 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
   }

  public:
-  RegisterHumongousWithInCSetFastTestClosure()
+  RegisterRegionsWithRegionAttrTableClosure()
   : _total_humongous(0),
     _candidate_humongous(0),
     _dcq(&G1BarrierSet::dirty_card_queue_set()) {
   }

   virtual bool do_heap_region(HeapRegion* r) {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
     if (!r->is_starts_humongous()) {
+      g1h->register_region_with_region_attr(r);
       return false;
     }
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();

     bool is_candidate = humongous_region_is_candidate(g1h, r);
     uint rindex = r->hrm_index();
     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
     if (is_candidate) {
       _candidate_humongous++;
-      g1h->register_humongous_region_with_cset(rindex);
+      g1h->register_humongous_region_with_region_attr(rindex);
       // Is_candidate already filters out humongous object with large remembered sets.
       // If we have a humongous object with a few remembered sets, we simply flush these
       // remembered set entries into the DCQS. That will result in automatic
@@ -2743,8 +2745,14 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
         // collecting remembered set entries for humongous regions that were not
         // reclaimed.
         r->rem_set()->set_state_complete();
+#ifdef ASSERT
+        G1HeapRegionAttr region_attr = g1h->region_attr(oop(r->bottom()));
+        assert(region_attr.needs_remset_update(), "must be");
+#endif
       }
       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
+    } else {
+      g1h->register_region_with_region_attr(r);
     }
     _total_humongous++;
@@ -2757,19 +2765,13 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
   void flush_rem_set_entries() { _dcq.flush(); }
 };

-void G1CollectedHeap::register_humongous_regions_with_cset() {
-  if (!G1EagerReclaimHumongousObjects) {
-    phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
-    return;
-  }
-  double time = os::elapsed_counter();
+void G1CollectedHeap::register_regions_with_region_attr() {
+  Ticks start = Ticks::now();

-  // Collect reclaim candidate information and register candidates with cset.
-  RegisterHumongousWithInCSetFastTestClosure cl;
+  RegisterRegionsWithRegionAttrTableClosure cl;
   heap_region_iterate(&cl);

-  time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
-  phase_times()->record_fast_reclaim_humongous_stats(time,
-                                                     cl.total_humongous(),
-                                                     cl.candidate_humongous());
+  phase_times()->record_register_regions((Ticks::now() - start).seconds() * 1000.0,
+                                         cl.total_humongous(),
+                                         cl.candidate_humongous());
   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
@@ -2861,7 +2863,7 @@ void G1CollectedHeap::start_new_collection_set() {
   collection_set()->start_incremental_building();

-  clear_cset_fast_test();
+  clear_region_attr();

   guarantee(_eden.length() == 0, "eden should have been cleared");
   policy()->transfer_survivors_to_cset(survivor());
@@ -3302,17 +3304,17 @@ public:
     oop obj = *p;
     assert(obj != NULL, "the caller should have filtered out NULL values");

-    const InCSetState cset_state =_g1h->in_cset_state(obj);
-    if (!cset_state.is_in_cset_or_humongous()) {
+    const G1HeapRegionAttr region_attr =_g1h->region_attr(obj);
+    if (!region_attr.is_in_cset_or_humongous()) {
       return;
     }
-    if (cset_state.is_in_cset()) {
+    if (region_attr.is_in_cset()) {
       assert( obj->is_forwarded(), "invariant" );
       *p = obj->forwardee();
     } else {
       assert(!obj->is_forwarded(), "invariant" );
-      assert(cset_state.is_humongous(),
-             "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
+      assert(region_attr.is_humongous(),
+             "Only allowed G1HeapRegionAttr state is IsHumongous, but is %d", region_attr.type());
       _g1h->set_humongous_is_live(obj);
     }
   }
@@ -3572,7 +3574,7 @@ void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_i
   // Initialize the GC alloc regions.
   _allocator->init_gc_alloc_regions(evacuation_info);

-  register_humongous_regions_with_cset();
+  register_regions_with_region_attr();
   assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");

   rem_set()->prepare_for_oops_into_collection_set_do();
@@ -3970,7 +3972,7 @@ private:
     G1CollectedHeap* g1h = G1CollectedHeap::heap();

     assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
-    g1h->clear_in_cset(r);
+    g1h->clear_region_attr(r);

     if (r->is_young()) {
       assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
@@ -4031,7 +4033,7 @@ private:
     G1Policy* policy = g1h->policy();
     policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);

-    g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
+    g1h->alloc_buffer_stats(G1HeapRegionAttr::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
   }
 };
@@ -4365,7 +4367,7 @@ class G1AbandonCollectionSetClosure : public HeapRegionClosure {
 public:
   virtual bool do_heap_region(HeapRegion* r) {
     assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
-    G1CollectedHeap::heap()->clear_in_cset(r);
+    G1CollectedHeap::heap()->clear_region_attr(r);
     r->set_young_index_in_cset(-1);
     return false;
   }
@@ -4582,7 +4584,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,

 // Methods for the GC alloc regions

-bool G1CollectedHeap::has_more_regions(InCSetState dest) {
+bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) {
   if (dest.is_old()) {
     return true;
   } else {
@@ -4590,7 +4592,7 @@ bool G1CollectedHeap::has_more_regions(InCSetState dest) {
   }
 }

-HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
+HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");

   if (!has_more_regions(dest)) {
@@ -4618,6 +4620,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d
       _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
     }
     _policy->remset_tracker()->update_at_allocate(new_alloc_region);
+    register_region_with_region_attr(new_alloc_region);
     _hr_printer.alloc(new_alloc_region);
     return new_alloc_region;
   }
@@ -4626,12 +4629,12 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d

 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                              size_t allocated_bytes,
-                                             InCSetState dest) {
+                                             G1HeapRegionAttr dest) {
   policy()->record_bytes_copied_during_gc(allocated_bytes);
   if (dest.is_old()) {
     old_set_add(alloc_region);
   } else {
-    assert(dest.is_young(), "Retiring alloc region should be young(%d)", dest.value());
+    assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
     _survivor.add_used_bytes(allocated_bytes);
   }

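The comment in RegisterRegionsWithRegionAttrTableClosure above describes the eager-reclaim trade-off: a candidate's few remembered set entries are flushed into the dirty card queue set so the remset itself can be dropped and rebuilt by refinement if the object survives. An abstract sketch of that flush (container types are stand-ins, not HotSpot's):

    #include <cstdint>
    #include <vector>

    struct Card { uintptr_t addr; };  // stand-in for a card table entry

    // Re-dirty the candidate's remset entries as cards; refinement will
    // re-discover any references if the humongous object is not reclaimed.
    static void flush_remset_to_dcq(const std::vector<Card>& remset,
                                    std::vector<Card>* dirty_card_queue) {
      for (size_t i = 0; i < remset.size(); i++) {
        dirty_card_queue->push_back(remset[i]);
      }
    }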
@@ -40,7 +40,7 @@
 #include "gc/g1/g1HeapTransition.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1HRPrinter.hpp"
-#include "gc/g1/g1InCSetState.hpp"
+#include "gc/g1/g1HeapRegionAttr.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
 #include "gc/g1/g1SurvivorRegions.hpp"
 #include "gc/g1/g1YCTypes.hpp"
@@ -464,10 +464,10 @@ private:
                                    size_t allocated_bytes);

   // For GC alloc regions.
-  bool has_more_regions(InCSetState dest);
-  HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest);
+  bool has_more_regions(G1HeapRegionAttr dest);
+  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
-                              size_t allocated_bytes, InCSetState dest);
+                              size_t allocated_bytes, G1HeapRegionAttr dest);

   // - if explicit_gc is true, the GC is for a System.gc() etc,
   //   otherwise it's for a failed allocation.
@@ -551,10 +551,10 @@ public:
   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);

   // Returns the PLAB statistics for a given destination.
-  inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
+  inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);

   // Determines PLAB size for a given destination.
-  inline size_t desired_plab_sz(InCSetState dest);
+  inline size_t desired_plab_sz(G1HeapRegionAttr dest);

   // Do anything common to GC's.
   void gc_prologue(bool full);
@@ -573,27 +573,24 @@ public:
   inline void set_humongous_is_live(oop obj);

   // Register the given region to be part of the collection set.
-  inline void register_humongous_region_with_cset(uint index);
-  // Register regions with humongous objects (actually on the start region) in
-  // the in_cset_fast_test table.
-  void register_humongous_regions_with_cset();
+  inline void register_humongous_region_with_region_attr(uint index);
+  // Update region attributes table with information about all regions.
+  void register_regions_with_region_attr();
   // We register a region with the fast "in collection set" test. We
   // simply set to true the array slot corresponding to this region.
-  void register_young_region_with_cset(HeapRegion* r) {
-    _in_cset_fast_test.set_in_young(r->hrm_index());
+  void register_young_region_with_region_attr(HeapRegion* r) {
+    _region_attr.set_in_young(r->hrm_index());
   }
-  void register_old_region_with_cset(HeapRegion* r) {
-    _in_cset_fast_test.set_in_old(r->hrm_index());
-  }
-  void register_optional_region_with_cset(HeapRegion* r) {
-    _in_cset_fast_test.set_optional(r->hrm_index());
-  }
-  void clear_in_cset(const HeapRegion* hr) {
-    _in_cset_fast_test.clear(hr);
+  inline void register_region_with_region_attr(HeapRegion* r);
+  inline void register_old_region_with_region_attr(HeapRegion* r);
+  inline void register_optional_region_with_region_attr(HeapRegion* r);
+
+  void clear_region_attr(const HeapRegion* hr) {
+    _region_attr.clear(hr);
   }
-  void clear_cset_fast_test() {
-    _in_cset_fast_test.clear();
+
+  void clear_region_attr() {
+    _region_attr.clear();
   }

   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
@@ -1110,11 +1107,11 @@ public:
   // This array is used for a quick test on whether a reference points into
   // the collection set or not. Each of the array's elements denotes whether the
   // corresponding region is in the collection set or not.
-  G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
+  G1HeapRegionAttrBiasedMappedArray _region_attr;

 public:

-  inline InCSetState in_cset_state(const oop obj);
+  inline G1HeapRegionAttr region_attr(const oop obj);

   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.

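The _region_attr member above is a G1BiasedMappedArray keyed by heap address at HeapRegion::GrainBytes granularity; a minimal sketch of why each lookup is one shift and one load (names are illustrative, not the real template):

    #include <cstdint>

    struct FastRegionTable {
      int8_t* _biased_base;  // table base pre-offset by -(heap_start >> _shift)
      int     _shift;        // log2(region size in bytes)

      // One shift plus one indexed load per query; no bounds math on the
      // hot path because the base pointer already carries the bias.
      int8_t at(const void* addr) const {
        return _biased_base[reinterpret_cast<uintptr_t>(addr) >> _shift];
      }
    };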
@@ -30,6 +30,7 @@
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "runtime/orderAccess.hpp"
@@ -38,11 +39,11 @@ G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
   return _policy->phase_times();
 }

-G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
-  switch (dest.value()) {
-    case InCSetState::Young:
+G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
+  switch (dest.type()) {
+    case G1HeapRegionAttr::Young:
       return &_survivor_evac_stats;
-    case InCSetState::Old:
+    case G1HeapRegionAttr::Old:
       return &_old_evac_stats;
     default:
       ShouldNotReachHere();
@@ -50,7 +51,7 @@ G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
   }
 }

-size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
+size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
   size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
   // Prevent humongous PLAB sizes for two reasons:
   // * PLABs are allocated using a similar paths as oops, but should
@@ -150,23 +151,35 @@ inline bool G1CollectedHeap::is_in_cset(oop obj) {
 }

 inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
-  return _in_cset_fast_test.is_in_cset(addr);
+  return _region_attr.is_in_cset(addr);
 }

 bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
-  return _in_cset_fast_test.is_in_cset(hr);
+  return _region_attr.is_in_cset(hr);
 }

 bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
-  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
+  return _region_attr.is_in_cset_or_humongous((HeapWord*)obj);
 }

-InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
-  return _in_cset_fast_test.at((HeapWord*)obj);
+G1HeapRegionAttr G1CollectedHeap::region_attr(const oop obj) {
+  return _region_attr.at((HeapWord*)obj);
 }

-void G1CollectedHeap::register_humongous_region_with_cset(uint index) {
-  _in_cset_fast_test.set_humongous(index);
+void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
+  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
+}
+
+void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
+  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
+}
+
+void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
+  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
+}
+
+void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
+  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
 }

 #ifndef PRODUCT
@@ -294,7 +307,7 @@ inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
   //  thread (i.e. within the VM thread).
   if (is_humongous_reclaim_candidate(region)) {
     set_humongous_reclaim_candidate(region, false);
-    _in_cset_fast_test.clear_humongous(region);
+    _region_attr.clear_humongous(region);
   }
 }

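All four register_*_with_region_attr variants above cache rem_set()->is_tracked() in the table, which is what the commit title refers to: a card for a reference into a region whose remembered set is not being maintained can now be filtered out before being enqueued. A simplified sketch of that filter (stand-in types, not the real barrier code):

    #include <cstdint>

    // Mirrors G1HeapRegionAttr's two fields.
    struct RegionAttr {
      uint8_t needs_remset_update;  // cached rem_set()->is_tracked()
      int8_t  type;
    };

    // A deferred card pointing into this region is only worth enqueueing
    // when the region's remembered set is actually being maintained.
    static bool should_enqueue_deferred_card(const RegionAttr& attr) {
      return attr.needs_remset_update != 0;
    }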
@@ -121,7 +121,7 @@ void G1CollectionSet::add_old_region(HeapRegion* hr) {
   assert(hr->is_old(), "the region should be old");
   assert(!hr->in_collection_set(), "should not already be in the collection set");

-  _g1h->register_old_region_with_cset(hr);
+  _g1h->register_old_region_with_region_attr(hr);

   _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
   assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
@@ -137,7 +137,7 @@ void G1CollectionSet::add_optional_region(HeapRegion* hr) {
   assert(hr->is_old(), "the region should be old");
   assert(!hr->in_collection_set(), "should not already be in the CSet");

-  _g1h->register_optional_region_with_cset(hr);
+  _g1h->register_optional_region_with_region_attr(hr);

   hr->set_index_in_opt_cset(_num_optional_regions++);
 }
@@ -316,7 +316,7 @@ void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
   }

   assert(!hr->in_collection_set(), "invariant");
-  _g1h->register_young_region_with_cset(hr);
+  _g1h->register_young_region_with_region_attr(hr);
 }

 void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
@@ -492,7 +492,7 @@ void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_r
     HeapRegion* r = candidates()->at(candidate_idx + i);
     // This potentially optional candidate region is going to be an actual collection
     // set region. Clear cset marker.
-    _g1h->clear_in_cset(r);
+    _g1h->clear_region_attr(r);
     add_old_region(r);
   }
   candidates()->remove(num_old_candidate_regions);
@@ -526,7 +526,7 @@ void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* p
   for (uint i = 0; i < _num_optional_regions; i++) {
     HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
     pss->record_unused_optional_region(r);
-    _g1h->clear_in_cset(r);
+    _g1h->clear_region_attr(r);
     r->clear_index_in_opt_cset();
   }
   free_optional_regions();

@@ -170,7 +170,7 @@ void G1GCPhaseTimes::reset() {
   _recorded_total_free_cset_time_ms = 0.0;
   _recorded_serial_free_cset_time_ms = 0.0;
   _cur_fast_reclaim_humongous_time_ms = 0.0;
-  _cur_fast_reclaim_humongous_register_time_ms = 0.0;
+  _cur_region_register_time = 0.0;
   _cur_fast_reclaim_humongous_total = 0;
   _cur_fast_reclaim_humongous_candidates = 0;
   _cur_fast_reclaim_humongous_reclaimed = 0;
@@ -364,7 +364,7 @@ double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
   const double sum_ms = _root_region_scan_wait_time_ms +
                         _recorded_young_cset_choice_time_ms +
                         _recorded_non_young_cset_choice_time_ms +
-                        _cur_fast_reclaim_humongous_register_time_ms +
+                        _cur_region_register_time +
                         _recorded_clear_claimed_marks_time_ms;

   info_time("Pre Evacuate Collection Set", sum_ms);
@@ -374,8 +374,8 @@ double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
   }
   debug_time("Prepare TLABs", _cur_prepare_tlab_time_ms);
   debug_time("Choose Collection Set", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
+  debug_time("Region Register", _cur_region_register_time);
   if (G1EagerReclaimHumongousObjects) {
-    debug_time("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
     trace_count("Humongous Total", _cur_fast_reclaim_humongous_total);
     trace_count("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
   }

@@ -176,8 +176,9 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   double _recorded_serial_free_cset_time_ms;

+  double _cur_region_register_time;
+
   double _cur_fast_reclaim_humongous_time_ms;
-  double _cur_fast_reclaim_humongous_register_time_ms;
   size_t _cur_fast_reclaim_humongous_total;
   size_t _cur_fast_reclaim_humongous_candidates;
   size_t _cur_fast_reclaim_humongous_reclaimed;
@@ -305,8 +306,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     _recorded_serial_free_cset_time_ms = time_ms;
   }

-  void record_fast_reclaim_humongous_stats(double time_ms, size_t total, size_t candidates) {
-    _cur_fast_reclaim_humongous_register_time_ms = time_ms;
+  void record_register_regions(double time_ms, size_t total, size_t candidates) {
+    _cur_region_register_time = time_ms;
     _cur_fast_reclaim_humongous_total = total;
     _cur_fast_reclaim_humongous_candidates = candidates;
   }

@@ -0,0 +1,164 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1HEAPREGIONATTR_HPP
#define SHARE_GC_G1_G1HEAPREGIONATTR_HPP
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/heapRegion.hpp"
// Per-region attributes often used during garbage collection to avoid costly
// lookups for that information all over the place.
struct G1HeapRegionAttr {
public:
// We use different types to represent the state value depending on platform as
// some have issues loading parts of words.
#ifdef SPARC
typedef int32_t region_type_t;
typedef uint32_t needs_remset_update_t;
#else
typedef int8_t region_type_t;
typedef uint8_t needs_remset_update_t;
#endif
private:
needs_remset_update_t _needs_remset_update;
region_type_t _type;
public:
// Selection of the values for the _type field were driven to micro-optimize the
// encoding and frequency of the checks.
// The most common check for a given reference is whether the region is in the
// collection set or not, and which generation this region is in.
// The selected encoding allows us to use a single check (> NotInCSet) for the
// former.
//
// The other values are used for objects requiring various special cases,
// for example eager reclamation of humongous objects or optional regions.
static const region_type_t Optional = -2; // The region is optional and NOT in the current collection set.
static const region_type_t Humongous = -1; // The region is a humongous candidate not in the current collection set.
static const region_type_t NotInCSet = 0; // The region is not in the collection set.
static const region_type_t Young = 1; // The region is in the collection set and a young region.
static const region_type_t Old = 2; // The region is in the collection set and an old region.
static const region_type_t Num = 3;
G1HeapRegionAttr(region_type_t type = NotInCSet, bool needs_remset_update = false) :
_needs_remset_update(needs_remset_update), _type(type) {
assert(is_valid(), "Invalid type %d", _type);
}
region_type_t type() const { return _type; }
const char* get_type_str() const {
switch (type()) {
case Optional: return "Optional";
case Humongous: return "Humongous";
case NotInCSet: return "NotInCSet";
case Young: return "Young";
case Old: return "Old";
default: ShouldNotReachHere(); return "";
}
}
bool needs_remset_update() const { return _needs_remset_update != 0; }
void set_old() { _type = Old; }
void clear_humongous() {
assert(is_humongous() || !is_in_cset(), "must be");
_type = NotInCSet;
}
void set_has_remset(bool value) { _needs_remset_update = value ? 1 : 0; }
bool is_in_cset_or_humongous() const { return is_in_cset() || is_humongous(); }
bool is_in_cset() const { return type() > NotInCSet; }
bool is_humongous() const { return type() == Humongous; }
bool is_young() const { return type() == Young; }
bool is_old() const { return type() == Old; }
bool is_optional() const { return type() == Optional; }
#ifdef ASSERT
bool is_default() const { return type() == NotInCSet; }
bool is_valid() const { return (type() >= Optional && type() < Num); }
bool is_valid_gen() const { return (type() >= Young && type() <= Old); }
#endif
};
// Table for all regions in the heap for above.
//
// We use this to speed up reference processing during young collection and
// quickly reclaim humongous objects. For the latter, at the start of GC, by adding
// it as a humongous region we enable special handling for that region. During the
// reference iteration closures, when we see a humongous region, we then simply mark
// it as referenced, i.e. live, and remove it from this table to prevent further
// processing on it.
//
// This means that this does NOT completely correspond to the information stored
// in a HeapRegion, but only to what is interesting for the current young collection.
class G1HeapRegionAttrBiasedMappedArray : public G1BiasedMappedArray<G1HeapRegionAttr> {
protected:
G1HeapRegionAttr default_value() const { return G1HeapRegionAttr(G1HeapRegionAttr::NotInCSet); }
public:
void set_optional(uintptr_t index, bool needs_remset_update) {
assert(get_by_index(index).is_default(),
"Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Optional, needs_remset_update));
}
void set_humongous(uintptr_t index, bool needs_remset_update) {
assert(get_by_index(index).is_default(),
"Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Humongous, needs_remset_update));
}
void clear_humongous(uintptr_t index) {
get_ref_by_index(index)->clear_humongous();
}
void set_has_remset(uintptr_t index, bool needs_remset_update) {
get_ref_by_index(index)->set_has_remset(needs_remset_update);
}
void set_in_young(uintptr_t index) {
assert(get_by_index(index).is_default(),
"Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Young, true));
}
void set_in_old(uintptr_t index, bool needs_remset_update) {
assert(get_by_index(index).is_default(),
"Region attributes at index " INTPTR_FORMAT " should be default but is %s", index, get_by_index(index).get_type_str());
set_by_index(index, G1HeapRegionAttr(G1HeapRegionAttr::Old, needs_remset_update));
}
bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
bool is_in_cset(const HeapRegion* hr) const { return get_by_index(hr->hrm_index()).is_in_cset(); }
G1HeapRegionAttr at(HeapWord* addr) const { return get_by_address(addr); }
void clear() { G1BiasedMappedArray<G1HeapRegionAttr>::clear(); }
void clear(const HeapRegion* hr) { return set_by_index(hr->hrm_index(), G1HeapRegionAttr(G1HeapRegionAttr::NotInCSet)); }
};
#endif // SHARE_GC_G1_G1HEAPREGIONATTR_HPP

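A compilable sketch of the encoding argument in the header above: with Optional and Humongous below zero and Young/Old above, the hot in-cset test is one signed comparison (the values are copied from the file; the test harness is illustrative):

    #include <cassert>
    #include <cstdint>

    typedef int8_t region_type_t;
    static const region_type_t Optional  = -2;
    static const region_type_t Humongous = -1;
    static const region_type_t NotInCSet =  0;
    static const region_type_t Young     =  1;
    static const region_type_t Old       =  2;

    // A single comparison covers "is this region in the collection set".
    static bool is_in_cset(region_type_t t) { return t > NotInCSet; }

    int main() {
      assert(!is_in_cset(Optional) && !is_in_cset(Humongous) && !is_in_cset(NotInCSet));
      assert(is_in_cset(Young) && is_in_cset(Old));
      return 0;
    }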
@@ -790,50 +790,50 @@ class G1CheckCSetFastTableClosure : public HeapRegionClosure {
   virtual bool do_heap_region(HeapRegion* hr) {
     uint i = hr->hrm_index();
-    InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
+    G1HeapRegionAttr region_attr = (G1HeapRegionAttr) G1CollectedHeap::heap()->_region_attr.get_by_index(i);
     if (hr->is_humongous()) {
       if (hr->in_collection_set()) {
         log_error(gc, verify)("## humongous region %u in CSet", i);
         _failures = true;
         return true;
       }
-      if (cset_state.is_in_cset()) {
-        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
+      if (region_attr.is_in_cset()) {
+        log_error(gc, verify)("## inconsistent region attr type %s for humongous region %u", region_attr.get_type_str(), i);
         _failures = true;
         return true;
       }
-      if (hr->is_continues_humongous() && cset_state.is_humongous()) {
-        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
+      if (hr->is_continues_humongous() && region_attr.is_humongous()) {
+        log_error(gc, verify)("## inconsistent region attr type %s for continues humongous region %u", region_attr.get_type_str(), i);
         _failures = true;
         return true;
       }
     } else {
-      if (cset_state.is_humongous()) {
-        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
+      if (region_attr.is_humongous()) {
+        log_error(gc, verify)("## inconsistent region attr type %s for non-humongous region %u", region_attr.get_type_str(), i);
         _failures = true;
         return true;
       }
-      if (hr->in_collection_set() != cset_state.is_in_cset()) {
-        log_error(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
-                              hr->in_collection_set(), cset_state.value(), i);
+      if (hr->in_collection_set() != region_attr.is_in_cset()) {
+        log_error(gc, verify)("## in CSet %d / region attr type %s inconsistency for region %u",
+                              hr->in_collection_set(), region_attr.get_type_str(), i);
         _failures = true;
         return true;
       }
-      if (cset_state.is_in_cset()) {
+      if (region_attr.is_in_cset()) {
         if (hr->is_archive()) {
           log_error(gc, verify)("## is_archive in collection set for region %u", i);
           _failures = true;
           return true;
         }
-        if (hr->is_young() != (cset_state.is_young())) {
-          log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
-                                hr->is_young(), cset_state.value(), i);
+        if (hr->is_young() != (region_attr.is_young())) {
+          log_error(gc, verify)("## is_young %d / region attr type %s inconsistency for region %u",
+                                hr->is_young(), region_attr.get_type_str(), i);
           _failures = true;
           return true;
         }
-        if (hr->is_old() != (cset_state.is_old())) {
-          log_error(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
-                                hr->is_old(), cset_state.value(), i);
+        if (hr->is_old() != (region_attr.is_old())) {
+          log_error(gc, verify)("## is_old %d / region attr type %s inconsistency for region %u",
+                                hr->is_old(), region_attr.get_type_str(), i);
           _failures = true;
           return true;
         }

@@ -1,142 +0,0 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1INCSETSTATE_HPP
#define SHARE_GC_G1_G1INCSETSTATE_HPP
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/heapRegion.hpp"
// Per-region state during garbage collection.
struct InCSetState {
public:
// We use different types to represent the state value. Particularly SPARC puts
// values in structs from "left to right", i.e. MSB to LSB. This results in many
// unnecessary shift operations when loading and storing values of this type.
// This degrades performance significantly (>10%) on that platform.
// Other tested ABIs do not seem to have this problem, and actually tend to
// favor smaller types, so we use the smallest usable type there.
#ifdef SPARC
#define CSETSTATE_FORMAT INTPTR_FORMAT
typedef intptr_t in_cset_state_t;
#else
#define CSETSTATE_FORMAT "%d"
typedef int8_t in_cset_state_t;
#endif
private:
in_cset_state_t _value;
public:
enum {
// Selection of the values were driven to micro-optimize the encoding and
// frequency of the checks.
// The most common check is whether the region is in the collection set or not,
// this encoding allows us to use an > 0 check.
// The positive values are encoded in increasing generation order, which
// makes getting the next generation fast by a simple increment. They are also
// used to index into arrays.
// The negative values are used for objects requiring various special cases,
// for example eager reclamation of humongous objects or optional regions.
Optional = -2, // The region is optional
Humongous = -1, // The region is humongous
NotInCSet = 0, // The region is not in the collection set.
Young = 1, // The region is in the collection set and a young region.
Old = 2, // The region is in the collection set and an old region.
Num
};
InCSetState(in_cset_state_t value = NotInCSet) : _value(value) {
assert(is_valid(), "Invalid state %d", _value);
}
in_cset_state_t value() const { return _value; }
void set_old() { _value = Old; }
bool is_in_cset_or_humongous() const { return is_in_cset() || is_humongous(); }
bool is_in_cset() const { return _value > NotInCSet; }
bool is_humongous() const { return _value == Humongous; }
bool is_young() const { return _value == Young; }
bool is_old() const { return _value == Old; }
bool is_optional() const { return _value == Optional; }
#ifdef ASSERT
bool is_default() const { return _value == NotInCSet; }
bool is_valid() const { return (_value >= Optional) && (_value < Num); }
bool is_valid_gen() const { return (_value >= Young && _value <= Old); }
#endif
};
// Instances of this class are used for quick tests on whether a reference points
// into the collection set and into which generation or is a humongous object
//
// Each of the array's elements indicates whether the corresponding region is in
// the collection set and if so in which generation, or a humongous region.
//
// We use this to speed up reference processing during young collection and
// quickly reclaim humongous objects. For the latter, by making a humongous region
// succeed this test, we sort-of add it to the collection set. During the reference
// iteration closures, when we see a humongous region, we then simply mark it as
// referenced, i.e. live.
class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<InCSetState> {
protected:
InCSetState default_value() const { return InCSetState::NotInCSet; }
public:
void set_optional(uintptr_t index) {
assert(get_by_index(index).is_default(),
"State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
set_by_index(index, InCSetState::Optional);
}
void set_humongous(uintptr_t index) {
assert(get_by_index(index).is_default(),
"State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
set_by_index(index, InCSetState::Humongous);
}
void clear_humongous(uintptr_t index) {
set_by_index(index, InCSetState::NotInCSet);
}
void set_in_young(uintptr_t index) {
assert(get_by_index(index).is_default(),
"State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
set_by_index(index, InCSetState::Young);
}
void set_in_old(uintptr_t index) {
assert(get_by_index(index).is_default(),
"State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
set_by_index(index, InCSetState::Old);
}
bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
bool is_in_cset(const HeapRegion* hr) const { return get_by_index(hr->hrm_index()).is_in_cset(); }
InCSetState at(HeapWord* addr) const { return get_by_address(addr); }
void clear() { G1BiasedMappedArray<InCSetState>::clear(); }
void clear(const HeapRegion* hr) { return set_by_index(hr->hrm_index(), InCSetState::NotInCSet); }
};
#endif // SHARE_GC_G1_G1INCSETSTATE_HPP
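
A hedged usage sketch for the fast-test array above: on the hot path of an evacuation pause, classifying a reference reduces to one load from this biased table plus the cheap comparisons on InCSetState. The helper below is hypothetical; only at(), is_in_cset() and is_humongous() come from the code above.

// Illustrative only: how a scan closure might consult the fast-test table.
inline void classify(HeapWord* addr, const G1InCSetStateFastTestBiasedMappedArray& table) {
  InCSetState state = table.at(addr);  // single biased-array load
  if (state.is_in_cset()) {
    // push the reference for evacuation (generation available via state)
  } else if (state.is_humongous()) {
    // record the humongous object as live so it is not eagerly reclaimed
  }
}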

@ -25,7 +25,7 @@
#ifndef SHARE_GC_G1_G1OOPCLOSURES_HPP #ifndef SHARE_GC_G1_G1OOPCLOSURES_HPP
#define SHARE_GC_G1_G1OOPCLOSURES_HPP #define SHARE_GC_G1_G1OOPCLOSURES_HPP
#include "gc/g1/g1InCSetState.hpp" #include "gc/g1/g1HeapRegionAttr.hpp"
#include "memory/iterator.hpp" #include "memory/iterator.hpp"
#include "oops/markOop.hpp" #include "oops/markOop.hpp"
@ -52,17 +52,17 @@ protected:
inline void prefetch_and_push(T* p, oop const obj); inline void prefetch_and_push(T* p, oop const obj);
template <class T> template <class T>
inline void handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj); inline void handle_non_cset_obj_common(G1HeapRegionAttr const region_attr, T* p, oop const obj);
public: public:
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; } virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
inline void trim_queue_partially(); inline void trim_queue_partially();
}; };
// Used during the Update RS phase to refine remaining cards in the DCQ during garbage collection. // Used to scan cards from the DCQS or the remembered sets during garbage collection.
class G1ScanObjsDuringUpdateRSClosure : public G1ScanClosureBase { class G1ScanCardClosure : public G1ScanClosureBase {
public: public:
G1ScanObjsDuringUpdateRSClosure(G1CollectedHeap* g1h, G1ScanCardClosure(G1CollectedHeap* g1h,
G1ParScanThreadState* pss) : G1ParScanThreadState* pss) :
G1ScanClosureBase(g1h, pss) { } G1ScanClosureBase(g1h, pss) { }
@ -71,23 +71,11 @@ public:
virtual void do_oop(oop* p) { do_oop_work(p); } virtual void do_oop(oop* p) { do_oop_work(p); }
}; };
// Used during the Scan RS phase to scan cards from the remembered set during garbage collection.
class G1ScanObjsDuringScanRSClosure : public G1ScanClosureBase {
public:
G1ScanObjsDuringScanRSClosure(G1CollectedHeap* g1h,
G1ParScanThreadState* par_scan_state):
G1ScanClosureBase(g1h, par_scan_state) { }
template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
// Used during Optional RS scanning to make sure we trim the queues in a timely manner. // Used during Optional RS scanning to make sure we trim the queues in a timely manner.
class G1ScanRSForOptionalClosure : public OopClosure { class G1ScanRSForOptionalClosure : public OopClosure {
G1ScanObjsDuringScanRSClosure* _scan_cl; G1ScanCardClosure* _scan_cl;
public: public:
G1ScanRSForOptionalClosure(G1ScanObjsDuringScanRSClosure* cl) : _scan_cl(cl) { } G1ScanRSForOptionalClosure(G1ScanCardClosure* cl) : _scan_cl(cl) { }
template <class T> void do_oop_work(T* p); template <class T> void do_oop_work(T* p);
virtual void do_oop(oop* p) { do_oop_work(p); } virtual void do_oop(oop* p) { do_oop_work(p); }
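
As the two hunks above show, the former Update RS and Scan RS closures collapse into the single G1ScanCardClosure. A hedged illustration of how both card sources now feed the same closure, using only constructors that appear in the G1RemSet changes later in this patch; the wiring shown here is a sketch, not a real call site:

// Illustrative wiring only.
G1ScanCardClosure scan_cl(_g1h, pss);

// Update RS path: cards drained from the dirty card queues.
G1RefineCardClosure refine_card_cl(_g1h, &scan_cl);

// Scan RS path: cards from the remembered sets of collection set regions.
G1ScanRSForRegionClosure rs_cl(_scan_state, &scan_cl, pss, scan_phase, worker_i);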

@ -61,10 +61,10 @@ inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
} }
template <class T> template <class T>
inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) { inline void G1ScanClosureBase::handle_non_cset_obj_common(G1HeapRegionAttr const region_attr, T* p, oop const obj) {
if (state.is_humongous()) { if (region_attr.is_humongous()) {
_g1h->set_humongous_is_live(obj); _g1h->set_humongous_is_live(obj);
} else if (state.is_optional()) { } else if (region_attr.is_optional()) {
_par_scan_state->remember_reference_into_optional_region(p); _par_scan_state->remember_reference_into_optional_region(p);
} }
} }
@ -81,16 +81,16 @@ inline void G1ScanEvacuatedObjClosure::do_oop_work(T* p) {
return; return;
} }
oop obj = CompressedOops::decode_not_null(heap_oop); oop obj = CompressedOops::decode_not_null(heap_oop);
const InCSetState state = _g1h->in_cset_state(obj); const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
if (state.is_in_cset()) { if (region_attr.is_in_cset()) {
prefetch_and_push(p, obj); prefetch_and_push(p, obj);
} else if (!HeapRegion::is_in_same_region(p, obj)) { } else if (!HeapRegion::is_in_same_region(p, obj)) {
handle_non_cset_obj_common(state, p, obj); handle_non_cset_obj_common(region_attr, p, obj);
assert(_scanning_in_young != Uninitialized, "Scan location has not been initialized."); assert(_scanning_in_young != Uninitialized, "Scan location has not been initialized.");
if (_scanning_in_young == True) { if (_scanning_in_young == True) {
return; return;
} }
_par_scan_state->enqueue_card_if_tracked(p, obj); _par_scan_state->enqueue_card_if_tracked(region_attr, p, obj);
} }
} }
@ -160,7 +160,7 @@ inline void G1ConcurrentRefineOopClosure::do_oop_work(T* p) {
} }
template <class T> template <class T>
inline void G1ScanObjsDuringUpdateRSClosure::do_oop_work(T* p) { inline void G1ScanCardClosure::do_oop_work(T* p) {
T o = RawAccess<>::oop_load(p); T o = RawAccess<>::oop_load(p);
if (CompressedOops::is_null(o)) { if (CompressedOops::is_null(o)) {
return; return;
@ -169,31 +169,15 @@ inline void G1ScanObjsDuringUpdateRSClosure::do_oop_work(T* p) {
check_obj_during_refinement(p, obj); check_obj_during_refinement(p, obj);
assert(!_g1h->is_in_cset((HeapWord*)p), "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.", p2i(p), _g1h->addr_to_region((HeapWord*)p)); // We cannot check for references from the collection set: the remembered sets
const InCSetState state = _g1h->in_cset_state(obj); // may contain such entries and we do not filter them out beforehand.
if (state.is_in_cset()) {
// Since the source is always from outside the collection set, here we implicitly know const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
// that this is a cross-region reference too. if (region_attr.is_in_cset()) {
prefetch_and_push(p, obj); prefetch_and_push(p, obj);
} else if (!HeapRegion::is_in_same_region(p, obj)) { } else if (!HeapRegion::is_in_same_region(p, obj)) {
handle_non_cset_obj_common(state, p, obj); handle_non_cset_obj_common(region_attr, p, obj);
_par_scan_state->enqueue_card_if_tracked(p, obj); _par_scan_state->enqueue_card_if_tracked(region_attr, p, obj);
}
}
template <class T>
inline void G1ScanObjsDuringScanRSClosure::do_oop_work(T* p) {
T heap_oop = RawAccess<>::oop_load(p);
if (CompressedOops::is_null(heap_oop)) {
return;
}
oop obj = CompressedOops::decode_not_null(heap_oop);
const InCSetState state = _g1h->in_cset_state(obj);
if (state.is_in_cset()) {
prefetch_and_push(p, obj);
} else if (!HeapRegion::is_in_same_region(p, obj)) {
handle_non_cset_obj_common(state, p, obj);
} }
} }
@ -233,7 +217,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
assert(_worker_id == _par_scan_state->worker_id(), "sanity"); assert(_worker_id == _par_scan_state->worker_id(), "sanity");
const InCSetState state = _g1h->in_cset_state(obj); const G1HeapRegionAttr state = _g1h->region_attr(obj);
if (state.is_in_cset()) { if (state.is_in_cset()) {
oop forwardee; oop forwardee;
markOop m = obj->mark_raw(); markOop m = obj->mark_raw();

@ -75,11 +75,11 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
_plab_allocator = new G1PLABAllocator(_g1h->allocator()); _plab_allocator = new G1PLABAllocator(_g1h->allocator());
_dest[InCSetState::NotInCSet] = InCSetState::NotInCSet; _dest[G1HeapRegionAttr::NotInCSet] = G1HeapRegionAttr::NotInCSet;
// The dest for Young is used when the objects are aged enough to // The dest for Young is used when the objects are aged enough to
// need to be moved to the next space. // need to be moved to the next space.
_dest[InCSetState::Young] = InCSetState::Old; _dest[G1HeapRegionAttr::Young] = G1HeapRegionAttr::Old;
_dest[InCSetState::Old] = InCSetState::Old; _dest[G1HeapRegionAttr::Old] = G1HeapRegionAttr::Old;
_closures = G1EvacuationRootClosures::create_root_closures(this, _g1h); _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
@ -157,18 +157,18 @@ void G1ParScanThreadState::trim_queue() {
} while (!_refs->is_empty()); } while (!_refs->is_empty());
} }
HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state, HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr const region_attr,
InCSetState* dest, G1HeapRegionAttr* dest,
size_t word_sz, size_t word_sz,
bool previous_plab_refill_failed) { bool previous_plab_refill_failed) {
assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value()); assert(region_attr.is_in_cset_or_humongous(), "Unexpected region attr type: %s", region_attr.get_type_str());
assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value()); assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
// Right now we only have two types of regions (young / old) so // Right now we only have two types of regions (young / old) so
// let's keep the logic here simple. We can generalize it when necessary. // let's keep the logic here simple. We can generalize it when necessary.
if (dest->is_young()) { if (dest->is_young()) {
bool plab_refill_in_old_failed = false; bool plab_refill_in_old_failed = false;
HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old, HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
word_sz, word_sz,
&plab_refill_in_old_failed); &plab_refill_in_old_failed);
// Make sure that we won't attempt to copy any other objects out // Make sure that we won't attempt to copy any other objects out
@ -190,38 +190,38 @@ HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
return obj_ptr; return obj_ptr;
} else { } else {
_old_gen_is_full = previous_plab_refill_failed; _old_gen_is_full = previous_plab_refill_failed;
assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value()); assert(dest->is_old(), "Unexpected dest region attr: %s", dest->get_type_str());
// no other space to try. // no other space to try.
return NULL; return NULL;
} }
} }
InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) { G1HeapRegionAttr G1ParScanThreadState::next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age) {
if (state.is_young()) { if (region_attr.is_young()) {
age = !m->has_displaced_mark_helper() ? m->age() age = !m->has_displaced_mark_helper() ? m->age()
: m->displaced_mark_helper()->age(); : m->displaced_mark_helper()->age();
if (age < _tenuring_threshold) { if (age < _tenuring_threshold) {
return state; return region_attr;
} }
} }
return dest(state); return dest(region_attr);
} }
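
For concreteness, a hedged sketch of the aging decision in next_region_attr(), with hypothetical plain types standing in for the VM classes (the threshold value in the comment is illustrative):

enum Attr { AttrYoung = 1, AttrOld = 2 };  // hypothetical stand-ins

Attr next_attr(Attr current, unsigned age, unsigned tenuring_threshold) {
  if (current == AttrYoung && age < tenuring_threshold) {
    return AttrYoung;  // keep copying into survivor space
  }
  return AttrOld;      // aged out, or already old: promote to old gen
}

// With a tenuring threshold of 15: age 3 stays AttrYoung, age 15 is
// promoted, and anything coming from an old region maps to AttrOld.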
void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state, void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
oop const old, size_t word_sz, uint age, oop const old, size_t word_sz, uint age,
HeapWord * const obj_ptr) const { HeapWord * const obj_ptr) const {
PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state); PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr);
if (alloc_buf->contains(obj_ptr)) { if (alloc_buf->contains(obj_ptr)) {
_g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age, _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
dest_state.value() == InCSetState::Old, dest_attr.type() == G1HeapRegionAttr::Old,
alloc_buf->word_sz() * HeapWordSize); alloc_buf->word_sz() * HeapWordSize);
} else { } else {
_g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age, _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
dest_state.value() == InCSetState::Old); dest_attr.type() == G1HeapRegionAttr::Old);
} }
} }
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state, oop G1ParScanThreadState::copy_to_survivor_space(G1HeapRegionAttr const region_attr,
oop const old, oop const old,
markOop const old_mark) { markOop const old_mark) {
const size_t word_sz = old->size(); const size_t word_sz = old->size();
@ -232,21 +232,21 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
(!from_region->is_young() && young_index == 0), "invariant" ); (!from_region->is_young() && young_index == 0), "invariant" );
uint age = 0; uint age = 0;
InCSetState dest_state = next_state(state, old_mark, age); G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
// The second clause is to prevent premature evacuation failure in case there // The second clause is to prevent premature evacuation failure in case there
// is still space in survivor, but old gen is full. // is still space in survivor, but old gen is full.
if (_old_gen_is_full && dest_state.is_old()) { if (_old_gen_is_full && dest_attr.is_old()) {
return handle_evacuation_failure_par(old, old_mark); return handle_evacuation_failure_par(old, old_mark);
} }
HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz); HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz);
// PLAB allocations should succeed most of the time, so we'll // PLAB allocations should succeed most of the time, so we'll
// normally check against NULL once and that's it. // normally check against NULL once and that's it.
if (obj_ptr == NULL) { if (obj_ptr == NULL) {
bool plab_refill_failed = false; bool plab_refill_failed = false;
obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, &plab_refill_failed); obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed);
if (obj_ptr == NULL) { if (obj_ptr == NULL) {
obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, plab_refill_failed); obj_ptr = allocate_in_next_plab(region_attr, &dest_attr, word_sz, plab_refill_failed);
if (obj_ptr == NULL) { if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has // This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer. // installed a forwarding pointer.
@ -255,7 +255,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
} }
if (_g1h->_gc_tracer_stw->should_report_promotion_events()) { if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
// The events are checked individually as part of the actual commit // The events are checked individually as part of the actual commit
report_promotion_event(dest_state, old, word_sz, age, obj_ptr); report_promotion_event(dest_attr, old, word_sz, age, obj_ptr);
} }
} }
@ -267,7 +267,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
if (_g1h->evacuation_should_fail()) { if (_g1h->evacuation_should_fail()) {
// Doing this after all the allocation attempts also tests the // Doing this after all the allocation attempts also tests the
// undo_allocation() method too. // undo_allocation() method too.
_plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz); _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
return handle_evacuation_failure_par(old, old_mark); return handle_evacuation_failure_par(old, old_mark);
} }
#endif // !PRODUCT #endif // !PRODUCT
@ -280,7 +280,7 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
if (forward_ptr == NULL) { if (forward_ptr == NULL) {
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz); Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
if (dest_state.is_young()) { if (dest_attr.is_young()) {
if (age < markOopDesc::max_age) { if (age < markOopDesc::max_age) {
age++; age++;
} }
@ -300,8 +300,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
} }
if (G1StringDedup::is_enabled()) { if (G1StringDedup::is_enabled()) {
const bool is_from_young = state.is_young(); const bool is_from_young = region_attr.is_young();
const bool is_to_young = dest_state.is_young(); const bool is_to_young = dest_attr.is_young();
assert(is_from_young == _g1h->heap_region_containing(old)->is_young(), assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
"sanity"); "sanity");
assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(), assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
@ -322,12 +322,12 @@ oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
oop* old_p = set_partial_array_mask(old); oop* old_p = set_partial_array_mask(old);
do_oop_partial_array(old_p); do_oop_partial_array(old_p);
} else { } else {
G1ScanInYoungSetter x(&_scanner, dest_state.is_young()); G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
obj->oop_iterate_backwards(&_scanner); obj->oop_iterate_backwards(&_scanner);
} }
return obj; return obj;
} else { } else {
_plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz); _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
return forward_ptr; return forward_ptr;
} }
} }

@ -53,7 +53,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
G1PLABAllocator* _plab_allocator; G1PLABAllocator* _plab_allocator;
AgeTable _age_table; AgeTable _age_table;
InCSetState _dest[InCSetState::Num]; G1HeapRegionAttr _dest[G1HeapRegionAttr::Num];
// Local tenuring threshold. // Local tenuring threshold.
uint _tenuring_threshold; uint _tenuring_threshold;
G1ScanEvacuatedObjClosure _scanner; G1ScanEvacuatedObjClosure _scanner;
@ -80,12 +80,12 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
G1DirtyCardQueue& dirty_card_queue() { return _dcq; } G1DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1CardTable* ct() { return _ct; } G1CardTable* ct() { return _ct; }
InCSetState dest(InCSetState original) const { G1HeapRegionAttr dest(G1HeapRegionAttr original) const {
assert(original.is_valid(), assert(original.is_valid(),
"Original state invalid: " CSETSTATE_FORMAT, original.value()); "Original region attr invalid: %s", original.get_type_str());
assert(_dest[original.value()].is_valid_gen(), assert(_dest[original.type()].is_valid_gen(),
"Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value()); "Dest region attr is invalid: %s", _dest[original.type()].get_type_str());
return _dest[original.value()]; return _dest[original.type()];
} }
size_t _num_optional_regions; size_t _num_optional_regions;
@ -111,10 +111,19 @@ public:
template <class T> void do_oop_ext(T* ref); template <class T> void do_oop_ext(T* ref);
template <class T> void push_on_queue(T* ref); template <class T> void push_on_queue(T* ref);
template <class T> void enqueue_card_if_tracked(T* p, oop o) { template <class T> void enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o) {
assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already."); assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already."); assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
if (!_g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
#ifdef ASSERT
HeapRegion* const hr_obj = _g1h->heap_region_containing((HeapWord*)o);
assert(region_attr.needs_remset_update() == hr_obj->rem_set()->is_tracked(),
"State flag indicating remset tracking disagrees (%s) with actual remembered set (%s) for region %u",
BOOL_TO_STR(region_attr.needs_remset_update()),
BOOL_TO_STR(hr_obj->rem_set()->is_tracked()),
hr_obj->hrm_index());
#endif
if (!region_attr.needs_remset_update()) {
return; return;
} }
size_t card_index = ct()->index_for(p); size_t card_index = ct()->index_for(p);
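
This hunk is the heart of the change: enqueue_card_if_tracked() now filters on the needs_remset_update() flag cached in the region attribute table instead of loading the target HeapRegion and asking its remembered set, while the #ifdef ASSERT block cross-checks the cached flag against the actual remembered set state. A hedged sketch of the idea, with a hypothetical struct standing in for G1HeapRegionAttr:

#include <stdint.h>

struct RegionAttrSketch {       // hypothetical stand-in for G1HeapRegionAttr
  int8_t  type;                 // Optional/Humongous/NotInCSet/Young/Old
  uint8_t needs_remset_update;  // cached copy of rem_set()->is_tracked()
};

template <class T>
void enqueue_card_if_tracked_sketch(RegionAttrSketch attr, T* p) {
  if (!attr.needs_remset_update) {  // filter on the cached per-region flag...
    return;                         // ...no load from the target HeapRegion needed
  }
  // compute the card index for p and enqueue the card for later refinement
}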
@ -184,14 +193,14 @@ private:
// Returns a non-NULL pointer if successful, and updates dest if required. // Returns a non-NULL pointer if successful, and updates dest if required.
// Also determines whether we should continue to try to allocate into the various // Also determines whether we should continue to try to allocate into the various
// generations or just end trying to allocate. // generations or just end trying to allocate.
HeapWord* allocate_in_next_plab(InCSetState const state, HeapWord* allocate_in_next_plab(G1HeapRegionAttr const region_attr,
InCSetState* dest, G1HeapRegionAttr* dest,
size_t word_sz, size_t word_sz,
bool previous_plab_refill_failed); bool previous_plab_refill_failed);
inline InCSetState next_state(InCSetState const state, markOop const m, uint& age); inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markOop const m, uint& age);
void report_promotion_event(InCSetState const dest_state, void report_promotion_event(G1HeapRegionAttr const dest_attr,
oop const old, size_t word_sz, uint age, oop const old, size_t word_sz, uint age,
HeapWord * const obj_ptr) const; HeapWord * const obj_ptr) const;
@ -200,7 +209,7 @@ private:
inline void trim_queue_to_threshold(uint threshold); inline void trim_queue_to_threshold(uint threshold);
public: public:
oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark); oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markOop const old_mark);
void trim_queue(); void trim_queue();
void trim_queue_partially(); void trim_queue_partially();

@ -41,14 +41,14 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
// than one thread might claim the same card. So the same card may be // than one thread might claim the same card. So the same card may be
// processed multiple times, and so we might get references into old gen here. // processed multiple times, and so we might get references into old gen here.
// So we need to redo this check. // So we need to redo this check.
const InCSetState in_cset_state = _g1h->in_cset_state(obj); const G1HeapRegionAttr region_attr = _g1h->region_attr(obj);
// References pushed onto the work stack should never point to a humongous region // References pushed onto the work stack should never point to a humongous region
// as they are not added to the collection set due to above precondition. // as they are not added to the collection set due to above precondition.
assert(!in_cset_state.is_humongous(), assert(!region_attr.is_humongous(),
"Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT, "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
p2i(obj), _g1h->addr_to_region((HeapWord*)obj), p2i(p)); p2i(obj), _g1h->addr_to_region((HeapWord*)obj), p2i(p));
if (!in_cset_state.is_in_cset()) { if (!region_attr.is_in_cset()) {
// In this case somebody else already did all the work. // In this case somebody else already did all the work.
return; return;
} }
@ -57,7 +57,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
if (m->is_marked()) { if (m->is_marked()) {
obj = (oop) m->decode_pointer(); obj = (oop) m->decode_pointer();
} else { } else {
obj = copy_to_survivor_space(in_cset_state, obj, m); obj = copy_to_survivor_space(region_attr, obj, m);
} }
RawAccess<IS_NOT_NULL>::oop_store(p, obj); RawAccess<IS_NOT_NULL>::oop_store(p, obj);
@ -67,7 +67,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
} }
HeapRegion* from = _g1h->heap_region_containing(p); HeapRegion* from = _g1h->heap_region_containing(p);
if (!from->is_young()) { if (!from->is_young()) {
enqueue_card_if_tracked(p, obj); enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
} }
} }

@ -27,7 +27,7 @@
#include "gc/g1/g1CollectorState.hpp" #include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp" #include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp" #include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp" #include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
#include "gc/g1/g1MMUTracker.hpp" #include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp" #include "gc/g1/g1RemSetTrackingPolicy.hpp"

@ -305,7 +305,7 @@ void G1RemSet::initialize(size_t capacity, uint max_regions) {
} }
G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state, G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
G1ScanObjsDuringScanRSClosure* scan_obj_on_card, G1ScanCardClosure* scan_obj_on_card,
G1ParScanThreadState* pss, G1ParScanThreadState* pss,
G1GCPhaseTimes::GCParPhases phase, G1GCPhaseTimes::GCParPhases phase,
uint worker_i) : uint worker_i) :
@ -345,7 +345,7 @@ void G1ScanRSForRegionClosure::scan_opt_rem_set_roots(HeapRegion* r) {
G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r); G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);
G1ScanObjsDuringScanRSClosure scan_cl(_g1h, _pss); G1ScanCardClosure scan_cl(_g1h, _pss);
G1ScanRSForOptionalClosure cl(&scan_cl); G1ScanRSForOptionalClosure cl(&scan_cl);
_opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->raw_strong_oops()); _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->raw_strong_oops());
_opt_refs_memory_used += opt_rem_set_list->used_memory(); _opt_refs_memory_used += opt_rem_set_list->used_memory();
@ -464,7 +464,7 @@ void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
G1GCPhaseTimes::GCParPhases coderoots_phase) { G1GCPhaseTimes::GCParPhases coderoots_phase) {
assert(pss->trim_ticks().value() == 0, "Queues must have been trimmed before entering."); assert(pss->trim_ticks().value() == 0, "Queues must have been trimmed before entering.");
G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss); G1ScanCardClosure scan_cl(_g1h, pss);
G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, scan_phase, worker_i); G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, scan_phase, worker_i);
_g1h->collection_set_iterate_increment_from(&cl, worker_i); _g1h->collection_set_iterate_increment_from(&cl, worker_i);
@ -489,12 +489,12 @@ void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
// Closure used for updating rem sets. Only called during an evacuation pause. // Closure used for updating rem sets. Only called during an evacuation pause.
class G1RefineCardClosure: public G1CardTableEntryClosure { class G1RefineCardClosure: public G1CardTableEntryClosure {
G1RemSet* _g1rs; G1RemSet* _g1rs;
G1ScanObjsDuringUpdateRSClosure* _update_rs_cl; G1ScanCardClosure* _update_rs_cl;
size_t _cards_scanned; size_t _cards_scanned;
size_t _cards_skipped; size_t _cards_skipped;
public: public:
G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanObjsDuringUpdateRSClosure* update_rs_cl) : G1RefineCardClosure(G1CollectedHeap* g1h, G1ScanCardClosure* update_rs_cl) :
_g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0) _g1rs(g1h->rem_set()), _update_rs_cl(update_rs_cl), _cards_scanned(0), _cards_skipped(0)
{} {}
@ -527,7 +527,7 @@ void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
if (G1HotCardCache::default_use_cache()) { if (G1HotCardCache::default_use_cache()) {
G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i); G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i);
G1ScanObjsDuringUpdateRSClosure scan_hcc_cl(_g1h, pss); G1ScanCardClosure scan_hcc_cl(_g1h, pss);
G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl); G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl);
_g1h->iterate_hcc_closure(&refine_card_cl, worker_i); _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
} }
@ -536,7 +536,7 @@ void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
{ {
G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i); G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i);
G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss); G1ScanCardClosure update_rs_cl(_g1h, pss);
G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl); G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
_g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i); _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);
@ -712,7 +712,7 @@ void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
} }
bool G1RemSet::refine_card_during_gc(CardValue* card_ptr, bool G1RemSet::refine_card_during_gc(CardValue* card_ptr,
G1ScanObjsDuringUpdateRSClosure* update_rs_cl) { G1ScanCardClosure* update_rs_cl) {
assert(_g1h->is_gc_active(), "Only call during GC"); assert(_g1h->is_gc_active(), "Only call during GC");
// Construct the region representing the card. // Construct the region representing the card.

@ -47,8 +47,7 @@ class G1HotCardCache;
class G1RemSetScanState; class G1RemSetScanState;
class G1ParScanThreadState; class G1ParScanThreadState;
class G1Policy; class G1Policy;
class G1ScanObjsDuringScanRSClosure; class G1ScanCardClosure;
class G1ScanObjsDuringUpdateRSClosure;
class HeapRegionClaimer; class HeapRegionClaimer;
// A G1RemSet in which each heap region has a rem set that records the // A G1RemSet in which each heap region has a rem set that records the
@ -115,7 +114,7 @@ public:
// Refine the card corresponding to "card_ptr", applying the given closure to // Refine the card corresponding to "card_ptr", applying the given closure to
// all references found. Must only be called during gc. // all references found. Must only be called during gc.
// Returns whether the card has been scanned. // Returns whether the card has been scanned.
bool refine_card_during_gc(CardValue* card_ptr, G1ScanObjsDuringUpdateRSClosure* update_rs_cl); bool refine_card_during_gc(CardValue* card_ptr, G1ScanCardClosure* update_rs_cl);
// Print accumulated summary info from the start of the VM. // Print accumulated summary info from the start of the VM.
void print_summary_info(); void print_summary_info();
@ -135,7 +134,7 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
G1CardTable *_ct; G1CardTable *_ct;
G1ParScanThreadState* _pss; G1ParScanThreadState* _pss;
G1ScanObjsDuringScanRSClosure* _scan_objs_on_card_cl; G1ScanCardClosure* _scan_objs_on_card_cl;
G1RemSetScanState* _scan_state; G1RemSetScanState* _scan_state;
@ -164,7 +163,7 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
void scan_strong_code_roots(HeapRegion* r); void scan_strong_code_roots(HeapRegion* r);
public: public:
G1ScanRSForRegionClosure(G1RemSetScanState* scan_state, G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
G1ScanObjsDuringScanRSClosure* scan_obj_on_card, G1ScanCardClosure* scan_obj_on_card,
G1ParScanThreadState* pss, G1ParScanThreadState* pss,
G1GCPhaseTimes::GCParPhases phase, G1GCPhaseTimes::GCParPhases phase,
uint worker_i); uint worker_i);

@ -71,7 +71,7 @@ public class TestEagerReclaimHumongousRegionsLog {
// This gives an array of lines containing eager reclaim of humongous regions // This gives an array of lines containing eager reclaim of humongous regions
// log messages contents after the ":" in the following order for every GC: // log messages contents after the ":" in the following order for every GC:
// Humongous Register: a.ams // Region Register: a.ams
// Humongous Total: b // Humongous Total: b
// Humongous Candidate: c // Humongous Candidate: c
// Humongous Reclaim: d.dms // Humongous Reclaim: d.dms
@ -79,7 +79,7 @@ public class TestEagerReclaimHumongousRegionsLog {
// Humongous Regions: f->g // Humongous Regions: f->g
String[] lines = Arrays.stream(output.getStdout().split("\\R")) String[] lines = Arrays.stream(output.getStdout().split("\\R"))
.filter(s -> s.contains("Humongous")).map(s -> s.substring(s.indexOf(LogSeparator) + LogSeparator.length())) .filter(s -> (s.contains("Humongous") || s.contains("Region Register"))).map(s -> s.substring(s.indexOf(LogSeparator) + LogSeparator.length()))
.toArray(String[]::new); .toArray(String[]::new);
Asserts.assertTrue(lines.length % 6 == 0, "There seems to be an unexpected amount of log messages (total: " + lines.length + ") per GC"); Asserts.assertTrue(lines.length % 6 == 0, "There seems to be an unexpected amount of log messages (total: " + lines.length + ") per GC");

@ -132,6 +132,7 @@ public class TestGCLogMessages {
new LogMessageWithLevel("Queue Fixup", Level.DEBUG), new LogMessageWithLevel("Queue Fixup", Level.DEBUG),
new LogMessageWithLevel("Table Fixup", Level.DEBUG), new LogMessageWithLevel("Table Fixup", Level.DEBUG),
new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG), new LogMessageWithLevel("Expand Heap After Collection", Level.DEBUG),
new LogMessageWithLevel("Region Register", Level.DEBUG),
// Free CSet // Free CSet
new LogMessageWithLevel("Free Collection Set", Level.DEBUG), new LogMessageWithLevel("Free Collection Set", Level.DEBUG),
new LogMessageWithLevel("Free Collection Set Serial", Level.TRACE), new LogMessageWithLevel("Free Collection Set Serial", Level.TRACE),
@ -139,7 +140,6 @@ public class TestGCLogMessages {
new LogMessageWithLevel("Non-Young Free Collection Set", Level.TRACE), new LogMessageWithLevel("Non-Young Free Collection Set", Level.TRACE),
// Humongous Eager Reclaim // Humongous Eager Reclaim
new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG), new LogMessageWithLevel("Humongous Reclaim", Level.DEBUG),
new LogMessageWithLevel("Humongous Register", Level.DEBUG),
// Merge PSS // Merge PSS
new LogMessageWithLevel("Merge Per-Thread State", Level.DEBUG), new LogMessageWithLevel("Merge Per-Thread State", Level.DEBUG),
// TLAB handling // TLAB handling