8278756: Parallel: Drop PSOldGen::_reserved

Reviewed-by: tschatzl, mli
This commit is contained in:
Albert Mingkun Yang 2021-12-16 13:41:19 +00:00
parent 271d26f141
commit f15a59ce72
9 changed files with 38 additions and 52 deletions

View File

@@ -41,6 +41,7 @@ class ObjectStartArray : public CHeapObj<mtGC> {
private: private:
PSVirtualSpace _virtual_space; PSVirtualSpace _virtual_space;
MemRegion _reserved_region; MemRegion _reserved_region;
// The committed (old-gen heap) virtual space this object-start-array covers.
MemRegion _covered_region; MemRegion _covered_region;
MemRegion _blocks_region; MemRegion _blocks_region;
jbyte* _raw_base; jbyte* _raw_base;

View File

@@ -645,8 +645,10 @@ void ParallelScavengeHeap::prepare_for_verify() {
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() { PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
PSOldGen* old = old_gen(); PSOldGen* old = old_gen();
HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr(); HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end()); HeapWord* old_reserved_start = old->reserved().start();
SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes()); HeapWord* old_reserved_end = old->reserved().end();
VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());
PSYoungGen* young = young_gen(); PSYoungGen* young = young_gen();
VirtualSpaceSummary young_summary(young->reserved().start(), VirtualSpaceSummary young_summary(young->reserved().start(),

View File

@@ -50,10 +50,6 @@ void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignmen
initialize_virtual_space(rs, initial_size, alignment); initialize_virtual_space(rs, initial_size, alignment);
initialize_work(perf_data_name, level); initialize_work(perf_data_name, level);
// The old gen can grow to max_gen_size(). _reserve reflects only
// the current maximum that can be committed.
assert(_reserved.byte_size() <= max_gen_size(), "Consistency check");
initialize_performance_counters(perf_data_name, level); initialize_performance_counters(perf_data_name, level);
} }
@@ -69,66 +65,51 @@ void PSOldGen::initialize_virtual_space(ReservedSpace rs,
} }
void PSOldGen::initialize_work(const char* perf_data_name, int level) { void PSOldGen::initialize_work(const char* perf_data_name, int level) {
// MemRegion const reserved_mr = reserved();
// Basic memory initialization assert(reserved_mr.byte_size() == max_gen_size(), "invariant");
//
MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(), // Object start stuff: for all reserved memory
heap_word_size(max_gen_size())); start_array()->initialize(reserved_mr);
assert(limit_reserved.byte_size() == max_gen_size(),
"word vs bytes confusion");
//
// Object start stuff
//
start_array()->initialize(limit_reserved); // Card table stuff: for all committed memory
MemRegion committed_mr((HeapWord*)virtual_space()->low(),
_reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
(HeapWord*)virtual_space()->high_boundary());
//
// Card table stuff
//
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high()); (HeapWord*)virtual_space()->high());
if (ZapUnusedHeapArea) { if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately rather than // Mangle newly committed space immediately rather than
// waiting for the initialization of the space even though // waiting for the initialization of the space even though
// mangling is related to spaces. Doing it here eliminates // mangling is related to spaces. Doing it here eliminates
// the need to carry along information that a complete mangling // the need to carry along information that a complete mangling
// (bottom to end) needs to be done. // (bottom to end) needs to be done.
SpaceMangler::mangle_region(cmr); SpaceMangler::mangle_region(committed_mr);
} }
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSCardTable* ct = heap->card_table(); PSCardTable* ct = heap->card_table();
ct->resize_covered_region(cmr); ct->resize_covered_region(committed_mr);
// Verify that the start and end of this generation is the start of a card. // Verify that the start and end of this generation is the start of a card.
// If this wasn't true, a single card could span more than one generation, // If this wasn't true, a single card could span more than one generation,
// which would cause problems when we commit/uncommit memory, and when we // which would cause problems when we commit/uncommit memory, and when we
// clear and dirty cards. // clear and dirty cards.
guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned"); guarantee(ct->is_card_aligned(reserved_mr.start()), "generation must be card aligned");
if (_reserved.end() != heap->reserved_region().end()) { // Check the heap layout documented at `class ParallelScavengeHeap`.
// Don't check at the very end of the heap as we'll assert that we're probing off assert(reserved_mr.end() != heap->reserved_region().end(), "invariant");
// the end if we try. guarantee(ct->is_card_aligned(reserved_mr.end()), "generation must be card aligned");
guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
}
// //
// ObjectSpace stuff // ObjectSpace stuff
// //
_object_space = new MutableSpace(virtual_space()->alignment()); _object_space = new MutableSpace(virtual_space()->alignment());
object_space()->initialize(cmr, object_space()->initialize(committed_mr,
SpaceDecorator::Clear, SpaceDecorator::Clear,
SpaceDecorator::Mangle, SpaceDecorator::Mangle,
MutableSpace::SetupPages, MutableSpace::SetupPages,
&ParallelScavengeHeap::heap()->workers()); &ParallelScavengeHeap::heap()->workers());
// Update the start_array // Update the start_array
start_array()->set_covered_region(cmr); start_array()->set_covered_region(committed_mr);
} }
void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) { void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
@@ -314,7 +295,6 @@ void PSOldGen::resize(size_t desired_free_space) {
// Adjust according to our min and max // Adjust according to our min and max
new_size = clamp(new_size, min_gen_size(), max_gen_size()); new_size = clamp(new_size, min_gen_size(), max_gen_size());
assert(max_gen_size() >= reserved().byte_size(), "max new size problem?");
new_size = align_up(new_size, alignment); new_size = align_up(new_size, alignment);
const size_t current_size = capacity_in_bytes(); const size_t current_size = capacity_in_bytes();

View File

@@ -34,9 +34,7 @@
class PSOldGen : public CHeapObj<mtGC> { class PSOldGen : public CHeapObj<mtGC> {
friend class VMStructs; friend class VMStructs;
private: private:
MemRegion _reserved; // Used for simple containment tests
PSVirtualSpace* _virtual_space; // Controls mapping and unmapping of virtual mem PSVirtualSpace* _virtual_space; // Controls mapping and unmapping of virtual mem
ObjectStartArray _start_array; // Keeps track of where objects start in a 512b block ObjectStartArray _start_array; // Keeps track of where objects start in a 512b block
MutableSpace* _object_space; // Where all the objects live MutableSpace* _object_space; // Where all the objects live
@@ -99,16 +97,20 @@ class PSOldGen : public CHeapObj<mtGC> {
PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size, PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
size_t max_size, const char* perf_data_name, int level); size_t max_size, const char* perf_data_name, int level);
MemRegion reserved() const { return _reserved; } MemRegion reserved() const {
return MemRegion((HeapWord*)(_virtual_space->low_boundary()),
(HeapWord*)(_virtual_space->high_boundary()));
}
size_t max_gen_size() const { return _max_gen_size; } size_t max_gen_size() const { return _max_gen_size; }
size_t min_gen_size() const { return _min_gen_size; } size_t min_gen_size() const { return _min_gen_size; }
bool is_in(const void* p) const { bool is_in(const void* p) const {
return _virtual_space->contains((void *)p); return _virtual_space->is_in_committed((void *)p);
} }
bool is_in_reserved(const void* p) const { bool is_in_reserved(const void* p) const {
return reserved().contains(p); return _virtual_space->is_in_reserved(p);
} }
MutableSpace* object_space() const { return _object_space; } MutableSpace* object_space() const { return _object_space; }

View File

@@ -1041,9 +1041,9 @@ void PSParallelCompact::post_compact()
PSCardTable* ct = heap->card_table(); PSCardTable* ct = heap->card_table();
MemRegion old_mr = heap->old_gen()->reserved(); MemRegion old_mr = heap->old_gen()->reserved();
if (young_gen_empty) { if (young_gen_empty) {
ct->clear(MemRegion(old_mr.start(), old_mr.end())); ct->clear(old_mr);
} else { } else {
ct->invalidate(MemRegion(old_mr.start(), old_mr.end())); ct->invalidate(old_mr);
} }
// Delete metaspaces for unloaded class loaders and clean up loader_data graph // Delete metaspaces for unloaded class loaders and clean up loader_data graph

View File

@@ -66,11 +66,6 @@ PSVirtualSpace::~PSVirtualSpace() {
release(); release();
} }
bool PSVirtualSpace::contains(void* p) const {
char* const cp = (char*)p;
return cp >= committed_low_addr() && cp < committed_high_addr();
}
void PSVirtualSpace::release() { void PSVirtualSpace::release() {
DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this)); DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this));
// This may not release memory it didn't reserve. // This may not release memory it didn't reserve.

View File

@@ -75,7 +75,13 @@ class PSVirtualSpace : public CHeapObj<mtGC> {
PSVirtualSpace(); PSVirtualSpace();
void initialize(ReservedSpace rs); void initialize(ReservedSpace rs);
bool contains(void* p) const; bool is_in_committed(const void* p) const {
return (p >= committed_low_addr()) && (p < committed_high_addr());
}
bool is_in_reserved(const void* p) const {
return (p >= reserved_low_addr()) && (p < reserved_high_addr());
}
// Accessors (all sizes are bytes). // Accessors (all sizes are bytes).
size_t alignment() const { return _alignment; } size_t alignment() const { return _alignment; }
@@ -85,6 +91,7 @@ class PSVirtualSpace : public CHeapObj<mtGC> {
char* committed_high_addr() const { return _committed_high_addr; } char* committed_high_addr() const { return _committed_high_addr; }
bool special() const { return _special; } bool special() const { return _special; }
// Return size in bytes
inline size_t committed_size() const; inline size_t committed_size() const;
inline size_t reserved_size() const; inline size_t reserved_size() const;
inline size_t uncommitted_size() const; inline size_t uncommitted_size() const;

View File

@@ -90,7 +90,7 @@ class PSYoungGen : public CHeapObj<mtGC> {
MemRegion reserved() const { return _reserved; } MemRegion reserved() const { return _reserved; }
bool is_in(const void* p) const { bool is_in(const void* p) const {
return _virtual_space->contains((void *)p); return _virtual_space->is_in_committed(p);
} }
bool is_in_reserved(const void* p) const { bool is_in_reserved(const void* p) const {

View File

@@ -57,7 +57,6 @@
nonstatic_field(PSYoungGen, _min_gen_size, const size_t) \ nonstatic_field(PSYoungGen, _min_gen_size, const size_t) \
nonstatic_field(PSYoungGen, _max_gen_size, const size_t) \ nonstatic_field(PSYoungGen, _max_gen_size, const size_t) \
\ \
nonstatic_field(PSOldGen, _reserved, MemRegion) \
nonstatic_field(PSOldGen, _virtual_space, PSVirtualSpace*) \ nonstatic_field(PSOldGen, _virtual_space, PSVirtualSpace*) \
nonstatic_field(PSOldGen, _object_space, MutableSpace*) \ nonstatic_field(PSOldGen, _object_space, MutableSpace*) \
nonstatic_field(PSOldGen, _min_gen_size, const size_t) \ nonstatic_field(PSOldGen, _min_gen_size, const size_t) \