8303534: Merge CompactibleSpace into ContiguousSpace

Reviewed-by: cjplummer, tschatzl
This commit is contained in:
Albert Mingkun Yang 2023-03-07 08:12:26 +00:00
parent 97c25df4b8
commit 7fbfc884f0
14 changed files with 75 additions and 170 deletions

@@ -1110,7 +1110,7 @@ const char* DefNewGeneration::name() const {
}
// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
ContiguousSpace* DefNewGeneration::first_compaction_space() const {
return eden();
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -167,7 +167,7 @@ protected:
ContiguousSpace* from() const { return _from_space; }
ContiguousSpace* to() const { return _to_space; }
virtual CompactibleSpace* first_compaction_space() const;
virtual ContiguousSpace* first_compaction_space() const;
// Space enquiries
size_t capacity() const;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
class BlockOffsetSharedArray;
class CardTableRS;
class CompactibleSpace;
class ContiguousSpace;
// TenuredGeneration models the heap containing old (promoted/tenured) objects
// contained in a single contiguous space. This generation is covered by a card
@@ -103,7 +103,7 @@ class TenuredGeneration: public Generation {
bool is_in(const void* p) const;
CompactibleSpace* first_compaction_space() const;
ContiguousSpace* first_compaction_space() const;
TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size,

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@ inline bool TenuredGeneration::is_in(const void* p) const {
return space()->is_in(p);
}
inline CompactibleSpace* TenuredGeneration::first_compaction_space() const {
inline ContiguousSpace* TenuredGeneration::first_compaction_space() const {
return space();
}

@@ -284,7 +284,7 @@ void Generation::object_iterate(ObjectClosure* cl) {
void Generation::prepare_for_compaction(CompactPoint* cp) {
// Generic implementation, can be specialized
CompactibleSpace* space = first_compaction_space();
ContiguousSpace* space = first_compaction_space();
while (space != nullptr) {
space->prepare_for_compaction(cp);
space = space->next_compaction_space();
@@ -306,7 +306,7 @@ void Generation::adjust_pointers() {
}
void Generation::compact() {
CompactibleSpace* sp = first_compaction_space();
ContiguousSpace* sp = first_compaction_space();
while (sp != nullptr) {
sp->compact();
sp = sp->next_compaction_space();

@@ -51,7 +51,6 @@
class DefNewGeneration;
class GCMemoryManager;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopClosure;
@@ -202,7 +201,7 @@ class Generation: public CHeapObj<mtGC> {
// Returns the first space, if any, in the generation that can participate
// in compaction, or else "null".
virtual CompactibleSpace* first_compaction_space() const = 0;
virtual ContiguousSpace* first_compaction_space() const = 0;
// Returns "true" iff this generation should be used to allocate an
// object of the given size. Young generations might

@@ -167,7 +167,10 @@ void Space::clear(bool mangle_space) {
}
}
ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(nullptr) {
ContiguousSpace::ContiguousSpace(): Space(),
_compaction_top(nullptr),
_next_compaction_space(nullptr),
_top(nullptr) {
_mangler = new GenSpaceMangler(this);
}
@@ -179,13 +182,16 @@ void ContiguousSpace::initialize(MemRegion mr,
bool clear_space,
bool mangle_space)
{
CompactibleSpace::initialize(mr, clear_space, mangle_space);
Space::initialize(mr, clear_space, mangle_space);
set_compaction_top(bottom());
_next_compaction_space = nullptr;
}
void ContiguousSpace::clear(bool mangle_space) {
set_top(bottom());
set_saved_mark();
CompactibleSpace::clear(mangle_space);
Space::clear(mangle_space);
_compaction_top = bottom();
}
bool ContiguousSpace::is_free_block(const HeapWord* p) const {
@@ -238,20 +244,8 @@ void ContiguousSpace::mangle_unused_area_complete() {
}
#endif // NOT_PRODUCT
void CompactibleSpace::initialize(MemRegion mr,
bool clear_space,
bool mangle_space) {
Space::initialize(mr, clear_space, mangle_space);
set_compaction_top(bottom());
_next_compaction_space = nullptr;
}
void CompactibleSpace::clear(bool mangle_space) {
Space::clear(mangle_space);
_compaction_top = bottom();
}
HeapWord* CompactibleSpace::forward(oop q, size_t size,
HeapWord* ContiguousSpace::forward(oop q, size_t size,
CompactPoint* cp, HeapWord* compact_top) {
// q is alive
// First check if we should switch compaction space
@@ -375,7 +369,7 @@ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
cp->space->set_compaction_top(compact_top);
}
void CompactibleSpace::adjust_pointers() {
void ContiguousSpace::adjust_pointers() {
// Check first is there is any work to do.
if (used() == 0) {
return; // Nothing to do.
@@ -412,7 +406,7 @@ void CompactibleSpace::adjust_pointers() {
assert(cur_obj == end_of_live, "just checking");
}
void CompactibleSpace::compact() {
void ContiguousSpace::compact() {
// Copy all live objects to their new location
// Used by MarkSweep::mark_sweep_phase4()

@@ -53,7 +53,7 @@ class BlockOffsetArrayContigSpace;
class BlockOffsetTable;
#endif
class Generation;
class CompactibleSpace;
class ContiguousSpace;
class CardTableRS;
class DirtyCardToOopClosure;
class FilteringClosure;
@@ -290,33 +290,52 @@
class CompactPoint : public StackObj {
public:
Generation* gen;
CompactibleSpace* space;
ContiguousSpace* space;
CompactPoint(Generation* g = nullptr) :
gen(g), space(nullptr) {}
};
// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.
class CompactibleSpace: public Space {
class GenSpaceMangler;
// A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public Space {
friend class VMStructs;
private:
HeapWord* _compaction_top;
CompactibleSpace* _next_compaction_space;
ContiguousSpace* _next_compaction_space;
template <class SpaceType>
static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN;
static inline void verify_up_to_first_dead(ContiguousSpace* space) NOT_DEBUG_RETURN;
template <class SpaceType>
static inline void clear_empty_region(SpaceType* space);
static inline void clear_empty_region(ContiguousSpace* space);
public:
CompactibleSpace() :
_compaction_top(nullptr), _next_compaction_space(nullptr) {}
protected:
HeapWord* _top;
// A helper for mangling the unused area of the space in debug builds.
GenSpaceMangler* _mangler;
// Used during compaction.
HeapWord* _first_dead;
HeapWord* _end_of_live;
// This the function to invoke when an allocation of an object covering
// "start" to "end" occurs to update other internal data structures.
virtual void alloc_block(HeapWord* start, HeapWord* the_end) { }
GenSpaceMangler* mangler() { return _mangler; }
// Allocation helpers (return null if full).
inline HeapWord* allocate_impl(size_t word_size);
inline HeapWord* par_allocate_impl(size_t word_size);
public:
ContiguousSpace();
~ContiguousSpace();
void initialize(MemRegion mr, bool clear_space, bool mangle_space) override;
void clear(bool mangle_space) override;
// Used temporarily during a compaction phase to hold the value
@@ -329,19 +348,15 @@ public:
_compaction_top = value;
}
// Perform operations on the space needed after a compaction
// has been performed.
virtual void reset_after_compaction() = 0;
// Returns the next space (in the current generation) to be compacted in
// the global compaction order. Also is used to select the next
// space into which to compact.
virtual CompactibleSpace* next_compaction_space() const {
virtual ContiguousSpace* next_compaction_space() const {
return _next_compaction_space;
}
void set_next_compaction_space(CompactibleSpace* csp) {
void set_next_compaction_space(ContiguousSpace* csp) {
_next_compaction_space = csp;
}
@@ -356,7 +371,7 @@ public:
// "cp->compaction_space" up-to-date. Offset tables may be updated in
// this phase as if the final copy had occurred; if so, "cp->threshold"
// indicates when the next such action should be taken.
virtual void prepare_for_compaction(CompactPoint* cp) = 0;
void prepare_for_compaction(CompactPoint* cp);
// MarkSweep support phase3
void adjust_pointers() override;
// MarkSweep support phase4
@@ -385,40 +400,6 @@ public:
// space.
virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
HeapWord* compact_top);
protected:
// Used during compaction.
HeapWord* _first_dead;
HeapWord* _end_of_live;
// This the function to invoke when an allocation of an object covering
// "start" to "end" occurs to update other internal data structures.
virtual void alloc_block(HeapWord* start, HeapWord* the_end) { }
};
class GenSpaceMangler;
// A space in which the free area is contiguous. It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
friend class VMStructs;
protected:
HeapWord* _top;
// A helper for mangling the unused area of the space in debug builds.
GenSpaceMangler* _mangler;
GenSpaceMangler* mangler() { return _mangler; }
// Allocation helpers (return null if full).
inline HeapWord* allocate_impl(size_t word_size);
inline HeapWord* par_allocate_impl(size_t word_size);
public:
ContiguousSpace();
~ContiguousSpace();
void initialize(MemRegion mr, bool clear_space, bool mangle_space) override;
void clear(bool mangle_space) override;
// Accessors
HeapWord* top() const { return _top; }
@@ -467,7 +448,7 @@ class ContiguousSpace: public CompactibleSpace {
void object_iterate(ObjectClosure* blk) override;
// Compaction support
void reset_after_compaction() override {
void reset_after_compaction() {
assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
set_top(compaction_top());
}
@@ -497,11 +478,6 @@ class ContiguousSpace: public CompactibleSpace {
HeapWord** top_addr() { return &_top; }
HeapWord** end_addr() { return &_end; }
#if INCLUDE_SERIALGC
// Overrides for more efficient compaction support.
void prepare_for_compaction(CompactPoint* cp) override;
#endif
void print_on(outputStream* st) const override;
// Checked dynamic downcasts.

@@ -80,10 +80,10 @@ TenuredSpace::block_start_const(const void* p) const {
class DeadSpacer : StackObj {
size_t _allowed_deadspace_words;
bool _active;
CompactibleSpace* _space;
ContiguousSpace* _space;
public:
DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
DeadSpacer(ContiguousSpace* space) : _allowed_deadspace_words(0), _space(space) {
size_t ratio = _space->allowed_dead_ratio();
_active = ratio > 0;
@@ -102,7 +102,6 @@ public:
}
}
bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
if (!_active) {
return false;
@@ -125,12 +124,10 @@ public:
return false;
}
}
};
#ifdef ASSERT
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
inline void ContiguousSpace::verify_up_to_first_dead(ContiguousSpace* space) {
HeapWord* cur_obj = space->bottom();
if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !cast_to_oop(cur_obj)->is_gc_marked()) {
@@ -149,8 +146,7 @@ inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
}
#endif
template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
inline void ContiguousSpace::clear_empty_region(ContiguousSpace* space) {
// Let's remember if we were empty before we did the compaction.
bool was_empty = space->used_region().is_empty();
// Reset space after compaction is complete

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -102,9 +102,9 @@
nonstatic_field(CollectedHeap, _is_gc_active, bool) \
nonstatic_field(CollectedHeap, _total_collections, unsigned int) \
\
nonstatic_field(CompactibleSpace, _compaction_top, HeapWord*) \
nonstatic_field(CompactibleSpace, _first_dead, HeapWord*) \
nonstatic_field(CompactibleSpace, _end_of_live, HeapWord*) \
nonstatic_field(ContiguousSpace, _compaction_top, HeapWord*) \
nonstatic_field(ContiguousSpace, _first_dead, HeapWord*) \
nonstatic_field(ContiguousSpace, _end_of_live, HeapWord*) \
\
nonstatic_field(ContiguousSpace, _top, HeapWord*) \
nonstatic_field(ContiguousSpace, _saved_mark_word, HeapWord*) \
@@ -162,8 +162,7 @@
declare_type(GenCollectedHeap, CollectedHeap) \
declare_toplevel_type(Generation) \
declare_toplevel_type(Space) \
declare_type(CompactibleSpace, Space) \
declare_type(ContiguousSpace, CompactibleSpace) \
declare_type(ContiguousSpace, Space) \
declare_toplevel_type(BarrierSet) \
declare_type(ModRefBarrierSet, BarrierSet) \
declare_type(CardTableBarrierSet, ModRefBarrierSet) \

@@ -231,7 +231,6 @@ public:
// SpaceClosure is used for iterating over spaces
class Space;
class CompactibleSpace;
class SpaceClosure : public StackObj {
public:

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@ import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.debugger.OopHandle;
import sun.jvm.hotspot.gc.shared.CompactibleSpace;
import sun.jvm.hotspot.gc.shared.ContiguousSpace;
import sun.jvm.hotspot.gc.shared.LiveRegionsProvider;
import sun.jvm.hotspot.memory.MemRegion;
import sun.jvm.hotspot.runtime.VM;
@@ -44,7 +44,7 @@ import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegion. Currently we don't actually include
// any of its fields but only iterate over it.
public class HeapRegion extends CompactibleSpace implements LiveRegionsProvider {
public class HeapRegion extends ContiguousSpace implements LiveRegionsProvider {
private static AddressField bottomField;
private static AddressField topField;
private static AddressField endField;

@@ -1,58 +0,0 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc.shared;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
/** A space that supports compaction operations. This is usually, but
not necessarily, a space that is normally contiguous. But, for
example, a free-list-based space whose normal collection is a
mark-sweep without compaction could still support compaction in
full GC's. */
public abstract class CompactibleSpace extends Space {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("CompactibleSpace");
}
public CompactibleSpace(Address addr) {
super(addr);
}
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@ import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
public class ContiguousSpace extends CompactibleSpace implements LiveRegionsProvider {
public class ContiguousSpace extends Space implements LiveRegionsProvider {
private static AddressField topField;
static {