8234502: Merge GenCollectedHeap and SerialHeap

Reviewed-by: ayang, cjplummer
Lei Zaakjyu 2024-01-12 10:56:50 +00:00 committed by Albert Mingkun Yang
parent ed18222365
commit 7dc9dd6fdf
21 changed files with 1537 additions and 1607 deletions

View File

@@ -228,7 +228,7 @@ class Generation: public CHeapObj<mtGC> {
// this generation. See comment below.
// This is a generic implementation which can be overridden.
//
-// Note: in the current (1.4) implementation, when genCollectedHeap's
+// Note: in the current (1.4) implementation, when serialHeap's
// incremental_collection_will_fail flag is set, all allocations are
// slow path (the only fast-path place to allocate is DefNew, which
// will be full if the flag is set).
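For illustration only (not part of this commit): a minimal sketch of the policy the note above describes, assuming the incremental_collection_will_fail() and young_gen() accessors that SerialHeap provides after this change; the helper name is hypothetical.

  // Hypothetical helper: while the flag is set, refuse the young-gen
  // fast path so the caller falls through to the slow path
  // (old-gen allocation attempt or a collection).
  static HeapWord* try_young_fast_alloc(SerialHeap* heap, size_t word_size) {
    if (heap->incremental_collection_will_fail(false /* don't consult young */)) {
      return nullptr;  // all allocations go slow-path while the flag is set
    }
    // DefNew bump-pointer allocation; returns nullptr once eden is full.
    return heap->young_gen()->allocate(word_size, false /* not a TLAB */);
  }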

File diff suppressed because it is too large.

View File

@@ -27,9 +27,17 @@
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/tenuredGeneration.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
#include "utilities/growableArray.hpp"
+#include "gc/serial/generation.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/oopStorageParState.hpp"
+#include "gc/shared/preGCValues.hpp"
+#include "gc/shared/softRefPolicy.hpp"
+class CardTableRS;
+class GCPolicyCounters;
class GCMemoryManager;
class MemoryPool;
class OopIterateClosure;
@@ -55,7 +63,299 @@ class TenuredGeneration;
// +-----------------+--------+--------+--------+---------------+-------------------+
// |<- committed ->| |<- committed ->|
//
-class SerialHeap : public GenCollectedHeap {
+class SerialHeap : public CollectedHeap {
friend class Generation;
friend class DefNewGeneration;
friend class TenuredGeneration;
friend class GenMarkSweep;
friend class VM_GenCollectForAllocation;
friend class VM_GenCollectFull;
friend class VM_GC_HeapInspection;
friend class VM_HeapDumper;
friend class HeapInspection;
friend class GCCauseSetter;
friend class VMStructs;
public:
friend class VM_PopulateDumpSharedSpace;
enum GenerationType {
YoungGen,
OldGen
};
private:
DefNewGeneration* _young_gen;
TenuredGeneration* _old_gen;
private:
// The singleton CardTable Remembered Set.
CardTableRS* _rem_set;
SoftRefPolicy _soft_ref_policy;
GCPolicyCounters* _gc_policy_counters;
// Indicates that the most recent previous incremental collection failed.
// The flag is cleared when an action is taken that might clear the
// condition that caused that incremental collection to fail.
bool _incremental_collection_failed;
// In support of ExplicitGCInvokesConcurrent functionality
unsigned int _full_collections_completed;
// Collects the given generation.
void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
bool run_verification, bool clear_soft_refs);
// Reserve aligned space for the heap as needed by the contained generations.
ReservedHeapSpace allocate(size_t alignment);
PreGenGCValues get_pre_gc_values() const;
private:
GCMemoryManager* _young_manager;
GCMemoryManager* _old_manager;
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
bool is_tlab,
bool first_only);
// Helper function for two callbacks below.
// Considers collection of the first max_level+1 generations.
void do_collection(bool full,
bool clear_all_soft_refs,
size_t size,
bool is_tlab,
GenerationType max_generation);
// Callback from VM_GenCollectForAllocation operation.
// This function does everything necessary/possible to satisfy an
// allocation request that failed in the youngest generation that should
// have handled it (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
// Callback from VM_GenCollectFull operation.
// Perform a full collection of the first max_level+1 generations.
void do_full_collection(bool clear_all_soft_refs) override;
void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);
// Does the "cause" of GC indicate that
// we absolutely __must__ clear soft refs?
bool must_clear_all_soft_refs();
public:
// Returns JNI_OK on success
jint initialize() override;
virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);
// Does operations required after initialization has been done.
void post_initialize() override;
bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }
MemRegion reserved_region() const { return _reserved; }
bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; }
// Performance Counter support
GCPolicyCounters* counters() { return _gc_policy_counters; }
size_t capacity() const override;
size_t used() const override;
// Save the "used_region" for both generations.
void save_used_regions();
size_t max_capacity() const override;
HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
// Perform a full collection of the heap; intended for use in implementing
// "System.gc". This implies as full a collection as the CollectedHeap
// supports. Caller does not hold the Heap_lock on entry.
void collect(GCCause::Cause cause) override;
// Returns "TRUE" iff "p" points into the committed areas of the heap.
// The methods is_in() and is_in_youngest() may be expensive to compute
// in general, so, to prevent their inadvertent use in product jvm's, we
// restrict their use to assertion checking or verification only.
bool is_in(const void* p) const override;
// Returns true if p points into the reserved space for the young generation.
// Assumes the young gen address range is less than that of the old gen.
bool is_in_young(const void* p) const;
bool requires_barriers(stackChunkOop obj) const override;
#ifdef ASSERT
bool is_in_partial_collection(const void* p);
#endif
// Optimized nmethod scanning support routines
void register_nmethod(nmethod* nm) override;
void unregister_nmethod(nmethod* nm) override;
void verify_nmethod(nmethod* nm) override;
void prune_scavengable_nmethods();
void prune_unlinked_nmethods();
// Iteration functions.
void object_iterate(ObjectClosure* cl) override;
// A CollectedHeap is divided into a dense sequence of "blocks"; that is,
// each address in the (reserved) heap is a member of exactly
// one block. The defining characteristic of a block is that it is
// possible to find its size, and thus to progress forward to the next
// block. (Blocks may be of different sizes.) Thus, blocks may
// represent Java objects, or they might be free blocks in a
// free-list-based heap (or subheap), as long as the two kinds are
// distinguishable and the size of each is determinable.
// Returns the address of the start of the "block" that contains the
// address "addr". We say "blocks" instead of "object" since some heaps
// may not pack objects densely; a chunk may either be an object or a
// non-object.
HeapWord* block_start(const void* addr) const;
// Requires "addr" to be the start of a block, and returns "TRUE" iff
// the block is an object. Assumes (and verifies in non-product
// builds) that addr is in the allocated part of the heap and is
// the start of a chunk.
bool block_is_obj(const HeapWord* addr) const;
// Section on TLAB's.
size_t tlab_capacity(Thread* thr) const override;
size_t tlab_used(Thread* thr) const override;
size_t unsafe_max_tlab_alloc(Thread* thr) const override;
HeapWord* allocate_new_tlab(size_t min_size,
size_t requested_size,
size_t* actual_size) override;
// Total number of full collections completed.
unsigned int total_full_collections_completed() {
assert(_full_collections_completed <= _total_full_collections,
"Can't complete more collections than were started");
return _full_collections_completed;
}
// Update above counter, as appropriate, at the end of a stop-world GC cycle
unsigned int update_full_collections_completed();
// Update the gc statistics for each generation.
void update_gc_stats(Generation* current_generation, bool full) {
_old_gen->update_gc_stats(current_generation, full);
}
bool no_gc_in_progress() { return !is_gc_active(); }
void prepare_for_verify() override;
void verify(VerifyOption option) override;
void print_on(outputStream* st) const override;
void gc_threads_do(ThreadClosure* tc) const override;
void print_tracing_info() const override;
// Used to print information about locations in the hs_err file.
bool print_location(outputStream* st, void* addr) const override;
void print_heap_change(const PreGenGCValues& pre_gc_values) const;
// The functions below are helper functions that a subclass of
// "CollectedHeap" can use in the implementation of its virtual
// functions.
class GenClosure : public StackObj {
public:
virtual void do_generation(Generation* gen) = 0;
};
// Apply "cl.do_generation" to all generations in the heap
// If "old_to_young" determines the order.
void generation_iterate(GenClosure* cl, bool old_to_young);
// Return "true" if all generations have reached the
// maximal committed limit that they can reach, without a garbage
// collection.
virtual bool is_maximal_no_gc() const override;
// This function returns the CardTableRS object that allows us to scan
// generations in a fully generational heap.
CardTableRS* rem_set() { return _rem_set; }
// The ScanningOption determines which of the roots
// the closure is applied to:
// "SO_None" does none;
enum ScanningOption {
SO_None = 0x0,
SO_AllCodeCache = 0x8,
SO_ScavengeCodeCache = 0x10
};
protected:
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
public:
// Apply closures on various roots in Young GC or marking/adjust phases of Full GC.
void process_roots(ScanningOption so,
OopClosure* strong_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots);
// Set the saved marks of generations, if that makes sense.
// In particular, if any generation might iterate over the oops
// in other generations, it should call this method.
void save_marks();
// Returns "true" iff no allocations have occurred since the last
// call to "save_marks".
bool no_allocs_since_save_marks();
// Returns true if an incremental collection is likely to fail.
// We optionally consult the young gen, if asked to do so;
// otherwise we base our answer on whether the previous incremental
// collection attempt failed with no corrective action as of yet.
bool incremental_collection_will_fail(bool consult_young) {
// The first disjunct remembers if an incremental collection failed, even
// when we thought (second disjunct) that it would not.
return incremental_collection_failed() ||
(consult_young && !_young_gen->collection_attempt_is_safe());
}
// If a generation bails out of an incremental collection,
// it sets this flag.
bool incremental_collection_failed() const {
return _incremental_collection_failed;
}
void set_incremental_collection_failed() {
_incremental_collection_failed = true;
}
void clear_incremental_collection_failed() {
_incremental_collection_failed = false;
}
private:
// Return true if an allocation should be attempted in the older generation
// if it fails in the younger generation. Return false, otherwise.
bool should_try_older_generation_allocation(size_t word_size) const;
// Try to allocate space by expanding the heap.
HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
HeapWord* mem_allocate_work(size_t size,
bool is_tlab);
// Save the tops of the spaces in all generations
void record_gen_tops_before_GC() PRODUCT_RETURN;
// Return true if we need to perform full collection.
bool should_do_full_collection(size_t size, bool full,
bool is_tlab, GenerationType max_gen) const;
private:
MemoryPool* _eden_pool;
MemoryPool* _survivor_pool;
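For illustration only (not part of this diff): the GenClosure and generation_iterate() declarations above are the intended way to visit both generations. A minimal sketch, assuming the SerialHeap::heap() singleton accessor that the VM operations below rely on:

  // Illustrative closure that sums the bytes in use across both generations.
  class SumUsedClosure : public SerialHeap::GenClosure {
  public:
    size_t total = 0;
    void do_generation(Generation* gen) override {
      total += gen->used();  // Generation::used() reports bytes in use
    }
  };

  static size_t heap_used_via_iteration() {
    SumUsedClosure cl;
    // false = visit young before old; true visits old first.
    SerialHeap::heap()->generation_iterate(&cl, false);
    return cl.total;
  }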

View File

@@ -0,0 +1,48 @@
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/serial/serialVMOperations.hpp"
#include "gc/shared/gcLocker.hpp"
void VM_GenCollectForAllocation::doit() {
SvcGCMarker sgcm(SvcGCMarker::MINOR);
SerialHeap* gch = SerialHeap::heap();
GCCauseSetter gccs(gch, _gc_cause);
_result = gch->satisfy_failed_allocation(_word_size, _tlab);
assert(_result == nullptr || gch->is_in_reserved(_result), "result not in heap");
if (_result == nullptr && GCLocker::is_active_and_needs_gc()) {
set_gc_locked();
}
}
void VM_GenCollectFull::doit() {
SvcGCMarker sgcm(SvcGCMarker::FULL);
SerialHeap* gch = SerialHeap::heap();
GCCauseSetter gccs(gch, _gc_cause);
gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}
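These doit() bodies run on the VM thread at a safepoint; mutator threads reach them by queueing the operation. A sketch of that hand-off, assuming VMThread::execute() and the result()/prologue_succeeded() accessors of the shared VM-operation base classes (none of which appear in this diff; the helper is hypothetical):

  // Illustrative allocation slow path: schedule the GC op and read its result.
  static HeapWord* collect_and_allocate(size_t word_size, bool is_tlab,
                                        uint gc_count_before) {
    VM_GenCollectForAllocation op(word_size, is_tlab, gc_count_before);
    VMThread::execute(&op);     // blocks until doit() has run at a safepoint
    if (op.prologue_succeeded()) {
      return op.result();       // nullptr if the collection could not help
    }
    return nullptr;             // another GC ran first; the caller retries
  }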

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SERIAL_SERIALVMOPERATIONS_HPP
#define SHARE_GC_SERIAL_SERIALVMOPERATIONS_HPP
#include "gc/shared/gcVMOperations.hpp"
#include "gc/serial/serialHeap.hpp"
class VM_GenCollectForAllocation : public VM_CollectForAllocation {
private:
bool _tlab; // alloc is of a tlab.
public:
VM_GenCollectForAllocation(size_t word_size,
bool tlab,
uint gc_count_before)
: VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure),
_tlab(tlab) {
assert(word_size != 0, "An allocation should always be requested with this operation.");
}
~VM_GenCollectForAllocation() {}
virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }
virtual void doit();
};
// VM operation to invoke a full collection of a
// SerialHeap.
class VM_GenCollectFull: public VM_GC_Operation {
private:
SerialHeap::GenerationType _max_generation;
public:
VM_GenCollectFull(uint gc_count_before,
uint full_gc_count_before,
GCCause::Cause gc_cause,
SerialHeap::GenerationType max_generation)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before,
max_generation != SerialHeap::YoungGen /* full */),
_max_generation(max_generation) { }
~VM_GenCollectFull() {}
virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
virtual void doit();
};
#endif // SHARE_GC_SERIAL_SERIALVMOPERATIONS_HPP
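Usage is symmetric for full collections; a hypothetical caller (only the classes and the GenerationType enum come from this commit, while the counter accessors are the standard CollectedHeap ones):

  // Illustrative: request a collection of both generations (a full GC).
  static void request_full_gc(GCCause::Cause cause) {
    SerialHeap* heap = SerialHeap::heap();
    VM_GenCollectFull op(heap->total_collections(),      // gc_count_before
                         heap->total_full_collections(), // full_gc_count_before
                         cause,
                         SerialHeap::OldGen);            // collect young and old
    VMThread::execute(&op);
  }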

View File

@@ -53,12 +53,15 @@
nonstatic_field(SerialBlockOffsetSharedArray, _vs, VirtualSpace) \
nonstatic_field(SerialBlockOffsetSharedArray, _offset_array, u_char*) \
\
-nonstatic_field(TenuredSpace, _offsets, SerialBlockOffsetTable)
+nonstatic_field(TenuredSpace, _offsets, SerialBlockOffsetTable) \
+\
+nonstatic_field(SerialHeap, _young_gen, DefNewGeneration*) \
+nonstatic_field(SerialHeap, _old_gen, TenuredGeneration*) \
#define VM_TYPES_SERIALGC(declare_type, \
declare_toplevel_type, \
declare_integer_type) \
-declare_type(SerialHeap, GenCollectedHeap) \
+declare_type(SerialHeap, CollectedHeap) \
declare_type(TenuredGeneration, Generation) \
declare_type(TenuredSpace, ContiguousSpace) \
\

View File

@@ -97,7 +97,7 @@ void CardTableBarrierSet::print_on(outputStream* st) const {
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
-// GenCollectedHeap(DefNew,Tenured) and
+// SerialHeap(DefNew,Tenured) and
// ParallelScavengeHeap(ParallelGC, ParallelOldGC)
// need the card-mark if and only if the region is
// in the old gen, and do not care if the card-mark

View File

@@ -84,8 +84,7 @@ public:
//
// CollectedHeap
-// GenCollectedHeap
-// SerialHeap
+// SerialHeap
// G1CollectedHeap
// ParallelScavengeHeap
// ShenandoahHeap

View File

@@ -30,7 +30,7 @@
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gc_globals.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/classLoaderMetaspace.hpp"
@@ -194,28 +194,6 @@ void VM_GC_HeapInspection::doit() {
}
}
-void VM_GenCollectForAllocation::doit() {
-SvcGCMarker sgcm(SvcGCMarker::MINOR);
-GenCollectedHeap* gch = GenCollectedHeap::heap();
-GCCauseSetter gccs(gch, _gc_cause);
-_result = gch->satisfy_failed_allocation(_word_size, _tlab);
-assert(_result == nullptr || gch->is_in_reserved(_result), "result not in heap");
-if (_result == nullptr && GCLocker::is_active_and_needs_gc()) {
-set_gc_locked();
-}
-}
-void VM_GenCollectFull::doit() {
-SvcGCMarker sgcm(SvcGCMarker::FULL);
-GenCollectedHeap* gch = GenCollectedHeap::heap();
-GCCauseSetter gccs(gch, _gc_cause);
-gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
-}
VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
size_t size,
Metaspace::MetadataType mdtype,

View File

@@ -26,7 +26,7 @@
#define SHARE_GC_SHARED_GCVMOPERATIONS_HPP
#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/collectorCounters.hpp"
#include "memory/metaspace.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.hpp"
@@ -192,40 +192,6 @@ class VM_CollectForAllocation : public VM_GC_Operation {
}
};
-class VM_GenCollectForAllocation : public VM_CollectForAllocation {
-private:
-bool _tlab; // alloc is of a tlab.
-public:
-VM_GenCollectForAllocation(size_t word_size,
-bool tlab,
-uint gc_count_before)
-: VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure),
-_tlab(tlab) {
-assert(word_size != 0, "An allocation should always be requested with this operation.");
-}
-~VM_GenCollectForAllocation() {}
-virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }
-virtual void doit();
-};
-// VM operation to invoke a collection of the heap as a
-// GenCollectedHeap heap.
-class VM_GenCollectFull: public VM_GC_Operation {
-private:
-GenCollectedHeap::GenerationType _max_generation;
-public:
-VM_GenCollectFull(uint gc_count_before,
-uint full_gc_count_before,
-GCCause::Cause gc_cause,
-GenCollectedHeap::GenerationType max_generation)
-: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before,
-max_generation != GenCollectedHeap::YoungGen /* full */),
-_max_generation(max_generation) { }
-~VM_GenCollectFull() {}
-virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
-virtual void doit();
-};
class VM_CollectForMetadataAllocation: public VM_GC_Operation {
private:
MetaWord* _result;

File diff suppressed because it is too large.

View File

@@ -1,346 +0,0 @@
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_GENCOLLECTEDHEAP_HPP
#define SHARE_GC_SHARED_GENCOLLECTEDHEAP_HPP
#include "gc/serial/generation.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/preGCValues.hpp"
#include "gc/shared/softRefPolicy.hpp"
class CardTableRS;
class GCPolicyCounters;
// A "GenCollectedHeap" is a CollectedHeap that uses generational
// collection. It has two generations, young and old.
class GenCollectedHeap : public CollectedHeap {
friend class Generation;
friend class DefNewGeneration;
friend class TenuredGeneration;
friend class GenMarkSweep;
friend class VM_GenCollectForAllocation;
friend class VM_GenCollectFull;
friend class VM_GC_HeapInspection;
friend class VM_HeapDumper;
friend class HeapInspection;
friend class GCCauseSetter;
friend class VMStructs;
public:
friend class VM_PopulateDumpSharedSpace;
enum GenerationType {
YoungGen,
OldGen
};
protected:
Generation* _young_gen;
Generation* _old_gen;
private:
// The singleton CardTable Remembered Set.
CardTableRS* _rem_set;
SoftRefPolicy _soft_ref_policy;
GCPolicyCounters* _gc_policy_counters;
// Indicates that the most recent previous incremental collection failed.
// The flag is cleared when an action is taken that might clear the
// condition that caused that incremental collection to fail.
bool _incremental_collection_failed;
// In support of ExplicitGCInvokesConcurrent functionality
unsigned int _full_collections_completed;
// Collects the given generation.
void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
bool run_verification, bool clear_soft_refs);
// Reserve aligned space for the heap as needed by the contained generations.
ReservedHeapSpace allocate(size_t alignment);
PreGenGCValues get_pre_gc_values() const;
protected:
GCMemoryManager* _young_manager;
GCMemoryManager* _old_manager;
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
bool is_tlab,
bool first_only);
// Helper function for two callbacks below.
// Considers collection of the first max_level+1 generations.
void do_collection(bool full,
bool clear_all_soft_refs,
size_t size,
bool is_tlab,
GenerationType max_generation);
// Callback from VM_GenCollectForAllocation operation.
// This function does everything necessary/possible to satisfy an
// allocation request that failed in the youngest generation that should
// have handled it (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
// Callback from VM_GenCollectFull operation.
// Perform a full collection of the first max_level+1 generations.
void do_full_collection(bool clear_all_soft_refs) override;
void do_full_collection(bool clear_all_soft_refs, GenerationType max_generation);
// Does the "cause" of GC indicate that
// we absolutely __must__ clear soft refs?
bool must_clear_all_soft_refs();
GenCollectedHeap(Generation::Name young,
Generation::Name old,
const char* policy_counters_name);
public:
// Returns JNI_OK on success
jint initialize() override;
virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);
// Does operations required after initialization has been done.
void post_initialize() override;
Generation* young_gen() const { return _young_gen; }
Generation* old_gen() const { return _old_gen; }
bool is_young_gen(const Generation* gen) const { return gen == _young_gen; }
bool is_old_gen(const Generation* gen) const { return gen == _old_gen; }
MemRegion reserved_region() const { return _reserved; }
bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; }
// Performance Counter support
GCPolicyCounters* counters() { return _gc_policy_counters; }
size_t capacity() const override;
size_t used() const override;
// Save the "used_region" for both generations.
void save_used_regions();
size_t max_capacity() const override;
HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) override;
// Perform a full collection of the heap; intended for use in implementing
// "System.gc". This implies as full a collection as the CollectedHeap
// supports. Caller does not hold the Heap_lock on entry.
void collect(GCCause::Cause cause) override;
// Returns "TRUE" iff "p" points into the committed areas of the heap.
// The methods is_in() and is_in_youngest() may be expensive to compute
// in general, so, to prevent their inadvertent use in product jvm's, we
// restrict their use to assertion checking or verification only.
bool is_in(const void* p) const override;
// Returns true if p points into the reserved space for the young generation.
// Assumes the young gen address range is less than that of the old gen.
bool is_in_young(const void* p) const;
bool requires_barriers(stackChunkOop obj) const override;
#ifdef ASSERT
bool is_in_partial_collection(const void* p);
#endif
// Optimized nmethod scanning support routines
void register_nmethod(nmethod* nm) override;
void unregister_nmethod(nmethod* nm) override;
void verify_nmethod(nmethod* nm) override;
void prune_scavengable_nmethods();
void prune_unlinked_nmethods();
// Iteration functions.
void object_iterate(ObjectClosure* cl) override;
// A CollectedHeap is divided into a dense sequence of "blocks"; that is,
// each address in the (reserved) heap is a member of exactly
// one block. The defining characteristic of a block is that it is
// possible to find its size, and thus to progress forward to the next
// block. (Blocks may be of different sizes.) Thus, blocks may
// represent Java objects, or they might be free blocks in a
// free-list-based heap (or subheap), as long as the two kinds are
// distinguishable and the size of each is determinable.
// Returns the address of the start of the "block" that contains the
// address "addr". We say "blocks" instead of "object" since some heaps
// may not pack objects densely; a chunk may either be an object or a
// non-object.
HeapWord* block_start(const void* addr) const;
// Requires "addr" to be the start of a block, and returns "TRUE" iff
// the block is an object. Assumes (and verifies in non-product
// builds) that addr is in the allocated part of the heap and is
// the start of a chunk.
bool block_is_obj(const HeapWord* addr) const;
// Section on TLAB's.
size_t tlab_capacity(Thread* thr) const override;
size_t tlab_used(Thread* thr) const override;
size_t unsafe_max_tlab_alloc(Thread* thr) const override;
HeapWord* allocate_new_tlab(size_t min_size,
size_t requested_size,
size_t* actual_size) override;
// Total number of full collections completed.
unsigned int total_full_collections_completed() {
assert(_full_collections_completed <= _total_full_collections,
"Can't complete more collections than were started");
return _full_collections_completed;
}
// Update above counter, as appropriate, at the end of a stop-world GC cycle
unsigned int update_full_collections_completed();
// Update the gc statistics for each generation.
void update_gc_stats(Generation* current_generation, bool full) {
_old_gen->update_gc_stats(current_generation, full);
}
bool no_gc_in_progress() { return !is_gc_active(); }
void prepare_for_verify() override;
void verify(VerifyOption option) override;
void print_on(outputStream* st) const override;
void gc_threads_do(ThreadClosure* tc) const override;
void print_tracing_info() const override;
// Used to print information about locations in the hs_err file.
bool print_location(outputStream* st, void* addr) const override;
void print_heap_change(const PreGenGCValues& pre_gc_values) const;
// The functions below are helper functions that a subclass of
// "CollectedHeap" can use in the implementation of its virtual
// functions.
class GenClosure : public StackObj {
public:
virtual void do_generation(Generation* gen) = 0;
};
// Apply "cl.do_generation" to all generations in the heap
// If "old_to_young" determines the order.
void generation_iterate(GenClosure* cl, bool old_to_young);
// Return "true" if all generations have reached the
// maximal committed limit that they can reach, without a garbage
// collection.
virtual bool is_maximal_no_gc() const override;
// This function returns the CardTableRS object that allows us to scan
// generations in a fully generational heap.
CardTableRS* rem_set() { return _rem_set; }
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
static GenCollectedHeap* heap();
// The ScanningOption determines which of the roots
// the closure is applied to:
// "SO_None" does none;
enum ScanningOption {
SO_None = 0x0,
SO_AllCodeCache = 0x8,
SO_ScavengeCodeCache = 0x10
};
protected:
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
public:
// Apply closures on various roots in Young GC or marking/adjust phases of Full GC.
void process_roots(ScanningOption so,
OopClosure* strong_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots);
// Set the saved marks of generations, if that makes sense.
// In particular, if any generation might iterate over the oops
// in other generations, it should call this method.
void save_marks();
// Returns "true" iff no allocations have occurred since the last
// call to "save_marks".
bool no_allocs_since_save_marks();
// Returns true if an incremental collection is likely to fail.
// We optionally consult the young gen, if asked to do so;
// otherwise we base our answer on whether the previous incremental
// collection attempt failed with no corrective action as of yet.
bool incremental_collection_will_fail(bool consult_young) {
// The first disjunct remembers if an incremental collection failed, even
// when we thought (second disjunct) that it would not.
return incremental_collection_failed() ||
(consult_young && !_young_gen->collection_attempt_is_safe());
}
// If a generation bails out of an incremental collection,
// it sets this flag.
bool incremental_collection_failed() const {
return _incremental_collection_failed;
}
void set_incremental_collection_failed() {
_incremental_collection_failed = true;
}
void clear_incremental_collection_failed() {
_incremental_collection_failed = false;
}
private:
// Return true if an allocation should be attempted in the older generation
// if it fails in the younger generation. Return false, otherwise.
bool should_try_older_generation_allocation(size_t word_size) const;
// Try to allocate space by expanding the heap.
HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
HeapWord* mem_allocate_work(size_t size,
bool is_tlab);
// Save the tops of the spaces in all generations
void record_gen_tops_before_GC() PRODUCT_RETURN;
// Return true if we need to perform full collection.
bool should_do_full_collection(size_t size, bool full,
bool is_tlab, GenerationType max_gen) const;
};
#endif // SHARE_GC_SHARED_GENCOLLECTEDHEAP_HPP

View File

@@ -26,7 +26,6 @@
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"

View File

@@ -65,7 +65,7 @@ class SpaceDecorator: public AllStatic {
// area and provides the methods for doing the piecemeal mangling.
// Methods for doing spaces and full checking of the mangling are
// included. The full checking is done if DEBUG_MANGLING is defined.
-// GenSpaceMangler is used with the GenCollectedHeap collectors and
+// GenSpaceMangler is used with the SerialHeap collectors and
// MutableSpaceMangler is used with the ParallelScavengeHeap collectors.
// These subclasses abstract the differences in the types of spaces used
// by each heap.
@@ -122,7 +122,7 @@ class SpaceMangler: public CHeapObj<mtGC> {
class ContiguousSpace;
class MutableSpace;
-// For use with GenCollectedHeap's
+// For use with SerialHeap's
class GenSpaceMangler: public SpaceMangler {
ContiguousSpace* _sp;

View File

@@ -28,7 +28,6 @@
#include "gc/shared/ageTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/space.hpp"
#if INCLUDE_EPSILONGC
@@ -109,9 +108,6 @@
nonstatic_field(Generation::StatRecord, invocations, int) \
nonstatic_field(Generation::StatRecord, accumulated_time, elapsedTimer) \
\
-nonstatic_field(GenCollectedHeap, _young_gen, Generation*) \
-nonstatic_field(GenCollectedHeap, _old_gen, Generation*) \
-\
nonstatic_field(MemRegion, _start, HeapWord*) \
nonstatic_field(MemRegion, _word_size, size_t) \
\
@@ -146,7 +142,6 @@
/******************************************/ \
\
declare_toplevel_type(CollectedHeap) \
-declare_type(GenCollectedHeap, CollectedHeap) \
declare_toplevel_type(Generation) \
declare_toplevel_type(Space) \
declare_type(ContiguousSpace, Space) \
@@ -175,9 +170,6 @@
declare_toplevel_type(CardTableBarrierSet**) \
declare_toplevel_type(CollectedHeap*) \
declare_toplevel_type(ContiguousSpace*) \
-declare_toplevel_type(DefNewGeneration*) \
-declare_toplevel_type(GenCollectedHeap*) \
-declare_toplevel_type(Generation*) \
declare_toplevel_type(HeapWord*) \
declare_toplevel_type(HeapWord* volatile) \
declare_toplevel_type(MemRegion*) \

View File

@@ -36,6 +36,7 @@ import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.gc.epsilon.*;
import sun.jvm.hotspot.gc.parallel.*;
import sun.jvm.hotspot.gc.shared.*;
+import sun.jvm.hotspot.gc.serial.*;
import sun.jvm.hotspot.gc.shenandoah.*;
import sun.jvm.hotspot.gc.g1.*;
import sun.jvm.hotspot.gc.x.*;
@@ -1076,8 +1077,8 @@ public class HSDB implements ObjectHistogramPanel.Listener, SAListener {
CollectedHeap collHeap = VM.getVM().getUniverse().heap();
boolean bad = true;
anno = "BAD OOP";
-if (collHeap instanceof GenCollectedHeap) {
-GenCollectedHeap heap = (GenCollectedHeap) collHeap;
+if (collHeap instanceof SerialHeap) {
+SerialHeap heap = (SerialHeap) collHeap;
for (int i = 0; i < heap.nGens(); i++) {
if (heap.getGen(i).isIn(handle)) {
if (i == 0) {

View File

@@ -24,11 +24,15 @@
package sun.jvm.hotspot.gc.serial;
import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.gc.shared.GenCollectedHeap;
import sun.jvm.hotspot.gc.shared.CollectedHeapName;
import java.io.*;
-public class SerialHeap extends GenCollectedHeap {
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.gc.shared.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+public class SerialHeap extends CollectedHeap {
public SerialHeap(Address addr) {
super(addr);
@@ -37,4 +41,91 @@ public class SerialHeap extends GenCollectedHeap {
public CollectedHeapName kind() {
return CollectedHeapName.SERIAL;
}
private static AddressField youngGenField;
private static AddressField oldGenField;
private static GenerationFactory genFactory;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("SerialHeap");
youngGenField = type.getAddressField("_young_gen");
oldGenField = type.getAddressField("_old_gen");
genFactory = new GenerationFactory();
}
public int nGens() {
return 2; // Young + Old
}
public Generation getGen(int i) {
if (Assert.ASSERTS_ENABLED) {
Assert.that((i == 0) || (i == 1), "Index " + i +
" out of range (should be 0 or 1)");
}
switch (i) {
case 0:
return genFactory.newObject(youngGenField.getValue(addr));
case 1:
return genFactory.newObject(oldGenField.getValue(addr));
default:
// no generation for i, and assertions disabled.
return null;
}
}
public boolean isIn(Address a) {
for (int i = 0; i < nGens(); i++) {
Generation gen = getGen(i);
if (gen.isIn(a)) {
return true;
}
}
return false;
}
public long capacity() {
long capacity = 0;
for (int i = 0; i < nGens(); i++) {
capacity += getGen(i).capacity();
}
return capacity;
}
public long used() {
long used = 0;
for (int i = 0; i < nGens(); i++) {
used += getGen(i).used();
}
return used;
}
public void liveRegionsIterate(LiveRegionsClosure closure) {
// Run through all generations, obtaining bottom-top pairs.
for (int i = 0; i < nGens(); i++) {
Generation gen = getGen(i);
gen.liveRegionsIterate(closure);
}
}
public void printOn(PrintStream tty) {
for (int i = 0; i < nGens(); i++) {
tty.print("Gen " + i + ": ");
getGen(i).printOn(tty);
tty.println("Invocations: " + getGen(i).invocations());
tty.println();
}
}
}

View File

@@ -1,129 +0,0 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc.shared;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.gc.shared.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
import sun.jvm.hotspot.utilities.Observable;
import sun.jvm.hotspot.utilities.Observer;
public abstract class GenCollectedHeap extends CollectedHeap {
private static AddressField youngGenField;
private static AddressField oldGenField;
private static GenerationFactory genFactory;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("GenCollectedHeap");
youngGenField = type.getAddressField("_young_gen");
oldGenField = type.getAddressField("_old_gen");
genFactory = new GenerationFactory();
}
public GenCollectedHeap(Address addr) {
super(addr);
}
public int nGens() {
return 2; // Young + Old
}
public Generation getGen(int i) {
if (Assert.ASSERTS_ENABLED) {
Assert.that((i == 0) || (i == 1), "Index " + i +
" out of range (should be 0 or 1)");
}
switch (i) {
case 0:
return genFactory.newObject(youngGenField.getValue(addr));
case 1:
return genFactory.newObject(oldGenField.getValue(addr));
default:
// no generation for i, and assertions disabled.
return null;
}
}
public boolean isIn(Address a) {
for (int i = 0; i < nGens(); i++) {
Generation gen = getGen(i);
if (gen.isIn(a)) {
return true;
}
}
return false;
}
public long capacity() {
long capacity = 0;
for (int i = 0; i < nGens(); i++) {
capacity += getGen(i).capacity();
}
return capacity;
}
public long used() {
long used = 0;
for (int i = 0; i < nGens(); i++) {
used += getGen(i).used();
}
return used;
}
public void liveRegionsIterate(LiveRegionsClosure closure) {
// Run through all generations, obtaining bottom-top pairs.
for (int i = 0; i < nGens(); i++) {
Generation gen = getGen(i);
gen.liveRegionsIterate(closure);
}
}
public void printOn(PrintStream tty) {
for (int i = 0; i < nGens(); i++) {
tty.print("Gen " + i + ": ");
getGen(i).printOn(tty);
tty.println("Invocations: " + getGen(i).invocations());
tty.println();
}
}
}

View File

@@ -93,10 +93,10 @@ public class HeapSummary extends Tool {
System.out.println();
System.out.println("Heap Usage:");
-if (heap instanceof GenCollectedHeap) {
-GenCollectedHeap genHeap = (GenCollectedHeap) heap;
-for (int n = 0; n < genHeap.nGens(); n++) {
-Generation gen = genHeap.getGen(n);
+if (heap instanceof SerialHeap) {
+SerialHeap sh = (SerialHeap) heap;
+for (int n = 0; n < sh.nGens(); n++) {
+Generation gen = sh.getGen(n);
if (gen instanceof DefNewGeneration) {
System.out.println("New Generation (Eden + 1 Survivor Space):");
printGen(gen);

View File

@@ -27,6 +27,7 @@ package sun.jvm.hotspot.utilities;
import sun.jvm.hotspot.code.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
+import sun.jvm.hotspot.gc.serial.*;
import sun.jvm.hotspot.gc.shared.*;
import sun.jvm.hotspot.interpreter.*;
import sun.jvm.hotspot.memory.*;
@@ -84,12 +85,12 @@ public class PointerFinder {
// Check if address is in the java heap.
CollectedHeap heap = VM.getVM().getUniverse().heap();
-if (heap instanceof GenCollectedHeap) {
-GenCollectedHeap genheap = (GenCollectedHeap) heap;
-if (genheap.isIn(a)) {
+if (heap instanceof SerialHeap) {
+SerialHeap sh = (SerialHeap) heap;
+if (sh.isIn(a)) {
loc.heap = heap;
-for (int i = 0; i < genheap.nGens(); i++) {
-Generation g = genheap.getGen(i);
+for (int i = 0; i < sh.nGens(); i++) {
+Generation g = sh.getGen(i);
if (g.isIn(a)) {
loc.gen = g;
break;

View File

@@ -28,6 +28,7 @@ import java.io.*;
import sun.jvm.hotspot.code.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.cdbg.*;
+import sun.jvm.hotspot.gc.serial.*;
import sun.jvm.hotspot.gc.shared.*;
import sun.jvm.hotspot.interpreter.*;
import sun.jvm.hotspot.memory.*;
@@ -111,11 +112,11 @@ public class PointerLocation {
}
public boolean isInNewGen() {
-return ((gen != null) && (gen.equals(((GenCollectedHeap)heap).getGen(0))));
+return ((gen != null) && (gen.equals(((SerialHeap)heap).getGen(0))));
}
public boolean isInOldGen() {
-return ((gen != null) && (gen.equals(((GenCollectedHeap)heap).getGen(1))));
+return ((gen != null) && (gen.equals(((SerialHeap)heap).getGen(1))));
}
}
public boolean inOtherGen() {