8076454: Clean up/move things out of SharedHeap

Reviewed-by: stefank, sjohanss, david
Bengt Rutisson 2015-04-02 16:06:07 +02:00
parent 66fc45f602
commit c3b72f7f5b
13 changed files with 66 additions and 181 deletions

View File

@@ -32,6 +32,7 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
@@ -2442,11 +2443,10 @@ void CompactibleFreeListSpace::verify() const {
{
VerifyAllOopsClosure cl(_collector, this, span, past_remark,
_collector->markBitMap());
CollectedHeap* ch = Universe::heap();
// Iterate over all oops in the heap. Uses the _no_header version
// since we are not interested in following the klass pointers.
ch->oop_iterate_no_header(&cl);
GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
}
if (VerifyObjectStartArray) {

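For context on the call above: oop_iterate_no_header wraps the caller's closure so that object headers (the klass pointers) are not followed, which is exactly what the comment in the hunk asks for. Below is a minimal standalone sketch of that adapter pattern; the types are simplified stand-ins, not HotSpot's real OopClosure/NoHeaderExtendedOopClosure API.

```cpp
#include <cstdio>
#include <vector>

// Simplified stand-ins for HotSpot's closure types; not the real API.
struct Obj;                              // a heap object
struct OopClosure {                      // visits object fields only
  virtual void do_oop(Obj** p) = 0;
  virtual ~OopClosure() {}
};
struct ExtendedOopClosure : OopClosure { // may also follow klass metadata
  virtual bool do_metadata() { return true; }
};

// Adapter: drives an ExtendedOopClosure walk but reports that metadata
// (the header/klass pointer) should be skipped, mirroring the intent of
// the no-header wrapper used in the patch.
struct NoHeaderAdapter : ExtendedOopClosure {
  OopClosure* _wrapped;
  explicit NoHeaderAdapter(OopClosure* cl) : _wrapped(cl) {}
  bool do_metadata() override { return false; }          // skip headers
  void do_oop(Obj** p) override { _wrapped->do_oop(p); } // forward fields
};

struct Obj { std::vector<Obj*> fields; };

// Mock heap: iterate all fields of all objects with an ExtendedOopClosure.
struct MockHeap {
  std::vector<Obj*> objects;
  void oop_iterate(ExtendedOopClosure* cl) {
    for (Obj* o : objects)
      for (Obj*& f : o->fields)
        cl->do_oop(&f);
    // a real heap would also visit the klass pointer when do_metadata() is true
  }
  // The convenience entry point the verification code calls.
  void oop_iterate_no_header(OopClosure* cl) {
    NoHeaderAdapter adapter(cl);
    oop_iterate(&adapter);
  }
};

struct CountClosure : OopClosure {
  int count = 0;
  void do_oop(Obj**) override { ++count; }
};

int main() {
  Obj a, b; a.fields.push_back(&b);
  MockHeap heap; heap.objects = { &a, &b };
  CountClosure cl;
  heap.oop_iterate_no_header(&cl);
  std::printf("visited %d oop fields\n", cl.count);   // prints 1
}
```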
View File

@@ -1770,6 +1770,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_g1h = this;
_workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
_workers->initialize_workers();
_allocator = G1Allocator::create_allocator(_g1h);
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
@@ -2035,6 +2040,11 @@ size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
void G1CollectedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
}
void G1CollectedHeap::ref_processing_init() {
// Reference processing in G1 currently works as follows:
//
@@ -2071,7 +2081,6 @@ void G1CollectedHeap::ref_processing_init() {
// * Discovery is atomic - i.e. not concurrent.
// * Reference discovery will not need a barrier.
SharedHeap::ref_processing_init();
MemRegion mr = reserved_region();
// Concurrent Mark ref processor
@@ -2463,11 +2472,6 @@ public:
}
};
void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
IterateOopClosureRegionClosure blk(cl);
heap_region_iterate(&blk);
}
// Iterates an ObjectClosure over all objects within a HeapRegion.
class IterateObjectClosureRegionClosure: public HeapRegionClosure {
@@ -2487,23 +2491,6 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
heap_region_iterate(&blk);
}
// Calls a SpaceClosure on a HeapRegion.
class SpaceClosureRegionClosure: public HeapRegionClosure {
SpaceClosure* _cl;
public:
SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
bool doHeapRegion(HeapRegion* r) {
_cl->do_space(r);
return false;
}
};
void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
SpaceClosureRegionClosure blk(cl);
heap_region_iterate(&blk);
}
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
_hrm.iterate(cl);
}
@@ -2640,23 +2627,19 @@ HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) cons
return result;
}
Space* G1CollectedHeap::space_containing(const void* addr) const {
return heap_region_containing(addr);
}
HeapWord* G1CollectedHeap::block_start(const void* addr) const {
Space* sp = space_containing(addr);
return sp->block_start(addr);
HeapRegion* hr = heap_region_containing(addr);
return hr->block_start(addr);
}
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
Space* sp = space_containing(addr);
return sp->block_size(addr);
HeapRegion* hr = heap_region_containing(addr);
return hr->block_size(addr);
}
bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
Space* sp = space_containing(addr);
return sp->block_is_obj(addr);
HeapRegion* hr = heap_region_containing(addr);
return hr->block_is_obj(addr);
}
bool G1CollectedHeap::supports_tlab_allocation() const {

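The block_start/block_size/block_is_obj hunks above now delegate straight to the HeapRegion that contains the address instead of going through the generic space_containing lookup. A rough standalone model of that lookup-by-region pattern follows; the names and the constant-time index computation are illustrative stand-ins, not G1's actual data structures (which consult a block-offset table).

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// A fixed-size region that can answer block queries for addresses it covers.
// Hypothetical stand-in for HeapRegion; only the shape of the API matters.
struct Region {
  std::uintptr_t bottom;
  std::uintptr_t end;
  // The real code consults a block-offset table; here we pretend every
  // block starts at the region's bottom.
  std::uintptr_t block_start(std::uintptr_t addr) const {
    assert(addr >= bottom && addr < end);
    return bottom;
  }
  std::size_t block_size(std::uintptr_t addr) const {
    return end - block_start(addr);
  }
  bool block_is_obj(std::uintptr_t addr) const {
    return block_start(addr) == addr;
  }
};

// Region-based heap: address -> region is a simple index computation,
// so there is no need to route the query through a generic Space*.
struct RegionHeap {
  std::uintptr_t base;
  std::size_t region_bytes;
  std::vector<Region> regions;

  RegionHeap(std::uintptr_t base, std::size_t region_bytes, std::size_t n)
      : base(base), region_bytes(region_bytes) {
    for (std::size_t i = 0; i < n; i++)
      regions.push_back(Region{ base + i * region_bytes,
                                base + (i + 1) * region_bytes });
  }
  const Region& region_containing(std::uintptr_t addr) const {
    return regions.at((addr - base) / region_bytes);
  }
  // The heap-level queries simply delegate to the containing region.
  std::uintptr_t block_start(std::uintptr_t addr) const {
    return region_containing(addr).block_start(addr);
  }
  std::size_t block_size(std::uintptr_t addr) const {
    return region_containing(addr).block_size(addr);
  }
  bool block_is_obj(std::uintptr_t addr) const {
    return region_containing(addr).block_is_obj(addr);
  }
};

int main() {
  RegionHeap heap(0x10000, 0x1000, 4);           // 4 regions of 4 KiB
  assert(heap.block_start(0x11080) == 0x11000);  // start of region 1
  assert(heap.block_size(0x11080) == 0x1000);    // whole region in this mock
  assert(heap.block_is_obj(0x12000));            // region-aligned address
  return 0;
}
```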
View File

@@ -76,6 +76,7 @@ class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
class FlexibleWorkGang;
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -204,6 +205,8 @@ private:
// The one and only G1CollectedHeap, so static functions can find it.
static G1CollectedHeap* _g1h;
FlexibleWorkGang* _workers;
static size_t _humongous_object_threshold_in_words;
// The secondary free list which contains regions that have been
@@ -605,6 +608,7 @@ protected:
void enqueue_discovered_references(uint no_of_gc_workers);
public:
FlexibleWorkGang* workers() const { return _workers; }
G1Allocator* allocator() {
return _allocator;
@@ -630,8 +634,8 @@ public:
inline AllocationContextStats& allocation_context_stats();
// Do anything common to GC's.
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
void gc_prologue(bool full);
void gc_epilogue(bool full);
inline void set_humongous_is_live(oop obj);
@@ -1000,8 +1004,11 @@ public:
// Return the (conservative) maximum heap alignment for any G1 heap
static size_t conservative_max_heap_alignment();
// Does operations required after initialization has been done.
void post_initialize();
// Initialize weak reference processing.
virtual void ref_processing_init();
void ref_processing_init();
// Explicitly import set_par_threads into this scope
using SharedHeap::set_par_threads;
@@ -1251,10 +1258,6 @@ public:
// Iteration functions.
// Iterate over all the ref-containing fields of all objects, calling
// "cl.do_oop" on each.
virtual void oop_iterate(ExtendedOopClosure* cl);
// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl);
@@ -1262,9 +1265,6 @@ public:
object_iterate(cl);
}
// Iterate over all spaces in use in the heap, in ascending address order.
virtual void space_iterate(SpaceClosure* cl);
// Iterate over heap regions, in address order, terminating the
// iteration early if the "doHeapRegion" method returns "true".
void heap_region_iterate(HeapRegionClosure* blk) const;
@@ -1307,10 +1307,6 @@ public:
HeapRegion* next_compaction_region(const HeapRegion* from) const;
// A CollectedHeap will contain some number of spaces. This finds the
// space containing a given address, or else returns NULL.
virtual Space* space_containing(const void* addr) const;
// Returns the HeapRegion that contains addr. addr must not be NULL.
template <class T>
inline HeapRegion* heap_region_containing_raw(const T addr) const;

View File

@@ -78,13 +78,7 @@ jint ParallelScavengeHeap::initialize() {
CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
barrier_set->initialize();
_barrier_set = barrier_set;
oopDesc::set_bs(_barrier_set);
if (_barrier_set == NULL) {
vm_shutdown_during_initialization(
"Could not reserve enough space for barrier set");
return JNI_ENOMEM;
}
set_barrier_set(barrier_set);
// Make up the generations
// Calculate the maximum size that a generation can grow. This
@@ -522,10 +516,6 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
VMThread::execute(&op);
}
void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
Unimplemented();
}
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
young_gen()->object_iterate(cl);
old_gen()->object_iterate(cl);

View File

@@ -201,7 +201,6 @@ class ParallelScavengeHeap : public CollectedHeap {
// initializing stores to an object at this address.
virtual bool can_elide_initializing_store_barrier(oop new_obj);
void oop_iterate(ExtendedOopClosure* cl);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

View File

@@ -220,6 +220,11 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
}
}
void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
_barrier_set = barrier_set;
oopDesc::set_bs(_barrier_set);
}
void CollectedHeap::pre_initialize() {
// Used for ReduceInitialCardMarks (when COMPILER2 is used);
// otherwise remains unused.

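Centralizing set_barrier_set in CollectedHeap (the hunk above) means each heap records the barrier set and refreshes the cached copy used by the oop layer in one place, rather than assigning _barrier_set and calling oopDesc::set_bs itself, as ParallelScavengeHeap::initialize and SharedHeap used to. A simplified sketch of that setter-plus-cache pattern, using mock types rather than the real oopDesc interface:

```cpp
#include <cassert>

// Mock barrier set; in HotSpot this is the card-marking machinery.
struct BarrierSet { int id; };

// Stand-in for the oop layer's cached barrier-set pointer (oopDesc::set_bs).
namespace oop_layer {
  static BarrierSet* g_bs = nullptr;
  inline void set_bs(BarrierSet* bs) { g_bs = bs; }
}

// The base heap owns the field and keeps the cache coherent.
class Heap {
  BarrierSet* _barrier_set = nullptr;
public:
  virtual ~Heap() {}
  BarrierSet* barrier_set() const { return _barrier_set; }
  void set_barrier_set(BarrierSet* bs) {
    _barrier_set = bs;        // record it on the heap
    oop_layer::set_bs(bs);    // and refresh the globally cached copy
  }
};

// A concrete heap only has to build its barrier set and hand it over.
class MockParallelHeap : public Heap {
public:
  int initialize(BarrierSet* bs) {
    set_barrier_set(bs);      // replaces manual assignment plus set_bs call
    return 0;                 // JNI_OK in the real code
  }
};

int main() {
  BarrierSet cards{42};
  MockParallelHeap heap;
  heap.initialize(&cards);
  assert(heap.barrier_set() == &cards);
  assert(oop_layer::g_bs == &cards);
}
```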
View File

@@ -205,7 +205,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// In many heaps, there will be a need to perform some initialization activities
// after the Universe is fully formed, but before general heap allocation is allowed.
// This is the correct place to place such initialization methods.
virtual void post_initialize() = 0;
virtual void post_initialize();
// Stop any ongoing concurrent work and prepare for exit.
virtual void stop() {}
@@ -470,6 +470,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Returns the barrier set for this heap
BarrierSet* barrier_set() { return _barrier_set; }
void set_barrier_set(BarrierSet* barrier_set);
// Returns "true" iff there is a stop-world GC in progress. (I assume
// that it should answer "false" for the concurrent part of a concurrent
@@ -497,12 +498,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Return the CollectorPolicy for the heap
virtual CollectorPolicy* collector_policy() const = 0;
void oop_iterate_no_header(OopClosure* cl);
// Iterate over all the ref-containing fields of all objects, calling
// "cl.do_oop" on each.
virtual void oop_iterate(ExtendedOopClosure* cl) = 0;
// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl) = 0;

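With post_initialize no longer pure virtual (see the hunk above and the G1/GenCollectedHeap hunks elsewhere in this commit), the concrete heaps override it, chain to the base implementation, and then do their own reference-processing setup. A minimal sketch of that chaining, with invented class names:

```cpp
#include <cstdio>

// Simplified model of the virtual initialization hook; class names invented.
class BaseHeap {
public:
  virtual ~BaseHeap() {}
  // No longer pure virtual: the base provides the common post-initialization
  // work that every concrete heap wants.
  virtual void post_initialize() {
    std::puts("BaseHeap::post_initialize: common setup");
  }
};

class GenerationalHeap : public BaseHeap {
public:
  void post_initialize() override {
    BaseHeap::post_initialize();   // chain to the shared part first
    ref_processing_init();         // then heap-specific reference setup
  }
  void ref_processing_init() {
    std::puts("GenerationalHeap: initializing reference processors");
  }
};

int main() {
  GenerationalHeap heap;
  heap.post_initialize();
}
```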
View File

@@ -236,12 +236,6 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
return (oop)obj;
}
inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) {
NoHeaderExtendedOopClosure no_header_cl(cl);
oop_iterate(&no_header_cl);
}
inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
HeapWord* end,
unsigned short alignment_in_bytes) {

View File

@@ -26,6 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"

View File

@@ -85,6 +85,15 @@ GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
_full_collections_completed(0)
{
assert(policy != NULL, "Sanity check");
if (UseConcMarkSweepGC) {
_workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
_workers->initialize_workers();
} else {
// Serial GC does not use workers.
_workers = NULL;
}
}
jint GenCollectedHeap::initialize() {
@@ -166,7 +175,8 @@ char* GenCollectedHeap::allocate(size_t alignment,
}
void GenCollectedHeap::post_initialize() {
SharedHeap::post_initialize();
CollectedHeap::post_initialize();
ref_processing_init();
GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
guarantee(policy->is_generation_policy(), "Illegal policy type");
assert((_young_gen->kind() == Generation::DefNew) ||
@@ -185,7 +195,6 @@ void GenCollectedHeap::post_initialize() {
}
void GenCollectedHeap::ref_processing_init() {
SharedHeap::ref_processing_init();
_young_gen->ref_processor_init();
_old_gen->ref_processor_init();
}
@@ -560,7 +569,8 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
}
void GenCollectedHeap::set_par_threads(uint t) {
SharedHeap::set_par_threads(t);
assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
CollectedHeap::set_par_threads(t);
set_n_termination(t);
}
@@ -924,6 +934,11 @@ bool GenCollectedHeap::is_in_partial_collection(const void* p) {
}
#endif
void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
NoHeaderExtendedOopClosure no_header_cl(cl);
oop_iterate(&no_header_cl);
}
void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
_young_gen->oop_iterate(cl);
_old_gen->oop_iterate(cl);
@@ -1093,11 +1108,6 @@ void GenCollectedHeap::generation_iterate(GenClosure* cl,
}
}
void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
_young_gen->space_iterate(cl, true);
_old_gen->space_iterate(cl, true);
}
bool GenCollectedHeap::is_maximal_no_gc() const {
return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

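The work gang that SharedHeap used to create unconditionally for CMS and G1 is now owned by the concrete heaps: in the constructor hunk above, GenCollectedHeap only builds a gang when UseConcMarkSweepGC is set and leaves _workers NULL for the serial collector, and set_par_threads keeps the "no parallel threads with serial GC" assert. A rough standalone sketch of that ownership pattern, with a mock gang class and mock flags standing in for FlexibleWorkGang, UseConcMarkSweepGC, and ParallelGCThreads:

```cpp
#include <cassert>
#include <string>

// Mock of a GC worker gang; the real FlexibleWorkGang manages VM threads.
class WorkGang {
  std::string _name;
  unsigned _total_workers;
  unsigned _active_workers = 0;
public:
  WorkGang(const std::string& name, unsigned workers)
      : _name(name), _total_workers(workers) {}
  void initialize_workers() { _active_workers = _total_workers; }
  unsigned active_workers() const { return _active_workers; }
};

// Flags standing in for UseConcMarkSweepGC / ParallelGCThreads.
struct Flags { bool use_concurrent_collector; unsigned parallel_gc_threads; };

class GenerationalHeap {
  WorkGang* _workers = nullptr;   // stays NULL when the serial collector is used
  unsigned _n_par_threads = 0;
public:
  explicit GenerationalHeap(const Flags& flags) {
    if (flags.use_concurrent_collector) {
      // Only the concurrent collector needs a gang of parallel GC workers.
      _workers = new WorkGang("GC Thread", flags.parallel_gc_threads);
      _workers->initialize_workers();
    }
  }
  ~GenerationalHeap() { delete _workers; }

  WorkGang* workers() const { return _workers; }
  unsigned  par_threads() const { return _n_par_threads; }

  // Parallel threads only make sense when a gang actually exists.
  void set_par_threads(unsigned t) {
    assert(t == 0 || _workers != nullptr);
    _n_par_threads = t;
  }
};

int main() {
  GenerationalHeap serial(Flags{false, 0});
  assert(serial.workers() == nullptr);
  serial.set_par_threads(0);

  GenerationalHeap concurrent(Flags{true, 4});
  assert(concurrent.workers() != nullptr);
  concurrent.set_par_threads(4);
}
```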
View File

@@ -31,6 +31,7 @@
#include "memory/sharedHeap.hpp"
class SubTasksDone;
class FlexibleWorkGang;
// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection. It has two generations, young and old.
@@ -93,6 +94,8 @@ public:
// In block contents verification, the number of header words to skip
NOT_PRODUCT(static size_t _skip_header_HeapWords;)
FlexibleWorkGang* _workers;
protected:
// Helper functions for allocation
HeapWord* attempt_allocation(size_t size,
@@ -125,6 +128,8 @@ protected:
public:
GenCollectedHeap(GenCollectorPolicy *policy);
FlexibleWorkGang* workers() const { return _workers; }
GCStats* gc_stats(int level) const;
// Returns JNI_OK on success
@@ -223,6 +228,7 @@ public:
}
// Iteration functions.
void oop_iterate_no_header(OopClosure* cl);
void oop_iterate(ExtendedOopClosure* cl);
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl);
@@ -331,7 +337,6 @@ public:
_old_gen->update_gc_stats(current_level, full);
}
// Override.
bool no_gc_in_progress() { return !is_gc_active(); }
// Override.
@@ -363,8 +368,6 @@ public:
// If "old_to_young" determines the order.
void generation_iterate(GenClosure* cl, bool old_to_young);
void space_iterate(SpaceClosure* cl);
// Return "true" if all generations have reached the
// maximal committed limit that they can reach, without a garbage
// collection.
@@ -531,8 +534,8 @@ private:
void record_gen_tops_before_GC() PRODUCT_RETURN;
protected:
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
void gc_prologue(bool full);
void gc_epilogue(bool full);
};
#endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP

View File

@@ -36,25 +36,8 @@
#include "utilities/workgroup.hpp"
SharedHeap::SharedHeap() :
CollectedHeap(),
_workers(NULL)
{
if (UseConcMarkSweepGC || UseG1GC) {
_workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
if (_workers == NULL) {
vm_exit_during_initialization("Failed necessary allocation.");
} else {
_workers->initialize_workers();
}
}
}
void SharedHeap::set_par_threads(uint t) {
assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
_n_par_threads = t;
}
CollectedHeap()
{}
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
: MarkScope(activate), _sh(heap)
@@ -69,16 +52,3 @@ SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
SharedHeap::StrongRootsScope::~StrongRootsScope() {
Threads::assert_all_threads_claimed();
}
void SharedHeap::set_barrier_set(BarrierSet* bs) {
_barrier_set = bs;
// Cached barrier set for fast access in oops
oopDesc::set_bs(bs);
}
void SharedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
}
void SharedHeap::ref_processing_init() {}

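What remains in sharedHeap.cpp after this change is essentially the StrongRootsScope helper: a RAII object that brackets a strong-roots walk and, in its destructor, checks that every thread was claimed. A toy sketch of that scope pattern follows; the bookkeeping is simplified and the helper types are invented, not the real Threads API.

```cpp
#include <cassert>
#include <vector>

// Toy model of per-thread "claimed" bookkeeping during a root scan.
struct MockThread { bool claimed = false; };

struct MockThreads {
  std::vector<MockThread> threads;
  MockThreads() : threads(4) {}
  void change_parity() {               // reset claims for a new scan
    for (auto& t : threads) t.claimed = false;
  }
  void assert_all_threads_claimed() {  // the destructor-time check
    for (auto& t : threads) assert(t.claimed);
  }
};

static MockThreads g_threads;

// RAII scope: the constructor prepares the scan, the destructor verifies
// that the scan left no thread unclaimed.
class StrongRootsScope {
  bool _active;
public:
  explicit StrongRootsScope(bool activate = true) : _active(activate) {
    if (_active) g_threads.change_parity();
  }
  ~StrongRootsScope() {
    if (_active) g_threads.assert_all_threads_claimed();
  }
};

int main() {
  StrongRootsScope scope;
  // ... root-processing tasks would run here and claim each thread ...
  for (auto& t : g_threads.threads) t.claimed = true;
}   // leaving the scope asserts that no thread was missed
```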
View File

@@ -26,27 +26,12 @@
#define SHARE_VM_MEMORY_SHAREDHEAP_HPP
#include "gc_interface/collectedHeap.hpp"
#include "memory/generation.hpp"
// A "SharedHeap" is an implementation of a java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.
class Generation;
class BarrierSet;
class GenRemSet;
class Space;
class SpaceClosure;
class OopClosure;
class OopsInGenClosure;
class ObjectClosure;
class SubTasksDone;
class WorkGang;
class FlexibleWorkGang;
class CollectorPolicy;
class KlassClosure;
// Note on use of FlexibleWorkGang's for GC.
// There are three places where task completion is determined.
// In
@@ -101,39 +86,12 @@ class KlassClosure;
class SharedHeap : public CollectedHeap {
friend class VMStructs;
friend class VM_GC_Operation;
friend class VM_CGC_Operation;
protected:
// If we're doing parallel GC, use this gang of threads.
FlexibleWorkGang* _workers;
// Full initialization is done in a concrete subtype's "initialize"
// function.
SharedHeap();
public:
void set_barrier_set(BarrierSet* bs);
// Does operations required after initialization has been done.
virtual void post_initialize();
// Initialization of ("weak") reference processing support
virtual void ref_processing_init();
// Iteration functions.
void oop_iterate(ExtendedOopClosure* cl) = 0;
// Iterate over all spaces in use in the heap, in an undefined order.
virtual void space_iterate(SpaceClosure* cl) = 0;
// A SharedHeap will contain some number of spaces. This finds the
// space whose reserved area contains the given address, or else returns
// NULL.
virtual Space* space_containing(const void* addr) const = 0;
bool no_gc_in_progress() { return !is_gc_active(); }
// Note, the below comment needs to be updated to reflect the changes
// introduced by JDK-8076225. This should be done as part of JDK-8076289.
//
@@ -174,25 +132,6 @@ public:
StrongRootsScope(SharedHeap* heap, bool activate = true);
~StrongRootsScope();
};
private:
public:
FlexibleWorkGang* workers() const { return _workers; }
// The functions below are helper functions that a subclass of
// "SharedHeap" can use in the implementation of its virtual
// functions.
public:
// Do anything common to GC's.
virtual void gc_prologue(bool full) = 0;
virtual void gc_epilogue(bool full) = 0;
// Sets the number of parallel threads that will be doing tasks
// (such as process roots) subsequently.
virtual void set_par_threads(uint t);
};
};
#endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP
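Taken together, the diff leaves the hierarchy looking roughly like the skeleton below: the barrier-set setter and a non-pure post_initialize sit in CollectedHeap, the worker gangs and the iteration entry points (including oop_iterate_no_header) live in the concrete heaps, and SharedHeap keeps little beyond StrongRootsScope. This is an illustrative outline with stubbed bodies so it compiles; it is not the actual HotSpot code.

```cpp
// Illustrative skeleton of where responsibilities end up after the cleanup;
// class names mirror the HotSpot heaps, bodies are stubbed.
struct BarrierSet {};
struct OopClosure {};

class CollectedHeap {
protected:
  BarrierSet* _barrier_set = nullptr;
public:
  virtual ~CollectedHeap() {}
  void set_barrier_set(BarrierSet* bs) { _barrier_set = bs; }  // moved up from SharedHeap
  virtual void post_initialize() {}                            // no longer pure virtual
};

// SharedHeap keeps little more than the strong-roots scope (omitted here).
class SharedHeap : public CollectedHeap {
protected:
  SharedHeap() {}
};

// The concrete heaps own their worker gangs and iteration entry points.
class GenCollectedHeap : public SharedHeap {
public:
  void oop_iterate_no_header(OopClosure* /*cl*/) {}  // moved down from CollectedHeap
  void post_initialize() override { CollectedHeap::post_initialize(); }
};

class G1CollectedHeap : public SharedHeap {
public:
  void post_initialize() override { CollectedHeap::post_initialize(); }
};

class ParallelScavengeHeap : public CollectedHeap {};

int main() {
  GenCollectedHeap gch;
  G1CollectedHeap g1;
  ParallelScavengeHeap ps;
  (void)gch; (void)g1; (void)ps;
}
```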