8076289: Move the StrongRootsScope out of SharedHeap

Reviewed-by: stefank, sjohanss, david
Author: Bengt Rutisson 2015-04-02 16:07:27 +02:00
parent c3b72f7f5b
commit 7c5c5d80b7
14 changed files with 132 additions and 158 deletions
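In short: StrongRootsScope (and its MarkScope base) becomes a free-standing class in memory/strongRootsScope.hpp instead of a nested class of SharedHeap, and it no longer takes a heap pointer. A condensed before/after view of the call sites, drawn from the hunks below:

    // Before: nested in the heap class, constructed with the heap.
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);

    // After: a plain stack-allocated scope from memory/strongRootsScope.hpp.
    StrongRootsScope srs;
    workers->run_task(&tsk);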


@@ -53,6 +53,7 @@
 #include "memory/padded.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/strongRootsScope.hpp"
 #include "memory/tenuredGeneration.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -3014,10 +3015,10 @@ void CMSCollector::checkpointRootsInitialWork() {
   gch->set_par_threads(n_workers);
   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
   if (n_workers > 1) {
-    GenCollectedHeap::StrongRootsScope srs(gch);
+    StrongRootsScope srs;
     workers->run_task(&tsk);
   } else {
-    GenCollectedHeap::StrongRootsScope srs(gch);
+    StrongRootsScope srs;
     tsk.work(0);
   }
   gch->set_par_threads(0);
@@ -5112,11 +5113,11 @@ void CMSCollector::do_remark_parallel() {
     // necessarily be so, since it's possible that we are doing
     // ST marking.
     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
-    GenCollectedHeap::StrongRootsScope srs(gch);
+    StrongRootsScope srs;
     workers->run_task(&tsk);
   } else {
     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
-    GenCollectedHeap::StrongRootsScope srs(gch);
+    StrongRootsScope srs;
     tsk.work(0);
   }
@@ -5184,7 +5185,7 @@ void CMSCollector::do_remark_non_parallel() {
   verify_work_stacks_empty();
   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-  GenCollectedHeap::StrongRootsScope srs(gch);
+  StrongRootsScope srs;
   gch->gen_process_roots(_cmsGen->level(),
                          true, // younger gens as roots


@@ -46,6 +46,7 @@
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/strongRootsScope.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
@@ -2650,7 +2651,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
   g1h->ensure_parsability(false);
-  G1CollectedHeap::StrongRootsScope srs(g1h);
+  StrongRootsScope srs;
   // this is remark, so we'll use up all active threads
   uint active_workers = g1h->workers()->active_workers();
   if (active_workers == 0) {


@@ -116,7 +116,7 @@ void G1RootProcessor::wait_until_all_strong_classes_discovered() {
 G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
     _g1h(g1h),
     _process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
-    _srs(g1h),
+    _srs(),
     _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
     _n_workers_discovered_strong_classes(0) {}


@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP
 #include "memory/allocation.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/strongRootsScope.hpp"
 #include "runtime/mutex.hpp"
 class CLDClosure;
@@ -46,7 +46,7 @@ class SubTasksDone;
 class G1RootProcessor : public StackObj {
   G1CollectedHeap* _g1h;
   SubTasksDone* _process_strong_tasks;
-  SharedHeap::StrongRootsScope _srs;
+  StrongRootsScope _srs;
   // Used to implement the Thread work barrier.
   Monitor _lock;


@@ -42,7 +42,7 @@
 #include "memory/generation.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/strongRootsScope.hpp"
 #include "memory/space.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
@@ -974,10 +974,10 @@ void ParNewGeneration::collect(bool full,
   // in the multi-threaded case, but we special-case n=1 here to get
   // repeatable measurements of the 1-thread overhead of the parallel code.
   if (n_workers > 1) {
-    GenCollectedHeap::StrongRootsScope srs(gch);
+    StrongRootsScope srs;
     workers->run_task(&tsk);
   } else {
-    GenCollectedHeap::StrongRootsScope srs(gch);
+    StrongRootsScope srs;
     tsk.work(0);
   }
   thread_state_set.reset(0 /* Bad value in debug if not reset */,


@@ -34,6 +34,7 @@
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/collectorPolicy.hpp"
+#include "memory/strongRootsScope.hpp"
 #include "utilities/ostream.hpp"
 class AdjoiningGenerations;
@@ -237,7 +238,7 @@ class ParallelScavengeHeap : public CollectedHeap {
   void gen_mangle_unused_area() PRODUCT_RETURN;
   // Call these in sequential code around the processing of strong roots.
-  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
+  class ParStrongRootsScope : public MarkScope {
   public:
     ParStrongRootsScope();
     ~ParStrongRootsScope();


@@ -39,7 +39,7 @@
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/resourceArea.hpp"
-#include "memory/sharedHeap.hpp"
+#include "memory/strongRootsScope.hpp"
 #include "memory/space.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -596,7 +596,7 @@ void GenCollectedHeap::process_roots(bool activate_scope,
                                      CLDClosure* strong_cld_closure,
                                      CLDClosure* weak_cld_closure,
                                      CodeBlobClosure* code_roots) {
-  StrongRootsScope srs(this, activate_scope);
+  StrongRootsScope srs(activate_scope);
   // General roots.
   assert(Threads::thread_claim_parity() != 0, "must have called prologue code");


@@ -64,16 +64,6 @@ void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
   }
 }
-MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
-  : _active(activate)
-{
-  if (_active) nmethod::oops_do_marking_prologue();
-}
-MarkingCodeBlobClosure::MarkScope::~MarkScope() {
-  if (_active) nmethod::oops_do_marking_epilogue();
-}
 void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
   nmethod* nm = cb->as_nmethod_or_null();
   if (nm != NULL && !nm->test_set_oops_do_mark()) {


@@ -288,16 +288,6 @@ class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
   // Called for each code blob, but at most once per unique blob.
   virtual void do_code_blob(CodeBlob* cb);
-  class MarkScope : public StackObj {
-  protected:
-    bool _active;
-  public:
-    MarkScope(bool activate = true);
-    // = { if (active) nmethod::oops_do_marking_prologue(); }
-    ~MarkScope();
-    // = { if (active) nmethod::oops_do_marking_epilogue(); }
-  };
 };
 // MonitorClosure is used for iterating over monitors in the monitors cache


@@ -23,32 +23,9 @@
*/
#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"
SharedHeap::SharedHeap() :
CollectedHeap()
{}
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
: MarkScope(activate), _sh(heap)
{
if (_active) {
Threads::change_thread_claim_parity();
// Zero the claimed high water mark in the StringTable
StringTable::clear_parallel_claimed_index();
}
}
SharedHeap::StrongRootsScope::~StrongRootsScope() {
Threads::assert_all_threads_claimed();
}


@@ -27,62 +27,6 @@
 #include "gc_interface/collectedHeap.hpp"
-// A "SharedHeap" is an implementation of a java heap for HotSpot. This
-// is an abstract class: there may be many different kinds of heaps. This
-// class defines the functions that a heap must implement, and contains
-// infrastructure common to all heaps.
-// Note on use of FlexibleWorkGang's for GC.
-// There are three places where task completion is determined.
-// In
-// 1) ParallelTaskTerminator::offer_termination() where _n_threads
-// must be set to the correct value so that count of workers that
-// have offered termination will exactly match the number
-// working on the task. Tasks such as those derived from GCTask
-// use ParallelTaskTerminator's. Tasks that want load balancing
-// by work stealing use this method to gauge completion.
-// 2) SubTasksDone has a variable _n_threads that is used in
-// all_tasks_completed() to determine completion. all_tasks_complete()
-// counts the number of tasks that have been done and then reset
-// the SubTasksDone so that it can be used again. When the number of
-// tasks is set to the number of GC workers, then _n_threads must
-// be set to the number of active GC workers. G1RootProcessor and
-// GenCollectedHeap have SubTasksDone.
-// 3) SequentialSubTasksDone has an _n_threads that is used in
-// a way similar to SubTasksDone and has the same dependency on the
-// number of active GC workers. CompactibleFreeListSpace and Space
-// have SequentialSubTasksDone's.
-//
-// Examples of using SubTasksDone and SequentialSubTasksDone:
-// G1RootProcessor and GenCollectedHeap::process_roots() use
-// SubTasksDone* _process_strong_tasks to claim tasks for workers
-//
-// GenCollectedHeap::gen_process_roots() calls
-// rem_set()->younger_refs_iterate()
-// to scan the card table and which eventually calls down into
-// CardTableModRefBS::par_non_clean_card_iterate_work(). This method
-// uses SequentialSubTasksDone* _pst to claim tasks.
-// Both SubTasksDone and SequentialSubTasksDone call their method
-// all_tasks_completed() to count the number of GC workers that have
-// finished their work. That logic is "when all the workers are
-// finished the tasks are finished".
-//
-// The pattern that appears in the code is to set _n_threads
-// to a value > 1 before a task that you would like executed in parallel
-// and then to set it to 0 after that task has completed. A value of
-// 0 is a "special" value in set_n_threads() which translates to
-// setting _n_threads to 1.
-//
-// Some code uses _n_termination to decide if work should be done in
-// parallel. The notorious possibly_parallel_oops_do() in threads.cpp
-// is an example of such code. Look for variable "is_par" for other
-// examples.
-//
-// The active_workers is not reset to 0 after a parallel phase. It's
-// value may be used in later phases and in one instance at least
-// (the parallel remark) it has to be used (the parallel remark depends
-// on the partitioning done in the previous parallel scavenge).
 class SharedHeap : public CollectedHeap {
   friend class VMStructs;
@@ -90,48 +34,6 @@ protected:
   // Full initialization is done in a concrete subtype's "initialize"
   // function.
   SharedHeap();
-public:
-  // Note, the below comment needs to be updated to reflect the changes
-  // introduced by JDK-8076225. This should be done as part of JDK-8076289.
-  //
-  //Some collectors will perform "process_strong_roots" in parallel.
-  // Such a call will involve claiming some fine-grained tasks, such as
-  // scanning of threads. To make this process simpler, we provide the
-  // "strong_roots_parity()" method. Collectors that start parallel tasks
-  // whose threads invoke "process_strong_roots" must
-  // call "change_strong_roots_parity" in sequential code starting such a
-  // task. (This also means that a parallel thread may only call
-  // process_strong_roots once.)
-  //
-  // For calls to process_roots by sequential code, the parity is
-  // updated automatically.
-  //
-  // The idea is that objects representing fine-grained tasks, such as
-  // threads, will contain a "parity" field. A task will is claimed in the
-  // current "process_roots" call only if its parity field is the
-  // same as the "strong_roots_parity"; task claiming is accomplished by
-  // updating the parity field to the strong_roots_parity with a CAS.
-  //
-  // If the client meats this spec, then strong_roots_parity() will have
-  // the following properties:
-  // a) to return a different value than was returned before the last
-  // call to change_strong_roots_parity, and
-  // c) to never return a distinguished value (zero) with which such
-  // task-claiming variables may be initialized, to indicate "never
-  // claimed".
-public:
-  // Call these in sequential code around process_roots.
-  // strong_roots_prologue calls change_strong_roots_parity, if
-  // parallel tasks are enabled.
-  class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
-    SharedHeap* _sh;
-  public:
-    StrongRootsScope(SharedHeap* heap, bool activate = true);
-    ~StrongRootsScope();
-  };
 };
 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP
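The worker-counting convention that the deleted comment block describes is still what the collectors follow, and the CMS hunk at the top of this commit shows it verbatim. A condensed sketch of that pattern, reusing the names (gch, workers, tsk, n_workers) from that hunk:

    // Announce the worker count before the parallel phase, reset it afterwards.
    gch->set_par_threads(n_workers);  // per the removed comment, the SubTasksDone /
                                      // SequentialSubTasksDone counters depend on this count
    {
      StrongRootsScope srs;           // sequential prologue for the parallel root work
      workers->run_task(&tsk);        // workers claim subtasks until all_tasks_completed()
    }
    gch->set_par_threads(0);          // 0 is the "special" value that set_n_threads() treats as 1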


@@ -0,0 +1,53 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/nmethod.hpp"
#include "memory/strongRootsScope.hpp"
#include "runtime/thread.hpp"
MarkScope::MarkScope(bool activate) : _active(activate) {
if (_active) {
nmethod::oops_do_marking_prologue();
}
}
MarkScope::~MarkScope() {
if (_active) {
nmethod::oops_do_marking_epilogue();
}
}
StrongRootsScope::StrongRootsScope(bool activate) : MarkScope(activate) {
if (_active) {
Threads::change_thread_claim_parity();
// Zero the claimed high water mark in the StringTable
StringTable::clear_parallel_claimed_index();
}
}
StrongRootsScope::~StrongRootsScope() {
Threads::assert_all_threads_claimed();
}


@@ -0,0 +1,46 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP
#define SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP
#include "memory/allocation.hpp"
class MarkScope : public StackObj {
protected:
bool _active;
public:
MarkScope(bool activate = true);
~MarkScope();
};
// Sets up and tears down the required state for parallel root processing.
class StrongRootsScope : public MarkScope {
public:
StrongRootsScope(bool activate = true);
~StrongRootsScope();
};
#endif // SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP
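Together with the .cpp file above, the intended use is a stack-allocated scope that brackets root processing; passing activate = false builds the scope without running the prologue/epilogue. A sketch of the two call shapes that appear in this commit (workers and tsk as in the call sites above):

    // Parallel collectors (CMS remark, ParNew, G1 final mark) use the default:
    {
      StrongRootsScope srs;        // flips the thread claim parity, clears the StringTable's
                                   // parallel claim index, and runs nmethod::oops_do_marking_prologue()
                                   // via the MarkScope base
      workers->run_task(&tsk);     // workers claim threads/strings/nmethods against the new state
    }                              // ~StrongRootsScope asserts that every thread was claimed

    // GenCollectedHeap::process_roots() forwards its activate_scope flag instead:
    StrongRootsScope srs(activate_scope);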


@@ -1886,10 +1886,23 @@ class Threads: AllStatic {
   // Does not include JNI_VERSION_1_1
   static jboolean is_supported_jni_version(jint version);
+  // The "thread claim parity" provides a way for threads to be claimed
+  // by parallel worker tasks.
+  //
+  // Each thread contains a a "parity" field. A task will claim the
+  // thread only if its parity field is the same as the global parity,
+  // which is updated by calling change_thread_claim_parity().
+  //
+  // For this to work change_thread_claim_parity() needs to be called
+  // exactly once in sequential code before starting parallel tasks
+  // that should claim threads.
+  //
+  // New threads get their parity set to 0 and change_thread_claim_parity()
+  // never set the global parity to 0.
   static int thread_claim_parity() { return _thread_claim_parity; }
   static void change_thread_claim_parity();
   static void assert_all_threads_claimed() PRODUCT_RETURN;
   // Apply "f->do_oop" to all root oops in all threads.
   // This version may only be called by sequential code.
   static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
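The claim protocol itself is not part of this diff. The following self-contained model (field and function names are illustrative, not quoted from the patch) shows the CAS-on-parity idea described in the comment above and in the deleted SharedHeap comment:

    #include <atomic>
    #include <cassert>

    static int global_parity = 0;             // stands in for the global thread claim parity

    static void change_claim_parity() {       // sequential prologue, as StrongRootsScope does
      global_parity = (global_parity == 1) ? 2 : 1;   // never returns to 0 ("never claimed")
    }

    struct ModelThread {
      std::atomic<int> claim_parity{0};       // new threads start unclaimed (0)

      // Returns true for exactly one caller per parallel phase.
      bool try_claim() {
        int seen = claim_parity.load();
        if (seen == global_parity) return false;      // already claimed in this phase
        return claim_parity.compare_exchange_strong(seen, global_parity);
      }
    };

    int main() {
      ModelThread t;
      change_claim_parity();                  // must happen once, before the parallel task starts
      assert(t.try_claim());                  // the first worker wins the thread
      assert(!t.try_claim());                 // later workers see it already claimed
      return 0;
    }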