Merge
commit 7ec8aadd33
hotspot
  agent/src/share/classes/sun/jvm/hotspot
    gc_implementation/g1
    gc_interface
    memory
    tools
  make/solaris/makefiles
  src/share/vm
    gc_implementation
      concurrentMarkSweep
        adaptiveFreeList.cpp, cmsOopClosures.cpp, compactibleFreeListSpace.cpp, concurrentMarkSweepGeneration.cpp, promotionInfo.cpp
      g1
        concurrentMark.cpp, concurrentMark.hpp, concurrentMark.inline.hpp, g1CollectedHeap.cpp, g1CollectedHeap.hpp, g1CollectorPolicy.cpp, g1MarkSweep.cpp, g1OopClosures.cpp, g1PageBasedVirtualSpace.cpp, g1PageBasedVirtualSpace.hpp, g1RegionToSpaceMapper.cpp, g1RegionToSpaceMapper.hpp, g1RootProcessor.cpp, g1RootProcessor.hpp, heapRegionManager.cpp, heapRegionSet.cpp, satbQueue.cpp, vmStructs_g1.hpp, vm_operations_g1.cpp
      parNew
      parallelScavenge
        parallelScavengeHeap.cpp, parallelScavengeHeap.hpp, pcTasks.cpp, psCompactionManager.cpp, psCompactionManager.inline.hpp, psParallelCompact.cpp, psParallelCompact.hpp, psParallelCompact.inline.hpp, psPromotionManager.cpp, psPromotionManager.inline.hpp
      shared
    gc_interface
    memory
      cardTableModRefBS.cpp, cardTableRS.cpp, defNewGeneration.cpp, freeList.cpp, gcLocker.cpp, genCollectedHeap.cpp, genCollectedHeap.hpp, genOopClosures.cpp, genOopClosures.inline.hpp, iterator.cpp, iterator.hpp, iterator.inline.hpp, sharedHeap.cpp, sharedHeap.hpp, space.cpp, strongRootsScope.cpp, strongRootsScope.hpp, tenuredGeneration.cpp
    oops
      instanceClassLoaderKlass.cpp, instanceClassLoaderKlass.hpp, instanceClassLoaderKlass.inline.hpp, instanceKlass.cpp, instanceKlass.hpp, instanceKlass.inline.hpp, instanceMirrorKlass.cpp, instanceMirrorKlass.hpp, instanceMirrorKlass.inline.hpp, instanceRefKlass.cpp, instanceRefKlass.hpp, instanceRefKlass.inline.hpp, klass.cpp, klass.hpp, klassPS.hpp, objArrayKlass.cpp, objArrayKlass.hpp, objArrayKlass.inline.hpp, oop.hpp, oop.inline.hpp, oop.pcgc.inline.hpp, typeArrayKlass.cpp, typeArrayKlass.hpp, typeArrayKlass.inline.hpp
    precompiled
    prims
    runtime
@@ -29,9 +29,9 @@ import java.util.Observable;
import java.util.Observer;

import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.gc_interface.CollectedHeap;
import sun.jvm.hotspot.gc_interface.CollectedHeapName;
import sun.jvm.hotspot.memory.MemRegion;
import sun.jvm.hotspot.memory.SharedHeap;
import sun.jvm.hotspot.memory.SpaceClosure;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
@@ -41,7 +41,7 @@ import sun.jvm.hotspot.types.TypeDataBase;

// Mirror class for G1CollectedHeap.

public class G1CollectedHeap extends SharedHeap {
public class G1CollectedHeap extends CollectedHeap {
// HeapRegionManager _hrm;
static private long hrmFieldOffset;
// MemRegion _g1_reserved;
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@ import sun.jvm.hotspot.memory.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;

public class CollectedHeap extends VMObject {
public abstract class CollectedHeap extends VMObject {
private static long reservedFieldOffset;

static {
@@ -73,9 +73,7 @@ public class CollectedHeap extends VMObject {
return reservedRegion().contains(a);
}

public CollectedHeapName kind() {
return CollectedHeapName.ABSTRACT;
}
public abstract CollectedHeapName kind();

public void print() { printOn(System.out); }
public void printOn(PrintStream tty) {
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,6 @@ public class CollectedHeapName {

private CollectedHeapName(String name) { this.name = name; }

public static final CollectedHeapName ABSTRACT = new CollectedHeapName("abstract");
public static final CollectedHeapName SHARED_HEAP = new CollectedHeapName("SharedHeap");
public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
@@ -33,8 +33,7 @@ import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;

public class GenCollectedHeap extends SharedHeap {
private static CIntegerField nGensField;
public class GenCollectedHeap extends CollectedHeap {
private static AddressField youngGenField;
private static AddressField oldGenField;

@@ -54,7 +53,6 @@ public class GenCollectedHeap extends SharedHeap {
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("GenCollectedHeap");

nGensField = type.getCIntegerField("_n_gens");
youngGenField = type.getAddressField("_young_gen");
oldGenField = type.getAddressField("_old_gen");

@@ -70,7 +68,7 @@ public class GenCollectedHeap extends SharedHeap {
}

public int nGens() {
return (int) nGensField.getValue(addr);
return 2; // Young + Old
}

public Generation getGen(int i) {
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -112,11 +112,7 @@ public class Universe {
return "";
}
public CollectedHeap heap() {
try {
return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
} catch (WrongTypeException e) {
return new CollectedHeap(collectedHeapField.getValue());
}
return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
}

public static long getNarrowOopBase() {
@@ -81,53 +81,48 @@ public class HeapSummary extends Tool {
System.out.println();
System.out.println("Heap Usage:");

if (heap instanceof SharedHeap) {
SharedHeap sharedHeap = (SharedHeap) heap;
if (sharedHeap instanceof GenCollectedHeap) {
GenCollectedHeap genHeap = (GenCollectedHeap) sharedHeap;
for (int n = 0; n < genHeap.nGens(); n++) {
Generation gen = genHeap.getGen(n);
if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
System.out.println("New Generation (Eden + 1 Survivor Space):");
printGen(gen);
if (heap instanceof GenCollectedHeap) {
GenCollectedHeap genHeap = (GenCollectedHeap) heap;
for (int n = 0; n < genHeap.nGens(); n++) {
Generation gen = genHeap.getGen(n);
if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
System.out.println("New Generation (Eden + 1 Survivor Space):");
printGen(gen);

ContiguousSpace eden = ((DefNewGeneration)gen).eden();
System.out.println("Eden Space:");
printSpace(eden);
ContiguousSpace eden = ((DefNewGeneration)gen).eden();
System.out.println("Eden Space:");
printSpace(eden);

ContiguousSpace from = ((DefNewGeneration)gen).from();
System.out.println("From Space:");
printSpace(from);
ContiguousSpace from = ((DefNewGeneration)gen).from();
System.out.println("From Space:");
printSpace(from);

ContiguousSpace to = ((DefNewGeneration)gen).to();
System.out.println("To Space:");
printSpace(to);
} else {
System.out.println(gen.name() + ":");
printGen(gen);
}
ContiguousSpace to = ((DefNewGeneration)gen).to();
System.out.println("To Space:");
printSpace(to);
} else {
System.out.println(gen.name() + ":");
printGen(gen);
}
} else if (sharedHeap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
G1MonitoringSupport g1mm = g1h.g1mm();
long edenRegionNum = g1mm.edenRegionNum();
long survivorRegionNum = g1mm.survivorRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldRegionNum = oldSet.count().length()
+ humongousSet.count().capacity() / HeapRegion.grainBytes();
printG1Space("G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
System.out.println("G1 Young Generation:");
printG1Space("Eden Space:", edenRegionNum,
g1mm.edenUsed(), g1mm.edenCommitted());
printG1Space("Survivor Space:", survivorRegionNum,
g1mm.survivorUsed(), g1mm.survivorCommitted());
printG1Space("G1 Old Generation:", oldRegionNum,
g1mm.oldUsed(), g1mm.oldCommitted());
} else {
throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
}
} else if (heap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) heap;
G1MonitoringSupport g1mm = g1h.g1mm();
long edenRegionNum = g1mm.edenRegionNum();
long survivorRegionNum = g1mm.survivorRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldRegionNum = oldSet.count().length()
+ humongousSet.count().capacity() / HeapRegion.grainBytes();
printG1Space("G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
System.out.println("G1 Young Generation:");
printG1Space("Eden Space:", edenRegionNum,
g1mm.edenUsed(), g1mm.edenCommitted());
printG1Space("Survivor Space:", survivorRegionNum,
g1mm.survivorUsed(), g1mm.survivorCommitted());
printG1Space("G1 Old Generation:", oldRegionNum,
g1mm.oldUsed(), g1mm.oldCommitted());
} else if (heap instanceof ParallelScavengeHeap) {
ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
PSYoungGen youngGen = psh.youngGen();
@@ -37,6 +37,11 @@ ifndef USE_GCC
OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
endif

# Need extra inlining to get oop_ps_push_contents functions to perform well enough.
ifndef USE_GCC
OPT_CFLAGS/psPromotionManager.o = $(OPT_CFLAGS) -W2,-Ainline:inc=1000
endif

# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
ifeq ("${Platform_compiler}", "sparcWorks")

@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
@@ -0,0 +1,31 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/specialized_oop_closures.hpp"

// Generate CMS specialized oop_oop_iterate functions.
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
@@ -32,6 +32,7 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
@@ -673,10 +674,10 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0; \
if (is_par) { \
assert(SharedHeap::heap()->n_par_threads() == \
SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
assert(GenCollectedHeap::heap()->n_par_threads() == \
GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
walk_mem_region_with_cl_par(mr, bottom, top, cl); \
} else { \
walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
@@ -1907,11 +1908,11 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
assert(chunk->is_free() && ffc->is_free(), "Error");
_bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
if (rem_sz < SmallForDictionary) {
bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
assert(!is_par ||
(SharedHeap::heap()->n_par_threads() ==
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
(GenCollectedHeap::heap()->n_par_threads() ==
GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
returnChunkToFreeList(ffc);
split(size, rem_sz);
if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
@@ -1982,7 +1983,7 @@ void CompactibleFreeListSpace::save_marks() {

bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
assert(_promoInfo.tracking(), "No preceding save_marks?");
assert(SharedHeap::heap()->n_par_threads() == 0,
assert(GenCollectedHeap::heap()->n_par_threads() == 0,
"Shouldn't be called if using parallel gc.");
return _promoInfo.noPromotions();
}
@@ -1991,7 +1992,7 @@ bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
\
void CompactibleFreeListSpace:: \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
assert(SharedHeap::heap()->n_par_threads() == 0, \
assert(GenCollectedHeap::heap()->n_par_threads() == 0, \
"Shouldn't be called (yet) during parallel part of gc."); \
_promoInfo.promoted_oops_iterate##nv_suffix(blk); \
/* \
@@ -2442,11 +2443,10 @@ void CompactibleFreeListSpace::verify() const {
{
VerifyAllOopsClosure cl(_collector, this, span, past_remark,
_collector->markBitMap());
CollectedHeap* ch = Universe::heap();

// Iterate over all oops in the heap. Uses the _no_header version
// since we are not interested in following the klass pointers.
ch->oop_iterate_no_header(&cl);
GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
}

if (VerifyObjectStartArray) {
@ -53,6 +53,7 @@
|
||||
#include "memory/padded.hpp"
|
||||
#include "memory/referencePolicy.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/strongRootsScope.hpp"
|
||||
#include "memory/tenuredGeneration.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "prims/jvmtiExport.hpp"
|
||||
@ -208,10 +209,6 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
|
||||
use_adaptive_freelists,
|
||||
dictionaryChoice);
|
||||
NOT_PRODUCT(debug_cms_space = _cmsSpace;)
|
||||
if (_cmsSpace == NULL) {
|
||||
vm_exit_during_initialization(
|
||||
"CompactibleFreeListSpace allocation failure");
|
||||
}
|
||||
_cmsSpace->_gen = this;
|
||||
|
||||
_gc_stats = new CMSGCStats();
|
||||
@ -230,14 +227,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
|
||||
typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
|
||||
_par_gc_thread_states =
|
||||
NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
|
||||
if (_par_gc_thread_states == NULL) {
|
||||
vm_exit_during_initialization("Could not allocate par gc structs");
|
||||
}
|
||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
||||
_par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
|
||||
if (_par_gc_thread_states[i] == NULL) {
|
||||
vm_exit_during_initialization("Could not allocate par gc structs");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
_par_gc_thread_states = NULL;
|
||||
@ -586,11 +577,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|
||||
return;
|
||||
}
|
||||
_hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
|
||||
if (_hash_seed == NULL) {
|
||||
warning("_hash_seed array allocation failure");
|
||||
return;
|
||||
}
|
||||
|
||||
typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
|
||||
for (i = 0; i < num_queues; i++) {
|
||||
PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
|
||||
@ -633,12 +619,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|
||||
_eden_chunk_index = 0;
|
||||
_eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
|
||||
_eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
|
||||
if (_eden_chunk_array == NULL) {
|
||||
_eden_chunk_capacity = 0;
|
||||
warning("GC/CMS: _eden_chunk_array allocation failure");
|
||||
}
|
||||
}
|
||||
assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
|
||||
|
||||
// Support for parallelizing survivor space rescan
|
||||
if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
|
||||
@ -648,52 +629,15 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
|
||||
_survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
|
||||
_survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
|
||||
_cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
|
||||
if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
|
||||
|| _cursor == NULL) {
|
||||
warning("Failed to allocate survivor plab/chunk array");
|
||||
if (_survivor_plab_array != NULL) {
|
||||
FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
|
||||
_survivor_plab_array = NULL;
|
||||
}
|
||||
if (_survivor_chunk_array != NULL) {
|
||||
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
|
||||
_survivor_chunk_array = NULL;
|
||||
}
|
||||
if (_cursor != NULL) {
|
||||
FREE_C_HEAP_ARRAY(size_t, _cursor);
|
||||
_cursor = NULL;
|
||||
}
|
||||
} else {
|
||||
_survivor_chunk_capacity = 2*max_plab_samples;
|
||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
||||
HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
|
||||
if (vec == NULL) {
|
||||
warning("Failed to allocate survivor plab array");
|
||||
for (int j = i; j > 0; j--) {
|
||||
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
|
||||
}
|
||||
FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
|
||||
FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
|
||||
_survivor_plab_array = NULL;
|
||||
_survivor_chunk_array = NULL;
|
||||
_survivor_chunk_capacity = 0;
|
||||
break;
|
||||
} else {
|
||||
ChunkArray* cur =
|
||||
::new (&_survivor_plab_array[i]) ChunkArray(vec,
|
||||
max_plab_samples);
|
||||
assert(cur->end() == 0, "Should be 0");
|
||||
assert(cur->array() == vec, "Should be vec");
|
||||
assert(cur->capacity() == max_plab_samples, "Error");
|
||||
}
|
||||
}
|
||||
_survivor_chunk_capacity = 2*max_plab_samples;
|
||||
for (uint i = 0; i < ParallelGCThreads; i++) {
|
||||
HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
|
||||
ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
|
||||
assert(cur->end() == 0, "Should be 0");
|
||||
assert(cur->array() == vec, "Should be vec");
|
||||
assert(cur->capacity() == max_plab_samples, "Error");
|
||||
}
|
||||
}
|
||||
assert( ( _survivor_plab_array != NULL
|
||||
&& _survivor_chunk_array != NULL)
|
||||
|| ( _survivor_chunk_capacity == 0
|
||||
&& _survivor_chunk_index == 0),
|
||||
"Error");
|
||||
|
||||
NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
|
||||
_gc_counters = new CollectorCounters("CMS", 1);
|
||||
@ -3071,10 +3015,10 @@ void CMSCollector::checkpointRootsInitialWork() {
|
||||
gch->set_par_threads(n_workers);
|
||||
initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
|
||||
if (n_workers > 1) {
|
||||
GenCollectedHeap::StrongRootsScope srs(gch);
|
||||
StrongRootsScope srs;
|
||||
workers->run_task(&tsk);
|
||||
} else {
|
||||
GenCollectedHeap::StrongRootsScope srs(gch);
|
||||
StrongRootsScope srs;
|
||||
tsk.work(0);
|
||||
}
|
||||
gch->set_par_threads(0);
|
||||
@ -5169,11 +5113,11 @@ void CMSCollector::do_remark_parallel() {
|
||||
// necessarily be so, since it's possible that we are doing
|
||||
// ST marking.
|
||||
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
|
||||
GenCollectedHeap::StrongRootsScope srs(gch);
|
||||
StrongRootsScope srs;
|
||||
workers->run_task(&tsk);
|
||||
} else {
|
||||
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
|
||||
GenCollectedHeap::StrongRootsScope srs(gch);
|
||||
StrongRootsScope srs;
|
||||
tsk.work(0);
|
||||
}
|
||||
|
||||
@ -5241,7 +5185,7 @@ void CMSCollector::do_remark_non_parallel() {
|
||||
verify_work_stacks_empty();
|
||||
|
||||
gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
|
||||
GenCollectedHeap::StrongRootsScope srs(gch);
|
||||
StrongRootsScope srs;
|
||||
|
||||
gch->gen_process_roots(_cmsGen->level(),
|
||||
true, // younger gens as roots
|
||||
|
@ -23,6 +23,7 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/genOopClosures.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
|
||||
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
|
||||
#include "oops/markOop.inline.hpp"
|
||||
|
@ -46,6 +46,7 @@
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/referencePolicy.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/strongRootsScope.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
@ -115,7 +116,7 @@ void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
|
||||
}
|
||||
|
||||
size_t CMBitMap::compute_size(size_t heap_size) {
|
||||
return heap_size / mark_distance();
|
||||
return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
|
||||
}
|
||||
|
||||
size_t CMBitMap::mark_distance() {
|
||||
@ -2650,7 +2651,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
|
||||
|
||||
g1h->ensure_parsability(false);
|
||||
|
||||
G1CollectedHeap::StrongRootsScope srs(g1h);
|
||||
StrongRootsScope srs;
|
||||
// this is remark, so we'll use up all active threads
|
||||
uint active_workers = g1h->workers()->active_workers();
|
||||
if (active_workers == 0) {
|
||||
|
@ -1100,6 +1100,10 @@ private:
|
||||
void regular_clock_call();
|
||||
bool concurrent() { return _concurrent; }
|
||||
|
||||
// Test whether objAddr might have already been passed over by the
|
||||
// mark bitmap scan, and so needs to be pushed onto the mark stack.
|
||||
bool is_below_finger(HeapWord* objAddr, HeapWord* global_finger) const;
|
||||
|
||||
public:
|
||||
// It resets the task; it should be called right at the beginning of
|
||||
// a marking phase.
|
||||
|
@ -259,14 +259,35 @@ inline void CMTask::push(oop obj) {
|
||||
++_local_pushes );
|
||||
}
|
||||
|
||||
// This determines whether the method below will check both the local
|
||||
// and global fingers when determining whether to push on the stack a
|
||||
// gray object (value 1) or whether it will only check the global one
|
||||
// (value 0). The tradeoffs are that the former will be a bit more
|
||||
// accurate and possibly push less on the stack, but it might also be
|
||||
// a little bit slower.
|
||||
inline bool CMTask::is_below_finger(HeapWord* objAddr,
|
||||
HeapWord* global_finger) const {
|
||||
// If objAddr is above the global finger, then the mark bitmap scan
|
||||
// will find it later, and no push is needed. Similarly, if we have
|
||||
// a current region and objAddr is between the local finger and the
|
||||
// end of the current region, then no push is needed. The tradeoff
|
||||
// of checking both vs only checking the global finger is that the
|
||||
// local check will be more accurate and so result in fewer pushes,
|
||||
// but may also be a little slower.
|
||||
if (_finger != NULL) {
|
||||
// We have a current region.
|
||||
|
||||
#define _CHECK_BOTH_FINGERS_ 1
|
||||
// Finger and region values are all NULL or all non-NULL. We
|
||||
// use _finger to check since we immediately use its value.
|
||||
assert(_curr_region != NULL, "invariant");
|
||||
assert(_region_limit != NULL, "invariant");
|
||||
assert(_region_limit <= global_finger, "invariant");
|
||||
|
||||
// True if objAddr is less than the local finger, or is between
|
||||
// the region limit and the global finger.
|
||||
if (objAddr < _finger) {
|
||||
return true;
|
||||
} else if (objAddr < _region_limit) {
|
||||
return false;
|
||||
} // Else check global finger.
|
||||
}
|
||||
// Check global finger.
|
||||
return objAddr < global_finger;
|
||||
}
|
||||
|
||||
inline void CMTask::deal_with_reference(oop obj) {
|
||||
if (_cm->verbose_high()) {
|
||||
@ -297,50 +318,29 @@ inline void CMTask::deal_with_reference(oop obj) {
|
||||
// CAS done in CMBitMap::parMark() call in the routine above.
|
||||
HeapWord* global_finger = _cm->finger();
|
||||
|
||||
#if _CHECK_BOTH_FINGERS_
|
||||
// we will check both the local and global fingers
|
||||
|
||||
if (_finger != NULL && objAddr < _finger) {
|
||||
// We only need to push a newly grey object on the mark
|
||||
// stack if it is in a section of memory the mark bitmap
|
||||
// scan has already examined. Mark bitmap scanning
|
||||
// maintains progress "fingers" for determining that.
|
||||
//
|
||||
// Notice that the global finger might be moving forward
|
||||
// concurrently. This is not a problem. In the worst case, we
|
||||
// mark the object while it is above the global finger and, by
|
||||
// the time we read the global finger, it has moved forward
|
||||
// past this object. In this case, the object will probably
|
||||
// be visited when a task is scanning the region and will also
|
||||
// be pushed on the stack. So, some duplicate work, but no
|
||||
// correctness problems.
|
||||
if (is_below_finger(objAddr, global_finger)) {
|
||||
if (_cm->verbose_high()) {
|
||||
gclog_or_tty->print_cr("[%u] below the local finger ("PTR_FORMAT"), "
|
||||
"pushing it", _worker_id, p2i(_finger));
|
||||
}
|
||||
push(obj);
|
||||
} else if (_curr_region != NULL && objAddr < _region_limit) {
|
||||
// do nothing
|
||||
} else if (objAddr < global_finger) {
|
||||
// Notice that the global finger might be moving forward
|
||||
// concurrently. This is not a problem. In the worst case, we
|
||||
// mark the object while it is above the global finger and, by
|
||||
// the time we read the global finger, it has moved forward
|
||||
// passed this object. In this case, the object will probably
|
||||
// be visited when a task is scanning the region and will also
|
||||
// be pushed on the stack. So, some duplicate work, but no
|
||||
// correctness problems.
|
||||
|
||||
if (_cm->verbose_high()) {
|
||||
gclog_or_tty->print_cr("[%u] below the global finger "
|
||||
"("PTR_FORMAT"), pushing it",
|
||||
_worker_id, p2i(global_finger));
|
||||
}
|
||||
push(obj);
|
||||
} else {
|
||||
// do nothing
|
||||
}
|
||||
#else // _CHECK_BOTH_FINGERS_
|
||||
// we will only check the global finger
|
||||
|
||||
if (objAddr < global_finger) {
|
||||
// see long comment above
|
||||
|
||||
if (_cm->verbose_high()) {
|
||||
gclog_or_tty->print_cr("[%u] below the global finger "
|
||||
"("PTR_FORMAT"), pushing it",
|
||||
_worker_id, p2i(global_finger));
|
||||
gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT
|
||||
", global: " PTR_FORMAT ") pushing "
|
||||
PTR_FORMAT " on mark stack",
|
||||
_worker_id, p2i(_finger),
|
||||
p2i(global_finger), p2i(objAddr));
|
||||
}
|
||||
push(obj);
|
||||
}
|
||||
#endif // _CHECK_BOTH_FINGERS_
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1728,7 +1728,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
|
||||
|
||||
|
||||
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
SharedHeap(),
|
||||
CollectedHeap(),
|
||||
_g1_policy(policy_),
|
||||
_dirty_card_queue_set(false),
|
||||
_into_cset_dirty_card_queue_set(false),
|
||||
@ -1770,6 +1770,11 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
|
||||
_g1h = this;
|
||||
|
||||
_workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
|
||||
/* are_GC_task_threads */true,
|
||||
/* are_ConcurrentGC_threads */false);
|
||||
_workers->initialize_workers();
|
||||
|
||||
_allocator = G1Allocator::create_allocator(_g1h);
|
||||
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
|
||||
|
||||
@ -1797,6 +1802,25 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
guarantee(_task_queues != NULL, "task_queues allocation failure.");
|
||||
}
|
||||
|
||||
G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
|
||||
size_t size,
|
||||
size_t translation_factor) {
|
||||
// Allocate a new reserved space, preferring to use large pages.
|
||||
ReservedSpace rs(size, true);
|
||||
G1RegionToSpaceMapper* result =
|
||||
G1RegionToSpaceMapper::create_mapper(rs,
|
||||
size,
|
||||
rs.alignment(),
|
||||
HeapRegion::GrainBytes,
|
||||
translation_factor,
|
||||
mtGC);
|
||||
if (TracePageSizes) {
|
||||
gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
|
||||
description, rs.alignment(), p2i(rs.base()), rs.size(), rs.alignment(), size);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
jint G1CollectedHeap::initialize() {
|
||||
CollectedHeap::pre_initialize();
|
||||
os::enable_vtime();
|
||||
@ -1864,57 +1888,35 @@ jint G1CollectedHeap::initialize() {
|
||||
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
|
||||
G1RegionToSpaceMapper* heap_storage =
|
||||
G1RegionToSpaceMapper::create_mapper(g1_rs,
|
||||
g1_rs.size(),
|
||||
UseLargePages ? os::large_page_size() : os::vm_page_size(),
|
||||
HeapRegion::GrainBytes,
|
||||
1,
|
||||
mtJavaHeap);
|
||||
heap_storage->set_mapping_changed_listener(&_listener);
|
||||
|
||||
// Reserve space for the block offset table. We do not support automatic uncommit
|
||||
// for the card table at this time. BOT only.
|
||||
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
|
||||
// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
|
||||
G1RegionToSpaceMapper* bot_storage =
|
||||
G1RegionToSpaceMapper::create_mapper(bot_rs,
|
||||
os::vm_page_size(),
|
||||
HeapRegion::GrainBytes,
|
||||
G1BlockOffsetSharedArray::N_bytes,
|
||||
mtGC);
|
||||
create_aux_memory_mapper("Block offset table",
|
||||
G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
|
||||
G1BlockOffsetSharedArray::N_bytes);
|
||||
|
||||
ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
|
||||
G1RegionToSpaceMapper* cardtable_storage =
|
||||
G1RegionToSpaceMapper::create_mapper(cardtable_rs,
|
||||
os::vm_page_size(),
|
||||
HeapRegion::GrainBytes,
|
||||
G1BlockOffsetSharedArray::N_bytes,
|
||||
mtGC);
|
||||
create_aux_memory_mapper("Card table",
|
||||
G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
|
||||
G1BlockOffsetSharedArray::N_bytes);
|
||||
|
||||
// Reserve space for the card counts table.
|
||||
ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
|
||||
G1RegionToSpaceMapper* card_counts_storage =
|
||||
G1RegionToSpaceMapper::create_mapper(card_counts_rs,
|
||||
os::vm_page_size(),
|
||||
HeapRegion::GrainBytes,
|
||||
G1BlockOffsetSharedArray::N_bytes,
|
||||
mtGC);
|
||||
create_aux_memory_mapper("Card counts table",
|
||||
G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
|
||||
G1BlockOffsetSharedArray::N_bytes);
|
||||
|
||||
// Reserve space for prev and next bitmap.
|
||||
size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
|
||||
|
||||
ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
|
||||
G1RegionToSpaceMapper* prev_bitmap_storage =
|
||||
G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
|
||||
os::vm_page_size(),
|
||||
HeapRegion::GrainBytes,
|
||||
CMBitMap::mark_distance(),
|
||||
mtGC);
|
||||
|
||||
ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
|
||||
create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
|
||||
G1RegionToSpaceMapper* next_bitmap_storage =
|
||||
G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
|
||||
os::vm_page_size(),
|
||||
HeapRegion::GrainBytes,
|
||||
CMBitMap::mark_distance(),
|
||||
mtGC);
|
||||
create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());
|
||||
|
||||
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
|
||||
g1_barrier_set()->initialize(cardtable_storage);
|
||||
@ -2035,6 +2037,11 @@ size_t G1CollectedHeap::conservative_max_heap_alignment() {
|
||||
return HeapRegion::max_region_size();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::post_initialize() {
|
||||
CollectedHeap::post_initialize();
|
||||
ref_processing_init();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::ref_processing_init() {
|
||||
// Reference processing in G1 currently works as follows:
|
||||
//
|
||||
@ -2071,7 +2078,6 @@ void G1CollectedHeap::ref_processing_init() {
|
||||
// * Discovery is atomic - i.e. not concurrent.
|
||||
// * Reference discovery will not need a barrier.
|
||||
|
||||
SharedHeap::ref_processing_init();
|
||||
MemRegion mr = reserved_region();
|
||||
|
||||
// Concurrent Mark ref processor
|
||||
@ -2128,6 +2134,7 @@ void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
|
||||
private:
|
||||
unsigned _gc_time_stamp;
|
||||
@ -2462,11 +2469,6 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
|
||||
IterateOopClosureRegionClosure blk(cl);
|
||||
heap_region_iterate(&blk);
|
||||
}
|
||||
|
||||
// Iterates an ObjectClosure over all objects within a HeapRegion.
|
||||
|
||||
class IterateObjectClosureRegionClosure: public HeapRegionClosure {
|
||||
@ -2486,23 +2488,6 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
|
||||
heap_region_iterate(&blk);
|
||||
}
|
||||
|
||||
// Calls a SpaceClosure on a HeapRegion.
|
||||
|
||||
class SpaceClosureRegionClosure: public HeapRegionClosure {
|
||||
SpaceClosure* _cl;
|
||||
public:
|
||||
SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
_cl->do_space(r);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
|
||||
SpaceClosureRegionClosure blk(cl);
|
||||
heap_region_iterate(&blk);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
|
||||
_hrm.iterate(cl);
|
||||
}
|
||||
@ -2639,23 +2624,19 @@ HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) cons
|
||||
return result;
|
||||
}
|
||||
|
||||
Space* G1CollectedHeap::space_containing(const void* addr) const {
|
||||
return heap_region_containing(addr);
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::block_start(const void* addr) const {
|
||||
Space* sp = space_containing(addr);
|
||||
return sp->block_start(addr);
|
||||
HeapRegion* hr = heap_region_containing(addr);
|
||||
return hr->block_start(addr);
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
|
||||
Space* sp = space_containing(addr);
|
||||
return sp->block_size(addr);
|
||||
HeapRegion* hr = heap_region_containing(addr);
|
||||
return hr->block_size(addr);
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
|
||||
Space* sp = space_containing(addr);
|
||||
return sp->block_is_obj(addr);
|
||||
HeapRegion* hr = heap_region_containing(addr);
|
||||
return hr->block_is_obj(addr);
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::supports_tlab_allocation() const {
|
||||
@ -3336,8 +3317,6 @@ void G1CollectedHeap::print_all_rsets() {
|
||||
#endif // PRODUCT
|
||||
|
||||
G1CollectedHeap* G1CollectedHeap::heap() {
|
||||
assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
|
||||
"not a garbage-first heap");
|
||||
return _g1h;
|
||||
}
|
||||
|
||||
@ -6163,8 +6142,6 @@ void G1CollectedHeap::wait_while_free_regions_coming() {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
|
||||
assert(heap_lock_held_for_gc(),
|
||||
"the heap lock should already be held by or for this thread");
|
||||
_young_list->push_region(hr);
|
||||
}
|
||||
|
||||
|
@ -40,9 +40,9 @@
|
||||
#include "gc_implementation/g1/heapRegionSet.hpp"
|
||||
#include "gc_implementation/shared/hSpaceCounters.hpp"
|
||||
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "memory/barrierSet.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "utilities/stack.hpp"
|
||||
|
||||
// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
|
||||
@ -76,6 +76,7 @@ class G1OldTracer;
|
||||
class EvacuationFailedInfo;
|
||||
class nmethod;
|
||||
class Ticks;
|
||||
class FlexibleWorkGang;
|
||||
|
||||
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
|
||||
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
|
||||
@ -177,7 +178,7 @@ class G1RegionMappingChangedListener : public G1MappingChangedListener {
|
||||
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
|
||||
};
|
||||
|
||||
class G1CollectedHeap : public SharedHeap {
|
||||
class G1CollectedHeap : public CollectedHeap {
|
||||
friend class VM_CollectForMetadataAllocation;
|
||||
friend class VM_G1CollectForAllocation;
|
||||
friend class VM_G1CollectFull;
|
||||
@ -204,6 +205,8 @@ private:
|
||||
// The one and only G1CollectedHeap, so static functions can find it.
|
||||
static G1CollectedHeap* _g1h;
|
||||
|
||||
FlexibleWorkGang* _workers;
|
||||
|
||||
static size_t _humongous_object_threshold_in_words;
|
||||
|
||||
// The secondary free list which contains regions that have been
|
||||
@ -351,6 +354,12 @@ private:
|
||||
// heap after a compaction.
|
||||
void print_hrm_post_compaction();
|
||||
|
||||
// Create a memory mapper for auxiliary data structures of the given size and
|
||||
// translation factor.
|
||||
static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
|
||||
size_t size,
|
||||
size_t translation_factor);
|
||||
|
||||
double verify(bool guard, const char* msg);
|
||||
void verify_before_gc();
|
||||
void verify_after_gc();
|
||||
@ -605,6 +614,7 @@ protected:
|
||||
void enqueue_discovered_references(uint no_of_gc_workers);
|
||||
|
||||
public:
|
||||
FlexibleWorkGang* workers() const { return _workers; }
|
||||
|
||||
G1Allocator* allocator() {
|
||||
return _allocator;
|
||||
@ -630,8 +640,8 @@ public:
|
||||
inline AllocationContextStats& allocation_context_stats();
|
||||
|
||||
// Do anything common to GC's.
|
||||
virtual void gc_prologue(bool full);
|
||||
virtual void gc_epilogue(bool full);
|
||||
void gc_prologue(bool full);
|
||||
void gc_epilogue(bool full);
|
||||
|
||||
inline void set_humongous_is_live(oop obj);
|
||||
|
||||
@ -1000,11 +1010,14 @@ public:
|
||||
// Return the (conservative) maximum heap alignment for any G1 heap
|
||||
static size_t conservative_max_heap_alignment();
|
||||
|
||||
// Does operations required after initialization has been done.
|
||||
void post_initialize();
|
||||
|
||||
// Initialize weak reference processing.
|
||||
virtual void ref_processing_init();
|
||||
void ref_processing_init();
|
||||
|
||||
// Explicitly import set_par_threads into this scope
|
||||
using SharedHeap::set_par_threads;
|
||||
using CollectedHeap::set_par_threads;
|
||||
// Set _n_par_threads according to a policy TBD.
|
||||
void set_par_threads();
|
||||
|
||||
@ -1251,10 +1264,6 @@ public:
|
||||
|
||||
// Iteration functions.
|
||||
|
||||
// Iterate over all the ref-containing fields of all objects, calling
|
||||
// "cl.do_oop" on each.
|
||||
virtual void oop_iterate(ExtendedOopClosure* cl);
|
||||
|
||||
// Iterate over all objects, calling "cl.do_object" on each.
|
||||
virtual void object_iterate(ObjectClosure* cl);
|
||||
|
||||
@ -1262,9 +1271,6 @@ public:
|
||||
object_iterate(cl);
|
||||
}
|
||||
|
||||
// Iterate over all spaces in use in the heap, in ascending address order.
|
||||
virtual void space_iterate(SpaceClosure* cl);
|
||||
|
||||
// Iterate over heap regions, in address order, terminating the
|
||||
// iteration early if the "doHeapRegion" method returns "true".
|
||||
void heap_region_iterate(HeapRegionClosure* blk) const;
|
||||
@ -1307,10 +1313,6 @@ public:
|
||||
|
||||
HeapRegion* next_compaction_region(const HeapRegion* from) const;
|
||||
|
||||
// A CollectedHeap will contain some number of spaces. This finds the
|
||||
// space containing a given address, or else returns NULL.
|
||||
virtual Space* space_containing(const void* addr) const;
|
||||
|
||||
// Returns the HeapRegion that contains addr. addr must not be NULL.
|
||||
template <class T>
|
||||
inline HeapRegion* heap_region_containing_raw(const T addr) const;
|
||||
|
@ -1460,7 +1460,7 @@ void G1CollectorPolicy::update_survivors_policy() {
|
||||
_max_survivor_regions = (uint) ceil(max_survivor_regions_d);
|
||||
|
||||
_tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
|
||||
HeapRegion::GrainWords * _max_survivor_regions);
|
||||
HeapRegion::GrainWords * _max_survivor_regions, counters());
|
||||
}
|
||||
|
||||
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
|
||||
|
@ -61,9 +61,8 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
|
||||
bool clear_all_softrefs) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
|
||||
|
||||
SharedHeap* sh = SharedHeap::heap();
|
||||
#ifdef ASSERT
|
||||
if (sh->collector_policy()->should_clear_all_soft_refs()) {
|
||||
if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
|
||||
assert(clear_all_softrefs, "Policy should have been checked earler");
|
||||
}
|
||||
#endif
|
||||
|
@ -23,9 +23,11 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1ParScanThreadState.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
|
||||
G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
|
||||
G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
|
||||
@ -50,3 +52,6 @@ void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan
|
||||
assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
|
||||
err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
|
||||
}
|
||||
|
||||
// Generate G1 specialized oop_oop_iterate functions.
|
||||
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -44,37 +44,45 @@
|
||||
#endif
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
|
||||
_high_boundary(NULL), _committed(), _page_size(0), _special(false),
|
||||
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
|
||||
_low_boundary(NULL), _high_boundary(NULL), _committed(), _page_size(0), _special(false),
|
||||
_dirty(), _executable(false) {
|
||||
initialize_with_page_size(rs, used_size, page_size);
|
||||
}
|
||||
|
||||
bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
|
||||
if (!rs.is_reserved()) {
|
||||
return false; // Allocation failed.
|
||||
}
|
||||
assert(_low_boundary == NULL, "VirtualSpace already initialized");
|
||||
assert(page_size > 0, "Granularity must be non-zero.");
|
||||
void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
|
||||
guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
|
||||
|
||||
vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
|
||||
vmassert(page_size > 0, "Page size must be non-zero.");
|
||||
|
||||
guarantee(is_ptr_aligned(rs.base(), page_size),
|
||||
err_msg("Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size));
|
||||
guarantee(is_size_aligned(used_size, os::vm_page_size()),
|
||||
err_msg("Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size));
|
||||
guarantee(used_size <= rs.size(),
|
||||
err_msg("Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size()));
|
||||
guarantee(is_size_aligned(rs.size(), page_size),
|
||||
err_msg("Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size));
|
||||
|
||||
_low_boundary = rs.base();
|
||||
_high_boundary = _low_boundary + rs.size();
|
||||
_high_boundary = _low_boundary + used_size;
|
||||
|
||||
_special = rs.special();
|
||||
_executable = rs.executable();
|
||||
|
||||
_page_size = page_size;
|
||||
|
||||
assert(_committed.size() == 0, "virtual space initialized more than once");
|
||||
uintx size_in_bits = rs.size() / page_size;
|
||||
_committed.resize(size_in_bits, /* in_resource_area */ false);
|
||||
vmassert(_committed.size() == 0, "virtual space initialized more than once");
|
||||
BitMap::idx_t size_in_pages = rs.size() / page_size;
|
||||
_committed.resize(size_in_pages, /* in_resource_area */ false);
|
||||
if (_special) {
|
||||
_dirty.resize(size_in_bits, /* in_resource_area */ false);
|
||||
_dirty.resize(size_in_pages, /* in_resource_area */ false);
|
||||
}
|
||||
|
||||
return true;
|
||||
_tail_size = used_size % _page_size;
|
||||
}
|
||||
|
||||
|
||||
G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
|
||||
release();
|
||||
}
|
||||
@ -87,12 +95,18 @@ void G1PageBasedVirtualSpace::release() {
|
||||
_special = false;
|
||||
_executable = false;
|
||||
_page_size = 0;
|
||||
_tail_size = 0;
|
||||
_committed.resize(0, false);
|
||||
_dirty.resize(0, false);
|
||||
}
|
||||
|
||||
size_t G1PageBasedVirtualSpace::committed_size() const {
|
||||
return _committed.count_one_bits() * _page_size;
|
||||
size_t result = _committed.count_one_bits() * _page_size;
|
||||
// The last page might not be in full.
|
||||
if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
|
||||
result -= _page_size - _tail_size;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
size_t G1PageBasedVirtualSpace::reserved_size() const {
|
||||
@ -103,65 +117,134 @@ size_t G1PageBasedVirtualSpace::uncommitted_size() const {
|
||||
return reserved_size() - committed_size();
|
||||
}
|
||||
|
||||
uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
|
||||
size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
|
||||
return (addr - _low_boundary) / _page_size;
|
||||
}
|
||||
|
||||
bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
|
||||
uintptr_t end = start + size_in_pages;
|
||||
return _committed.get_next_zero_offset(start, end) >= end;
|
||||
bool G1PageBasedVirtualSpace::is_area_committed(size_t start_page, size_t size_in_pages) const {
|
||||
size_t end_page = start_page + size_in_pages;
|
||||
return _committed.get_next_zero_offset(start_page, end_page) >= end_page;
|
||||
}
|
||||
|
||||
bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
|
||||
uintptr_t end = start + size_in_pages;
|
||||
return _committed.get_next_one_offset(start, end) >= end;
|
||||
bool G1PageBasedVirtualSpace::is_area_uncommitted(size_t start_page, size_t size_in_pages) const {
|
||||
size_t end_page = start_page + size_in_pages;
|
||||
return _committed.get_next_one_offset(start_page, end_page) >= end_page;
|
||||
}
|
||||
|
||||
char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
|
||||
char* G1PageBasedVirtualSpace::page_start(size_t index) const {
|
||||
return _low_boundary + index * _page_size;
|
||||
}
|
||||
|
||||
size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
|
||||
return num * _page_size;
|
||||
bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) const {
|
||||
guarantee(index <= _committed.size(),
|
||||
err_msg("Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size()));
|
||||
return index == _committed.size();
|
||||
}
|
||||
|
||||
bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
|
||||
void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pages) {
|
||||
vmassert(num_pages > 0, "No full pages to commit");
|
||||
vmassert(start + num_pages <= _committed.size(),
|
||||
err_msg("Tried to commit area from page " SIZE_FORMAT " to page " SIZE_FORMAT " "
|
||||
"that is outside of managed space of " SIZE_FORMAT " pages",
|
||||
start, start + num_pages, _committed.size()));
|
||||
|
||||
char* start_addr = page_start(start);
|
||||
size_t size = num_pages * _page_size;
|
||||
|
||||
os::commit_memory_or_exit(start_addr, size, _page_size, _executable,
|
||||
err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
|
||||
p2i(start_addr), p2i(start_addr + size), size));
|
||||
}
|
||||
|
||||
void G1PageBasedVirtualSpace::commit_tail() {
|
||||
vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
|
||||
|
||||
char* const aligned_end_address = (char*)align_ptr_down(_high_boundary, _page_size);
|
||||
os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
|
||||
err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
|
||||
p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
|
||||
}
|
||||
|
||||
void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
|
||||
guarantee(start_page < end_page,
|
||||
err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
|
||||
guarantee(end_page <= _committed.size(),
|
||||
err_msg("Given end page " SIZE_FORMAT " is beyond end of managed page amount of " SIZE_FORMAT, end_page, _committed.size()));
|
||||
|
||||
size_t pages = end_page - start_page;
|
||||
bool need_to_commit_tail = is_after_last_page(end_page) && is_last_page_partial();
|
||||
|
||||
// If we have to commit some (partial) tail area, decrease the amount of pages to avoid
|
||||
// committing that in the full-page commit code.
|
||||
if (need_to_commit_tail) {
|
||||
pages--;
|
||||
}
|
||||
|
||||
if (pages > 0) {
|
||||
commit_preferred_pages(start_page, pages);
|
||||
}
|
||||
|
||||
if (need_to_commit_tail) {
|
||||
commit_tail();
|
||||
}
|
||||
}
|
||||
|
||||
char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
|
||||
return MIN2(_high_boundary, page_start(end_page));
|
||||
}
|
||||
|
||||
void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
|
||||
guarantee(start_page < end_page,
|
||||
err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
|
||||
|
||||
os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
|
||||
}
|
||||
|
||||
bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
|
||||
// We need to make sure to commit all pages covered by the given area.
|
||||
guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
|
||||
guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
|
||||
|
||||
bool zero_filled = true;
|
||||
uintptr_t end = start + size_in_pages;
|
||||
size_t end_page = start_page + size_in_pages;
|
||||
|
||||
if (_special) {
|
||||
// Check for dirty pages and update zero_filled if any found.
|
||||
if (_dirty.get_next_one_offset(start,end) < end) {
|
||||
if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
|
||||
zero_filled = false;
|
||||
_dirty.clear_range(start, end);
|
||||
_dirty.clear_range(start_page, end_page);
|
||||
}
|
||||
} else {
|
||||
os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
|
||||
err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
|
||||
commit_internal(start_page, end_page);
|
||||
}
|
||||
_committed.set_range(start, end);
|
||||
_committed.set_range(start_page, end_page);
|
||||
|
||||
if (AlwaysPreTouch) {
|
||||
os::pretouch_memory(page_start(start), page_start(end));
|
||||
pretouch_internal(start_page, end_page);
|
||||
}
|
||||
return zero_filled;
|
||||
}
|
||||

void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
  guarantee(is_area_committed(start, size_in_pages), "checking");
void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
  guarantee(start_page < end_page,
            err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));

  char* start_addr = page_start(start_page);
  os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
}

void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
  guarantee(is_area_committed(start_page, size_in_pages), "checking");

  size_t end_page = start_page + size_in_pages;
  if (_special) {
    // Mark that memory is dirty. If committed again the memory might
    // need to be cleared explicitly.
    _dirty.set_range(start, start + size_in_pages);
    _dirty.set_range(start_page, end_page);
  } else {
    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
    uncommit_internal(start_page, end_page);
  }

  _committed.clear_range(start, start + size_in_pages);
  _committed.clear_range(start_page, end_page);
}

bool G1PageBasedVirtualSpace::contains(const void* p) const {
@@ -175,7 +258,8 @@ void G1PageBasedVirtualSpace::print_on(outputStream* out) {
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
  out->print_cr(" - preferred page size: " SIZE_FORMAT, _page_size);
  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
}

void G1PageBasedVirtualSpace::print() {
@@ -34,6 +34,12 @@
// granularity.
// (De-)Allocation requests are always OS page aligned by passing a page index
// and multiples of pages.
// For systems that only commit memory in a given size (always greater than
// page size) the base address is required to be aligned to that page size.
// The actual size requested need not be aligned to that page size, but the size
// of the reservation passed may be rounded up to this page size. Any fragment
// (less than the page size) of the actual size at the tail of the request will
// be committed using OS small pages.
// The implementation gives an error when trying to commit or uncommit pages that
// have already been committed or uncommitted.
class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
@@ -43,7 +49,11 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
  char* _low_boundary;
  char* _high_boundary;

  // The commit/uncommit granularity in bytes.
  // The size of the tail in bytes of the handled space that needs to be committed
  // using small pages.
  size_t _tail_size;

  // The preferred page size used for commit/uncommit in bytes.
  size_t _page_size;

  // Bitmap used for verification of commit/uncommit operations.
@@ -62,30 +72,55 @@ class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
  // Indicates whether the committed space should be executable.
  bool _executable;

  // Helper function for committing memory. Commit the given memory range by using
  // _page_size pages as much as possible and the remainder with small sized pages.
  void commit_internal(size_t start_page, size_t end_page);
  // Commit num_pages pages of _page_size size starting from start. All argument
  // checking has been performed.
  void commit_preferred_pages(size_t start_page, size_t end_page);
  // Commit space at the high end of the space that needs to be committed with small
  // sized pages.
  void commit_tail();

  // Uncommit the given memory range.
  void uncommit_internal(size_t start_page, size_t end_page);

  // Pretouch the given memory range.
  void pretouch_internal(size_t start_page, size_t end_page);

  // Returns the index of the page which contains the given address.
  uintptr_t addr_to_page_index(char* addr) const;
  // Returns the address of the given page index.
  char* page_start(uintptr_t index);
  // Returns the byte size of the given number of pages.
  size_t byte_size_for_pages(size_t num);
  char* page_start(size_t index) const;

  // Is the given page index the last page?
  bool is_last_page(size_t index) const { return index == (_committed.size() - 1); }
  // Is the given page index the first after last page?
  bool is_after_last_page(size_t index) const;
  // Is the last page only partially covered by this space?
  bool is_last_page_partial() const { return !is_ptr_aligned(_high_boundary, _page_size); }
  // Returns the end address of the given page bounded by the reserved space.
  char* bounded_end_addr(size_t end_page) const;

  // Returns true if the entire area is backed by committed memory.
  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
  bool is_area_committed(size_t start_page, size_t size_in_pages) const;
  // Returns true if the entire area is not backed by committed memory.
  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
  bool is_area_uncommitted(size_t start_page, size_t size_in_pages) const;

  void initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size);
 public:

  // Commit the given area of pages starting at start being size_in_pages large.
  // Returns true if the given area is zero filled upon completion.
  bool commit(uintptr_t start, size_t size_in_pages);
  bool commit(size_t start_page, size_t size_in_pages);

  // Uncommit the given area of pages starting at start being size_in_pages large.
  void uncommit(uintptr_t start, size_t size_in_pages);
  void uncommit(size_t start_page, size_t size_in_pages);

  // Initialization
  G1PageBasedVirtualSpace();
  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
  // Initialize the given reserved space with the given base address and the size
  // actually used.
  // Prefer to commit in page_size chunks.
  G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size);

  // Destruction
  ~G1PageBasedVirtualSpace();
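The class converts between addresses and page indices with simple arithmetic relative to the low boundary. A self-contained sketch of that mapping (the function names mirror the declarations above, but this is plain C++ for illustration, not the actual member functions):

// Illustrative sketch only -- plain C++, not HotSpot code.
#include <cstddef>

// Address <-> page-index arithmetic relative to a low boundary, in the spirit
// of addr_to_page_index() and page_start() declared in the class above.
static size_t addr_to_page_index(const char* low_boundary, const char* addr, size_t page_size) {
  return (size_t)(addr - low_boundary) / page_size;
}

static char* page_start(char* low_boundary, size_t index, size_t page_size) {
  return low_boundary + index * page_size;
}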
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -31,17 +31,16 @@
#include "utilities/bitMap.inline.hpp"

G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                             size_t commit_granularity,
                                             size_t used_size,
                                             size_t page_size,
                                             size_t region_granularity,
                                             MemoryType type) :
  _storage(),
  _commit_granularity(commit_granularity),
  _storage(rs, used_size, page_size),
  _region_granularity(region_granularity),
  _listener(NULL),
  _commit_map() {
  guarantee(is_power_of_2(commit_granularity), "must be");
  guarantee(is_power_of_2(page_size), "must be");
  guarantee(is_power_of_2(region_granularity), "must be");
  _storage.initialize_with_granularity(rs, commit_granularity);

  MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
@@ -55,25 +54,26 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {

 public:
  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
                                      size_t os_commit_granularity,
                                      size_t actual_size,
                                      size_t page_size,
                                      size_t alloc_granularity,
                                      size_t commit_factor,
                                      MemoryType type) :
    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
    _pages_per_region(alloc_granularity / (page_size * commit_factor)) {

    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
    guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  }

  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
    bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
  virtual void commit_regions(uint start_idx, size_t num_regions) {
    bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
    _commit_map.set_range(start_idx, start_idx + num_regions);
    fire_on_commit(start_idx, num_regions, zero_filled);
  }

  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
    _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
    _commit_map.clear_range(start_idx, start_idx + num_regions);
  }
};
@@ -98,22 +98,23 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {

 public:
  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                       size_t os_commit_granularity,
                                       size_t actual_size,
                                       size_t page_size,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
    G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {

    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
    guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
    _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
  }

  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
      uintptr_t idx = region_idx_to_page_idx(i);
  virtual void commit_regions(uint start_idx, size_t num_regions) {
    for (uint i = start_idx; i < start_idx + num_regions; i++) {
      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region %u that is already committed", i));
      size_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);
      bool zero_filled = false;
      if (old_refcount == 0) {
@@ -125,10 +126,10 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
    }
  }

  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
      uintptr_t idx = region_idx_to_page_idx(i);
  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
    for (uint i = start_idx; i < start_idx + num_regions; i++) {
      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region %u that is not committed", i));
      size_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);
      assert(old_refcount > 0, "must be");
      if (old_refcount == 1) {
@@ -147,14 +148,15 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
}

G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                            size_t os_commit_granularity,
                                                            size_t actual_size,
                                                            size_t page_size,
                                                            size_t region_granularity,
                                                            size_t commit_factor,
                                                            MemoryType type) {

  if (region_granularity >= (os_commit_granularity * commit_factor)) {
    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
  if (region_granularity >= (page_size * commit_factor)) {
    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  } else {
    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  }
}
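create_mapper() above picks a mapper based on how the region granularity compares with one commit unit (page_size * commit_factor): when a region spans at least one commit unit, each region maps to whole pages of backing store; otherwise several regions share one page and the mapper keeps per-page reference counts. A small standalone sketch of that decision (illustrative only; the enum is invented):

// Illustrative sketch only -- plain C++, not HotSpot code.
#include <cstddef>

enum MapperKind { kRegionsLargerThanCommitSize, kRegionsSmallerThanCommitSize };

// Mirrors the predicate in G1RegionToSpaceMapper::create_mapper() above.
static MapperKind choose_mapper(size_t region_granularity, size_t page_size, size_t commit_factor) {
  const size_t commit_unit = page_size * commit_factor;
  return (region_granularity >= commit_unit) ? kRegionsLargerThanCommitSize
                                             : kRegionsSmallerThanCommitSize;
}
// Example: 1M regions with 4K pages and commit_factor 1 -> larger-than-commit-size mapper;
// 1M regions with 2M (large) pages -> smaller-than-commit-size mapper (regions share a page).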
@@ -46,12 +46,12 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
 protected:
  // Backing storage.
  G1PageBasedVirtualSpace _storage;
  size_t _commit_granularity;

  size_t _region_granularity;
  // Mapping management
  BitMap _commit_map;

  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
  G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, MemoryType type);

  void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 public:
@@ -70,16 +70,20 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
    return _commit_map.at(idx);
  }

  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
  virtual void commit_regions(uint start_idx, size_t num_regions = 1) = 0;
  virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;

  // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
  // The actual space to be used within the given reservation is given by actual_size.
  // This is because some OSes need to round up the reservation size to guarantee
  // alignment of page_size.
  // The byte_translation_factor defines how many bytes in a region correspond to
  // a single byte in the data structure this mapper is for.
  // Eg. in the card table, this value corresponds to the size a single card
  // table entry corresponds to.
  // table entry corresponds to in the heap.
  static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
                                              size_t os_commit_granularity,
                                              size_t actual_size,
                                              size_t page_size,
                                              size_t region_granularity,
                                              size_t byte_translation_factor,
                                              MemoryType type);
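The byte_translation_factor comment above can be made concrete with a worked example: if one card-table byte covers 512 heap bytes, a 1 MB heap region corresponds to 2048 bytes of card table, which is the amount of backing store the mapper has to provide per region. A small arithmetic sketch (the 512-byte card size and 1 MB region size are example values, not taken from this patch):

// Illustrative sketch only -- plain C++, not HotSpot code.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_size             = 1024 * 1024; // example G1 region size: 1M
  const size_t byte_translation_factor = 512;         // example: one card-table byte per 512 heap bytes
  const size_t table_bytes_per_region  = region_size / byte_translation_factor;
  std::printf("card table bytes per region: %zu\n", table_bytes_per_region); // 2048
  return 0;
}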
@ -116,7 +116,7 @@ void G1RootProcessor::wait_until_all_strong_classes_discovered() {
|
||||
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
|
||||
_g1h(g1h),
|
||||
_process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
|
||||
_srs(g1h),
|
||||
_srs(),
|
||||
_lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
|
||||
_n_workers_discovered_strong_classes(0) {}
|
||||
|
||||
@ -253,7 +253,8 @@ void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
|
||||
|
||||
{
|
||||
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
|
||||
Threads::possibly_parallel_oops_do(strong_roots, thread_stack_clds, strong_code);
|
||||
bool is_par = _g1h->n_par_threads() > 0;
|
||||
Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -26,7 +26,7 @@
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_ROOTPROCESSOR_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "memory/strongRootsScope.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
|
||||
class CLDClosure;
|
||||
@ -46,7 +46,7 @@ class SubTasksDone;
|
||||
class G1RootProcessor : public StackObj {
|
||||
G1CollectedHeap* _g1h;
|
||||
SubTasksDone* _process_strong_tasks;
|
||||
SharedHeap::StrongRootsScope _srs;
|
||||
StrongRootsScope _srs;
|
||||
|
||||
// Used to implement the Thread work barrier.
|
||||
Monitor _lock;
|
||||
|
@@ -330,8 +330,12 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, Heap
      assert(!hrclaimer->is_region_claimed(ch_index),
             "Must not have been claimed yet because claiming of humongous continuation first claims the start region");

      // There's no need to actually claim the continues humongous region, but we can do it in an assert as an extra precaution.
      assert(hrclaimer->claim_region(ch_index), "We should always be able to claim the continuesHumongous part of the humongous object");
      // Claim the region so no other worker tries to process the region. When a worker processes a
      // starts_humongous region it may also process the associated continues_humongous regions.
      // The continues_humongous regions can be changed to free regions. Unless this worker claims
      // all of these regions, other workers might try to claim and process these newly free regions.
      bool claim_result = hrclaimer->claim_region(ch_index);
      guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");

      bool res2 = blk->doHeapRegion(chr);
      if (res2) {
@ -419,6 +419,7 @@ void FreeRegionList_test() {
|
||||
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
|
||||
G1RegionToSpaceMapper* bot_storage =
|
||||
G1RegionToSpaceMapper::create_mapper(bot_rs,
|
||||
bot_rs.size(),
|
||||
os::vm_page_size(),
|
||||
HeapRegion::GrainBytes,
|
||||
G1BlockOffsetSharedArray::N_bytes,
|
||||
|
@ -25,8 +25,8 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/satbQueue.hpp"
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
|
@ -70,7 +70,7 @@
|
||||
\
|
||||
declare_toplevel_type(G1HeapRegionTable) \
|
||||
\
|
||||
declare_type(G1CollectedHeap, SharedHeap) \
|
||||
declare_type(G1CollectedHeap, CollectedHeap) \
|
||||
\
|
||||
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
|
||||
declare_type(HeapRegion, G1OffsetTableContigSpace) \
|
||||
|
@ -225,15 +225,10 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
|
||||
|
||||
void VM_CGC_Operation::doit() {
|
||||
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
|
||||
GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
|
||||
SharedHeap* sh = SharedHeap::heap();
|
||||
// This could go away if CollectedHeap gave access to _gc_is_active...
|
||||
if (sh != NULL) {
|
||||
IsGCActiveMark x;
|
||||
_cl->do_void();
|
||||
} else {
|
||||
_cl->do_void();
|
||||
}
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
|
||||
IsGCActiveMark x;
|
||||
_cl->do_void();
|
||||
}
|
||||
|
||||
bool VM_CGC_Operation::doit_prologue() {
|
||||
@ -244,14 +239,12 @@ bool VM_CGC_Operation::doit_prologue() {
|
||||
}
|
||||
|
||||
Heap_lock->lock();
|
||||
SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
void VM_CGC_Operation::doit_epilogue() {
|
||||
// Note the relative order of the unlocks must match that in
|
||||
// VM_GC_Operation::doit_epilogue()
|
||||
SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
|
||||
Heap_lock->unlock();
|
||||
if (_needs_pll) {
|
||||
release_and_notify_pending_list_lock();
|
||||
|
@ -23,10 +23,10 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/cardTableModRefBS.hpp"
|
||||
#include "memory/cardTableRS.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "memory/space.inline.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
|
@ -42,7 +42,7 @@
|
||||
#include "memory/generation.hpp"
|
||||
#include "memory/referencePolicy.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "memory/strongRootsScope.hpp"
|
||||
#include "memory/space.hpp"
|
||||
#include "oops/objArrayOop.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
@ -596,8 +596,6 @@ void ParNewGenTask::work(uint worker_id) {
|
||||
// and handle marks.
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
// We would need multiple old-gen queues otherwise.
|
||||
assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
|
||||
|
||||
ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
|
||||
assert(_state_set->is_valid(worker_id), "Should not have been called");
|
||||
@ -922,8 +920,6 @@ void ParNewGeneration::collect(bool full,
|
||||
workers->active_workers(),
|
||||
Threads::number_of_non_daemon_threads());
|
||||
workers->set_active_workers(active_workers);
|
||||
assert(gch->n_gens() == 2,
|
||||
"Par collection currently only works with single older gen.");
|
||||
_old_gen = gch->old_gen();
|
||||
|
||||
// If the next generation is too full to accommodate worst-case promotion
|
||||
@ -974,10 +970,10 @@ void ParNewGeneration::collect(bool full,
|
||||
// in the multi-threaded case, but we special-case n=1 here to get
|
||||
// repeatable measurements of the 1-thread overhead of the parallel code.
|
||||
if (n_workers > 1) {
|
||||
GenCollectedHeap::StrongRootsScope srs(gch);
|
||||
StrongRootsScope srs;
|
||||
workers->run_task(&tsk);
|
||||
} else {
|
||||
GenCollectedHeap::StrongRootsScope srs(gch);
|
||||
StrongRootsScope srs;
|
||||
tsk.work(0);
|
||||
}
|
||||
thread_state_set.reset(0 /* Bad value in debug if not reset */,
|
||||
|
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
|
||||
|
||||
// Generate ParNew specialized oop_oop_iterate functions.
|
||||
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(ALL_KLASS_OOP_OOP_ITERATE_DEFN);
|
@ -78,13 +78,7 @@ jint ParallelScavengeHeap::initialize() {
|
||||
|
||||
CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
|
||||
barrier_set->initialize();
|
||||
_barrier_set = barrier_set;
|
||||
oopDesc::set_bs(_barrier_set);
|
||||
if (_barrier_set == NULL) {
|
||||
vm_shutdown_during_initialization(
|
||||
"Could not reserve enough space for barrier set");
|
||||
return JNI_ENOMEM;
|
||||
}
|
||||
set_barrier_set(barrier_set);
|
||||
|
||||
// Make up the generations
|
||||
// Calculate the maximum size that a generation can grow. This
|
||||
@ -522,10 +516,6 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
|
||||
VMThread::execute(&op);
|
||||
}
|
||||
|
||||
void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
|
||||
young_gen()->object_iterate(cl);
|
||||
old_gen()->object_iterate(cl);
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include "gc_implementation/shared/gcWhen.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "memory/collectorPolicy.hpp"
|
||||
#include "memory/strongRootsScope.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
class AdjoiningGenerations;
|
||||
@ -201,7 +202,6 @@ class ParallelScavengeHeap : public CollectedHeap {
|
||||
// initializing stores to an object at this address.
|
||||
virtual bool can_elide_initializing_store_barrier(oop new_obj);
|
||||
|
||||
void oop_iterate(ExtendedOopClosure* cl);
|
||||
void object_iterate(ObjectClosure* cl);
|
||||
void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
|
||||
|
||||
@ -238,7 +238,7 @@ class ParallelScavengeHeap : public CollectedHeap {
|
||||
void gen_mangle_unused_area() PRODUCT_RETURN;
|
||||
|
||||
// Call these in sequential code around the processing of strong roots.
|
||||
class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
|
||||
class ParStrongRootsScope : public MarkScope {
|
||||
public:
|
||||
ParStrongRootsScope();
|
||||
~ParStrongRootsScope();
|
||||
|
@ -41,6 +41,7 @@
|
||||
#include "runtime/thread.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "services/management.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
|
||||
//
|
||||
// ThreadRootsMarkingTask
|
||||
|
@ -30,7 +30,10 @@
|
||||
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/instanceMirrorKlass.inline.hpp"
|
||||
#include "oops/objArrayKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomic.inline.hpp"
|
||||
@ -174,6 +177,142 @@ ParCompactionManager::gc_thread_compaction_manager(int index) {
|
||||
return _manager_array[index];
|
||||
}
|
||||
|
||||
void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
|
||||
assert(obj != NULL, "can't follow the content of NULL object");
|
||||
|
||||
PSParallelCompact::follow_klass(cm, this);
|
||||
// Only mark the header and let the scan of the meta-data mark
|
||||
// everything else.
|
||||
|
||||
PSParallelCompact::MarkAndPushClosure cl(cm);
|
||||
InstanceKlass::oop_oop_iterate_oop_maps<true>(obj, &cl);
|
||||
}
|
||||
|
||||
void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
|
||||
InstanceKlass::oop_pc_follow_contents(obj, cm);
|
||||
|
||||
// Follow the klass field in the mirror.
|
||||
Klass* klass = java_lang_Class::as_Klass(obj);
|
||||
if (klass != NULL) {
|
||||
// An anonymous class doesn't have its own class loader, so the call
|
||||
// to follow_klass will mark and push its java mirror instead of the
|
||||
// class loader. When handling the java mirror for an anonymous class
|
||||
// we need to make sure its class loader data is claimed, this is done
|
||||
// by calling follow_class_loader explicitly. For non-anonymous classes
|
||||
// the call to follow_class_loader is made when the class loader itself
|
||||
// is handled.
|
||||
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
|
||||
PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
|
||||
} else {
|
||||
PSParallelCompact::follow_klass(cm, klass);
|
||||
}
|
||||
} else {
|
||||
// If klass is NULL then this a mirror for a primitive type.
|
||||
// We don't have to follow them, since they are handled as strong
|
||||
// roots in Universe::oops_do.
|
||||
assert(java_lang_Class::is_primitive(obj), "Sanity check");
|
||||
}
|
||||
|
||||
PSParallelCompact::MarkAndPushClosure cl(cm);
|
||||
oop_oop_iterate_statics<true>(obj, &cl);
|
||||
}
|
||||
|
||||
void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
|
||||
InstanceKlass::oop_pc_follow_contents(obj, cm);
|
||||
|
||||
ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
|
||||
if (loader_data != NULL) {
|
||||
PSParallelCompact::follow_class_loader(cm, loader_data);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
T heap_oop = oopDesc::load_heap_oop(referent_addr);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
|
||||
}
|
||||
)
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
|
||||
PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
|
||||
// reference already enqueued, referent will be traversed later
|
||||
klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Non NULL enqueued " PTR_FORMAT, p2i(obj));
|
||||
}
|
||||
)
|
||||
return;
|
||||
} else {
|
||||
// treat referent as normal oop
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Non NULL normal " PTR_FORMAT, p2i(obj));
|
||||
}
|
||||
)
|
||||
PSParallelCompact::mark_and_push(cm, referent_addr);
|
||||
}
|
||||
}
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
if (ReferenceProcessor::pending_list_uses_discovered_field()) {
|
||||
// Treat discovered as normal oop, if ref is not "active",
|
||||
// i.e. if next is non-NULL.
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr);
|
||||
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process discovered as normal "
|
||||
PTR_FORMAT, p2i(discovered_addr));
|
||||
}
|
||||
)
|
||||
PSParallelCompact::mark_and_push(cm, discovered_addr);
|
||||
}
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
// In the case of older JDKs which do not use the discovered
|
||||
// field for the pending list, an inactive ref (next != NULL)
|
||||
// must always have a NULL discovered field.
|
||||
T next = oopDesc::load_heap_oop(next_addr);
|
||||
oop discovered = java_lang_ref_Reference::discovered(obj);
|
||||
assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
|
||||
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
|
||||
p2i(obj)));
|
||||
#endif
|
||||
}
|
||||
PSParallelCompact::mark_and_push(cm, next_addr);
|
||||
klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
|
||||
}
|
||||
|
||||
|
||||
void InstanceRefKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
|
||||
if (UseCompressedOops) {
|
||||
oop_pc_follow_contents_specialized<narrowOop>(this, obj, cm);
|
||||
} else {
|
||||
oop_pc_follow_contents_specialized<oop>(this, obj, cm);
|
||||
}
|
||||
}
|
||||
|
||||
void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
|
||||
PSParallelCompact::follow_klass(cm, this);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
oop_pc_follow_contents_specialized<narrowOop>(objArrayOop(obj), 0, cm);
|
||||
} else {
|
||||
oop_pc_follow_contents_specialized<oop>(objArrayOop(obj), 0, cm);
|
||||
}
|
||||
}
|
||||
|
||||
void TypeArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
}
|
||||
|
||||
void ParCompactionManager::follow_marking_stacks() {
|
||||
do {
|
||||
// Drain the overflow stack first, to allow stealing from the marking stack.
|
||||
|
@ -26,9 +26,11 @@
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
|
||||
#include "oops/objArrayKlass.inline.hpp"
|
||||
#include "oops/oop.pcgc.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
|
||||
#include "oops/objArrayOop.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
void ParCompactionManager::push_objarray(oop obj, size_t index)
|
||||
{
|
||||
@ -49,16 +51,42 @@ void ParCompactionManager::push_region(size_t index)
|
||||
}
|
||||
|
||||
inline void ParCompactionManager::follow_contents(oop obj) {
|
||||
obj->follow_contents(this);
|
||||
assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
|
||||
obj->pc_follow_contents(this);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void oop_pc_follow_contents_specialized(objArrayOop obj, int index, ParCompactionManager* cm) {
|
||||
const size_t len = size_t(obj->length());
|
||||
const size_t beg_index = size_t(index);
|
||||
assert(beg_index < len || len == 0, "index too large");
|
||||
|
||||
const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
|
||||
const size_t end_index = beg_index + stride;
|
||||
T* const base = (T*)obj->base();
|
||||
T* const beg = base + beg_index;
|
||||
T* const end = base + end_index;
|
||||
|
||||
// Push the non-NULL elements of the next stride on the marking stack.
|
||||
for (T* e = beg; e < end; e++) {
|
||||
PSParallelCompact::mark_and_push<T>(cm, e);
|
||||
}
|
||||
|
||||
if (end_index < len) {
|
||||
cm->push_objarray(obj, end_index); // Push the continuation.
|
||||
}
|
||||
}
|
||||
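The object-array helper above only processes one stride of elements and pushes a continuation for the rest, so a huge array does not monopolize a single GC worker; ObjArrayMarkingStride is the knob that sizes the chunk. A standalone sketch of the same chunking idea (plain C++, illustrative only):

// Illustrative sketch only -- plain C++, not HotSpot code.
#include <cstddef>
#include <algorithm>

// Process the elements of a length-`len` array starting at `index`, one stride
// at a time; returns the index to continue from, or `len` when done.
static size_t process_one_stride(size_t index, size_t len, size_t stride,
                                 void (*process_element)(size_t)) {
  const size_t end = std::min(len, index + stride);
  for (size_t i = index; i < end; i++) {
    process_element(i);            // stands in for mark_and_push() on element i
  }
  return end;                      // caller re-enqueues (obj, end) if end < len
}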
|
||||
inline void ParCompactionManager::follow_contents(objArrayOop obj, int index) {
|
||||
ObjArrayKlass* k = (ObjArrayKlass*)obj->klass();
|
||||
k->oop_follow_contents(this, obj, index);
|
||||
if (UseCompressedOops) {
|
||||
oop_pc_follow_contents_specialized<narrowOop>(obj, index, this);
|
||||
} else {
|
||||
oop_pc_follow_contents_specialized<oop>(obj, index, this);
|
||||
}
|
||||
}
|
||||
|
||||
inline void ParCompactionManager::update_contents(oop obj) {
|
||||
obj->update_contents(this);
|
||||
obj->pc_update_contents();
|
||||
}
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_INLINE_HPP
|
||||
|
@ -34,7 +34,7 @@
|
||||
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
|
||||
@ -48,7 +48,10 @@
|
||||
#include "memory/gcLocker.inline.hpp"
|
||||
#include "memory/referencePolicy.hpp"
|
||||
#include "memory/referenceProcessor.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/instanceMirrorKlass.inline.hpp"
|
||||
#include "oops/methodData.hpp"
|
||||
#include "oops/objArrayKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomic.inline.hpp"
|
||||
#include "runtime/fprofiler.hpp"
|
||||
@ -823,16 +826,8 @@ void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompa
|
||||
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
|
||||
PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
|
||||
|
||||
void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); }
|
||||
void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
|
||||
|
||||
void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
|
||||
|
||||
void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) {
|
||||
mark_and_push(_compaction_manager, p);
|
||||
}
|
||||
void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
|
||||
|
||||
void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
|
||||
klass->oops_do(_mark_and_push_closure);
|
||||
}
|
||||
@ -3338,6 +3333,71 @@ void MoveAndUpdateClosure::copy_partial_obj()
|
||||
update_state(words);
|
||||
}
|
||||
|
||||
void InstanceKlass::oop_pc_update_pointers(oop obj) {
|
||||
oop_oop_iterate_oop_maps<true>(obj, PSParallelCompact::adjust_pointer_closure());
|
||||
}
|
||||
|
||||
void InstanceMirrorKlass::oop_pc_update_pointers(oop obj) {
|
||||
InstanceKlass::oop_pc_update_pointers(obj);
|
||||
|
||||
oop_oop_iterate_statics<true>(obj, PSParallelCompact::adjust_pointer_closure());
|
||||
}
|
||||
|
||||
void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj) {
|
||||
InstanceKlass::oop_pc_update_pointers(obj);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
template <class T> static void trace_reference_gc(const char *s, oop obj,
|
||||
T* referent_addr,
|
||||
T* next_addr,
|
||||
T* discovered_addr) {
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
|
||||
gclog_or_tty->print_cr(" referent_addr/* " PTR_FORMAT " / "
|
||||
PTR_FORMAT, p2i(referent_addr),
|
||||
referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
|
||||
gclog_or_tty->print_cr(" next_addr/* " PTR_FORMAT " / "
|
||||
PTR_FORMAT, p2i(next_addr),
|
||||
next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
|
||||
gclog_or_tty->print_cr(" discovered_addr/* " PTR_FORMAT " / "
|
||||
PTR_FORMAT, p2i(discovered_addr),
|
||||
discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
template <class T>
|
||||
static void oop_pc_update_pointers_specialized(oop obj) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
PSParallelCompact::adjust_pointer(referent_addr);
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
PSParallelCompact::adjust_pointer(next_addr);
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
PSParallelCompact::adjust_pointer(discovered_addr);
|
||||
debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
|
||||
referent_addr, next_addr, discovered_addr);)
|
||||
}
|
||||
|
||||
void InstanceRefKlass::oop_pc_update_pointers(oop obj) {
|
||||
InstanceKlass::oop_pc_update_pointers(obj);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
oop_pc_update_pointers_specialized<narrowOop>(obj);
|
||||
} else {
|
||||
oop_pc_update_pointers_specialized<oop>(obj);
|
||||
}
|
||||
}
|
||||
|
||||
void ObjArrayKlass::oop_pc_update_pointers(oop obj) {
|
||||
assert(obj->is_objArray(), "obj must be obj array");
|
||||
oop_oop_iterate_elements<true>(objArrayOop(obj), PSParallelCompact::adjust_pointer_closure());
|
||||
}
|
||||
|
||||
void TypeArrayKlass::oop_pc_update_pointers(oop obj) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
}
|
||||
|
||||
ParMarkBitMapClosure::IterationStatus
|
||||
MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
|
||||
assert(destination() != NULL, "sanity");
|
||||
|
@ -30,7 +30,7 @@
|
||||
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
|
||||
#include "gc_implementation/shared/collectorCounters.hpp"
|
||||
#include "gc_implementation/shared/mutableSpace.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
|
||||
class ParallelScavengeHeap;
|
||||
@ -951,12 +951,14 @@ class PSParallelCompact : AllStatic {
|
||||
virtual void do_void();
|
||||
};
|
||||
|
||||
class AdjustPointerClosure: public OopClosure {
|
||||
class AdjustPointerClosure: public ExtendedOopClosure {
|
||||
public:
|
||||
template <typename T> void do_oop_nv(T* p);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
// do not walk from thread stacks to the code cache on this phase
|
||||
virtual void do_code_blob(CodeBlob* cb) const { }
|
||||
|
||||
// This closure provides its own oop verification code.
|
||||
debug_only(virtual bool should_verify_oops() { return false; })
|
||||
};
|
||||
|
||||
class AdjustKlassClosure : public KlassClosure {
|
||||
@ -1139,13 +1141,18 @@ class PSParallelCompact : AllStatic {
|
||||
static void reset_millis_since_last_gc();
|
||||
|
||||
public:
|
||||
class MarkAndPushClosure: public OopClosure {
|
||||
class MarkAndPushClosure: public ExtendedOopClosure {
|
||||
private:
|
||||
ParCompactionManager* _compaction_manager;
|
||||
public:
|
||||
MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
|
||||
|
||||
template <typename T> void do_oop_nv(T* p);
|
||||
virtual void do_oop(oop* p);
|
||||
virtual void do_oop(narrowOop* p);
|
||||
|
||||
// This closure provides its own oop verification code.
|
||||
debug_only(virtual bool should_verify_oops() { return false; })
|
||||
};
|
||||
|
||||
// The one and only place to start following the classes.
|
||||
@ -1177,7 +1184,9 @@ class PSParallelCompact : AllStatic {
|
||||
static bool initialize();
|
||||
|
||||
// Closure accessors
|
||||
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; }
|
||||
static PSParallelCompact::AdjustPointerClosure* adjust_pointer_closure() {
|
||||
return &_adjust_pointer_closure;
|
||||
}
|
||||
static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; }
|
||||
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; }
|
||||
|
||||
@ -1332,39 +1341,6 @@ inline bool PSParallelCompact::is_marked(oop obj) {
|
||||
return mark_bitmap()->is_marked(obj);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
|
||||
cm->push(obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void PSParallelCompact::adjust_pointer(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
oop new_obj = (oop)summary_data().calc_new_pointer(obj);
|
||||
assert(new_obj != NULL, // is forwarding ptr?
|
||||
"should be forwarded");
|
||||
// Just always do the update unconditionally?
|
||||
if (new_obj != NULL) {
|
||||
assert(Universe::heap()->is_in_reserved(new_obj),
|
||||
"should be in object space");
|
||||
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
|
||||
oop holder = klass->klass_holder();
|
||||
PSParallelCompact::mark_and_push(cm, &holder);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
|
||||
mark_and_push(_compaction_manager, p);
|
||||
|
@ -0,0 +1,87 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "oops/klass.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
|
||||
template <typename T>
|
||||
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in(obj), "should be in heap");
|
||||
|
||||
if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
|
||||
cm->push(obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void PSParallelCompact::MarkAndPushClosure::do_oop_nv(T* p) {
|
||||
mark_and_push(_compaction_manager, p);
|
||||
}
|
||||
|
||||
inline void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) { do_oop_nv(p); }
|
||||
inline void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
|
||||
inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
|
||||
oop holder = klass->klass_holder();
|
||||
mark_and_push(cm, &holder);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void PSParallelCompact::adjust_pointer(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in(obj), "should be in heap");
|
||||
|
||||
oop new_obj = (oop)summary_data().calc_new_pointer(obj);
|
||||
assert(new_obj != NULL, // is forwarding ptr?
|
||||
"should be forwarded");
|
||||
// Just always do the update unconditionally?
|
||||
if (new_obj != NULL) {
|
||||
assert(Universe::heap()->is_in_reserved(new_obj),
|
||||
"should be in object space");
|
||||
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void PSParallelCompact::AdjustPointerClosure::do_oop_nv(T* p) {
|
||||
adjust_pointer(p);
|
||||
}
|
||||
|
||||
inline void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { do_oop_nv(p); }
|
||||
inline void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
|
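The closures above follow the HotSpot pattern of a non-virtual, templated do_oop_nv() plus thin virtual do_oop() forwarders: specialized iterators can bind the call at compile time, while generic code still works through the virtual interface. A generic, self-contained sketch of that pattern (the class names and the int** "slot" type are invented for illustration):

// Illustrative sketch only -- plain C++, not HotSpot code.
struct OopClosureBase {
  virtual void do_oop(int** p) = 0;      // generic, virtual entry point
  virtual ~OopClosureBase() {}
};

struct CountingClosure : public OopClosureBase {
  int count;
  CountingClosure() : count(0) {}
  template <typename T> void do_oop_nv(T** p) { count++; }   // non-virtual "work" method
  virtual void do_oop(int** p) { do_oop_nv(p); }             // virtual forwarder
};

// A specialized iterator can call cl.do_oop_nv(p) directly (no virtual dispatch),
// while generic code keeps calling through OopClosureBase::do_oop().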
@ -32,6 +32,9 @@
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "memory/padded.inline.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/instanceMirrorKlass.inline.hpp"
|
||||
#include "oops/objArrayKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
|
||||
@ -308,6 +311,118 @@ void PSPromotionManager::process_array_chunk(oop old) {
|
||||
}
|
||||
}
|
||||
|
||||
class PushContentsClosure : public ExtendedOopClosure {
|
||||
PSPromotionManager* _pm;
|
||||
public:
|
||||
PushContentsClosure(PSPromotionManager* pm) : _pm(pm) {}
|
||||
|
||||
template <typename T> void do_oop_nv(T* p) {
|
||||
if (PSScavenge::should_scavenge(p)) {
|
||||
_pm->claim_or_forward_depth(p);
|
||||
}
|
||||
}
|
||||
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
|
||||
// Don't use the oop verification code in the oop_oop_iterate framework.
|
||||
debug_only(virtual bool should_verify_oops() { return false; })
|
||||
};
|
||||
|
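PushContentsClosure above only claims pointers for which PSScavenge::should_scavenge() holds, i.e. pointers that refer into the young generation; everything else is left alone during a scavenge. A standalone sketch of that filter using an explicit address range as a stand-in for the real eden/survivor test (illustrative only):

// Illustrative sketch only -- plain C++, not HotSpot code.
#include <cstdint>
#include <cstddef>

// Claim a slot only if the pointer it holds refers into the "young" range,
// mirroring the should_scavenge() check in PushContentsClosure::do_oop_nv().
template <typename Visitor>
static void maybe_claim(void** slot, const void* young_start, const void* young_end,
                        Visitor& claim) {
  const std::uintptr_t p     = reinterpret_cast<std::uintptr_t>(*slot);
  const std::uintptr_t start = reinterpret_cast<std::uintptr_t>(young_start);
  const std::uintptr_t end   = reinterpret_cast<std::uintptr_t>(young_end);
  if (p != 0 && p >= start && p < end) {
    claim(slot);   // stands in for pm->claim_or_forward_depth(slot)
  }
}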
||||
void InstanceKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
|
||||
PushContentsClosure cl(pm);
|
||||
oop_oop_iterate_oop_maps_reverse<true>(obj, &cl);
|
||||
}
|
||||
|
||||
void InstanceMirrorKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
|
||||
// Note that we don't have to follow the mirror -> klass pointer, since all
|
||||
// klasses that are dirty will be scavenged when we iterate over the
|
||||
// ClassLoaderData objects.
|
||||
|
||||
InstanceKlass::oop_ps_push_contents(obj, pm);
|
||||
|
||||
PushContentsClosure cl(pm);
|
||||
oop_oop_iterate_statics<true>(obj, &cl);
|
||||
}
|
||||
|
||||
void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
|
||||
InstanceKlass::oop_ps_push_contents(obj, pm);
|
||||
|
||||
// This is called by the young collector. It will already have taken care of
|
||||
// all class loader data. So, we don't have to follow the class loader ->
|
||||
// class loader data link.
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
if (PSScavenge::should_scavenge(referent_addr)) {
|
||||
ReferenceProcessor* rp = PSScavenge::reference_processor();
|
||||
if (rp->discover_reference(obj, klass->reference_type())) {
|
||||
// reference already enqueued, referent and next will be traversed later
|
||||
klass->InstanceKlass::oop_ps_push_contents(obj, pm);
|
||||
return;
|
||||
} else {
|
||||
// treat referent as normal oop
|
||||
pm->claim_or_forward_depth(referent_addr);
|
||||
}
|
||||
}
|
||||
// Treat discovered as normal oop, if ref is not "active",
|
||||
// i.e. if next is non-NULL.
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
if (ReferenceProcessor::pending_list_uses_discovered_field()) {
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr);
|
||||
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process discovered as normal "
|
||||
PTR_FORMAT, p2i(discovered_addr));
|
||||
}
|
||||
)
|
||||
if (PSScavenge::should_scavenge(discovered_addr)) {
|
||||
pm->claim_or_forward_depth(discovered_addr);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
// In the case of older JDKs which do not use the discovered
|
||||
// field for the pending list, an inactive ref (next != NULL)
|
||||
// must always have a NULL discovered field.
|
||||
oop next = oopDesc::load_decode_heap_oop(next_addr);
|
||||
oop discovered = java_lang_ref_Reference::discovered(obj);
|
||||
assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
|
||||
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
|
||||
p2i(obj)));
|
||||
#endif
|
||||
}
|
||||
|
||||
// Treat next as normal oop; next is a link in the reference queue.
|
||||
if (PSScavenge::should_scavenge(next_addr)) {
|
||||
pm->claim_or_forward_depth(next_addr);
|
||||
}
|
||||
klass->InstanceKlass::oop_ps_push_contents(obj, pm);
|
||||
}
|
||||
|
||||
void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
|
||||
if (UseCompressedOops) {
|
||||
oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
|
||||
} else {
|
||||
oop_ps_push_contents_specialized<oop>(obj, this, pm);
|
||||
}
|
||||
}
|
||||
|
||||
void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
|
||||
assert(obj->is_objArray(), "obj must be obj array");
|
||||
PushContentsClosure cl(pm);
|
||||
oop_oop_iterate_elements<true>(objArrayOop(obj), &cl);
|
||||
}
|
||||
|
||||
void TypeArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
|
||||
assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
|
||||
|
||||
|
@ -29,7 +29,7 @@
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
|
||||
#include "oops/oop.psgc.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
|
||||
inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
|
||||
assert(_manager_array != NULL, "access of NULL manager_array");
|
||||
@ -92,7 +92,7 @@ inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
|
||||
}
|
||||
|
||||
inline void PSPromotionManager::push_contents(oop obj) {
|
||||
obj->push_contents(this);
|
||||
obj->ps_push_contents(this);
|
||||
}
|
||||
//
|
||||
// This method is pretty bulky. It would be nice to split it up
|
||||
|
@ -25,9 +25,9 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/shared/ageTable.hpp"
|
||||
#include "gc_implementation/shared/gcPolicyCounters.hpp"
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "memory/collectorPolicy.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "runtime/atomic.inline.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
|
||||
@ -79,7 +79,7 @@ void ageTable::merge_par(ageTable* subTable) {
|
||||
}
|
||||
}
|
||||
|
||||
uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
|
||||
uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
|
||||
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
|
||||
uint result;
|
||||
|
||||
@ -126,9 +126,6 @@ uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
|
||||
age++;
|
||||
}
|
||||
if (UsePerfData) {
|
||||
SharedHeap* sh = SharedHeap::heap();
|
||||
CollectorPolicy* policy = sh->collector_policy();
|
||||
GCPolicyCounters* gc_counters = policy->counters();
|
||||
gc_counters->tenuring_threshold()->set_value(result);
|
||||
gc_counters->desired_survivor_size()->set_value(
|
||||
desired_survivor_size*oopSize);
|
||||
|
@ -29,6 +29,8 @@
#include "oops/oop.hpp"
#include "runtime/perfData.hpp"

class GCPolicyCounters;

/* Copyright (c) 1992-2009 Oracle and/or its affiliates, and Stanford University.
   See the LICENSE file for license information. */

@ -69,7 +71,7 @@ class ageTable VALUE_OBJ_CLASS_SPEC {
  void merge_par(ageTable* subTable);

  // calculate new tenuring threshold based on age information
  uint compute_tenuring_threshold(size_t survivor_capacity);
  uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters);

 private:
  PerfVariable* _perf_sizes[table_size];
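The two ageTable hunks above only change where the GCPolicyCounters come from; the threshold computation itself is untouched. As a rough, self-contained sketch of what compute_tenuring_threshold does with its age table (simplified, hypothetical signature, not the exact HotSpot code): it accumulates the per-age survivor bytes and stops at the first age whose running total exceeds survivor_capacity * TargetSurvivorRatio / 100.

  #include <algorithm>
  #include <cstddef>

  // Simplified sketch: 'sizes[age]' holds the bytes occupied by objects of that age.
  unsigned compute_tenuring_threshold_sketch(const std::size_t* sizes, unsigned table_size,
                                             std::size_t survivor_capacity,
                                             unsigned target_survivor_ratio,
                                             unsigned max_tenuring_threshold) {
    std::size_t desired_survivor_size =
        (std::size_t)(((double)survivor_capacity * target_survivor_ratio) / 100);
    std::size_t total = 0;
    unsigned age = 1;
    while (age < table_size) {
      total += sizes[age];
      if (total > desired_survivor_size) break;   // objects this old and older get tenured
      age++;
    }
    return std::min(age, max_tenuring_threshold);
  }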
@ -28,6 +28,8 @@
|
||||
#include "gc_implementation/shared/gcTrace.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/instanceMirrorKlass.inline.hpp"
|
||||
#include "oops/methodData.hpp"
|
||||
#include "oops/objArrayKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
@ -55,16 +57,183 @@ MarkSweep::MarkAndPushClosure MarkSweep::mark_and_push_closure;
|
||||
CLDToOopClosure MarkSweep::follow_cld_closure(&mark_and_push_closure);
|
||||
CLDToOopClosure MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
|
||||
|
||||
void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { mark_and_push(p); }
|
||||
void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
|
||||
template <typename T>
|
||||
void MarkSweep::MarkAndPushClosure::do_oop_nv(T* p) { mark_and_push(p); }
|
||||
void MarkSweep::MarkAndPushClosure::do_oop(oop* p) { do_oop_nv(p); }
|
||||
void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
|
||||
void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
|
||||
MarkSweep::follow_cld_closure.do_cld(cld);
|
||||
}
|
||||
|
||||
void InstanceKlass::oop_ms_follow_contents(oop obj) {
|
||||
assert(obj != NULL, "can't follow the content of NULL object");
|
||||
MarkSweep::follow_klass(this);
|
||||
|
||||
oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::mark_and_push_closure);
|
||||
}
|
||||
|
||||
void InstanceMirrorKlass::oop_ms_follow_contents(oop obj) {
|
||||
InstanceKlass::oop_ms_follow_contents(obj);
|
||||
|
||||
// Follow the klass field in the mirror
|
||||
Klass* klass = java_lang_Class::as_Klass(obj);
|
||||
if (klass != NULL) {
|
||||
// An anonymous class doesn't have its own class loader, so the call
|
||||
// to follow_klass will mark and push its java mirror instead of the
|
||||
// class loader. When handling the java mirror for an anonymous class
|
||||
// we need to make sure its class loader data is claimed, this is done
|
||||
// by calling follow_class_loader explicitly. For non-anonymous classes
|
||||
// the call to follow_class_loader is made when the class loader itself
|
||||
// is handled.
|
||||
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
|
||||
MarkSweep::follow_class_loader(klass->class_loader_data());
|
||||
} else {
|
||||
MarkSweep::follow_klass(klass);
|
||||
}
|
||||
} else {
|
||||
// If klass is NULL then this is a mirror for a primitive type.
|
||||
// We don't have to follow them, since they are handled as strong
|
||||
// roots in Universe::oops_do.
|
||||
assert(java_lang_Class::is_primitive(obj), "Sanity check");
|
||||
}
|
||||
|
||||
oop_oop_iterate_statics<true>(obj, &MarkSweep::mark_and_push_closure);
|
||||
}
|
||||
|
||||
void InstanceClassLoaderKlass::oop_ms_follow_contents(oop obj) {
|
||||
InstanceKlass::oop_ms_follow_contents(obj);
|
||||
|
||||
ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
|
||||
|
||||
// We must NULL check here, since the class loader
|
||||
// can be found before the loader data has been set up.
|
||||
if(loader_data != NULL) {
|
||||
MarkSweep::follow_class_loader(loader_data);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
T heap_oop = oopDesc::load_heap_oop(referent_addr);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr("InstanceRefKlass::oop_ms_follow_contents_specialized " PTR_FORMAT, p2i(obj));
|
||||
}
|
||||
)
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if (!referent->is_gc_marked() &&
|
||||
MarkSweep::ref_processor()->discover_reference(obj, klass->reference_type())) {
|
||||
// reference was discovered, referent will be traversed later
|
||||
klass->InstanceKlass::oop_ms_follow_contents(obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Non NULL enqueued " PTR_FORMAT, p2i(obj));
|
||||
}
|
||||
)
|
||||
return;
|
||||
} else {
|
||||
// treat referent as normal oop
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Non NULL normal " PTR_FORMAT, p2i(obj));
|
||||
}
|
||||
)
|
||||
MarkSweep::mark_and_push(referent_addr);
|
||||
}
|
||||
}
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
if (ReferenceProcessor::pending_list_uses_discovered_field()) {
|
||||
// Treat discovered as normal oop, if ref is not "active",
|
||||
// i.e. if next is non-NULL.
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr);
|
||||
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process discovered as normal "
|
||||
PTR_FORMAT, p2i(discovered_addr));
|
||||
}
|
||||
)
|
||||
MarkSweep::mark_and_push(discovered_addr);
|
||||
}
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
// In the case of older JDKs which do not use the discovered
|
||||
// field for the pending list, an inactive ref (next != NULL)
|
||||
// must always have a NULL discovered field.
|
||||
oop next = oopDesc::load_decode_heap_oop(next_addr);
|
||||
oop discovered = java_lang_ref_Reference::discovered(obj);
|
||||
assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
|
||||
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
|
||||
p2i(obj)));
|
||||
#endif
|
||||
}
|
||||
// treat next as normal oop. next is a link in the reference queue.
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process next as normal " PTR_FORMAT, p2i(next_addr));
|
||||
}
|
||||
)
|
||||
MarkSweep::mark_and_push(next_addr);
|
||||
klass->InstanceKlass::oop_ms_follow_contents(obj);
|
||||
}
|
||||
|
||||
void InstanceRefKlass::oop_ms_follow_contents(oop obj) {
|
||||
if (UseCompressedOops) {
|
||||
oop_ms_follow_contents_specialized<narrowOop>(this, obj);
|
||||
} else {
|
||||
oop_ms_follow_contents_specialized<oop>(this, obj);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
static void oop_ms_follow_contents_specialized(oop obj, int index) {
|
||||
objArrayOop a = objArrayOop(obj);
|
||||
const size_t len = size_t(a->length());
|
||||
const size_t beg_index = size_t(index);
|
||||
assert(beg_index < len || len == 0, "index too large");
|
||||
|
||||
const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
|
||||
const size_t end_index = beg_index + stride;
|
||||
T* const base = (T*)a->base();
|
||||
T* const beg = base + beg_index;
|
||||
T* const end = base + end_index;
|
||||
|
||||
// Push the non-NULL elements of the next stride on the marking stack.
|
||||
for (T* e = beg; e < end; e++) {
|
||||
MarkSweep::mark_and_push<T>(e);
|
||||
}
|
||||
|
||||
if (end_index < len) {
|
||||
MarkSweep::push_objarray(a, end_index); // Push the continuation.
|
||||
}
|
||||
}
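The specialized array-following function above walks object arrays in strides of ObjArrayMarkingStride elements and pushes a continuation (push_objarray) for the remainder, so a single huge array cannot blow up the marking stack in one step. A small, self-contained sketch of that chunking idea (hypothetical types, not the HotSpot implementation):

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  struct ArrayContinuation {
    const void** elements;
    std::size_t  length;
    std::size_t  next_index;   // where the next stride starts
  };

  // Mark one stride of the array; defer the rest as a continuation.
  void follow_array_chunk(const void** elements, std::size_t length,
                          std::size_t begin, std::size_t stride,
                          std::vector<const void*>& mark_stack,
                          std::vector<ArrayContinuation>& continuations) {
    std::size_t end = std::min(length, begin + stride);
    for (std::size_t i = begin; i < end; i++) {
      if (elements[i] != nullptr) {
        mark_stack.push_back(elements[i]);               // analogous to mark_and_push(e)
      }
    }
    if (end < length) {
      continuations.push_back(ArrayContinuation{elements, length, end});  // push_objarray
    }
  }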
|
||||
|
||||
void ObjArrayKlass::oop_ms_follow_contents(oop obj) {
|
||||
assert (obj->is_array(), "obj must be array");
|
||||
MarkSweep::follow_klass(this);
|
||||
if (UseCompressedOops) {
|
||||
oop_ms_follow_contents_specialized<narrowOop>(obj, 0);
|
||||
} else {
|
||||
oop_ms_follow_contents_specialized<oop>(obj, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void TypeArrayKlass::oop_ms_follow_contents(oop obj) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
}
|
||||
|
||||
void MarkSweep::follow_array(objArrayOop array, int index) {
|
||||
ObjArrayKlass* k = (ObjArrayKlass*)array->klass();
|
||||
k->oop_follow_contents(array, index);
|
||||
if (UseCompressedOops) {
|
||||
oop_ms_follow_contents_specialized<narrowOop>(array, index);
|
||||
} else {
|
||||
oop_ms_follow_contents_specialized<oop>(array, index);
|
||||
}
|
||||
}
|
||||
|
||||
void MarkSweep::follow_stack() {
|
||||
@ -112,8 +281,10 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) {
|
||||
|
||||
MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
|
||||
|
||||
void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); }
|
||||
void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
|
||||
template <typename T>
|
||||
void MarkSweep::AdjustPointerClosure::do_oop_nv(T* p) { adjust_pointer(p); }
|
||||
void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { do_oop_nv(p); }
|
||||
void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
|
||||
void MarkSweep::adjust_marks() {
|
||||
assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
|
||||
@ -175,3 +346,84 @@ void MarkSweep::trace(const char* msg) {
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
int InstanceKlass::oop_ms_adjust_pointers(oop obj) {
|
||||
int size = size_helper();
|
||||
oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::adjust_pointer_closure);
|
||||
return size;
|
||||
}
|
||||
|
||||
int InstanceMirrorKlass::oop_ms_adjust_pointers(oop obj) {
|
||||
int size = oop_size(obj);
|
||||
InstanceKlass::oop_ms_adjust_pointers(obj);
|
||||
|
||||
oop_oop_iterate_statics<true>(obj, &MarkSweep::adjust_pointer_closure);
|
||||
return size;
|
||||
}
|
||||
|
||||
int InstanceClassLoaderKlass::oop_ms_adjust_pointers(oop obj) {
|
||||
return InstanceKlass::oop_ms_adjust_pointers(obj);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
template <class T> static void trace_reference_gc(const char *s, oop obj,
|
||||
T* referent_addr,
|
||||
T* next_addr,
|
||||
T* discovered_addr) {
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
|
||||
gclog_or_tty->print_cr(" referent_addr/* " PTR_FORMAT " / "
|
||||
PTR_FORMAT, p2i(referent_addr),
|
||||
p2i(referent_addr ?
|
||||
(address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
|
||||
gclog_or_tty->print_cr(" next_addr/* " PTR_FORMAT " / "
|
||||
PTR_FORMAT, p2i(next_addr),
|
||||
p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
|
||||
gclog_or_tty->print_cr(" discovered_addr/* " PTR_FORMAT " / "
|
||||
PTR_FORMAT, p2i(discovered_addr),
|
||||
p2i(discovered_addr ?
|
||||
(address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
template <class T> void static adjust_object_specialized(oop obj) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
MarkSweep::adjust_pointer(referent_addr);
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
MarkSweep::adjust_pointer(next_addr);
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
MarkSweep::adjust_pointer(discovered_addr);
|
||||
debug_only(trace_reference_gc("InstanceRefKlass::oop_ms_adjust_pointers", obj,
|
||||
referent_addr, next_addr, discovered_addr);)
|
||||
}
|
||||
|
||||
int InstanceRefKlass::oop_ms_adjust_pointers(oop obj) {
|
||||
int size = size_helper();
|
||||
InstanceKlass::oop_ms_adjust_pointers(obj);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
adjust_object_specialized<narrowOop>(obj);
|
||||
} else {
|
||||
adjust_object_specialized<oop>(obj);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
int ObjArrayKlass::oop_ms_adjust_pointers(oop obj) {
|
||||
assert(obj->is_objArray(), "obj must be obj array");
|
||||
objArrayOop a = objArrayOop(obj);
|
||||
// Get size before changing pointers.
|
||||
// Don't call size() or oop_size() since that is a virtual call.
|
||||
int size = a->object_size();
|
||||
oop_oop_iterate_elements<true>(a, &MarkSweep::adjust_pointer_closure);
|
||||
return size;
|
||||
}
|
||||
|
||||
int TypeArrayKlass::oop_ms_adjust_pointers(oop obj) {
|
||||
assert(obj->is_typeArray(), "must be a type array");
|
||||
typeArrayOop t = typeArrayOop(obj);
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
return t->object_size();
|
||||
}
|
||||
|
@ -60,8 +60,9 @@ class MarkSweep : AllStatic {
    virtual void do_oop(narrowOop* p);
  };

  class MarkAndPushClosure: public OopClosure {
  class MarkAndPushClosure: public ExtendedOopClosure {
   public:
    template <typename T> void do_oop_nv(T* p);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };
@ -73,8 +74,12 @@ class MarkSweep : AllStatic {

  class AdjustPointerClosure: public OopsInGenClosure {
   public:
    template <typename T> void do_oop_nv(T* p);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);

    // This closure provides its own oop verification code.
    debug_only(virtual bool should_verify_oops() { return false; })
  };

  // Used for java/lang/ref handling
@ -28,11 +28,15 @@
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/instanceRefKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#endif // INCLUDE_ALL_GCS

inline void MarkSweep::mark_object(oop obj) {
@ -59,7 +63,9 @@ inline void MarkSweep::follow_klass(Klass* klass) {
}

inline void MarkSweep::follow_object(oop obj) {
  obj->follow_contents();
  assert(obj->is_gc_marked(), "should be marked");

  obj->ms_follow_contents();
}

template <class T> inline void MarkSweep::follow_root(T* p) {
@ -95,13 +101,15 @@ void MarkSweep::push_objarray(oop obj, size_t index) {
}

inline int MarkSweep::adjust_pointers(oop obj) {
  return obj->adjust_pointers();
  return obj->ms_adjust_pointers();
}

template <class T> inline void MarkSweep::adjust_pointer(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in(obj), "should be in heap");

    oop new_obj = oop(obj->mark()->decode_pointer());
    assert(new_obj != NULL ||                         // is forwarding ptr?
           obj->mark() == markOopDesc::prototype() || // not gc marked?
@ -26,7 +26,7 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/sharedHeap.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/thread.inline.hpp"
@ -116,8 +116,6 @@ bool VM_GC_Operation::doit_prologue() {
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}
@ -126,8 +124,6 @@
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}
@ -220,6 +220,11 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  }
}

void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
@ -75,9 +75,8 @@ class GCHeapLog : public EventLogBase<GCMessage> {

//
// CollectedHeap
//   SharedHeap
//     GenCollectedHeap
//     G1CollectedHeap
//   GenCollectedHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
@ -205,7 +204,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize() = 0;
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}
@ -470,6 +469,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {

  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }
  void set_barrier_set(BarrierSet* barrier_set);

  // Returns "true" iff there is a stop-world GC in progress. (I assume
  // that it should answer "false" for the concurrent part of a concurrent
@ -497,12 +497,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

  void oop_iterate_no_header(OopClosure* cl);

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;


@ -236,12 +236,6 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
  return (oop)obj;
}

inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) {
  NoHeaderExtendedOopClosure no_header_cl(cl);
  oop_iterate(&no_header_cl);
}


inline HeapWord* CollectedHeap::align_allocation_or_fail(HeapWord* addr,
                                                         HeapWord* end,
                                                         unsigned short alignment_in_bytes) {
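The updated hierarchy comment above reflects the removal of SharedHeap from the middle of the class tree: GenCollectedHeap, G1CollectedHeap and ParallelScavengeHeap now all derive directly from CollectedHeap. A purely illustrative C++ skeleton of that shape (members and real signatures omitted or invented; not the actual HotSpot declarations):

  // Illustrative only: the heap hierarchy after SharedHeap is gone.
  class CollectedHeap {
   public:
    virtual ~CollectedHeap() {}
    virtual void post_initialize() {}   // now has a shared default instead of being pure virtual
    virtual void oop_iterate() = 0;     // stand-in for oop_iterate(ExtendedOopClosure*)
  };

  class GenCollectedHeap : public CollectedHeap {        // serial / CMS
   public:
    virtual void oop_iterate() { /* iterate young gen, then old gen */ }
  };

  class G1CollectedHeap : public CollectedHeap {
   public:
    virtual void oop_iterate() { /* iterate heap regions */ }
  };

  class ParallelScavengeHeap : public CollectedHeap {
   public:
    virtual void oop_iterate() { /* iterate young and old spaces */ }
  };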
@ -23,10 +23,11 @@
 */

#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
@ -450,21 +451,20 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
  // This is an example of where n_par_threads() is used instead
  // of workers()->active_workers(). n_par_threads can be set to 0 to
  // turn off parallelism. For example when this code is called as
  // part of verification and SharedHeap::process_roots() is being
  // used, then n_par_threads() may have been set to 0. active_workers
  // is not overloaded with the meaning that it is a switch to disable
  // parallelism and so keeps the meaning of the number of
  // active gc workers. If parallelism has not been shut off by
  // setting n_par_threads to 0, then n_par_threads should be
  // equal to active_workers. When a different mechanism for shutting
  // off parallelism is used, then active_workers can be used in
  // part of verification during root processing then n_par_threads()
  // may have been set to 0. active_workers is not overloaded with
  // the meaning that it is a switch to disable parallelism and so keeps
  // the meaning of the number of active gc workers. If parallelism has
  // not been shut off by setting n_par_threads to 0, then n_par_threads
  // should be equal to active_workers. When a different mechanism for
  // shutting off parallelism is used, then active_workers can be used in
  // place of n_par_threads.
  int n_threads = SharedHeap::heap()->n_par_threads();
  int n_threads = GenCollectedHeap::heap()->n_par_threads();
  bool is_par = n_threads > 0;
  if (is_par) {
#if INCLUDE_ALL_GCS
    assert(SharedHeap::heap()->n_par_threads() ==
           SharedHeap::heap()->workers()->active_workers(), "Mismatch");
    assert(GenCollectedHeap::heap()->n_par_threads() ==
           GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
    non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
    fatal("Parallel gc not supported here.");
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -42,12 +42,15 @@ CardTableRS::CardTableRS(MemRegion whole_heap) :
  _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
  _ct_bs->initialize();
  set_bs(_ct_bs);
  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
  // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
  // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
  uint max_gens = 2;
  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  }
  for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) {
  for (uint i = 0; i < max_gens + 1; i++) {
    _last_cur_val_in_gen[i] = clean_card_val();
  }
  _ct_bs->set_CTRS(this);
@ -167,10 +170,10 @@ ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
  // Cannot yet substitute active_workers for n_par_threads
  // in the case where parallelism is being turned off by
  // setting n_par_threads to 0.
  _is_par = (SharedHeap::heap()->n_par_threads() > 0);
  _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
  assert(!_is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
         (GenCollectedHeap::heap()->n_par_threads() ==
          GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
}

bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
@ -48,6 +48,9 @@
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/parNew/parOopClosures.hpp"
#endif

//
// DefNewGeneration functions.
@ -378,8 +381,7 @@ void DefNewGeneration::compute_new_size() {

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->n_gens(),
         "DefNewGeneration cannot be an oldest gen");
  assert(next_level == 1, "DefNewGeneration must be a young gen");

  Generation* old_gen = gch->old_gen();
  size_t old_size = old_gen->capacity();
@ -550,8 +552,9 @@ HeapWord* DefNewGeneration::expand_and_allocate(size_t size,

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters();
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
}

void DefNewGeneration::collect(bool full,
@ -23,10 +23,10 @@
 */

#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/metachunk.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/vmThread.hpp"
@ -23,9 +23,9 @@
 */

#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/thread.inline.hpp"

@ -39,7 +39,7 @@
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/generationSpec.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "memory/strongRootsScope.hpp"
|
||||
#include "memory/space.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
@ -78,21 +78,27 @@ enum GCH_strong_roots_tasks {
|
||||
};
|
||||
|
||||
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
|
||||
SharedHeap(),
|
||||
CollectedHeap(),
|
||||
_rem_set(NULL),
|
||||
_gen_policy(policy),
|
||||
_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
|
||||
_full_collections_completed(0)
|
||||
{
|
||||
assert(policy != NULL, "Sanity check");
|
||||
if (UseConcMarkSweepGC) {
|
||||
_workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
|
||||
/* are_GC_task_threads */true,
|
||||
/* are_ConcurrentGC_threads */false);
|
||||
_workers->initialize_workers();
|
||||
} else {
|
||||
// Serial GC does not use workers.
|
||||
_workers = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
jint GenCollectedHeap::initialize() {
|
||||
CollectedHeap::pre_initialize();
|
||||
|
||||
_n_gens = gen_policy()->number_of_generations();
|
||||
assert(_n_gens == 2, "There is no support for more than two generations");
|
||||
|
||||
// While there are no constraints in the GC code that HeapWordSize
|
||||
// be any particular value, there are multiple other areas in the
|
||||
// system which believe this to be true (e.g. oop->object_size in some
|
||||
@ -166,7 +172,8 @@ char* GenCollectedHeap::allocate(size_t alignment,
|
||||
}
|
||||
|
||||
void GenCollectedHeap::post_initialize() {
|
||||
SharedHeap::post_initialize();
|
||||
CollectedHeap::post_initialize();
|
||||
ref_processing_init();
|
||||
GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
|
||||
guarantee(policy->is_generation_policy(), "Illegal policy type");
|
||||
assert((_young_gen->kind() == Generation::DefNew) ||
|
||||
@ -185,7 +192,6 @@ void GenCollectedHeap::post_initialize() {
|
||||
}
|
||||
|
||||
void GenCollectedHeap::ref_processing_init() {
|
||||
SharedHeap::ref_processing_init();
|
||||
_young_gen->ref_processor_init();
|
||||
_old_gen->ref_processor_init();
|
||||
}
|
||||
@ -200,8 +206,7 @@ size_t GenCollectedHeap::used() const {
|
||||
|
||||
// Save the "used_region" for generations level and lower.
|
||||
void GenCollectedHeap::save_used_regions(int level) {
|
||||
assert(level >= 0, "Illegal level parameter");
|
||||
assert(level < _n_gens, "Illegal level parameter");
|
||||
assert(level == 0 || level == 1, "Illegal level parameter");
|
||||
if (level == 1) {
|
||||
_old_gen->save_used_region();
|
||||
}
|
||||
@ -417,7 +422,6 @@ void GenCollectedHeap::do_collection(bool full,
|
||||
assert(Heap_lock->is_locked(),
|
||||
"the requesting thread should have the Heap_lock");
|
||||
guarantee(!is_gc_active(), "collection is not reentrant");
|
||||
assert(max_level < n_gens(), "sanity check");
|
||||
|
||||
if (GC_locker::check_active_before_gc()) {
|
||||
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
|
||||
@ -435,7 +439,7 @@ void GenCollectedHeap::do_collection(bool full,
|
||||
{
|
||||
FlagSetting fl(_is_gc_active, true);
|
||||
|
||||
bool complete = full && (max_level == (n_gens()-1));
|
||||
bool complete = full && (max_level == 1 /* old */);
|
||||
const char* gc_cause_prefix = complete ? "Full GC" : "GC";
|
||||
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
|
||||
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
|
||||
@ -507,7 +511,7 @@ void GenCollectedHeap::do_collection(bool full,
|
||||
// Update "complete" boolean wrt what actually transpired --
|
||||
// for instance, a promotion failure could have led to
|
||||
// a whole heap collection.
|
||||
complete = complete || (max_level_collected == n_gens() - 1);
|
||||
complete = complete || (max_level_collected == 1 /* old */);
|
||||
|
||||
if (complete) { // We did a "major" collection
|
||||
// FIXME: See comment at pre_full_gc_dump call
|
||||
@ -524,7 +528,7 @@ void GenCollectedHeap::do_collection(bool full,
|
||||
}
|
||||
|
||||
// Adjust generation sizes.
|
||||
if (max_level_collected == 1) {
|
||||
if (max_level_collected == 1 /* old */) {
|
||||
_old_gen->compute_new_size();
|
||||
}
|
||||
_young_gen->compute_new_size();
|
||||
@ -560,7 +564,8 @@ HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab)
|
||||
}
|
||||
|
||||
void GenCollectedHeap::set_par_threads(uint t) {
|
||||
SharedHeap::set_par_threads(t);
|
||||
assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
|
||||
CollectedHeap::set_par_threads(t);
|
||||
set_n_termination(t);
|
||||
}
|
||||
|
||||
@ -586,7 +591,7 @@ void GenCollectedHeap::process_roots(bool activate_scope,
|
||||
CLDClosure* strong_cld_closure,
|
||||
CLDClosure* weak_cld_closure,
|
||||
CodeBlobClosure* code_roots) {
|
||||
StrongRootsScope srs(this, activate_scope);
|
||||
StrongRootsScope srs(activate_scope);
|
||||
|
||||
// General roots.
|
||||
assert(Threads::thread_claim_parity() != 0, "must have called prologue code");
|
||||
@ -606,7 +611,8 @@ void GenCollectedHeap::process_roots(bool activate_scope,
|
||||
// Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
|
||||
CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
|
||||
|
||||
Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
|
||||
bool is_par = n_par_threads() > 0;
|
||||
Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);
|
||||
|
||||
if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
|
||||
Universe::oops_do(strong_roots);
|
||||
@ -771,19 +777,19 @@ void GenCollectedHeap::collect(GCCause::Cause cause) {
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
} else if (cause == GCCause::_wb_young_gc) {
|
||||
// minor collection for WhiteBox API
|
||||
collect(cause, 0);
|
||||
collect(cause, 0 /* young */);
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
if (cause == GCCause::_scavenge_alot) {
|
||||
// minor collection only
|
||||
collect(cause, 0);
|
||||
collect(cause, 0 /* young */);
|
||||
} else {
|
||||
// Stop-the-world full collection
|
||||
collect(cause, n_gens() - 1);
|
||||
collect(cause, 1 /* old */);
|
||||
}
|
||||
#else
|
||||
// Stop-the-world full collection
|
||||
collect(cause, n_gens() - 1);
|
||||
collect(cause, 1 /* old */);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
@ -798,7 +804,7 @@ void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
|
||||
void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
|
||||
// The caller has the Heap_lock
|
||||
assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
|
||||
collect_locked(cause, n_gens() - 1);
|
||||
collect_locked(cause, 1 /* old */);
|
||||
}
|
||||
|
||||
// this is the private collection interface
|
||||
@ -854,7 +860,7 @@ void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
|
||||
do_full_collection(clear_all_soft_refs, _n_gens - 1);
|
||||
do_full_collection(clear_all_soft_refs, 1 /* old */);
|
||||
}
|
||||
|
||||
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
|
||||
@ -886,7 +892,7 @@ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
|
||||
clear_all_soft_refs /* clear_all_soft_refs */,
|
||||
0 /* size */,
|
||||
false /* is_tlab */,
|
||||
n_gens() - 1 /* max_level */);
|
||||
1 /* old */ /* max_level */);
|
||||
}
|
||||
}
|
||||
|
||||
@ -923,6 +929,11 @@ bool GenCollectedHeap::is_in_partial_collection(const void* p) {
|
||||
}
|
||||
#endif
|
||||
|
||||
void GenCollectedHeap::oop_iterate_no_header(OopClosure* cl) {
|
||||
NoHeaderExtendedOopClosure no_header_cl(cl);
|
||||
oop_iterate(&no_header_cl);
|
||||
}
|
||||
|
||||
void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
|
||||
_young_gen->oop_iterate(cl);
|
||||
_old_gen->oop_iterate(cl);
|
||||
@ -1092,11 +1103,6 @@ void GenCollectedHeap::generation_iterate(GenClosure* cl,
|
||||
}
|
||||
}
|
||||
|
||||
void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
|
||||
_young_gen->space_iterate(cl, true);
|
||||
_old_gen->space_iterate(cl, true);
|
||||
}
|
||||
|
||||
bool GenCollectedHeap::is_maximal_no_gc() const {
|
||||
return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
|
||||
}
|
||||
@ -1112,9 +1118,7 @@ GenCollectedHeap* GenCollectedHeap::heap() {
|
||||
return _gch;
|
||||
}
|
||||
|
||||
|
||||
void GenCollectedHeap::prepare_for_compaction() {
|
||||
guarantee(_n_gens == 2, "Wrong number of generations");
|
||||
// Start by compacting into same gen.
|
||||
CompactPoint cp(_old_gen);
|
||||
_old_gen->prepare_for_compaction(&cp);
|
||||
|
@ -26,15 +26,16 @@
|
||||
#define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
|
||||
|
||||
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "memory/collectorPolicy.hpp"
|
||||
#include "memory/generation.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
|
||||
class SubTasksDone;
|
||||
class FlexibleWorkGang;
|
||||
|
||||
// A "GenCollectedHeap" is a SharedHeap that uses generational
|
||||
// A "GenCollectedHeap" is a CollectedHeap that uses generational
|
||||
// collection. It has two generations, young and old.
|
||||
class GenCollectedHeap : public SharedHeap {
|
||||
class GenCollectedHeap : public CollectedHeap {
|
||||
friend class GenCollectorPolicy;
|
||||
friend class Generation;
|
||||
friend class DefNewGeneration;
|
||||
@ -51,10 +52,6 @@ class GenCollectedHeap : public SharedHeap {
|
||||
friend class GCCauseSetter;
|
||||
friend class VMStructs;
|
||||
public:
|
||||
enum SomeConstants {
|
||||
max_gens = 10
|
||||
};
|
||||
|
||||
friend class VM_PopulateDumpSharedSpace;
|
||||
|
||||
protected:
|
||||
@ -62,8 +59,6 @@ public:
|
||||
static GenCollectedHeap* _gch;
|
||||
|
||||
private:
|
||||
int _n_gens;
|
||||
|
||||
Generation* _young_gen;
|
||||
Generation* _old_gen;
|
||||
|
||||
@ -93,6 +88,8 @@ public:
|
||||
// In block contents verification, the number of header words to skip
|
||||
NOT_PRODUCT(static size_t _skip_header_HeapWords;)
|
||||
|
||||
FlexibleWorkGang* _workers;
|
||||
|
||||
protected:
|
||||
// Helper functions for allocation
|
||||
HeapWord* attempt_allocation(size_t size,
|
||||
@ -125,6 +122,8 @@ protected:
|
||||
public:
|
||||
GenCollectedHeap(GenCollectorPolicy *policy);
|
||||
|
||||
FlexibleWorkGang* workers() const { return _workers; }
|
||||
|
||||
GCStats* gc_stats(int level) const;
|
||||
|
||||
// Returns JNI_OK on success
|
||||
@ -223,6 +222,7 @@ public:
|
||||
}
|
||||
|
||||
// Iteration functions.
|
||||
void oop_iterate_no_header(OopClosure* cl);
|
||||
void oop_iterate(ExtendedOopClosure* cl);
|
||||
void object_iterate(ObjectClosure* cl);
|
||||
void safe_object_iterate(ObjectClosure* cl);
|
||||
@ -331,7 +331,6 @@ public:
|
||||
_old_gen->update_gc_stats(current_level, full);
|
||||
}
|
||||
|
||||
// Override.
|
||||
bool no_gc_in_progress() { return !is_gc_active(); }
|
||||
|
||||
// Override.
|
||||
@ -363,18 +362,11 @@ public:
|
||||
// If "old_to_young" determines the order.
|
||||
void generation_iterate(GenClosure* cl, bool old_to_young);
|
||||
|
||||
void space_iterate(SpaceClosure* cl);
|
||||
|
||||
// Return "true" if all generations have reached the
|
||||
// maximal committed limit that they can reach, without a garbage
|
||||
// collection.
|
||||
virtual bool is_maximal_no_gc() const;
|
||||
|
||||
int n_gens() const {
|
||||
assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
|
||||
return _n_gens;
|
||||
}
|
||||
|
||||
// This function returns the "GenRemSet" object that allows us to scan
|
||||
// generations in a fully generational heap.
|
||||
GenRemSet* rem_set() { return _rem_set; }
|
||||
@ -531,8 +523,8 @@ private:
|
||||
void record_gen_tops_before_GC() PRODUCT_RETURN;
|
||||
|
||||
protected:
|
||||
virtual void gc_prologue(bool full);
|
||||
virtual void gc_epilogue(bool full);
|
||||
void gc_prologue(bool full);
|
||||
void gc_epilogue(bool full);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
|
||||
|
hotspot/src/share/vm/memory/genOopClosures.cpp (new file, 30 lines)
@ -0,0 +1,30 @@
|
||||
/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
|
||||
// Generate Serial GC specialized oop_oop_iterate functions.
|
||||
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
|
@ -31,7 +31,6 @@
#include "memory/genOopClosures.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"

inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
@ -23,8 +23,11 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
void KlassToOopClosure::do_klass(Klass* k) {
|
||||
assert(_oop_closure != NULL, "Not initialized?");
|
||||
@ -61,19 +64,18 @@ void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
|
||||
}
|
||||
}
|
||||
|
||||
MarkingCodeBlobClosure::MarkScope::MarkScope(bool activate)
|
||||
: _active(activate)
|
||||
{
|
||||
if (_active) nmethod::oops_do_marking_prologue();
|
||||
}
|
||||
|
||||
MarkingCodeBlobClosure::MarkScope::~MarkScope() {
|
||||
if (_active) nmethod::oops_do_marking_epilogue();
|
||||
}
|
||||
|
||||
void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
|
||||
nmethod* nm = cb->as_nmethod_or_null();
|
||||
if (nm != NULL && !nm->test_set_oops_do_mark()) {
|
||||
do_nmethod(nm);
|
||||
}
|
||||
}
|
||||
|
||||
// Generate the *Klass::oop_oop_iterate functions for the base class
|
||||
// of the oop closures. These versions use the virtual do_oop calls,
|
||||
// instead of the devirtualized do_oop_nv version.
|
||||
ALL_KLASS_OOP_OOP_ITERATE_DEFN(ExtendedOopClosure, _v)
|
||||
|
||||
// Generate the *Klass::oop_oop_iterate functions
|
||||
// for the NoHeaderExtendedOopClosure helper class.
|
||||
ALL_KLASS_OOP_OOP_ITERATE_DEFN(NoHeaderExtendedOopClosure, _nv)
|
||||
|
@ -44,9 +44,7 @@ class Closure : public StackObj { };
|
||||
class OopClosure : public Closure {
|
||||
public:
|
||||
virtual void do_oop(oop* o) = 0;
|
||||
virtual void do_oop_v(oop* o) { do_oop(o); }
|
||||
virtual void do_oop(narrowOop* o) = 0;
|
||||
virtual void do_oop_v(narrowOop* o) { do_oop(o); }
|
||||
};
|
||||
|
||||
// ExtendedOopClosure adds extra code to be run during oop iterations.
|
||||
@ -74,11 +72,9 @@ class ExtendedOopClosure : public OopClosure {
|
||||
// Currently, only CMS and G1 need these.
|
||||
|
||||
virtual bool do_metadata() { return do_metadata_nv(); }
|
||||
bool do_metadata_v() { return do_metadata(); }
|
||||
bool do_metadata_nv() { return false; }
|
||||
|
||||
virtual void do_klass(Klass* k) { do_klass_nv(k); }
|
||||
void do_klass_v(Klass* k) { do_klass(k); }
|
||||
void do_klass_nv(Klass* k) { ShouldNotReachHere(); }
|
||||
|
||||
virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
|
||||
@ -87,6 +83,14 @@ class ExtendedOopClosure : public OopClosure {
|
||||
// location without an intervening "major reset" (like the end of a GC).
|
||||
virtual bool idempotent() { return false; }
|
||||
virtual bool apply_to_weak_ref_discovered_field() { return false; }
|
||||
|
||||
#ifdef ASSERT
|
||||
// Default verification of each visited oop field.
|
||||
template <typename T> void verify(T* p);
|
||||
|
||||
// Can be used by subclasses to turn off the default verification of oop fields.
|
||||
virtual bool should_verify_oops() { return true; }
|
||||
#endif
|
||||
};
|
||||
|
||||
// Wrapper closure only used to implement oop_iterate_no_header().
|
||||
@ -147,7 +151,6 @@ class CLDToOopClosure : public CLDClosure {
|
||||
};
|
||||
|
||||
class CLDToKlassAndOopClosure : public CLDClosure {
|
||||
friend class SharedHeap;
|
||||
friend class G1CollectedHeap;
|
||||
protected:
|
||||
OopClosure* _oop_closure;
|
||||
@ -284,16 +287,6 @@ class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
|
||||
// Called for each code blob, but at most once per unique blob.
|
||||
|
||||
virtual void do_code_blob(CodeBlob* cb);
|
||||
|
||||
class MarkScope : public StackObj {
|
||||
protected:
|
||||
bool _active;
|
||||
public:
|
||||
MarkScope(bool activate = true);
|
||||
// = { if (active) nmethod::oops_do_marking_prologue(); }
|
||||
~MarkScope();
|
||||
// = { if (active) nmethod::oops_do_marking_epilogue(); }
|
||||
};
|
||||
};
|
||||
|
||||
// MonitorClosure is used for iterating over monitors in the monitors cache
|
||||
@ -364,16 +357,33 @@ class SymbolClosure : public StackObj {
  }
};

// The two class template specializations are used to dispatch calls
// to the ExtendedOopClosure functions. If use_non_virtual_call is true,
// the non-virtual versions are called (E.g. do_oop_nv), otherwise the
// virtual versions are called (E.g. do_oop).

// Helper defines for ExtendedOopClosure
template <bool use_non_virtual_call>
class Devirtualizer {};

#define if_do_metadata_checked(closure, nv_suffix)                    \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
         "Inconsistency in do_metadata");                             \
  if (closure->do_metadata##nv_suffix())
// Dispatches to the non-virtual functions.
template <> class Devirtualizer<true> {
 public:
  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <class OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
  template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
};

#define assert_should_ignore_metadata(closure, nv_suffix) \
  assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")
// Dispatches to the virtual functions.
template <> class Devirtualizer<false> {
 public:
  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <class OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
  template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
};

// Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions.
#define nvs_nv_to_bool true
#define nvs_v_to_bool false
#define nvs_to_bool(nv_suffix) nvs##nv_suffix##_to_bool

#endif // SHARE_VM_MEMORY_ITERATOR_HPP
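The comment block above describes the dispatch trick: Devirtualizer<true> calls the closure's statically bound do_oop_nv (so it can be inlined into the generated oop_oop_iterate bodies), while Devirtualizer<false> goes through the virtual do_oop. A small, self-contained illustration of the same pattern outside HotSpot, with a toy closure type:

  #include <iostream>

  // Toy closure with a virtual entry point and a non-virtual ("_nv") twin.
  struct PrintClosure {
    virtual void do_value(int* p) { std::cout << "virtual: "     << *p << '\n'; }
    void do_value_nv(int* p)      { std::cout << "non-virtual: " << *p << '\n'; }
  };

  // Primary template is empty; the specializations choose the call form.
  template <bool use_non_virtual_call> struct Devirtualizer;

  template <> struct Devirtualizer<true> {
    template <class ClosureType> static void do_value(ClosureType* cl, int* p) {
      cl->do_value_nv(p);   // statically bound, inlinable
    }
  };

  template <> struct Devirtualizer<false> {
    template <class ClosureType> static void do_value(ClosureType* cl, int* p) {
      cl->do_value(p);      // dispatched through the vtable
    }
  };

  int main() {
    PrintClosure cl;
    int x = 42;
    Devirtualizer<true>::do_value(&cl, &x);    // prints "non-virtual: 42"
    Devirtualizer<false>::do_value(&cl, &x);   // prints "virtual: 42"
    return 0;
  }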
@ -28,6 +28,12 @@
|
||||
#include "classfile/classLoaderData.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/klass.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/instanceMirrorKlass.inline.hpp"
|
||||
#include "oops/instanceClassLoaderKlass.inline.hpp"
|
||||
#include "oops/instanceRefKlass.inline.hpp"
|
||||
#include "oops/objArrayKlass.inline.hpp"
|
||||
#include "oops/typeArrayKlass.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
|
||||
@ -44,4 +50,63 @@ inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
|
||||
|
||||
inline void MetadataAwareOopClosure::do_klass(Klass* k) { do_klass_nv(k); }
|
||||
|
||||
#ifdef ASSERT
|
||||
// This verification is applied to all visited oops.
|
||||
// The closures can turn it off by overriding should_verify_oops().
|
||||
template <typename T>
|
||||
void ExtendedOopClosure::verify(T* p) {
|
||||
if (should_verify_oops()) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in_closed_subset(o),
|
||||
err_msg("should be in closed *p " PTR_FORMAT " " PTR_FORMAT, p2i(p), p2i(o)));
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// Implementation of the non-virtual do_oop dispatch.
|
||||
|
||||
template <class OopClosureType, typename T>
|
||||
inline void Devirtualizer<true>::do_oop(OopClosureType* closure, T* p) {
|
||||
debug_only(closure->verify(p));
|
||||
closure->do_oop_nv(p);
|
||||
}
|
||||
template <class OopClosureType>
|
||||
inline void Devirtualizer<true>::do_klass(OopClosureType* closure, Klass* k) {
|
||||
closure->do_klass_nv(k);
|
||||
}
|
||||
template <class OopClosureType>
|
||||
inline bool Devirtualizer<true>::do_metadata(OopClosureType* closure) {
|
||||
// Make sure the non-virtual and the virtual versions match.
|
||||
assert(closure->do_metadata_nv() == closure->do_metadata(), "Inconsistency in do_metadata");
|
||||
return closure->do_metadata_nv();
|
||||
}
|
||||
|
||||
// Implementation of the virtual do_oop dispatch.
|
||||
|
||||
template <class OopClosureType, typename T>
|
||||
void Devirtualizer<false>::do_oop(OopClosureType* closure, T* p) {
|
||||
debug_only(closure->verify(p));
|
||||
closure->do_oop(p);
|
||||
}
|
||||
template <class OopClosureType>
|
||||
void Devirtualizer<false>::do_klass(OopClosureType* closure, Klass* k) {
|
||||
closure->do_klass(k);
|
||||
}
|
||||
template <class OopClosureType>
|
||||
bool Devirtualizer<false>::do_metadata(OopClosureType* closure) {
|
||||
return closure->do_metadata();
|
||||
}
|
||||
|
||||
// The list of all "specializable" oop_oop_iterate function definitions.
|
||||
#define ALL_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
ALL_INSTANCE_REF_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
|
||||
|
@ -1,94 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/stringTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "memory/sharedHeap.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomic.inline.hpp"
|
||||
#include "runtime/fprofiler.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
#include "utilities/workgroup.hpp"
|
||||
|
||||
SharedHeap* SharedHeap::_sh;
|
||||
|
||||
SharedHeap::SharedHeap() :
|
||||
CollectedHeap(),
|
||||
_workers(NULL)
|
||||
{
|
||||
_sh = this;  // _sh is static, should be set only once.
|
||||
if (UseConcMarkSweepGC || UseG1GC) {
|
||||
_workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
|
||||
/* are_GC_task_threads */true,
|
||||
/* are_ConcurrentGC_threads */false);
|
||||
if (_workers == NULL) {
|
||||
vm_exit_during_initialization("Failed necessary allocation.");
|
||||
} else {
|
||||
_workers->initialize_workers();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool SharedHeap::heap_lock_held_for_gc() {
|
||||
Thread* t = Thread::current();
|
||||
return Heap_lock->owned_by_self()
|
||||
|| ( (t->is_GC_task_thread() || t->is_VM_thread())
|
||||
&& _thread_holds_heap_lock_for_gc);
|
||||
}
|
||||
|
||||
void SharedHeap::set_par_threads(uint t) {
|
||||
assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
|
||||
_n_par_threads = t;
|
||||
}
|
||||
|
||||
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
|
||||
: MarkScope(activate), _sh(heap)
|
||||
{
|
||||
if (_active) {
|
||||
Threads::change_thread_claim_parity();
|
||||
// Zero the claimed high water mark in the StringTable
|
||||
StringTable::clear_parallel_claimed_index();
|
||||
}
|
||||
}
|
||||
|
||||
SharedHeap::StrongRootsScope::~StrongRootsScope() {
|
||||
Threads::assert_all_threads_claimed();
|
||||
}
|
||||
|
||||
void SharedHeap::set_barrier_set(BarrierSet* bs) {
|
||||
_barrier_set = bs;
|
||||
// Cached barrier set for fast access in oops
|
||||
oopDesc::set_bs(bs);
|
||||
}
|
||||
|
||||
void SharedHeap::post_initialize() {
|
||||
CollectedHeap::post_initialize();
|
||||
ref_processing_init();
|
||||
}
|
||||
|
||||
void SharedHeap::ref_processing_init() {}
|
@ -1,213 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
|
||||
#define SHARE_VM_MEMORY_SHAREDHEAP_HPP
|
||||
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "memory/generation.hpp"
|
||||
|
||||
// A "SharedHeap" is an implementation of a java heap for HotSpot. This
|
||||
// is an abstract class: there may be many different kinds of heaps. This
|
||||
// class defines the functions that a heap must implement, and contains
|
||||
// infrastructure common to all heaps.
|
||||
|
||||
class Generation;
|
||||
class BarrierSet;
|
||||
class GenRemSet;
|
||||
class Space;
|
||||
class SpaceClosure;
|
||||
class OopClosure;
|
||||
class OopsInGenClosure;
|
||||
class ObjectClosure;
|
||||
class SubTasksDone;
|
||||
class WorkGang;
|
||||
class FlexibleWorkGang;
|
||||
class CollectorPolicy;
|
||||
class KlassClosure;
|
||||
|
||||
// Note on use of FlexibleWorkGang's for GC.
// There are three places where task completion is determined:
//  1) ParallelTaskTerminator::offer_termination() where _n_threads
//  must be set to the correct value so that the count of workers that
//  have offered termination will exactly match the number
//  working on the task.  Tasks such as those derived from GCTask
//  use ParallelTaskTerminator's.  Tasks that want load balancing
//  by work stealing use this method to gauge completion.
//  2) SubTasksDone has a variable _n_threads that is used in
//  all_tasks_completed() to determine completion.  all_tasks_completed()
//  counts the number of tasks that have been done and then resets
//  the SubTasksDone so that it can be used again.  When the number of
//  tasks is set to the number of GC workers, then _n_threads must
//  be set to the number of active GC workers.  G1RootProcessor and
//  GenCollectedHeap have SubTasksDone.
//  3) SequentialSubTasksDone has an _n_threads that is used in
//  a way similar to SubTasksDone and has the same dependency on the
//  number of active GC workers.  CompactibleFreeListSpace and Space
//  have SequentialSubTasksDone's.
//
// Examples of using SubTasksDone and SequentialSubTasksDone:
//  G1RootProcessor and GenCollectedHeap::process_roots() use
//  SubTasksDone* _process_strong_tasks to claim tasks for workers.
//
//  GenCollectedHeap::gen_process_roots() calls
//      rem_set()->younger_refs_iterate()
//  to scan the card table, which eventually calls down into
//  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
//  uses SequentialSubTasksDone* _pst to claim tasks.
//  Both SubTasksDone and SequentialSubTasksDone call their method
//  all_tasks_completed() to count the number of GC workers that have
//  finished their work.  That logic is "when all the workers are
//  finished the tasks are finished".
//
//  The pattern that appears in the code is to set _n_threads
//  to a value > 1 before a task that you would like executed in parallel
//  and then to set it to 0 after that task has completed.  A value of
//  0 is a "special" value in set_n_threads() which translates to
//  setting _n_threads to 1.
//
//  Some code uses _n_termination to decide if work should be done in
//  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
//  is an example of such code.  Look for variable "is_par" for other
//  examples.
//
//  The active_workers is not reset to 0 after a parallel phase.  Its
//  value may be used in later phases and in one instance at least
//  (the parallel remark) it has to be used (the parallel remark depends
//  on the partitioning done in the previous parallel scavenge).
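The task-claiming protocol described in the comment above is easier to see in code. Below is a minimal, self-contained C++ sketch of the same idea; SimpleSubTasksDone and try_claim_task are invented names for illustration and are not HotSpot's actual SubTasksDone API. Each subtask is claimed at most once via a CAS, and the last worker to report completion resets the state so the object can be reused in the next phase.

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical, simplified stand-in for the SubTasksDone idea described above.
class SimpleSubTasksDone {
  static const uint32_t kMaxTasks = 32;
  std::atomic<uint32_t> _claimed[kMaxTasks]; // 0 = unclaimed, 1 = claimed
  std::atomic<uint32_t> _threads_completed;
  uint32_t _n_tasks;
  uint32_t _n_threads;                       // number of workers expected to report

 public:
  SimpleSubTasksDone(uint32_t n_tasks, uint32_t n_threads)
      : _threads_completed(0), _n_tasks(n_tasks),
        _n_threads(n_threads == 0 ? 1 : n_threads) { // 0 is treated as "serial"
    for (uint32_t i = 0; i < kMaxTasks; i++) {
      _claimed[i].store(0, std::memory_order_relaxed);
    }
  }

  // Returns true if the calling worker owns subtask t, false if another worker got it first.
  bool try_claim_task(uint32_t t) {
    uint32_t expected = 0;
    return _claimed[t].compare_exchange_strong(expected, 1);
  }

  // Each worker calls this once when it has run out of subtasks. The last
  // worker to arrive resets the object so it can be reused in the next phase.
  void all_tasks_completed() {
    uint32_t done = _threads_completed.fetch_add(1) + 1;
    if (done == _n_threads) {
      for (uint32_t i = 0; i < _n_tasks; i++) {
        _claimed[i].store(0, std::memory_order_relaxed);
      }
      _threads_completed.store(0);
    }
  }
};

int main() {
  SimpleSubTasksDone done(/*n_tasks=*/4, /*n_threads=*/1);
  for (uint32_t t = 0; t < 4; t++) {
    if (done.try_claim_task(t)) {
      // ... this worker would execute subtask t here ...
    }
  }
  done.all_tasks_completed();   // last (here: only) worker resets the state
  return 0;
}
```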
|
||||
|
||||
class SharedHeap : public CollectedHeap {
|
||||
friend class VMStructs;
|
||||
|
||||
friend class VM_GC_Operation;
|
||||
friend class VM_CGC_Operation;
|
||||
|
||||
protected:
|
||||
// There should be only a single instance of "SharedHeap" in a program.
|
||||
// This is enforced with the protected constructor below, which will also
|
||||
// set the static pointer "_sh" to that instance.
|
||||
static SharedHeap* _sh;
|
||||
|
||||
// If we're doing parallel GC, use this gang of threads.
|
||||
FlexibleWorkGang* _workers;
|
||||
|
||||
// Full initialization is done in a concrete subtype's "initialize"
|
||||
// function.
|
||||
SharedHeap();
|
||||
|
||||
// Returns true if the calling thread holds the heap lock,
|
||||
// or the calling thread is a par gc thread and the heap_lock is held
|
||||
// by the vm thread doing a gc operation.
|
||||
bool heap_lock_held_for_gc();
|
||||
// True if the heap_lock is held by a non-gc thread invoking a gc
|
||||
// operation.
|
||||
bool _thread_holds_heap_lock_for_gc;
|
||||
|
||||
public:
|
||||
static SharedHeap* heap() { return _sh; }
|
||||
|
||||
void set_barrier_set(BarrierSet* bs);
|
||||
|
||||
// Does operations required after initialization has been done.
|
||||
virtual void post_initialize();
|
||||
|
||||
// Initialization of ("weak") reference processing support
|
||||
virtual void ref_processing_init();
|
||||
|
||||
// Iteration functions.
|
||||
void oop_iterate(ExtendedOopClosure* cl) = 0;
|
||||
|
||||
// Iterate over all spaces in use in the heap, in an undefined order.
|
||||
virtual void space_iterate(SpaceClosure* cl) = 0;
|
||||
|
||||
// A SharedHeap will contain some number of spaces. This finds the
|
||||
// space whose reserved area contains the given address, or else returns
|
||||
// NULL.
|
||||
virtual Space* space_containing(const void* addr) const = 0;
|
||||
|
||||
bool no_gc_in_progress() { return !is_gc_active(); }
|
||||
|
||||
// Note, the below comment needs to be updated to reflect the changes
// introduced by JDK-8076225. This should be done as part of JDK-8076289.
//
// Some collectors will perform "process_strong_roots" in parallel.
// Such a call will involve claiming some fine-grained tasks, such as
// scanning of threads.  To make this process simpler, we provide the
// "strong_roots_parity()" method.  Collectors that start parallel tasks
// whose threads invoke "process_strong_roots" must
// call "change_strong_roots_parity" in sequential code starting such a
// task.  (This also means that a parallel thread may only call
// process_strong_roots once.)
//
// For calls to process_roots by sequential code, the parity is
// updated automatically.
//
// The idea is that objects representing fine-grained tasks, such as
// threads, will contain a "parity" field.  A task is claimed in the
// current "process_roots" call only if its parity field is the
// same as the "strong_roots_parity"; task claiming is accomplished by
// updating the parity field to the strong_roots_parity with a CAS.
//
// If the client meets this spec, then strong_roots_parity() will have
// the following properties:
// a) to return a different value than was returned before the last
//    call to change_strong_roots_parity, and
// b) to never return a distinguished value (zero) with which such
//    task-claiming variables may be initialized, to indicate "never
//    claimed".
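A minimal sketch of the parity-based claiming scheme described above, assuming a hypothetical ClaimableTask type rather than HotSpot's Thread class. The global parity alternates between 1 and 2 (never the distinguished value 0), and a worker claims a task by CAS-ing the task's parity field up to the current parity.

```cpp
#include <atomic>

// Hypothetical task object with a claim-parity field; 0 means "never claimed".
struct ClaimableTask {
  std::atomic<int> claim_parity{0};
};

// Global parity, flipped in sequential code before a parallel strong-roots pass.
static std::atomic<int> g_strong_roots_parity{1};

// Called by the sequential code that launches the parallel tasks.
static void change_strong_roots_parity() {
  // Alternate between 1 and 2 so the distinguished value 0 is never produced.
  int cur = g_strong_roots_parity.load(std::memory_order_relaxed);
  g_strong_roots_parity.store(cur == 1 ? 2 : 1, std::memory_order_release);
}

// Called by parallel workers: only the first caller per pass gets the task.
static bool try_claim(ClaimableTask* task) {
  int parity = g_strong_roots_parity.load(std::memory_order_acquire);
  int seen = task->claim_parity.load(std::memory_order_relaxed);
  if (seen == parity) {
    return false;                       // already claimed in this pass
  }
  // CAS from whatever was there before to the current parity.
  return task->claim_parity.compare_exchange_strong(seen, parity);
}

int main() {
  ClaimableTask task;
  change_strong_roots_parity();         // start of a new parallel pass
  bool first  = try_claim(&task);       // true: this caller owns the task
  bool second = try_claim(&task);       // false: already claimed this pass
  return (first && !second) ? 0 : 1;
}
```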
|
||||
public:
|
||||
|
||||
// Call these in sequential code around process_roots.
|
||||
// strong_roots_prologue calls change_strong_roots_parity, if
|
||||
// parallel tasks are enabled.
|
||||
class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
|
||||
SharedHeap* _sh;
|
||||
|
||||
public:
|
||||
StrongRootsScope(SharedHeap* heap, bool activate = true);
|
||||
~StrongRootsScope();
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
public:
|
||||
FlexibleWorkGang* workers() const { return _workers; }
|
||||
|
||||
// The functions below are helper functions that a subclass of
|
||||
// "SharedHeap" can use in the implementation of its virtual
|
||||
// functions.
|
||||
|
||||
public:
|
||||
|
||||
// Do anything common to GC's.
|
||||
virtual void gc_prologue(bool full) = 0;
|
||||
virtual void gc_epilogue(bool full) = 0;
|
||||
|
||||
// Sets the number of parallel threads that will be doing tasks
|
||||
// (such as process roots) subsequently.
|
||||
virtual void set_par_threads(uint t);
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP
|
@ -31,6 +31,7 @@
|
||||
#include "memory/blockOffsetTable.inline.hpp"
|
||||
#include "memory/defNewGeneration.hpp"
|
||||
#include "memory/genCollectedHeap.hpp"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/space.hpp"
|
||||
#include "memory/space.inline.hpp"
|
||||
#include "memory/universe.inline.hpp"
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,37 +22,32 @@
*
*/

package sun.jvm.hotspot.memory;

import java.io.*;
import java.util.*;

import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.gc_interface.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;

public abstract class SharedHeap extends CollectedHeap {
  private static VirtualConstructor ctor;

  static {
    VM.registerVMInitializedObserver(new Observer() {
        public void update(Observable o, Object data) {
          initialize(VM.getVM().getTypeDataBase());
        }
      });
  }

  private static synchronized void initialize(TypeDataBase db) {
    Type type = db.lookupType("SharedHeap");
    ctor = new VirtualConstructor(db);
  }

  public SharedHeap(Address addr) {
    super(addr);
  }

  public CollectedHeapName kind() {
    return CollectedHeapName.SHARED_HEAP;
  }
}

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/nmethod.hpp"
#include "memory/strongRootsScope.hpp"
#include "runtime/thread.hpp"

MarkScope::MarkScope(bool activate) : _active(activate) {
  if (_active) {
    nmethod::oops_do_marking_prologue();
  }
}

MarkScope::~MarkScope() {
  if (_active) {
    nmethod::oops_do_marking_epilogue();
  }
}

StrongRootsScope::StrongRootsScope(bool activate) : MarkScope(activate) {
  if (_active) {
    Threads::change_thread_claim_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}

StrongRootsScope::~StrongRootsScope() {
  Threads::assert_all_threads_claimed();
}
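MarkScope and StrongRootsScope above are plain RAII guards: the constructor performs the per-pass setup and the destructor the matching teardown, with a bool to disable the scope without changing control flow. A self-contained sketch of that pattern, using an invented ExampleMarkScope rather than the real classes:

```cpp
#include <cstdio>

// Self-contained illustration of the RAII pattern MarkScope/StrongRootsScope use:
// the constructor runs the per-pass prologue, the destructor the matching epilogue,
// and a bool lets callers disable the scope without changing control flow.
class ExampleMarkScope {
  bool _active;
 public:
  explicit ExampleMarkScope(bool activate = true) : _active(activate) {
    if (_active) {
      std::printf("prologue: set up marking state\n");    // stands in for oops_do_marking_prologue()
    }
  }
  ~ExampleMarkScope() {
    if (_active) {
      std::printf("epilogue: tear down marking state\n"); // stands in for oops_do_marking_epilogue()
    }
  }
};

int main() {
  {
    ExampleMarkScope scope(true);   // setup happens here
    // ... do the root-processing work while the scope is live ...
  }                                 // teardown happens here, even on early exit
  return 0;
}
```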
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,24 +22,25 @@
*
*/

#ifndef SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
#define SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP

#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#endif // INCLUDE_ALL_GCS

// ParallelScavengeHeap methods

inline void oopDesc::push_contents(PSPromotionManager* pm) {
  Klass* k = klass();
  if (!k->oop_is_typeArray()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_push_contents(pm, this);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
}

#endif // SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP

#ifndef SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP
#define SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP

#include "memory/allocation.hpp"

class MarkScope : public StackObj {
 protected:
  bool _active;
 public:
  MarkScope(bool activate = true);
  ~MarkScope();
};

// Sets up and tears down the required state for parallel root processing.

class StrongRootsScope : public MarkScope {
 public:
  StrongRootsScope(bool activate = true);
  ~StrongRootsScope();
};

#endif // SHARE_VM_MEMORY_STRONGROOTSSCOPE_HPP
|
@ -36,6 +36,9 @@
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/parNew/parOopClosures.hpp"
|
||||
#endif
|
||||
|
||||
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
|
||||
size_t initial_byte_size, int level,
|
||||
|
@ -1,146 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/oopFactory.hpp"
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "oops/instanceClassLoaderKlass.hpp"
|
||||
#include "oops/instanceMirrorKlass.hpp"
|
||||
#include "oops/instanceOop.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/symbol.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Macro to define InstanceClassLoaderKlass::oop_oop_iterate for virtual/nonvirtual for
|
||||
// all closures. Macros calling macros above for each oop size.
|
||||
// Since ClassLoader objects have only a pointer to the loader_data, they are not
|
||||
// compressed nor does the pointer move.
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)\
|
||||
\
|
||||
int InstanceClassLoaderKlass:: \
|
||||
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* Get size before changing pointers */ \
|
||||
int size = InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
|
||||
\
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj); \
|
||||
/* cld can be null if we have a non-registered class loader. */ \
|
||||
if (cld != NULL) { \
|
||||
closure->do_class_loader_data(cld); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
return size; \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceClassLoaderKlass:: \
|
||||
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* Get size before changing pointers */ \
|
||||
int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
|
||||
return size; \
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceClassLoaderKlass:: \
|
||||
oop_oop_iterate##nv_suffix##_m(oop obj, \
|
||||
OopClosureType* closure, \
|
||||
MemRegion mr) { \
|
||||
int size = InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
|
||||
\
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
if (mr.contains(obj)) { \
|
||||
ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj); \
|
||||
/* cld can be null if we have a non-registered class loader. */ \
|
||||
if (cld != NULL) { \
|
||||
closure->do_class_loader_data(cld); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
return size; \
|
||||
}
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN)
|
||||
#if INCLUDE_ALL_GCS
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
|
||||
void InstanceClassLoaderKlass::oop_follow_contents(oop obj) {
|
||||
InstanceKlass::oop_follow_contents(obj);
|
||||
ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
|
||||
|
||||
// We must NULL check here, since the class loader
|
||||
// can be found before the loader data has been set up.
|
||||
if(loader_data != NULL) {
|
||||
MarkSweep::follow_class_loader(loader_data);
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void InstanceClassLoaderKlass::oop_follow_contents(ParCompactionManager* cm,
|
||||
oop obj) {
|
||||
InstanceKlass::oop_follow_contents(cm, obj);
|
||||
ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
|
||||
if (loader_data != NULL) {
|
||||
PSParallelCompact::follow_class_loader(cm, loader_data);
|
||||
}
|
||||
}
|
||||
|
||||
void InstanceClassLoaderKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
|
||||
InstanceKlass::oop_push_contents(pm, obj);
|
||||
|
||||
// This is called by the young collector. It will already have taken care of
|
||||
// all class loader data. So, we don't have to follow the class loader ->
|
||||
// class loader data link.
|
||||
}
|
||||
|
||||
int InstanceClassLoaderKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
|
||||
InstanceKlass::oop_update_pointers(cm, obj);
|
||||
return size_helper();
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
@ -48,34 +48,60 @@ public:
|
||||
|
||||
InstanceClassLoaderKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }
|
||||
|
||||
// Iterators
|
||||
int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
|
||||
return oop_oop_iterate_v(obj, blk);
|
||||
}
|
||||
int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
|
||||
return oop_oop_iterate_v_m(obj, blk, mr);
|
||||
}
|
||||
// GC specific object visitors
|
||||
//
|
||||
// Mark Sweep
|
||||
void oop_ms_follow_contents(oop obj);
|
||||
int oop_ms_adjust_pointers(oop obj);
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel Scavenge
|
||||
void oop_ps_push_contents( oop obj, PSPromotionManager* pm);
|
||||
// Parallel Compact
|
||||
void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
|
||||
void oop_pc_update_pointers(oop obj);
|
||||
#endif
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
|
||||
// Oop fields (and metadata) iterators
|
||||
// [nv = true] Use non-virtual calls to do_oop_nv.
|
||||
// [nv = false] Use virtual calls to do_oop.
|
||||
//
|
||||
// The InstanceClassLoaderKlass iterators also visit the CLD pointer (or the mirror of anonymous klasses).
|
||||
|
||||
private:
|
||||
// Forward iteration
|
||||
// Iterate over the oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate(oop obj, OopClosureType* closure);
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Reverse iteration
|
||||
// Iterate over the oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
|
||||
#endif
|
||||
|
||||
// Bounded range iteration
|
||||
// Iterate over the oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
public:
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Garbage collection
|
||||
void oop_follow_contents(oop obj);
|
||||
|
||||
// Parallel Scavenge and Parallel Old
|
||||
PARALLEL_GC_DECLS
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_HPP
|
||||
|
110
hotspot/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp
Normal file
@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
|
||||
#define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
|
||||
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "oops/instanceClassLoaderKlass.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
|
||||
int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
|
||||
|
||||
if (Devirtualizer<nv>::do_metadata(closure)) {
|
||||
ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
|
||||
// cld can be null if we have a non-registered class loader.
|
||||
if (cld != NULL) {
|
||||
closure->do_class_loader_data(cld);
|
||||
}
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
|
||||
int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
|
||||
|
||||
assert(!Devirtualizer<nv>::do_metadata(closure),
|
||||
"Code to handle metadata is not implemented");
|
||||
|
||||
return size;
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
|
||||
|
||||
if (Devirtualizer<nv>::do_metadata(closure)) {
|
||||
if (mr.contains(obj)) {
|
||||
ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
|
||||
// cld can be null if we have a non-registered class loader.
|
||||
if (cld != NULL) {
|
||||
closure->do_class_loader_data(cld);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceClassLoaderKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceClassLoaderKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
|
||||
#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceClassLoaderKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
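The Devirtualizer<nv> calls in this new file are what let a single template body serve both the specialized (statically bound) and the generic (virtual) closure paths. Below is a stand-alone sketch of that dispatch pattern; ExampleDevirtualizer and ExampleClosure are invented for illustration and are not HotSpot's actual Devirtualizer.

```cpp
#include <cstdio>

// A closure with both a virtual entry point and a statically bindable one.
struct ExampleClosure {
  virtual void do_oop(int* p) { std::printf("virtual do_oop(%d)\n", *p); }
  void do_oop_nv(int* p)      { std::printf("non-virtual do_oop_nv(%d)\n", *p); }
  virtual ~ExampleClosure() {}
};

// Compile-time switch: nv == true binds to do_oop_nv, nv == false to the virtual call.
template <bool nv>
struct ExampleDevirtualizer {
  template <class ClosureType>
  static void do_oop(ClosureType* cl, int* p) { cl->do_oop(p); }    // virtual dispatch
};

template <>
struct ExampleDevirtualizer<true> {
  template <class ClosureType>
  static void do_oop(ClosureType* cl, int* p) { cl->do_oop_nv(p); } // inlined, no vtable
};

// One template body, two instantiations: the hot GC paths use nv == true.
template <bool nv, class ClosureType>
void iterate_fields(int* fields, int count, ClosureType* cl) {
  for (int i = 0; i < count; i++) {
    ExampleDevirtualizer<nv>::do_oop(cl, &fields[i]);
  }
}

int main() {
  int fields[3] = {1, 2, 3};
  ExampleClosure cl;
  iterate_fields<true>(fields, 3, &cl);   // specialized, devirtualized path
  iterate_fields<false>(fields, 3, &cl);  // generic, virtual path
  return 0;
}
```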
|
@ -28,12 +28,10 @@
|
||||
#include "classfile/verifier.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "interpreter/oopMapCache.hpp"
|
||||
#include "interpreter/rewriter.hpp"
|
||||
#include "jvmtifiles/jvmti.h"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/heapInspection.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
@ -41,7 +39,7 @@
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
#include "oops/fieldStreams.hpp"
|
||||
#include "oops/instanceClassLoaderKlass.hpp"
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/instanceMirrorKlass.hpp"
|
||||
#include "oops/instanceOop.hpp"
|
||||
#include "oops/klass.inline.hpp"
|
||||
@ -64,17 +62,6 @@
|
||||
#include "services/threadService.hpp"
|
||||
#include "utilities/dtrace.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_Compiler.hpp"
|
||||
#endif
|
||||
@ -1993,288 +1980,6 @@ bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
|
||||
}
|
||||
#endif //PRODUCT
|
||||
|
||||
|
||||
// Garbage collection
|
||||
|
||||
#ifdef ASSERT
|
||||
template <class T> void assert_is_in(T *p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in(o), "should be in heap");
|
||||
}
|
||||
}
|
||||
template <class T> void assert_is_in_closed_subset(T *p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in_closed_subset(o),
|
||||
err_msg("should be in closed *p " INTPTR_FORMAT " " INTPTR_FORMAT, (address)p, (address)o));
|
||||
}
|
||||
}
|
||||
template <class T> void assert_is_in_reserved(T *p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
|
||||
}
|
||||
}
|
||||
template <class T> void assert_nothing(T *p) {}
|
||||
|
||||
#else
|
||||
template <class T> void assert_is_in(T *p) {}
|
||||
template <class T> void assert_is_in_closed_subset(T *p) {}
|
||||
template <class T> void assert_is_in_reserved(T *p) {}
|
||||
template <class T> void assert_nothing(T *p) {}
|
||||
#endif // ASSERT
|
||||
|
||||
//
|
||||
// Macros that iterate over areas of oops which are specialized on type of
|
||||
// oop pointer either narrow or wide, depending on UseCompressedOops
|
||||
//
|
||||
// Parameters are:
|
||||
// T - type of oop to point to (either oop or narrowOop)
|
||||
// start_p - starting pointer for region to iterate over
|
||||
// count - number of oops or narrowOops to iterate over
|
||||
// do_oop - action to perform on each oop (it's arbitrary C code which
|
||||
// makes it more efficient to put in a macro rather than making
|
||||
// it a template function)
|
||||
// assert_fn - assert function which is template function because performance
|
||||
// doesn't matter when enabled.
|
||||
#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
|
||||
T, start_p, count, do_oop, \
|
||||
assert_fn) \
|
||||
{ \
|
||||
T* p = (T*)(start_p); \
|
||||
T* const end = p + (count); \
|
||||
while (p < end) { \
|
||||
(assert_fn)(p); \
|
||||
do_oop; \
|
||||
++p; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
|
||||
T, start_p, count, do_oop, \
|
||||
assert_fn) \
|
||||
{ \
|
||||
T* const start = (T*)(start_p); \
|
||||
T* p = start + (count); \
|
||||
while (start < p) { \
|
||||
--p; \
|
||||
(assert_fn)(p); \
|
||||
do_oop; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
|
||||
T, start_p, count, low, high, \
|
||||
do_oop, assert_fn) \
|
||||
{ \
|
||||
T* const l = (T*)(low); \
|
||||
T* const h = (T*)(high); \
|
||||
assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
|
||||
mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
|
||||
"bounded region must be properly aligned"); \
|
||||
T* p = (T*)(start_p); \
|
||||
T* end = p + (count); \
|
||||
if (p < l) p = l; \
|
||||
if (end > h) end = h; \
|
||||
while (p < end) { \
|
||||
(assert_fn)(p); \
|
||||
do_oop; \
|
||||
++p; \
|
||||
} \
|
||||
}
|
||||
|
||||
|
||||
// The following macros call specialized macros, passing either oop or
|
||||
// narrowOop as the specialization type. These test the UseCompressedOops
|
||||
// flag.
|
||||
#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
|
||||
{ \
|
||||
/* Compute oopmap block range. The common case \
|
||||
is nonstatic_oop_map_size == 1. */ \
|
||||
OopMapBlock* map = start_of_nonstatic_oop_maps(); \
|
||||
OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
|
||||
if (UseCompressedOops) { \
|
||||
while (map < end_map) { \
|
||||
InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
|
||||
obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
|
||||
do_oop, assert_fn) \
|
||||
++map; \
|
||||
} \
|
||||
} else { \
|
||||
while (map < end_map) { \
|
||||
InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
|
||||
obj->obj_field_addr<oop>(map->offset()), map->count(), \
|
||||
do_oop, assert_fn) \
|
||||
++map; \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
|
||||
#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
|
||||
{ \
|
||||
OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
|
||||
OopMapBlock* map = start_map + nonstatic_oop_map_count(); \
|
||||
if (UseCompressedOops) { \
|
||||
while (start_map < map) { \
|
||||
--map; \
|
||||
InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
|
||||
obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
|
||||
do_oop, assert_fn) \
|
||||
} \
|
||||
} else { \
|
||||
while (start_map < map) { \
|
||||
--map; \
|
||||
InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
|
||||
obj->obj_field_addr<oop>(map->offset()), map->count(), \
|
||||
do_oop, assert_fn) \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
|
||||
#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
|
||||
assert_fn) \
|
||||
{ \
|
||||
/* Compute oopmap block range. The common case is \
|
||||
nonstatic_oop_map_size == 1, so we accept the \
|
||||
usually non-existent extra overhead of examining \
|
||||
all the maps. */ \
|
||||
OopMapBlock* map = start_of_nonstatic_oop_maps(); \
|
||||
OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
|
||||
if (UseCompressedOops) { \
|
||||
while (map < end_map) { \
|
||||
InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
|
||||
obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
|
||||
low, high, \
|
||||
do_oop, assert_fn) \
|
||||
++map; \
|
||||
} \
|
||||
} else { \
|
||||
while (map < end_map) { \
|
||||
InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
|
||||
obj->obj_field_addr<oop>(map->offset()), map->count(), \
|
||||
low, high, \
|
||||
do_oop, assert_fn) \
|
||||
++map; \
|
||||
} \
|
||||
} \
|
||||
}
|
||||
|
||||
void InstanceKlass::oop_follow_contents(oop obj) {
|
||||
assert(obj != NULL, "can't follow the content of NULL object");
|
||||
MarkSweep::follow_klass(obj->klass());
|
||||
InstanceKlass_OOP_MAP_ITERATE( \
|
||||
obj, \
|
||||
MarkSweep::mark_and_push(p), \
|
||||
assert_is_in_closed_subset)
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void InstanceKlass::oop_follow_contents(ParCompactionManager* cm,
|
||||
oop obj) {
|
||||
assert(obj != NULL, "can't follow the content of NULL object");
|
||||
PSParallelCompact::follow_klass(cm, obj->klass());
|
||||
// Only mark the header and let the scan of the meta-data mark
|
||||
// everything else.
|
||||
InstanceKlass_OOP_MAP_ITERATE( \
|
||||
obj, \
|
||||
PSParallelCompact::mark_and_push(cm, p), \
|
||||
assert_is_in)
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// closure's do_metadata() method dictates whether the given closure should be
|
||||
// applied to the klass ptr in the object header.
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* header */ \
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
closure->do_klass##nv_suffix(obj->klass()); \
|
||||
} \
|
||||
InstanceKlass_OOP_MAP_ITERATE( \
|
||||
obj, \
|
||||
(closure)->do_oop##nv_suffix(p), \
|
||||
assert_is_in_closed_subset) \
|
||||
return size_helper(); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \
|
||||
OopClosureType* closure) { \
|
||||
assert_should_ignore_metadata(closure, nv_suffix); \
|
||||
\
|
||||
/* instance variables */ \
|
||||
InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
|
||||
obj, \
|
||||
(closure)->do_oop##nv_suffix(p), \
|
||||
assert_is_in_closed_subset) \
|
||||
return size_helper(); \
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
|
||||
OopClosureType* closure, \
|
||||
MemRegion mr) { \
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
if (mr.contains(obj)) { \
|
||||
closure->do_klass##nv_suffix(obj->klass()); \
|
||||
} \
|
||||
} \
|
||||
InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
|
||||
obj, mr.start(), mr.end(), \
|
||||
(closure)->do_oop##nv_suffix(p), \
|
||||
assert_is_in_closed_subset) \
|
||||
return size_helper(); \
|
||||
}
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
#if INCLUDE_ALL_GCS
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
int InstanceKlass::oop_adjust_pointers(oop obj) {
|
||||
int size = size_helper();
|
||||
InstanceKlass_OOP_MAP_ITERATE( \
|
||||
obj, \
|
||||
MarkSweep::adjust_pointer(p), \
|
||||
assert_is_in)
|
||||
return size;
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void InstanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
|
||||
InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
|
||||
obj, \
|
||||
if (PSScavenge::should_scavenge(p)) { \
|
||||
pm->claim_or_forward_depth(p); \
|
||||
}, \
|
||||
assert_nothing )
|
||||
}
|
||||
|
||||
int InstanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
|
||||
int size = size_helper();
|
||||
InstanceKlass_OOP_MAP_ITERATE( \
|
||||
obj, \
|
||||
PSParallelCompact::adjust_pointer(p), \
|
||||
assert_is_in)
|
||||
return size;
|
||||
}
|
||||
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
|
||||
assert(class_loader_data()->is_alive(is_alive), "this klass should be live");
|
||||
if (is_interface()) {
|
||||
|
@ -965,10 +965,6 @@ class InstanceKlass: public Klass {
|
||||
void adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed);
|
||||
#endif // INCLUDE_JVMTI
|
||||
|
||||
// Garbage collection
|
||||
void oop_follow_contents(oop obj);
|
||||
int oop_adjust_pointers(oop obj);
|
||||
|
||||
void clean_implementors_list(BoolObjectClosure* is_alive);
|
||||
void clean_method_data(BoolObjectClosure* is_alive);
|
||||
void clean_dependent_nmethods();
|
||||
@ -992,32 +988,108 @@ class InstanceKlass: public Klass {
|
||||
static void notify_unload_class(InstanceKlass* ik);
|
||||
static void release_C_heap_structures(InstanceKlass* ik);
|
||||
|
||||
// Parallel Scavenge and Parallel Old
|
||||
PARALLEL_GC_DECLS
|
||||
|
||||
// Naming
|
||||
const char* signature_name() const;
|
||||
|
||||
// Iterators
|
||||
int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
|
||||
return oop_oop_iterate_v(obj, blk);
|
||||
}
|
||||
// GC specific object visitors
|
||||
//
|
||||
// Mark Sweep
|
||||
void oop_ms_follow_contents(oop obj);
|
||||
int oop_ms_adjust_pointers(oop obj);
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel Scavenge
|
||||
void oop_ps_push_contents( oop obj, PSPromotionManager* pm);
|
||||
// Parallel Compact
|
||||
void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
|
||||
void oop_pc_update_pointers(oop obj);
|
||||
#endif
|
||||
|
||||
int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
|
||||
return oop_oop_iterate_v_m(obj, blk, mr);
|
||||
}
|
||||
// Oop fields (and metadata) iterators
|
||||
// [nv = true] Use non-virtual calls to do_oop_nv.
|
||||
// [nv = false] Use virtual calls to do_oop.
|
||||
//
|
||||
// The InstanceKlass iterators also visit the Object's klass.
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \
|
||||
MemRegion mr);
|
||||
// Forward iteration
|
||||
public:
|
||||
// Iterate over all oop fields in the oop maps.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure);
|
||||
|
||||
protected:
|
||||
// Iterate over all oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate(oop obj, OopClosureType* closure);
|
||||
|
||||
private:
|
||||
// Iterate over all oop fields in the oop maps.
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure);
|
||||
|
||||
// Iterate over all oop fields in one oop map.
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure);
|
||||
|
||||
|
||||
// Reverse iteration
|
||||
#if INCLUDE_ALL_GCS
|
||||
public:
|
||||
// Iterate over all oop fields in the oop maps.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure);
|
||||
|
||||
protected:
|
||||
// Iterate over all oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
|
||||
|
||||
private:
|
||||
// Iterate over all oop fields in the oop maps.
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure);
|
||||
|
||||
// Iterate over all oop fields in one oop map.
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure);
|
||||
#endif
|
||||
|
||||
|
||||
// Bounded range iteration
|
||||
public:
|
||||
// Iterate over all oop fields in the oop maps.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
protected:
|
||||
// Iterate over all oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
private:
|
||||
// Iterate over all oop fields in the oop maps.
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Iterate over all oop fields in one oop map.
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
|
||||
public:
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
|
215
hotspot/src/share/vm/oops/instanceKlass.inline.hpp
Normal file
@ -0,0 +1,215 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
|
||||
#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
|
||||
|
||||
#include "memory/iterator.hpp"
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
// The iteration over the oops in objects is a hot path in the GC code.
|
||||
// By force inlining the following functions, we get similar GC performance
|
||||
// as the previous macro based implementation.
|
||||
#ifdef TARGET_COMPILER_visCPP
|
||||
#define INLINE __forceinline
|
||||
#else
|
||||
#define INLINE inline
|
||||
#endif
|
||||
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
|
||||
T* p = (T*)obj->obj_field_addr<T>(map->offset());
|
||||
T* const end = p + map->count();
|
||||
|
||||
for (; p < end; ++p) {
|
||||
Devirtualizer<nv>::do_oop(closure, p);
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
|
||||
T* const start = (T*)obj->obj_field_addr<T>(map->offset());
|
||||
T* p = start + map->count();
|
||||
|
||||
while (start < p) {
|
||||
--p;
|
||||
Devirtualizer<nv>::do_oop(closure, p);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
T* p = (T*)obj->obj_field_addr<T>(map->offset());
|
||||
T* end = p + map->count();
|
||||
|
||||
T* const l = (T*)mr.start();
|
||||
T* const h = (T*)mr.end();
|
||||
assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
|
||||
mask_bits((intptr_t)h, sizeof(T)-1) == 0,
|
||||
"bounded region must be properly aligned");
|
||||
|
||||
if (p < l) {
|
||||
p = l;
|
||||
}
|
||||
if (end > h) {
|
||||
end = h;
|
||||
}
|
||||
|
||||
for (;p < end; ++p) {
|
||||
Devirtualizer<nv>::do_oop(closure, p);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
|
||||
OopMapBlock* map = start_of_nonstatic_oop_maps();
|
||||
OopMapBlock* const end_map = map + nonstatic_oop_map_count();
|
||||
|
||||
for (; map < end_map; ++map) {
|
||||
oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
|
||||
OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
|
||||
OopMapBlock* map = start_map + nonstatic_oop_map_count();
|
||||
|
||||
while (start_map < map) {
|
||||
--map;
|
||||
oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
OopMapBlock* map = start_of_nonstatic_oop_maps();
|
||||
OopMapBlock* const end_map = map + nonstatic_oop_map_count();
|
||||
|
||||
for (;map < end_map; ++map) {
|
||||
oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
|
||||
if (UseCompressedOops) {
|
||||
oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
|
||||
} else {
|
||||
oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
template <bool nv, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
|
||||
if (UseCompressedOops) {
|
||||
oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
|
||||
} else {
|
||||
oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
if (UseCompressedOops) {
|
||||
oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
|
||||
} else {
|
||||
oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
|
||||
if (Devirtualizer<nv>::do_metadata(closure)) {
|
||||
Devirtualizer<nv>::do_klass(closure, this);
|
||||
}
|
||||
|
||||
oop_oop_iterate_oop_maps<nv>(obj, closure);
|
||||
|
||||
return size_helper();
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
template <bool nv, class OopClosureType>
|
||||
INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
|
||||
assert(!Devirtualizer<nv>::do_metadata(closure),
|
||||
"Code to handle metadata is not implemented");
|
||||
|
||||
oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);
|
||||
|
||||
return size_helper();
|
||||
}
|
||||
#endif
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
if (Devirtualizer<nv>::do_metadata(closure)) {
|
||||
if (mr.contains(obj)) {
|
||||
Devirtualizer<nv>::do_klass(closure, this);
|
||||
}
|
||||
}
|
||||
|
||||
oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);
|
||||
|
||||
return size_helper();
|
||||
}
|
||||
|
||||
#undef INLINE
|
||||
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
InstanceKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
InstanceKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
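The UseCompressedOops branches in this file pick between two instantiations of the same field-walking template, one for narrow (32-bit) and one for full-width oops. A self-contained sketch of that flag-to-template dispatch, with invented fake_oop/fake_narrowOop types standing in for oop and narrowOop:

```cpp
#include <cstdint>
#include <cstdio>

// Stand-ins for full-width and compressed object pointers.
typedef uint64_t fake_oop;
typedef uint32_t fake_narrowOop;

// One template walks a field array of either width; T is fixed at compile time.
template <typename T>
void walk_fields(const void* base, int count) {
  const T* p = static_cast<const T*>(base);
  for (int i = 0; i < count; i++) {
    std::printf("field %d: %llu (width %zu)\n", i,
                (unsigned long long)p[i], sizeof(T));
  }
}

// The runtime flag is tested once, outside the loop, and selects the instantiation,
// mirroring how oop_oop_iterate_oop_maps() tests UseCompressedOops.
void walk(const void* base, int count, bool use_compressed) {
  if (use_compressed) {
    walk_fields<fake_narrowOop>(base, count);
  } else {
    walk_fields<fake_oop>(base, count);
  }
}

int main() {
  fake_narrowOop narrow[2] = {10, 20};
  fake_oop wide[2] = {30, 40};
  walk(narrow, 2, true);
  walk(wide, 2, false);
  return 0;
}
```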
|
@ -25,9 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/oopFactory.hpp"
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
@ -38,313 +36,9 @@
|
||||
#include "oops/symbol.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
int InstanceMirrorKlass::_offset_of_static_fields = 0;
|
||||
|
||||
#ifdef ASSERT
|
||||
template <class T> void assert_is_in(T *p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in(o), "should be in heap");
|
||||
}
|
||||
}
|
||||
template <class T> void assert_is_in_closed_subset(T *p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
|
||||
}
|
||||
}
|
||||
template <class T> void assert_is_in_reserved(T *p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
|
||||
}
|
||||
}
|
||||
template <class T> void assert_nothing(T *p) {}
|
||||
|
||||
#else
|
||||
template <class T> void assert_is_in(T *p) {}
|
||||
template <class T> void assert_is_in_closed_subset(T *p) {}
|
||||
template <class T> void assert_is_in_reserved(T *p) {}
|
||||
template <class T> void assert_nothing(T *p) {}
|
||||
#endif // ASSERT
|
||||
|
||||
#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \
|
||||
T, start_p, count, do_oop, \
|
||||
assert_fn) \
|
||||
{ \
|
||||
T* p = (T*)(start_p); \
|
||||
T* const end = p + (count); \
|
||||
while (p < end) { \
|
||||
(assert_fn)(p); \
|
||||
do_oop; \
|
||||
++p; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
|
||||
T, start_p, count, low, high, \
|
||||
do_oop, assert_fn) \
|
||||
{ \
|
||||
T* const l = (T*)(low); \
|
||||
T* const h = (T*)(high); \
|
||||
assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
|
||||
mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
|
||||
"bounded region must be properly aligned"); \
|
||||
T* p = (T*)(start_p); \
|
||||
T* end = p + (count); \
|
||||
if (p < l) p = l; \
|
||||
if (end > h) end = h; \
|
||||
while (p < end) { \
|
||||
(assert_fn)(p); \
|
||||
do_oop; \
|
||||
++p; \
|
||||
} \
|
||||
}
|
||||
|
||||
|
||||
#define InstanceMirrorKlass_OOP_ITERATE(start_p, count, \
|
||||
do_oop, assert_fn) \
|
||||
{ \
|
||||
if (UseCompressedOops) { \
|
||||
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
|
||||
start_p, count, \
|
||||
do_oop, assert_fn) \
|
||||
} else { \
|
||||
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(oop, \
|
||||
start_p, count, \
|
||||
do_oop, assert_fn) \
|
||||
} \
|
||||
}
|
||||
|
||||
// The following macros call specialized macros, passing either oop or
|
||||
// narrowOop as the specialization type. These test the UseCompressedOops
|
||||
// flag.
|
||||
#define InstanceMirrorKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
|
||||
do_oop, assert_fn) \
|
||||
{ \
|
||||
if (UseCompressedOops) { \
|
||||
InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
|
||||
start_p, count, \
|
||||
low, high, \
|
||||
do_oop, assert_fn) \
|
||||
} else { \
|
||||
InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
|
||||
start_p, count, \
|
||||
low, high, \
|
||||
do_oop, assert_fn) \
|
||||
} \
|
||||
}
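Aside: the comment above notes that these macros only test UseCompressedOops and forward to the specialized versions. A minimal stand-alone sketch of that dispatch, written with templates instead of token pasting (all names below are illustrative placeholders, not HotSpot API):

// Illustrative sketch only: the narrow/wide split the macros above expand to.
#include <cstdint>

typedef uint32_t narrow_ref;             // stands in for narrowOop
typedef void*    wide_ref;               // stands in for oop
static bool use_compressed_refs = true;  // stands in for the UseCompressedOops flag

template <typename T, typename DoOop>
void iterate_fields(void* start, int count, DoOop do_oop) {
  T* p = static_cast<T*>(start);
  T* const end = p + count;
  while (p < end) {                      // same loop shape as the SPECIALIZED macro above
    do_oop(p);
    ++p;
  }
}

template <typename DoOop>
void iterate_fields_dispatch(void* start, int count, DoOop do_oop) {
  if (use_compressed_refs) {
    iterate_fields<narrow_ref>(start, count, do_oop);  // 32-bit compressed references
  } else {
    iterate_fields<wide_ref>(start, count, do_oop);    // full-width references
  }
}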
|
||||
|
||||
|
||||
void InstanceMirrorKlass::oop_follow_contents(oop obj) {
|
||||
InstanceKlass::oop_follow_contents(obj);
|
||||
|
||||
// Follow the klass field in the mirror.
|
||||
Klass* klass = java_lang_Class::as_Klass(obj);
|
||||
if (klass != NULL) {
|
||||
// An anonymous class doesn't have its own class loader, so the call
|
||||
// to follow_klass will mark and push its java mirror instead of the
|
||||
// class loader. When handling the java mirror for an anonymous class
|
||||
// we need to make sure its class loader data is claimed, this is done
|
||||
// by calling follow_class_loader explicitly. For non-anonymous classes
|
||||
// the call to follow_class_loader is made when the class loader itself
|
||||
// is handled.
|
||||
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
|
||||
MarkSweep::follow_class_loader(klass->class_loader_data());
|
||||
} else {
|
||||
MarkSweep::follow_klass(klass);
|
||||
}
|
||||
} else {
|
||||
// If klass is NULL then this is a mirror for a primitive type.
|
||||
// We don't have to follow them, since they are handled as strong
|
||||
// roots in Universe::oops_do.
|
||||
assert(java_lang_Class::is_primitive(obj), "Sanity check");
|
||||
}
|
||||
|
||||
InstanceMirrorKlass_OOP_ITERATE( \
|
||||
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
|
||||
MarkSweep::mark_and_push(p), \
|
||||
assert_is_in_closed_subset)
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void InstanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm,
|
||||
oop obj) {
|
||||
InstanceKlass::oop_follow_contents(cm, obj);
|
||||
|
||||
// Follow the klass field in the mirror.
|
||||
Klass* klass = java_lang_Class::as_Klass(obj);
|
||||
if (klass != NULL) {
|
||||
// An anonymous class doesn't have its own class loader, so the call
|
||||
// to follow_klass will mark and push its java mirror instead of the
|
||||
// class loader. When handling the java mirror for an anonymous class
|
||||
// we need to make sure its class loader data is claimed, this is done
|
||||
// by calling follow_class_loader explicitly. For non-anonymous classes
|
||||
// the call to follow_class_loader is made when the class loader itself
|
||||
// is handled.
|
||||
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
|
||||
PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
|
||||
} else {
|
||||
PSParallelCompact::follow_klass(cm, klass);
|
||||
}
|
||||
} else {
|
||||
// If klass is NULL then this is a mirror for a primitive type.
|
||||
// We don't have to follow them, since they are handled as strong
|
||||
// roots in Universe::oops_do.
|
||||
assert(java_lang_Class::is_primitive(obj), "Sanity check");
|
||||
}
|
||||
|
||||
InstanceMirrorKlass_OOP_ITERATE( \
|
||||
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
|
||||
PSParallelCompact::mark_and_push(cm, p), \
|
||||
assert_is_in)
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
int InstanceMirrorKlass::oop_adjust_pointers(oop obj) {
|
||||
int size = oop_size(obj);
|
||||
InstanceKlass::oop_adjust_pointers(obj);
|
||||
|
||||
InstanceMirrorKlass_OOP_ITERATE( \
|
||||
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
|
||||
MarkSweep::adjust_pointer(p), \
|
||||
assert_nothing)
|
||||
return size;
|
||||
}
|
||||
|
||||
#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(T, nv_suffix) \
|
||||
InstanceMirrorKlass_OOP_ITERATE( \
|
||||
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
|
||||
(closure)->do_oop##nv_suffix(p), \
|
||||
assert_is_in_closed_subset) \
|
||||
return oop_size(obj); \
|
||||
|
||||
#define InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(T, nv_suffix, mr) \
|
||||
InstanceMirrorKlass_BOUNDED_OOP_ITERATE( \
|
||||
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
|
||||
mr.start(), mr.end(), \
|
||||
(closure)->do_oop##nv_suffix(p), \
|
||||
assert_is_in_closed_subset) \
|
||||
return oop_size(obj); \
|
||||
|
||||
|
||||
// Macro to define InstanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for
|
||||
// all closures. Macros calling macros above for each oop size.
|
||||
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass:: \
|
||||
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* Get size before changing pointers */ \
|
||||
InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
|
||||
\
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
Klass* klass = java_lang_Class::as_Klass(obj); \
|
||||
/* We'll get NULL for primitive mirrors. */ \
|
||||
if (klass != NULL) { \
|
||||
closure->do_klass##nv_suffix(klass); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
if (UseCompressedOops) { \
|
||||
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \
|
||||
} else { \
|
||||
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \
|
||||
} \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass:: \
|
||||
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* Get size before changing pointers */ \
|
||||
InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
|
||||
\
|
||||
if (UseCompressedOops) { \
|
||||
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \
|
||||
} else { \
|
||||
InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \
|
||||
} \
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass:: \
|
||||
oop_oop_iterate##nv_suffix##_m(oop obj, \
|
||||
OopClosureType* closure, \
|
||||
MemRegion mr) { \
|
||||
InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
|
||||
\
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
if (mr.contains(obj)) { \
|
||||
Klass* klass = java_lang_Class::as_Klass(obj); \
|
||||
/* We'll get NULL for primitive mirrors. */ \
|
||||
if (klass != NULL) { \
|
||||
closure->do_klass##nv_suffix(klass); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
if (UseCompressedOops) { \
|
||||
InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr); \
|
||||
} else { \
|
||||
InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr); \
|
||||
} \
|
||||
}
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN)
|
||||
#if INCLUDE_ALL_GCS
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void InstanceMirrorKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
|
||||
// Note that we don't have to follow the mirror -> klass pointer, since all
|
||||
// klasses that are dirty will be scavenged when we iterate over the
|
||||
// ClassLoaderData objects.
|
||||
|
||||
InstanceKlass::oop_push_contents(pm, obj);
|
||||
InstanceMirrorKlass_OOP_ITERATE( \
|
||||
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\
|
||||
if (PSScavenge::should_scavenge(p)) { \
|
||||
pm->claim_or_forward_depth(p); \
|
||||
}, \
|
||||
assert_nothing )
|
||||
}
|
||||
|
||||
int InstanceMirrorKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
|
||||
int size = oop_size(obj);
|
||||
InstanceKlass::oop_update_pointers(cm, obj);
|
||||
|
||||
InstanceMirrorKlass_OOP_ITERATE( \
|
||||
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\
|
||||
PSParallelCompact::adjust_pointer(p), \
|
||||
assert_nothing)
|
||||
return size;
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
int InstanceMirrorKlass::instance_size(KlassHandle k) {
|
||||
if (k() != NULL && k->oop_is_instance()) {
|
||||
return align_object_size(size_helper() + InstanceKlass::cast(k())->static_field_size());
|
||||
|
@ -88,19 +88,66 @@ class InstanceMirrorKlass: public InstanceKlass {
|
||||
// allocation
|
||||
instanceOop allocate_instance(KlassHandle k, TRAPS);
|
||||
|
||||
// Garbage collection
|
||||
int oop_adjust_pointers(oop obj);
|
||||
void oop_follow_contents(oop obj);
|
||||
// GC specific object visitors
|
||||
//
|
||||
// Mark Sweep
|
||||
void oop_ms_follow_contents(oop obj);
|
||||
int oop_ms_adjust_pointers(oop obj);
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel Scavenge
|
||||
void oop_ps_push_contents( oop obj, PSPromotionManager* pm);
|
||||
// Parallel Compact
|
||||
void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
|
||||
void oop_pc_update_pointers(oop obj);
|
||||
#endif
|
||||
|
||||
// Parallel Scavenge and Parallel Old
|
||||
PARALLEL_GC_DECLS
|
||||
// Oop fields (and metadata) iterators
|
||||
// [nv = true] Use non-virtual calls to do_oop_nv.
|
||||
// [nv = false] Use virtual calls to do_oop.
|
||||
//
|
||||
// The InstanceMirrorKlass iterators also visit the hidden Klass pointer.
|
||||
|
||||
int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
|
||||
return oop_oop_iterate_v(obj, blk);
|
||||
}
|
||||
int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
|
||||
return oop_oop_iterate_v_m(obj, blk, mr);
|
||||
}
|
||||
public:
|
||||
// Iterate over the static fields.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_statics(oop obj, OopClosureType* closure);
|
||||
|
||||
private:
|
||||
// Iterate over the static fields.
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_statics_specialized(oop obj, OopClosureType* closure);
|
||||
|
||||
// Forward iteration
|
||||
// Iterate over the oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate(oop obj, OopClosureType* closure);
|
||||
|
||||
|
||||
// Reverse iteration
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Iterate over the oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
|
||||
#endif
|
||||
|
||||
|
||||
// Bounded range iteration
|
||||
// Iterate over the oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Iterate over the static fields.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_statics_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Iterate over the static fields.
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_statics_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
|
||||
public:
|
||||
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \

hotspot/src/share/vm/oops/instanceMirrorKlass.inline.hpp (new file, 164 lines)
@ -0,0 +1,164 @@
|
||||
/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/

#ifndef SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP
#define SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP

#include "classfile/javaClasses.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

template <bool nv, typename T, class OopClosureType>
void InstanceMirrorKlass::oop_oop_iterate_statics_specialized(oop obj, OopClosureType* closure) {
  T* p = (T*)start_of_static_fields(obj);
  T* const end = p + java_lang_Class::static_oop_field_count(obj);

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

template <bool nv, class OopClosureType>
void InstanceMirrorKlass::oop_oop_iterate_statics(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_statics_specialized<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_statics_specialized<nv, oop>(obj, closure);
  }
}

template <bool nv, class OopClosureType>
int InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  InstanceKlass::oop_oop_iterate<nv>(obj, closure);

  if (Devirtualizer<nv>::do_metadata(closure)) {
    Klass* klass = java_lang_Class::as_Klass(obj);
    // We'll get NULL for primitive mirrors.
    if (klass != NULL) {
      Devirtualizer<nv>::do_klass(closure, klass);
    }
  }

  oop_oop_iterate_statics<nv>(obj, closure);

  return oop_size(obj);
}

#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
int InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);

  InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);

  return oop_size(obj);
}
#endif

template <bool nv, typename T, class OopClosureType>
|
||||
void InstanceMirrorKlass::oop_oop_iterate_statics_specialized_bounded(oop obj,
|
||||
OopClosureType* closure,
|
||||
MemRegion mr) {
|
||||
T* p = (T*)start_of_static_fields(obj);
|
||||
T* end = p + java_lang_Class::static_oop_field_count(obj);
|
||||
|
||||
T* const l = (T*)mr.start();
|
||||
T* const h = (T*)mr.end();
|
||||
assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
|
||||
mask_bits((intptr_t)h, sizeof(T)-1) == 0,
|
||||
"bounded region must be properly aligned");
|
||||
|
||||
if (p < l) {
|
||||
p = l;
|
||||
}
|
||||
if (end > h) {
|
||||
end = h;
|
||||
}
|
||||
|
||||
for (;p < end; ++p) {
|
||||
Devirtualizer<nv>::do_oop(closure, p);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
void InstanceMirrorKlass::oop_oop_iterate_statics_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
if (UseCompressedOops) {
|
||||
oop_oop_iterate_statics_specialized_bounded<nv, narrowOop>(obj, closure, mr);
|
||||
} else {
|
||||
oop_oop_iterate_statics_specialized_bounded<nv, oop>(obj, closure, mr);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
int InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
|
||||
|
||||
if (Devirtualizer<nv>::do_metadata(closure)) {
|
||||
if (mr.contains(obj)) {
|
||||
Klass* klass = java_lang_Class::as_Klass(obj);
|
||||
// We'll get NULL for primitive mirrors.
|
||||
if (klass != NULL) {
|
||||
Devirtualizer<nv>::do_klass(closure, klass);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
oop_oop_iterate_statics_bounded<nv>(obj, closure, mr);
|
||||
|
||||
return oop_size(obj);
|
||||
}
|
||||
|
||||
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
|
||||
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceMirrorKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP
|
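Aside: the new header above leans on the <bool nv> template parameter and Devirtualizer<nv>::do_oop to choose between virtual and statically bound closure calls, as described by the "[nv = true] Use non-virtual calls to do_oop_nv" comments. A minimal sketch of that compile-time dispatch pattern, using illustrative names rather than the actual Devirtualizer implementation:

// Sketch of compile-time devirtualization; names are placeholders, not HotSpot's.
struct SketchClosure {
  virtual void do_oop(void** p) { (void)p; /* generic path, dispatched virtually */ }
  void do_oop_nv(void** p)      { (void)p; /* specialized path, bound statically */ }
  virtual ~SketchClosure() {}
};

template <bool nv>
struct SketchDevirtualizer {               // nv == false: keep the virtual call
  template <class ClosureType>
  static void do_oop(ClosureType* cl, void** p) { cl->do_oop(p); }
};

template <>
struct SketchDevirtualizer<true> {         // nv == true: call the non-virtual variant directly
  template <class ClosureType>
  static void do_oop(ClosureType* cl, void** p) { cl->do_oop_nv(p); }
};

template <bool nv, class ClosureType>
void visit_field(ClosureType* cl, void** p) {
  SketchDevirtualizer<nv>::do_oop(cl, p);  // mirrors the Devirtualizer<nv>::do_oop calls above
}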
@ -25,421 +25,16 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "memory/genCollectedHeap.hpp"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
#include "oops/instanceRefKlass.hpp"
|
||||
#include "oops/instanceRefKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/preserveException.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
|
||||
|
||||
template <class T>
|
||||
void specialized_oop_follow_contents(InstanceRefKlass* ref, oop obj) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
T heap_oop = oopDesc::load_heap_oop(referent_addr);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj);
|
||||
}
|
||||
)
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if (!referent->is_gc_marked() &&
|
||||
MarkSweep::ref_processor()->discover_reference(obj, ref->reference_type())) {
|
||||
// reference was discovered, referent will be traversed later
|
||||
ref->InstanceKlass::oop_follow_contents(obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (void *)obj);
|
||||
}
|
||||
)
|
||||
return;
|
||||
} else {
|
||||
// treat referent as normal oop
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (void *)obj);
|
||||
}
|
||||
)
|
||||
MarkSweep::mark_and_push(referent_addr);
|
||||
}
|
||||
}
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
if (ReferenceProcessor::pending_list_uses_discovered_field()) {
|
||||
// Treat discovered as normal oop, if ref is not "active",
|
||||
// i.e. if next is non-NULL.
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr);
|
||||
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process discovered as normal "
|
||||
INTPTR_FORMAT, discovered_addr);
|
||||
}
|
||||
)
|
||||
MarkSweep::mark_and_push(discovered_addr);
|
||||
}
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
// In the case of older JDKs which do not use the discovered
|
||||
// field for the pending list, an inactive ref (next != NULL)
|
||||
// must always have a NULL discovered field.
|
||||
oop next = oopDesc::load_decode_heap_oop(next_addr);
|
||||
oop discovered = java_lang_ref_Reference::discovered(obj);
|
||||
assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
|
||||
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
|
||||
(oopDesc*)obj));
|
||||
#endif
|
||||
}
|
||||
// treat next as normal oop. next is a link in the reference queue.
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr);
|
||||
}
|
||||
)
|
||||
MarkSweep::mark_and_push(next_addr);
|
||||
ref->InstanceKlass::oop_follow_contents(obj);
|
||||
}
|
||||
|
||||
void InstanceRefKlass::oop_follow_contents(oop obj) {
|
||||
if (UseCompressedOops) {
|
||||
specialized_oop_follow_contents<narrowOop>(this, obj);
|
||||
} else {
|
||||
specialized_oop_follow_contents<oop>(this, obj);
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
template <class T>
|
||||
void specialized_oop_follow_contents(InstanceRefKlass* ref,
|
||||
ParCompactionManager* cm,
|
||||
oop obj) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
T heap_oop = oopDesc::load_heap_oop(referent_addr);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj);
|
||||
}
|
||||
)
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
|
||||
PSParallelCompact::ref_processor()->
|
||||
discover_reference(obj, ref->reference_type())) {
|
||||
// reference already enqueued, referent will be traversed later
|
||||
ref->InstanceKlass::oop_follow_contents(cm, obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (void *)obj);
|
||||
}
|
||||
)
|
||||
return;
|
||||
} else {
|
||||
// treat referent as normal oop
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (void *)obj);
|
||||
}
|
||||
)
|
||||
PSParallelCompact::mark_and_push(cm, referent_addr);
|
||||
}
|
||||
}
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
if (ReferenceProcessor::pending_list_uses_discovered_field()) {
|
||||
// Treat discovered as normal oop, if ref is not "active",
|
||||
// i.e. if next is non-NULL.
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr);
|
||||
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process discovered as normal "
|
||||
INTPTR_FORMAT, discovered_addr);
|
||||
}
|
||||
)
|
||||
PSParallelCompact::mark_and_push(cm, discovered_addr);
|
||||
}
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
// In the case of older JDKs which do not use the discovered
|
||||
// field for the pending list, an inactive ref (next != NULL)
|
||||
// must always have a NULL discovered field.
|
||||
T next = oopDesc::load_heap_oop(next_addr);
|
||||
oop discovered = java_lang_ref_Reference::discovered(obj);
|
||||
assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
|
||||
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
|
||||
(oopDesc*)obj));
|
||||
#endif
|
||||
}
|
||||
PSParallelCompact::mark_and_push(cm, next_addr);
|
||||
ref->InstanceKlass::oop_follow_contents(cm, obj);
|
||||
}
|
||||
|
||||
void InstanceRefKlass::oop_follow_contents(ParCompactionManager* cm,
|
||||
oop obj) {
|
||||
if (UseCompressedOops) {
|
||||
specialized_oop_follow_contents<narrowOop>(this, cm, obj);
|
||||
} else {
|
||||
specialized_oop_follow_contents<oop>(this, cm, obj);
|
||||
}
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
#ifdef ASSERT
|
||||
template <class T> void trace_reference_gc(const char *s, oop obj,
|
||||
T* referent_addr,
|
||||
T* next_addr,
|
||||
T* discovered_addr) {
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr("%s obj " INTPTR_FORMAT, s, (address)obj);
|
||||
gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / "
|
||||
INTPTR_FORMAT, referent_addr,
|
||||
referent_addr ?
|
||||
(address)oopDesc::load_decode_heap_oop(referent_addr) : NULL);
|
||||
gclog_or_tty->print_cr(" next_addr/* " INTPTR_FORMAT " / "
|
||||
INTPTR_FORMAT, next_addr,
|
||||
next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL);
|
||||
gclog_or_tty->print_cr(" discovered_addr/* " INTPTR_FORMAT " / "
|
||||
INTPTR_FORMAT, discovered_addr,
|
||||
discovered_addr ?
|
||||
(address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
template <class T> void specialized_oop_adjust_pointers(InstanceRefKlass *ref, oop obj) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
MarkSweep::adjust_pointer(referent_addr);
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
MarkSweep::adjust_pointer(next_addr);
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
MarkSweep::adjust_pointer(discovered_addr);
|
||||
debug_only(trace_reference_gc("InstanceRefKlass::oop_adjust_pointers", obj,
|
||||
referent_addr, next_addr, discovered_addr);)
|
||||
}
|
||||
|
||||
int InstanceRefKlass::oop_adjust_pointers(oop obj) {
|
||||
int size = size_helper();
|
||||
InstanceKlass::oop_adjust_pointers(obj);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
specialized_oop_adjust_pointers<narrowOop>(this, obj);
|
||||
} else {
|
||||
specialized_oop_adjust_pointers<oop>(this, obj);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains) \
|
||||
T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); \
|
||||
if (closure->apply_to_weak_ref_discovered_field()) { \
|
||||
closure->do_oop##nv_suffix(disc_addr); \
|
||||
} \
|
||||
\
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \
|
||||
T heap_oop = oopDesc::load_heap_oop(referent_addr); \
|
||||
ReferenceProcessor* rp = closure->_ref_processor; \
|
||||
if (!oopDesc::is_null(heap_oop)) { \
|
||||
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); \
|
||||
if (!referent->is_gc_marked() && (rp != NULL) && \
|
||||
rp->discover_reference(obj, reference_type())) { \
|
||||
return size; \
|
||||
} else if (contains(referent_addr)) { \
|
||||
/* treat referent as normal oop */ \
|
||||
closure->do_oop##nv_suffix(referent_addr); \
|
||||
} \
|
||||
} \
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); \
|
||||
if (ReferenceProcessor::pending_list_uses_discovered_field()) { \
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr); \
|
||||
/* Treat discovered as normal oop, if ref is not "active" (next non-NULL) */\
|
||||
if (!oopDesc::is_null(next_oop) && contains(disc_addr)) { \
|
||||
/* i.e. ref is not "active" */ \
|
||||
debug_only( \
|
||||
if(TraceReferenceGC && PrintGCDetails) { \
|
||||
gclog_or_tty->print_cr(" Process discovered as normal " \
|
||||
INTPTR_FORMAT, disc_addr); \
|
||||
} \
|
||||
) \
|
||||
closure->do_oop##nv_suffix(disc_addr); \
|
||||
} \
|
||||
} else { \
|
||||
/* In the case of older JDKs which do not use the discovered field for */ \
|
||||
/* the pending list, an inactive ref (next != NULL) must always have a */ \
|
||||
/* NULL discovered field. */ \
|
||||
debug_only( \
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr); \
|
||||
T disc_oop = oopDesc::load_heap_oop(disc_addr); \
|
||||
assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop), \
|
||||
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL" \
|
||||
"discovered field", (oopDesc*)obj)); \
|
||||
) \
|
||||
} \
|
||||
/* treat next as normal oop */ \
|
||||
if (contains(next_addr)) { \
|
||||
closure->do_oop##nv_suffix(next_addr); \
|
||||
} \
|
||||
return size; \
|
||||
|
||||
|
||||
template <class T> bool contains(T *t) { return true; }
|
||||
|
||||
// Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
|
||||
// all closures. Macros calling macros above for each oop size.
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass:: \
|
||||
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* Get size before changing pointers */ \
|
||||
int size = InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \
|
||||
\
|
||||
if (UseCompressedOops) { \
|
||||
InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains); \
|
||||
} else { \
|
||||
InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains); \
|
||||
} \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass:: \
|
||||
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* Get size before changing pointers */ \
|
||||
int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
|
||||
\
|
||||
if (UseCompressedOops) { \
|
||||
InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains); \
|
||||
} else { \
|
||||
InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains); \
|
||||
} \
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass:: \
|
||||
oop_oop_iterate##nv_suffix##_m(oop obj, \
|
||||
OopClosureType* closure, \
|
||||
MemRegion mr) { \
|
||||
int size = InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \
|
||||
if (UseCompressedOops) { \
|
||||
InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr.contains); \
|
||||
} else { \
|
||||
InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr.contains); \
|
||||
} \
|
||||
}
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
|
||||
#if INCLUDE_ALL_GCS
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
template <class T>
|
||||
void specialized_oop_push_contents(InstanceRefKlass *ref,
|
||||
PSPromotionManager* pm, oop obj) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
if (PSScavenge::should_scavenge(referent_addr)) {
|
||||
ReferenceProcessor* rp = PSScavenge::reference_processor();
|
||||
if (rp->discover_reference(obj, ref->reference_type())) {
|
||||
// reference already enqueued, referent and next will be traversed later
|
||||
ref->InstanceKlass::oop_push_contents(pm, obj);
|
||||
return;
|
||||
} else {
|
||||
// treat referent as normal oop
|
||||
pm->claim_or_forward_depth(referent_addr);
|
||||
}
|
||||
}
|
||||
// Treat discovered as normal oop, if ref is not "active",
|
||||
// i.e. if next is non-NULL.
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
if (ReferenceProcessor::pending_list_uses_discovered_field()) {
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr);
|
||||
if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process discovered as normal "
|
||||
INTPTR_FORMAT, discovered_addr);
|
||||
}
|
||||
)
|
||||
if (PSScavenge::should_scavenge(discovered_addr)) {
|
||||
pm->claim_or_forward_depth(discovered_addr);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
// In the case of older JDKs which do not use the discovered
|
||||
// field for the pending list, an inactive ref (next != NULL)
|
||||
// must always have a NULL discovered field.
|
||||
oop next = oopDesc::load_decode_heap_oop(next_addr);
|
||||
oop discovered = java_lang_ref_Reference::discovered(obj);
|
||||
assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
|
||||
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
|
||||
(oopDesc*)obj));
|
||||
#endif
|
||||
}
|
||||
|
||||
// Treat next as normal oop; next is a link in the reference queue.
|
||||
if (PSScavenge::should_scavenge(next_addr)) {
|
||||
pm->claim_or_forward_depth(next_addr);
|
||||
}
|
||||
ref->InstanceKlass::oop_push_contents(pm, obj);
|
||||
}
|
||||
|
||||
void InstanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
|
||||
if (UseCompressedOops) {
|
||||
specialized_oop_push_contents<narrowOop>(this, pm, obj);
|
||||
} else {
|
||||
specialized_oop_push_contents<oop>(this, pm, obj);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void specialized_oop_update_pointers(InstanceRefKlass *ref,
|
||||
ParCompactionManager* cm, oop obj) {
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
PSParallelCompact::adjust_pointer(referent_addr);
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
PSParallelCompact::adjust_pointer(next_addr);
|
||||
T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
PSParallelCompact::adjust_pointer(discovered_addr);
|
||||
debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
|
||||
referent_addr, next_addr, discovered_addr);)
|
||||
}
|
||||
|
||||
int InstanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
|
||||
InstanceKlass::oop_update_pointers(cm, obj);
|
||||
if (UseCompressedOops) {
|
||||
specialized_oop_update_pointers<narrowOop>(this, cm, obj);
|
||||
} else {
|
||||
specialized_oop_update_pointers<oop>(this, cm, obj);
|
||||
}
|
||||
return size_helper();
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
void InstanceRefKlass::update_nonstatic_oop_maps(Klass* k) {
|
||||
// Clear the nonstatic oop-map entries corresponding to referent
|
||||
// and nextPending field. They are treated specially by the
|
||||
|
@ -64,30 +64,71 @@ class InstanceRefKlass: public InstanceKlass {
|
||||
return (InstanceRefKlass*) k;
|
||||
}
|
||||
|
||||
// Garbage collection
|
||||
int oop_adjust_pointers(oop obj);
|
||||
void oop_follow_contents(oop obj);
|
||||
// GC specific object visitors
|
||||
//
|
||||
// Mark Sweep
|
||||
void oop_ms_follow_contents(oop obj);
|
||||
int oop_ms_adjust_pointers(oop obj);
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel Scavenge
|
||||
void oop_ps_push_contents( oop obj, PSPromotionManager* pm);
|
||||
// Parallel Compact
|
||||
void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
|
||||
void oop_pc_update_pointers(oop obj);
|
||||
#endif
|
||||
|
||||
// Parallel Scavenge and Parallel Old
|
||||
PARALLEL_GC_DECLS
|
||||
// Oop fields (and metadata) iterators
|
||||
// [nv = true] Use non-virtual calls to do_oop_nv.
|
||||
// [nv = false] Use virtual calls to do_oop.
|
||||
//
|
||||
// The InstanceRefKlass iterators also support reference processing.
|
||||
|
||||
int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
|
||||
return oop_oop_iterate_v(obj, blk);
|
||||
}
|
||||
int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
|
||||
return oop_oop_iterate_v_m(obj, blk, mr);
|
||||
}
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr);
|
||||
// Forward iteration
|
||||
private:
|
||||
// Iterate over all oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate(oop obj, OopClosureType* closure);
|
||||
|
||||
// Reverse iteration
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Iterate over all oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Bounded range iteration
|
||||
// Iterate over all oop fields and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Reference processing part of the iterators.
|
||||
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType, class Contains>
|
||||
inline void oop_oop_iterate_ref_processing_specialized(oop obj, OopClosureType* closure, Contains& contains);
|
||||
|
||||
// Only perform reference processing if the referent object is within mr.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_ref_processing_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Reference processing
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_ref_processing(oop obj, OopClosureType* closure);
|
||||
|
||||
|
||||
public:
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)

hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp (new file, 187 lines)
@ -0,0 +1,187 @@
|
||||
/*
|
||||
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP
|
||||
#define SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP
|
||||
|
||||
#include "classfile/javaClasses.hpp"
|
||||
#include "memory/referenceProcessor.hpp"
|
||||
#include "oops/instanceRefKlass.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
template <bool nv, typename T, class OopClosureType, class Contains>
|
||||
void InstanceRefKlass::oop_oop_iterate_ref_processing_specialized(oop obj, OopClosureType* closure, Contains& contains) {
|
||||
T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
|
||||
if (closure->apply_to_weak_ref_discovered_field()) {
|
||||
Devirtualizer<nv>::do_oop(closure, disc_addr);
|
||||
}
|
||||
|
||||
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
|
||||
T heap_oop = oopDesc::load_heap_oop(referent_addr);
|
||||
ReferenceProcessor* rp = closure->_ref_processor;
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
if (!referent->is_gc_marked() && (rp != NULL) &&
|
||||
rp->discover_reference(obj, reference_type())) {
|
||||
return;
|
||||
} else if (contains(referent_addr)) {
|
||||
// treat referent as normal oop
|
||||
Devirtualizer<nv>::do_oop(closure, referent_addr);
|
||||
}
|
||||
}
|
||||
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
|
||||
if (ReferenceProcessor::pending_list_uses_discovered_field()) {
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr);
|
||||
// Treat discovered as normal oop, if ref is not "active" (next non-NULL)
|
||||
if (!oopDesc::is_null(next_oop) && contains(disc_addr)) {
|
||||
// i.e. ref is not "active"
|
||||
debug_only(
|
||||
if(TraceReferenceGC && PrintGCDetails) {
|
||||
gclog_or_tty->print_cr(" Process discovered as normal "
|
||||
PTR_FORMAT, p2i(disc_addr));
|
||||
}
|
||||
)
|
||||
Devirtualizer<nv>::do_oop(closure, disc_addr);
|
||||
}
|
||||
} else {
|
||||
// In the case of older JDKs which do not use the discovered field for
|
||||
// the pending list, an inactive ref (next != NULL) must always have a
|
||||
// NULL discovered field.
|
||||
debug_only(
|
||||
T next_oop = oopDesc::load_heap_oop(next_addr);
|
||||
T disc_oop = oopDesc::load_heap_oop(disc_addr);
|
||||
assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop),
|
||||
err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL"
|
||||
"discovered field", p2i(obj)));
|
||||
)
|
||||
}
|
||||
// treat next as normal oop
|
||||
if (contains(next_addr)) {
|
||||
Devirtualizer<nv>::do_oop(closure, next_addr);
|
||||
}
|
||||
}

class AlwaysContains {
 public:
  template <typename T> bool operator()(T* p) const { return true; }
};

template <bool nv, class OopClosureType>
void InstanceRefKlass::oop_oop_iterate_ref_processing(oop obj, OopClosureType* closure) {
  AlwaysContains always_contains;
  if (UseCompressedOops) {
    oop_oop_iterate_ref_processing_specialized<nv, narrowOop>(obj, closure, always_contains);
  } else {
    oop_oop_iterate_ref_processing_specialized<nv, oop>(obj, closure, always_contains);
  }
}

class MrContains {
  const MemRegion _mr;
 public:
  MrContains(MemRegion mr) : _mr(mr) {}
  template <typename T> bool operator()(T* p) const { return _mr.contains(p); }
};

template <bool nv, class OopClosureType>
void InstanceRefKlass::oop_oop_iterate_ref_processing_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  const MrContains contains(mr);
  if (UseCompressedOops) {
    oop_oop_iterate_ref_processing_specialized<nv, narrowOop>(obj, closure, contains);
  } else {
    oop_oop_iterate_ref_processing_specialized<nv, oop>(obj, closure, contains);
  }
}

template <bool nv, class OopClosureType>
int InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  // Get size before changing pointers
  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);

  oop_oop_iterate_ref_processing<nv>(obj, closure);

  return size;
}

#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
int InstanceRefKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  // Get size before changing pointers
  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);

  oop_oop_iterate_ref_processing<nv>(obj, closure);

  return size;
}
#endif // INCLUDE_ALL_GCS

template <bool nv, class OopClosureType>
int InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  // Get size before changing pointers
  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);

  oop_oop_iterate_ref_processing_bounded<nv>(obj, closure, mr);

  return size;
}

// Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
|
||||
// all closures. Macros calling macros above for each oop size.
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
|
||||
#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int InstanceRefKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
#define ALL_INSTANCE_REF_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
InstanceRefKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
|
||||
|
||||
#endif // SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP
|
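Aside: the AlwaysContains and MrContains helpers in the header above let one templated reference-processing routine serve both the unbounded and the MemRegion-bounded iterators. A small stand-alone analogue of the predicate pattern (illustrative types only, not the HotSpot classes):

// Sketch of the "contains" predicate pattern; MemRegion is replaced here by raw bounds.
struct SketchAlwaysContains {
  template <typename T> bool operator()(T* /*p*/) const { return true; }
};

struct SketchRangeContains {
  const char* _lo;
  const char* _hi;
  SketchRangeContains(const void* lo, const void* hi)
    : _lo(static_cast<const char*>(lo)), _hi(static_cast<const char*>(hi)) {}
  template <typename T> bool operator()(T* p) const {
    const char* a = reinterpret_cast<const char*>(p);
    return a >= _lo && a < _hi;              // only visit fields that fall inside the bounds
  }
};

template <typename T, typename Contains, typename DoOop>
void process_field(T* addr, const Contains& contains, DoOop do_oop) {
  if (contains(addr)) {                      // SketchAlwaysContains makes this a no-op filter
    do_oop(addr);
  }
}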
@ -27,7 +27,6 @@
|
||||
#include "classfile/dictionary.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "memory/heapInspection.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
@ -43,9 +42,6 @@
|
||||
#include "utilities/stack.inline.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
void Klass::set_name(Symbol* n) {
|
||||
|
@ -25,21 +25,14 @@
|
||||
#ifndef SHARE_VM_OOPS_KLASS_HPP
|
||||
#define SHARE_VM_OOPS_KLASS_HPP
|
||||
|
||||
#include "memory/genOopClosures.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
#include "oops/klassPS.hpp"
|
||||
#include "oops/metadata.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "trace/traceMacros.hpp"
|
||||
#include "utilities/accessFlags.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.hpp"
|
||||
#include "gc_implementation/parNew/parOopClosures.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
//
|
||||
// A Klass provides:
|
||||
@ -61,6 +54,7 @@ template <class T> class GrowableArray;
|
||||
class ClassLoaderData;
|
||||
class klassVtable;
|
||||
class ParCompactionManager;
|
||||
class PSPromotionManager;
|
||||
class KlassSizeStats;
|
||||
class fieldDescriptor;
|
||||
|
||||
@ -478,13 +472,6 @@ protected:
|
||||
// and the package separators as '/'.
|
||||
virtual const char* signature_name() const;
|
||||
|
||||
// garbage collection support
|
||||
virtual void oop_follow_contents(oop obj) = 0;
|
||||
virtual int oop_adjust_pointers(oop obj) = 0;
|
||||
|
||||
// Parallel Scavenge and Parallel Old
|
||||
PARALLEL_GC_DECLS_PV
|
||||
|
||||
// type testing operations
|
||||
protected:
|
||||
virtual bool oop_is_instance_slow() const { return false; }
|
||||
@ -581,60 +568,35 @@ protected:
|
||||
clean_weak_klass_links(is_alive, false /* clean_alive_klasses */);
|
||||
}
|
||||
|
||||
// iterators
|
||||
virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0;
|
||||
virtual int oop_oop_iterate_v(oop obj, ExtendedOopClosure* blk) {
|
||||
return oop_oop_iterate(obj, blk);
|
||||
}
|
||||
// GC specific object visitors
|
||||
//
|
||||
// Mark Sweep
|
||||
virtual void oop_ms_follow_contents(oop obj) = 0;
|
||||
virtual int oop_ms_adjust_pointers(oop obj) = 0;
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel Scavenge
|
||||
virtual void oop_ps_push_contents( oop obj, PSPromotionManager* pm) = 0;
|
||||
// Parallel Compact
|
||||
virtual void oop_pc_follow_contents(oop obj, ParCompactionManager* cm) = 0;
|
||||
virtual void oop_pc_update_pointers(oop obj) = 0;
|
||||
#endif
|
||||
|
||||
// Iterators specialized to particular subtypes
|
||||
// of ExtendedOopClosure, to avoid closure virtual calls.
|
||||
#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0; \
|
||||
/* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */ \
|
||||
virtual int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) = 0;
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
// In case we don't have a specialized backward scanner use forward
|
||||
// iteration.
|
||||
virtual int oop_oop_iterate_backwards_v(oop obj, ExtendedOopClosure* blk) {
|
||||
return oop_oop_iterate_v(obj, blk);
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
#define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
|
||||
|
||||
// Iterates "blk" over all the oops in "obj" (of type "this") within "mr".
|
||||
// (I don't see why the _m should be required, but without it the Solaris
|
||||
// C++ gives warning messages about overridings of the "oop_oop_iterate"
|
||||
// defined above "hiding" this virtual function. (DLD, 6/20/00)) */
|
||||
virtual int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) = 0;
|
||||
virtual int oop_oop_iterate_v_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
|
||||
return oop_oop_iterate_m(obj, blk, mr);
|
||||
}
|
||||
|
||||
// Versions of the above iterators specialized to particular subtypes
|
||||
// of OopClosure, to avoid closure virtual calls.
|
||||
#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk) { \
|
||||
/* Default implementation reverts to general version. */ \
|
||||
return oop_oop_iterate(obj, blk); \
|
||||
} \
|
||||
\
|
||||
/* Iterates "blk" over all the oops in "obj" (of type "this") within "mr". \
|
||||
(I don't see why the _m should be required, but without it the Solaris \
|
||||
C++ gives warning messages about overridings of the "oop_oop_iterate" \
|
||||
defined above "hiding" this virtual function. (DLD, 6/20/00)) */ \
|
||||
virtual int oop_oop_iterate##nv_suffix##_m(oop obj, \
|
||||
OopClosureType* blk, \
|
||||
MemRegion mr) { \
|
||||
return oop_oop_iterate_m(obj, blk, mr); \
|
||||
}
|
||||
|
||||
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
|
||||
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, \
|
||||
OopClosureType* blk) { \
|
||||
/* Default implementation reverts to general version. */ \
|
||||
return oop_oop_iterate_backwards_v(obj, blk); \
|
||||
}
|
||||
|
||||
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
virtual void array_klasses_do(void f(Klass* k)) {}
|
||||
|
@ -1,54 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_OOPS_KLASSPS_HPP
|
||||
#define SHARE_VM_OOPS_KLASSPS_HPP
|
||||
|
||||
// Expands to Parallel Scavenge and Parallel Old declarations
|
||||
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define PARALLEL_GC_DECLS \
|
||||
virtual void oop_push_contents(PSPromotionManager* pm, oop obj); \
|
||||
/* Parallel Old GC support \
|
||||
\
|
||||
The 2-arg version of oop_update_pointers is for objects that are \
|
||||
known not to cross chunk boundaries. The 4-arg version is for \
|
||||
objects that do (or may) cross chunk boundaries; it updates only those \
|
||||
oops that are in the region [beg_addr, end_addr). */ \
|
||||
virtual void oop_follow_contents(ParCompactionManager* cm, oop obj); \
|
||||
virtual int oop_update_pointers(ParCompactionManager* cm, oop obj);
|
||||
|
||||
// Pure virtual version for klass.hpp
|
||||
#define PARALLEL_GC_DECLS_PV \
|
||||
virtual void oop_push_contents(PSPromotionManager* pm, oop obj) = 0; \
|
||||
virtual void oop_follow_contents(ParCompactionManager* cm, oop obj) = 0; \
|
||||
virtual int oop_update_pointers(ParCompactionManager* cm, oop obj) = 0;
|
||||
#else // INCLUDE_ALL_GCS
|
||||
#define PARALLEL_GC_DECLS
|
||||
#define PARALLEL_GC_DECLS_PV
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
#endif // SHARE_VM_OOPS_KLASSPS_HPP
|
@ -26,9 +26,7 @@
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "memory/genOopClosures.inline.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
@ -45,17 +43,6 @@
|
||||
#include "runtime/orderAccess.inline.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
|
||||
#include "gc_implementation/g1/g1RemSet.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
|
||||
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
ObjArrayKlass* ObjArrayKlass::allocate(ClassLoaderData* loader_data, int n, KlassHandle klass_handle, Symbol* name, TRAPS) {
|
||||
assert(ObjArrayKlass::header_size() <= InstanceKlass::header_size(),
|
||||
@ -410,179 +397,6 @@ void ObjArrayKlass::initialize(TRAPS) {
|
||||
bottom_klass()->initialize(THREAD); // dispatches to either InstanceKlass or TypeArrayKlass
|
||||
}
|
||||
|
||||
#define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \
|
||||
{ \
|
||||
T* p = (T*)(a)->base(); \
|
||||
T* const end = p + (a)->length(); \
|
||||
while (p < end) { \
|
||||
do_oop; \
|
||||
p++; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \
|
||||
{ \
|
||||
T* const l = (T*)(low); \
|
||||
T* const h = (T*)(high); \
|
||||
T* p = (T*)(a)->base(); \
|
||||
T* end = p + (a)->length(); \
|
||||
if (p < l) p = l; \
|
||||
if (end > h) end = h; \
|
||||
while (p < end) { \
|
||||
do_oop; \
|
||||
++p; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define ObjArrayKlass_OOP_ITERATE(a, p, do_oop) \
|
||||
if (UseCompressedOops) { \
|
||||
ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
|
||||
a, p, do_oop) \
|
||||
} else { \
|
||||
ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop, \
|
||||
a, p, do_oop) \
|
||||
}
|
||||
|
||||
#define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop) \
|
||||
if (UseCompressedOops) { \
|
||||
ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
|
||||
a, p, low, high, do_oop) \
|
||||
} else { \
|
||||
ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
|
||||
a, p, low, high, do_oop) \
|
||||
}
|
||||
|
||||
void ObjArrayKlass::oop_follow_contents(oop obj) {
|
||||
assert (obj->is_array(), "obj must be array");
|
||||
MarkSweep::follow_klass(obj->klass());
|
||||
if (UseCompressedOops) {
|
||||
objarray_follow_contents<narrowOop>(obj, 0);
|
||||
} else {
|
||||
objarray_follow_contents<oop>(obj, 0);
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm,
|
||||
oop obj) {
|
||||
assert(obj->is_array(), "obj must be array");
|
||||
PSParallelCompact::follow_klass(cm, obj->klass());
|
||||
if (UseCompressedOops) {
|
||||
objarray_follow_contents<narrowOop>(cm, obj, 0);
|
||||
} else {
|
||||
objarray_follow_contents<oop>(cm, obj, 0);
|
||||
}
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \
|
||||
OopClosureType* closure) { \
|
||||
assert (obj->is_array(), "obj must be array"); \
|
||||
objArrayOop a = objArrayOop(obj); \
|
||||
/* Get size before changing pointers. */ \
|
||||
/* Don't call size() or oop_size() since that is a virtual call. */ \
|
||||
int size = a->object_size(); \
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
closure->do_klass##nv_suffix(obj->klass()); \
|
||||
} \
|
||||
ObjArrayKlass_OOP_ITERATE(a, p, (closure)->do_oop##nv_suffix(p)) \
|
||||
return size; \
|
||||
}
|
||||
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
|
||||
OopClosureType* closure, \
|
||||
MemRegion mr) { \
|
||||
assert(obj->is_array(), "obj must be array"); \
|
||||
objArrayOop a = objArrayOop(obj); \
|
||||
/* Get size before changing pointers. */ \
|
||||
/* Don't call size() or oop_size() since that is a virtual call */ \
|
||||
int size = a->object_size(); \
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
/* SSS: Do we need to pass down mr here? */ \
|
||||
closure->do_klass##nv_suffix(a->klass()); \
|
||||
} \
|
||||
ObjArrayKlass_BOUNDED_OOP_ITERATE( \
|
||||
a, p, mr.start(), mr.end(), (closure)->do_oop##nv_suffix(p)) \
|
||||
return size; \
|
||||
}
|
||||
|
||||
// Like oop_oop_iterate but only iterates over a specified range and only used
|
||||
// for objArrayOops.
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, \
|
||||
OopClosureType* closure, \
|
||||
int start, int end) { \
|
||||
assert(obj->is_array(), "obj must be array"); \
|
||||
objArrayOop a = objArrayOop(obj); \
|
||||
/* Get size before changing pointers. */ \
|
||||
/* Don't call size() or oop_size() since that is a virtual call */ \
|
||||
int size = a->object_size(); \
|
||||
if (UseCompressedOops) { \
|
||||
HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<narrowOop>(start);\
|
||||
/* this might be weird if end needs to be aligned on HeapWord boundary */ \
|
||||
HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end); \
|
||||
MemRegion mr(low, high); \
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
/* SSS: Do we need to pass down mr here? */ \
|
||||
closure->do_klass##nv_suffix(a->klass()); \
|
||||
} \
|
||||
ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
|
||||
a, p, low, high, (closure)->do_oop##nv_suffix(p)) \
|
||||
} else { \
|
||||
HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<oop>(start); \
|
||||
HeapWord* high = (HeapWord*)((oop*)a->base() + end); \
|
||||
MemRegion mr(low, high); \
|
||||
if_do_metadata_checked(closure, nv_suffix) { \
|
||||
/* SSS: Do we need to pass down mr here? */ \
|
||||
closure->do_klass##nv_suffix(a->klass()); \
|
||||
} \
|
||||
ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
|
||||
a, p, low, high, (closure)->do_oop##nv_suffix(p)) \
|
||||
} \
|
||||
return size; \
|
||||
}
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
|
||||
|
||||
int ObjArrayKlass::oop_adjust_pointers(oop obj) {
|
||||
assert(obj->is_objArray(), "obj must be obj array");
|
||||
objArrayOop a = objArrayOop(obj);
|
||||
// Get size before changing pointers.
|
||||
// Don't call size() or oop_size() since that is a virtual call.
|
||||
int size = a->object_size();
|
||||
ObjArrayKlass_OOP_ITERATE(a, p, MarkSweep::adjust_pointer(p))
|
||||
return size;
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void ObjArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
|
||||
assert(obj->is_objArray(), "obj must be obj array");
|
||||
ObjArrayKlass_OOP_ITERATE( \
|
||||
objArrayOop(obj), p, \
|
||||
if (PSScavenge::should_scavenge(p)) { \
|
||||
pm->claim_or_forward_depth(p); \
|
||||
})
|
||||
}
|
||||
|
||||
int ObjArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
|
||||
assert (obj->is_objArray(), "obj must be obj array");
|
||||
objArrayOop a = objArrayOop(obj);
|
||||
int size = a->object_size();
|
||||
ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
|
||||
return size;
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// JVM support
|
||||
|
||||
jint ObjArrayKlass::compute_modifier_flags(TRAPS) const {
|
||||
|
@ -26,7 +26,6 @@
|
||||
#define SHARE_VM_OOPS_OBJARRAYKLASS_HPP
|
||||
|
||||
#include "classfile/classLoaderData.hpp"
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
#include "oops/arrayKlass.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
@ -103,28 +102,67 @@ class ObjArrayKlass : public ArrayKlass {
|
||||
// Initialization (virtual from Klass)
|
||||
void initialize(TRAPS);
|
||||
|
||||
// Garbage collection
|
||||
void oop_follow_contents(oop obj);
|
||||
inline void oop_follow_contents(oop obj, int index);
|
||||
template <class T> inline void objarray_follow_contents(oop obj, int index);
|
||||
|
||||
int oop_adjust_pointers(oop obj);
|
||||
|
||||
// Parallel Scavenge and Parallel Old
|
||||
PARALLEL_GC_DECLS
|
||||
// GC specific object visitors
|
||||
//
|
||||
// Mark Sweep
|
||||
void oop_ms_follow_contents(oop obj);
|
||||
int oop_ms_adjust_pointers(oop obj);
|
||||
#if INCLUDE_ALL_GCS
|
||||
inline void oop_follow_contents(ParCompactionManager* cm, oop obj, int index);
|
||||
template <class T> inline void
|
||||
objarray_follow_contents(ParCompactionManager* cm, oop obj, int index);
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
// Parallel Scavenge
|
||||
void oop_ps_push_contents( oop obj, PSPromotionManager* pm);
|
||||
// Parallel Compact
|
||||
void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
|
||||
void oop_pc_update_pointers(oop obj);
|
||||
#endif
|
||||
|
||||
// Oop fields (and metadata) iterators
|
||||
// [nv = true] Use non-virtual calls to do_oop_nv.
|
||||
// [nv = false] Use virtual calls to do_oop.
|
||||
//
|
||||
// The ObjArrayKlass iterators also visit the Object's klass.
|
||||
|
||||
private:
|
||||
|
||||
// Iterate over oop elements and metadata.
|
||||
template <bool nv, typename OopClosureType>
|
||||
inline int oop_oop_iterate(oop obj, OopClosureType* closure);
|
||||
|
||||
// Iterate over oop elements within mr, and metadata.
|
||||
template <bool nv, typename OopClosureType>
|
||||
inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Iterate over oop elements with indices within [start, end), and metadata.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline int oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
|
||||
|
||||
// Iterate over oop elements within [start, end), and metadata.
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end);
|
||||
|
||||
public:
|
||||
// Iterate over all oop elements.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure);
|
||||
|
||||
private:
|
||||
// Iterate over all oop elements.
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_elements_specialized(objArrayOop a, OopClosureType* closure);
|
||||
|
||||
// Iterate over all oop elements with indices within mr.
|
||||
template <bool nv, class OopClosureType>
|
||||
inline void oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, MemRegion mr);
|
||||
|
||||
// Iterate over oop elements within [low, high).
|
||||
// Specialized for [T = oop] or [T = narrowOop].
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
inline void oop_oop_iterate_elements_specialized_bounded(objArrayOop a, OopClosureType* closure, void* low, void* high);
|
||||
|
||||
|
||||
public:
|
||||
|
||||
// Iterators
|
||||
int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
|
||||
return oop_oop_iterate_v(obj, blk);
|
||||
}
|
||||
int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
|
||||
return oop_oop_iterate_v_m(obj, blk, mr);
|
||||
}
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \
|
||||
@ -135,6 +173,14 @@ class ObjArrayKlass : public ArrayKlass {
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// JVM support
|
||||
jint compute_modifier_flags(TRAPS) const;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,78 +25,165 @@
|
||||
#ifndef SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
|
||||
#define SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "memory/memRegion.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "oops/objArrayKlass.hpp"
|
||||
#include "oops/objArrayOop.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
void ObjArrayKlass::oop_follow_contents(oop obj, int index) {
|
||||
if (UseCompressedOops) {
|
||||
objarray_follow_contents<narrowOop>(obj, index);
|
||||
} else {
|
||||
objarray_follow_contents<oop>(obj, index);
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
void ObjArrayKlass::oop_oop_iterate_elements_specialized(objArrayOop a, OopClosureType* closure) {
|
||||
T* p = (T*)a->base();
|
||||
T* const end = p + a->length();
|
||||
|
||||
for (;p < end; p++) {
|
||||
Devirtualizer<nv>::do_oop(closure, p);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void ObjArrayKlass::objarray_follow_contents(oop obj, int index) {
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
void ObjArrayKlass::oop_oop_iterate_elements_specialized_bounded(
|
||||
objArrayOop a, OopClosureType* closure, void* low, void* high) {
|
||||
|
||||
T* const l = (T*)low;
|
||||
T* const h = (T*)high;
|
||||
|
||||
T* p = (T*)a->base();
|
||||
T* end = p + a->length();
|
||||
|
||||
if (p < l) {
|
||||
p = l;
|
||||
}
|
||||
if (end > h) {
|
||||
end = h;
|
||||
}
|
||||
|
||||
for (;p < end; ++p) {
|
||||
Devirtualizer<nv>::do_oop(closure, p);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
void ObjArrayKlass::oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure) {
|
||||
if (UseCompressedOops) {
|
||||
oop_oop_iterate_elements_specialized<nv, narrowOop>(a, closure);
|
||||
} else {
|
||||
oop_oop_iterate_elements_specialized<nv, oop>(a, closure);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool nv, class OopClosureType>
|
||||
void ObjArrayKlass::oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, MemRegion mr) {
|
||||
if (UseCompressedOops) {
|
||||
oop_oop_iterate_elements_specialized_bounded<nv, narrowOop>(a, closure, mr.start(), mr.end());
|
||||
} else {
|
||||
oop_oop_iterate_elements_specialized_bounded<nv, oop>(a, closure, mr.start(), mr.end());
|
||||
}
|
||||
}
|
||||
|
||||
template <bool nv, typename OopClosureType>
|
||||
int ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
|
||||
assert (obj->is_array(), "obj must be array");
|
||||
objArrayOop a = objArrayOop(obj);
|
||||
const size_t len = size_t(a->length());
|
||||
const size_t beg_index = size_t(index);
|
||||
assert(beg_index < len || len == 0, "index too large");
|
||||
|
||||
const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
|
||||
const size_t end_index = beg_index + stride;
|
||||
T* const base = (T*)a->base();
|
||||
T* const beg = base + beg_index;
|
||||
T* const end = base + end_index;
|
||||
|
||||
// Push the non-NULL elements of the next stride on the marking stack.
|
||||
for (T* e = beg; e < end; e++) {
|
||||
MarkSweep::mark_and_push<T>(e);
|
||||
// Get size before changing pointers.
|
||||
// Don't call size() or oop_size() since that is a virtual call.
|
||||
int size = a->object_size();
|
||||
if (Devirtualizer<nv>::do_metadata(closure)) {
|
||||
Devirtualizer<nv>::do_klass(closure, obj->klass());
|
||||
}
|
||||
|
||||
if (end_index < len) {
|
||||
MarkSweep::push_objarray(a, end_index); // Push the continuation.
|
||||
oop_oop_iterate_elements<nv>(a, closure);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
template <bool nv, typename OopClosureType>
|
||||
int ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
|
||||
assert(obj->is_array(), "obj must be array");
|
||||
objArrayOop a = objArrayOop(obj);
|
||||
|
||||
// Get size before changing pointers.
|
||||
// Don't call size() or oop_size() since that is a virtual call
|
||||
int size = a->object_size();
|
||||
|
||||
if (Devirtualizer<nv>::do_metadata(closure)) {
|
||||
Devirtualizer<nv>::do_klass(closure, a->klass());
|
||||
}
|
||||
|
||||
oop_oop_iterate_elements_bounded<nv>(a, closure, mr);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
template <bool nv, typename T, class OopClosureType>
|
||||
void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end) {
|
||||
if (Devirtualizer<nv>::do_metadata(closure)) {
|
||||
Devirtualizer<nv>::do_klass(closure, a->klass());
|
||||
}
|
||||
|
||||
T* low = start == 0 ? cast_from_oop<T*>(a) : a->obj_at_addr<T>(start);
|
||||
T* high = (T*)a->base() + end;
|
||||
|
||||
oop_oop_iterate_elements_specialized_bounded<nv, T>(a, closure, low, high);
|
||||
}
|
||||
|
||||
// Like oop_oop_iterate but only iterates over a specified range and only used
|
||||
// for objArrayOops.
|
||||
template <bool nv, class OopClosureType>
|
||||
int ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
|
||||
assert(obj->is_array(), "obj must be array");
|
||||
objArrayOop a = objArrayOop(obj);
|
||||
|
||||
// Get size before changing pointers.
|
||||
// Don't call size() or oop_size() since that is a virtual call
|
||||
int size = a->object_size();
|
||||
|
||||
if (UseCompressedOops) {
|
||||
oop_oop_iterate_range_specialized<nv, narrowOop>(a, closure, start, end);
|
||||
} else {
|
||||
oop_oop_iterate_range_specialized<nv, oop>(a, closure, start, end);
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj,
|
||||
int index) {
|
||||
if (UseCompressedOops) {
|
||||
objarray_follow_contents<narrowOop>(cm, obj, index);
|
||||
} else {
|
||||
objarray_follow_contents<oop>(cm, obj, index);
|
||||
}
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
int ObjArrayKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
/* No reverse implementation ATM. */ \
|
||||
return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
|
||||
}
|
||||
|
||||
template <class T>
|
||||
void ObjArrayKlass::objarray_follow_contents(ParCompactionManager* cm, oop obj,
|
||||
int index) {
|
||||
objArrayOop a = objArrayOop(obj);
|
||||
const size_t len = size_t(a->length());
|
||||
const size_t beg_index = size_t(index);
|
||||
assert(beg_index < len || len == 0, "index too large");
|
||||
|
||||
const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
|
||||
const size_t end_index = beg_index + stride;
|
||||
T* const base = (T*)a->base();
|
||||
T* const beg = base + beg_index;
|
||||
T* const end = base + end_index;
|
||||
|
||||
// Push the non-NULL elements of the next stride on the marking stack.
|
||||
for (T* e = beg; e < end; e++) {
|
||||
PSParallelCompact::mark_and_push<T>(cm, e);
|
||||
}
|
||||
|
||||
if (end_index < len) {
|
||||
cm->push_objarray(a, end_index); // Push the continuation.
|
||||
}
|
||||
#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) { \
|
||||
return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end); \
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
|
||||
#define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
ObjArrayKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r( OopClosureType, nv_suffix)
|
||||
|
||||
|
||||
#endif // SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
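The templates above funnel every element visit through Devirtualizer<nv>. A minimal standalone sketch of that idea follows; the types here (OopClosure as a stand-in base, CountingClosure, iterate_elements) are invented for illustration and are not the real HotSpot definitions:

#include <cstdio>

struct OopClosure {                        // generic closure: virtual dispatch
  virtual void do_oop(int* p) { std::printf("virtual visit %d\n", *p); }
  virtual ~OopClosure() {}
};

struct CountingClosure : public OopClosure {   // "specialized" closure
  int count = 0;
  void do_oop_nv(int* p) { count += (*p != 0); }   // non-virtual fast path
  virtual void do_oop(int* p) { do_oop_nv(p); }    // virtual path forwards
};

template <bool nv> struct Devirtualizer;

template <> struct Devirtualizer<true> {   // statically bound closure call
  template <typename ClosureType>
  static void do_oop(ClosureType* cl, int* p) { cl->do_oop_nv(p); }
};

template <> struct Devirtualizer<false> {  // generic virtual closure call
  template <typename ClosureType>
  static void do_oop(ClosureType* cl, int* p) { cl->do_oop(p); }
};

// Rough analogue of oop_oop_iterate_elements_specialized: walk a span of
// slots and hand each one to the closure through Devirtualizer<nv>.
template <bool nv, typename ClosureType>
void iterate_elements(int* base, int len, ClosureType* cl) {
  for (int* p = base; p < base + len; ++p) {
    Devirtualizer<nv>::do_oop(cl, p);
  }
}

int main() {
  int slots[4] = {1, 2, 3, 4};
  CountingClosure cc;
  iterate_elements<true>(slots, 4, &cc);        // inlinable do_oop_nv calls
  std::printf("counted %d non-null slots\n", cc.count);
  OopClosure generic;
  iterate_elements<false>(slots, 4, &generic);  // falls back to virtual do_oop
  return 0;
}

With nv = true the closure call is resolved at compile time and can be inlined into the loop; with nv = false the same loop falls back to the virtual do_oop, which is the choice the nvs_to_bool(nv_suffix) wrappers above make per closure type.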
|
||||
|
@ -298,19 +298,6 @@ class oopDesc {
|
||||
|
||||
// garbage collection
|
||||
bool is_gc_marked() const;
|
||||
// Apply "MarkSweep::mark_and_push" to (the address of) every non-NULL
|
||||
// reference field in "this".
|
||||
void follow_contents(void);
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel Scavenge
|
||||
void push_contents(PSPromotionManager* pm);
|
||||
|
||||
// Parallel Old
|
||||
void update_contents(ParCompactionManager* cm);
|
||||
|
||||
void follow_contents(ParCompactionManager* cm);
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
bool is_scavengable() const;
|
||||
|
||||
@ -334,9 +321,6 @@ class oopDesc {
|
||||
uint age() const;
|
||||
void incr_age();
|
||||
|
||||
// Adjust all pointers in this object to point at its forwarded location and
|
||||
// return the size of this oop. This is used by the MarkSweep collector.
|
||||
int adjust_pointers();
|
||||
|
||||
// mark-sweep support
|
||||
void follow_body(int begin, int end);
|
||||
@ -345,6 +329,22 @@ class oopDesc {
|
||||
static BarrierSet* bs() { return _bs; }
|
||||
static void set_bs(BarrierSet* bs) { _bs = bs; }
|
||||
|
||||
// Garbage Collection support
|
||||
|
||||
// Mark Sweep
|
||||
void ms_follow_contents();
|
||||
// Adjust all pointers in this object to point at its forwarded location and
|
||||
// return the size of this oop. This is used by the MarkSweep collector.
|
||||
int ms_adjust_pointers();
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel Compact
|
||||
void pc_follow_contents(ParCompactionManager* pc);
|
||||
void pc_update_contents();
|
||||
// Parallel Scavenge
|
||||
void ps_push_contents(PSPromotionManager* pm);
|
||||
#endif
|
||||
|
||||
|
||||
// iterators, returns size of object
|
||||
#define OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_iterate(OopClosureType* blk); \
|
||||
|
@ -26,13 +26,11 @@
|
||||
#define SHARE_VM_OOPS_OOP_INLINE_HPP
|
||||
|
||||
#include "gc_implementation/shared/ageTable.hpp"
|
||||
#include "gc_implementation/shared/markSweep.inline.hpp"
|
||||
#include "gc_interface/collectedHeap.inline.hpp"
|
||||
#include "memory/barrierSet.inline.hpp"
|
||||
#include "memory/cardTableModRefBS.hpp"
|
||||
#include "memory/genCollectedHeap.hpp"
|
||||
#include "memory/generation.hpp"
|
||||
#include "memory/specialized_oop_closures.hpp"
|
||||
#include "oops/arrayKlass.hpp"
|
||||
#include "oops/arrayOop.hpp"
|
||||
#include "oops/klass.inline.hpp"
|
||||
@ -592,11 +590,6 @@ inline bool oopDesc::is_unlocked_oop() const {
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
||||
inline void oopDesc::follow_contents(void) {
|
||||
assert (is_gc_marked(), "should be marked");
|
||||
klass()->oop_follow_contents(this);
|
||||
}
|
||||
|
||||
inline bool oopDesc::is_scavengable() const {
|
||||
return Universe::heap()->is_scavengable(this);
|
||||
}
|
||||
@ -706,21 +699,49 @@ inline intptr_t oopDesc::identity_hash() {
|
||||
}
|
||||
}
|
||||
|
||||
inline int oopDesc::adjust_pointers() {
|
||||
inline void oopDesc::ms_follow_contents() {
|
||||
klass()->oop_ms_follow_contents(this);
|
||||
}
|
||||
|
||||
inline int oopDesc::ms_adjust_pointers() {
|
||||
debug_only(int check_size = size());
|
||||
int s = klass()->oop_adjust_pointers(this);
|
||||
int s = klass()->oop_ms_adjust_pointers(this);
|
||||
assert(s == check_size, "should be the same");
|
||||
return s;
|
||||
}
|
||||
|
||||
#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
inline int oopDesc::oop_iterate(OopClosureType* blk) { \
|
||||
return klass()->oop_oop_iterate##nv_suffix(this, blk); \
|
||||
} \
|
||||
\
|
||||
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \
|
||||
return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \
|
||||
#if INCLUDE_ALL_GCS
|
||||
inline void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
|
||||
klass()->oop_pc_follow_contents(this, cm);
|
||||
}
|
||||
|
||||
inline void oopDesc::pc_update_contents() {
|
||||
Klass* k = klass();
|
||||
if (!k->oop_is_typeArray()) {
|
||||
// It might contain oops beyond the header, so take the virtual call.
|
||||
k->oop_pc_update_pointers(this);
|
||||
}
|
||||
// Else skip it. The TypeArrayKlass in the header never needs scavenging.
|
||||
}
|
||||
|
||||
inline void oopDesc::ps_push_contents(PSPromotionManager* pm) {
|
||||
Klass* k = klass();
|
||||
if (!k->oop_is_typeArray()) {
|
||||
// It might contain oops beyond the header, so take the virtual call.
|
||||
k->oop_ps_push_contents(this, pm);
|
||||
}
|
||||
// Else skip it. The TypeArrayKlass in the header never needs scavenging.
|
||||
}
|
||||
#endif
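A small sketch of the dispatch pattern in this hunk, using invented types (FakeOop, FakeKlass, TypeArrayFakeKlass), not HotSpot code: the oop forwards GC work to its Klass, and the type-array check lets pc_update_contents / ps_push_contents skip the virtual call entirely for arrays of primitives.

#include <cstdio>

struct FakeOop;

struct FakeKlass {
  virtual bool oop_is_typeArray() const { return false; }
  virtual void oop_pc_update_pointers(FakeOop* obj) {
    (void)obj;
    std::printf("updating oops in instance/objArray\n");
  }
  virtual ~FakeKlass() {}
};

struct TypeArrayFakeKlass : public FakeKlass {
  virtual bool oop_is_typeArray() const { return true; }
  // oop_pc_update_pointers is never reached for type arrays.
};

struct FakeOop {
  FakeKlass* k;
  void pc_update_contents() {
    if (!k->oop_is_typeArray()) {
      // Might contain oops beyond the header, so take the virtual call.
      k->oop_pc_update_pointers(this);
    }
    // Else skip it: a type array holds only primitives, nothing to update.
  }
};

int main() {
  FakeKlass obj_klass;
  TypeArrayFakeKlass ta_klass;
  FakeOop a = { &obj_klass };
  FakeOop b = { &ta_klass };
  a.pc_update_contents();   // takes the virtual call
  b.pc_update_contents();   // skips it
  return 0;
}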
|
||||
|
||||
#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
inline int oopDesc::oop_iterate(OopClosureType* blk) { \
|
||||
return klass()->oop_oop_iterate##nv_suffix(this, blk); \
|
||||
} \
|
||||
\
|
||||
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \
|
||||
return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \
|
||||
}
|
||||
|
||||
|
||||
@ -736,18 +757,21 @@ inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
|
||||
return oop_iterate(&cl, mr);
|
||||
}
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
|
||||
return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
|
||||
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
|
||||
return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
|
||||
}
|
||||
#else
|
||||
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix) \
|
||||
OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
|
||||
|
||||
#endif // SHARE_VM_OOPS_OOP_INLINE_HPP
|
||||
|
@ -1,57 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
|
||||
#define SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
|
||||
|
||||
#include "runtime/atomic.inline.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc_implementation/parNew/parNewGeneration.hpp"
|
||||
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
|
||||
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
inline void oopDesc::update_contents(ParCompactionManager* cm) {
|
||||
// The klass field must be updated before anything else
|
||||
// can be done.
|
||||
DEBUG_ONLY(Klass* original_klass = klass());
|
||||
|
||||
Klass* new_klass = klass();
|
||||
if (!new_klass->oop_is_typeArray()) {
|
||||
// It might contain oops beyond the header, so take the virtual call.
|
||||
new_klass->oop_update_pointers(cm, this);
|
||||
}
|
||||
// Else skip it. The TypeArrayKlass in the header never needs scavenging.
|
||||
}
|
||||
|
||||
inline void oopDesc::follow_contents(ParCompactionManager* cm) {
|
||||
assert (PSParallelCompact::mark_bitmap()->is_marked(this),
|
||||
"should be marked");
|
||||
klass()->oop_follow_contents(cm, this);
|
||||
}
|
||||
|
||||
#endif // SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
|
@ -36,7 +36,7 @@
|
||||
#include "oops/klass.inline.hpp"
|
||||
#include "oops/objArrayKlass.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/typeArrayKlass.hpp"
|
||||
#include "oops/typeArrayKlass.inline.hpp"
|
||||
#include "oops/typeArrayOop.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/orderAccess.inline.hpp"
|
||||
@ -204,57 +204,6 @@ int TypeArrayKlass::oop_size(oop obj) const {
|
||||
return t->object_size();
|
||||
}
|
||||
|
||||
void TypeArrayKlass::oop_follow_contents(oop obj) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void TypeArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
int TypeArrayKlass::oop_adjust_pointers(oop obj) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
typeArrayOop t = typeArrayOop(obj);
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
return t->object_size();
|
||||
}
|
||||
|
||||
int TypeArrayKlass::oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
typeArrayOop t = typeArrayOop(obj);
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
return t->object_size();
|
||||
}
|
||||
|
||||
int TypeArrayKlass::oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
typeArrayOop t = typeArrayOop(obj);
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
return t->object_size();
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
void TypeArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
|
||||
ShouldNotReachHere();
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
}
|
||||
|
||||
int
|
||||
TypeArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
return typeArrayOop(obj)->object_size();
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
void TypeArrayKlass::initialize(TRAPS) {
|
||||
// Nothing to do. Having this function is handy since objArrayKlasses can be
|
||||
// initialized by calling initialize on their bottom_klass, see ObjArrayKlass::initialize
|
||||
|
@ -72,16 +72,46 @@ class TypeArrayKlass : public ArrayKlass {
|
||||
// Copying
|
||||
void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS);
|
||||
|
||||
// Iteration
|
||||
int oop_oop_iterate(oop obj, ExtendedOopClosure* blk);
|
||||
int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr);
|
||||
// GC specific object visitors
|
||||
//
|
||||
// Mark Sweep
|
||||
void oop_ms_follow_contents(oop obj);
|
||||
int oop_ms_adjust_pointers(oop obj);
|
||||
#if INCLUDE_ALL_GCS
|
||||
// Parallel Scavenge
|
||||
void oop_ps_push_contents( oop obj, PSPromotionManager* pm);
|
||||
// Parallel Compact
|
||||
void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
|
||||
void oop_pc_update_pointers(oop obj);
|
||||
#endif
|
||||
|
||||
// Garbage collection
|
||||
void oop_follow_contents(oop obj);
|
||||
int oop_adjust_pointers(oop obj);
|
||||
// Oop iterators. Since there are no oops in TypeArrayKlasses,
|
||||
// these functions only return the size of the object.
|
||||
|
||||
private:
|
||||
// The implementation used by all oop_oop_iterate functions in TypeArrayKlasses.
|
||||
inline int oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);
|
||||
|
||||
public:
|
||||
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \
|
||||
int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, \
|
||||
MemRegion mr); \
|
||||
int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, \
|
||||
int start, int end);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(TypeArrayKlass_OOP_OOP_ITERATE_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(TypeArrayKlass_OOP_OOP_ITERATE_DECL)
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
|
||||
int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
|
||||
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_1(TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
ALL_OOP_OOP_ITERATE_CLOSURES_2(TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Parallel Scavenge and Parallel Old
|
||||
PARALLEL_GC_DECLS
|
||||
|
||||
protected:
|
||||
// Find n'th dimensional array
|
||||
|
hotspot/src/share/vm/oops/typeArrayKlass.inline.hpp (new file, 73 lines)
@ -0,0 +1,73 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
|
||||
#define SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
|
||||
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/typeArrayKlass.hpp"
|
||||
#include "oops/typeArrayOop.hpp"
|
||||
|
||||
class ExtendedOopClosure;
|
||||
|
||||
inline int TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure) {
|
||||
assert(obj->is_typeArray(),"must be a type array");
|
||||
typeArrayOop t = typeArrayOop(obj);
|
||||
// Performance tweak: We skip iterating over the klass pointer since we
|
||||
// know that Universe::TypeArrayKlass never moves.
|
||||
return t->object_size();
|
||||
}
|
||||
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int TypeArrayKlass:: \
|
||||
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_impl(obj, closure); \
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int TypeArrayKlass:: \
|
||||
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
|
||||
return oop_oop_iterate_impl(obj, closure); \
|
||||
}
|
||||
#else
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
#endif
|
||||
|
||||
|
||||
#define TypeArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
|
||||
\
|
||||
int TypeArrayKlass:: \
|
||||
oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
|
||||
return oop_oop_iterate_impl(obj, closure); \
|
||||
}
|
||||
|
||||
#define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
|
||||
TypeArrayKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \
|
||||
TypeArrayKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \
|
||||
TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
|
||||
|
||||
#endif // SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
|
@ -122,7 +122,6 @@
|
||||
# include "memory/defNewGeneration.hpp"
|
||||
# include "memory/gcLocker.hpp"
|
||||
# include "memory/genCollectedHeap.hpp"
|
||||
# include "memory/genOopClosures.hpp"
|
||||
# include "memory/genRemSet.hpp"
|
||||
# include "memory/generation.hpp"
|
||||
# include "memory/heap.hpp"
|
||||
@ -133,7 +132,6 @@
|
||||
# include "memory/referencePolicy.hpp"
|
||||
# include "memory/referenceProcessor.hpp"
|
||||
# include "memory/resourceArea.hpp"
|
||||
# include "memory/sharedHeap.hpp"
|
||||
# include "memory/space.hpp"
|
||||
# include "memory/threadLocalAllocBuffer.hpp"
|
||||
# include "memory/threadLocalAllocBuffer.inline.hpp"
|
||||
@ -147,7 +145,6 @@
|
||||
# include "oops/instanceOop.hpp"
|
||||
# include "oops/instanceRefKlass.hpp"
|
||||
# include "oops/klass.hpp"
|
||||
# include "oops/klassPS.hpp"
|
||||
# include "oops/klassVtable.hpp"
|
||||
# include "oops/markOop.hpp"
|
||||
# include "oops/markOop.inline.hpp"
|
||||
@ -319,7 +316,6 @@
|
||||
# include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
|
||||
# include "gc_implementation/parallelScavenge/psGenerationCounters.hpp"
|
||||
# include "gc_implementation/parallelScavenge/psOldGen.hpp"
|
||||
# include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
|
||||
# include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
|
||||
# include "gc_implementation/parallelScavenge/psYoungGen.hpp"
|
||||
# include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
|
||||
|
@ -89,6 +89,10 @@ WB_ENTRY(jint, WB_GetVMPageSize(JNIEnv* env, jobject o))
|
||||
return os::vm_page_size();
|
||||
WB_END
|
||||
|
||||
WB_ENTRY(jlong, WB_GetVMLargePageSize(JNIEnv* env, jobject o))
|
||||
return os::large_page_size();
|
||||
WB_END
|
||||
|
||||
class WBIsKlassAliveClosure : public KlassClosure {
|
||||
Symbol* _name;
|
||||
bool _found;
|
||||
@ -1296,19 +1300,20 @@ void WhiteBox::register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
|
||||
#define CC (char*)
|
||||
|
||||
static JNINativeMethod methods[] = {
|
||||
{CC"getObjectAddress", CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectAddress },
|
||||
{CC"getObjectSize", CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectSize },
|
||||
{CC"isObjectInOldGen", CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen },
|
||||
{CC"getObjectAddress0", CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectAddress },
|
||||
{CC"getObjectSize0", CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectSize },
|
||||
{CC"isObjectInOldGen0", CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen },
|
||||
{CC"getHeapOopSize", CC"()I", (void*)&WB_GetHeapOopSize },
|
||||
{CC"getVMPageSize", CC"()I", (void*)&WB_GetVMPageSize },
|
||||
{CC"getVMLargePageSize", CC"()J", (void*)&WB_GetVMLargePageSize},
|
||||
{CC"isClassAlive0", CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive },
|
||||
{CC"parseCommandLine",
|
||||
{CC"parseCommandLine0",
|
||||
CC"(Ljava/lang/String;C[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
|
||||
(void*) &WB_ParseCommandLine
|
||||
},
|
||||
{CC"addToBootstrapClassLoaderSearch", CC"(Ljava/lang/String;)V",
|
||||
{CC"addToBootstrapClassLoaderSearch0", CC"(Ljava/lang/String;)V",
|
||||
(void*)&WB_AddToBootstrapClassLoaderSearch},
|
||||
{CC"addToSystemClassLoaderSearch", CC"(Ljava/lang/String;)V",
|
||||
{CC"addToSystemClassLoaderSearch0", CC"(Ljava/lang/String;)V",
|
||||
(void*)&WB_AddToSystemClassLoaderSearch},
|
||||
{CC"getCompressedOopsMaxHeapSize", CC"()J",
|
||||
(void*)&WB_GetCompressedOopsMaxHeapSize},
|
||||
@ -1318,7 +1323,7 @@ static JNINativeMethod methods[] = {
|
||||
{CC"stressVirtualSpaceResize",CC"(JJJ)I", (void*)&WB_StressVirtualSpaceResize},
|
||||
#if INCLUDE_ALL_GCS
|
||||
{CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark},
|
||||
{CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
|
||||
{CC"g1IsHumongous0", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
|
||||
{CC"g1NumMaxRegions", CC"()J", (void*)&WB_G1NumMaxRegions },
|
||||
{CC"g1NumFreeRegions", CC"()J", (void*)&WB_G1NumFreeRegions },
|
||||
{CC"g1RegionSize", CC"()I", (void*)&WB_G1RegionSize },
|
||||
@ -1339,29 +1344,29 @@ static JNINativeMethod methods[] = {
|
||||
#endif // INCLUDE_NMT
|
||||
{CC"deoptimizeFrames", CC"(Z)I", (void*)&WB_DeoptimizeFrames },
|
||||
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },
|
||||
{CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I",
|
||||
{CC"deoptimizeMethod0", CC"(Ljava/lang/reflect/Executable;Z)I",
|
||||
(void*)&WB_DeoptimizeMethod },
|
||||
{CC"isMethodCompiled", CC"(Ljava/lang/reflect/Executable;Z)Z",
|
||||
{CC"isMethodCompiled0", CC"(Ljava/lang/reflect/Executable;Z)Z",
|
||||
(void*)&WB_IsMethodCompiled },
|
||||
{CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;IZ)Z",
|
||||
{CC"isMethodCompilable0", CC"(Ljava/lang/reflect/Executable;IZ)Z",
|
||||
(void*)&WB_IsMethodCompilable},
|
||||
{CC"isMethodQueuedForCompilation",
|
||||
{CC"isMethodQueuedForCompilation0",
|
||||
CC"(Ljava/lang/reflect/Executable;)Z", (void*)&WB_IsMethodQueuedForCompilation},
|
||||
{CC"makeMethodNotCompilable",
|
||||
{CC"makeMethodNotCompilable0",
|
||||
CC"(Ljava/lang/reflect/Executable;IZ)V", (void*)&WB_MakeMethodNotCompilable},
|
||||
{CC"testSetDontInlineMethod",
|
||||
{CC"testSetDontInlineMethod0",
|
||||
CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetDontInlineMethod},
|
||||
{CC"getMethodCompilationLevel",
|
||||
{CC"getMethodCompilationLevel0",
|
||||
CC"(Ljava/lang/reflect/Executable;Z)I", (void*)&WB_GetMethodCompilationLevel},
|
||||
{CC"getMethodEntryBci",
|
||||
{CC"getMethodEntryBci0",
|
||||
CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_GetMethodEntryBci},
|
||||
{CC"getCompileQueueSize",
|
||||
CC"(I)I", (void*)&WB_GetCompileQueueSize},
|
||||
{CC"testSetForceInlineMethod",
|
||||
{CC"testSetForceInlineMethod0",
|
||||
CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetForceInlineMethod},
|
||||
{CC"enqueueMethodForCompilation",
|
||||
{CC"enqueueMethodForCompilation0",
|
||||
CC"(Ljava/lang/reflect/Executable;II)Z", (void*)&WB_EnqueueMethodForCompilation},
|
||||
{CC"clearMethodState",
|
||||
{CC"clearMethodState0",
|
||||
CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState},
|
||||
{CC"lockCompilation", CC"()V", (void*)&WB_LockCompilation},
|
||||
{CC"unlockCompilation", CC"()V", (void*)&WB_UnlockCompilation},
|
||||
@ -1400,7 +1405,7 @@ static JNINativeMethod methods[] = {
|
||||
{CC"incMetaspaceCapacityUntilGC", CC"(J)J", (void*)&WB_IncMetaspaceCapacityUntilGC },
|
||||
{CC"metaspaceCapacityUntilGC", CC"()J", (void*)&WB_MetaspaceCapacityUntilGC },
|
||||
{CC"getCPUFeatures", CC"()Ljava/lang/String;", (void*)&WB_GetCPUFeatures },
|
||||
{CC"getNMethod", CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
|
||||
{CC"getNMethod0", CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
|
||||
(void*)&WB_GetNMethod },
|
||||
{CC"forceNMethodSweep0", CC"()Ljava/lang/Thread;", (void*)&WB_ForceNMethodSweep },
|
||||
{CC"allocateCodeBlob", CC"(II)J", (void*)&WB_AllocateCodeBlob },
|
||||
@ -1412,7 +1417,7 @@ static JNINativeMethod methods[] = {
|
||||
{CC"getThreadStackSize", CC"()J", (void*)&WB_GetThreadStackSize },
|
||||
{CC"getThreadRemainingStackSize", CC"()J", (void*)&WB_GetThreadRemainingStackSize },
|
||||
{CC"assertMatchingSafepointCalls", CC"(ZZ)V", (void*)&WB_AssertMatchingSafepointCalls },
|
||||
{CC"isMonitorInflated", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated },
|
||||
{CC"isMonitorInflated0", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated },
|
||||
{CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint },
|
||||
{CC"getMethodBooleanOption",
|
||||
CC"(Ljava/lang/reflect/Executable;Ljava/lang/String;)Ljava/lang/Boolean;",
|
||||
|
@ -754,13 +754,9 @@ bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
|
||||
return true;
|
||||
} else {
|
||||
guarantee(res == strong_roots_parity, "Or else what?");
|
||||
assert(SharedHeap::heap()->workers()->active_workers() > 0,
|
||||
"Should only fail when parallel.");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
assert(SharedHeap::heap()->workers()->active_workers() > 0,
|
||||
"Should only fail when parallel.");
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -4066,20 +4062,7 @@ void Threads::assert_all_threads_claimed() {
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
||||
void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
|
||||
// Introduce a mechanism allowing parallel threads to claim threads as
|
||||
// root groups. Overhead should be small enough to use all the time,
|
||||
// even in sequential code.
|
||||
SharedHeap* sh = SharedHeap::heap();
|
||||
// Cannot yet substitute active_workers for n_par_threads
|
||||
// because of G1CollectedHeap::verify() use of
|
||||
// SharedHeap::process_roots(). n_par_threads == 0 will
|
||||
// turn off parallelism in process_roots while active_workers
|
||||
// is being used for parallelism elsewhere.
|
||||
bool is_par = sh->n_par_threads() > 0;
|
||||
assert(!is_par ||
|
||||
(SharedHeap::heap()->n_par_threads() ==
|
||||
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
|
||||
void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
|
||||
int cp = Threads::thread_claim_parity();
|
||||
ALL_JAVA_THREADS(p) {
|
||||
if (p->claim_oops_do(is_par, cp)) {
|
||||
|
@ -1886,15 +1886,28 @@ class Threads: AllStatic {
|
||||
// Does not include JNI_VERSION_1_1
|
||||
static jboolean is_supported_jni_version(jint version);
|
||||
|
||||
// The "thread claim parity" provides a way for threads to be claimed
|
||||
// by parallel worker tasks.
|
||||
//
|
||||
// Each thread contains a "parity" field. A task will claim the
|
||||
// thread only if its parity field is the same as the global parity,
|
||||
// which is updated by calling change_thread_claim_parity().
|
||||
//
|
||||
// For this to work change_thread_claim_parity() needs to be called
|
||||
// exactly once in sequential code before starting parallel tasks
|
||||
// that should claim threads.
|
||||
//
|
||||
// New threads get their parity set to 0 and change_thread_claim_parity()
|
||||
// never sets the global parity to 0.
|
||||
static int thread_claim_parity() { return _thread_claim_parity; }
|
||||
static void change_thread_claim_parity();
|
||||
|
||||
static void assert_all_threads_claimed() PRODUCT_RETURN;
|
||||
|
||||
// Apply "f->do_oop" to all root oops in all threads.
|
||||
// This version may only be called by sequential code.
|
||||
static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
|
||||
// This version may be called by sequential or parallel code.
|
||||
static void possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
|
||||
static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
|
||||
// This creates a list of GCTasks, one per thread.
|
||||
static void create_thread_roots_tasks(GCTaskQueue* q);
|
||||
// This creates a list of GCTasks, one per thread, for marking objects.
|
||||
|