Merge
commit da0b87470d
@@ -275,3 +275,4 @@ ea2f7981236f3812436958748ab3d26e80a35130 jdk9-b28
36e9bc875325813ac9c44ac0c617a463091fa9f5 jdk9-b30
69a84c16d9c28e0e3d504b9c8766c24bafcd58f6 jdk9-b31
7e3512dae8e020d44399c0f1c579ff1fe3090ed6 jdk9-b32
e4ba01b726e263953ae129be37c94de6ed145b1d jdk9-b33

@@ -275,3 +275,4 @@ a00b04ef067e39f50b9a0fea6f1904e35d632a73 jdk9-b28
98967ae6ae53ebf15615e07cd5a6b1ae04dfd84c jdk9-b30
c432b80aadd0cb2b2361b02add4d671957d4cec9 jdk9-b31
b5b139354630edb2d06190bf31653acbdcea63a8 jdk9-b32
cfdac5887952c2dd73c73a1d8d9aa880d0539bbf jdk9-b33

@@ -51,13 +51,11 @@ $(eval $(call SetupJavaCompilation,BUILD_INTERIM_CORBA, \
JAR := $(INTERIM_CORBA_JAR)))

################################################################################
# Copy idl files straight to jdk/lib. Not sure if this is the right way to do
# it, but we are moving away from the one repo at a time build. Perhaps we should
# scrap the 'jdk' prefix to bin, lib etc?
$(JDK_OUTPUTDIR)/lib/%: $(CORBA_TOPDIR)/src/java.corba/share/classes/com/sun/tools/corba/se/idl/%
# Copy idl files straight to jdk/include.
$(JDK_OUTPUTDIR)/include/%: $(CORBA_TOPDIR)/src/java.corba/share/classes/com/sun/tools/corba/se/idl/%
$(install-file)

IDL_TARGET_FILES := $(JDK_OUTPUTDIR)/lib/orb.idl $(JDK_OUTPUTDIR)/lib/ir.idl
IDL_TARGET_FILES := $(JDK_OUTPUTDIR)/include/orb.idl $(JDK_OUTPUTDIR)/include/ir.idl

################################################################################

@@ -435,3 +435,4 @@ deb29e92f68ace2808a36ecfa18c7d61dcb645bb jdk9-b29
5c722dffbc0f34eb8d903dca7b261e52248fa17e jdk9-b30
9f7d155d28e519f3e4645dc21cf185c25f3176ed jdk9-b31
af46576a8d7cb4003028b8ee8bf408cfe227315b jdk9-b32
9b3f5e4f33725f7c1d9b8e523133fe8383a54d9f jdk9-b33

@@ -32,12 +32,10 @@ import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;

public class CodeCache {
  private static AddressField heapField;
  private static AddressField scavengeRootNMethodsField;
  private static GrowableArray<CodeHeap> heapArray;
  private static AddressField scavengeRootNMethodsField;
  private static VirtualConstructor virtualConstructor;

  private CodeHeap heap;

  static {
    VM.registerVMInitializedObserver(new Observer() {
        public void update(Observable o, Object data) {
@@ -49,7 +47,10 @@ public class CodeCache {
  private static synchronized void initialize(TypeDataBase db) {
    Type type = db.lookupType("CodeCache");

    heapField = type.getAddressField("_heap");
    // Get array of CodeHeaps
    AddressField heapsField = type.getAddressField("_heaps");
    heapArray = GrowableArray.create(heapsField.getValue(), new StaticBaseConstructor<CodeHeap>(CodeHeap.class));

    scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");

    virtualConstructor = new VirtualConstructor(db);
@@ -67,16 +68,17 @@ public class CodeCache {
    }
  }

  public CodeCache() {
    heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue());
  }

  public NMethod scavengeRootMethods() {
    return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
  }

  public boolean contains(Address p) {
    return getHeap().contains(p);
    for (int i = 0; i < heapArray.length(); ++i) {
      if (heapArray.at(i).contains(p)) {
        return true;
      }
    }
    return false;
  }

  /** When VM.getVM().isDebugging() returns true, this behaves like
@@ -97,14 +99,24 @@ public class CodeCache {

  public CodeBlob findBlobUnsafe(Address start) {
    CodeBlob result = null;
    CodeHeap containing_heap = null;
    for (int i = 0; i < heapArray.length(); ++i) {
      if (heapArray.at(i).contains(start)) {
        containing_heap = heapArray.at(i);
        break;
      }
    }
    if (containing_heap == null) {
      return null;
    }

    try {
      result = (CodeBlob) virtualConstructor.instantiateWrapperFor(getHeap().findStart(start));
      result = (CodeBlob) virtualConstructor.instantiateWrapperFor(containing_heap.findStart(start));
    }
    catch (WrongTypeException wte) {
      Address cbAddr = null;
      try {
        cbAddr = getHeap().findStart(start);
        cbAddr = containing_heap.findStart(start);
      }
      catch (Exception findEx) {
        findEx.printStackTrace();
@@ -167,31 +179,32 @@ public class CodeCache {
  }

  public void iterate(CodeCacheVisitor visitor) {
    CodeHeap heap = getHeap();
    Address ptr = heap.begin();
    Address end = heap.end();

    visitor.prologue(ptr, end);
    visitor.prologue(lowBound(), highBound());
    CodeBlob lastBlob = null;
    while (ptr != null && ptr.lessThan(end)) {
      try {
        // Use findStart to get a pointer inside blob other findBlob asserts
        CodeBlob blob = findBlobUnsafe(heap.findStart(ptr));
        if (blob != null) {
          visitor.visit(blob);
          if (blob == lastBlob) {
            throw new InternalError("saw same blob twice");

    for (int i = 0; i < heapArray.length(); ++i) {
      CodeHeap current_heap = heapArray.at(i);
      Address ptr = current_heap.begin();
      while (ptr != null && ptr.lessThan(current_heap.end())) {
        try {
          // Use findStart to get a pointer inside blob other findBlob asserts
          CodeBlob blob = findBlobUnsafe(current_heap.findStart(ptr));
          if (blob != null) {
            visitor.visit(blob);
            if (blob == lastBlob) {
              throw new InternalError("saw same blob twice");
            }
            lastBlob = blob;
          }
          lastBlob = blob;
        } catch (RuntimeException e) {
          e.printStackTrace();
        }
      } catch (RuntimeException e) {
        e.printStackTrace();
        Address next = current_heap.nextBlock(ptr);
        if (next != null && next.lessThan(ptr)) {
          throw new InternalError("pointer moved backwards");
        }
        ptr = next;
      }
      Address next = heap.nextBlock(ptr);
      if (next != null && next.lessThan(ptr)) {
        throw new InternalError("pointer moved backwards");
      }
      ptr = next;
    }
    visitor.epilogue();
  }
@@ -200,7 +213,23 @@ public class CodeCache {
  // Internals only below this point
  //

  private CodeHeap getHeap() {
    return heap;
  private Address lowBound() {
    Address low = heapArray.at(0).begin();
    for (int i = 1; i < heapArray.length(); ++i) {
      if (heapArray.at(i).begin().lessThan(low)) {
        low = heapArray.at(i).begin();
      }
    }
    return low;
  }

  private Address highBound() {
    Address high = heapArray.at(0).end();
    for (int i = 1; i < heapArray.length(); ++i) {
      if (heapArray.at(i).end().greaterThan(high)) {
        high = heapArray.at(i).end();
      }
    }
    return high;
  }
}

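Side note (not part of the commit): a minimal sketch of how a Serviceability Agent script might use the reworked wrapper above once the code cache is segmented. It assumes an SA session is already attached (so VM.getVM() is initialized) and that VM.getCodeCache() is available in this SA build; the class and helper names below are illustrative only.

// --- sketch begins (not part of the diff) ---
import sun.jvm.hotspot.code.CodeBlob;
import sun.jvm.hotspot.code.CodeCache;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;

// Hypothetical helper: resolve an arbitrary PC against all code heaps.
public class FindBlobSketch {
  public static String describe(Address pc) {
    CodeCache cache = VM.getVM().getCodeCache();   // assumes an attached, initialized SA session
    if (!cache.contains(pc)) {                     // now checks every CodeHeap, per the change above
      return "not in the code cache";
    }
    CodeBlob blob = cache.findBlobUnsafe(pc);      // finds the containing heap first, then the blob
    return (blob == null) ? "unknown blob" : blob.getName();
  }
}
// --- sketch ends ---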
@@ -0,0 +1,40 @@
package sun.jvm.hotspot.gc_implementation.g1;

import java.util.Observable;
import java.util.Observer;

import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;

public class G1Allocator extends VMObject {

  //size_t _summary_bytes_used;
  static private CIntegerField summaryBytesUsedField;

  static {
    VM.registerVMInitializedObserver(new Observer() {
      public void update(Observable o, Object data) {
        initialize(VM.getVM().getTypeDataBase());
      }
    });
  }

  static private synchronized void initialize(TypeDataBase db) {
    Type type = db.lookupType("G1Allocator");

    summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
  }

  public long getSummaryBytes() {
    return summaryBytesUsedField.getValue(addr);
  }

  public G1Allocator(Address addr) {
    super(addr);

  }
}
@@ -36,7 +36,6 @@ import sun.jvm.hotspot.memory.SpaceClosure;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;

@@ -47,8 +46,8 @@ public class G1CollectedHeap extends SharedHeap {
  static private long hrmFieldOffset;
  // MemRegion _g1_reserved;
  static private long g1ReservedFieldOffset;
  // size_t _summary_bytes_used;
  static private CIntegerField summaryBytesUsedField;
  // G1Allocator* _allocator
  static private AddressField g1Allocator;
  // G1MonitoringSupport* _g1mm;
  static private AddressField g1mmField;
  // HeapRegionSet _old_set;
@@ -68,7 +67,7 @@ public class G1CollectedHeap extends SharedHeap {
    Type type = db.lookupType("G1CollectedHeap");

    hrmFieldOffset = type.getField("_hrm").getOffset();
    summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
    g1Allocator = type.getAddressField("_allocator");
    g1mmField = type.getAddressField("_g1mm");
    oldSetFieldOffset = type.getField("_old_set").getOffset();
    humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
@@ -79,7 +78,7 @@ public class G1CollectedHeap extends SharedHeap {
  }

  public long used() {
    return summaryBytesUsedField.getValue(addr);
    return allocator().getSummaryBytes();
  }

  public long n_regions() {
@@ -97,6 +96,11 @@ public class G1CollectedHeap extends SharedHeap {
    return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
  }

  public G1Allocator allocator() {
    Address g1AllocatorAddr = g1Allocator.getValue(addr);
    return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
  }

  public HeapRegionSetBase oldSet() {
    Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
    return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,

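Again as an aside (not in the diff): with the _summary_bytes_used bookkeeping moved behind G1Allocator, SA consumers keep calling G1CollectedHeap.used(). A rough sketch of reading the value both ways, assuming an attached SA session whose target VM runs with G1 so the universe heap casts to G1CollectedHeap:

// --- sketch begins (not part of the diff) ---
import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
import sun.jvm.hotspot.runtime.VM;

// Hypothetical check: both paths should report the same number of used bytes.
public class G1UsedSketch {
  public static void main(String[] args) {
    G1CollectedHeap g1 = (G1CollectedHeap) VM.getVM().getUniverse().heap(); // assumes -XX:+UseG1GC in the target VM
    System.out.println("used()                        = " + g1.used());
    System.out.println("allocator().getSummaryBytes() = " + g1.allocator().getSummaryBytes());
  }
}
// --- sketch ends ---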
@ -234,10 +234,10 @@ JVM_OBJ_FILES = $(Obj_Files)
|
||||
|
||||
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
|
||||
|
||||
mapfile : $(MAPFILE) vm.def
|
||||
mapfile : $(MAPFILE) vm.def mapfile_ext
|
||||
rm -f $@
|
||||
awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
|
||||
{ system ("cat vm.def"); } \
|
||||
{ system ("cat mapfile_ext"); system ("cat vm.def"); } \
|
||||
else \
|
||||
{ print $$0 } \
|
||||
}' > $@ < $(MAPFILE)
|
||||
@ -249,6 +249,13 @@ mapfile_reorder : mapfile $(REORDERFILE)
|
||||
vm.def: $(Res_Files) $(Obj_Files)
|
||||
sh $(GAMMADIR)/make/bsd/makefiles/build_vm_def.sh *.o > $@
|
||||
|
||||
mapfile_ext:
|
||||
rm -f $@
|
||||
touch $@
|
||||
if [ -f $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext ]; then \
|
||||
cat $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext > $@; \
|
||||
fi
|
||||
|
||||
STATIC_CXX = false
|
||||
|
||||
ifeq ($(LINK_INTO),AOUT)
|
||||
@ -265,6 +272,8 @@ else
|
||||
LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/.
|
||||
LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/..
|
||||
LFLAGS_VM += -Xlinker -install_name -Xlinker @rpath/$(@F)
|
||||
else
|
||||
LFLAGS_VM += -Wl,-z,defs
|
||||
endif
|
||||
|
||||
# JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
|
||||
|
@ -21,6 +21,9 @@
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
|
||||
include $(GAMMADIR)/make/altsrc.make
|
||||
|
||||
ifeq ($(INCLUDE_JVMTI), false)
|
||||
CXXFLAGS += -DINCLUDE_JVMTI=0
|
||||
CFLAGS += -DINCLUDE_JVMTI=0
|
||||
@ -78,12 +81,12 @@ ifeq ($(INCLUDE_ALL_GCS), false)
|
||||
CXXFLAGS += -DINCLUDE_ALL_GCS=0
|
||||
CFLAGS += -DINCLUDE_ALL_GCS=0
|
||||
|
||||
gc_impl := $(GAMMADIR)/src/share/vm/gc_implementation
|
||||
gc_exclude := \
|
||||
$(notdir $(wildcard $(gc_impl)/concurrentMarkSweep/*.cpp)) \
|
||||
$(notdir $(wildcard $(gc_impl)/g1/*.cpp)) \
|
||||
$(notdir $(wildcard $(gc_impl)/parallelScavenge/*.cpp)) \
|
||||
$(notdir $(wildcard $(gc_impl)/parNew/*.cpp))
|
||||
gc_impl := $(HS_COMMON_SRC)/share/vm/gc_implementation
|
||||
gc_impl_alt := $(HS_ALT_SRC)/share/vm/gc_implementation
|
||||
gc_subdirs := concurrentMarkSweep g1 parallelScavenge parNew
|
||||
gc_exclude := $(foreach gc,$(gc_subdirs), \
|
||||
$(notdir $(wildcard $(gc_impl)/$(gc)/*.cpp)) \
|
||||
$(notdir $(wildcard $(gc_impl_alt)/$(gc)/*.cpp)))
|
||||
Src_Files_EXCLUDE += $(gc_exclude)
|
||||
|
||||
# Exclude everything in $(gc_impl)/shared except the files listed
|
||||
|
@ -1,388 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
|
||||
# Properties for jprt
|
||||
|
||||
# All build result bundles are full jdks.
|
||||
jprt.need.sibling.build=false
|
||||
|
||||
# At submit time, the release supplied will be in jprt.submit.release
|
||||
# and will be one of the official release names defined in jprt.
|
||||
# jprt supports property value expansion using ${property.name} syntax.
|
||||
|
||||
# This tells jprt what default release we want to build
|
||||
|
||||
jprt.hotspot.default.release=jdk9
|
||||
|
||||
jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
|
||||
|
||||
# Disable syncing the source after builds and tests are done.
|
||||
|
||||
jprt.sync.push=false
|
||||
|
||||
# Note: we want both embedded releases and regular releases to build and test
|
||||
# all platforms so that regressions are not introduced (eg. change to
|
||||
# common code by SE breaks PPC/ARM; change to common code by SE-E breaks
|
||||
# sparc etc.
|
||||
|
||||
# Define the Solaris platforms we want for the various releases
|
||||
jprt.my.solaris.sparcv9.jdk9=solaris_sparcv9_5.11
|
||||
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.solaris.x64.jdk9=solaris_x64_5.11
|
||||
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.i586.jdk9=linux_i586_2.6
|
||||
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.x64.jdk9=linux_x64_2.6
|
||||
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.ppc.jdk9=linux_ppc_2.6
|
||||
jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.ppcv2.jdk9=linux_ppcv2_2.6
|
||||
jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.armvfpsflt.jdk9=linux_armvfpsflt_2.6
|
||||
jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.armvfphflt.jdk9=linux_armvfphflt_2.6
|
||||
jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}
|
||||
|
||||
# The ARM GP vfp-sflt build is not currently supported
|
||||
#jprt.my.linux.armvs.jdk9=linux_armvs_2.6
|
||||
#jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.armvh.jdk9=linux_armvh_2.6
|
||||
jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.linux.armsflt.jdk9=linux_armsflt_2.6
|
||||
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.macosx.x64.jdk9=macosx_x64_10.7
|
||||
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.windows.i586.jdk9=windows_i586_6.1
|
||||
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.windows.x64.jdk9=windows_x64_6.1
|
||||
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
|
||||
|
||||
# Standard list of jprt build targets for this source tree
|
||||
|
||||
jprt.build.targets.standard= \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}, \
|
||||
${jprt.my.linux.armvh}-{product|fastdebug}
|
||||
|
||||
jprt.build.targets.open= \
|
||||
${jprt.my.solaris.x64}-{debugOpen}, \
|
||||
${jprt.my.linux.x64}-{productOpen}
|
||||
|
||||
jprt.build.targets.embedded= \
|
||||
${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \
|
||||
${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
|
||||
${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
|
||||
${jprt.my.linux.armvfpsflt}-{productEmb|fastdebugEmb}, \
|
||||
${jprt.my.linux.armvfphflt}-{productEmb|fastdebugEmb}, \
|
||||
${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
|
||||
|
||||
jprt.build.targets.all=${jprt.build.targets.standard}, \
|
||||
${jprt.build.targets.embedded}, ${jprt.build.targets.open}
|
||||
|
||||
jprt.build.targets.jdk9=${jprt.build.targets.all}
|
||||
jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
|
||||
|
||||
# Subset lists of test targets for this source tree
|
||||
|
||||
jprt.my.solaris.sparcv9.test.targets= \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
|
||||
${jprt.my.solaris.sparcv9}-product-c2-runThese8, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_G1, \
|
||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParOldGC
|
||||
|
||||
jprt.my.solaris.x64.test.targets= \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \
|
||||
${jprt.my.solaris.x64}-product-c2-runThese8, \
|
||||
${jprt.my.solaris.x64}-product-c2-runThese8_Xcomp_lang, \
|
||||
${jprt.my.solaris.x64}-product-c2-runThese8_Xcomp_vm, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
|
||||
|
||||
jprt.my.linux.i586.test.targets = \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \
|
||||
${jprt.my.linux.i586}-product-c1-runThese8_Xcomp_lang, \
|
||||
${jprt.my.linux.i586}-product-c1-runThese8_Xcomp_vm, \
|
||||
${jprt.my.linux.i586}-fastdebug-c1-runThese8_Xshare, \
|
||||
${jprt.my.linux.i586}-fastdebug-c2-runThese8_Xcomp_lang, \
|
||||
${jprt.my.linux.i586}-fastdebug-c2-runThese8_Xcomp_vm, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
|
||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \
|
||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \
|
||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \
|
||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \
|
||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
|
||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_SerialGC, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParOldGC
|
||||
|
||||
jprt.my.linux.x64.test.targets = \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
|
||||
|
||||
jprt.my.macosx.x64.test.targets = \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
|
||||
|
||||
jprt.my.windows.i586.test.targets = \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-scimark, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-runThese8, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-runThese8_Xcomp_lang, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-runThese8_Xcomp_vm, \
|
||||
${jprt.my.windows.i586}-fastdebug-c1-runThese8_Xshare, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_CMS, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \
|
||||
${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParOldGC
|
||||
|
||||
jprt.my.windows.x64.test.targets = \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \
|
||||
${jprt.my.windows.x64}-product-c2-runThese8, \
|
||||
${jprt.my.windows.x64}-product-c2-runThese8_Xcomp_lang, \
|
||||
${jprt.my.windows.x64}-product-c2-runThese8_Xcomp_vm, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
||||
${jprt.my.windows.x64}-product-c2-jbb_CMS, \
|
||||
${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \
|
||||
${jprt.my.windows.x64}-product-c2-jbb_G1, \
|
||||
${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
|
||||
|
||||
# Some basic "smoke" tests for OpenJDK builds
|
||||
jprt.test.targets.open = \
|
||||
${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \
|
||||
${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
|
||||
|
||||
# Testing for actual embedded builds is different to standard
|
||||
jprt.my.linux.i586.test.targets.embedded = \
|
||||
linux_i586_2.6-product-c1-scimark
|
||||
|
||||
# The complete list of test targets for jprt
|
||||
# Note: no PPC or ARM tests at this stage
|
||||
|
||||
jprt.test.targets.standard = \
|
||||
${jprt.my.linux.i586.test.targets.embedded}, \
|
||||
${jprt.my.solaris.sparcv9.test.targets}, \
|
||||
${jprt.my.solaris.x64.test.targets}, \
|
||||
${jprt.my.linux.i586.test.targets}, \
|
||||
${jprt.my.linux.x64.test.targets}, \
|
||||
${jprt.my.macosx.x64.test.targets}, \
|
||||
${jprt.my.windows.i586.test.targets}, \
|
||||
${jprt.my.windows.x64.test.targets}, \
|
||||
${jprt.test.targets.open}
|
||||
|
||||
jprt.test.targets.embedded= \
|
||||
${jprt.my.linux.i586.test.targets.embedded}, \
|
||||
${jprt.my.solaris.sparcv9.test.targets}, \
|
||||
${jprt.my.solaris.x64.test.targets}, \
|
||||
${jprt.my.linux.x64.test.targets}, \
|
||||
${jprt.my.windows.i586.test.targets}, \
|
||||
${jprt.my.windows.x64.test.targets}
|
||||
|
||||
jprt.test.targets.jdk9=${jprt.test.targets.standard}
|
||||
jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
|
||||
|
||||
# The default test/Makefile targets that should be run
|
||||
|
||||
#jprt.make.rule.test.targets=*-product-*-packtest
|
||||
|
||||
jprt.make.rule.test.targets.standard.client = \
|
||||
${jprt.my.linux.i586}-*-c1-clienttest, \
|
||||
${jprt.my.windows.i586}-*-c1-clienttest
|
||||
|
||||
jprt.make.rule.test.targets.standard.server = \
|
||||
${jprt.my.solaris.sparcv9}-*-c2-servertest, \
|
||||
${jprt.my.solaris.x64}-*-c2-servertest, \
|
||||
${jprt.my.linux.i586}-*-c2-servertest, \
|
||||
${jprt.my.linux.x64}-*-c2-servertest, \
|
||||
${jprt.my.macosx.x64}-*-c2-servertest, \
|
||||
${jprt.my.windows.i586}-*-c2-servertest, \
|
||||
${jprt.my.windows.x64}-*-c2-servertest
|
||||
|
||||
jprt.make.rule.test.targets.standard.internalvmtests = \
|
||||
${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
|
||||
|
||||
jprt.make.rule.test.targets.standard.reg.group = \
|
||||
${jprt.my.solaris.sparcv9}-fastdebug-c2-GROUP, \
|
||||
${jprt.my.solaris.x64}-fastdebug-c2-GROUP, \
|
||||
${jprt.my.linux.i586}-fastdebug-c2-GROUP, \
|
||||
${jprt.my.linux.x64}-fastdebug-c2-GROUP, \
|
||||
${jprt.my.macosx.x64}-fastdebug-c2-GROUP, \
|
||||
${jprt.my.windows.i586}-fastdebug-c2-GROUP, \
|
||||
${jprt.my.windows.x64}-fastdebug-c2-GROUP, \
|
||||
${jprt.my.linux.i586}-fastdebug-c1-GROUP, \
|
||||
${jprt.my.windows.i586}-fastdebug-c1-GROUP
|
||||
|
||||
jprt.make.rule.test.targets.standard = \
|
||||
${jprt.make.rule.test.targets.standard.client}, \
|
||||
${jprt.make.rule.test.targets.standard.server}, \
|
||||
${jprt.make.rule.test.targets.standard.internalvmtests}, \
|
||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_wbapitest}, \
|
||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \
|
||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \
|
||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \
|
||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime_closed}, \
|
||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability}
|
||||
|
||||
jprt.make.rule.test.targets.embedded = \
|
||||
${jprt.make.rule.test.targets.standard.client}
|
||||
|
||||
jprt.make.rule.test.targets.jdk9=${jprt.make.rule.test.targets.standard}
|
||||
jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
|
||||
|
||||
# 7155453: Work-around to prevent popups on OSX from blocking test completion
|
||||
# but the work-around is added to all platforms to be consistent
|
||||
jprt.jbb.options=-Djava.awt.headless=true
|
@ -227,10 +227,10 @@ JVM_OBJ_FILES = $(Obj_Files)
|
||||
|
||||
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
|
||||
|
||||
mapfile : $(MAPFILE) vm.def
|
||||
mapfile : $(MAPFILE) vm.def mapfile_ext
|
||||
rm -f $@
|
||||
awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
|
||||
{ system ("cat vm.def"); } \
|
||||
{ system ("cat mapfile_ext"); system ("cat vm.def"); } \
|
||||
else \
|
||||
{ print $$0 } \
|
||||
}' > $@ < $(MAPFILE)
|
||||
@ -242,6 +242,13 @@ mapfile_reorder : mapfile $(REORDERFILE)
|
||||
vm.def: $(Res_Files) $(Obj_Files)
|
||||
sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
|
||||
|
||||
mapfile_ext:
|
||||
rm -f $@
|
||||
touch $@
|
||||
if [ -f $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext ]; then \
|
||||
cat $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext > $@; \
|
||||
fi
|
||||
|
||||
ifeq ($(JVM_VARIANT_ZEROSHARK), true)
|
||||
STATIC_CXX = false
|
||||
else
|
||||
@ -261,6 +268,7 @@ else
|
||||
LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder
|
||||
LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
|
||||
LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM))
|
||||
LFLAGS_VM += -Wl,-z,defs
|
||||
|
||||
# JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
|
||||
# get around library dependency and compatibility issues. Must use gcc not
|
||||
|
@ -258,6 +258,8 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
|
||||
echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
|
||||
[ -n "$(ZIPEXE)" ] && \
|
||||
echo && echo "ZIPEXE = $(ZIPEXE)"; \
|
||||
[ -n "$(HS_ALT_MAKE)" ] && \
|
||||
echo && echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
|
||||
[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
|
||||
echo && \
|
||||
echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
|
||||
|
@@ -29,7 +29,7 @@
SUNWprivate_1.1 {
        global:
                # Dtrace support
                __1cJCodeCacheF_heap_;
                __1cJCodeCacheG_heaps_;
                __1cIUniverseO_collectedHeap_;
                __1cGMethodG__vtbl_;
                __1cHnmethodG__vtbl_;

@@ -29,7 +29,7 @@
SUNWprivate_1.1 {
        global:
                # Dtrace support
                __1cJCodeCacheF_heap_;
                __1cJCodeCacheG_heaps_;
                __1cIUniverseO_collectedHeap_;
                __1cGMethodG__vtbl_;
                __1cHnmethodG__vtbl_;

@@ -29,7 +29,7 @@
SUNWprivate_1.1 {
        global:
                # Dtrace support
                __1cJCodeCacheF_heap_;
                __1cJCodeCacheG_heaps_;
                __1cIUniverseO_collectedHeap_;
                __1cGMethodG__vtbl_;
                __1cHnmethodG__vtbl_;

@ -130,7 +130,7 @@ ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1)
|
||||
# Not sure what the 'designed for' comment is referring too above.
|
||||
# The order may not be too significant anymore, but I have placed this
|
||||
# older libm before libCrun, just to make sure it's found and used first.
|
||||
LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle
|
||||
LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle -lnsl
|
||||
else
|
||||
ifeq ($(COMPILER_REV_NUMERIC), 502)
|
||||
# SC6.1 has it's own libm.so: specifying anything else provokes a name conflict.
|
||||
@ -249,11 +249,12 @@ JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS)
|
||||
|
||||
vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
|
||||
|
||||
mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def
|
||||
mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def mapfile_ext
|
||||
rm -f $@
|
||||
cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \
|
||||
| $(NAWK) '{ \
|
||||
if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") { \
|
||||
system ("cat mapfile_ext"); \
|
||||
system ("cat vm.def"); \
|
||||
} else { \
|
||||
print $$0; \
|
||||
@ -267,6 +268,13 @@ mapfile_extended : mapfile $(MAPFILE_DTRACE_OPT)
|
||||
vm.def: $(Obj_Files)
|
||||
sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@
|
||||
|
||||
mapfile_ext:
|
||||
rm -f $@
|
||||
touch $@
|
||||
if [ -f $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext ]; then \
|
||||
cat $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext > $@; \
|
||||
fi
|
||||
|
||||
ifeq ($(LINK_INTO),AOUT)
|
||||
LIBJVM.o =
|
||||
LIBJVM_MAPFILE =
|
||||
@ -276,6 +284,7 @@ else
|
||||
LIBJVM_MAPFILE$(LDNOMAP) = mapfile_extended
|
||||
LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
|
||||
LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM))
|
||||
LFLAGS_VM += -Wl,-z,defs
|
||||
ifndef USE_GCC
|
||||
LIBS_VM = $(LIBS)
|
||||
else
|
||||
|
@ -79,6 +79,9 @@ define_pd_global(bool, OptoScheduling, false);
|
||||
|
||||
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 256*M);
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 125*M);
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 126*M);
|
||||
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
|
@ -308,3 +308,10 @@ intptr_t *frame::initial_deoptimization_info() {
|
||||
// unused... but returns fp() to minimize changes introduced by 7087445
|
||||
return fp();
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// This is a generic constructor which is only used by pns() in debug.cpp.
|
||||
frame::frame(void* sp, void* fp, void* pc) : _sp((intptr_t*)sp), _unextended_sp((intptr_t*)sp) {
|
||||
find_codeblob_and_set_pc_and_deopt_state((address)pc); // also sets _fp and adjusts _unextended_sp
|
||||
}
|
||||
#endif
|
||||
|
@ -47,6 +47,9 @@ define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, FreqInlineSize, 325 );
|
||||
define_pd_global(bool, ResizeTLAB, true );
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
@ -74,6 +74,9 @@ define_pd_global(bool, OptoScheduling, true);
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
@ -82,6 +85,9 @@ define_pd_global(uint64_t,MaxRAM, 128ULL*G);
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M);
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M);
|
||||
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
|
||||
|
@ -343,7 +343,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
|
||||
// constructors
|
||||
|
||||
// Construct an unpatchable, deficient frame
|
||||
frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
|
||||
void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
|
||||
#ifdef _LP64
|
||||
assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
|
||||
#endif
|
||||
@ -365,6 +365,10 @@ frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
|
||||
#endif // ASSERT
|
||||
}
|
||||
|
||||
frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
|
||||
init(sp, pc, cb);
|
||||
}
|
||||
|
||||
frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
|
||||
_sp(sp),
|
||||
_younger_sp(younger_sp),
|
||||
@ -419,6 +423,13 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpret
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// This is a generic constructor which is only used by pns() in debug.cpp.
|
||||
frame::frame(void* sp, void* fp, void* pc) {
|
||||
init((intptr_t*)sp, (address)pc, NULL);
|
||||
}
|
||||
#endif
|
||||
|
||||
bool frame::is_interpreted_frame() const {
|
||||
return Interpreter::contains(pc());
|
||||
}
|
||||
|
@ -163,6 +163,8 @@
|
||||
enum unpatchable_t { unpatchable };
|
||||
frame(intptr_t* sp, unpatchable_t, address pc = NULL, CodeBlob* cb = NULL);
|
||||
|
||||
void init(intptr_t* sp, address pc, CodeBlob* cb);
|
||||
|
||||
// Walk from sp outward looking for old_sp, and return old_sp's predecessor
|
||||
// (i.e. return the sp from the frame where old_sp is the fp).
|
||||
// Register windows are assumed to be flushed for the stack in question.
|
||||
|
@ -1128,51 +1128,82 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
|
||||
// Hoist any int/ptr/long's in the first 6 to int regs.
|
||||
// Hoist any flt/dbl's in the first 16 dbl regs.
|
||||
int j = 0; // Count of actual args, not HALVES
|
||||
for( int i=0; i<total_args_passed; i++, j++ ) {
|
||||
switch( sig_bt[i] ) {
|
||||
VMRegPair param_array_reg; // location of the argument in the parameter array
|
||||
for (int i = 0; i < total_args_passed; i++, j++) {
|
||||
param_array_reg.set_bad();
|
||||
switch (sig_bt[i]) {
|
||||
case T_BOOLEAN:
|
||||
case T_BYTE:
|
||||
case T_CHAR:
|
||||
case T_INT:
|
||||
case T_SHORT:
|
||||
regs[i].set1( int_stk_helper( j ) ); break;
|
||||
regs[i].set1(int_stk_helper(j));
|
||||
break;
|
||||
case T_LONG:
|
||||
assert( sig_bt[i+1] == T_VOID, "expecting half" );
|
||||
assert(sig_bt[i+1] == T_VOID, "expecting half");
|
||||
case T_ADDRESS: // raw pointers, like current thread, for VM calls
|
||||
case T_ARRAY:
|
||||
case T_OBJECT:
|
||||
case T_METADATA:
|
||||
regs[i].set2( int_stk_helper( j ) );
|
||||
regs[i].set2(int_stk_helper(j));
|
||||
break;
|
||||
case T_FLOAT:
|
||||
if ( j < 16 ) {
|
||||
// V9ism: floats go in ODD registers
|
||||
regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
|
||||
} else {
|
||||
// V9ism: floats go in ODD stack slot
|
||||
regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
|
||||
// Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
|
||||
// http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
|
||||
//
|
||||
// "When a callee prototype exists, and does not indicate variable arguments,
|
||||
// floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
|
||||
// will be promoted to floating-point registers"
|
||||
//
|
||||
// By "promoted" it means that the argument is located in two places, an unused
|
||||
// spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
|
||||
// float register. In most cases, there are 6 or fewer arguments of any type,
|
||||
// and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
|
||||
// serve as shadow slots. Per the spec floating point registers %d6 to %d16
|
||||
// require slots beyond that (up to %sp+BIAS+248).
|
||||
//
|
||||
{
|
||||
// V9ism: floats go in ODD registers and stack slots
|
||||
int float_index = 1 + (j << 1);
|
||||
param_array_reg.set1(VMRegImpl::stack2reg(float_index));
|
||||
if (j < 16) {
|
||||
regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
|
||||
} else {
|
||||
regs[i] = param_array_reg;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case T_DOUBLE:
|
||||
assert( sig_bt[i+1] == T_VOID, "expecting half" );
|
||||
if ( j < 16 ) {
|
||||
// V9ism: doubles go in EVEN/ODD regs
|
||||
regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
|
||||
} else {
|
||||
// V9ism: doubles go in EVEN/ODD stack slots
|
||||
regs[i].set2(VMRegImpl::stack2reg(j<<1));
|
||||
{
|
||||
assert(sig_bt[i + 1] == T_VOID, "expecting half");
|
||||
// V9ism: doubles go in EVEN/ODD regs and stack slots
|
||||
int double_index = (j << 1);
|
||||
param_array_reg.set2(VMRegImpl::stack2reg(double_index));
|
||||
if (j < 16) {
|
||||
regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
|
||||
} else {
|
||||
// V9ism: doubles go in EVEN/ODD stack slots
|
||||
regs[i] = param_array_reg;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
|
||||
case T_VOID:
|
||||
regs[i].set_bad();
|
||||
j--;
|
||||
break; // Do not count HALVES
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
if (regs[i].first()->is_stack()) {
|
||||
int off = regs[i].first()->reg2stack();
|
||||
// Keep track of the deepest parameter array slot.
|
||||
if (!param_array_reg.first()->is_valid()) {
|
||||
param_array_reg = regs[i];
|
||||
}
|
||||
if (param_array_reg.first()->is_stack()) {
|
||||
int off = param_array_reg.first()->reg2stack();
|
||||
if (off > max_stack_slots) max_stack_slots = off;
|
||||
}
|
||||
if (regs[i].second()->is_stack()) {
|
||||
int off = regs[i].second()->reg2stack();
|
||||
if (param_array_reg.second()->is_stack()) {
|
||||
int off = param_array_reg.second()->reg2stack();
|
||||
if (off > max_stack_slots) max_stack_slots = off;
|
||||
}
|
||||
}
|
||||
@ -1180,8 +1211,8 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
|
||||
#else // _LP64
|
||||
// V8 convention: first 6 things in O-regs, rest on stack.
|
||||
// Alignment is willy-nilly.
|
||||
for( int i=0; i<total_args_passed; i++ ) {
|
||||
switch( sig_bt[i] ) {
|
||||
for (int i = 0; i < total_args_passed; i++) {
|
||||
switch (sig_bt[i]) {
|
||||
case T_ADDRESS: // raw pointers, like current thread, for VM calls
|
||||
case T_ARRAY:
|
||||
case T_BOOLEAN:
|
||||
@ -1192,23 +1223,23 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
|
||||
case T_OBJECT:
|
||||
case T_METADATA:
|
||||
case T_SHORT:
|
||||
regs[i].set1( int_stk_helper( i ) );
|
||||
regs[i].set1(int_stk_helper(i));
|
||||
break;
|
||||
case T_DOUBLE:
|
||||
case T_LONG:
|
||||
assert( sig_bt[i+1] == T_VOID, "expecting half" );
|
||||
regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
|
||||
assert(sig_bt[i + 1] == T_VOID, "expecting half");
|
||||
regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
|
||||
break;
|
||||
case T_VOID: regs[i].set_bad(); break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
if (regs[i].first()->is_stack()) {
|
||||
int off = regs[i].first()->reg2stack();
|
||||
int off = regs[i].first()->reg2stack();
|
||||
if (off > max_stack_slots) max_stack_slots = off;
|
||||
}
|
||||
if (regs[i].second()->is_stack()) {
|
||||
int off = regs[i].second()->reg2stack();
|
||||
int off = regs[i].second()->reg2stack();
|
||||
if (off > max_stack_slots) max_stack_slots = off;
|
||||
}
|
||||
}
|
||||
@ -1357,11 +1388,10 @@ static void object_move(MacroAssembler* masm,
|
||||
const Register rOop = src.first()->as_Register();
|
||||
const Register rHandle = L5;
|
||||
int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
|
||||
int offset = oop_slot*VMRegImpl::stack_slot_size;
|
||||
Label skip;
|
||||
int offset = oop_slot * VMRegImpl::stack_slot_size;
|
||||
__ st_ptr(rOop, SP, offset + STACK_BIAS);
|
||||
if (is_receiver) {
|
||||
*receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
|
||||
*receiver_offset = offset;
|
||||
}
|
||||
map->set_oop(VMRegImpl::stack2reg(oop_slot));
|
||||
__ add(SP, offset + STACK_BIAS, rHandle);
|
||||
|
@@ -1989,7 +1989,7 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats conerted to double when stored to stack during deoptimization?
// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

@@ -3218,7 +3218,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r
// are owned by the CALLEE. Holes should not be nessecary in the
// incoming area, as the Java calling convention is completely under
// the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be nessecary for
// avoid holes. Holes in the outgoing arguments may be necessary for
// varargs C calling conventions.
// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
// even aligned with pad0 as needed.
@@ -3284,7 +3284,7 @@ frame %{
%}

// Body of function which returns an OptoRegs array locating
// arguments either in registers or in stack slots for callin
// arguments either in registers or in stack slots for calling
// C.
c_calling_convention %{
// This is obviously always outgoing

@ -47,6 +47,9 @@ define_pd_global(intx, FreqInlineSize, 325 );
|
||||
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
|
||||
define_pd_global(intx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
|
@ -84,6 +84,9 @@ define_pd_global(bool, OptoScheduling, false);
|
||||
define_pd_global(bool, OptoBundling, false);
|
||||
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
||||
|
@ -715,3 +715,10 @@ intptr_t* frame::real_fp() const {
|
||||
assert(! is_compiled_frame(), "unknown compiled frame size");
|
||||
return fp();
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// This is a generic constructor which is only used by pns() in debug.cpp.
|
||||
frame::frame(void* sp, void* fp, void* pc) {
|
||||
init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
|
||||
}
|
||||
#endif
|
||||
|
@ -187,6 +187,8 @@
|
||||
|
||||
frame(intptr_t* sp, intptr_t* fp);
|
||||
|
||||
void init(intptr_t* sp, intptr_t* fp, address pc);
|
||||
|
||||
// accessors for the instance variables
|
||||
// Note: not necessarily the real 'frame pointer' (see real_fp)
|
||||
intptr_t* fp() const { return _fp; }
|
||||
|
@ -41,7 +41,7 @@ inline frame::frame() {
|
||||
_deopt_state = unknown;
|
||||
}
|
||||
|
||||
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
|
||||
inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
|
||||
_sp = sp;
|
||||
_unextended_sp = sp;
|
||||
_fp = fp;
|
||||
@ -59,6 +59,10 @@ inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
|
||||
}
|
||||
}
|
||||
|
||||
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
|
||||
init(sp, fp, pc);
|
||||
}
|
||||
|
||||
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
|
||||
_sp = sp;
|
||||
_unextended_sp = unextended_sp;
|
||||
|
@ -438,3 +438,10 @@ intptr_t *frame::initial_deoptimization_info() {
|
||||
// unused... but returns fp() to minimize changes introduced by 7087445
|
||||
return fp();
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// This is a generic constructor which is only used by pns() in debug.cpp.
|
||||
frame::frame(void* sp, void* fp, void* pc) {
|
||||
Unimplemented();
|
||||
}
|
||||
#endif
|
||||
|
@ -53,6 +53,9 @@ define_pd_global(uintx, NewRatio, 12 );
|
||||
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
|
||||
define_pd_global(intx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
|
||||
|
@ -67,7 +67,7 @@
|
||||
* we link this program with -z nodefs .
|
||||
*
|
||||
* But for 'debug1' and 'fastdebug1' we still have to provide
|
||||
* a particular workaround for the following symbols bellow.
|
||||
* a particular workaround for the following symbols below.
|
||||
* It will be good to find out a generic way in the future.
|
||||
*/
|
||||
|
||||
@ -87,21 +87,24 @@ StubQueue* AbstractInterpreter::_code = NULL;
|
||||
#endif /* ASSERT */
|
||||
#endif /* COMPILER1 */
|
||||
|
||||
#define GEN_OFFS(Type,Name) \
|
||||
#define GEN_OFFS_NAME(Type,Name,OutputType) \
|
||||
switch(gen_variant) { \
|
||||
case GEN_OFFSET: \
|
||||
printf("#define OFFSET_%-33s %ld\n", \
|
||||
#Type #Name, offset_of(Type, Name)); \
|
||||
printf("#define OFFSET_%-33s %ld\n", \
|
||||
#OutputType #Name, offset_of(Type, Name)); \
|
||||
break; \
|
||||
case GEN_INDEX: \
|
||||
printf("#define IDX_OFFSET_%-33s %d\n", \
|
||||
#Type #Name, index++); \
|
||||
#OutputType #Name, index++); \
|
||||
break; \
|
||||
case GEN_TABLE: \
|
||||
printf("\tOFFSET_%s,\n", #Type #Name); \
|
||||
printf("\tOFFSET_%s,\n", #OutputType #Name); \
|
||||
break; \
|
||||
}
|
||||
|
||||
#define GEN_OFFS(Type,Name) \
|
||||
GEN_OFFS_NAME(Type,Name,Type)
|
||||
|
||||
#define GEN_SIZE(Type) \
|
||||
switch(gen_variant) { \
|
||||
case GEN_OFFSET: \
|
||||
@ -246,6 +249,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
|
||||
GEN_OFFS(VirtualSpace, _high);
|
||||
printf("\n");
|
||||
|
||||
/* We need to use different names here because of the template parameter */
|
||||
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
|
||||
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
|
||||
printf("\n");
|
||||
|
||||
GEN_OFFS(CodeBlob, _name);
|
||||
GEN_OFFS(CodeBlob, _header_size);
|
||||
GEN_OFFS(CodeBlob, _content_offset);
|
||||
|
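GEN_OFFS stringizes the C++ type name to build the generated OFFSET_ macro name, which breaks down for a template instantiation such as GrowableArray<CodeHeap*> (the stringized name would contain '<', '*' and '>'). GEN_OFFS_NAME therefore takes an extra OutputType parameter that is used only for naming, as the comment above notes. A rough standalone illustration of the idea (simplified to the GEN_OFFSET case, using the standard offsetof instead of HotSpot's offset_of; stand-in types):

    #include <cstdio>
    #include <cstddef>

    // Hedged sketch: stringize a clean alias ("GrowableArray_CodeHeap") for the
    // generated name while still taking the offset from the real template type.
    template <class T> struct GrowableArray { T* _data; int _len; };
    struct CodeHeap;   // opaque here

    #define GEN_OFFS_NAME(Type, Name, OutputType) \
      printf("#define OFFSET_%-33s %d\n", #OutputType #Name, \
             (int) offsetof(Type, Name))

    int main() {
      GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
      GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len,  GrowableArray_CodeHeap);
      return 0;
    }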
@ -43,7 +43,9 @@
|
||||
|
||||
extern pointer __JvmOffsets;
|
||||
|
||||
extern pointer __1cJCodeCacheF_heap_;
|
||||
/* GrowableArray<CodeHeaps*>* */
|
||||
extern pointer __1cJCodeCacheG_heaps_;
|
||||
|
||||
extern pointer __1cIUniverseO_collectedHeap_;
|
||||
|
||||
extern pointer __1cHnmethodG__vtbl_;
|
||||
@ -95,8 +97,8 @@ dtrace:helper:ustack:
|
||||
/!init_done && !this->done/
|
||||
{
|
||||
MARK_LINE;
|
||||
init_done = 1;
|
||||
|
||||
copyin_offset(POINTER_SIZE);
|
||||
copyin_offset(COMPILER);
|
||||
copyin_offset(OFFSET_CollectedHeap_reserved);
|
||||
copyin_offset(OFFSET_MemRegion_start);
|
||||
@ -122,6 +124,9 @@ dtrace:helper:ustack:
|
||||
copyin_offset(OFFSET_CodeHeap_segmap);
|
||||
copyin_offset(OFFSET_CodeHeap_log2_segment_size);
|
||||
|
||||
copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
|
||||
copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
|
||||
|
||||
copyin_offset(OFFSET_VirtualSpace_low);
|
||||
copyin_offset(OFFSET_VirtualSpace_high);
|
||||
|
||||
@ -152,26 +157,14 @@ dtrace:helper:ustack:
|
||||
#error "Don't know architecture"
|
||||
#endif
|
||||
|
||||
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
|
||||
|
||||
/* Reading volatile values */
|
||||
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
|
||||
this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
|
||||
this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
|
||||
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
|
||||
|
||||
this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
|
||||
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
|
||||
|
||||
this->CodeHeap_log2_segment_size = copyin_uint32(
|
||||
this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
|
||||
|
||||
this->Method_vtbl = (pointer) &``__1cNMethodG__vtbl_;
|
||||
/* Read address of GrowableArray<CodeHeaps*> */
|
||||
this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
|
||||
/* Read address of _data array field in GrowableArray */
|
||||
this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
|
||||
this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
|
||||
|
||||
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
|
||||
|
||||
/*
|
||||
* Get Java heap bounds
|
||||
*/
|
||||
@ -187,21 +180,152 @@ dtrace:helper:ustack:
|
||||
this->heap_end = this->heap_start + this->heap_size;
|
||||
}

/*
 * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in
 * the code cache. If more code heaps are added the following probes have to
 * be extended. This is done by simply adding a probe to get the heap bounds
 * and another probe to set the code heap address of the newly created heap.
 */

/*
 * ----- BEGIN: Get bounds of code heaps -----
 */
dtrace:helper:ustack:
|
||||
/!this->done &&
|
||||
this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
|
||||
/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
|
||||
{
|
||||
MARK_LINE;
|
||||
/* CodeHeap 1 */
|
||||
init_done = 1;
|
||||
this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap1_low = copyin_ptr(this->code_heap1_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap1_high = copyin_ptr(this->code_heap1_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
|
||||
{
|
||||
MARK_LINE;
|
||||
/* CodeHeap 2 */
|
||||
init_done = 2;
|
||||
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||
this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap2_low = copyin_ptr(this->code_heap2_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap2_high = copyin_ptr(this->code_heap2_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
|
||||
{
|
||||
/* CodeHeap 3 */
|
||||
init_done = 3;
|
||||
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||
this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap3_low = copyin_ptr(this->code_heap3_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap3_high = copyin_ptr(this->code_heap3_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
|
||||
{
|
||||
/* CodeHeap 4 */
|
||||
init_done = 4;
|
||||
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||
this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap4_low = copyin_ptr(this->code_heap4_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap4_high = copyin_ptr(this->code_heap4_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
|
||||
{
|
||||
/* CodeHeap 5 */
|
||||
init_done = 5;
|
||||
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||
this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap5_low = copyin_ptr(this->code_heap5_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap5_high = copyin_ptr(this->code_heap5_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
/*
|
||||
* ----- END: Get bounds of code heaps -----
|
||||
*/
|
||||
|
||||
/*
|
||||
* ----- BEGIN: Get address of the code heap pc points to -----
|
||||
*/
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap1_address;
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap2_address;
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap3_address;
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap4_address;
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap5_address;
|
||||
}
|
||||
/*
|
||||
* ----- END: Get address of the code heap pc points to -----
|
||||
*/
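The probe chain above replaces the old single CodeCache range check with a per-heap check: each probe tests whether pc falls into one CodeHeap's [low, high) range and, if so, records that heap's address for the later segment-map lookup. The same selection logic written as ordinary C++ for readability (a conceptual sketch; D has no loops, which is why the helper unrolls this into one probe per heap above):

    #include <cstdint>
    #include <cstddef>

    struct CodeHeapBounds {
      uintptr_t address;   // CodeHeap*
      uintptr_t low;       // _memory._low
      uintptr_t high;      // _memory._high
    };

    // Hedged sketch of the unrolled probes above: pick the code heap whose
    // range contains pc, or 0 if pc is not in any code heap.
    uintptr_t code_heap_for(const CodeHeapBounds* heaps, size_t number_of_heaps,
                            uintptr_t pc) {
      for (size_t i = 0; i < number_of_heaps; i++) {
        if (heaps[i].low <= pc && pc < heaps[i].high) {
          return heaps[i].address;
        }
      }
      return 0;
    }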
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->codecache/
|
||||
{
|
||||
MARK_LINE;
|
||||
/*
|
||||
* Get code heap configuration
|
||||
*/
|
||||
this->code_heap_low = copyin_ptr(this->code_heap_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
|
||||
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
|
||||
this->code_heap_log2_segment_size = copyin_uint32(
|
||||
this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
|
||||
|
||||
/*
|
||||
* Find start.
|
||||
* Find start
|
||||
*/
|
||||
this->segment = (this->pc - this->CodeCache_low) >>
|
||||
this->CodeHeap_log2_segment_size;
|
||||
this->block = this->CodeCache_segmap_low;
|
||||
this->segment = (this->pc - this->code_heap_low) >>
|
||||
this->code_heap_log2_segment_size;
|
||||
this->block = this->code_heap_segmap_low;
|
||||
this->tag = copyin_uchar(this->block + this->segment);
|
||||
"second";
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
@ -256,8 +380,8 @@ dtrace:helper:ustack:
|
||||
/!this->done && this->codecache/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->block = this->CodeCache_low +
|
||||
(this->segment << this->CodeHeap_log2_segment_size);
|
||||
this->block = this->code_heap_low +
|
||||
(this->segment << this->code_heap_log2_segment_size);
|
||||
this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
|
||||
}
|
||||
|
||||
|
@ -150,16 +150,18 @@ struct jvm_agent {
|
||||
uint64_t Use_Compressed_Oops_address;
|
||||
uint64_t Universe_narrow_oop_base_address;
|
||||
uint64_t Universe_narrow_oop_shift_address;
|
||||
uint64_t CodeCache_heap_address;
|
||||
uint64_t CodeCache_heaps_address;
|
||||
|
||||
/* Volatiles */
|
||||
uint8_t Use_Compressed_Oops;
|
||||
uint64_t Universe_narrow_oop_base;
|
||||
uint32_t Universe_narrow_oop_shift;
|
||||
uint64_t CodeCache_low;
|
||||
uint64_t CodeCache_high;
|
||||
uint64_t CodeCache_segmap_low;
|
||||
uint64_t CodeCache_segmap_high;
|
||||
// Code cache heaps
|
||||
int32_t Number_of_heaps;
|
||||
uint64_t* Heap_low;
|
||||
uint64_t* Heap_high;
|
||||
uint64_t* Heap_segmap_low;
|
||||
uint64_t* Heap_segmap_high;
|
||||
|
||||
int32_t SIZE_CodeCache_log2_segment;
|
||||
|
||||
@ -278,8 +280,9 @@ static int parse_vmstructs(jvm_agent_t* J) {
|
||||
}
|
||||
|
||||
if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
|
||||
if (strcmp("_heap", vmp->fieldName) == 0) {
|
||||
err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
|
||||
/* Read _heaps field of type GrowableArray<CodeHeaps*>* */
|
||||
if (strcmp("_heaps", vmp->fieldName) == 0) {
|
||||
err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
|
||||
}
|
||||
} else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
|
||||
if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
|
||||
@ -318,7 +321,9 @@ static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
|
||||
}
|
||||
|
||||
static int read_volatiles(jvm_agent_t* J) {
|
||||
uint64_t ptr;
|
||||
int i;
|
||||
uint64_t array_data;
|
||||
uint64_t code_heap_address;
|
||||
int err;
|
||||
|
||||
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
|
||||
@ -334,20 +339,43 @@ static int read_volatiles(jvm_agent_t* J) {
|
||||
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
|
||||
CHECK_FAIL(err);
|
||||
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_low, &J->CodeCache_low);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_high, &J->CodeCache_high);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
|
||||
OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
|
||||
OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
|
||||
CHECK_FAIL(err);
|
||||
/* CodeCache_heaps_address points to GrowableArray<CodeHeaps*>, read _data field
|
||||
pointing to the first entry of type CodeCache* in the array */
|
||||
err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
|
||||
/* Read _len field containing the number of code heaps */
|
||||
err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
|
||||
&J->Number_of_heaps, sizeof(J->Number_of_heaps));
|
||||
|
||||
err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
|
||||
/* Allocate memory for heap configurations */
|
||||
J->Heap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||
J->Heap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||
J->Heap_segmap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||
J->Heap_segmap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||
|
||||
/* Read code heap configurations */
|
||||
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||
/* Read address of heap */
|
||||
err = read_pointer(J, array_data, &code_heap_address);
|
||||
CHECK_FAIL(err);
|
||||
|
||||
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_low, &J->Heap_low[i]);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_high, &J->Heap_high[i]);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
|
||||
OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
|
||||
OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
|
||||
CHECK_FAIL(err);
|
||||
|
||||
/* Increment pointer to next entry */
|
||||
array_data = array_data + POINTER_SIZE;
|
||||
}
|
||||
|
||||
err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
|
||||
&J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
|
||||
CHECK_FAIL(err);
|
||||
|
||||
@ -357,46 +385,57 @@ static int read_volatiles(jvm_agent_t* J) {
|
||||
return err;
|
||||
}
|
||||
|
||||
static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
|
||||
return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
|
||||
}
|
||||
|
||||
static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
|
||||
/* make sure the code cache is up to date */
|
||||
return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
|
||||
int i;
|
||||
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||
if (codeheap_contains(i, J, ptr)) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
|
||||
return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
|
||||
static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
|
||||
return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
|
||||
}
|
||||
|
||||
static uint64_t block_at(jvm_agent_t* J, int i) {
|
||||
return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
|
||||
static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
|
||||
return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
|
||||
}
|
||||
|
||||
static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
|
||||
int err;
|
||||
int i;
|
||||
|
||||
*startp = 0;
|
||||
if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
|
||||
int32_t used;
|
||||
uint64_t segment = segment_for(J, ptr);
|
||||
uint64_t block = J->CodeCache_segmap_low;
|
||||
uint8_t tag;
|
||||
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
||||
CHECK_FAIL(err);
|
||||
if (tag == 0xff)
|
||||
return PS_OK;
|
||||
while (tag > 0) {
|
||||
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||
*startp = 0;
|
||||
if (codeheap_contains(i, J, ptr)) {
|
||||
int32_t used;
|
||||
uint64_t segment = segment_for(i, J, ptr);
|
||||
uint64_t block = J->Heap_segmap_low[i];
|
||||
uint8_t tag;
|
||||
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
||||
CHECK_FAIL(err);
|
||||
segment -= tag;
|
||||
}
|
||||
block = block_at(J, segment);
|
||||
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
|
||||
CHECK_FAIL(err);
|
||||
if (used) {
|
||||
*startp = block + SIZE_HeapBlockHeader;
|
||||
if (tag == 0xff)
|
||||
return PS_OK;
|
||||
while (tag > 0) {
|
||||
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
||||
CHECK_FAIL(err);
|
||||
segment -= tag;
|
||||
}
|
||||
block = block_at(i, J, segment);
|
||||
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
|
||||
CHECK_FAIL(err);
|
||||
if (used) {
|
||||
*startp = block + SIZE_HeapBlockHeader;
|
||||
}
|
||||
}
|
||||
return PS_OK;
|
||||
}
|
||||
return PS_OK;
|
||||
|
||||
fail:
|
||||
return -1;
|
||||
|
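The reworked find_start above first locates the heap containing ptr and then walks that heap's segment map: the segment index is (ptr - heap_low) >> log2_segment_size, and non-zero tags are followed backwards until the segment that begins a heap block is reached. A compact C++ rendering of that walk (a conceptual sketch with simplified types; the real agent reads every value from the target process via ps_pread, and the used-flag check on the heap block is omitted here):

    #include <cstdint>
    #include <cstddef>

    // Hedged sketch of the segment-map walk in find_start above. 'segmap' is the
    // heap's segment map, 'heap_low' the start of the heap, 'log2_seg' the log2 of
    // the segment size, 'header' the size of the HeapBlock header.
    uintptr_t block_start_for(uintptr_t ptr, uintptr_t heap_low,
                              const uint8_t* segmap, int log2_seg,
                              size_t header) {
      uintptr_t segment = (ptr - heap_low) >> log2_seg;
      uint8_t tag = segmap[segment];
      if (tag == 0xff) return 0;           // unused segment: no block here
      while (tag > 0) {                    // follow back-offsets to the block start
        segment -= tag;
        tag = segmap[segment];
      }
      uintptr_t block = heap_low + (segment << log2_seg);
      return block + header;               // skip the HeapBlock header to the code
    }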
@ -82,21 +82,24 @@ StubQueue* AbstractInterpreter::_code = NULL;
|
||||
#endif /* ASSERT */
|
||||
#endif /* COMPILER1 */
|
||||
|
||||
#define GEN_OFFS(Type,Name) \
|
||||
#define GEN_OFFS_NAME(Type,Name,OutputType) \
|
||||
switch(gen_variant) { \
|
||||
case GEN_OFFSET: \
|
||||
printf("#define OFFSET_%-33s %d\n", \
|
||||
#Type #Name, offset_of(Type, Name)); \
|
||||
#OutputType #Name, offset_of(Type, Name)); \
|
||||
break; \
|
||||
case GEN_INDEX: \
|
||||
printf("#define IDX_OFFSET_%-33s %d\n", \
|
||||
#Type #Name, index++); \
|
||||
#OutputType #Name, index++); \
|
||||
break; \
|
||||
case GEN_TABLE: \
|
||||
printf("\tOFFSET_%s,\n", #Type #Name); \
|
||||
printf("\tOFFSET_%s,\n", #OutputType #Name); \
|
||||
break; \
|
||||
}
|
||||
|
||||
#define GEN_OFFS(Type,Name) \
|
||||
GEN_OFFS_NAME(Type,Name,Type)
|
||||
|
||||
#define GEN_SIZE(Type) \
|
||||
switch(gen_variant) { \
|
||||
case GEN_OFFSET: \
|
||||
@ -241,6 +244,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
|
||||
GEN_OFFS(VirtualSpace, _high);
|
||||
printf("\n");
|
||||
|
||||
/* We need to use different names here because of the template parameter */
|
||||
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
|
||||
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
|
||||
printf("\n");
|
||||
|
||||
GEN_OFFS(CodeBlob, _name);
|
||||
GEN_OFFS(CodeBlob, _header_size);
|
||||
GEN_OFFS(CodeBlob, _content_offset);
|
||||
|
@ -43,7 +43,9 @@
|
||||
|
||||
extern pointer __JvmOffsets;
|
||||
|
||||
extern pointer __1cJCodeCacheF_heap_;
|
||||
/* GrowableArray<CodeHeaps*>* */
|
||||
extern pointer __1cJCodeCacheG_heaps_;
|
||||
|
||||
extern pointer __1cIUniverseO_collectedHeap_;
|
||||
|
||||
extern pointer __1cHnmethodG__vtbl_;
|
||||
@ -95,8 +97,8 @@ dtrace:helper:ustack:
|
||||
/!init_done && !this->done/
|
||||
{
|
||||
MARK_LINE;
|
||||
init_done = 1;
|
||||
|
||||
|
||||
copyin_offset(POINTER_SIZE);
|
||||
copyin_offset(COMPILER);
|
||||
copyin_offset(OFFSET_CollectedHeap_reserved);
|
||||
copyin_offset(OFFSET_MemRegion_start);
|
||||
@ -122,6 +124,9 @@ dtrace:helper:ustack:
|
||||
copyin_offset(OFFSET_CodeHeap_segmap);
|
||||
copyin_offset(OFFSET_CodeHeap_log2_segment_size);
|
||||
|
||||
copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
|
||||
copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
|
||||
|
||||
copyin_offset(OFFSET_VirtualSpace_low);
|
||||
copyin_offset(OFFSET_VirtualSpace_high);
|
||||
|
||||
@ -152,24 +157,13 @@ dtrace:helper:ustack:
|
||||
#error "Don't know architecture"
|
||||
#endif
|
||||
|
||||
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
|
||||
/* Read address of GrowableArray<CodeHeaps*> */
|
||||
this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
|
||||
/* Read address of _data array field in GrowableArray */
|
||||
this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
|
||||
this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
|
||||
|
||||
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
|
||||
this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
|
||||
this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
|
||||
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
|
||||
|
||||
this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
|
||||
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
|
||||
|
||||
this->CodeHeap_log2_segment_size = copyin_uint32(
|
||||
this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
|
||||
|
||||
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
|
||||
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
|
||||
|
||||
/*
|
||||
* Get Java heap bounds
|
||||
@ -186,21 +180,152 @@ dtrace:helper:ustack:
|
||||
this->heap_end = this->heap_start + this->heap_size;
|
||||
}
|
||||
|
||||
/*
|
||||
* IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in
|
||||
* the code cache. If more code heaps are added the following probes have to
|
||||
* be extended. This is done by simply adding a probe to get the heap bounds
|
||||
* and another probe to set the code heap address of the newly created heap.
|
||||
*/
|
||||
|
||||
/*
|
||||
* ----- BEGIN: Get bounds of code heaps -----
|
||||
*/
|
||||
dtrace:helper:ustack:
|
||||
/!this->done &&
|
||||
this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
|
||||
/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
|
||||
{
|
||||
MARK_LINE;
|
||||
/* CodeHeap 1 */
|
||||
init_done = 1;
|
||||
this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap1_low = copyin_ptr(this->code_heap1_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap1_high = copyin_ptr(this->code_heap1_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
|
||||
{
|
||||
MARK_LINE;
|
||||
/* CodeHeap 2 */
|
||||
init_done = 2;
|
||||
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||
this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap2_low = copyin_ptr(this->code_heap2_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap2_high = copyin_ptr(this->code_heap2_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
|
||||
{
|
||||
/* CodeHeap 3 */
|
||||
init_done = 3;
|
||||
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||
this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap3_low = copyin_ptr(this->code_heap3_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap3_high = copyin_ptr(this->code_heap3_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
|
||||
{
|
||||
/* CodeHeap 4 */
|
||||
init_done = 4;
|
||||
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||
this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap4_low = copyin_ptr(this->code_heap4_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap4_high = copyin_ptr(this->code_heap4_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
|
||||
{
|
||||
/* CodeHeap 5 */
|
||||
init_done = 5;
|
||||
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||
this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
|
||||
this->code_heap5_low = copyin_ptr(this->code_heap5_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap5_high = copyin_ptr(this->code_heap5_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||
}
|
||||
/*
|
||||
* ----- END: Get bounds of code heaps -----
|
||||
*/
|
||||
|
||||
/*
|
||||
* ----- BEGIN: Get address of the code heap pc points to -----
|
||||
*/
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap1_address;
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap2_address;
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap3_address;
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap4_address;
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->codecache = 1;
|
||||
this->code_heap_address = this->code_heap5_address;
|
||||
}
|
||||
/*
|
||||
* ----- END: Get address of the code heap pc points to -----
|
||||
*/
|
||||
|
||||
dtrace:helper:ustack:
|
||||
/!this->done && this->codecache/
|
||||
{
|
||||
MARK_LINE;
|
||||
/*
|
||||
* Get code heap configuration
|
||||
*/
|
||||
this->code_heap_low = copyin_ptr(this->code_heap_address +
|
||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||
this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
|
||||
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
|
||||
this->code_heap_log2_segment_size = copyin_uint32(
|
||||
this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
|
||||
|
||||
/*
|
||||
* Find start.
|
||||
* Find start
|
||||
*/
|
||||
this->segment = (this->pc - this->CodeCache_low) >>
|
||||
this->CodeHeap_log2_segment_size;
|
||||
this->block = this->CodeCache_segmap_low;
|
||||
this->segment = (this->pc - this->code_heap_low) >>
|
||||
this->code_heap_log2_segment_size;
|
||||
this->block = this->code_heap_segmap_low;
|
||||
this->tag = copyin_uchar(this->block + this->segment);
|
||||
"second";
|
||||
}
|
||||
|
||||
dtrace:helper:ustack:
|
||||
@ -255,8 +380,8 @@ dtrace:helper:ustack:
|
||||
/!this->done && this->codecache/
|
||||
{
|
||||
MARK_LINE;
|
||||
this->block = this->CodeCache_low +
|
||||
(this->segment << this->CodeHeap_log2_segment_size);
|
||||
this->block = this->code_heap_low +
|
||||
(this->segment << this->code_heap_log2_segment_size);
|
||||
this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
|
||||
}
|
||||
|
||||
|
@ -150,16 +150,18 @@ struct jvm_agent {
|
||||
uint64_t Use_Compressed_Oops_address;
|
||||
uint64_t Universe_narrow_oop_base_address;
|
||||
uint64_t Universe_narrow_oop_shift_address;
|
||||
uint64_t CodeCache_heap_address;
|
||||
uint64_t CodeCache_heaps_address;
|
||||
|
||||
/* Volatiles */
|
||||
uint8_t Use_Compressed_Oops;
|
||||
uint64_t Universe_narrow_oop_base;
|
||||
uint32_t Universe_narrow_oop_shift;
|
||||
uint64_t CodeCache_low;
|
||||
uint64_t CodeCache_high;
|
||||
uint64_t CodeCache_segmap_low;
|
||||
uint64_t CodeCache_segmap_high;
|
||||
// Code cache heaps
|
||||
int32_t Number_of_heaps;
|
||||
uint64_t* Heap_low;
|
||||
uint64_t* Heap_high;
|
||||
uint64_t* Heap_segmap_low;
|
||||
uint64_t* Heap_segmap_high;
|
||||
|
||||
int32_t SIZE_CodeCache_log2_segment;
|
||||
|
||||
@ -278,8 +280,9 @@ static int parse_vmstructs(jvm_agent_t* J) {
|
||||
}
|
||||
|
||||
if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
|
||||
if (strcmp("_heap", vmp->fieldName) == 0) {
|
||||
err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
|
||||
/* Read _heaps field of type GrowableArray<CodeHeaps*>* */
|
||||
if (strcmp("_heaps", vmp->fieldName) == 0) {
|
||||
err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
|
||||
}
|
||||
} else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
|
||||
if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
|
||||
@ -318,7 +321,9 @@ static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
|
||||
}
|
||||
|
||||
static int read_volatiles(jvm_agent_t* J) {
|
||||
uint64_t ptr;
|
||||
int i;
|
||||
uint64_t array_data;
|
||||
uint64_t code_heap_address;
|
||||
int err;
|
||||
|
||||
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
|
||||
@ -334,20 +339,43 @@ static int read_volatiles(jvm_agent_t* J) {
|
||||
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
|
||||
CHECK_FAIL(err);
|
||||
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_low, &J->CodeCache_low);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_high, &J->CodeCache_high);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
|
||||
OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
|
||||
OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
|
||||
CHECK_FAIL(err);
|
||||
/* CodeCache_heaps_address points to GrowableArray<CodeHeaps*>, read _data field
|
||||
pointing to the first entry of type CodeCache* in the array */
|
||||
err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
|
||||
/* Read _len field containing the number of code heaps */
|
||||
err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
|
||||
&J->Number_of_heaps, sizeof(J->Number_of_heaps));
|
||||
|
||||
err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
|
||||
/* Allocate memory for heap configurations */
|
||||
J->Heap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||
J->Heap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||
J->Heap_segmap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||
J->Heap_segmap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||
|
||||
/* Read code heap configurations */
|
||||
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||
/* Read address of heap */
|
||||
err = read_pointer(J, array_data, &code_heap_address);
|
||||
CHECK_FAIL(err);
|
||||
|
||||
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_low, &J->Heap_low[i]);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
|
||||
OFFSET_VirtualSpace_high, &J->Heap_high[i]);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
|
||||
OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
|
||||
CHECK_FAIL(err);
|
||||
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
|
||||
OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
|
||||
CHECK_FAIL(err);
|
||||
|
||||
/* Increment pointer to next entry */
|
||||
array_data = array_data + POINTER_SIZE;
|
||||
}
|
||||
|
||||
err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
|
||||
&J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
|
||||
CHECK_FAIL(err);
|
||||
|
||||
@ -357,46 +385,57 @@ static int read_volatiles(jvm_agent_t* J) {
|
||||
return err;
|
||||
}
|
||||
|
||||
static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
|
||||
return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
|
||||
}
|
||||
|
||||
static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
|
||||
/* make sure the code cache is up to date */
|
||||
return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
|
||||
int i;
|
||||
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||
if (codeheap_contains(i, J, ptr)) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
|
||||
return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
|
||||
static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
|
||||
return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
|
||||
}
|
||||
|
||||
static uint64_t block_at(jvm_agent_t* J, int i) {
|
||||
return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
|
||||
static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
|
||||
return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
|
||||
}
|
||||
|
||||
static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
|
||||
int err;
|
||||
int i;
|
||||
|
||||
*startp = 0;
|
||||
if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
|
||||
int32_t used;
|
||||
uint64_t segment = segment_for(J, ptr);
|
||||
uint64_t block = J->CodeCache_segmap_low;
|
||||
uint8_t tag;
|
||||
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
||||
CHECK_FAIL(err);
|
||||
if (tag == 0xff)
|
||||
return PS_OK;
|
||||
while (tag > 0) {
|
||||
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||
*startp = 0;
|
||||
if (codeheap_contains(i, J, ptr)) {
|
||||
int32_t used;
|
||||
uint64_t segment = segment_for(i, J, ptr);
|
||||
uint64_t block = J->Heap_segmap_low[i];
|
||||
uint8_t tag;
|
||||
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
||||
CHECK_FAIL(err);
|
||||
segment -= tag;
|
||||
}
|
||||
block = block_at(J, segment);
|
||||
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
|
||||
CHECK_FAIL(err);
|
||||
if (used) {
|
||||
*startp = block + SIZE_HeapBlockHeader;
|
||||
if (tag == 0xff)
|
||||
return PS_OK;
|
||||
while (tag > 0) {
|
||||
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
||||
CHECK_FAIL(err);
|
||||
segment -= tag;
|
||||
}
|
||||
block = block_at(i, J, segment);
|
||||
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
|
||||
CHECK_FAIL(err);
|
||||
if (used) {
|
||||
*startp = block + SIZE_HeapBlockHeader;
|
||||
}
|
||||
}
|
||||
return PS_OK;
|
||||
}
|
||||
return PS_OK;
|
||||
|
||||
fail:
|
||||
return -1;
|
||||
|
@ -3129,8 +3129,7 @@ bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
|
||||
return true;
|
||||
}
|
||||
|
||||
char* os::reserve_memory_special(size_t size, size_t alignment, char* addr,
|
||||
bool exec) {
|
||||
char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
|
||||
fatal("os::reserve_memory_special should not be called on Solaris.");
|
||||
return NULL;
|
||||
}
|
||||
|
@ -265,7 +265,7 @@ frame os::current_frame() {
|
||||
CAST_FROM_FN_PTR(address, os::current_frame));
|
||||
if (os::is_first_C_frame(&myframe)) {
|
||||
// stack is not walkable
|
||||
return frame(NULL, NULL, NULL);
|
||||
return frame(NULL, NULL, false);
|
||||
} else {
|
||||
return os::get_sender_for_C_frame(&myframe);
|
||||
}
|
||||
|
@ -327,7 +327,7 @@ void Canonicalizer::do_ShiftOp (ShiftOp* x) {
|
||||
if (t2->is_constant()) {
|
||||
switch (t2->tag()) {
|
||||
case intTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return;
|
||||
case longTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return;
|
||||
case longTag : if (t2->as_LongConstant()->value() == (jlong)0) set_canonical(x->x()); return;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
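The one-line fix above addresses a type confusion: for a longTag shift amount the constant has to be fetched with as_LongConstant(); as_IntConstant() on a long-tagged value yields no result, so the "x << 0 == x" canonicalization was missed. A tiny self-contained illustration of the idea (stand-in type, not C1's value hierarchy):

    #include <cassert>
    #include <cstdint>

    // Hedged sketch: a tagged constant must be read through the accessor that
    // matches its tag, mirroring the corrected switch above.
    struct TaggedConstant {
      enum { intTag, longTag } tag;
      int64_t bits;

      const int32_t* as_int()  const { return tag == intTag  ? (const int32_t*)&bits : nullptr; }
      const int64_t* as_long() const { return tag == longTag ? &bits : nullptr; }
    };

    int main() {
      TaggedConstant zero_shift = { TaggedConstant::longTag, 0 };
      assert(zero_shift.as_int() == nullptr);   // reading it as int finds nothing
      assert(*zero_shift.as_long() == 0);       // the long accessor sees the zero shift
      return 0;
    }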
@ -808,28 +808,41 @@ void Canonicalizer::do_ExceptionObject(ExceptionObject* x) {}
|
||||
|
||||
static bool match_index_and_scale(Instruction* instr,
|
||||
Instruction** index,
|
||||
int* log2_scale,
|
||||
Instruction** instr_to_unpin) {
|
||||
*instr_to_unpin = NULL;
|
||||
|
||||
// Skip conversion ops
|
||||
int* log2_scale) {
|
||||
// Skip conversion ops. This works only on 32bit because of the implicit l2i that the
|
||||
// unsafe performs.
|
||||
#ifndef _LP64
|
||||
Convert* convert = instr->as_Convert();
|
||||
if (convert != NULL) {
|
||||
if (convert != NULL && convert->op() == Bytecodes::_i2l) {
|
||||
assert(convert->value()->type() == intType, "invalid input type");
|
||||
instr = convert->value();
|
||||
}
|
||||
#endif
|
||||
|
||||
ShiftOp* shift = instr->as_ShiftOp();
|
||||
if (shift != NULL) {
|
||||
if (shift->is_pinned()) {
|
||||
*instr_to_unpin = shift;
|
||||
if (shift->op() == Bytecodes::_lshl) {
|
||||
assert(shift->x()->type() == longType, "invalid input type");
|
||||
} else {
|
||||
#ifndef _LP64
|
||||
if (shift->op() == Bytecodes::_ishl) {
|
||||
assert(shift->x()->type() == intType, "invalid input type");
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
// Constant shift value?
|
||||
Constant* con = shift->y()->as_Constant();
|
||||
if (con == NULL) return false;
|
||||
// Well-known type and value?
|
||||
IntConstant* val = con->type()->as_IntConstant();
|
||||
if (val == NULL) return false;
|
||||
if (shift->x()->type() != intType) return false;
|
||||
assert(val != NULL, "Should be an int constant");
|
||||
|
||||
*index = shift->x();
|
||||
int tmp_scale = val->value();
|
||||
if (tmp_scale >= 0 && tmp_scale < 4) {
|
||||
@ -842,31 +855,42 @@ static bool match_index_and_scale(Instruction* instr,
|
||||
|
||||
ArithmeticOp* arith = instr->as_ArithmeticOp();
|
||||
if (arith != NULL) {
|
||||
if (arith->is_pinned()) {
|
||||
*instr_to_unpin = arith;
|
||||
// See if either arg is a known constant
|
||||
Constant* con = arith->x()->as_Constant();
|
||||
if (con != NULL) {
|
||||
*index = arith->y();
|
||||
} else {
|
||||
con = arith->y()->as_Constant();
|
||||
if (con == NULL) return false;
|
||||
*index = arith->x();
|
||||
}
|
||||
long const_value;
|
||||
// Check for integer multiply
|
||||
if (arith->op() == Bytecodes::_imul) {
|
||||
// See if either arg is a known constant
|
||||
Constant* con = arith->x()->as_Constant();
|
||||
if (con != NULL) {
|
||||
*index = arith->y();
|
||||
if (arith->op() == Bytecodes::_lmul) {
|
||||
assert((*index)->type() == longType, "invalid input type");
|
||||
LongConstant* val = con->type()->as_LongConstant();
|
||||
assert(val != NULL, "expecting a long constant");
|
||||
const_value = val->value();
|
||||
} else {
|
||||
#ifndef _LP64
|
||||
if (arith->op() == Bytecodes::_imul) {
|
||||
assert((*index)->type() == intType, "invalid input type");
|
||||
IntConstant* val = con->type()->as_IntConstant();
|
||||
assert(val != NULL, "expecting an int constant");
|
||||
const_value = val->value();
|
||||
} else {
|
||||
con = arith->y()->as_Constant();
|
||||
if (con == NULL) return false;
|
||||
*index = arith->x();
|
||||
}
|
||||
if ((*index)->type() != intType) return false;
|
||||
// Well-known type and value?
|
||||
IntConstant* val = con->type()->as_IntConstant();
|
||||
if (val == NULL) return false;
|
||||
switch (val->value()) {
|
||||
case 1: *log2_scale = 0; return true;
|
||||
case 2: *log2_scale = 1; return true;
|
||||
case 4: *log2_scale = 2; return true;
|
||||
case 8: *log2_scale = 3; return true;
|
||||
default: return false;
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
switch (const_value) {
|
||||
case 1: *log2_scale = 0; return true;
|
||||
case 2: *log2_scale = 1; return true;
|
||||
case 4: *log2_scale = 2; return true;
|
||||
case 8: *log2_scale = 3; return true;
|
||||
default: return false;
|
||||
}
|
||||
}
|
||||
|
||||
@ -879,29 +903,37 @@ static bool match(UnsafeRawOp* x,
|
||||
Instruction** base,
|
||||
Instruction** index,
|
||||
int* log2_scale) {
|
||||
Instruction* instr_to_unpin = NULL;
|
||||
ArithmeticOp* root = x->base()->as_ArithmeticOp();
|
||||
if (root == NULL) return false;
|
||||
// Limit ourselves to addition for now
|
||||
if (root->op() != Bytecodes::_ladd) return false;
|
||||
|
||||
bool match_found = false;
|
||||
// Try to find shift or scale op
|
||||
if (match_index_and_scale(root->y(), index, log2_scale, &instr_to_unpin)) {
|
||||
if (match_index_and_scale(root->y(), index, log2_scale)) {
|
||||
*base = root->x();
|
||||
} else if (match_index_and_scale(root->x(), index, log2_scale, &instr_to_unpin)) {
|
||||
match_found = true;
|
||||
} else if (match_index_and_scale(root->x(), index, log2_scale)) {
|
||||
*base = root->y();
|
||||
} else if (root->y()->as_Convert() != NULL) {
|
||||
match_found = true;
|
||||
} else if (NOT_LP64(root->y()->as_Convert() != NULL) LP64_ONLY(false)) {
|
||||
// Skipping i2l works only on 32bit because of the implicit l2i that the unsafe performs.
|
||||
// 64bit needs a real sign-extending conversion.
|
||||
Convert* convert = root->y()->as_Convert();
|
||||
if (convert->op() == Bytecodes::_i2l && convert->value()->type() == intType) {
|
||||
if (convert->op() == Bytecodes::_i2l) {
|
||||
assert(convert->value()->type() == intType, "should be an int");
|
||||
// pick base and index, setting scale at 1
|
||||
*base = root->x();
|
||||
*index = convert->value();
|
||||
*log2_scale = 0;
|
||||
} else {
|
||||
return false;
|
||||
match_found = true;
|
||||
}
|
||||
} else {
|
||||
// doesn't match any expected sequences
|
||||
return false;
|
||||
}
|
||||
// The default solution
|
||||
if (!match_found) {
|
||||
*base = root->x();
|
||||
*index = root->y();
|
||||
*log2_scale = 0;
|
||||
}
|
||||
|
||||
// If the value is pinned then it will be always be computed so
|
||||
|
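match_index_and_scale() recognizes index expressions of the form index << n or index * c where the multiplier is 1, 2, 4 or 8, and records the scale as log2; after this change it also accepts the long-typed forms (lshl/lmul) that appear on 64-bit, while the int forms stay 32-bit only. A small standalone sketch of the multiplier-to-log2_scale mapping used in the switch above:

    // Hedged sketch of the constant-multiplier handling in match_index_and_scale():
    // only power-of-two scales supported by the address mode are accepted.
    static bool multiplier_to_log2_scale(long multiplier, int* log2_scale) {
      switch (multiplier) {
        case 1: *log2_scale = 0; return true;
        case 2: *log2_scale = 1; return true;
        case 4: *log2_scale = 2; return true;
        case 8: *log2_scale = 3; return true;
        default: return false;
      }
    }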
@ -76,6 +76,11 @@ void Compiler::initialize() {
|
||||
}
|
||||
}
|
||||
|
||||
int Compiler::code_buffer_size() {
|
||||
assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
|
||||
return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
|
||||
}
|
||||
|
||||
BufferBlob* Compiler::init_buffer_blob() {
|
||||
// Allocate buffer blob once at startup since allocation for each
|
||||
// compilation seems to be too expensive (at least on Intel win32).
|
||||
|
@ -54,6 +54,9 @@ class Compiler: public AbstractCompiler {
|
||||
|
||||
// Print compilation timers and statistics
|
||||
virtual void print_timers();
|
||||
|
||||
// Size of the code buffer
|
||||
static int code_buffer_size();
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_C1_C1_COMPILER_HPP
|
||||
|
@ -2045,6 +2045,8 @@ void LIRGenerator::do_RoundFP(RoundFP* x) {
|
||||
}
|
||||
}
|
||||
|
||||
// Here UnsafeGetRaw may have x->base() and x->index() be int or long
|
||||
// on both 64 and 32 bits. Expecting x->base() to be always long on 64bit.
|
||||
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
|
||||
LIRItem base(x->base(), this);
|
||||
LIRItem idx(this);
|
||||
@ -2059,50 +2061,73 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
|
||||
|
||||
int log2_scale = 0;
|
||||
if (x->has_index()) {
|
||||
assert(x->index()->type()->tag() == intTag, "should not find non-int index");
|
||||
log2_scale = x->log2_scale();
|
||||
}
|
||||
|
||||
assert(!x->has_index() || idx.value() == x->index(), "should match");
|
||||
|
||||
LIR_Opr base_op = base.result();
|
||||
LIR_Opr index_op = idx.result();
|
||||
#ifndef _LP64
|
||||
if (x->base()->type()->tag() == longTag) {
|
||||
base_op = new_register(T_INT);
|
||||
__ convert(Bytecodes::_l2i, base.result(), base_op);
|
||||
} else {
|
||||
assert(x->base()->type()->tag() == intTag, "must be");
|
||||
}
|
||||
if (x->has_index()) {
|
||||
if (x->index()->type()->tag() == longTag) {
|
||||
LIR_Opr long_index_op = index_op;
|
||||
if (x->index()->type()->is_constant()) {
|
||||
long_index_op = new_register(T_LONG);
|
||||
__ move(index_op, long_index_op);
|
||||
}
|
||||
index_op = new_register(T_INT);
|
||||
__ convert(Bytecodes::_l2i, long_index_op, index_op);
|
||||
} else {
|
||||
assert(x->index()->type()->tag() == intTag, "must be");
|
||||
}
|
||||
}
|
||||
// At this point base and index should be all ints.
|
||||
assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
|
||||
assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
|
||||
#else
|
||||
if (x->has_index()) {
|
||||
if (x->index()->type()->tag() == intTag) {
|
||||
if (!x->index()->type()->is_constant()) {
|
||||
index_op = new_register(T_LONG);
|
||||
__ convert(Bytecodes::_i2l, idx.result(), index_op);
|
||||
}
|
||||
} else {
|
||||
assert(x->index()->type()->tag() == longTag, "must be");
|
||||
if (x->index()->type()->is_constant()) {
|
||||
index_op = new_register(T_LONG);
|
||||
__ move(idx.result(), index_op);
|
||||
}
|
||||
}
|
||||
}
|
||||
// At this point base is a long non-constant
|
||||
// Index is a long register or a int constant.
|
||||
// We allow the constant to stay an int because that would allow us a more compact encoding by
|
||||
// embedding an immediate offset in the address expression. If we have a long constant, we have to
|
||||
// move it into a register first.
|
||||
assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
|
||||
assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
|
||||
(index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
|
||||
#endif
|
||||
|
||||
BasicType dst_type = x->basic_type();
|
||||
LIR_Opr index_op = idx.result();
|
||||
|
||||
LIR_Address* addr;
|
||||
if (index_op->is_constant()) {
|
||||
assert(log2_scale == 0, "must not have a scale");
|
||||
assert(index_op->type() == T_INT, "only int constants supported");
|
||||
addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
|
||||
} else {
|
||||
#ifdef X86
|
||||
#ifdef _LP64
|
||||
if (!index_op->is_illegal() && index_op->type() == T_INT) {
|
||||
LIR_Opr tmp = new_pointer_register();
|
||||
__ convert(Bytecodes::_i2l, index_op, tmp);
|
||||
index_op = tmp;
|
||||
}
|
||||
#endif
|
||||
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
|
||||
#elif defined(ARM)
|
||||
addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
|
||||
#else
|
||||
if (index_op->is_illegal() || log2_scale == 0) {
|
||||
#ifdef _LP64
|
||||
if (!index_op->is_illegal() && index_op->type() == T_INT) {
|
||||
LIR_Opr tmp = new_pointer_register();
|
||||
__ convert(Bytecodes::_i2l, index_op, tmp);
|
||||
index_op = tmp;
|
||||
}
|
||||
#endif
|
||||
addr = new LIR_Address(base_op, index_op, dst_type);
|
||||
} else {
|
||||
LIR_Opr tmp = new_pointer_register();
|
||||
@ -2129,7 +2154,6 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
|
||||
BasicType type = x->basic_type();
|
||||
|
||||
if (x->has_index()) {
|
||||
assert(x->index()->type()->tag() == intTag, "should not find non-int index");
|
||||
log2_scale = x->log2_scale();
|
||||
}
|
||||
|
||||
@ -2152,38 +2176,39 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
|
||||
set_no_result(x);
|
||||
|
||||
LIR_Opr base_op = base.result();
|
||||
LIR_Opr index_op = idx.result();
|
||||
|
||||
#ifndef _LP64
|
||||
if (x->base()->type()->tag() == longTag) {
|
||||
base_op = new_register(T_INT);
|
||||
__ convert(Bytecodes::_l2i, base.result(), base_op);
|
||||
} else {
|
||||
assert(x->base()->type()->tag() == intTag, "must be");
|
||||
}
|
||||
if (x->has_index()) {
|
||||
if (x->index()->type()->tag() == longTag) {
|
||||
index_op = new_register(T_INT);
|
||||
__ convert(Bytecodes::_l2i, idx.result(), index_op);
|
||||
}
|
||||
}
|
||||
// At this point base and index should be all ints and not constants
|
||||
assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
|
||||
assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
|
||||
#else
|
||||
if (x->has_index()) {
|
||||
if (x->index()->type()->tag() == intTag) {
|
||||
index_op = new_register(T_LONG);
|
||||
__ convert(Bytecodes::_i2l, idx.result(), index_op);
|
||||
}
|
||||
}
|
||||
// At this point base and index are long and non-constant
|
||||
assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
|
||||
assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
|
||||
#endif
|
||||
|
||||
LIR_Opr index_op = idx.result();
|
||||
if (log2_scale != 0) {
|
||||
// temporary fix (platform dependent code without shift on Intel would be better)
|
||||
index_op = new_pointer_register();
|
||||
#ifdef _LP64
|
||||
if(idx.result()->type() == T_INT) {
|
||||
__ convert(Bytecodes::_i2l, idx.result(), index_op);
|
||||
} else {
|
||||
#endif
|
||||
// TODO: ARM also allows embedded shift in the address
|
||||
__ move(idx.result(), index_op);
|
||||
#ifdef _LP64
|
||||
}
|
||||
#endif
|
||||
// TODO: ARM also allows embedded shift in the address
|
||||
__ shift_left(index_op, log2_scale, index_op);
|
||||
}
|
||||
#ifdef _LP64
|
||||
else if(!index_op->is_illegal() && index_op->type() == T_INT) {
|
||||
LIR_Opr tmp = new_pointer_register();
|
||||
__ convert(Bytecodes::_i2l, index_op, tmp);
|
||||
index_op = tmp;
|
||||
}
|
||||
#endif
|
||||
|
||||
LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
|
||||
__ move(value.result(), addr);
|
||||
|
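The reworked do_UnsafeGetRaw/do_UnsafePutRaw code normalizes operand widths before building the address: on 32-bit VMs a long base or index is narrowed with l2i, on 64-bit an int index is widened with i2l, and the access then goes through a LIR_Address of the form base + (index << log2_scale) + displacement. A conceptual sketch of the resulting address arithmetic, outside the LIR framework (illustrative only):

    #include <cstdint>

    // Hedged sketch: the effective address of an unsafe raw access once base and
    // index have been normalized to pointer width.
    static inline uintptr_t unsafe_raw_address(uintptr_t base, intptr_t index,
                                               int log2_scale, intptr_t disp) {
      return base + (uintptr_t)(index << log2_scale) + (uintptr_t)disp;
    }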
@ -287,9 +287,6 @@
|
||||
develop(bool, InstallMethods, true, \
|
||||
"Install methods at the end of successful compilations") \
|
||||
\
|
||||
product(intx, CompilationRepeat, 0, \
|
||||
"Number of times to recompile method before returning result") \
|
||||
\
|
||||
develop(intx, NMethodSizeLimit, (64*K)*wordSize, \
|
||||
"Maximum size of a compiled method.") \
|
||||
\
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include "ci/ciUtilities.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "code/scopeDesc.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
#include "compiler/compileLog.hpp"
|
||||
@ -1085,7 +1086,7 @@ void ciEnv::register_method(ciMethod* target,
|
||||
} else {
|
||||
// The CodeCache is full. Print out warning and disable compilation.
|
||||
record_failure("code cache is full");
|
||||
CompileBroker::handle_full_code_cache();
|
||||
CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1694,8 +1694,6 @@ void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_da
|
||||
constantPoolHandle cp (THREAD, _method->constants());
|
||||
|
||||
for(int i = 0; i < exlength; i++) {
|
||||
//reacquire the table in case a GC happened
|
||||
ExceptionTable exhandlers(_method());
|
||||
u2 start_pc = exhandlers.start_pc(i);
|
||||
u2 end_pc = exhandlers.end_pc(i);
|
||||
u2 handler_pc = exhandlers.handler_pc(i);
|
||||
@ -1803,8 +1801,6 @@ void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, S
|
||||
ExceptionTable exhandlers(_method());
|
||||
int exlength = exhandlers.length();
|
||||
for(int i = 0; i < exlength; i++) {
|
||||
//reacquire the table in case a GC happened
|
||||
ExceptionTable exhandlers(_method());
|
||||
u2 start_pc = exhandlers.start_pc(i);
|
||||
u2 end_pc = exhandlers.end_pc(i);
|
||||
u2 handler_pc = exhandlers.handler_pc(i);
|
||||
|
@ -229,14 +229,11 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
|
||||
return blob;
|
||||
}
|
||||
|
||||
|
||||
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
|
||||
void* p = CodeCache::allocate(size, is_critical);
|
||||
return p;
|
||||
return CodeCache::allocate(size, CodeBlobType::NonMethod, is_critical);
|
||||
}
|
||||
|
||||
|
||||
void BufferBlob::free( BufferBlob *blob ) {
|
||||
void BufferBlob::free(BufferBlob *blob) {
|
||||
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
|
||||
blob->flush();
|
||||
{
|
||||
@ -299,7 +296,6 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
|
||||
return blob;
|
||||
}
|
||||
|
||||
|
||||
//----------------------------------------------------------------------------------------------------
|
||||
// Implementation of RuntimeStub
|
||||
|
||||
@ -340,14 +336,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
|
||||
|
||||
|
||||
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
|
||||
void* p = CodeCache::allocate(size, true);
|
||||
void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
|
||||
if (!p) fatal("Initial size of CodeCache is too small");
|
||||
return p;
|
||||
}
|
||||
|
||||
// operator new shared by all singletons:
|
||||
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
|
||||
void* p = CodeCache::allocate(size, true);
|
||||
void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
|
||||
if (!p) fatal("Initial size of CodeCache is too small");
|
||||
return p;
|
||||
}
|
||||
|
@ -30,6 +30,18 @@
#include "runtime/frame.hpp"
#include "runtime/handles.hpp"

// CodeBlob Types
// Used in the CodeCache to assign CodeBlobs to different CodeHeaps
struct CodeBlobType {
  enum {
    MethodNonProfiled   = 0,    // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods)
    MethodProfiled      = 1,    // Execution level 2 and 3 (profiled) nmethods
    NonMethod           = 2,    // Non-methods like Buffers, Adapters and Runtime Stubs
    All                 = 3,    // All types (No code cache segmentation)
    NumTypes            = 4     // Number of CodeBlobTypes
  };
};

// CodeBlob - superclass for all entries in the CodeCache.
//
// Suptypes are:
@ -385,9 +397,6 @@ class DeoptimizationBlob: public SingletonBlob {
    return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
  }

  // GC for args
  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }

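Elsewhere in this change (see the ciEnv hunk above) the blob type is derived from the compilation level via CodeCache::get_code_blob_type(comp_level). Based only on the comments in the enum above (levels 1 and 4 are non-profiled, levels 2 and 3 are profiled), a hypothetical version of that mapping could look as follows; the real implementation is not shown in this excerpt and may differ:

    // Hedged sketch of a level-to-heap mapping implied by the enum comments above.
    // Hypothetical helper, not the VM's code.
    static int code_blob_type_for_level(int comp_level) {
      const int MethodNonProfiled = 0;   // levels 1 and 4, plus native methods
      const int MethodProfiled    = 1;   // levels 2 and 3
      return (comp_level == 2 || comp_level == 3) ? MethodProfiled
                                                  : MethodNonProfiled;
    }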
File diff suppressed because it is too large
@ -26,105 +26,117 @@
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "code/nmethod.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/mutexLocker.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace the zone has at table for
//     locating a method given a addess of an instruction.
// -- Implementation --
// The CodeCache consists of one or more CodeHeaps, each of which contains
// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
// types are available:
//  - Non-methods: Non-methods like Buffers, Adapters and Runtime Stubs
//  - Profiled nmethods: nmethods that are profiled, i.e., those
//    executed at level 2 or 3
//  - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
//    executed at level 1 or 4 and native methods
//  - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-method code heap getting full, non-method code
// will be stored in the non-profiled code heap as a fallback solution.
//
// Depending on the availability of compilers and TieredCompilation there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
// for details).
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default
// code cache segmentation is turned on if TieredCompilation is enabled and
// ReservedCodeCacheSize >= 240 MB.
//
// All methods of the CodeCache accepting a CodeBlobType only apply to
// CodeBlobs of the given type. For example, iteration over the
// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
//
// IMPORTANT: If you add new CodeHeaps to the code cache or change the
// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
// Solaris and BSD.

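The default sizing rule in the comment block can be restated as a small predicate. This is only a sketch under the stated assumptions (240 MB threshold, TieredCompilation enabled); it is not the actual CodeCache::initialize_heaps() implementation, which is not shown here.

#include <cstddef>

// Sketch of the default rule described above; the real decision also
// consults compiler availability and flag defaults.
static bool would_use_segmented_code_cache(bool tiered_compilation,
                                           size_t reserved_code_cache_bytes) {
  const size_t threshold = 240 * 1024 * 1024;  // 240 MB
  return tiered_compilation && reserved_code_cache_bytes >= threshold;
}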
class OopClosure;
|
||||
class DepChange;
|
||||
|
||||
class CodeCache : AllStatic {
|
||||
friend class VMStructs;
|
||||
friend class NMethodIterator;
|
||||
private:
|
||||
// CodeHeap is malloc()'ed at startup and never deleted during shutdown,
|
||||
// so that the generated assembly code is always there when it's needed.
|
||||
// This may cause memory leak, but is necessary, for now. See 4423824,
|
||||
// 4422213 or 4436291 for details.
|
||||
static CodeHeap * _heap;
|
||||
static int _number_of_blobs;
|
||||
static int _number_of_adapters;
|
||||
static int _number_of_nmethods;
|
||||
static int _number_of_nmethods_with_dependencies;
|
||||
static bool _needs_cache_clean;
|
||||
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
|
||||
// CodeHeaps of the cache
|
||||
static GrowableArray<CodeHeap*>* _heaps;
|
||||
|
||||
static address _low_bound; // Lower bound of CodeHeap addresses
|
||||
static address _high_bound; // Upper bound of CodeHeap addresses
|
||||
static int _number_of_blobs; // Total number of CodeBlobs in the cache
|
||||
static int _number_of_adapters; // Total number of Adapters in the cache
|
||||
static int _number_of_nmethods; // Total number of nmethods in the cache
|
||||
static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
|
||||
static bool _needs_cache_clean; // True if inline caches of the nmethods need to be flushed
|
||||
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
|
||||
static int _codemem_full_count; // Number of times a CodeHeap in the cache was full
|
||||
|
||||
static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
|
||||
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
|
||||
|
||||
static int _codemem_full_count;
|
||||
static size_t bytes_allocated_in_freelist() { return _heap->allocated_in_freelist(); }
|
||||
static int allocated_segments() { return _heap->allocated_segments(); }
|
||||
static size_t freelist_length() { return _heap->freelist_length(); }
|
||||
// CodeHeap management
|
||||
static void initialize_heaps(); // Initializes the CodeHeaps
|
||||
// Creates a new heap with the given name and size, containing CodeBlobs of the given type
|
||||
static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
|
||||
static CodeHeap* get_code_heap(CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
|
||||
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
|
||||
static bool heap_available(int code_blob_type); // Returns true if a CodeHeap for the given CodeBlobType is available
|
||||
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
|
||||
|
||||
// Iteration
|
||||
static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap
|
||||
static CodeBlob* first_blob(int code_blob_type); // Returns the first CodeBlob of the given type
|
||||
static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the first alive CodeBlob on the given CodeHeap
|
||||
static CodeBlob* next_blob(CodeBlob* cb); // Returns the next CodeBlob of the given type succeeding the given CodeBlob
|
||||
|
||||
static size_t bytes_allocated_in_freelists();
|
||||
static int allocated_segments();
|
||||
static size_t freelists_length();
|
||||
|
||||
public:
|
||||
|
||||
// Initialization
|
||||
static void initialize();
|
||||
|
||||
static void report_codemem_full();
|
||||
|
||||
// Allocation/administration
|
||||
static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
|
||||
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
|
||||
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
|
||||
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
|
||||
static void free(CodeBlob* cb); // frees a CodeBlob
|
||||
static bool contains(void *p); // returns whether p is included
|
||||
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
|
||||
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
|
||||
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
|
||||
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
|
||||
static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
|
||||
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
|
||||
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
|
||||
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
|
||||
static void free(CodeBlob* cb); // frees a CodeBlob
|
||||
static bool contains(void *p); // returns whether p is included
|
||||
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
|
||||
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
|
||||
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
|
||||
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
|
||||
|
||||
// Lookup
|
||||
static CodeBlob* find_blob(void* start);
|
||||
static nmethod* find_nmethod(void* start);
|
||||
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
|
||||
static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method
|
||||
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
|
||||
|
||||
// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
|
||||
// what you are doing)
|
||||
static CodeBlob* find_blob_unsafe(void* start) {
|
||||
// NMT can walk the stack before code cache is created
|
||||
if (_heap == NULL) return NULL;
|
||||
|
||||
CodeBlob* result = (CodeBlob*)_heap->find_start(start);
|
||||
// this assert is too strong because the heap code will return the
|
||||
// heapblock containing start. That block can often be larger than
|
||||
// the codeBlob itself. If you look up an address that is within
|
||||
// the heapblock but not in the codeBlob you will assert.
|
||||
//
|
||||
// Most things will not lookup such bad addresses. However
|
||||
// AsyncGetCallTrace can see intermediate frames and get that kind
|
||||
// of invalid address and so can a developer using hsfind.
|
||||
//
|
||||
// The more correct answer is to return NULL if blob_contains() returns
|
||||
// false.
|
||||
// assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
|
||||
|
||||
if (result != NULL && !result->blob_contains((address)start)) {
|
||||
result = NULL;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Iteration
|
||||
static CodeBlob* first();
|
||||
static CodeBlob* next (CodeBlob* cb);
|
||||
static CodeBlob* alive(CodeBlob *cb);
|
||||
static nmethod* alive_nmethod(CodeBlob *cb);
|
||||
static nmethod* first_nmethod();
|
||||
static nmethod* next_nmethod (CodeBlob* cb);
|
||||
static int nof_blobs() { return _number_of_blobs; }
|
||||
static int nof_adapters() { return _number_of_adapters; }
|
||||
static int nof_nmethods() { return _number_of_nmethods; }
|
||||
static int nof_blobs() { return _number_of_blobs; } // Returns the total number of CodeBlobs in the cache
|
||||
static int nof_adapters() { return _number_of_adapters; } // Returns the total number of Adapters in the cache
|
||||
static int nof_nmethods() { return _number_of_nmethods; } // Returns the total number of nmethods in the cache
|
||||
|
||||
// GC support
|
||||
static void gc_epilogue();
|
||||
@ -137,7 +149,7 @@ class CodeCache : AllStatic {
|
||||
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
|
||||
static void scavenge_root_nmethods_do(CodeBlobClosure* f);
|
||||
|
||||
static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
|
||||
static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
|
||||
static void add_scavenge_root_nmethod(nmethod* nm);
|
||||
static void drop_scavenge_root_nmethod(nmethod* nm);
|
||||
@ -151,27 +163,47 @@ class CodeCache : AllStatic {
|
||||
static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
|
||||
static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
|
||||
static void log_state(outputStream* st);
|
||||
static const char* get_code_heap_name(int code_blob_type) { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
|
||||
static void report_codemem_full(int code_blob_type, bool print);
|
||||
|
||||
// Dcmd (Diagnostic commands)
|
||||
static void print_codelist(outputStream* st);
|
||||
static void print_layout(outputStream* st);
|
||||
|
||||
// The full limits of the codeCache
|
||||
static address low_bound() { return (address) _heap->low_boundary(); }
|
||||
static address high_bound() { return (address) _heap->high_boundary(); }
|
||||
static address high() { return (address) _heap->high(); }
|
||||
static address low_bound() { return _low_bound; }
|
||||
static address high_bound() { return _high_bound; }
|
||||
|
||||
// Profiling
|
||||
static address first_address(); // first address used for CodeBlobs
|
||||
static address last_address(); // last address used for CodeBlobs
|
||||
static size_t capacity() { return _heap->capacity(); }
|
||||
static size_t max_capacity() { return _heap->max_capacity(); }
|
||||
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
|
||||
static double reverse_free_ratio();
|
||||
static size_t capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->capacity() : 0; }
|
||||
static size_t capacity();
|
||||
static size_t unallocated_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->unallocated_capacity() : 0; }
|
||||
static size_t unallocated_capacity();
|
||||
static size_t max_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->max_capacity() : 0; }
|
||||
static size_t max_capacity();
|
||||
|
||||
static bool needs_cache_clean() { return _needs_cache_clean; }
|
||||
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
||||
static void clear_inline_caches(); // clear all inline caches
|
||||
static bool is_full(int* code_blob_type);
|
||||
static double reverse_free_ratio(int code_blob_type);
|
||||
|
||||
static bool needs_cache_clean() { return _needs_cache_clean; }
|
||||
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
||||
static void clear_inline_caches(); // clear all inline caches
|
||||
|
||||
// Returns the CodeBlobType for nmethods of the given compilation level
|
||||
static int get_code_blob_type(int comp_level) {
|
||||
if (comp_level == CompLevel_none ||
|
||||
comp_level == CompLevel_simple ||
|
||||
comp_level == CompLevel_full_optimization) {
|
||||
// Non profiled methods
|
||||
return CodeBlobType::MethodNonProfiled;
|
||||
} else if (comp_level == CompLevel_limited_profile ||
|
||||
comp_level == CompLevel_full_profile) {
|
||||
// Profiled methods
|
||||
return CodeBlobType::MethodProfiled;
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
return 0;
|
||||
}
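A short usage sketch of the mapping above (illustrative calls, not taken from this changeset): profiled tiers land in the profiled method heap, while the lowest and highest tiers go to the non-profiled one.

// Illustrative only: tier 2/3 output goes to the profiled method heap,
// tier 1/4 output to the non-profiled method heap.
int profiled_heap     = CodeCache::get_code_blob_type(CompLevel_full_profile);       // CodeBlobType::MethodProfiled
int non_profiled_heap = CodeCache::get_code_blob_type(CompLevel_full_optimization);  // CodeBlobType::MethodNonProfiled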
|
||||
|
||||
static void verify_clean_inline_caches();
|
||||
static void verify_icholder_relocations();
|
||||
@ -187,10 +219,87 @@ class CodeCache : AllStatic {
|
||||
static void make_marked_nmethods_zombies();
|
||||
static void make_marked_nmethods_not_entrant();
|
||||
|
||||
// tells how many nmethods have dependencies
|
||||
static int number_of_nmethods_with_dependencies();
|
||||
|
||||
static int get_codemem_full_count() { return _codemem_full_count; }
|
||||
};
|
||||
|
||||
|
||||
// Iterator to iterate over nmethods in the CodeCache.
|
||||
class NMethodIterator : public StackObj {
|
||||
private:
|
||||
CodeBlob* _code_blob; // Current CodeBlob
|
||||
int _code_blob_type; // Refers to current CodeHeap
|
||||
|
||||
public:
|
||||
NMethodIterator() {
|
||||
initialize(NULL); // Set to NULL, initialized by first call to next()
|
||||
}
|
||||
|
||||
NMethodIterator(nmethod* nm) {
|
||||
initialize(nm);
|
||||
}
|
||||
|
||||
// Advance iterator to next nmethod
|
||||
bool next() {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
assert(_code_blob_type < CodeBlobType::NumTypes, "end reached");
|
||||
|
||||
bool result = next_nmethod();
|
||||
while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) {
|
||||
// Advance to next code heap if segmented code cache
|
||||
_code_blob_type++;
|
||||
result = next_nmethod();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
// Advance iterator to next alive nmethod
|
||||
bool next_alive() {
|
||||
bool result = next();
|
||||
while(result && !_code_blob->is_alive()) {
|
||||
result = next();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
bool end() const { return _code_blob == NULL; }
|
||||
nmethod* method() const { return (nmethod*)_code_blob; }
|
||||
|
||||
private:
|
||||
// Initialize iterator to given nmethod
|
||||
void initialize(nmethod* nm) {
|
||||
_code_blob = (CodeBlob*)nm;
|
||||
if (!SegmentedCodeCache) {
|
||||
// Iterate over all CodeBlobs
|
||||
_code_blob_type = CodeBlobType::All;
|
||||
} else if (nm != NULL) {
|
||||
_code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
|
||||
} else {
|
||||
// Only iterate over method code heaps, starting with non-profiled
|
||||
_code_blob_type = CodeBlobType::MethodNonProfiled;
|
||||
}
|
||||
}
|
||||
|
||||
// Advance iterator to the next nmethod in the current code heap
|
||||
bool next_nmethod() {
|
||||
// Get first method CodeBlob
|
||||
if (_code_blob == NULL) {
|
||||
_code_blob = CodeCache::first_blob(_code_blob_type);
|
||||
if (_code_blob == NULL) {
|
||||
return false;
|
||||
} else if (_code_blob->is_nmethod()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
// Search for next method CodeBlob
|
||||
_code_blob = CodeCache::next_blob(_code_blob);
|
||||
while (_code_blob != NULL && !_code_blob->is_nmethod()) {
|
||||
_code_blob = CodeCache::next_blob(_code_blob);
|
||||
}
|
||||
return _code_blob != NULL;
|
||||
}
|
||||
};
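A usage sketch of the iterator, mirroring how the nmethod.cpp hunk further down drives it in check_all_dependencies(): the caller holds the CodeCache_lock (or is at a safepoint) and walks all nmethods across the method code heaps.

// Mirrors the usage introduced in nmethod::check_all_dependencies() below;
// next_alive() additionally skips nmethods that are no longer alive.
NMethodIterator iter;
while (iter.next_alive()) {
  nmethod* nm = iter.method();
  // ... examine nm ...
}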
|
||||
|
||||
#endif // SHARE_VM_CODE_CODECACHE_HPP
|
||||
|
@ -500,7 +500,7 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
|
||||
CodeOffsets offsets;
|
||||
offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
|
||||
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
|
||||
nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
|
||||
nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
|
||||
compile_id, &offsets,
|
||||
code_buffer, frame_size,
|
||||
basic_lock_owner_sp_offset,
|
||||
@ -538,7 +538,7 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
|
||||
offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
|
||||
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
|
||||
|
||||
nm = new (nmethod_size) nmethod(method(), nmethod_size,
|
||||
nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
|
||||
&offsets, code_buffer, frame_size);
|
||||
|
||||
NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
|
||||
@ -586,7 +586,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
|
||||
+ round_to(nul_chk_table->size_in_bytes(), oopSize)
|
||||
+ round_to(debug_info->data_size() , oopSize);
|
||||
|
||||
nm = new (nmethod_size)
|
||||
nm = new (nmethod_size, comp_level)
|
||||
nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
|
||||
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
|
||||
oop_maps,
|
||||
@ -803,9 +803,11 @@ nmethod::nmethod(
|
||||
}
|
||||
#endif // def HAVE_DTRACE_H
|
||||
|
||||
void* nmethod::operator new(size_t size, int nmethod_size) throw() {
|
||||
// Not critical, may return null if there is too little continuous memory
|
||||
return CodeCache::allocate(nmethod_size);
|
||||
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
|
||||
// With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
|
||||
// with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
|
||||
bool is_critical = SegmentedCodeCache;
|
||||
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
|
||||
}
|
||||
|
||||
nmethod::nmethod(
|
||||
@ -1530,7 +1532,7 @@ void nmethod::flush() {
|
||||
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
|
||||
if (PrintMethodFlushing) {
|
||||
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
|
||||
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
|
||||
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
|
||||
}
|
||||
|
||||
// We need to deallocate any ExceptionCache data.
|
||||
@ -1557,7 +1559,6 @@ void nmethod::flush() {
|
||||
CodeCache::free(this);
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// Notify all classes this nmethod is dependent on that it is no
|
||||
// longer dependent. This should only be called in two situations.
|
||||
@ -2418,15 +2419,18 @@ void nmethod::check_all_dependencies(DepChange& changes) {
|
||||
// Turn off dependency tracing while actually testing dependencies.
|
||||
NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
|
||||
|
||||
typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
|
||||
&DependencySignature::equals, 11027> DepTable;
|
||||
|
||||
DepTable* table = new DepTable();
|
||||
|
||||
// Iterate over live nmethods and check dependencies of all nmethods that are not
|
||||
// marked for deoptimization. A particular dependency is only checked once.
|
||||
for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
|
||||
if (!nm->is_marked_for_deoptimization()) {
|
||||
NMethodIterator iter;
|
||||
while(iter.next()) {
|
||||
nmethod* nm = iter.method();
|
||||
// Only notify for live nmethods
|
||||
if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
|
||||
for (Dependencies::DepStream deps(nm); deps.next(); ) {
|
||||
// Construct abstraction of a dependency.
|
||||
DependencySignature* current_sig = new DependencySignature(deps);
|
||||
|
@ -288,7 +288,7 @@ class nmethod : public CodeBlob {
|
||||
int comp_level);
|
||||
|
||||
// helper methods
|
||||
void* operator new(size_t size, int nmethod_size) throw();
|
||||
void* operator new(size_t size, int nmethod_size, int comp_level) throw();
|
||||
|
||||
const char* reloc_string_for(u_char* begin, u_char* end);
|
||||
// Returns true if this thread changed the state of the nmethod or
|
||||
|
@ -63,7 +63,7 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
|
||||
// If changing the name, update the other file accordingly.
|
||||
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
|
||||
if (blob == NULL) {
|
||||
CompileBroker::handle_full_code_cache();
|
||||
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
|
||||
return NULL;
|
||||
}
|
||||
_chunk = blob->content_begin();
|
||||
|
@ -783,18 +783,22 @@ CompileQueue* CompileBroker::compile_queue(int comp_level) {
|
||||
|
||||
|
||||
void CompileBroker::print_compile_queues(outputStream* st) {
|
||||
_c1_compile_queue->print(st);
|
||||
_c2_compile_queue->print(st);
|
||||
MutexLocker locker(MethodCompileQueue_lock);
|
||||
if (_c1_compile_queue != NULL) {
|
||||
_c1_compile_queue->print(st);
|
||||
}
|
||||
if (_c2_compile_queue != NULL) {
|
||||
_c2_compile_queue->print(st);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void CompileQueue::print(outputStream* st) {
|
||||
assert_locked_or_safepoint(lock());
|
||||
assert(lock()->owned_by_self(), "must own lock");
|
||||
st->print_cr("Contents of %s", name());
|
||||
st->print_cr("----------------------------");
|
||||
CompileTask* task = _first;
|
||||
if (task == NULL) {
|
||||
st->print_cr("Empty");;
|
||||
st->print_cr("Empty");
|
||||
} else {
|
||||
while (task != NULL) {
|
||||
task->print_compilation(st, NULL, true, true);
|
||||
@ -1206,6 +1210,12 @@ void CompileBroker::compile_method_base(methodHandle method,
|
||||
return;
|
||||
}
|
||||
|
||||
if (TieredCompilation) {
|
||||
// Tiered policy requires MethodCounters to exist before adding a method to
|
||||
// the queue. Create if we don't have them yet.
|
||||
method->get_method_counters(thread);
|
||||
}
|
||||
|
||||
// Outputs from the following MutexLocker block:
|
||||
CompileTask* task = NULL;
|
||||
bool blocking = false;
|
||||
@ -1747,9 +1757,11 @@ void CompileBroker::compiler_thread_loop() {
|
||||
// We need this HandleMark to avoid leaking VM handles.
|
||||
HandleMark hm(thread);
|
||||
|
||||
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
|
||||
// the code cache is really full
|
||||
handle_full_code_cache();
|
||||
// Check if the CodeCache is full
|
||||
int code_blob_type = 0;
|
||||
if (CodeCache::is_full(&code_blob_type)) {
|
||||
// The CodeHeap for code_blob_type is really full
|
||||
handle_full_code_cache(code_blob_type);
|
||||
}
|
||||
|
||||
CompileTask* task = queue->get();
|
||||
@ -1777,22 +1789,6 @@ void CompileBroker::compiler_thread_loop() {
|
||||
if (method()->number_of_breakpoints() == 0) {
|
||||
// Compile the method.
|
||||
if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
|
||||
#ifdef COMPILER1
|
||||
// Allow repeating compilations for the purpose of benchmarking
|
||||
// compile speed. This is not useful for customers.
|
||||
if (CompilationRepeat != 0) {
|
||||
int compile_count = CompilationRepeat;
|
||||
while (compile_count > 0) {
|
||||
invoke_compiler_on_method(task);
|
||||
nmethod* nm = method->code();
|
||||
if (nm != NULL) {
|
||||
nm->make_zombie();
|
||||
method->clear_code();
|
||||
}
|
||||
compile_count--;
|
||||
}
|
||||
}
|
||||
#endif /* COMPILER1 */
|
||||
invoke_compiler_on_method(task);
|
||||
} else {
|
||||
// After compilation is disabled, remove remaining methods from queue
|
||||
@ -2079,7 +2075,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
|
||||
* The CodeCache is full. Print out warning and disable compilation
|
||||
* or try code cache cleaning so compilation can continue later.
|
||||
*/
|
||||
void CompileBroker::handle_full_code_cache() {
|
||||
void CompileBroker::handle_full_code_cache(int code_blob_type) {
|
||||
UseInterpreter = true;
|
||||
if (UseCompiler || AlwaysCompileLoopMethods ) {
|
||||
if (xtty != NULL) {
|
||||
@ -2096,8 +2092,6 @@ void CompileBroker::handle_full_code_cache() {
|
||||
xtty->end_elem();
|
||||
}
|
||||
|
||||
CodeCache::report_codemem_full();
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (CompileTheWorld || ExitOnFullCodeCache) {
|
||||
codecache_print(/* detailed= */ true);
|
||||
@ -2119,12 +2113,7 @@ void CompileBroker::handle_full_code_cache() {
|
||||
disable_compilation_forever();
|
||||
}
|
||||
|
||||
// Print warning only once
|
||||
if (should_print_compiler_warning()) {
|
||||
warning("CodeCache is full. Compiler has been disabled.");
|
||||
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
|
||||
codecache_print(/* detailed= */ true);
|
||||
}
|
||||
CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning());
|
||||
}
|
||||
}
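Callers now report which heap overflowed: the vtable stub hunk above passes CodeBlobType::NonMethod directly, and the compiler thread loop passes whatever CodeCache::is_full() reported. A condensed sketch of the two call sites, taken from the hunks shown earlier:

// Non-method allocation failure (VtableStub::operator new):
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);

// Compiler thread loop: ask the cache which heap is exhausted, then report it.
int code_blob_type = 0;
if (CodeCache::is_full(&code_blob_type)) {
  CompileBroker::handle_full_code_cache(code_blob_type);
}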
|
||||
|
||||
|
@ -434,7 +434,7 @@ class CompileBroker: AllStatic {
|
||||
static bool is_compilation_disabled_forever() {
|
||||
return _should_compile_new_jobs == shutdown_compilaton;
|
||||
}
|
||||
static void handle_full_code_cache();
|
||||
static void handle_full_code_cache(int code_blob_type);
|
||||
// Ensures that warning is only printed once.
|
||||
static bool should_print_compiler_warning() {
|
||||
jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
|
||||
|
@ -4167,7 +4167,7 @@ class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
|
||||
// been published), so we do not need to check for
|
||||
// uninitialized objects before pushing here.
|
||||
void Par_ConcMarkingClosure::do_oop(oop obj) {
|
||||
assert(obj->is_oop_or_null(true), "expected an oop or NULL");
|
||||
assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
// Check if oop points into the CMS generation
|
||||
// and is not marked
|
||||
@ -7226,7 +7226,7 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
|
||||
// isMarked() query is "safe".
|
||||
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
|
||||
// Ignore mark word because we are running concurrent with mutators
|
||||
assert(p->is_oop_or_null(true), "expected an oop or null");
|
||||
assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
|
||||
HeapWord* addr = (HeapWord*)p;
|
||||
assert(_span.contains(addr), "we are scanning the CMS generation");
|
||||
bool is_obj_array = false;
|
||||
@ -7666,7 +7666,7 @@ void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
|
||||
}
|
||||
|
||||
void PushAndMarkVerifyClosure::do_oop(oop obj) {
|
||||
assert(obj->is_oop_or_null(), "expected an oop or NULL");
|
||||
assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
|
||||
// Oop lies in _span and isn't yet grey or black
|
||||
@ -7764,7 +7764,7 @@ void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
|
||||
|
||||
void PushOrMarkClosure::do_oop(oop obj) {
|
||||
// Ignore mark word because we are running concurrent with mutators.
|
||||
assert(obj->is_oop_or_null(true), "expected an oop or NULL");
|
||||
assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
|
||||
// Oop lies in _span and isn't yet grey or black
|
||||
@ -7802,7 +7802,7 @@ void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p)
|
||||
|
||||
void Par_PushOrMarkClosure::do_oop(oop obj) {
|
||||
// Ignore mark word because we are running concurrent with mutators.
|
||||
assert(obj->is_oop_or_null(true), "expected an oop or NULL");
|
||||
assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
|
||||
// Oop lies in _span and isn't yet grey or black
|
||||
@ -7879,7 +7879,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
|
||||
// path and may be at the end of the global overflow list (so
|
||||
// the mark word may be NULL).
|
||||
assert(obj->is_oop_or_null(true /* ignore mark word */),
|
||||
"expected an oop or NULL");
|
||||
err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
// Check if oop points into the CMS generation
|
||||
// and is not marked
|
||||
@ -7959,7 +7959,7 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
|
||||
// the debugger, is_oop_or_null(false) may subsequently start
|
||||
// to hold.
|
||||
assert(obj->is_oop_or_null(true),
|
||||
"expected an oop or NULL");
|
||||
err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
// Check if oop points into the CMS generation
|
||||
// and is not marked
|
||||
|
@ -73,7 +73,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
|
||||
} else {
|
||||
res = (PromotedObject*)(_next & next_mask);
|
||||
}
|
||||
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
|
||||
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res))));
|
||||
return res;
|
||||
}
|
||||
inline void setNext(PromotedObject* x) {
|
||||
|
@ -107,7 +107,7 @@ void CollectionSetChooser::verify() {
|
||||
HeapRegion *curr = regions_at(index++);
|
||||
guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
|
||||
guarantee(!curr->is_young(), "should not be young!");
|
||||
guarantee(!curr->isHumongous(), "should not be humongous!");
|
||||
guarantee(!curr->is_humongous(), "should not be humongous!");
|
||||
if (prev != NULL) {
|
||||
guarantee(order_regions(prev, curr) != 1,
|
||||
err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
|
||||
@ -149,7 +149,7 @@ void CollectionSetChooser::sort_regions() {
|
||||
|
||||
|
||||
void CollectionSetChooser::add_region(HeapRegion* hr) {
|
||||
assert(!hr->isHumongous(),
|
||||
assert(!hr->is_humongous(),
|
||||
"Humongous regions shouldn't be added to the collection set");
|
||||
assert(!hr->is_young(), "should not be young!");
|
||||
_regions.append(hr);
|
||||
|
@ -109,7 +109,7 @@ public:
|
||||
bool should_add(HeapRegion* hr) {
|
||||
assert(hr->is_marked(), "pre-condition");
|
||||
assert(!hr->is_young(), "should never consider young regions");
|
||||
return !hr->isHumongous() &&
|
||||
return !hr->is_humongous() &&
|
||||
hr->live_bytes() < _region_live_threshold_bytes;
|
||||
}
|
||||
|
||||
|
@ -910,7 +910,7 @@ bool ConcurrentMark::nextMarkBitmapIsClear() {
|
||||
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
|
||||
public:
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
if (!r->continuesHumongous()) {
|
||||
if (!r->is_continues_humongous()) {
|
||||
r->note_start_of_marking();
|
||||
}
|
||||
return false;
|
||||
@ -1288,6 +1288,22 @@ void ConcurrentMark::markFromRoots() {
|
||||
print_stats();
|
||||
}
|
||||
|
||||
// Helper class to get rid of some boilerplate code.
|
||||
class G1CMTraceTime : public GCTraceTime {
|
||||
static bool doit_and_prepend(bool doit) {
|
||||
if (doit) {
|
||||
gclog_or_tty->put(' ');
|
||||
}
|
||||
return doit;
|
||||
}
|
||||
|
||||
public:
|
||||
G1CMTraceTime(const char* title, bool doit)
|
||||
: GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
|
||||
G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
|
||||
}
|
||||
};
|
||||
|
||||
void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
|
||||
// world is stopped at this checkpoint
|
||||
assert(SafepointSynchronize::is_at_safepoint(),
|
||||
@ -1341,9 +1357,13 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
|
||||
// marking due to overflowing the global mark stack.
|
||||
reset_marking_state();
|
||||
} else {
|
||||
// Aggregate the per-task counting data that we have accumulated
|
||||
// while marking.
|
||||
aggregate_count_data();
|
||||
{
|
||||
G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
|
||||
|
||||
// Aggregate the per-task counting data that we have accumulated
|
||||
// while marking.
|
||||
aggregate_count_data();
|
||||
}
|
||||
|
||||
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
|
||||
// We're done with marking.
|
||||
@ -1398,10 +1418,10 @@ protected:
|
||||
// to 1 the bits on the region bitmap that correspond to its
|
||||
// associated "continues humongous" regions.
|
||||
void set_bit_for_region(HeapRegion* hr) {
|
||||
assert(!hr->continuesHumongous(), "should have filtered those out");
|
||||
assert(!hr->is_continues_humongous(), "should have filtered those out");
|
||||
|
||||
BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
|
||||
if (!hr->startsHumongous()) {
|
||||
if (!hr->is_starts_humongous()) {
|
||||
// Normal (non-humongous) case: just set the bit.
|
||||
_region_bm->par_at_put(index, true);
|
||||
} else {
|
||||
@ -1434,7 +1454,7 @@ public:
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
|
||||
if (hr->continuesHumongous()) {
|
||||
if (hr->is_continues_humongous()) {
|
||||
// We will ignore these here and process them when their
|
||||
// associated "starts humongous" region is processed (see
|
||||
// set_bit_for_heap_region()). Note that we cannot rely on their
|
||||
@ -1556,7 +1576,7 @@ public:
|
||||
int failures() const { return _failures; }
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
if (hr->continuesHumongous()) {
|
||||
if (hr->is_continues_humongous()) {
|
||||
// We will ignore these here and process them when their
|
||||
// associated "starts humongous" region is processed (see
|
||||
// set_bit_for_heap_region()). Note that we cannot rely on their
|
||||
@ -1731,7 +1751,7 @@ class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
|
||||
if (hr->continuesHumongous()) {
|
||||
if (hr->is_continues_humongous()) {
|
||||
// We will ignore these here and process them when their
|
||||
// associated "starts humongous" region is processed (see
|
||||
// set_bit_for_heap_region()). Note that we cannot rely on their
|
||||
@ -1861,7 +1881,7 @@ public:
|
||||
const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
|
||||
|
||||
bool doHeapRegion(HeapRegion *hr) {
|
||||
if (hr->continuesHumongous()) {
|
||||
if (hr->is_continues_humongous()) {
|
||||
return false;
|
||||
}
|
||||
// We use a claim value of zero here because all regions
|
||||
@ -1875,8 +1895,8 @@ public:
|
||||
if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
|
||||
_freed_bytes += hr->used();
|
||||
hr->set_containing_set(NULL);
|
||||
if (hr->isHumongous()) {
|
||||
assert(hr->startsHumongous(), "we should only see starts humongous");
|
||||
if (hr->is_humongous()) {
|
||||
assert(hr->is_starts_humongous(), "we should only see starts humongous");
|
||||
_humongous_regions_removed.increment(1u, hr->capacity());
|
||||
_g1->free_humongous_region(hr, _local_cleanup_list, true);
|
||||
} else {
|
||||
@ -2466,22 +2486,6 @@ void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool
|
||||
G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
|
||||
}
|
||||
|
||||
// Helper class to get rid of some boilerplate code.
|
||||
class G1RemarkGCTraceTime : public GCTraceTime {
|
||||
static bool doit_and_prepend(bool doit) {
|
||||
if (doit) {
|
||||
gclog_or_tty->put(' ');
|
||||
}
|
||||
return doit;
|
||||
}
|
||||
|
||||
public:
|
||||
G1RemarkGCTraceTime(const char* title, bool doit)
|
||||
: GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
|
||||
G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
|
||||
}
|
||||
};
|
||||
|
||||
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
if (has_overflown()) {
|
||||
// Skip processing the discovered references if we have
|
||||
@ -2504,10 +2508,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
// Inner scope to exclude the cleaning of the string and symbol
|
||||
// tables from the displayed time.
|
||||
{
|
||||
if (G1Log::finer()) {
|
||||
gclog_or_tty->put(' ');
|
||||
}
|
||||
GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
|
||||
G1CMTraceTime t("GC ref-proc", G1Log::finer());
|
||||
|
||||
ReferenceProcessor* rp = g1h->ref_processor_cm();
|
||||
|
||||
@ -2598,24 +2599,24 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
|
||||
// Unload Klasses, String, Symbols, Code Cache, etc.
|
||||
{
|
||||
G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
|
||||
G1CMTraceTime trace("Unloading", G1Log::finer());
|
||||
|
||||
if (ClassUnloadingWithConcurrentMark) {
|
||||
bool purged_classes;
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
|
||||
G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
|
||||
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
|
||||
}
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
|
||||
G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
|
||||
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
|
||||
}
|
||||
}
|
||||
|
||||
if (G1StringDedup::is_enabled()) {
|
||||
G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
|
||||
G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
|
||||
G1StringDedup::unlink(&g1_is_alive);
|
||||
}
|
||||
}
|
||||
@ -2719,7 +2720,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
|
||||
HandleMark hm;
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
|
||||
G1CMTraceTime trace("Finalize Marking", G1Log::finer());
|
||||
|
||||
g1h->ensure_parsability(false);
|
||||
|
||||
@ -3191,7 +3192,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
|
||||
_cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
if (hr->continuesHumongous()) {
|
||||
if (hr->is_continues_humongous()) {
|
||||
// We will ignore these here and process them when their
|
||||
// associated "starts humongous" region is processed.
|
||||
// Note that we cannot rely on their associated
|
||||
@ -3334,6 +3335,7 @@ void ConcurrentMark::aggregate_count_data() {
|
||||
} else {
|
||||
g1_par_agg_task.work(0);
|
||||
}
|
||||
_g1h->allocation_context_stats().update_at_remark();
|
||||
}
|
||||
|
||||
// Clear the per-worker arrays used to store the per-region counting data
|
||||
@ -3562,7 +3564,7 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
|
||||
void CMTask::setup_for_region(HeapRegion* hr) {
|
||||
assert(hr != NULL,
|
||||
"claim_region() should have filtered out NULL regions");
|
||||
assert(!hr->continuesHumongous(),
|
||||
assert(!hr->is_continues_humongous(),
|
||||
"claim_region() should have filtered out continues humongous regions");
|
||||
|
||||
if (_cm->verbose_low()) {
|
||||
@ -4287,7 +4289,7 @@ void CMTask::do_marking_step(double time_target_ms,
|
||||
HR_FORMAT_PARAMS(_curr_region));
|
||||
}
|
||||
|
||||
assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
|
||||
assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
|
||||
"humongous regions should go around loop once only");
|
||||
|
||||
// Some special cases:
|
||||
@ -4301,7 +4303,7 @@ void CMTask::do_marking_step(double time_target_ms,
|
||||
if (mr.is_empty()) {
|
||||
giveup_current_region();
|
||||
regular_clock_call();
|
||||
} else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
|
||||
} else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
|
||||
if (_nextMarkBitMap->isMarked(mr.start())) {
|
||||
// The object is marked - apply the closure
|
||||
BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
|
||||
@ -4748,7 +4750,7 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
|
||||
size_t remset_bytes = r->rem_set()->mem_size();
|
||||
size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
|
||||
|
||||
if (r->startsHumongous()) {
|
||||
if (r->is_starts_humongous()) {
|
||||
assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
|
||||
_hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
|
||||
"they should have been zeroed after the last time we used them");
|
||||
@ -4760,7 +4762,7 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
|
||||
get_hum_bytes(&used_bytes, &capacity_bytes,
|
||||
&prev_live_bytes, &next_live_bytes);
|
||||
end = bottom + HeapRegion::GrainWords;
|
||||
} else if (r->continuesHumongous()) {
|
||||
} else if (r->is_continues_humongous()) {
|
||||
get_hum_bytes(&used_bytes, &capacity_bytes,
|
||||
&prev_live_bytes, &next_live_bytes);
|
||||
assert(end == bottom + HeapRegion::GrainWords, "invariant");
|
||||
|
@ -88,7 +88,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
|
||||
size_t region_size_bytes = mr.byte_size();
|
||||
uint index = hr->hrm_index();
|
||||
|
||||
assert(!hr->continuesHumongous(), "should not be HC region");
|
||||
assert(!hr->is_continues_humongous(), "should not be HC region");
|
||||
assert(hr == g1h->heap_region_containing(start), "sanity");
|
||||
assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
|
||||
assert(marked_bytes_array != NULL, "pre-condition");
|
||||
@ -277,7 +277,7 @@ inline void CMTask::deal_with_reference(oop obj) {
|
||||
++_refs_reached;
|
||||
|
||||
HeapWord* objAddr = (HeapWord*) obj;
|
||||
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
|
||||
assert(obj->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
|
||||
if (_g1h->is_in_g1_reserved(objAddr)) {
|
||||
assert(obj != NULL, "null check is implicit");
|
||||
if (!_nextMarkBitMap->isMarked(objAddr)) {
|
||||
@ -366,7 +366,7 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
|
||||
assert(hr != NULL, "sanity");
|
||||
// Given that we're looking for a region that contains an object
|
||||
// header it's impossible to get back a HC region.
|
||||
assert(!hr->continuesHumongous(), "sanity");
|
||||
assert(!hr->is_continues_humongous(), "sanity");
|
||||
|
||||
// We cannot assert that word_size == obj->size() given that obj
|
||||
// might not be in a consistent state (another thread might be in
|
||||
|
@ -129,8 +129,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
|
||||
// Note that we first perform the allocation and then we store the
|
||||
// region in _alloc_region. This is the reason why an active region
|
||||
// can never be empty.
|
||||
_alloc_region = new_alloc_region;
|
||||
_count += 1;
|
||||
update_alloc_region(new_alloc_region);
|
||||
trace("region allocation successful");
|
||||
return result;
|
||||
} else {
|
||||
@ -172,6 +171,19 @@ void G1AllocRegion::set(HeapRegion* alloc_region) {
|
||||
trace("set");
|
||||
}
|
||||
|
||||
void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
|
||||
trace("update");
|
||||
// We explicitly check that the region is not empty to make sure we
|
||||
// maintain the "the alloc region cannot be empty" invariant.
|
||||
assert(alloc_region != NULL && !alloc_region->is_empty(),
|
||||
ar_ext_msg(this, "pre-condition"));
|
||||
|
||||
_alloc_region = alloc_region;
|
||||
_alloc_region->set_allocation_context(allocation_context());
|
||||
_count += 1;
|
||||
trace("updated");
|
||||
}
|
||||
|
||||
HeapRegion* G1AllocRegion::release() {
|
||||
trace("releasing");
|
||||
HeapRegion* alloc_region = _alloc_region;
|
||||
@ -225,5 +237,70 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
|
||||
G1AllocRegion::G1AllocRegion(const char* name,
|
||||
bool bot_updates)
|
||||
: _name(name), _bot_updates(bot_updates),
|
||||
_alloc_region(NULL), _count(0), _used_bytes_before(0) { }
|
||||
_alloc_region(NULL), _count(0), _used_bytes_before(0),
|
||||
_allocation_context(AllocationContext::system()) { }
|
||||
|
||||
|
||||
HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
|
||||
bool force) {
|
||||
return _g1h->new_mutator_alloc_region(word_size, force);
|
||||
}
|
||||
|
||||
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
|
||||
size_t allocated_bytes) {
|
||||
_g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
|
||||
}
|
||||
|
||||
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
|
||||
bool force) {
|
||||
assert(!force, "not supported for GC alloc regions");
|
||||
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
|
||||
}
|
||||
|
||||
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
|
||||
size_t allocated_bytes) {
|
||||
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
|
||||
GCAllocForSurvived);
|
||||
}
|
||||
|
||||
HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
|
||||
bool force) {
|
||||
assert(!force, "not supported for GC alloc regions");
|
||||
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
|
||||
}
|
||||
|
||||
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
|
||||
size_t allocated_bytes) {
|
||||
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
|
||||
GCAllocForTenured);
|
||||
}
|
||||
|
||||
HeapRegion* OldGCAllocRegion::release() {
|
||||
HeapRegion* cur = get();
|
||||
if (cur != NULL) {
|
||||
// Determine how far we are from the next card boundary. If it is smaller than
|
||||
// the minimum object size we can allocate into, expand into the next card.
|
||||
HeapWord* top = cur->top();
|
||||
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
|
||||
|
||||
size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
|
||||
|
||||
if (to_allocate_words != 0) {
|
||||
// We are not at a card boundary. Fill up, possibly into the next, taking the
|
||||
// end of the region and the minimum object size into account.
|
||||
to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
|
||||
MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
|
||||
|
||||
// Skip allocation if there is not enough space to allocate even the smallest
|
||||
// possible object. In this case this region will not be retained, so the
|
||||
// original problem cannot occur.
|
||||
if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
|
||||
HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
|
||||
CollectedHeap::fill_with_object(dummy, to_allocate_words);
|
||||
}
|
||||
}
|
||||
}
|
||||
return G1AllocRegion::release();
|
||||
}
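The padding computation above can be followed with concrete numbers. The figures below are illustrative assumptions only (64-bit heap words, a 512-byte BOT granule), not values taken from this changeset.

// Assume G1BlockOffsetSharedArray::N_bytes == 512 and HeapWordSize == 8.
//   top               = region_start + 0x1E0   (480 bytes into the last card)
//   aligned_top       = region_start + 0x200   (next 512-byte boundary)
//   to_allocate_words = (0x200 - 0x1E0) / 8 = 4 words
// 4 words >= G1CollectedHeap::min_fill_size(), so a 4-word filler object is
// allocated and the last card of the retained region is completely covered,
// avoiding the BOT race described in the comment above.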
|
||||
|
||||
|
||||
|
@ -57,6 +57,9 @@ private:
|
||||
// correct use of init() and release()).
|
||||
HeapRegion* volatile _alloc_region;
|
||||
|
||||
// Allocation context associated with this alloc region.
|
||||
AllocationContext_t _allocation_context;
|
||||
|
||||
// It keeps track of the distinct number of regions that are used
|
||||
// for allocation in the active interval of this object, i.e.,
|
||||
// between a call to init() and a call to release(). The count
|
||||
@ -110,6 +113,10 @@ private:
|
||||
// else can allocate out of it.
|
||||
void retire(bool fill_up);
|
||||
|
||||
// After a region is allocated by alloc_new_region, this
|
||||
// method is used to set it as the active alloc_region
|
||||
void update_alloc_region(HeapRegion* alloc_region);
|
||||
|
||||
// Allocate a new active region and use it to perform a word_size
|
||||
// allocation. The force parameter will be passed on to
|
||||
// G1CollectedHeap::allocate_new_alloc_region() and tells it to try
|
||||
@ -137,6 +144,9 @@ public:
|
||||
return (hr == _dummy_region) ? NULL : hr;
|
||||
}
|
||||
|
||||
void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
|
||||
AllocationContext_t allocation_context() { return _allocation_context; }
|
||||
|
||||
uint count() { return _count; }
|
||||
|
||||
// The following two are the building blocks for the allocation method.
|
||||
@ -182,6 +192,40 @@ public:
|
||||
#endif // G1_ALLOC_REGION_TRACING
|
||||
};
|
||||
|
||||
class MutatorAllocRegion : public G1AllocRegion {
|
||||
protected:
|
||||
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
|
||||
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
|
||||
public:
|
||||
MutatorAllocRegion()
|
||||
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
|
||||
};
|
||||
|
||||
class SurvivorGCAllocRegion : public G1AllocRegion {
|
||||
protected:
|
||||
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
|
||||
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
|
||||
public:
|
||||
SurvivorGCAllocRegion()
|
||||
: G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
|
||||
};
|
||||
|
||||
class OldGCAllocRegion : public G1AllocRegion {
|
||||
protected:
|
||||
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
|
||||
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
|
||||
public:
|
||||
OldGCAllocRegion()
|
||||
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
|
||||
|
||||
// This specialization of release() makes sure that the last card that has
|
||||
// been allocated into has been completely filled by a dummy object. This
|
||||
// avoids races when remembered set scanning wants to update the BOT of the
|
||||
// last card in the retained old gc alloc region, and allocation threads
|
||||
// allocating into that card at the same time.
|
||||
virtual HeapRegion* release();
|
||||
};
|
||||
|
||||
class ar_ext_msg : public err_msg {
|
||||
public:
|
||||
ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
|
||||
|
@ -0,0 +1,52 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
typedef unsigned char AllocationContext_t;
|
||||
|
||||
class AllocationContext : AllStatic {
|
||||
public:
|
||||
// Currently used context
|
||||
static AllocationContext_t current() {
|
||||
return 0;
|
||||
}
|
||||
// System wide default context
|
||||
static AllocationContext_t system() {
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
class AllocationContextStats: public StackObj {
|
||||
public:
|
||||
inline void clear() { }
|
||||
inline void update(bool full_gc) { }
|
||||
inline void update_at_remark() { }
|
||||
inline bool available() { return false; }
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
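A minimal usage sketch of the new context types, mirroring calls that appear in g1Allocator.cpp below; in this default implementation both helpers simply return 0 and the stats object is a no-op.

// As used in G1DefaultAllocator::release_gc_alloc_regions() further down.
AllocationContext_t context        = AllocationContext::current();  // always 0 here
AllocationContext_t system_context = AllocationContext::system();   // also 0

AllocationContextStats stats;
stats.update(false /* full_gc */);   // no-op in this default implementation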
|
hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp (new file, 155 lines)
@ -0,0 +1,155 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1Allocator.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
||||
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
||||
#include "gc_implementation/g1/heapRegion.inline.hpp"
|
||||
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
|
||||
|
||||
void G1DefaultAllocator::init_mutator_alloc_region() {
|
||||
assert(_mutator_alloc_region.get() == NULL, "pre-condition");
|
||||
_mutator_alloc_region.init();
|
||||
}
|
||||
|
||||
void G1DefaultAllocator::release_mutator_alloc_region() {
|
||||
_mutator_alloc_region.release();
|
||||
assert(_mutator_alloc_region.get() == NULL, "post-condition");
|
||||
}
|
||||
|
||||
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
|
||||
OldGCAllocRegion* old,
|
||||
HeapRegion** retained_old) {
|
||||
HeapRegion* retained_region = *retained_old;
|
||||
*retained_old = NULL;
|
||||
|
||||
// We will discard the current GC alloc region if:
|
||||
// a) it's in the collection set (it can happen!),
|
||||
// b) it's already full (no point in using it),
|
||||
// c) it's empty (this means that it was emptied during
|
||||
// a cleanup and it should be on the free list now), or
|
||||
// d) it's humongous (this means that it was emptied
|
||||
// during a cleanup and was added to the free list, but
|
||||
// has been subsequently used to allocate a humongous
|
||||
// object that may be less than the region size).
|
||||
if (retained_region != NULL &&
|
||||
!retained_region->in_collection_set() &&
|
||||
!(retained_region->top() == retained_region->end()) &&
|
||||
!retained_region->is_empty() &&
|
||||
!retained_region->is_humongous()) {
|
||||
retained_region->record_top_and_timestamp();
|
||||
// The retained region was added to the old region set when it was
|
||||
// retired. We have to remove it now, since we don't allow regions
|
||||
// we allocate to in the region sets. We'll re-add it later, when
|
||||
// it's retired again.
|
||||
_g1h->_old_set.remove(retained_region);
|
||||
bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
|
||||
retained_region->note_start_of_copying(during_im);
|
||||
old->set(retained_region);
|
||||
_g1h->_hr_printer.reuse(retained_region);
|
||||
evacuation_info.set_alloc_regions_used_before(retained_region->used());
|
||||
}
|
||||
}
|
||||
|
||||
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
|
||||
_survivor_gc_alloc_region.init();
|
||||
_old_gc_alloc_region.init();
|
||||
reuse_retained_old_region(evacuation_info,
|
||||
&_old_gc_alloc_region,
|
||||
&_retained_old_gc_alloc_region);
|
||||
}
|
||||
|
||||
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
|
||||
AllocationContext_t context = AllocationContext::current();
|
||||
evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
|
||||
old_gc_alloc_region(context)->count());
|
||||
survivor_gc_alloc_region(context)->release();
|
||||
// If we have an old GC alloc region to release, we'll save it in
|
||||
// _retained_old_gc_alloc_region. If we don't
|
||||
// _retained_old_gc_alloc_region will become NULL. This is what we
|
||||
// want either way so no reason to check explicitly for either
|
||||
// condition.
|
||||
_retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
|
||||
|
||||
if (ResizePLAB) {
|
||||
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
|
||||
_g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
|
||||
}
|
||||
}
|
||||
|
||||
void G1DefaultAllocator::abandon_gc_alloc_regions() {
|
||||
assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
|
||||
assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
|
||||
_retained_old_gc_alloc_region = NULL;
|
||||
}
|
||||
|
||||
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
|
||||
ParGCAllocBuffer(gclab_word_size), _retired(true) { }
|
||||
|
||||
HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
|
||||
HeapWord* obj = NULL;
|
||||
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
|
||||
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
|
||||
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
|
||||
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
|
||||
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
|
||||
|
||||
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
|
||||
if (buf == NULL) {
|
||||
return NULL; // Let caller handle allocation failure.
|
||||
}
|
||||
// Otherwise.
|
||||
alloc_buf->set_word_size(gclab_word_size);
|
||||
alloc_buf->set_buf(buf);
|
||||
|
||||
obj = alloc_buf->allocate(word_sz);
|
||||
assert(obj != NULL, "buffer was definitely big enough...");
|
||||
} else {
|
||||
obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
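allocate_slow() above retires and refills the per-thread PLAB only when the request is a small fraction of the buffer, so the words abandoned in the old buffer stay below ParallelGCBufferWastePct percent of a full buffer; larger requests go straight to an out-of-buffer allocation, as in the else branch. A small standalone sketch of that threshold test (plain C++, not the VM code):

#include <cstddef>

// Refill the PLAB only if the request is "small" relative to the buffer:
// word_sz * 100 < plab_word_sz * waste_pct means retiring the current
// buffer wastes at most waste_pct percent of a full buffer.
static bool should_refill_plab(size_t word_sz,
                               size_t plab_word_sz,
                               size_t waste_pct) {
  return word_sz * 100 < plab_word_sz * waste_pct;
}

Requests that fail this test are handed directly to par_allocate_during_gc() in the code above, without touching the buffer.
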
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
|
||||
G1ParGCAllocator(g1h),
|
||||
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
|
||||
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
|
||||
|
||||
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
|
||||
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
|
||||
|
||||
}
|
||||
|
||||
void G1DefaultParGCAllocator::retire_alloc_buffers() {
|
||||
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
||||
size_t waste = _alloc_buffers[ap]->words_remaining();
|
||||
add_to_alloc_buffer_waste(waste);
|
||||
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
|
||||
true /* end_of_gc */,
|
||||
false /* retain */);
|
||||
}
|
||||
}
|
242
hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp
Normal file
@ -0,0 +1,242 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
|
||||
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
|
||||
|
||||
#include "gc_implementation/g1/g1AllocationContext.hpp"
|
||||
#include "gc_implementation/g1/g1AllocRegion.hpp"
|
||||
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
|
||||
|
||||
enum GCAllocPurpose {
|
||||
GCAllocForTenured,
|
||||
GCAllocForSurvived,
|
||||
GCAllocPurposeCount
|
||||
};
|
||||
|
||||
// Base class for G1 allocators.
|
||||
class G1Allocator : public CHeapObj<mtGC> {
|
||||
friend class VMStructs;
|
||||
protected:
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
// Outside of GC pauses, the number of bytes used in all regions other
|
||||
// than the current allocation region.
|
||||
size_t _summary_bytes_used;
|
||||
|
||||
public:
|
||||
G1Allocator(G1CollectedHeap* heap) :
|
||||
_g1h(heap), _summary_bytes_used(0) { }
|
||||
|
||||
static G1Allocator* create_allocator(G1CollectedHeap* g1h);
|
||||
|
||||
virtual void init_mutator_alloc_region() = 0;
|
||||
virtual void release_mutator_alloc_region() = 0;
|
||||
|
||||
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
|
||||
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
|
||||
virtual void abandon_gc_alloc_regions() = 0;
|
||||
|
||||
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
|
||||
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
|
||||
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
|
||||
virtual size_t used() = 0;
|
||||
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
|
||||
|
||||
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
|
||||
OldGCAllocRegion* old,
|
||||
HeapRegion** retained);
|
||||
|
||||
size_t used_unlocked() const {
|
||||
return _summary_bytes_used;
|
||||
}
|
||||
|
||||
void increase_used(size_t bytes) {
|
||||
_summary_bytes_used += bytes;
|
||||
}
|
||||
|
||||
void decrease_used(size_t bytes) {
|
||||
assert(_summary_bytes_used >= bytes,
|
||||
err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
|
||||
_summary_bytes_used, bytes));
|
||||
_summary_bytes_used -= bytes;
|
||||
}
|
||||
|
||||
void set_used(size_t bytes) {
|
||||
_summary_bytes_used = bytes;
|
||||
}
|
||||
|
||||
virtual HeapRegion* new_heap_region(uint hrs_index,
|
||||
G1BlockOffsetSharedArray* sharedOffsetArray,
|
||||
MemRegion mr) {
|
||||
return new HeapRegion(hrs_index, sharedOffsetArray, mr);
|
||||
}
|
||||
};
|
||||
|
||||
// The default allocator for G1.
|
||||
class G1DefaultAllocator : public G1Allocator {
|
||||
protected:
|
||||
// Alloc region used to satisfy mutator allocation requests.
|
||||
MutatorAllocRegion _mutator_alloc_region;
|
||||
|
||||
// Alloc region used to satisfy allocation requests by the GC for
|
||||
// survivor objects.
|
||||
SurvivorGCAllocRegion _survivor_gc_alloc_region;
|
||||
|
||||
// Alloc region used to satisfy allocation requests by the GC for
|
||||
// old objects.
|
||||
OldGCAllocRegion _old_gc_alloc_region;
|
||||
|
||||
HeapRegion* _retained_old_gc_alloc_region;
|
||||
public:
|
||||
G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }
|
||||
|
||||
virtual void init_mutator_alloc_region();
|
||||
virtual void release_mutator_alloc_region();
|
||||
|
||||
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
|
||||
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
|
||||
virtual void abandon_gc_alloc_regions();
|
||||
|
||||
virtual bool is_retained_old_region(HeapRegion* hr) {
|
||||
return _retained_old_gc_alloc_region == hr;
|
||||
}
|
||||
|
||||
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
|
||||
return &_mutator_alloc_region;
|
||||
}
|
||||
|
||||
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
|
||||
return &_survivor_gc_alloc_region;
|
||||
}
|
||||
|
||||
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
|
||||
return &_old_gc_alloc_region;
|
||||
}
|
||||
|
||||
virtual size_t used() {
|
||||
assert(Heap_lock->owner() != NULL,
|
||||
"Should be owned on this thread's behalf.");
|
||||
size_t result = _summary_bytes_used;
|
||||
|
||||
// Read only once in case it is set to NULL concurrently
|
||||
HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
|
||||
if (hr != NULL) {
|
||||
result += hr->used();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
|
||||
private:
|
||||
bool _retired;
|
||||
|
||||
public:
|
||||
G1ParGCAllocBuffer(size_t gclab_word_size);
|
||||
virtual ~G1ParGCAllocBuffer() {
|
||||
guarantee(_retired, "Allocation buffer has not been retired");
|
||||
}
|
||||
|
||||
virtual void set_buf(HeapWord* buf) {
|
||||
ParGCAllocBuffer::set_buf(buf);
|
||||
_retired = false;
|
||||
}
|
||||
|
||||
virtual void retire(bool end_of_gc, bool retain) {
|
||||
if (_retired) {
|
||||
return;
|
||||
}
|
||||
ParGCAllocBuffer::retire(end_of_gc, retain);
|
||||
_retired = true;
|
||||
}
|
||||
};
|
||||
|
||||
class G1ParGCAllocator : public CHeapObj<mtGC> {
|
||||
friend class G1ParScanThreadState;
|
||||
protected:
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
size_t _alloc_buffer_waste;
|
||||
size_t _undo_waste;
|
||||
|
||||
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
|
||||
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
|
||||
|
||||
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
|
||||
|
||||
virtual void retire_alloc_buffers() = 0;
|
||||
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
|
||||
|
||||
public:
|
||||
G1ParGCAllocator(G1CollectedHeap* g1h) :
|
||||
_g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
|
||||
}
|
||||
|
||||
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
|
||||
|
||||
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
|
||||
size_t undo_waste() {return _undo_waste; }
|
||||
|
||||
HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
|
||||
HeapWord* obj = NULL;
|
||||
if (purpose == GCAllocForSurvived) {
|
||||
obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
|
||||
} else {
|
||||
obj = alloc_buffer(purpose, context)->allocate(word_sz);
|
||||
}
|
||||
if (obj != NULL) {
|
||||
return obj;
|
||||
}
|
||||
return allocate_slow(purpose, word_sz, context);
|
||||
}
|
||||
|
||||
void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
|
||||
if (alloc_buffer(purpose, context)->contains(obj)) {
|
||||
assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
|
||||
"should contain whole object");
|
||||
alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
|
||||
} else {
|
||||
CollectedHeap::fill_with_object(obj, word_sz);
|
||||
add_to_undo_waste(word_sz);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class G1DefaultParGCAllocator : public G1ParGCAllocator {
|
||||
G1ParGCAllocBuffer _surviving_alloc_buffer;
|
||||
G1ParGCAllocBuffer _tenured_alloc_buffer;
|
||||
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
|
||||
|
||||
public:
|
||||
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
|
||||
|
||||
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
|
||||
return _alloc_buffers[purpose];
|
||||
}
|
||||
|
||||
virtual void retire_alloc_buffers();
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
|
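With this interface in place, the heap keeps a single G1Allocator* and forwards mutator/GC region management to it, as the later G1CollectedHeap hunks in this commit show. A rough sketch of that delegation shape, with simplified hypothetical names rather than the real classes:

#include <cstddef>

// Simplified sketch of the heap-to-allocator delegation this commit introduces.
class Allocator {
 public:
  virtual ~Allocator() {}
  virtual void   init_mutator_alloc_region() = 0;
  virtual void   release_mutator_alloc_region() = 0;
  virtual size_t used_unlocked() const = 0;
  static Allocator* create_allocator();   // factory, see the next file
};

class DefaultAllocator : public Allocator {
  size_t _summary_bytes_used;
 public:
  DefaultAllocator() : _summary_bytes_used(0) {}
  virtual void   init_mutator_alloc_region()    { /* set up the mutator alloc region */ }
  virtual void   release_mutator_alloc_region() { /* retire the mutator alloc region */ }
  virtual size_t used_unlocked() const          { return _summary_bytes_used; }
};

Allocator* Allocator::create_allocator() { return new DefaultAllocator(); }

class Heap {
  Allocator* _allocator;
 public:
  Heap() : _allocator(Allocator::create_allocator()) {}
  ~Heap() { delete _allocator; }
  // The heap no longer owns the alloc-region fields; it forwards instead.
  void   after_init()     { _allocator->init_mutator_alloc_region(); }
  void   before_full_gc() { _allocator->release_mutator_alloc_region(); }
  size_t used_unlocked() const { return _allocator->used_unlocked(); }
};
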
@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1Allocator.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.hpp"
|
||||
|
||||
G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
|
||||
return new G1DefaultAllocator(g1h);
|
||||
}
|
||||
|
||||
G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
|
||||
return new G1DefaultParGCAllocator(g1h);
|
||||
}
|
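The short file above contains nothing but the two create_allocator() factory definitions, so an alternative build can substitute different concrete allocators by replacing this one translation unit (its file name is not visible in this extract). The pattern, in a generic hedged form:

// Generic sketch of the one-factory-per-translation-unit pattern used above.
// Swapping this single .cpp at build time changes which concrete allocator
// the rest of the collector receives, without touching any header.
struct Heap;                                   // opaque to the factory's callers

struct Allocator {
  virtual ~Allocator() {}
  static Allocator* create_allocator(Heap* h); // defined once per build flavour
};

struct DefaultAllocator : public Allocator {
  explicit DefaultAllocator(Heap*) {}
};

// Default flavour: hand back the stock implementation, mirroring
// G1Allocator::create_allocator() returning a G1DefaultAllocator.
Allocator* Allocator::create_allocator(Heap* h) {
  return new DefaultAllocator(h);
}
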
@ -469,7 +469,7 @@ bool G1CollectedHeap::is_in_partial_collection(const void* p) {
|
||||
// can move in an incremental collection.
|
||||
bool G1CollectedHeap::is_scavengable(const void* p) {
|
||||
HeapRegion* hr = heap_region_containing(p);
|
||||
return !hr->isHumongous();
|
||||
return !hr->is_humongous();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::check_ct_logs_at_safepoint() {
|
||||
@ -560,7 +560,7 @@ G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
|
||||
}
|
||||
|
||||
HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
|
||||
assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
|
||||
assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
|
||||
"the only time we use this to allocate a humongous region is "
|
||||
"when we are allocating a single humongous region");
|
||||
|
||||
@ -615,9 +615,10 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_e
|
||||
HeapWord*
|
||||
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
|
||||
uint num_regions,
|
||||
size_t word_size) {
|
||||
size_t word_size,
|
||||
AllocationContext_t context) {
|
||||
assert(first != G1_NO_HRM_INDEX, "pre-condition");
|
||||
assert(isHumongous(word_size), "word_size should be humongous");
|
||||
assert(is_humongous(word_size), "word_size should be humongous");
|
||||
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
|
||||
|
||||
// Index of last region in the series + 1.
|
||||
@ -666,14 +667,15 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
|
||||
// will also update the BOT covering all the regions to reflect
|
||||
// that there is a single object that starts at the bottom of the
|
||||
// first region.
|
||||
first_hr->set_startsHumongous(new_top, new_end);
|
||||
|
||||
first_hr->set_starts_humongous(new_top, new_end);
|
||||
first_hr->set_allocation_context(context);
|
||||
// Then, if there are any, we will set up the "continues
|
||||
// humongous" regions.
|
||||
HeapRegion* hr = NULL;
|
||||
for (uint i = first + 1; i < last; ++i) {
|
||||
hr = region_at(i);
|
||||
hr->set_continuesHumongous(first_hr);
|
||||
hr->set_continues_humongous(first_hr);
|
||||
hr->set_allocation_context(context);
|
||||
}
|
||||
// If we have "continues humongous" regions (hr != NULL), then the
|
||||
// end of the last one should match new_end.
|
||||
@ -711,7 +713,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
|
||||
// G1. For example, the code that looks for a consecutive number
|
||||
// of empty regions will consider them empty and try to
|
||||
// re-allocate them. We can extend is_empty() to also include
|
||||
// !continuesHumongous(), but it is easier to just update the top
|
||||
// !is_continues_humongous(), but it is easier to just update the top
|
||||
// fields here. The way we set top for all regions (i.e., top ==
|
||||
// end for all regions but the last one, top == new_top for the
|
||||
// last one) is actually used when we will free up the humongous
|
||||
@ -740,7 +742,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
|
||||
check_bitmaps("Humongous Region Allocation", first_hr);
|
||||
|
||||
assert(first_hr->used() == word_size * HeapWordSize, "invariant");
|
||||
_summary_bytes_used += first_hr->used();
|
||||
_allocator->increase_used(first_hr->used());
|
||||
_humongous_set.add(first_hr);
|
||||
|
||||
return new_obj;
|
||||
@ -749,7 +751,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
|
||||
// If could fit into free regions w/o expansion, try.
|
||||
// Otherwise, if can expand, do so.
|
||||
// Otherwise, if using ex regions might help, try with ex given back.
|
||||
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
|
||||
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
|
||||
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
|
||||
|
||||
verify_region_sets_optional();
|
||||
@ -818,7 +820,8 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
|
||||
|
||||
HeapWord* result = NULL;
|
||||
if (first != G1_NO_HRM_INDEX) {
|
||||
result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
|
||||
result = humongous_obj_allocate_initialize_regions(first, obj_regions,
|
||||
word_size, context);
|
||||
assert(result != NULL, "it should always return a valid result");
|
||||
|
||||
// A successful humongous object allocation changes the used space
|
||||
@ -834,7 +837,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
|
||||
|
||||
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
|
||||
assert_heap_not_locked_and_not_at_safepoint();
|
||||
assert(!isHumongous(word_size), "we do not allow humongous TLABs");
|
||||
assert(!is_humongous(word_size), "we do not allow humongous TLABs");
|
||||
|
||||
unsigned int dummy_gc_count_before;
|
||||
int dummy_gclocker_retry_count = 0;
|
||||
@ -851,7 +854,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
|
||||
unsigned int gc_count_before;
|
||||
|
||||
HeapWord* result = NULL;
|
||||
if (!isHumongous(word_size)) {
|
||||
if (!is_humongous(word_size)) {
|
||||
result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
|
||||
} else {
|
||||
result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
|
||||
@ -862,6 +865,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,
|
||||
|
||||
// Create the garbage collection operation...
|
||||
VM_G1CollectForAllocation op(gc_count_before, word_size);
|
||||
op.set_allocation_context(AllocationContext::current());
|
||||
|
||||
// ...and get the VM thread to execute it.
|
||||
VMThread::execute(&op);
|
||||
|
||||
@ -870,7 +875,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
|
||||
// if it is NULL. If the allocation attempt failed immediately
|
||||
// after a Full GC, it's unlikely we'll be able to allocate now.
|
||||
HeapWord* result = op.result();
|
||||
if (result != NULL && !isHumongous(word_size)) {
|
||||
if (result != NULL && !is_humongous(word_size)) {
|
||||
// Allocations that take place on VM operations do not do any
|
||||
// card dirtying and we have to do it here. We only have to do
|
||||
// this for non-humongous allocations, though.
|
||||
@ -897,12 +902,13 @@ G1CollectedHeap::mem_allocate(size_t word_size,
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
|
||||
unsigned int *gc_count_before_ret,
|
||||
int* gclocker_retry_count_ret) {
|
||||
AllocationContext_t context,
|
||||
unsigned int *gc_count_before_ret,
|
||||
int* gclocker_retry_count_ret) {
|
||||
// Make sure you read the note in attempt_allocation_humongous().
|
||||
|
||||
assert_heap_not_locked_and_not_at_safepoint();
|
||||
assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
|
||||
assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
|
||||
"be called for humongous allocation requests");
|
||||
|
||||
// We should only get here after the first-level allocation attempt
|
||||
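The hunk above threads an explicit AllocationContext_t parameter through attempt_allocation_slow(), and the surrounding hunks do the same for the other allocation entry points, so each request can select per-context resources such as its mutator alloc region. A hedged, self-contained illustration of that style (trivial single-context stand-in types, not the VM's):

#include <cstddef>

// Minimal stand-ins: a context tag plus a per-context slot table.
typedef unsigned AllocationContext_t;

struct AllocRegion { /* per-context mutator alloc region state */ };

class ContextAwareAllocator {
  enum { MaxContexts = 1 };                 // default build: a single context
  AllocRegion _mutator_alloc_region[MaxContexts];
 public:
  // Every allocation entry point carries the context explicitly, so the
  // caller's context picks which alloc region services the request.
  AllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region[context % MaxContexts];
  }
};
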
@ -919,23 +925,22 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
|
||||
|
||||
{
|
||||
MutexLockerEx x(Heap_lock);
|
||||
|
||||
result = _mutator_alloc_region.attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// If we reach here, attempt_allocation_locked() above failed to
|
||||
// allocate a new region. So the mutator alloc region should be NULL.
|
||||
assert(_mutator_alloc_region.get() == NULL, "only way to get here");
|
||||
assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
|
||||
|
||||
if (GC_locker::is_active_and_needs_gc()) {
|
||||
if (g1_policy()->can_expand_young_list()) {
|
||||
// No need for an ergo verbose message here,
|
||||
// can_expand_young_list() does this when it returns true.
|
||||
result = _mutator_alloc_region.attempt_allocation_force(word_size,
|
||||
false /* bot_updates */);
|
||||
result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
|
||||
false /* bot_updates */);
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
}
|
||||
@ -995,8 +1000,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
|
||||
// first attempt (without holding the Heap_lock) here and the
|
||||
// follow-on attempt will be at the start of the next loop
|
||||
// iteration (after taking the Heap_lock).
|
||||
result = _mutator_alloc_region.attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
}
|
||||
@ -1014,8 +1019,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
|
||||
unsigned int * gc_count_before_ret,
|
||||
int* gclocker_retry_count_ret) {
|
||||
unsigned int * gc_count_before_ret,
|
||||
int* gclocker_retry_count_ret) {
|
||||
// The structure of this method has a lot of similarities to
|
||||
// attempt_allocation_slow(). The reason these two were not merged
|
||||
// into a single one is that such a method would require several "if
|
||||
@ -1028,7 +1033,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
|
||||
// much as possible.
|
||||
|
||||
assert_heap_not_locked_and_not_at_safepoint();
|
||||
assert(isHumongous(word_size), "attempt_allocation_humongous() "
|
||||
assert(is_humongous(word_size), "attempt_allocation_humongous() "
|
||||
"should only be called for humongous allocations");
|
||||
|
||||
// Humongous objects can exhaust the heap quickly, so we should check if we
|
||||
@ -1056,7 +1061,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
|
||||
// Given that humongous objects are not allocated in young
|
||||
// regions, we'll first try to do the allocation without doing a
|
||||
// collection hoping that there's enough space in the heap.
|
||||
result = humongous_obj_allocate(word_size);
|
||||
result = humongous_obj_allocate(word_size, AllocationContext::current());
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
}
|
||||
@ -1132,17 +1137,18 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
|
||||
bool expect_null_mutator_alloc_region) {
|
||||
AllocationContext_t context,
|
||||
bool expect_null_mutator_alloc_region) {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
assert(_mutator_alloc_region.get() == NULL ||
|
||||
assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
|
||||
!expect_null_mutator_alloc_region,
|
||||
"the current alloc region was unexpectedly found to be non-NULL");
|
||||
|
||||
if (!isHumongous(word_size)) {
|
||||
return _mutator_alloc_region.attempt_allocation_locked(word_size,
|
||||
if (!is_humongous(word_size)) {
|
||||
return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
} else {
|
||||
HeapWord* result = humongous_obj_allocate(word_size);
|
||||
HeapWord* result = humongous_obj_allocate(word_size, context);
|
||||
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
|
||||
g1_policy()->set_initiate_conc_mark_if_possible();
|
||||
}
|
||||
@ -1162,7 +1168,7 @@ public:
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
HeapRegionRemSet* hrrs = r->rem_set();
|
||||
|
||||
if (r->continuesHumongous()) {
|
||||
if (r->is_continues_humongous()) {
|
||||
// We'll assert that the strong code root list and RSet is empty
|
||||
assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
|
||||
assert(hrrs->occupied() == 0, "RSet should be empty");
|
||||
@ -1199,7 +1205,7 @@ public:
|
||||
{ }
|
||||
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
if (!r->continuesHumongous()) {
|
||||
if (!r->is_continues_humongous()) {
|
||||
_cl.set_from(r);
|
||||
r->oop_iterate(&_cl);
|
||||
}
|
||||
@ -1231,14 +1237,14 @@ public:
|
||||
assert(!hr->is_young(), "not expecting to find young regions");
|
||||
if (hr->is_free()) {
|
||||
// We only generate output for non-empty regions.
|
||||
} else if (hr->startsHumongous()) {
|
||||
} else if (hr->is_starts_humongous()) {
|
||||
if (hr->region_num() == 1) {
|
||||
// single humongous region
|
||||
_hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
|
||||
} else {
|
||||
_hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
|
||||
}
|
||||
} else if (hr->continuesHumongous()) {
|
||||
} else if (hr->is_continues_humongous()) {
|
||||
_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
|
||||
} else if (hr->is_old()) {
|
||||
_hr_printer->post_compaction(hr, G1HRPrinter::Old);
|
||||
@ -1342,8 +1348,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
concurrent_mark()->abort();
|
||||
|
||||
// Make sure we'll choose a new allocation region afterwards.
|
||||
release_mutator_alloc_region();
|
||||
abandon_gc_alloc_regions();
|
||||
_allocator->release_mutator_alloc_region();
|
||||
_allocator->abandon_gc_alloc_regions();
|
||||
g1_rem_set()->cleanupHRRS();
|
||||
|
||||
// We should call this after we retire any currently active alloc
|
||||
@ -1515,7 +1521,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
|
||||
clear_cset_fast_test();
|
||||
|
||||
init_mutator_alloc_region();
|
||||
_allocator->init_mutator_alloc_region();
|
||||
|
||||
double end = os::elapsedTime();
|
||||
g1_policy()->record_full_collection_end();
|
||||
@ -1651,6 +1657,7 @@ resize_if_necessary_after_full_collection(size_t word_size) {
|
||||
|
||||
HeapWord*
|
||||
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
||||
AllocationContext_t context,
|
||||
bool* succeeded) {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
|
||||
@ -1658,7 +1665,8 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
||||
// Let's attempt the allocation first.
|
||||
HeapWord* result =
|
||||
attempt_allocation_at_safepoint(word_size,
|
||||
false /* expect_null_mutator_alloc_region */);
|
||||
context,
|
||||
false /* expect_null_mutator_alloc_region */);
|
||||
if (result != NULL) {
|
||||
assert(*succeeded, "sanity");
|
||||
return result;
|
||||
@ -1668,7 +1676,7 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
||||
// incremental pauses. Therefore, at least for now, we'll favor
|
||||
// expansion over collection. (This might change in the future if we can
|
||||
// do something smarter than full collection to satisfy a failed alloc.)
|
||||
result = expand_and_allocate(word_size);
|
||||
result = expand_and_allocate(word_size, context);
|
||||
if (result != NULL) {
|
||||
assert(*succeeded, "sanity");
|
||||
return result;
|
||||
@ -1685,7 +1693,8 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
||||
|
||||
// Retry the allocation
|
||||
result = attempt_allocation_at_safepoint(word_size,
|
||||
true /* expect_null_mutator_alloc_region */);
|
||||
context,
|
||||
true /* expect_null_mutator_alloc_region */);
|
||||
if (result != NULL) {
|
||||
assert(*succeeded, "sanity");
|
||||
return result;
|
||||
@ -1702,7 +1711,8 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
||||
|
||||
// Retry the allocation once more
|
||||
result = attempt_allocation_at_safepoint(word_size,
|
||||
true /* expect_null_mutator_alloc_region */);
|
||||
context,
|
||||
true /* expect_null_mutator_alloc_region */);
|
||||
if (result != NULL) {
|
||||
assert(*succeeded, "sanity");
|
||||
return result;
|
||||
@ -1724,7 +1734,7 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
||||
// successful, perform the allocation and return the address of the
|
||||
// allocated block, or else "NULL".
|
||||
|
||||
HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
|
||||
HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
|
||||
verify_region_sets_optional();
|
||||
@ -1739,7 +1749,8 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
|
||||
_hrm.verify_optional();
|
||||
verify_region_sets_optional();
|
||||
return attempt_allocation_at_safepoint(word_size,
|
||||
false /* expect_null_mutator_alloc_region */);
|
||||
context,
|
||||
false /* expect_null_mutator_alloc_region */);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@ -1816,7 +1827,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
|
||||
// We should only reach here at the end of a Full GC which means we
|
||||
// should not be holding on to any GC alloc regions. The method
|
||||
// below will make sure of that and do any remaining clean up.
|
||||
abandon_gc_alloc_regions();
|
||||
_allocator->abandon_gc_alloc_regions();
|
||||
|
||||
// Instead of tearing down / rebuilding the free lists here, we
|
||||
// could instead use the remove_all_pending() method on free_list to
|
||||
@ -1849,7 +1860,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
_bot_shared(NULL),
|
||||
_evac_failure_scan_stack(NULL),
|
||||
_mark_in_progress(false),
|
||||
_cg1r(NULL), _summary_bytes_used(0),
|
||||
_cg1r(NULL),
|
||||
_g1mm(NULL),
|
||||
_refine_cte_cl(NULL),
|
||||
_full_collection(false),
|
||||
@ -1861,7 +1872,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
_free_regions_coming(false),
|
||||
_young_list(new YoungList(this)),
|
||||
_gc_time_stamp(0),
|
||||
_retained_old_gc_alloc_region(NULL),
|
||||
_survivor_plab_stats(YoungPLABSize, PLABWeight),
|
||||
_old_plab_stats(OldPLABSize, PLABWeight),
|
||||
_expand_heap_after_alloc_failure(true),
|
||||
@ -1884,6 +1894,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
|
||||
vm_exit_during_initialization("Failed necessary allocation.");
|
||||
}
|
||||
|
||||
_allocator = G1Allocator::create_allocator(_g1h);
|
||||
_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
|
||||
|
||||
int n_queues = MAX2((int)ParallelGCThreads, 1);
|
||||
@ -1960,15 +1971,10 @@ jint G1CollectedHeap::initialize() {
|
||||
ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
|
||||
heap_alignment);
|
||||
|
||||
// It is important to do this in a way such that concurrent readers can't
|
||||
// temporarily think something is in the heap. (I've actually seen this
|
||||
// happen in asserts: DLD.)
|
||||
_reserved.set_word_size(0);
|
||||
_reserved.set_start((HeapWord*)heap_rs.base());
|
||||
_reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
|
||||
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
|
||||
|
||||
// Create the gen rem set (and barrier set) for the entire reserved region.
|
||||
_rem_set = collector_policy()->create_rem_set(_reserved, 2);
|
||||
_rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
|
||||
set_barrier_set(rem_set()->bs());
|
||||
if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
|
||||
vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
|
||||
@ -2052,7 +2058,7 @@ jint G1CollectedHeap::initialize() {
|
||||
|
||||
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
|
||||
|
||||
_bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
|
||||
_bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
|
||||
|
||||
_g1h = this;
|
||||
|
||||
@ -2127,7 +2133,7 @@ jint G1CollectedHeap::initialize() {
|
||||
dummy_region->set_top(dummy_region->end());
|
||||
G1AllocRegion::setup(this, dummy_region);
|
||||
|
||||
init_mutator_alloc_region();
|
||||
_allocator->init_mutator_alloc_region();
|
||||
|
||||
// Do create of the monitoring and management support so that
|
||||
// values in the heap have been properly initialized.
|
||||
@ -2237,14 +2243,14 @@ size_t G1CollectedHeap::capacity() const {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
|
||||
assert(!hr->continuesHumongous(), "pre-condition");
|
||||
assert(!hr->is_continues_humongous(), "pre-condition");
|
||||
hr->reset_gc_time_stamp();
|
||||
if (hr->startsHumongous()) {
|
||||
if (hr->is_starts_humongous()) {
|
||||
uint first_index = hr->hrm_index() + 1;
|
||||
uint last_index = hr->last_hc_index();
|
||||
for (uint i = first_index; i < last_index; i += 1) {
|
||||
HeapRegion* chr = region_at(i);
|
||||
assert(chr->continuesHumongous(), "sanity");
|
||||
assert(chr->is_continues_humongous(), "sanity");
|
||||
chr->reset_gc_time_stamp();
|
||||
}
|
||||
}
|
||||
@ -2301,21 +2307,12 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
|
||||
|
||||
|
||||
// Computes the sum of the storage used by the various regions.
|
||||
|
||||
size_t G1CollectedHeap::used() const {
|
||||
assert(Heap_lock->owner() != NULL,
|
||||
"Should be owned on this thread's behalf.");
|
||||
size_t result = _summary_bytes_used;
|
||||
// Read only once in case it is set to NULL concurrently
|
||||
HeapRegion* hr = _mutator_alloc_region.get();
|
||||
if (hr != NULL)
|
||||
result += hr->used();
|
||||
return result;
|
||||
return _allocator->used();
|
||||
}
|
||||
|
||||
size_t G1CollectedHeap::used_unlocked() const {
|
||||
size_t result = _summary_bytes_used;
|
||||
return result;
|
||||
return _allocator->used_unlocked();
|
||||
}
|
||||
|
||||
class SumUsedClosure: public HeapRegionClosure {
|
||||
@ -2323,7 +2320,7 @@ class SumUsedClosure: public HeapRegionClosure {
|
||||
public:
|
||||
SumUsedClosure() : _used(0) {}
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
if (!r->continuesHumongous()) {
|
||||
if (!r->is_continues_humongous()) {
|
||||
_used += r->used();
|
||||
}
|
||||
return false;
|
||||
@ -2355,11 +2352,12 @@ void G1CollectedHeap::allocate_dummy_regions() {
|
||||
// Let's fill up most of the region
|
||||
size_t word_size = HeapRegion::GrainWords - 1024;
|
||||
// And as a result the region we'll allocate will be humongous.
|
||||
guarantee(isHumongous(word_size), "sanity");
|
||||
guarantee(is_humongous(word_size), "sanity");
|
||||
|
||||
for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
|
||||
// Let's use the existing mechanism for the allocation
|
||||
HeapWord* dummy_obj = humongous_obj_allocate(word_size);
|
||||
HeapWord* dummy_obj = humongous_obj_allocate(word_size,
|
||||
AllocationContext::system());
|
||||
if (dummy_obj != NULL) {
|
||||
MemRegion mr(dummy_obj, word_size);
|
||||
CollectedHeap::fill_with_object(mr);
|
||||
@ -2510,6 +2508,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
|
||||
true, /* should_initiate_conc_mark */
|
||||
g1_policy()->max_pause_time_ms(),
|
||||
cause);
|
||||
op.set_allocation_context(AllocationContext::current());
|
||||
|
||||
VMThread::execute(&op);
|
||||
if (!op.pause_succeeded()) {
|
||||
@ -2581,7 +2580,7 @@ class IterateOopClosureRegionClosure: public HeapRegionClosure {
|
||||
public:
|
||||
IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
if (!r->continuesHumongous()) {
|
||||
if (!r->is_continues_humongous()) {
|
||||
r->oop_iterate(_cl);
|
||||
}
|
||||
return false;
|
||||
@ -2600,7 +2599,7 @@ class IterateObjectClosureRegionClosure: public HeapRegionClosure {
|
||||
public:
|
||||
IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
if (! r->continuesHumongous()) {
|
||||
if (!r->is_continues_humongous()) {
|
||||
r->object_iterate(_cl);
|
||||
}
|
||||
return false;
|
||||
@ -2682,11 +2681,11 @@ public:
|
||||
r->claim_value(), _claim_value);
|
||||
++_failures;
|
||||
}
|
||||
if (!r->isHumongous()) {
|
||||
if (!r->is_humongous()) {
|
||||
_sh_region = NULL;
|
||||
} else if (r->startsHumongous()) {
|
||||
} else if (r->is_starts_humongous()) {
|
||||
_sh_region = r;
|
||||
} else if (r->continuesHumongous()) {
|
||||
} else if (r->is_continues_humongous()) {
|
||||
if (r->humongous_start_region() != _sh_region) {
|
||||
gclog_or_tty->print_cr("Region " HR_FORMAT ", "
|
||||
"HS = "PTR_FORMAT", should be "PTR_FORMAT,
|
||||
@ -2720,7 +2719,7 @@ public:
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
assert(hr->in_collection_set(), "how?");
|
||||
assert(!hr->isHumongous(), "H-region in CSet");
|
||||
assert(!hr->is_humongous(), "H-region in CSet");
|
||||
if (hr->claim_value() != _claim_value) {
|
||||
gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
|
||||
"claim value = %d, should be %d",
|
||||
@ -2859,7 +2858,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
|
||||
|
||||
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
|
||||
HeapRegion* result = _hrm.next_region_in_heap(from);
|
||||
while (result != NULL && result->isHumongous()) {
|
||||
while (result != NULL && result->is_humongous()) {
|
||||
result = _hrm.next_region_in_heap(result);
|
||||
}
|
||||
return result;
|
||||
@ -2910,7 +2909,7 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
|
||||
// since we can't allow tlabs to grow big enough to accommodate
|
||||
// humongous objects.
|
||||
|
||||
HeapRegion* hr = _mutator_alloc_region.get();
|
||||
HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
|
||||
size_t max_tlab = max_tlab_size() * wordSize;
|
||||
if (hr == NULL) {
|
||||
return max_tlab;
|
||||
@ -3219,7 +3218,7 @@ public:
|
||||
}
|
||||
|
||||
bool doHeapRegion(HeapRegion* r) {
|
||||
if (!r->continuesHumongous()) {
|
||||
if (!r->is_continues_humongous()) {
|
||||
bool failures = false;
|
||||
r->verify(_vo, &failures);
|
||||
if (failures) {
|
||||
@ -3597,7 +3596,7 @@ void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
|
||||
void G1CollectedHeap::gc_epilogue(bool full) {
|
||||
|
||||
if (G1SummarizeRSetStats &&
|
||||
(G1SummarizeRSetStatsPeriod > 0) &&
|
||||
@ -3614,6 +3613,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
|
||||
// always_do_update_barrier = true;
|
||||
|
||||
resize_all_tlabs();
|
||||
allocation_context_stats().update(full);
|
||||
|
||||
// We have just completed a GC. Update the soft reference
|
||||
// policy with the new heap occupancy
|
||||
@ -3631,6 +3631,8 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
|
||||
false, /* should_initiate_conc_mark */
|
||||
g1_policy()->max_pause_time_ms(),
|
||||
gc_cause);
|
||||
|
||||
op.set_allocation_context(AllocationContext::current());
|
||||
VMThread::execute(&op);
|
||||
|
||||
HeapWord* result = op.result();
|
||||
@ -3676,7 +3678,7 @@ size_t G1CollectedHeap::cards_scanned() {
|
||||
|
||||
bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
|
||||
HeapRegion* region = region_at(index);
|
||||
assert(region->startsHumongous(), "Must start a humongous object");
|
||||
assert(region->is_starts_humongous(), "Must start a humongous object");
|
||||
return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
|
||||
}
|
||||
|
||||
@ -3689,7 +3691,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
|
||||
}
|
||||
|
||||
virtual bool doHeapRegion(HeapRegion* r) {
|
||||
if (!r->startsHumongous()) {
|
||||
if (!r->is_starts_humongous()) {
|
||||
return false;
|
||||
}
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
@ -3961,7 +3963,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
|
||||
// Forget the current alloc region (we might even choose it to be part
|
||||
// of the collection set!).
|
||||
release_mutator_alloc_region();
|
||||
_allocator->release_mutator_alloc_region();
|
||||
|
||||
// We should call this after we retire the mutator alloc
|
||||
// region(s) so that all the ALLOC / RETIRE events are generated
|
||||
@ -4044,7 +4046,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
setup_surviving_young_words();
|
||||
|
||||
// Initialize the GC alloc regions.
|
||||
init_gc_alloc_regions(evacuation_info);
|
||||
_allocator->init_gc_alloc_regions(evacuation_info);
|
||||
|
||||
// Actually do the work...
|
||||
evacuate_collection_set(evacuation_info);
|
||||
@ -4093,7 +4095,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
_young_list->reset_auxilary_lists();
|
||||
|
||||
if (evacuation_failed()) {
|
||||
_summary_bytes_used = recalculate_used();
|
||||
_allocator->set_used(recalculate_used());
|
||||
uint n_queues = MAX2((int)ParallelGCThreads, 1);
|
||||
for (uint i = 0; i < n_queues; i++) {
|
||||
if (_evacuation_failed_info_array[i].has_failed()) {
|
||||
@ -4103,7 +4105,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
} else {
|
||||
// The "used" of the the collection set have already been subtracted
|
||||
// when they were freed. Add in the bytes evacuated.
|
||||
_summary_bytes_used += g1_policy()->bytes_copied_during_gc();
|
||||
_allocator->increase_used(g1_policy()->bytes_copied_during_gc());
|
||||
}
|
||||
|
||||
if (g1_policy()->during_initial_mark_pause()) {
|
||||
@ -4125,7 +4127,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
|
||||
#endif // YOUNG_LIST_VERBOSE
|
||||
|
||||
init_mutator_alloc_region();
|
||||
_allocator->init_mutator_alloc_region();
|
||||
|
||||
{
|
||||
size_t expand_bytes = g1_policy()->expansion_amount();
|
||||
@ -4270,80 +4272,6 @@ size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
|
||||
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::init_mutator_alloc_region() {
|
||||
assert(_mutator_alloc_region.get() == NULL, "pre-condition");
|
||||
_mutator_alloc_region.init();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::release_mutator_alloc_region() {
|
||||
_mutator_alloc_region.release();
|
||||
assert(_mutator_alloc_region.get() == NULL, "post-condition");
|
||||
}
|
||||
|
||||
void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
|
||||
HeapRegion* retained_region = _retained_old_gc_alloc_region;
|
||||
_retained_old_gc_alloc_region = NULL;
|
||||
|
||||
// We will discard the current GC alloc region if:
|
||||
// a) it's in the collection set (it can happen!),
|
||||
// b) it's already full (no point in using it),
|
||||
// c) it's empty (this means that it was emptied during
|
||||
// a cleanup and it should be on the free list now), or
|
||||
// d) it's humongous (this means that it was emptied
|
||||
// during a cleanup and was added to the free list, but
|
||||
// has been subsequently used to allocate a humongous
|
||||
// object that may be less than the region size).
|
||||
if (retained_region != NULL &&
|
||||
!retained_region->in_collection_set() &&
|
||||
!(retained_region->top() == retained_region->end()) &&
|
||||
!retained_region->is_empty() &&
|
||||
!retained_region->isHumongous()) {
|
||||
retained_region->record_top_and_timestamp();
|
||||
// The retained region was added to the old region set when it was
|
||||
// retired. We have to remove it now, since we don't allow regions
|
||||
// we allocate to in the region sets. We'll re-add it later, when
|
||||
// it's retired again.
|
||||
_old_set.remove(retained_region);
|
||||
bool during_im = g1_policy()->during_initial_mark_pause();
|
||||
retained_region->note_start_of_copying(during_im);
|
||||
_old_gc_alloc_region.set(retained_region);
|
||||
_hr_printer.reuse(retained_region);
|
||||
evacuation_info.set_alloc_regions_used_before(retained_region->used());
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
|
||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||
|
||||
_survivor_gc_alloc_region.init();
|
||||
_old_gc_alloc_region.init();
|
||||
|
||||
use_retained_old_gc_alloc_region(evacuation_info);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
|
||||
evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
|
||||
_old_gc_alloc_region.count());
|
||||
_survivor_gc_alloc_region.release();
|
||||
// If we have an old GC alloc region to release, we'll save it in
|
||||
// _retained_old_gc_alloc_region. If we don't
|
||||
// _retained_old_gc_alloc_region will become NULL. This is what we
|
||||
// want either way so no reason to check explicitly for either
|
||||
// condition.
|
||||
_retained_old_gc_alloc_region = _old_gc_alloc_region.release();
|
||||
|
||||
if (ResizePLAB) {
|
||||
_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
|
||||
_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectedHeap::abandon_gc_alloc_regions() {
|
||||
assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
|
||||
assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
|
||||
_retained_old_gc_alloc_region = NULL;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
|
||||
_drain_in_progress = false;
|
||||
set_evac_failure_closure(cl);
|
||||
@ -4484,25 +4412,26 @@ void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
|
||||
size_t word_size) {
|
||||
size_t word_size,
|
||||
AllocationContext_t context) {
|
||||
if (purpose == GCAllocForSurvived) {
|
||||
HeapWord* result = survivor_attempt_allocation(word_size);
|
||||
HeapWord* result = survivor_attempt_allocation(word_size, context);
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
} else {
|
||||
// Let's try to allocate in the old gen in case we can fit the
|
||||
// object there.
|
||||
return old_attempt_allocation(word_size);
|
||||
return old_attempt_allocation(word_size, context);
|
||||
}
|
||||
} else {
|
||||
assert(purpose == GCAllocForTenured, "sanity");
|
||||
HeapWord* result = old_attempt_allocation(word_size);
|
||||
HeapWord* result = old_attempt_allocation(word_size, context);
|
||||
if (result != NULL) {
|
||||
return result;
|
||||
} else {
|
||||
// Let's try to allocate in the survivors in case we can fit the
|
||||
// object there.
|
||||
return survivor_attempt_allocation(word_size);
|
||||
return survivor_attempt_allocation(word_size, context);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4511,9 +4440,6 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
|
||||
ParGCAllocBuffer(gclab_word_size), _retired(true) { }
|
||||
|
||||
void G1ParCopyHelper::mark_object(oop obj) {
|
||||
assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
|
||||
|
||||
@ -5087,7 +5013,11 @@ private:
|
||||
_num_entered_barrier(0)
|
||||
{
|
||||
nmethod::increase_unloading_clock();
|
||||
_first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
|
||||
// Get first alive nmethod
|
||||
NMethodIterator iter = NMethodIterator();
|
||||
if(iter.next_alive()) {
|
||||
_first_nmethod = iter.method();
|
||||
}
|
||||
_claimed_nmethod = (volatile nmethod*)_first_nmethod;
|
||||
}
|
||||
|
||||
@ -5130,27 +5060,26 @@ private:
|
||||
|
||||
void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
|
||||
nmethod* first;
|
||||
nmethod* last;
|
||||
NMethodIterator last;
|
||||
|
||||
do {
|
||||
*num_claimed_nmethods = 0;
|
||||
|
||||
first = last = (nmethod*)_claimed_nmethod;
|
||||
first = (nmethod*)_claimed_nmethod;
|
||||
last = NMethodIterator(first);
|
||||
|
||||
if (first != NULL) {
|
||||
for (int i = 0; i < MaxClaimNmethods; i++) {
|
||||
last = CodeCache::alive_nmethod(CodeCache::next(last));
|
||||
|
||||
if (last == NULL) {
|
||||
for (int i = 0; i < MaxClaimNmethods; i++) {
|
||||
if (!last.next_alive()) {
|
||||
break;
|
||||
}
|
||||
|
||||
claimed_nmethods[i] = last;
|
||||
claimed_nmethods[i] = last.method();
|
||||
(*num_claimed_nmethods)++;
|
||||
}
|
||||
}
|
||||
|
||||
} while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
|
||||
} while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
|
||||
}
|
||||
|
||||
nmethod* claim_postponed_nmethod() {
|
||||
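The claim_nmethods() hunk above replaces the raw CodeCache::first()/next()/alive_nmethod() walk with an NMethodIterator, whose next_alive() appears to advance past non-alive entries and whose method() yields the current nmethod. A standalone imitation of that iterator shape (toy types, not the VM's code cache):

#include <vector>
#include <cstddef>

struct ToyNMethod { bool alive; };

// Toy version of the iterator style the hunk above adopts: callers loop on
// next_alive() and read the current element via method().
class ToyNMethodIterator {
  const std::vector<ToyNMethod>* _cache;
  size_t _pos;                 // index one past the current element
 public:
  explicit ToyNMethodIterator(const std::vector<ToyNMethod>* cache)
      : _cache(cache), _pos(0) {}
  // Advance to the next alive entry; false once the cache is exhausted.
  bool next_alive() {
    while (_pos < _cache->size()) {
      bool alive = (*_cache)[_pos].alive;
      ++_pos;
      if (alive) return true;
    }
    return false;
  }
  // Valid only after next_alive() has returned true.
  const ToyNMethod* method() const { return &(*_cache)[_pos - 1]; }
};
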
@ -6008,7 +5937,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
|
||||
}
|
||||
}
|
||||
|
||||
release_gc_alloc_regions(n_workers, evacuation_info);
|
||||
_allocator->release_gc_alloc_regions(n_workers, evacuation_info);
|
||||
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
|
||||
|
||||
// Reset and re-enable the hot card cache.
|
||||
@ -6075,7 +6004,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
|
||||
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
|
||||
FreeRegionList* free_list,
|
||||
bool par) {
|
||||
assert(hr->startsHumongous(), "this is only for starts humongous regions");
|
||||
assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
|
||||
assert(free_list != NULL, "pre-condition");
|
||||
|
||||
size_t hr_capacity = hr->capacity();
|
||||
@ -6088,7 +6017,7 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
|
||||
uint i = hr->hrm_index() + 1;
|
||||
while (i < last_index) {
|
||||
HeapRegion* curr_hr = region_at(i);
|
||||
assert(curr_hr->continuesHumongous(), "invariant");
|
||||
assert(curr_hr->is_continues_humongous(), "invariant");
|
||||
curr_hr->clear_humongous();
|
||||
free_region(curr_hr, free_list, par);
|
||||
i += 1;
|
||||
@ -6114,10 +6043,7 @@ void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
|
||||
assert(_summary_bytes_used >= bytes,
|
||||
err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
|
||||
_summary_bytes_used, bytes));
|
||||
_summary_bytes_used -= bytes;
|
||||
_allocator->decrease_used(bytes);
|
||||
}
|
||||
|
||||
class G1ParCleanupCTTask : public AbstractGangTask {
|
||||
@ -6259,7 +6185,7 @@ public:
|
||||
bool failures() { return _failures; }
|
||||
|
||||
virtual bool doHeapRegion(HeapRegion* hr) {
|
||||
if (hr->continuesHumongous()) return false;
|
||||
if (hr->is_continues_humongous()) return false;
|
||||
|
||||
bool result = _g1h->verify_bitmaps(_caller, hr);
|
||||
if (!result) {
|
||||
@ -6438,7 +6364,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
|
||||
}
|
||||
|
||||
virtual bool doHeapRegion(HeapRegion* r) {
|
||||
if (!r->startsHumongous()) {
|
||||
if (!r->is_starts_humongous()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -6484,7 +6410,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
|
||||
|
||||
if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
|
||||
gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
|
||||
r->isHumongous(),
|
||||
r->is_humongous(),
|
||||
region_idx,
|
||||
r->rem_set()->occupied(),
|
||||
r->rem_set()->strong_code_roots_list_length(),
|
||||
@ -6503,7 +6429,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
|
||||
|
||||
if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
|
||||
gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
|
||||
r->isHumongous(),
|
||||
r->is_humongous(),
|
||||
r->bottom(),
|
||||
region_idx,
|
||||
r->region_num(),
|
||||
@ -6693,7 +6619,7 @@ public:
|
||||
// We ignore young regions, we'll empty the young list afterwards.
|
||||
// We ignore humongous regions, we're not tearing down the
|
||||
// humongous regions set.
|
||||
assert(r->is_free() || r->is_young() || r->isHumongous(),
|
||||
assert(r->is_free() || r->is_young() || r->is_humongous(),
|
||||
"it cannot be another type");
|
||||
}
|
||||
return false;
|
||||
@ -6738,18 +6664,19 @@ public:
}

bool doHeapRegion(HeapRegion* r) {
if (r->continuesHumongous()) {
if (r->is_continues_humongous()) {
return false;
}

if (r->is_empty()) {
// Add free regions to the free list
r->set_free();
r->set_allocation_context(AllocationContext::system());
_hrm->insert_into_free_list(r);
} else if (!_free_list_only) {
assert(!r->is_young(), "we should not come across young regions");

if (r->isHumongous()) {
if (r->is_humongous()) {
// We ignore humongous regions, we left the humongous set unchanged
} else {
// Objects that were compacted would have ended up on regions
@ -6781,12 +6708,12 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
heap_region_iterate(&cl);

if (!free_list_only) {
_summary_bytes_used = cl.total_used();
_allocator->set_used(cl.total_used());
}
assert(_summary_bytes_used == recalculate_used(),
err_msg("inconsistent _summary_bytes_used, "
assert(_allocator->used_unlocked() == recalculate_used(),
err_msg("inconsistent _allocator->used_unlocked(), "
"value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
_summary_bytes_used, recalculate_used()));
_allocator->used_unlocked(), recalculate_used()));
}

void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@ -6826,7 +6753,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
_summary_bytes_used += allocated_bytes;
_allocator->increase_used(allocated_bytes);
_hr_printer.retire(alloc_region);
// We update the eden sizes here, when the region is retired,
// instead of when it's allocated, since this is the point that its
@ -6834,11 +6761,6 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
g1mm()->update_eden_size();
}

HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
bool force) {
return _g1h->new_mutator_alloc_region(word_size, force);
}

void G1CollectedHeap::set_par_threads() {
// Don't change the number of workers. Use the value previously set
// in the workgroup.
@ -6855,11 +6777,6 @@ void G1CollectedHeap::set_par_threads() {
set_par_threads(n_workers);
}

void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}

// Methods for the GC alloc regions
|
||||
|
||||
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
|
||||
@ -6910,58 +6827,6 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
|
||||
_hr_printer.retire(alloc_region);
|
||||
}
|
||||
|
||||
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
|
||||
bool force) {
|
||||
assert(!force, "not supported for GC alloc regions");
|
||||
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
|
||||
}
|
||||
|
||||
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
|
||||
size_t allocated_bytes) {
|
||||
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
|
||||
GCAllocForSurvived);
|
||||
}
|
||||
|
||||
HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
|
||||
bool force) {
|
||||
assert(!force, "not supported for GC alloc regions");
|
||||
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
|
||||
}
|
||||
|
||||
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
|
||||
size_t allocated_bytes) {
|
||||
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
|
||||
GCAllocForTenured);
|
||||
}
|
||||
|
||||
HeapRegion* OldGCAllocRegion::release() {
|
||||
HeapRegion* cur = get();
|
||||
if (cur != NULL) {
|
||||
// Determine how far we are from the next card boundary. If it is smaller than
|
||||
// the minimum object size we can allocate into, expand into the next card.
|
||||
HeapWord* top = cur->top();
|
||||
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
|
||||
|
||||
size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
|
||||
|
||||
if (to_allocate_words != 0) {
|
||||
// We are not at a card boundary. Fill up, possibly into the next, taking the
|
||||
// end of the region and the minimum object size into account.
|
||||
to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
|
||||
MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
|
||||
|
||||
// Skip allocation if there is not enough space to allocate even the smallest
|
||||
// possible object. In this case this region will not be retained, so the
|
||||
// original problem cannot occur.
|
||||
if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
|
||||
HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
|
||||
CollectedHeap::fill_with_object(dummy, to_allocate_words);
|
||||
}
|
||||
}
|
||||
}
|
||||
return G1AllocRegion::release();
|
||||
}
|
||||
|
||||
// Heap region set verification
|
||||
|
||||
class VerifyRegionListsClosure : public HeapRegionClosure {
|
||||
@ -6982,13 +6847,13 @@ public:
|
||||
_old_count(), _humongous_count(), _free_count(){ }
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
if (hr->continuesHumongous()) {
|
||||
if (hr->is_continues_humongous()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (hr->is_young()) {
|
||||
// TODO
|
||||
} else if (hr->startsHumongous()) {
|
||||
} else if (hr->is_starts_humongous()) {
|
||||
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
|
||||
_humongous_count.increment(1u, hr->capacity());
|
||||
} else if (hr->is_empty()) {
|
||||
@ -7069,7 +6934,7 @@ class RegisterNMethodOopClosure: public OopClosure {
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
HeapRegion* hr = _g1h->heap_region_containing(obj);
|
||||
assert(!hr->continuesHumongous(),
|
||||
assert(!hr->is_continues_humongous(),
|
||||
err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
|
||||
" starting at "HR_FORMAT,
|
||||
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
|
||||
@ -7096,7 +6961,7 @@ class UnregisterNMethodOopClosure: public OopClosure {
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
HeapRegion* hr = _g1h->heap_region_containing(obj);
|
||||
assert(!hr->continuesHumongous(),
|
||||
assert(!hr->is_continues_humongous(),
|
||||
err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
|
||||
" starting at "HR_FORMAT,
|
||||
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
|
||||
|
@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
@ -80,12 +82,6 @@ typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )

enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
GCAllocPurposeCount
};

class YoungList : public CHeapObj<mtGC> {
|
||||
private:
|
||||
G1CollectedHeap* _g1h;
|
||||
@ -158,40 +154,6 @@ public:
|
||||
void print();
|
||||
};
|
||||
|
||||
class MutatorAllocRegion : public G1AllocRegion {
|
||||
protected:
|
||||
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
|
||||
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
|
||||
public:
|
||||
MutatorAllocRegion()
|
||||
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
|
||||
};
|
||||
|
||||
class SurvivorGCAllocRegion : public G1AllocRegion {
|
||||
protected:
|
||||
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
|
||||
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
|
||||
public:
|
||||
SurvivorGCAllocRegion()
|
||||
: G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
|
||||
};
|
||||
|
||||
class OldGCAllocRegion : public G1AllocRegion {
|
||||
protected:
|
||||
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
|
||||
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
|
||||
public:
|
||||
OldGCAllocRegion()
|
||||
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
|
||||
|
||||
// This specialization of release() makes sure that the last card that has been
|
||||
// allocated into has been completely filled by a dummy object.
|
||||
// This avoids races when remembered set scanning wants to update the BOT of the
|
||||
// last card in the retained old gc alloc region, and allocation threads
|
||||
// allocating into that card at the same time.
|
||||
virtual HeapRegion* release();
|
||||
};
|
||||
|
||||
// The G1 STW is alive closure.
|
||||
// An instance is embedded into the G1CH and used as the
|
||||
// (optional) _is_alive_non_header closure in the STW
|
||||
@ -222,6 +184,9 @@ class G1CollectedHeap : public SharedHeap {
|
||||
friend class MutatorAllocRegion;
|
||||
friend class SurvivorGCAllocRegion;
|
||||
friend class OldGCAllocRegion;
|
||||
friend class G1Allocator;
|
||||
friend class G1DefaultAllocator;
|
||||
friend class G1ResManAllocator;
|
||||
|
||||
// Closures used in implementation.
|
||||
template <G1Barrier barrier, G1Mark do_mark_object>
|
||||
@ -232,6 +197,8 @@ class G1CollectedHeap : public SharedHeap {
|
||||
friend class G1ParScanClosureSuper;
|
||||
friend class G1ParEvacuateFollowersClosure;
|
||||
friend class G1ParTask;
|
||||
friend class G1ParGCAllocator;
|
||||
friend class G1DefaultParGCAllocator;
|
||||
friend class G1FreeGarbageRegionClosure;
|
||||
friend class RefineCardTableEntryClosure;
|
||||
friend class G1PrepareCompactClosure;
|
||||
@ -293,44 +260,18 @@ private:
|
||||
// The sequence of all heap regions in the heap.
|
||||
HeapRegionManager _hrm;
|
||||
|
||||
// Alloc region used to satisfy mutator allocation requests.
|
||||
MutatorAllocRegion _mutator_alloc_region;
|
||||
// Class that handles the different kinds of allocations.
|
||||
G1Allocator* _allocator;
|
||||
|
||||
// Alloc region used to satisfy allocation requests by the GC for
|
||||
// survivor objects.
|
||||
SurvivorGCAllocRegion _survivor_gc_alloc_region;
|
||||
// Statistics for each allocation context
|
||||
AllocationContextStats _allocation_context_stats;
|
||||
|
||||
// PLAB sizing policy for survivors.
|
||||
PLABStats _survivor_plab_stats;
|
||||
|
||||
// Alloc region used to satisfy allocation requests by the GC for
|
||||
// old objects.
|
||||
OldGCAllocRegion _old_gc_alloc_region;
|
||||
|
||||
// PLAB sizing policy for tenured objects.
|
||||
PLABStats _old_plab_stats;
|
||||
|
||||
PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
|
||||
PLABStats* stats = NULL;
|
||||
|
||||
switch (purpose) {
|
||||
case GCAllocForSurvived:
|
||||
stats = &_survivor_plab_stats;
|
||||
break;
|
||||
case GCAllocForTenured:
|
||||
stats = &_old_plab_stats;
|
||||
break;
|
||||
default:
|
||||
assert(false, "unrecognized GCAllocPurpose");
|
||||
}
|
||||
|
||||
return stats;
|
||||
}
|
||||
|
||||
// The last old region we allocated to during the last GC.
|
||||
// Typically, it is not full so we should re-use it during the next GC.
|
||||
HeapRegion* _retained_old_gc_alloc_region;
|
||||
|
||||
// It specifies whether we should attempt to expand the heap after a
|
||||
// region allocation failure. If heap expansion fails we set this to
|
||||
// false so that we don't re-attempt the heap expansion (it's likely
|
||||
@ -348,9 +289,6 @@ private:
|
||||
// It initializes the GC alloc regions at the start of a GC.
|
||||
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
|
||||
|
||||
// Setup the retained old gc alloc region as the currrent old gc alloc region.
|
||||
void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
|
||||
|
||||
// It releases the GC alloc regions at the end of a GC.
|
||||
void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
|
||||
|
||||
@ -361,13 +299,6 @@ private:
|
||||
// Helper for monitoring and management support.
|
||||
G1MonitoringSupport* _g1mm;
|
||||
|
||||
// Determines PLAB size for a particular allocation purpose.
|
||||
size_t desired_plab_sz(GCAllocPurpose purpose);
|
||||
|
||||
// Outside of GC pauses, the number of bytes used in all regions other
|
||||
// than the current allocation region.
|
||||
size_t _summary_bytes_used;
|
||||
|
||||
// Records whether the region at the given index is kept live by roots or
|
||||
// references from the young generation.
|
||||
class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
|
||||
@ -526,11 +457,12 @@ protected:
|
||||
// humongous region.
|
||||
HeapWord* humongous_obj_allocate_initialize_regions(uint first,
|
||||
uint num_regions,
|
||||
size_t word_size);
|
||||
size_t word_size,
|
||||
AllocationContext_t context);
|
||||
|
||||
// Attempt to allocate a humongous object of the given size. Return
|
||||
// NULL if unsuccessful.
|
||||
HeapWord* humongous_obj_allocate(size_t word_size);
|
||||
HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
|
||||
|
||||
// The following two methods, allocate_new_tlab() and
|
||||
// mem_allocate(), are the two main entry points from the runtime
|
||||
@ -586,6 +518,7 @@ protected:
|
||||
// retry the allocation attempt, potentially scheduling a GC
|
||||
// pause. This should only be used for non-humongous allocations.
|
||||
HeapWord* attempt_allocation_slow(size_t word_size,
|
||||
AllocationContext_t context,
|
||||
unsigned int* gc_count_before_ret,
|
||||
int* gclocker_retry_count_ret);
|
||||
|
||||
@ -600,7 +533,8 @@ protected:
|
||||
// specifies whether the mutator alloc region is expected to be NULL
|
||||
// or not.
|
||||
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
|
||||
bool expect_null_mutator_alloc_region);
|
||||
AllocationContext_t context,
|
||||
bool expect_null_mutator_alloc_region);
|
||||
|
||||
// It dirties the cards that cover the block so that so that the post
|
||||
// write barrier never queues anything when updating objects on this
|
||||
@ -612,7 +546,9 @@ protected:
|
||||
// allocation region, either by picking one or expanding the
|
||||
// heap, and then allocate a block of the given size. The block
|
||||
// may not be a humongous - it must fit into a single heap region.
|
||||
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
|
||||
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
|
||||
size_t word_size,
|
||||
AllocationContext_t context);
|
||||
|
||||
HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
|
||||
HeapRegion* alloc_region,
|
||||
@ -624,10 +560,12 @@ protected:
|
||||
void par_allocate_remaining_space(HeapRegion* r);
|
||||
|
||||
// Allocation attempt during GC for a survivor object / PLAB.
|
||||
inline HeapWord* survivor_attempt_allocation(size_t word_size);
|
||||
inline HeapWord* survivor_attempt_allocation(size_t word_size,
|
||||
AllocationContext_t context);
|
||||
|
||||
// Allocation attempt during GC for an old object / PLAB.
|
||||
inline HeapWord* old_attempt_allocation(size_t word_size);
|
||||
inline HeapWord* old_attempt_allocation(size_t word_size,
|
||||
AllocationContext_t context);
|
||||
|
||||
// These methods are the "callbacks" from the G1AllocRegion class.
|
||||
|
||||
@ -666,13 +604,15 @@ protected:
|
||||
// Callback from VM_G1CollectForAllocation operation.
|
||||
// This function does everything necessary/possible to satisfy a
|
||||
// failed allocation request (including collection, expansion, etc.)
|
||||
HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
|
||||
HeapWord* satisfy_failed_allocation(size_t word_size,
|
||||
AllocationContext_t context,
|
||||
bool* succeeded);
|
||||
|
||||
// Attempting to expand the heap sufficiently
|
||||
// to support an allocation of the given "word_size". If
|
||||
// successful, perform the allocation and return the address of the
|
||||
// allocated block, or else "NULL".
|
||||
HeapWord* expand_and_allocate(size_t word_size);
|
||||
HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
|
||||
|
||||
// Process any reference objects discovered during
|
||||
// an incremental evacuation pause.
|
||||
@ -684,6 +624,10 @@ protected:
|
||||
|
||||
public:
|
||||
|
||||
G1Allocator* allocator() {
|
||||
return _allocator;
|
||||
}
|
||||
|
||||
G1MonitoringSupport* g1mm() {
|
||||
assert(_g1mm != NULL, "should have been initialized");
|
||||
return _g1mm;
|
||||
@ -695,6 +639,29 @@ public:
|
||||
// (Rounds up to a HeapRegion boundary.)
|
||||
bool expand(size_t expand_bytes);
|
||||
|
||||
// Returns the PLAB statistics given a purpose.
|
||||
PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
|
||||
PLABStats* stats = NULL;
|
||||
|
||||
switch (purpose) {
|
||||
case GCAllocForSurvived:
|
||||
stats = &_survivor_plab_stats;
|
||||
break;
|
||||
case GCAllocForTenured:
|
||||
stats = &_old_plab_stats;
|
||||
break;
|
||||
default:
|
||||
assert(false, "unrecognized GCAllocPurpose");
|
||||
}
|
||||
|
||||
return stats;
|
||||
}
|
||||
|
||||
// Determines PLAB size for a particular allocation purpose.
|
||||
size_t desired_plab_sz(GCAllocPurpose purpose);
|
||||
|
||||
inline AllocationContextStats& allocation_context_stats();
|
||||
|
||||
// Do anything common to GC's.
|
||||
virtual void gc_prologue(bool full);
|
||||
virtual void gc_epilogue(bool full);
|
||||
@ -1272,7 +1239,7 @@ public:
|
||||
// Determine whether the given region is one that we are using as an
|
||||
// old GC alloc region.
|
||||
bool is_old_gc_alloc_region(HeapRegion* hr) {
|
||||
return hr == _retained_old_gc_alloc_region;
|
||||
return _allocator->is_retained_old_region(hr);
|
||||
}
|
||||
|
||||
// Perform a collection of the heap; intended for use in implementing
|
||||
@ -1283,6 +1250,11 @@ public:
|
||||
// The same as above but assume that the caller holds the Heap_lock.
|
||||
void collect_locked(GCCause::Cause cause);
|
||||
|
||||
virtual void copy_allocation_context_stats(const jint* contexts,
|
||||
jlong* totals,
|
||||
jbyte* accuracy,
|
||||
jint len);
|
||||
|
||||
// True iff an evacuation has failed in the most-recent collection.
|
||||
bool evacuation_failed() { return _evacuation_failed; }
|
||||
|
||||
@ -1540,7 +1512,7 @@ public:
|
||||
virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
|
||||
|
||||
// Returns "true" iff the given word_size is "very large".
|
||||
static bool isHumongous(size_t word_size) {
|
||||
static bool is_humongous(size_t word_size) {
|
||||
// Note this has to be strictly greater-than as the TLABs
|
||||
// are capped at the humongous threshold and we want to
|
||||
// ensure that we don't try to allocate a TLAB as
|
||||
@ -1747,28 +1719,4 @@ protected:
|
||||
size_t _max_heap_capacity;
|
||||
};
|
||||
|
||||
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
|
||||
private:
|
||||
bool _retired;
|
||||
|
||||
public:
|
||||
G1ParGCAllocBuffer(size_t gclab_word_size);
|
||||
virtual ~G1ParGCAllocBuffer() {
|
||||
guarantee(_retired, "Allocation buffer has not been retired");
|
||||
}
|
||||
|
||||
virtual void set_buf(HeapWord* buf) {
|
||||
ParGCAllocBuffer::set_buf(buf);
|
||||
_retired = false;
|
||||
}
|
||||
|
||||
virtual void retire(bool end_of_gc, bool retain) {
|
||||
if (_retired) {
|
||||
return;
|
||||
}
|
||||
ParGCAllocBuffer::retire(end_of_gc, retain);
|
||||
_retired = true;
|
||||
}
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
|
||||
|
@ -37,14 +37,18 @@

// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
return _allocation_context_stats;
}

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
assert(is_in_reserved(addr),
err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
|
||||
@ -63,7 +67,7 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) con
|
||||
template <class T>
|
||||
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
|
||||
HeapRegion* hr = heap_region_containing_raw(addr);
|
||||
if (hr->continuesHumongous()) {
|
||||
if (hr->is_continues_humongous()) {
|
||||
return hr->humongous_start_region();
|
||||
}
|
||||
return hr;
|
||||
@ -95,13 +99,15 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
|
||||
unsigned int* gc_count_before_ret,
|
||||
int* gclocker_retry_count_ret) {
|
||||
assert_heap_not_locked_and_not_at_safepoint();
|
||||
assert(!isHumongous(word_size), "attempt_allocation() should not "
|
||||
assert(!is_humongous(word_size), "attempt_allocation() should not "
|
||||
"be called for humongous allocation requests");
|
||||
|
||||
HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
AllocationContext_t context = AllocationContext::current();
|
||||
HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
if (result == NULL) {
|
||||
result = attempt_allocation_slow(word_size,
|
||||
context,
|
||||
gc_count_before_ret,
|
||||
gclocker_retry_count_ret);
|
||||
}
|
||||
@ -112,17 +118,17 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
|
||||
return result;
|
||||
}
|
||||
|
||||
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
|
||||
word_size) {
|
||||
assert(!isHumongous(word_size),
|
||||
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
|
||||
AllocationContext_t context) {
|
||||
assert(!is_humongous(word_size),
|
||||
"we should not be seeing humongous-size allocations in this path");
|
||||
|
||||
HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
|
||||
false /* bot_updates */);
|
||||
if (result == NULL) {
|
||||
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
|
||||
result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
}
|
||||
if (result != NULL) {
|
||||
dirty_young_block(result, word_size);
|
||||
@ -130,16 +136,17 @@ inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
|
||||
return result;
|
||||
}
|
||||
|
||||
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
|
||||
assert(!isHumongous(word_size),
|
||||
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
|
||||
AllocationContext_t context) {
|
||||
assert(!is_humongous(word_size),
|
||||
"we should not be seeing humongous-size allocations in this path");
|
||||
|
||||
HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
|
||||
true /* bot_updates */);
|
||||
HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
|
||||
true /* bot_updates */);
|
||||
if (result == NULL) {
|
||||
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
|
||||
result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
|
||||
true /* bot_updates */);
|
||||
result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
|
||||
true /* bot_updates */);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
@ -159,7 +166,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
|
||||
assert(word_size > 0, "pre-condition");
|
||||
assert(containing_hr->is_in(start), "it should contain start");
|
||||
assert(containing_hr->is_young(), "it should be young");
|
||||
assert(!containing_hr->isHumongous(), "it should not be humongous");
|
||||
assert(!containing_hr->is_humongous(), "it should not be humongous");
|
||||
|
||||
HeapWord* end = start + word_size;
|
||||
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
|
||||
|
@ -0,0 +1,32 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"

void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
jlong* totals,
jbyte* accuracy,
jint len) {
}
|
@ -192,7 +192,7 @@ public:
bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
bool during_conc_mark = _g1h->mark_in_progress();

assert(!hr->isHumongous(), "sanity");
assert(!hr->is_humongous(), "sanity");
assert(hr->in_collection_set(), "bad CS");

if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
|
@ -43,9 +43,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
_hot_cache_idx = 0;

// For refining the cards in the hot cache in parallel
uint n_workers = (ParallelGCThreads > 0 ?
_g1h->workers()->total_workers() : 1);
_hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
_hot_cache_par_chunk_size = (ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
_hot_cache_par_claimed_idx = 0;

_card_counts.initialize(card_counts_storage);
|
@ -70,6 +70,9 @@ class G1HotCardCache: public CHeapObj<mtGC> {

G1CardCounts _card_counts;

// The number of cached cards a thread claims when flushing the cache
static const int ClaimChunkSize = 32;

bool default_use_cache() const {
return (G1ConcRSLogCacheSize > 0);
}
|
@ -193,76 +193,6 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
|
||||
gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
|
||||
}
|
||||
|
||||
class G1PrepareCompactClosure: public HeapRegionClosure {
|
||||
G1CollectedHeap* _g1h;
|
||||
ModRefBarrierSet* _mrbs;
|
||||
CompactPoint _cp;
|
||||
HeapRegionSetCount _humongous_regions_removed;
|
||||
|
||||
bool is_cp_initialized() const {
|
||||
return _cp.space != NULL;
|
||||
}
|
||||
|
||||
void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
|
||||
// If this is the first live region that we came across which we can compact,
|
||||
// initialize the CompactPoint.
|
||||
if (!is_cp_initialized()) {
|
||||
_cp.space = hr;
|
||||
_cp.threshold = hr->initialize_threshold();
|
||||
}
|
||||
hr->prepare_for_compaction(&_cp);
|
||||
// Also clear the part of the card table that will be unused after
|
||||
// compaction.
|
||||
_mrbs->clear(MemRegion(hr->compaction_top(), end));
|
||||
}
|
||||
|
||||
void free_humongous_region(HeapRegion* hr) {
|
||||
HeapWord* end = hr->end();
|
||||
FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
|
||||
|
||||
assert(hr->startsHumongous(),
|
||||
"Only the start of a humongous region should be freed.");
|
||||
|
||||
hr->set_containing_set(NULL);
|
||||
_humongous_regions_removed.increment(1u, hr->capacity());
|
||||
|
||||
_g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
|
||||
prepare_for_compaction(hr, end);
|
||||
dummy_free_list.remove_all();
|
||||
}
|
||||
|
||||
public:
|
||||
G1PrepareCompactClosure()
|
||||
: _g1h(G1CollectedHeap::heap()),
|
||||
_mrbs(_g1h->g1_barrier_set()),
|
||||
_cp(NULL),
|
||||
_humongous_regions_removed() { }
|
||||
|
||||
void update_sets() {
|
||||
// We'll recalculate total used bytes and recreate the free list
|
||||
// at the end of the GC, so no point in updating those values here.
|
||||
HeapRegionSetCount empty_set;
|
||||
_g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
|
||||
}
|
||||
|
||||
bool doHeapRegion(HeapRegion* hr) {
|
||||
if (hr->isHumongous()) {
|
||||
if (hr->startsHumongous()) {
|
||||
oop obj = oop(hr->bottom());
|
||||
if (obj->is_gc_marked()) {
|
||||
obj->forward_to(obj);
|
||||
} else {
|
||||
free_humongous_region(hr);
|
||||
}
|
||||
} else {
|
||||
assert(hr->continuesHumongous(), "Invalid humongous.");
|
||||
}
|
||||
} else {
|
||||
prepare_for_compaction(hr, hr->end());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void G1MarkSweep::mark_sweep_phase2() {
|
||||
// Now all live objects are marked, compute the new object addresses.
|
||||
@ -271,21 +201,17 @@ void G1MarkSweep::mark_sweep_phase2() {
|
||||
// phase2, phase3 and phase4, but the ValidateMarkSweep live oops
|
||||
// tracking expects us to do so. See comment under phase4.
|
||||
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
|
||||
GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
|
||||
GenMarkSweep::trace("2");
|
||||
|
||||
G1PrepareCompactClosure blk;
|
||||
g1h->heap_region_iterate(&blk);
|
||||
blk.update_sets();
|
||||
prepare_compaction();
|
||||
}
|
||||
|
||||
class G1AdjustPointersClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
if (r->isHumongous()) {
if (r->startsHumongous()) {
if (r->is_humongous()) {
if (r->is_starts_humongous()) {
// We must adjust the pointers on the single H object.
oop obj = oop(r->bottom());
// point all the oops to the new location
@ -340,8 +266,8 @@ public:
G1SpaceCompactClosure() {}

bool doHeapRegion(HeapRegion* hr) {
if (hr->isHumongous()) {
if (hr->startsHumongous()) {
if (hr->is_humongous()) {
if (hr->is_starts_humongous()) {
oop obj = oop(hr->bottom());
if (obj->is_gc_marked()) {
obj->init_mark();
@ -373,3 +299,68 @@ void G1MarkSweep::mark_sweep_phase4() {
|
||||
g1h->heap_region_iterate(&blk);
|
||||
|
||||
}
|
||||
|
||||
void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
|
||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||
g1h->heap_region_iterate(blk);
|
||||
blk->update_sets();
|
||||
}
|
||||
|
||||
void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
|
||||
HeapWord* end = hr->end();
|
||||
FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
|
||||
|
||||
assert(hr->is_starts_humongous(),
|
||||
"Only the start of a humongous region should be freed.");
|
||||
|
||||
hr->set_containing_set(NULL);
|
||||
_humongous_regions_removed.increment(1u, hr->capacity());
|
||||
|
||||
_g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
|
||||
prepare_for_compaction(hr, end);
|
||||
dummy_free_list.remove_all();
|
||||
}
|
||||
|
||||
void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
|
||||
// If this is the first live region that we came across which we can compact,
|
||||
// initialize the CompactPoint.
|
||||
if (!is_cp_initialized()) {
|
||||
_cp.space = hr;
|
||||
_cp.threshold = hr->initialize_threshold();
|
||||
}
|
||||
prepare_for_compaction_work(&_cp, hr, end);
|
||||
}
|
||||
|
||||
void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
|
||||
HeapRegion* hr,
|
||||
HeapWord* end) {
|
||||
hr->prepare_for_compaction(cp);
|
||||
// Also clear the part of the card table that will be unused after
|
||||
// compaction.
|
||||
_mrbs->clear(MemRegion(hr->compaction_top(), end));
|
||||
}
|
||||
|
||||
void G1PrepareCompactClosure::update_sets() {
|
||||
// We'll recalculate total used bytes and recreate the free list
|
||||
// at the end of the GC, so no point in updating those values here.
|
||||
HeapRegionSetCount empty_set;
|
||||
_g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
|
||||
}
|
||||
|
||||
bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
|
||||
if (hr->is_humongous()) {
|
||||
if (hr->is_starts_humongous()) {
|
||||
oop obj = oop(hr->bottom());
|
||||
if (obj->is_gc_marked()) {
|
||||
obj->forward_to(obj);
|
||||
} else {
|
||||
free_humongous_region(hr);
|
||||
}
|
||||
} else {
|
||||
assert(hr->is_continues_humongous(), "Invalid humongous.");
|
||||
}
|
||||
} else {
|
||||
prepare_for_compaction(hr, hr->end());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
@ -43,7 +43,7 @@ class ReferenceProcessor;
// compaction.
//
// Class unloading will only occur when a full gc is invoked.

class G1PrepareCompactClosure;

class G1MarkSweep : AllStatic {
friend class VM_G1MarkSweep;
@ -70,6 +70,30 @@ class G1MarkSweep : AllStatic {
static void mark_sweep_phase4();

static void allocate_stacks();
static void prepare_compaction();
static void prepare_compaction_work(G1PrepareCompactClosure* blk);
};

class G1PrepareCompactClosure : public HeapRegionClosure {
protected:
G1CollectedHeap* _g1h;
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
HeapRegionSetCount _humongous_regions_removed;

virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end);
void free_humongous_region(HeapRegion* hr);
bool is_cp_initialized() const { return _cp.space != NULL; }

public:
G1PrepareCompactClosure() :
_g1h(G1CollectedHeap::heap()),
_mrbs(_g1h->g1_barrier_set()),
_humongous_regions_removed() { }

void update_sets();
bool doHeapRegion(HeapRegion* hr);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
|
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc_implementation/g1/g1MarkSweep.hpp"
|
||||
|
||||
void G1MarkSweep::prepare_compaction() {
|
||||
G1PrepareCompactClosure blk;
|
||||
G1MarkSweep::prepare_compaction_work(&blk);
|
||||
}
|
@ -38,11 +38,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
_age_table(false), _scanner(g1h, rp),
_strong_roots_time(0), _term_time(0),
_alloc_buffer_waste(0), _undo_waste(0) {
_strong_roots_time(0), _term_time(0) {
_scanner.set_par_scan_thread_state(this);
// we allocate G1YoungSurvRateNumRegions plus one entries, since
// we "sacrifice" entry 0 to keep track of surviving bytes for
@ -60,14 +57,14 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));

_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);

_start = os::elapsedTime();
}

G1ParScanThreadState::~G1ParScanThreadState() {
retire_alloc_buffers();
_g1_par_allocator->retire_alloc_buffers();
delete _g1_par_allocator;
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
}

@ -90,14 +87,16 @@ G1ParScanThreadState::print_termination_stats(int i,
|
||||
const double elapsed_ms = elapsed_time() * 1000.0;
|
||||
const double s_roots_ms = strong_roots_time() * 1000.0;
|
||||
const double term_ms = term_time() * 1000.0;
|
||||
const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
|
||||
const size_t undo_waste = _g1_par_allocator->undo_waste();
|
||||
st->print_cr("%3d %9.2f %9.2f %6.2f "
|
||||
"%9.2f %6.2f " SIZE_FORMAT_W(8) " "
|
||||
SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
|
||||
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
|
||||
term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
|
||||
(alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
|
||||
alloc_buffer_waste() * HeapWordSize / K,
|
||||
undo_waste() * HeapWordSize / K);
|
||||
(alloc_buffer_waste + undo_waste) * HeapWordSize / K,
|
||||
alloc_buffer_waste * HeapWordSize / K,
|
||||
undo_waste * HeapWordSize / K);
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
@ -164,12 +163,13 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
|
||||
: m->age();
|
||||
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
|
||||
word_sz);
|
||||
HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
|
||||
AllocationContext_t context = from_region->allocation_context();
|
||||
HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
|
||||
#ifndef PRODUCT
|
||||
// Should this evacuation fail?
|
||||
if (_g1h->evacuation_should_fail()) {
|
||||
if (obj_ptr != NULL) {
|
||||
undo_allocation(alloc_purpose, obj_ptr, word_sz);
|
||||
_g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
|
||||
obj_ptr = NULL;
|
||||
}
|
||||
}
|
||||
@ -246,66 +246,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
|
||||
obj->oop_iterate_backwards(&_scanner);
|
||||
}
|
||||
} else {
|
||||
undo_allocation(alloc_purpose, obj_ptr, word_sz);
|
||||
_g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
|
||||
obj = forward_ptr;
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
|
||||
HeapWord* obj = NULL;
|
||||
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
|
||||
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
|
||||
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
|
||||
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
|
||||
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
|
||||
|
||||
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
|
||||
if (buf == NULL) {
|
||||
return NULL; // Let caller handle allocation failure.
|
||||
}
|
||||
// Otherwise.
|
||||
alloc_buf->set_word_size(gclab_word_size);
|
||||
alloc_buf->set_buf(buf);
|
||||
|
||||
obj = alloc_buf->allocate(word_sz);
|
||||
assert(obj != NULL, "buffer was definitely big enough...");
|
||||
} else {
|
||||
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
|
||||
}
|
||||
return obj;
|
||||
}
|
||||
|
||||
void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
|
||||
if (alloc_buffer(purpose)->contains(obj)) {
|
||||
assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
|
||||
"should contain whole object");
|
||||
alloc_buffer(purpose)->undo_allocation(obj, word_sz);
|
||||
} else {
|
||||
CollectedHeap::fill_with_object(obj, word_sz);
|
||||
add_to_undo_waste(word_sz);
|
||||
}
|
||||
}
|
||||
|
||||
HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
|
||||
HeapWord* obj = NULL;
|
||||
if (purpose == GCAllocForSurvived) {
|
||||
obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
|
||||
} else {
|
||||
obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
|
||||
}
|
||||
if (obj != NULL) {
|
||||
return obj;
|
||||
}
|
||||
return allocate_slow(purpose, word_sz);
|
||||
}
|
||||
|
||||
void G1ParScanThreadState::retire_alloc_buffers() {
|
||||
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
|
||||
size_t waste = _alloc_buffers[ap]->words_remaining();
|
||||
add_to_alloc_buffer_waste(waste);
|
||||
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
|
||||
true /* end_of_gc */,
|
||||
false /* retain */);
|
||||
}
|
||||
}
|
||||
|
@ -46,9 +46,8 @@ class G1ParScanThreadState : public StackObj {
|
||||
G1SATBCardTableModRefBS* _ct_bs;
|
||||
G1RemSet* _g1_rem;
|
||||
|
||||
G1ParGCAllocBuffer _surviving_alloc_buffer;
|
||||
G1ParGCAllocBuffer _tenured_alloc_buffer;
|
||||
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
|
||||
G1ParGCAllocator* _g1_par_allocator;
|
||||
|
||||
ageTable _age_table;
|
||||
|
||||
G1ParScanClosure _scanner;
|
||||
@ -78,7 +77,6 @@ class G1ParScanThreadState : public StackObj {
|
||||
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
|
||||
|
||||
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
|
||||
|
||||
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
|
||||
|
||||
DirtyCardQueue& dirty_card_queue() { return _dcq; }
|
||||
@ -90,13 +88,6 @@ class G1ParScanThreadState : public StackObj {
|
||||
|
||||
ageTable* age_table() { return &_age_table; }
|
||||
|
||||
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
|
||||
return _alloc_buffers[purpose];
|
||||
}
|
||||
|
||||
size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
|
||||
size_t undo_waste() const { return _undo_waste; }
|
||||
|
||||
#ifdef ASSERT
|
||||
bool queue_is_empty() const { return _refs->is_empty(); }
|
||||
|
||||
@ -110,7 +101,7 @@ class G1ParScanThreadState : public StackObj {
|
||||
_refs->push(ref);
|
||||
}
|
||||
|
||||
template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
|
||||
template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
|
||||
// If the new value of the field points to the same region or
|
||||
// is the to-space, we don't need to include it in the Rset updates.
|
||||
if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
|
||||
@ -121,12 +112,6 @@ class G1ParScanThreadState : public StackObj {
|
||||
}
|
||||
}
|
||||
}
|
||||
private:
|
||||
|
||||
inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
|
||||
inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
|
||||
inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
|
||||
|
||||
public:
|
||||
|
||||
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
|
||||
@ -172,8 +157,6 @@ class G1ParScanThreadState : public StackObj {
|
||||
}
|
||||
|
||||
private:
|
||||
void retire_alloc_buffers();
|
||||
|
||||
#define G1_PARTIAL_ARRAY_MASK 0x2
|
||||
|
||||
inline bool has_partial_array_mask(oop* ref) const {
|
||||
|
@ -413,7 +413,7 @@ public:
_ctbs(_g1h->g1_barrier_set()) {}

bool doHeapRegion(HeapRegion* r) {
if (!r->continuesHumongous()) {
if (!r->is_continues_humongous()) {
r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
}
return false;
|
@ -119,7 +119,7 @@ public:
// Record, if necessary, the fact that *p (where "p" is in region "from",
// which is required to be non-NULL) has changed to a new non-NULL value.
template <class T> void write_ref(HeapRegion* from, T* p);
template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
template <class T> void par_write_ref(HeapRegion* from, T* p, uint tid);

// Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
// or card, respectively, such that a region or card with a corresponding
|
@ -44,7 +44,7 @@ inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
}

template <class T>
inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
oop obj = oopDesc::load_decode_heap_oop(p);
if (obj == NULL) {
return;
|
@ -263,7 +263,7 @@ public:
current = &_free;
} else if (r->is_young()) {
current = &_young;
} else if (r->isHumongous()) {
} else if (r->is_humongous()) {
current = &_humonguous;
} else if (r->is_old()) {
current = &_old;
|
@ -28,6 +28,7 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
@ -138,32 +139,16 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
}
}

// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
#define MIN_REGION_SIZE ( 1024 * 1024 )

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
#define TARGET_REGION_NUMBER 2048

size_t HeapRegion::max_region_size() {
return (size_t)MAX_REGION_SIZE;
return HeapRegionBounds::max_size();
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
|
||||
uintx region_size = G1HeapRegionSize;
|
||||
if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
|
||||
size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
|
||||
region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
|
||||
(uintx) MIN_REGION_SIZE);
|
||||
region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
|
||||
(uintx) HeapRegionBounds::min_size());
|
||||
}
|
||||
|
||||
int region_size_log = log2_long((jlong) region_size);
|
||||
@ -173,10 +158,10 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
|
||||
region_size = ((uintx)1 << region_size_log);
|
||||
|
||||
// Now make sure that we don't go over or under our limits.
|
||||
if (region_size < MIN_REGION_SIZE) {
|
||||
region_size = MIN_REGION_SIZE;
|
||||
} else if (region_size > MAX_REGION_SIZE) {
|
||||
region_size = MAX_REGION_SIZE;
|
||||
if (region_size < HeapRegionBounds::min_size()) {
|
||||
region_size = HeapRegionBounds::min_size();
|
||||
} else if (region_size > HeapRegionBounds::max_size()) {
|
||||
region_size = HeapRegionBounds::max_size();
|
||||
}
|
||||
|
||||
// And recalculate the log.
|
||||
@ -213,11 +198,12 @@ void HeapRegion::reset_after_compaction() {
|
||||
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
|
||||
assert(_humongous_start_region == NULL,
|
||||
"we should have already filtered out humongous regions");
|
||||
assert(_end == _orig_end,
|
||||
assert(_end == orig_end(),
|
||||
"we should have already filtered out humongous regions");
|
||||
|
||||
_in_collection_set = false;
|
||||
|
||||
set_allocation_context(AllocationContext::system());
|
||||
set_young_index_in_cset(-1);
|
||||
uninstall_surv_rate_group();
|
||||
set_free();
|
||||
@ -264,9 +250,9 @@ void HeapRegion::calc_gc_efficiency() {
|
||||
_gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
|
||||
}
|
||||
|
||||
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
|
||||
assert(!isHumongous(), "sanity / pre-condition");
|
||||
assert(end() == _orig_end,
|
||||
void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
|
||||
assert(!is_humongous(), "sanity / pre-condition");
|
||||
assert(end() == orig_end(),
|
||||
"Should be normal before the humongous object allocation");
|
||||
assert(top() == bottom(), "should be empty");
|
||||
assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
|
||||
@ -278,30 +264,30 @@ void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
|
||||
_offsets.set_for_starts_humongous(new_top);
|
||||
}
|
||||
|
||||
void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
|
||||
assert(!isHumongous(), "sanity / pre-condition");
|
||||
assert(end() == _orig_end,
|
||||
void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
|
||||
assert(!is_humongous(), "sanity / pre-condition");
|
||||
assert(end() == orig_end(),
|
||||
"Should be normal before the humongous object allocation");
|
||||
assert(top() == bottom(), "should be empty");
|
||||
assert(first_hr->startsHumongous(), "pre-condition");
|
||||
assert(first_hr->is_starts_humongous(), "pre-condition");
|
||||
|
||||
_type.set_continues_humongous();
|
||||
_humongous_start_region = first_hr;
|
||||
}
|
||||
|
||||
void HeapRegion::clear_humongous() {
|
||||
assert(isHumongous(), "pre-condition");
|
||||
assert(is_humongous(), "pre-condition");
|
||||
|
||||
if (startsHumongous()) {
|
||||
if (is_starts_humongous()) {
|
||||
assert(top() <= end(), "pre-condition");
|
||||
set_end(_orig_end);
|
||||
set_end(orig_end());
|
||||
if (top() > end()) {
|
||||
// at least one "continues humongous" region after it
|
||||
set_top(end());
|
||||
}
|
||||
} else {
|
||||
// continues humongous
|
||||
assert(end() == _orig_end, "sanity");
|
||||
assert(end() == orig_end(), "sanity");
|
||||
}
|
||||
|
||||
assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
|
||||
@ -324,9 +310,10 @@ HeapRegion::HeapRegion(uint hrm_index,
|
||||
MemRegion mr) :
|
||||
G1OffsetTableContigSpace(sharedOffsetArray, mr),
|
||||
_hrm_index(hrm_index),
|
||||
_allocation_context(AllocationContext::system()),
|
||||
_humongous_start_region(NULL),
|
||||
_in_collection_set(false),
|
||||
_next_in_special_set(NULL), _orig_end(NULL),
|
||||
_next_in_special_set(NULL),
|
||||
_claimed(InitialClaimValue), _evacuation_failed(false),
|
||||
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
|
||||
_next_young_region(NULL),
|
||||
@ -349,10 +336,14 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
|
||||
|
||||
G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
|
||||
|
||||
_orig_end = mr.end();
|
||||
hr_clear(false /*par*/, false /*clear_space*/);
|
||||
set_top(bottom());
|
||||
record_top_and_timestamp();

assert(mr.end() == orig_end(),
err_msg("Given region end address " PTR_FORMAT " should match exactly "
"bottom plus one region size, i.e. " PTR_FORMAT,
p2i(mr.end()), p2i(orig_end())));
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
@ -663,7 +654,7 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
return;
}

if (continuesHumongous()) {
if (is_continues_humongous()) {
if (strong_code_roots_length > 0) {
gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
"region but has "SIZE_FORMAT" code root entries",
@ -683,6 +674,8 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
st->print("AC%4u", allocation_context());

st->print(" %2s", get_short_type_str());
if (in_collection_set())
st->print(" CS");
@ -788,7 +781,7 @@ public:
HeapRegion* to = _g1h->heap_region_containing(obj);
if (from != NULL && to != NULL &&
from != to &&
!to->isHumongous()) {
!to->is_humongous()) {
jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
jbyte cv_field = *_bs->byte_for_const(p);
const jbyte dirty = CardTableModRefBS::dirty_card_val();
@ -842,19 +835,19 @@ void HeapRegion::verify(VerifyOption vo,
HeapWord* p = bottom();
HeapWord* prev_p = NULL;
VerifyLiveClosure vl_cl(g1, vo);
bool is_humongous = isHumongous();
bool is_region_humongous = is_humongous();
size_t object_num = 0;
while (p < top()) {
oop obj = oop(p);
size_t obj_size = block_size(p);
object_num += 1;

if (is_humongous != g1->isHumongous(obj_size) &&
if (is_region_humongous != g1->is_humongous(obj_size) &&
!g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
SIZE_FORMAT" words) in a %shumongous region",
p, g1->isHumongous(obj_size) ? "" : "non-",
obj_size, is_humongous ? "" : "non-");
p, g1->is_humongous(obj_size) ? "" : "non-",
obj_size, is_region_humongous ? "" : "non-");
*failures = true;
return;
}
@ -963,7 +956,7 @@ void HeapRegion::verify(VerifyOption vo,
}
}

if (is_humongous && object_num > 1) {
if (is_region_humongous && object_num > 1) {
gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
"but has "SIZE_FORMAT", objects",
bottom(), end(), object_num);
|
@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/heapRegionType.hpp"
@ -222,13 +223,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
// The index of this region in the heap region sequence.
uint _hrm_index;

AllocationContext_t _allocation_context;

HeapRegionType _type;

// For a humongous region, region in which it starts.
HeapRegion* _humongous_start_region;
// For the start region of a humongous sequence, it's original end().
HeapWord* _orig_end;

// True iff the region is in current collection_set.
bool _in_collection_set;

@ -417,9 +417,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
bool is_eden() const { return _type.is_eden(); }
bool is_survivor() const { return _type.is_survivor(); }

bool isHumongous() const { return _type.is_humongous(); }
bool startsHumongous() const { return _type.is_starts_humongous(); }
bool continuesHumongous() const { return _type.is_continues_humongous(); }
bool is_humongous() const { return _type.is_humongous(); }
bool is_starts_humongous() const { return _type.is_starts_humongous(); }
bool is_continues_humongous() const { return _type.is_continues_humongous(); }

bool is_old() const { return _type.is_old(); }

@ -431,10 +431,10 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Return the number of distinct regions that are covered by this region:
// 1 if the region is not humongous, >= 1 if the region is humongous.
uint region_num() const {
if (!isHumongous()) {
if (!is_humongous()) {
return 1U;
} else {
assert(startsHumongous(), "doesn't make sense on HC regions");
assert(is_starts_humongous(), "doesn't make sense on HC regions");
assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
}
@ -443,7 +443,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Return the index + 1 of the last HC regions that's associated
// with this HS region.
uint last_hc_index() const {
assert(startsHumongous(), "don't call this otherwise");
assert(is_starts_humongous(), "don't call this otherwise");
return hrm_index() + region_num();
}

@ -452,7 +452,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
// their _end set up to be the end of the last continues region of the
// corresponding humongous object.
bool is_in_reserved_raw(const void* p) const {
return _bottom <= p && p < _orig_end;
return _bottom <= p && p < orig_end();
}

// Makes the current region be a "starts humongous" region, i.e.,
@ -478,12 +478,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
// humongous regions can be calculated by just looking at the
// "starts humongous" regions and by ignoring the "continues
// humongous" regions.
void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);

// Makes the current region be a "continues humongous'
// region. first_hr is the "start humongous" region of the series
// which this region will be part of.
void set_continuesHumongous(HeapRegion* first_hr);
void set_continues_humongous(HeapRegion* first_hr);

// Unsets the humongous-related fields on the region.
void clear_humongous();
@ -513,6 +513,14 @@ class HeapRegion: public G1OffsetTableContigSpace {
_next_in_special_set = r;
}

void set_allocation_context(AllocationContext_t context) {
_allocation_context = context;
}

AllocationContext_t allocation_context() const {
return _allocation_context;
}

// Methods used by the HeapRegionSetBase class and subclasses.

// Getter and setter for the next and prev fields used to link regions into
@ -556,7 +564,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

HeapWord* orig_end() const { return _orig_end; }
// For the start region of a humongous sequence, it's original end().
HeapWord* orig_end() const { return _bottom + GrainWords; }

// Reset HR stuff to default values.
void hr_clear(bool par, bool clear_space, bool locked = false);
@ -603,7 +612,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
bool is_marked() { return _prev_top_at_mark_start != bottom(); }

void reset_during_compaction() {
assert(isHumongous() && startsHumongous(),
assert(is_starts_humongous(),
"should only be called for starts humongous regions");

zero_marked_bytes();
|
@ -0,0 +1,52 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP

class HeapRegionBounds : public AllStatic {
private:
// Minimum region size; we won't go lower than that.
// We might want to decrease this in the future, to deal with small
// heaps a bit more efficiently.
static const size_t MIN_REGION_SIZE = 1024 * 1024;

// Maximum region size; we don't go higher than that. There's a good
// reason for having an upper bound. We don't want regions to get too
// large, otherwise cleanup's effectiveness would decrease as there
// will be fewer opportunities to find totally empty regions after
// marking.
static const size_t MAX_REGION_SIZE = 32 * 1024 * 1024;

// The automatic region size calculation will try to have around this
// many regions in the heap (based on the min heap size).
static const size_t TARGET_REGION_NUMBER = 2048;

public:
static inline size_t min_size();
static inline size_t max_size();
static inline size_t target_number();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
|
@ -0,0 +1,37 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "gc_implementation/g1/heapRegionBounds.hpp"

size_t HeapRegionBounds::min_size() {
return MIN_REGION_SIZE;
}

size_t HeapRegionBounds::max_size() {
return MAX_REGION_SIZE;
}

size_t HeapRegionBounds::target_number() {
return TARGET_REGION_NUMBER;
}
|
@ -66,10 +66,11 @@ bool HeapRegionManager::is_free(HeapRegion* hr) const {
#endif

HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrm_index);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
assert(reserved().contains(mr), "invariant");
return new HeapRegion(hrm_index, G1CollectedHeap::heap()->bot_shared(), mr);
return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
}

void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
@ -281,7 +282,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
// We'll ignore "continues humongous" regions (we'll process them
// when we come across their corresponding "start humongous"
// region) and regions already claimed.
if (r->claim_value() == claim_value || r->continuesHumongous()) {
if (r->claim_value() == claim_value || r->is_continues_humongous()) {
continue;
}
// OK, try to claim it
@ -289,7 +290,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
continue;
}
// Success!
if (r->startsHumongous()) {
if (r->is_starts_humongous()) {
// If the region is "starts humongous" we'll iterate over its
// "continues humongous" first; in fact we'll do them
// first. The order is important. In one case, calling the
@ -301,7 +302,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
HeapRegion* chr = _regions.get_by_index(ch_index);

assert(chr->continuesHumongous(), "Must be humongous region");
assert(chr->is_continues_humongous(), "Must be humongous region");
assert(chr->humongous_start_region() == r,
err_msg("Must work on humongous continuation of the original start region "
PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
@ -311,7 +312,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
bool claim_result = chr->claimHeapRegion(claim_value);
// We should always be able to claim it; no one else should
// be trying to claim this region.
guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
guarantee(claim_result, "We should always be able to claim the is_continues_humongous part of the humongous object");

bool res2 = blk->doHeapRegion(chr);
if (res2) {
@ -322,7 +323,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
// does something with "continues humongous" regions
// clears them). We might have to weaken it in the future,
// but let's leave these two asserts here for extra safety.
assert(chr->continuesHumongous(), "should still be the case");
assert(chr->is_continues_humongous(), "should still be the case");
assert(chr->humongous_start_region() == r, "sanity");
}
}
@ -424,7 +425,7 @@ void HeapRegionManager::verify() {
// this method may be called, we have only completed allocation of the regions,
// but not put into a region set.
prev_committed = true;
if (hr->startsHumongous()) {
if (hr->is_starts_humongous()) {
prev_end = hr->orig_end();
} else {
prev_end = hr->end();
|
@ -419,7 +419,7 @@ void OtherRegionsTable::print_from_card_cache() {
FromCardCache::print();
}

void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
uint cur_hrm_ind = hr()->hrm_index();

if (G1TraceHeapRegionRememberedSet) {
@ -435,10 +435,10 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
hr()->bottom(), from_card,
FromCardCache::at((uint)tid, cur_hrm_ind));
FromCardCache::at(tid, cur_hrm_ind));
}

if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
if (FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" from-card cache hit.");
}
@ -493,7 +493,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
return;
} else {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr(" [tid %d] sparse table entry "
gclog_or_tty->print_cr(" [tid %u] sparse table entry "
"overflow(f: %d, t: %u)",
tid, from_hrm_ind, cur_hrm_ind);
}
|
@ -179,7 +179,7 @@ public:

// For now. Could "expand" some tables in the future, so that this made
// sense.
void add_reference(OopOrNarrowOopStar from, int tid);
void add_reference(OopOrNarrowOopStar from, uint tid);

// Removes any entries shown by the given bitmaps to contain only dead
// objects.
@ -301,7 +301,7 @@ public:
}

// Used in the parallel case.
void add_reference(OopOrNarrowOopStar from, int tid) {
void add_reference(OopOrNarrowOopStar from, uint tid) {
_other_regions.add_reference(from, tid);
}

|
@ -41,7 +41,7 @@ void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
void HeapRegionSetBase::verify_region(HeapRegion* hr) {
assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index()));
assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions
assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
assert(hr->is_humongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
|
@ -30,8 +30,8 @@ bool HeapRegionType::is_valid(Tag tag) {
case FreeTag:
case EdenTag:
case SurvTag:
case HumStartsTag:
case HumContTag:
case StartsHumongousTag:
case ContinuesHumongousTag:
case OldTag:
return true;
}
@ -41,12 +41,12 @@ bool HeapRegionType::is_valid(Tag tag) {
const char* HeapRegionType::get_str() const {
hrt_assert_is_valid(_tag);
switch (_tag) {
case FreeTag: return "FREE";
case EdenTag: return "EDEN";
case SurvTag: return "SURV";
case HumStartsTag: return "HUMS";
case HumContTag: return "HUMC";
case OldTag: return "OLD";
case FreeTag: return "FREE";
case EdenTag: return "EDEN";
case SurvTag: return "SURV";
case StartsHumongousTag: return "HUMS";
case ContinuesHumongousTag: return "HUMC";
case OldTag: return "OLD";
}
ShouldNotReachHere();
// keep some compilers happy
@ -56,12 +56,12 @@ const char* HeapRegionType::get_str() const {
const char* HeapRegionType::get_short_str() const {
hrt_assert_is_valid(_tag);
switch (_tag) {
case FreeTag: return "F";
case EdenTag: return "E";
case SurvTag: return "S";
case HumStartsTag: return "HS";
case HumContTag: return "HC";
case OldTag: return "O";
case FreeTag: return "F";
case EdenTag: return "E";
case SurvTag: return "S";
case StartsHumongousTag: return "HS";
case ContinuesHumongousTag: return "HC";
case OldTag: return "O";
}
ShouldNotReachHere();
// keep some compilers happy
|
@ -49,22 +49,22 @@ private:
// 0001 1 [ 3] Survivor
//
// 0010 0 Humongous Mask
// 0010 0 [ 4] Humongous Starts
// 0010 1 [ 5] Humongous Continues
// 0010 0 [ 4] Starts Humongous
// 0010 1 [ 5] Continues Humongous
//
// 01000 [ 8] Old
typedef enum {
FreeTag = 0,
FreeTag = 0,

YoungMask = 2,
EdenTag = YoungMask,
SurvTag = YoungMask + 1,
YoungMask = 2,
EdenTag = YoungMask,
SurvTag = YoungMask + 1,

HumMask = 4,
HumStartsTag = HumMask,
HumContTag = HumMask + 1,
HumongousMask = 4,
StartsHumongousTag = HumongousMask,
ContinuesHumongousTag = HumongousMask + 1,

OldTag = 8
OldTag = 8
} Tag;

volatile Tag _tag;
@ -104,9 +104,9 @@ public:
bool is_eden() const { return get() == EdenTag; }
bool is_survivor() const { return get() == SurvTag; }

bool is_humongous() const { return (get() & HumMask) != 0; }
bool is_starts_humongous() const { return get() == HumStartsTag; }
bool is_continues_humongous() const { return get() == HumContTag; }
bool is_humongous() const { return (get() & HumongousMask) != 0; }
bool is_starts_humongous() const { return get() == StartsHumongousTag; }
bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }

bool is_old() const { return get() == OldTag; }

@ -118,8 +118,8 @@ public:
void set_eden_pre_gc() { set_from(EdenTag, SurvTag); }
void set_survivor() { set_from(SurvTag, FreeTag); }

void set_starts_humongous() { set_from(HumStartsTag, FreeTag); }
void set_continues_humongous() { set_from(HumContTag, FreeTag); }
void set_starts_humongous() { set_from(StartsHumongousTag, FreeTag); }
void set_continues_humongous() { set_from(ContinuesHumongousTag, FreeTag); }

void set_old() { set(OldTag); }

|
@ -45,11 +45,13 @@
nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
nonstatic_field(G1Allocator, _summary_bytes_used, size_t) \
\
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _allocator, G1Allocator*) \
\
nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \
@ -72,14 +74,16 @@
\
declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
declare_type(HeapRegion, G1OffsetTableContigSpace) \
declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionManager) \
declare_toplevel_type(HeapRegionSetBase) \
declare_toplevel_type(HeapRegionSetCount) \
declare_toplevel_type(G1MonitoringSupport) \
declare_toplevel_type(G1Allocator) \
\
declare_toplevel_type(G1CollectedHeap*) \
declare_toplevel_type(HeapRegion*) \
declare_toplevel_type(G1MonitoringSupport*) \
declare_toplevel_type(G1Allocator*) \


#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
|
@ -45,7 +45,8 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
void VM_G1CollectForAllocation::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCCauseSetter x(g1h, _gc_cause);
_result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);

_result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
assert(_result == NULL || _pause_succeeded,
"if we get back a result, the pause should have succeeded");
}
@ -99,7 +100,7 @@ void VM_G1IncCollectionPause::doit() {

if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
false /* expect_null_cur_alloc_region */);
if (_result != NULL) {
// If we can successfully allocate before we actually do the
@ -152,7 +153,7 @@ void VM_G1IncCollectionPause::doit() {
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) {
// An allocation had been requested.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
true /* expect_null_cur_alloc_region */);
} else {
assert(_result == NULL, "invariant");
|
@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"

// VM_operations for the G1 collector.
@ -40,6 +41,7 @@ protected:
size_t _word_size;
HeapWord* _result;
bool _pause_succeeded;
AllocationContext_t _allocation_context;

public:
VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
@ -49,6 +51,8 @@ public:
_word_size(word_size), _result(NULL), _pause_succeeded(false) { }
HeapWord* result() { return _result; }
bool pause_succeeded() { return _pause_succeeded; }
void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
AllocationContext_t allocation_context() { return _allocation_context; }
};

class VM_G1CollectFull: public VM_GC_Operation {
|
@ -288,7 +288,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
while (p < to) {
Prefetch::write(p, interval);
oop m = oop(p);
assert(m->is_oop_or_null(), "check for header");
assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
m->push_contents(pm);
p += m->size();
}
@ -296,7 +296,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
} else {
while (p < to) {
oop m = oop(p);
assert(m->is_oop_or_null(), "check for header");
assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
m->push_contents(pm);
p += m->size();
}
|
@ -74,10 +74,9 @@ jint ParallelScavengeHeap::initialize() {
return JNI_ENOMEM;
}

_reserved = MemRegion((HeapWord*)heap_rs.base(),
(HeapWord*)(heap_rs.base() + heap_rs.size()));
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
barrier_set->initialize();
_barrier_set = barrier_set;
oopDesc::set_bs(_barrier_set);
|
@ -2882,7 +2882,7 @@ void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
start_array->allocate_block(addr);
}
oop(addr)->update_contents(cm);
assert(oop(addr)->is_oop_or_null(), "should be an oop now");
assert(oop(addr)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr))));
}
}
}
@ -3366,7 +3366,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {

oop moved_oop = (oop) destination();
moved_oop->update_contents(compaction_manager());
assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
assert(moved_oop->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop)));

update_state(words);
assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
|
@ -582,6 +582,14 @@ void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
}
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
// It is important to do this in a way such that concurrent readers can't
// temporarily think something is in the heap. (Seen this happen in asserts.)
_reserved.set_word_size(0);
_reserved.set_start(start);
_reserved.set_end(end);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
|
Some files were not shown because too many files have changed in this diff