commit 89495909c8
Merge
@@ -275,3 +275,4 @@ ea2f7981236f3812436958748ab3d26e80a35130 jdk9-b28
 36e9bc875325813ac9c44ac0c617a463091fa9f5 jdk9-b30
 69a84c16d9c28e0e3d504b9c8766c24bafcd58f6 jdk9-b31
 7e3512dae8e020d44399c0f1c579ff1fe3090ed6 jdk9-b32
+e4ba01b726e263953ae129be37c94de6ed145b1d jdk9-b33
@@ -435,3 +435,4 @@ deb29e92f68ace2808a36ecfa18c7d61dcb645bb jdk9-b29
 5c722dffbc0f34eb8d903dca7b261e52248fa17e jdk9-b30
 9f7d155d28e519f3e4645dc21cf185c25f3176ed jdk9-b31
 af46576a8d7cb4003028b8ee8bf408cfe227315b jdk9-b32
+9b3f5e4f33725f7c1d9b8e523133fe8383a54d9f jdk9-b33
@@ -32,12 +32,10 @@ import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
 
 public class CodeCache {
-  private static AddressField heapField;
+  private static GrowableArray<CodeHeap> heapArray;
   private static AddressField scavengeRootNMethodsField;
   private static VirtualConstructor virtualConstructor;
 
-  private CodeHeap heap;
-
   static {
     VM.registerVMInitializedObserver(new Observer() {
       public void update(Observable o, Object data) {
@@ -49,7 +47,10 @@ public class CodeCache {
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("CodeCache");
 
-    heapField = type.getAddressField("_heap");
+    // Get array of CodeHeaps
+    AddressField heapsField = type.getAddressField("_heaps");
+    heapArray = GrowableArray.create(heapsField.getValue(), new StaticBaseConstructor<CodeHeap>(CodeHeap.class));
+
     scavengeRootNMethodsField = type.getAddressField("_scavenge_root_nmethods");
 
     virtualConstructor = new VirtualConstructor(db);
@@ -67,16 +68,17 @@ public class CodeCache {
     }
   }
 
-  public CodeCache() {
-    heap = (CodeHeap) VMObjectFactory.newObject(CodeHeap.class, heapField.getValue());
-  }
-
   public NMethod scavengeRootMethods() {
     return (NMethod) VMObjectFactory.newObject(NMethod.class, scavengeRootNMethodsField.getValue());
   }
 
   public boolean contains(Address p) {
-    return getHeap().contains(p);
+    for (int i = 0; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).contains(p)) {
+        return true;
+      }
+    }
+    return false;
   }
 
   /** When VM.getVM().isDebugging() returns true, this behaves like
@@ -97,14 +99,24 @@ public class CodeCache {
 
   public CodeBlob findBlobUnsafe(Address start) {
     CodeBlob result = null;
+    CodeHeap containing_heap = null;
+    for (int i = 0; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).contains(start)) {
+        containing_heap = heapArray.at(i);
+        break;
+      }
+    }
+    if (containing_heap == null) {
+      return null;
+    }
 
     try {
-      result = (CodeBlob) virtualConstructor.instantiateWrapperFor(getHeap().findStart(start));
+      result = (CodeBlob) virtualConstructor.instantiateWrapperFor(containing_heap.findStart(start));
     }
     catch (WrongTypeException wte) {
       Address cbAddr = null;
       try {
-        cbAddr = getHeap().findStart(start);
+        cbAddr = containing_heap.findStart(start);
       }
       catch (Exception findEx) {
         findEx.printStackTrace();
@@ -167,31 +179,32 @@ public class CodeCache {
   }
 
   public void iterate(CodeCacheVisitor visitor) {
-    CodeHeap heap = getHeap();
-    Address ptr = heap.begin();
-    Address end = heap.end();
-
-    visitor.prologue(ptr, end);
-    CodeBlob lastBlob = null;
-    while (ptr != null && ptr.lessThan(end)) {
-      try {
-        // Use findStart to get a pointer inside blob other findBlob asserts
-        CodeBlob blob = findBlobUnsafe(heap.findStart(ptr));
-        if (blob != null) {
-          visitor.visit(blob);
-          if (blob == lastBlob) {
-            throw new InternalError("saw same blob twice");
-          }
-          lastBlob = blob;
-        }
-      } catch (RuntimeException e) {
-        e.printStackTrace();
-      }
-      Address next = heap.nextBlock(ptr);
-      if (next != null && next.lessThan(ptr)) {
-        throw new InternalError("pointer moved backwards");
-      }
-      ptr = next;
-    }
+    visitor.prologue(lowBound(), highBound());
+
+    CodeBlob lastBlob = null;
+    for (int i = 0; i < heapArray.length(); ++i) {
+      CodeHeap current_heap = heapArray.at(i);
+      Address ptr = current_heap.begin();
+      while (ptr != null && ptr.lessThan(current_heap.end())) {
+        try {
+          // Use findStart to get a pointer inside blob other findBlob asserts
+          CodeBlob blob = findBlobUnsafe(current_heap.findStart(ptr));
+          if (blob != null) {
+            visitor.visit(blob);
+            if (blob == lastBlob) {
+              throw new InternalError("saw same blob twice");
+            }
+            lastBlob = blob;
+          }
+        } catch (RuntimeException e) {
+          e.printStackTrace();
+        }
+        Address next = current_heap.nextBlock(ptr);
+        if (next != null && next.lessThan(ptr)) {
+          throw new InternalError("pointer moved backwards");
+        }
+        ptr = next;
+      }
+    }
     visitor.epilogue();
   }
@@ -200,7 +213,23 @@ public class CodeCache {
   // Internals only below this point
   //
 
-  private CodeHeap getHeap() {
-    return heap;
+  private Address lowBound() {
+    Address low = heapArray.at(0).begin();
+    for (int i = 1; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).begin().lessThan(low)) {
+        low = heapArray.at(i).begin();
+      }
+    }
+    return low;
+  }
+
+  private Address highBound() {
+    Address high = heapArray.at(0).end();
+    for (int i = 1; i < heapArray.length(); ++i) {
+      if (heapArray.at(i).end().greaterThan(high)) {
+        high = heapArray.at(i).end();
+      }
+    }
+    return high;
   }
 }
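Taken together, the CodeCache hunks above replace the single _heap field with the _heaps GrowableArray introduced by the segmented code cache, so contains(), findBlobUnsafe() and iterate() now walk every CodeHeap. A minimal usage sketch from a serviceability-agent tool follows; it assumes an already-attached SA session, and the VM.getVM().getCodeCache() accessor and blob.getName() call are the usual SA idioms rather than part of this diff:

    import sun.jvm.hotspot.code.*;
    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.runtime.VM;

    public class CodeCacheDump {
        // Sketch: list every blob across all code heaps via the reworked iterate().
        public static void dump() {
            CodeCache cache = VM.getVM().getCodeCache();   // assumed accessor
            cache.iterate(new CodeCacheVisitor() {
                public void prologue(Address start, Address end) {
                    System.out.println("code cache spans " + start + " .. " + end);
                }
                public void visit(CodeBlob blob) {
                    System.out.println(blob.getName());
                }
                public void epilogue() {}
            });
        }
    }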
@@ -0,0 +1,40 @@
+package sun.jvm.hotspot.gc_implementation.g1;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class G1Allocator extends VMObject {
+
+  //size_t _summary_bytes_used;
+  static private CIntegerField summaryBytesUsedField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
+  }
+
+  static private synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("G1Allocator");
+
+    summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+  }
+
+  public long getSummaryBytes() {
+    return summaryBytesUsedField.getValue(addr);
+  }
+
+  public G1Allocator(Address addr) {
+    super(addr);
+
+  }
+}
@@ -36,7 +36,6 @@ import sun.jvm.hotspot.memory.SpaceClosure;
 import sun.jvm.hotspot.runtime.VM;
 import sun.jvm.hotspot.runtime.VMObjectFactory;
 import sun.jvm.hotspot.types.AddressField;
-import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
@@ -47,8 +46,8 @@ public class G1CollectedHeap extends SharedHeap {
     static private long hrmFieldOffset;
     // MemRegion _g1_reserved;
     static private long g1ReservedFieldOffset;
-    // size_t _summary_bytes_used;
-    static private CIntegerField summaryBytesUsedField;
+    // G1Allocator* _allocator
+    static private AddressField g1Allocator;
     // G1MonitoringSupport* _g1mm;
     static private AddressField g1mmField;
     // HeapRegionSet _old_set;
@@ -68,7 +67,7 @@ public class G1CollectedHeap extends SharedHeap {
         Type type = db.lookupType("G1CollectedHeap");
 
         hrmFieldOffset = type.getField("_hrm").getOffset();
-        summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
+        g1Allocator = type.getAddressField("_allocator");
         g1mmField = type.getAddressField("_g1mm");
         oldSetFieldOffset = type.getField("_old_set").getOffset();
         humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
@@ -79,7 +78,7 @@ public class G1CollectedHeap extends SharedHeap {
     }
 
     public long used() {
-        return summaryBytesUsedField.getValue(addr);
+        return allocator().getSummaryBytes();
     }
 
     public long n_regions() {
@@ -97,6 +96,11 @@ public class G1CollectedHeap extends SharedHeap {
         return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
     }
 
+    public G1Allocator allocator() {
+        Address g1AllocatorAddr = g1Allocator.getValue(addr);
+        return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
+    }
+
     public HeapRegionSetBase oldSet() {
         Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
         return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
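The G1 hunks route the _summary_bytes_used counter through the new G1Allocator wrapper, so G1CollectedHeap.used() becomes a two-step read. A rough usage sketch; the Universe.heap() lookup and the cast are standard SA idioms and not part of this change:

    import sun.jvm.hotspot.gc_implementation.g1.*;
    import sun.jvm.hotspot.runtime.VM;

    // Sketch: read G1 used bytes through the new allocator indirection.
    G1CollectedHeap g1 = (G1CollectedHeap) VM.getVM().getUniverse().heap();
    long usedBytes = g1.allocator().getSummaryBytes();   // same value as g1.used()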
@@ -234,10 +234,10 @@ JVM_OBJ_FILES = $(Obj_Files)
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
 	rm -f $@
 	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
-	           { system ("cat vm.def"); } \
+	           { system ("cat mapfile_ext"); system ("cat vm.def"); } \
 	       else \
 	           { print $$0 } \
 	     }' > $@ < $(MAPFILE)
@@ -249,6 +249,13 @@ mapfile_reorder : mapfile $(REORDERFILE)
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/bsd/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext > $@; \
+	fi
+
 STATIC_CXX = false
 
 ifeq ($(LINK_INTO),AOUT)
@@ -265,6 +272,8 @@ else
   LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/.
   LFLAGS_VM += -Xlinker -rpath -Xlinker @loader_path/..
   LFLAGS_VM += -Xlinker -install_name -Xlinker @rpath/$(@F)
+else
+  LFLAGS_VM += -Wl,-z,defs
 endif
 
 # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
@@ -21,6 +21,9 @@
 # questions.
 #
 #
+
+include $(GAMMADIR)/make/altsrc.make
+
 ifeq ($(INCLUDE_JVMTI), false)
       CXXFLAGS += -DINCLUDE_JVMTI=0
       CFLAGS += -DINCLUDE_JVMTI=0
@@ -78,12 +81,12 @@ ifeq ($(INCLUDE_ALL_GCS), false)
       CXXFLAGS += -DINCLUDE_ALL_GCS=0
       CFLAGS += -DINCLUDE_ALL_GCS=0
 
-      gc_impl := $(GAMMADIR)/src/share/vm/gc_implementation
-      gc_exclude := \
-        $(notdir $(wildcard $(gc_impl)/concurrentMarkSweep/*.cpp)) \
-        $(notdir $(wildcard $(gc_impl)/g1/*.cpp)) \
-        $(notdir $(wildcard $(gc_impl)/parallelScavenge/*.cpp)) \
-        $(notdir $(wildcard $(gc_impl)/parNew/*.cpp))
+      gc_impl := $(HS_COMMON_SRC)/share/vm/gc_implementation
+      gc_impl_alt := $(HS_ALT_SRC)/share/vm/gc_implementation
+      gc_subdirs := concurrentMarkSweep g1 parallelScavenge parNew
+      gc_exclude := $(foreach gc,$(gc_subdirs), \
+                      $(notdir $(wildcard $(gc_impl)/$(gc)/*.cpp)) \
+                      $(notdir $(wildcard $(gc_impl_alt)/$(gc)/*.cpp)))
       Src_Files_EXCLUDE += $(gc_exclude)
 
 # Exclude everything in $(gc_impl)/shared except the files listed
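The rewritten gc_exclude above folds the four GC subdirectories into a $(foreach) that scans both the open tree ($(HS_COMMON_SRC), via gc_impl) and the alternate tree ($(HS_ALT_SRC), via gc_impl_alt); for g1, for example, it now collects $(notdir $(wildcard $(gc_impl)/g1/*.cpp)) plus $(notdir $(wildcard $(gc_impl_alt)/g1/*.cpp)). Previously only the GAMMADIR copy was scanned, so sources of the excluded collectors living under the alternate tree were presumably never added to Src_Files_EXCLUDE.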
@@ -1,388 +0,0 @@
#
|
|
||||||
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
|
|
||||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
||||||
#
|
|
||||||
# This code is free software; you can redistribute it and/or modify it
|
|
||||||
# under the terms of the GNU General Public License version 2 only, as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
|
||||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
||||||
# version 2 for more details (a copy is included in the LICENSE file that
|
|
||||||
# accompanied this code).
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License version
|
|
||||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
|
||||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
||||||
#
|
|
||||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
||||||
# or visit www.oracle.com if you need additional information or have any
|
|
||||||
# questions.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
# Properties for jprt
|
|
||||||
|
|
||||||
# All build result bundles are full jdks.
|
|
||||||
jprt.need.sibling.build=false
|
|
||||||
|
|
||||||
# At submit time, the release supplied will be in jprt.submit.release
|
|
||||||
# and will be one of the official release names defined in jprt.
|
|
||||||
# jprt supports property value expansion using ${property.name} syntax.
|
|
||||||
|
|
||||||
# This tells jprt what default release we want to build
|
|
||||||
|
|
||||||
jprt.hotspot.default.release=jdk9
|
|
||||||
|
|
||||||
jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
|
|
||||||
|
|
||||||
# Disable syncing the source after builds and tests are done.
|
|
||||||
|
|
||||||
jprt.sync.push=false
|
|
||||||
|
|
||||||
# Note: we want both embedded releases and regular releases to build and test
|
|
||||||
# all platforms so that regressions are not introduced (eg. change to
|
|
||||||
# common code by SE breaks PPC/ARM; change to common code by SE-E breaks
|
|
||||||
# sparc etc.
|
|
||||||
|
|
||||||
# Define the Solaris platforms we want for the various releases
|
|
||||||
jprt.my.solaris.sparcv9.jdk9=solaris_sparcv9_5.11
|
|
||||||
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.solaris.x64.jdk9=solaris_x64_5.11
|
|
||||||
jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.linux.i586.jdk9=linux_i586_2.6
|
|
||||||
jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.linux.x64.jdk9=linux_x64_2.6
|
|
||||||
jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.linux.ppc.jdk9=linux_ppc_2.6
|
|
||||||
jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.linux.ppcv2.jdk9=linux_ppcv2_2.6
|
|
||||||
jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.linux.armvfpsflt.jdk9=linux_armvfpsflt_2.6
|
|
||||||
jprt.my.linux.armvfpsflt=${jprt.my.linux.armvfpsflt.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.linux.armvfphflt.jdk9=linux_armvfphflt_2.6
|
|
||||||
jprt.my.linux.armvfphflt=${jprt.my.linux.armvfphflt.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
# The ARM GP vfp-sflt build is not currently supported
|
|
||||||
#jprt.my.linux.armvs.jdk9=linux_armvs_2.6
|
|
||||||
#jprt.my.linux.armvs=${jprt.my.linux.armvs.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.linux.armvh.jdk9=linux_armvh_2.6
|
|
||||||
jprt.my.linux.armvh=${jprt.my.linux.armvh.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.linux.armsflt.jdk9=linux_armsflt_2.6
|
|
||||||
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.macosx.x64.jdk9=macosx_x64_10.7
|
|
||||||
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.windows.i586.jdk9=windows_i586_6.1
|
|
||||||
jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.windows.x64.jdk9=windows_x64_6.1
|
|
||||||
jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
# Standard list of jprt build targets for this source tree
|
|
||||||
|
|
||||||
jprt.build.targets.standard= \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}, \
|
|
||||||
${jprt.my.linux.armvh}-{product|fastdebug}
|
|
||||||
|
|
||||||
jprt.build.targets.open= \
|
|
||||||
${jprt.my.solaris.x64}-{debugOpen}, \
|
|
||||||
${jprt.my.linux.x64}-{productOpen}
|
|
||||||
|
|
||||||
jprt.build.targets.embedded= \
|
|
||||||
${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \
|
|
||||||
${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
|
|
||||||
${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
|
|
||||||
${jprt.my.linux.armvfpsflt}-{productEmb|fastdebugEmb}, \
|
|
||||||
${jprt.my.linux.armvfphflt}-{productEmb|fastdebugEmb}, \
|
|
||||||
${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
|
|
||||||
|
|
||||||
jprt.build.targets.all=${jprt.build.targets.standard}, \
|
|
||||||
${jprt.build.targets.embedded}, ${jprt.build.targets.open}
|
|
||||||
|
|
||||||
jprt.build.targets.jdk9=${jprt.build.targets.all}
|
|
||||||
jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
# Subset lists of test targets for this source tree
|
|
||||||
|
|
||||||
jprt.my.solaris.sparcv9.test.targets= \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
|
|
||||||
${jprt.my.solaris.sparcv9}-product-c2-runThese8, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_G1, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParOldGC
|
|
||||||
|
|
||||||
jprt.my.solaris.x64.test.targets= \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \
|
|
||||||
${jprt.my.solaris.x64}-product-c2-runThese8, \
|
|
||||||
${jprt.my.solaris.x64}-product-c2-runThese8_Xcomp_lang, \
|
|
||||||
${jprt.my.solaris.x64}-product-c2-runThese8_Xcomp_vm, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
|
|
||||||
|
|
||||||
jprt.my.linux.i586.test.targets = \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \
|
|
||||||
${jprt.my.linux.i586}-product-c1-runThese8_Xcomp_lang, \
|
|
||||||
${jprt.my.linux.i586}-product-c1-runThese8_Xcomp_vm, \
|
|
||||||
${jprt.my.linux.i586}-fastdebug-c1-runThese8_Xshare, \
|
|
||||||
${jprt.my.linux.i586}-fastdebug-c2-runThese8_Xcomp_lang, \
|
|
||||||
${jprt.my.linux.i586}-fastdebug-c2-runThese8_Xcomp_vm, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \
|
|
||||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \
|
|
||||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
|
|
||||||
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_SerialGC, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParOldGC
|
|
||||||
|
|
||||||
jprt.my.linux.x64.test.targets = \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
|
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
|
|
||||||
|
|
||||||
jprt.my.macosx.x64.test.targets = \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
|
|
||||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
|
|
||||||
|
|
||||||
jprt.my.windows.i586.test.targets = \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-scimark, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-runThese8, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-runThese8_Xcomp_lang, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-runThese8_Xcomp_vm, \
|
|
||||||
${jprt.my.windows.i586}-fastdebug-c1-runThese8_Xshare, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_CMS, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \
|
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \
|
|
||||||
${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParOldGC
|
|
||||||
|
|
||||||
jprt.my.windows.x64.test.targets = \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \
|
|
||||||
${jprt.my.windows.x64}-product-c2-runThese8, \
|
|
||||||
${jprt.my.windows.x64}-product-c2-runThese8_Xcomp_lang, \
|
|
||||||
${jprt.my.windows.x64}-product-c2-runThese8_Xcomp_vm, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \
|
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.windows.x64}-product-c2-jbb_CMS, \
|
|
||||||
${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \
|
|
||||||
${jprt.my.windows.x64}-product-c2-jbb_G1, \
|
|
||||||
${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
|
|
||||||
|
|
||||||
# Some basic "smoke" tests for OpenJDK builds
|
|
||||||
jprt.test.targets.open = \
|
|
||||||
${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \
|
|
||||||
${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
|
|
||||||
|
|
||||||
# Testing for actual embedded builds is different to standard
|
|
||||||
jprt.my.linux.i586.test.targets.embedded = \
|
|
||||||
linux_i586_2.6-product-c1-scimark
|
|
||||||
|
|
||||||
# The complete list of test targets for jprt
|
|
||||||
# Note: no PPC or ARM tests at this stage
|
|
||||||
|
|
||||||
jprt.test.targets.standard = \
|
|
||||||
${jprt.my.linux.i586.test.targets.embedded}, \
|
|
||||||
${jprt.my.solaris.sparcv9.test.targets}, \
|
|
||||||
${jprt.my.solaris.x64.test.targets}, \
|
|
||||||
${jprt.my.linux.i586.test.targets}, \
|
|
||||||
${jprt.my.linux.x64.test.targets}, \
|
|
||||||
${jprt.my.macosx.x64.test.targets}, \
|
|
||||||
${jprt.my.windows.i586.test.targets}, \
|
|
||||||
${jprt.my.windows.x64.test.targets}, \
|
|
||||||
${jprt.test.targets.open}
|
|
||||||
|
|
||||||
jprt.test.targets.embedded= \
|
|
||||||
${jprt.my.linux.i586.test.targets.embedded}, \
|
|
||||||
${jprt.my.solaris.sparcv9.test.targets}, \
|
|
||||||
${jprt.my.solaris.x64.test.targets}, \
|
|
||||||
${jprt.my.linux.x64.test.targets}, \
|
|
||||||
${jprt.my.windows.i586.test.targets}, \
|
|
||||||
${jprt.my.windows.x64.test.targets}
|
|
||||||
|
|
||||||
jprt.test.targets.jdk9=${jprt.test.targets.standard}
|
|
||||||
jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
# The default test/Makefile targets that should be run
|
|
||||||
|
|
||||||
#jprt.make.rule.test.targets=*-product-*-packtest
|
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard.client = \
|
|
||||||
${jprt.my.linux.i586}-*-c1-clienttest, \
|
|
||||||
${jprt.my.windows.i586}-*-c1-clienttest
|
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard.server = \
|
|
||||||
${jprt.my.solaris.sparcv9}-*-c2-servertest, \
|
|
||||||
${jprt.my.solaris.x64}-*-c2-servertest, \
|
|
||||||
${jprt.my.linux.i586}-*-c2-servertest, \
|
|
||||||
${jprt.my.linux.x64}-*-c2-servertest, \
|
|
||||||
${jprt.my.macosx.x64}-*-c2-servertest, \
|
|
||||||
${jprt.my.windows.i586}-*-c2-servertest, \
|
|
||||||
${jprt.my.windows.x64}-*-c2-servertest
|
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard.internalvmtests = \
|
|
||||||
${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \
|
|
||||||
${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
|
|
||||||
${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
|
|
||||||
${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
|
|
||||||
${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \
|
|
||||||
${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
|
|
||||||
${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
|
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard.reg.group = \
|
|
||||||
${jprt.my.solaris.sparcv9}-fastdebug-c2-GROUP, \
|
|
||||||
${jprt.my.solaris.x64}-fastdebug-c2-GROUP, \
|
|
||||||
${jprt.my.linux.i586}-fastdebug-c2-GROUP, \
|
|
||||||
${jprt.my.linux.x64}-fastdebug-c2-GROUP, \
|
|
||||||
${jprt.my.macosx.x64}-fastdebug-c2-GROUP, \
|
|
||||||
${jprt.my.windows.i586}-fastdebug-c2-GROUP, \
|
|
||||||
${jprt.my.windows.x64}-fastdebug-c2-GROUP, \
|
|
||||||
${jprt.my.linux.i586}-fastdebug-c1-GROUP, \
|
|
||||||
${jprt.my.windows.i586}-fastdebug-c1-GROUP
|
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard = \
|
|
||||||
${jprt.make.rule.test.targets.standard.client}, \
|
|
||||||
${jprt.make.rule.test.targets.standard.server}, \
|
|
||||||
${jprt.make.rule.test.targets.standard.internalvmtests}, \
|
|
||||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_wbapitest}, \
|
|
||||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \
|
|
||||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \
|
|
||||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \
|
|
||||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime_closed}, \
|
|
||||||
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability}
|
|
||||||
|
|
||||||
jprt.make.rule.test.targets.embedded = \
|
|
||||||
${jprt.make.rule.test.targets.standard.client}
|
|
||||||
|
|
||||||
jprt.make.rule.test.targets.jdk9=${jprt.make.rule.test.targets.standard}
|
|
||||||
jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
# 7155453: Work-around to prevent popups on OSX from blocking test completion
|
|
||||||
# but the work-around is added to all platforms to be consistent
|
|
||||||
jprt.jbb.options=-Djava.awt.headless=true
|
|
@@ -227,10 +227,10 @@ JVM_OBJ_FILES = $(Obj_Files)
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
 	rm -f $@
 	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
-	           { system ("cat vm.def"); } \
+	           { system ("cat mapfile_ext"); system ("cat vm.def"); } \
 	       else \
 	           { print $$0 } \
 	     }' > $@ < $(MAPFILE)
@@ -242,6 +242,13 @@ mapfile_reorder : mapfile $(REORDERFILE)
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext > $@; \
+	fi
+
 ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   STATIC_CXX = false
 else
@@ -261,6 +268,7 @@ else
   LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder
   LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
   LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM))
+  LFLAGS_VM += -Wl,-z,defs
 
 # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
 # get around library dependency and compatibility issues. Must use gcc not
@@ -258,6 +258,8 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
 	echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
 	[ -n "$(ZIPEXE)" ] && \
 	echo && echo "ZIPEXE = $(ZIPEXE)"; \
+	[ -n "$(HS_ALT_MAKE)" ] && \
+	echo && echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
 	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
 	echo && \
 	echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
         # Dtrace support
-        __1cJCodeCacheF_heap_;
+        __1cJCodeCacheG_heaps_;
         __1cIUniverseO_collectedHeap_;
         __1cGMethodG__vtbl_;
         __1cHnmethodG__vtbl_;
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
         # Dtrace support
-        __1cJCodeCacheF_heap_;
+        __1cJCodeCacheG_heaps_;
         __1cIUniverseO_collectedHeap_;
         __1cGMethodG__vtbl_;
         __1cHnmethodG__vtbl_;
@@ -29,7 +29,7 @@
 SUNWprivate_1.1 {
         global:
         # Dtrace support
-        __1cJCodeCacheF_heap_;
+        __1cJCodeCacheG_heaps_;
         __1cIUniverseO_collectedHeap_;
         __1cGMethodG__vtbl_;
         __1cHnmethodG__vtbl_;
@@ -130,7 +130,7 @@ ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1)
 # Not sure what the 'designed for' comment is referring too above.
 # The order may not be too significant anymore, but I have placed this
 # older libm before libCrun, just to make sure it's found and used first.
-LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle
+LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc -ldemangle -lnsl
 else
 ifeq ($(COMPILER_REV_NUMERIC), 502)
 # SC6.1 has it's own libm.so: specifying anything else provokes a name conflict.
@@ -249,11 +249,12 @@ JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS)
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def
+mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def mapfile_ext
 	rm -f $@
 	cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \
 	    | $(NAWK) '{ \
 	              if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") { \
+	                  system ("cat mapfile_ext"); \
 	                  system ("cat vm.def"); \
 	              } else { \
 	                  print $$0; \
@@ -267,6 +268,13 @@ mapfile_extended : mapfile $(MAPFILE_DTRACE_OPT)
 vm.def: $(Obj_Files)
 	sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext > $@; \
+	fi
+
 ifeq ($(LINK_INTO),AOUT)
   LIBJVM.o        =
   LIBJVM_MAPFILE  =
@@ -276,6 +284,7 @@ else
   LIBJVM_MAPFILE$(LDNOMAP) = mapfile_extended
   LFLAGS_VM$(LDNOMAP) += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
   LFLAGS_VM += $(SONAMEFLAG:SONAME=$(LIBJVM))
+  LFLAGS_VM += -Wl,-z,defs
 ifndef USE_GCC
   LIBS_VM = $(LIBS)
 else
@@ -79,6 +79,9 @@ define_pd_global(bool, OptoScheduling, false);
 
 define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize, 256*M);
+define_pd_global(intx, NonProfiledCodeHeapSize, 125*M);
+define_pd_global(intx, ProfiledCodeHeapSize, 126*M);
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
 define_pd_global(intx, CodeCacheExpansionSize, 64*K);
 
 // Ergonomics related flags
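Note that the three new code-heap sizes appear to partition, rather than grow, the reserved code cache: 125*M + 126*M + 5*M = 256*M, exactly the ReservedCodeCacheSize above. The smaller client and 32-bit configurations further down follow the same arithmetic (13 + 14 + 5 = 32*M and 21 + 22 + 5 = 48*M).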
@@ -308,3 +308,10 @@ intptr_t *frame::initial_deoptimization_info() {
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();
 }
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) : _sp((intptr_t*)sp), _unextended_sp((intptr_t*)sp) {
+  find_codeblob_and_set_pc_and_deopt_state((address)pc); // also sets _fp and adjusts _unextended_sp
+}
+#endif
@@ -47,6 +47,9 @@ define_pd_global(bool, ProfileInterpreter, false);
 define_pd_global(intx, FreqInlineSize, 325 );
 define_pd_global(bool, ResizeTLAB, true );
 define_pd_global(intx, ReservedCodeCacheSize, 32*M );
+define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
+define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
 define_pd_global(intx, CodeCacheExpansionSize, 32*K );
 define_pd_global(uintx, CodeCacheMinBlockLength, 1);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
@@ -74,6 +74,9 @@ define_pd_global(bool, OptoScheduling, true);
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize, 48*M);
+define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
+define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
 define_pd_global(intx, CodeCacheExpansionSize, 64*K);
 
 // Ergonomics related flags
@@ -82,6 +85,9 @@ define_pd_global(uint64_t,MaxRAM, 128ULL*G);
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, ReservedCodeCacheSize, 32*M);
+define_pd_global(intx, NonProfiledCodeHeapSize, 13*M);
+define_pd_global(intx, ProfiledCodeHeapSize, 14*M);
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
 define_pd_global(intx, CodeCacheExpansionSize, 32*K);
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM, 4ULL*G);
@@ -343,7 +343,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
 // constructors
 
 // Construct an unpatchable, deficient frame
-frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
+void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
 #ifdef _LP64
   assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
 #endif
@@ -365,6 +365,10 @@ frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
 #endif // ASSERT
 }
 
+frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
+  init(sp, pc, cb);
+}
+
 frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
   _sp(sp),
   _younger_sp(younger_sp),
@@ -419,6 +423,13 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpret
   }
 }
 
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+  init((intptr_t*)sp, (address)pc, NULL);
+}
+#endif
+
 bool frame::is_interpreted_frame() const {
   return Interpreter::contains(pc());
 }
@@ -163,6 +163,8 @@
   enum unpatchable_t { unpatchable };
   frame(intptr_t* sp, unpatchable_t, address pc = NULL, CodeBlob* cb = NULL);
 
+  void init(intptr_t* sp, address pc, CodeBlob* cb);
+
   // Walk from sp outward looking for old_sp, and return old_sp's predecessor
   // (i.e. return the sp from the frame where old_sp is the fp).
   // Register windows are assumed to be flushed for the stack in question.
@ -1128,51 +1128,82 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
|
|||||||
// Hoist any int/ptr/long's in the first 6 to int regs.
|
// Hoist any int/ptr/long's in the first 6 to int regs.
|
||||||
// Hoist any flt/dbl's in the first 16 dbl regs.
|
// Hoist any flt/dbl's in the first 16 dbl regs.
|
||||||
int j = 0; // Count of actual args, not HALVES
|
int j = 0; // Count of actual args, not HALVES
|
||||||
for( int i=0; i<total_args_passed; i++, j++ ) {
|
+      VMRegPair param_array_reg;  // location of the argument in the parameter array
-      switch( sig_bt[i] ) {
+      for (int i = 0; i < total_args_passed; i++, j++) {
+        param_array_reg.set_bad();
+        switch (sig_bt[i]) {
         case T_BOOLEAN:
         case T_BYTE:
         case T_CHAR:
         case T_INT:
         case T_SHORT:
-          regs[i].set1( int_stk_helper( j ) ); break;
+          regs[i].set1(int_stk_helper(j));
+          break;
         case T_LONG:
-          assert( sig_bt[i+1] == T_VOID, "expecting half" );
+          assert(sig_bt[i+1] == T_VOID, "expecting half");
         case T_ADDRESS: // raw pointers, like current thread, for VM calls
         case T_ARRAY:
         case T_OBJECT:
         case T_METADATA:
-          regs[i].set2( int_stk_helper( j ) );
+          regs[i].set2(int_stk_helper(j));
           break;
         case T_FLOAT:
-          if ( j < 16 ) {
-            // V9ism: floats go in ODD registers
-            regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
-          } else {
-            // V9ism: floats go in ODD stack slot
-            regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
+          // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
+          // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
+          //
+          // "When a callee prototype exists, and does not indicate variable arguments,
+          // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
+          // will be promoted to floating-point registers"
+          //
+          // By "promoted" it means that the argument is located in two places, an unused
+          // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
+          // float register. In most cases, there are 6 or fewer arguments of any type,
+          // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
+          // serve as shadow slots. Per the spec floating point registers %d6 to %d16
+          // require slots beyond that (up to %sp+BIAS+248).
+          //
+          {
+            // V9ism: floats go in ODD registers and stack slots
+            int float_index = 1 + (j << 1);
+            param_array_reg.set1(VMRegImpl::stack2reg(float_index));
+            if (j < 16) {
+              regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
+            } else {
+              regs[i] = param_array_reg;
+            }
           }
           break;
         case T_DOUBLE:
-          assert( sig_bt[i+1] == T_VOID, "expecting half" );
-          if ( j < 16 ) {
-            // V9ism: doubles go in EVEN/ODD regs
-            regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
-          } else {
-            // V9ism: doubles go in EVEN/ODD stack slots
-            regs[i].set2(VMRegImpl::stack2reg(j<<1));
+          {
+            assert(sig_bt[i + 1] == T_VOID, "expecting half");
+            // V9ism: doubles go in EVEN/ODD regs and stack slots
+            int double_index = (j << 1);
+            param_array_reg.set2(VMRegImpl::stack2reg(double_index));
+            if (j < 16) {
+              regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
+            } else {
+              // V9ism: doubles go in EVEN/ODD stack slots
+              regs[i] = param_array_reg;
+            }
           }
           break;
-        case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
+        case T_VOID:
+          regs[i].set_bad();
+          j--;
+          break; // Do not count HALVES
         default:
           ShouldNotReachHere();
         }
-        if (regs[i].first()->is_stack()) {
-          int off = regs[i].first()->reg2stack();
+        // Keep track of the deepest parameter array slot.
+        if (!param_array_reg.first()->is_valid()) {
+          param_array_reg = regs[i];
+        }
+        if (param_array_reg.first()->is_stack()) {
+          int off = param_array_reg.first()->reg2stack();
           if (off > max_stack_slots) max_stack_slots = off;
         }
-        if (regs[i].second()->is_stack()) {
-          int off = regs[i].second()->reg2stack();
+        if (param_array_reg.second()->is_stack()) {
+          int off = param_array_reg.second()->reg2stack();
           if (off > max_stack_slots) max_stack_slots = off;
         }
       }
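For readers following the new convention above, the placement rule reduces to a small calculation. The sketch below is illustrative only (standalone C++, not HotSpot code); it simply restates what the hunk encodes: argument position j gets parameter-array slot 1 + 2*j for a float and 2*j for a double, and the value is additionally promoted to a float register while j < 16.

// Standalone illustration of the placement rule encoded above (not HotSpot code).
#include <cstdio>

struct Placement {
  int  param_array_slot;   // shadow slot in the parameter array
  bool promoted;           // also lives in a float register?
};

static Placement place_float(int j)  { return { 1 + (j << 1), j < 16 }; }
static Placement place_double(int j) { return { j << 1,       j < 16 }; }

int main() {
  for (int j = 14; j <= 17; j++) {
    Placement f = place_float(j);
    std::printf("float arg %2d -> slot %2d%s\n", j, f.param_array_slot,
                f.promoted ? " (+ float register)" : "");
  }
  Placement d = place_double(3);
  std::printf("double arg 3 -> slots %d/%d (+ float register)\n",
              d.param_array_slot, d.param_array_slot + 1);
  return 0;
}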
@@ -1180,8 +1211,8 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 #else // _LP64
     // V8 convention: first 6 things in O-regs, rest on stack.
     // Alignment is willy-nilly.
-    for( int i=0; i<total_args_passed; i++ ) {
-      switch( sig_bt[i] ) {
+    for (int i = 0; i < total_args_passed; i++) {
+      switch (sig_bt[i]) {
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_BOOLEAN:
@@ -1192,23 +1223,23 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
       case T_OBJECT:
       case T_METADATA:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( i ) );
+        regs[i].set1(int_stk_helper(i));
         break;
       case T_DOUBLE:
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
+        assert(sig_bt[i + 1] == T_VOID, "expecting half");
+        regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
         break;
       case T_VOID: regs[i].set_bad(); break;
       default:
         ShouldNotReachHere();
       }
       if (regs[i].first()->is_stack()) {
         int off = regs[i].first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
       if (regs[i].second()->is_stack()) {
         int off = regs[i].second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
@@ -1357,11 +1388,10 @@ static void object_move(MacroAssembler* masm,
     const Register rOop = src.first()->as_Register();
     const Register rHandle = L5;
     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
-    int offset = oop_slot*VMRegImpl::stack_slot_size;
-    Label skip;
+    int offset = oop_slot * VMRegImpl::stack_slot_size;
     __ st_ptr(rOop, SP, offset + STACK_BIAS);
     if (is_receiver) {
-      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
+      *receiver_offset = offset;
     }
     map->set_oop(VMRegImpl::stack2reg(oop_slot));
     __ add(SP, offset + STACK_BIAS, rHandle);
@@ -1989,7 +1989,7 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
 // to implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;

-// Are floats conerted to double when stored to stack during deoptimization?
+// Are floats converted to double when stored to stack during deoptimization?
 // Sparc does not handle callee-save floats.
 bool Matcher::float_in_double() { return false; }

@@ -3218,7 +3218,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r
 // are owned by the CALLEE. Holes should not be nessecary in the
 // incoming area, as the Java calling convention is completely under
 // the control of the AD file. Doubles can be sorted and packed to
-// avoid holes. Holes in the outgoing arguments may be nessecary for
+// avoid holes. Holes in the outgoing arguments may be necessary for
 // varargs C calling conventions.
 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
 // even aligned with pad0 as needed.
@@ -3284,7 +3284,7 @@ frame %{
 %}

 // Body of function which returns an OptoRegs array locating
-// arguments either in registers or in stack slots for callin
+// arguments either in registers or in stack slots for calling
 // C.
 c_calling_convention %{
     // This is obviously always outgoing
@@ -47,6 +47,9 @@ define_pd_global(intx, FreqInlineSize, 325 );
 define_pd_global(intx, NewSizeThreadIncrease, 4*K );
 define_pd_global(intx, InitialCodeCacheSize, 160*K);
 define_pd_global(intx, ReservedCodeCacheSize, 32*M );
+define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
+define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
 define_pd_global(bool, ProfileInterpreter, false);
 define_pd_global(intx, CodeCacheExpansionSize, 32*K );
 define_pd_global(uintx, CodeCacheMinBlockLength, 1);
@@ -84,6 +84,9 @@ define_pd_global(bool, OptoScheduling, false);
 define_pd_global(bool, OptoBundling, false);

 define_pd_global(intx, ReservedCodeCacheSize, 48*M);
+define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
+define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
 define_pd_global(uintx, CodeCacheMinBlockLength, 4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);

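One observation about the defaults introduced here and in the earlier globals hunk (not stated in the patch itself, but easy to verify): the three per-heap sizes appear to be chosen so that they sum exactly to the platform's ReservedCodeCacheSize. A trivial standalone check:

// Sanity check of the new code heap defaults (standalone C++, values copied
// from the two hunks above; the interpretation that they are meant to sum to
// ReservedCodeCacheSize is an inference, not a statement from the patch).
#include <cassert>

int main() {
  const long M = 1024 * 1024;
  assert(21*M + 22*M + 5*M == 48*M);  // C2 globals: 48M reserved
  assert(13*M + 14*M + 5*M == 32*M);  // C1 globals: 32M reserved
  return 0;
}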
@@ -715,3 +715,10 @@ intptr_t* frame::real_fp() const {
   assert(! is_compiled_frame(), "unknown compiled frame size");
   return fp();
 }
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
+}
+#endif
@@ -187,6 +187,8 @@

   frame(intptr_t* sp, intptr_t* fp);

+  void init(intptr_t* sp, intptr_t* fp, address pc);
+
   // accessors for the instance variables
   // Note: not necessarily the real 'frame pointer' (see real_fp)
   intptr_t* fp() const { return _fp; }
@@ -41,7 +41,7 @@ inline frame::frame() {
   _deopt_state = unknown;
 }

-inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
+inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
@@ -59,6 +59,10 @@ inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
   }
 }

+inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
+  init(sp, fp, pc);
+}
+
 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = unextended_sp;
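The frame changes above follow a small, reusable pattern: the body of the old (sp, fp, pc) constructor moves into an init() helper so that both the existing constructor and the new debug-only frame(void*, void*, void*) constructor used by pns() can share it. A minimal standalone sketch of the same pattern, with hypothetical names (this is not the real HotSpot frame class):

// Constructor-delegates-to-init sketch; names and types are illustrative only.
#include <cstdint>

class FrameLike {
 public:
  FrameLike(intptr_t* sp, intptr_t* fp, uintptr_t pc) { init(sp, fp, pc); }
  // Debug-only convenience overload, analogous to frame(void*, void*, void*):
  FrameLike(void* sp, void* fp, void* pc) {
    init(static_cast<intptr_t*>(sp), static_cast<intptr_t*>(fp),
         reinterpret_cast<uintptr_t>(pc));
  }
 private:
  void init(intptr_t* sp, intptr_t* fp, uintptr_t pc) {
    _sp = sp; _fp = fp; _pc = pc;   // single place for the shared setup
  }
  intptr_t* _sp; intptr_t* _fp; uintptr_t _pc;
};

int main() {
  intptr_t stack[4] = {0};
  FrameLike f(static_cast<void*>(stack), nullptr, nullptr);
  return 0;
}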
@@ -438,3 +438,10 @@ intptr_t *frame::initial_deoptimization_info() {
   // unused... but returns fp() to minimize changes introduced by 7087445
   return fp();
 }
+
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
+  Unimplemented();
+}
+#endif
@@ -53,6 +53,9 @@ define_pd_global(uintx, NewRatio, 12 );
 define_pd_global(intx, NewSizeThreadIncrease, 4*K );
 define_pd_global(intx, InitialCodeCacheSize, 160*K);
 define_pd_global(intx, ReservedCodeCacheSize, 32*M );
+define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
+define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
 define_pd_global(bool, ProfileInterpreter, false);
 define_pd_global(intx, CodeCacheExpansionSize, 32*K );
 define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
@@ -67,7 +67,7 @@
 * we link this program with -z nodefs .
 *
 * But for 'debug1' and 'fastdebug1' we still have to provide
-* a particular workaround for the following symbols bellow.
+* a particular workaround for the following symbols below.
 * It will be good to find out a generic way in the future.
 */

@@ -87,21 +87,24 @@ StubQueue* AbstractInterpreter::_code = NULL;
 #endif /* ASSERT */
 #endif /* COMPILER1 */

-#define GEN_OFFS(Type,Name)                    \
+#define GEN_OFFS_NAME(Type,Name,OutputType)    \
   switch(gen_variant) {                        \
   case GEN_OFFSET:                             \
     printf("#define OFFSET_%-33s %ld\n",       \
-           #Type #Name, offset_of(Type, Name)); \
+           #OutputType #Name, offset_of(Type, Name)); \
     break;                                     \
   case GEN_INDEX:                              \
     printf("#define IDX_OFFSET_%-33s %d\n",    \
-           #Type #Name, index++);              \
+           #OutputType #Name, index++);        \
     break;                                     \
   case GEN_TABLE:                              \
-    printf("\tOFFSET_%s,\n", #Type #Name);     \
+    printf("\tOFFSET_%s,\n", #OutputType #Name); \
     break;                                     \
   }

+#define GEN_OFFS(Type,Name)                    \
+  GEN_OFFS_NAME(Type,Name,Type)
+
 #define GEN_SIZE(Type)                         \
   switch(gen_variant) {                        \
   case GEN_OFFSET:                             \
@@ -246,6 +249,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
   GEN_OFFS(VirtualSpace, _high);
   printf("\n");

+  /* We need to use different names here because of the template parameter */
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
+  GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
+  printf("\n");
+
   GEN_OFFS(CodeBlob, _name);
   GEN_OFFS(CodeBlob, _header_size);
   GEN_OFFS(CodeBlob, _content_offset);
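The reason for the extra OutputType parameter is that the stringized type is pasted into a C identifier in the generated header; stringizing GrowableArray<CodeHeap*> directly would yield OFFSET_GrowableArray<CodeHeap*>_data, which is not a usable macro name. The standalone sketch below shows the same stringize-and-paste idea in isolation; the GrowableArrayLike type and printed values are illustrative stand-ins, not the real generator or its output.

// Minimal illustration of the trick behind GEN_OFFS_NAME: OutputType controls
// only the printed identifier, while the real type is still used for offsetof().
#include <cstdio>
#include <cstddef>

template <class T> struct GrowableArrayLike { T* _data; int _len; };

#define GEN_OFFS_NAME(Type, Name, OutputType) \
  std::printf("#define OFFSET_%s %zu\n", #OutputType #Name, offsetof(Type, Name))

int main() {
  GEN_OFFS_NAME(GrowableArrayLike<void*>, _data, GrowableArray_CodeHeap);
  GEN_OFFS_NAME(GrowableArrayLike<void*>, _len,  GrowableArray_CodeHeap);
  return 0;
}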
@@ -43,7 +43,9 @@

 extern pointer __JvmOffsets;

-extern pointer __1cJCodeCacheF_heap_;
+/* GrowableArray<CodeHeaps*>* */
+extern pointer __1cJCodeCacheG_heaps_;

 extern pointer __1cIUniverseO_collectedHeap_;

 extern pointer __1cHnmethodG__vtbl_;
@@ -95,8 +97,8 @@ dtrace:helper:ustack:
 /!init_done && !this->done/
 {
   MARK_LINE;
-  init_done = 1;

+  copyin_offset(POINTER_SIZE);
   copyin_offset(COMPILER);
   copyin_offset(OFFSET_CollectedHeap_reserved);
   copyin_offset(OFFSET_MemRegion_start);
@@ -122,6 +124,9 @@ dtrace:helper:ustack:
   copyin_offset(OFFSET_CodeHeap_segmap);
   copyin_offset(OFFSET_CodeHeap_log2_segment_size);

+  copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
+  copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
+
   copyin_offset(OFFSET_VirtualSpace_low);
   copyin_offset(OFFSET_VirtualSpace_high);

@@ -152,26 +157,14 @@ dtrace:helper:ustack:
 #error "Don't know architecture"
 #endif

-  this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
-
-  /* Reading volatile values */
-  this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
-
-  this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
-
-  this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
-      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
-
-  this->CodeHeap_log2_segment_size = copyin_uint32(
-      this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
-
-  this->Method_vtbl = (pointer) &``__1cNMethodG__vtbl_;
+  /* Read address of GrowableArray<CodeHeaps*> */
+  this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
+  /* Read address of _data array field in GrowableArray */
+  this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
+  this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
+
+  this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;

 /*
  * Get Java heap bounds
 */
@@ -187,21 +180,152 @@ dtrace:helper:ustack:
   this->heap_end = this->heap_start + this->heap_size;
 }

+/*
+ * IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in
+ * the code cache. If more code heaps are added the following probes have to
+ * be extended. This is done by simply adding a probe to get the heap bounds
+ * and another probe to set the code heap address of the newly created heap.
+ */
+
+/*
+ * ----- BEGIN: Get bounds of code heaps -----
+ */
 dtrace:helper:ustack:
-/!this->done &&
- this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
+/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 1 */
+  init_done = 1;
+  this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap1_low = copyin_ptr(this->code_heap1_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap1_high = copyin_ptr(this->code_heap1_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
+{
+  MARK_LINE;
+  /* CodeHeap 2 */
+  init_done = 2;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap2_low = copyin_ptr(this->code_heap2_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap2_high = copyin_ptr(this->code_heap2_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
+{
+  /* CodeHeap 3 */
+  init_done = 3;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap3_low = copyin_ptr(this->code_heap3_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap3_high = copyin_ptr(this->code_heap3_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
+{
+  /* CodeHeap 4 */
+  init_done = 4;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap4_low = copyin_ptr(this->code_heap4_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap4_high = copyin_ptr(this->code_heap4_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+
+dtrace:helper:ustack:
+/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
+{
+  /* CodeHeap 5 */
+  init_done = 5;
+  this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
+  this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
+  this->code_heap5_low = copyin_ptr(this->code_heap5_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap5_high = copyin_ptr(this->code_heap5_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
+}
+/*
+ * ----- END: Get bounds of code heaps -----
+ */
+
+/*
+ * ----- BEGIN: Get address of the code heap pc points to -----
+ */
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
 {
   MARK_LINE;
   this->codecache = 1;
+  this->code_heap_address = this->code_heap1_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap2_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap3_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap4_address;
+}
+
+dtrace:helper:ustack:
+/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
+{
+  MARK_LINE;
+  this->codecache = 1;
+  this->code_heap_address = this->code_heap5_address;
+}
+/*
+ * ----- END: Get address of the code heap pc points to -----
+ */
+
+dtrace:helper:ustack:
+/!this->done && this->codecache/
+{
+  MARK_LINE;
+  /*
+   * Get code heap configuration
+   */
+  this->code_heap_low = copyin_ptr(this->code_heap_address +
+      OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
+  this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
+      OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
+  this->code_heap_log2_segment_size = copyin_uint32(
+      this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
+
   /*
-   * Find start.
+   * Find start
    */
-  this->segment = (this->pc - this->CodeCache_low) >>
-    this->CodeHeap_log2_segment_size;
-  this->block = this->CodeCache_segmap_low;
+  this->segment = (this->pc - this->code_heap_low) >>
+    this->code_heap_log2_segment_size;
+  this->block = this->code_heap_segmap_low;
   this->tag = copyin_uchar(this->block + this->segment);
-  "second";
 }

 dtrace:helper:ustack:
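The block of nearly identical probes added above is effectively an unrolled loop: DTrace ustack helper actions are too restricted to iterate, so each possible heap gets its own probe, and the IMPORTANT comment asks for more copies if the VM ever grows more heaps. For orientation only, the logic the probes unroll looks like the ordinary loop below; this is a standalone C++ sketch, the *Like types are stand-ins whose fields play the role of the generated OFFSET_* constants, and nothing here is real VM or D code.

// What the unrolled probes compute, written as a loop (illustrative only).
#include <cstdint>
#include <cstdio>

struct VirtualSpaceLike { uint64_t low, high; };
struct CodeHeapLike     { VirtualSpaceLike memory; };
struct HeapArrayLike    { CodeHeapLike** data; int len; };  // GrowableArray<CodeHeap*>

static const CodeHeapLike* find_heap_for_pc(const HeapArrayLike* heaps, uint64_t pc) {
  for (int i = 0; i < heaps->len; i++) {            // one DTrace probe per iteration
    const CodeHeapLike* h = heaps->data[i];
    if (h->memory.low <= pc && pc < h->memory.high)
      return h;                                     // pc lies in this code heap
  }
  return nullptr;                                   // pc is not in the code cache
}

int main() {
  CodeHeapLike nonmethod = {{0x1000, 0x2000}}, profiled = {{0x2000, 0x4000}};
  CodeHeapLike* heaps[] = { &nonmethod, &profiled };
  HeapArrayLike arr = { heaps, 2 };
  std::printf("pc 0x2100 found in second heap: %s\n",
              find_heap_for_pc(&arr, 0x2100) == &profiled ? "yes" : "no");
  return 0;
}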
@@ -256,8 +380,8 @@ dtrace:helper:ustack:
 /!this->done && this->codecache/
 {
   MARK_LINE;
-  this->block = this->CodeCache_low +
-    (this->segment << this->CodeHeap_log2_segment_size);
+  this->block = this->code_heap_low +
+    (this->segment << this->code_heap_log2_segment_size);
   this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
 }

@@ -150,16 +150,18 @@ struct jvm_agent {
   uint64_t Use_Compressed_Oops_address;
   uint64_t Universe_narrow_oop_base_address;
   uint64_t Universe_narrow_oop_shift_address;
-  uint64_t CodeCache_heap_address;
+  uint64_t CodeCache_heaps_address;

   /* Volatiles */
   uint8_t Use_Compressed_Oops;
   uint64_t Universe_narrow_oop_base;
   uint32_t Universe_narrow_oop_shift;
-  uint64_t CodeCache_low;
-  uint64_t CodeCache_high;
-  uint64_t CodeCache_segmap_low;
-  uint64_t CodeCache_segmap_high;
+  // Code cache heaps
+  int32_t Number_of_heaps;
+  uint64_t* Heap_low;
+  uint64_t* Heap_high;
+  uint64_t* Heap_segmap_low;
+  uint64_t* Heap_segmap_high;

   int32_t SIZE_CodeCache_log2_segment;

@@ -278,8 +280,9 @@ static int parse_vmstructs(jvm_agent_t* J) {
     }

     if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
-      if (strcmp("_heap", vmp->fieldName) == 0) {
-        err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
+      /* Read _heaps field of type GrowableArray<CodeHeaps*>* */
+      if (strcmp("_heaps", vmp->fieldName) == 0) {
+        err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
       }
     } else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
       if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int read_volatiles(jvm_agent_t* J) {
|
static int read_volatiles(jvm_agent_t* J) {
|
||||||
uint64_t ptr;
|
int i;
|
||||||
|
uint64_t array_data;
|
||||||
|
uint64_t code_heap_address;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
|
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
|
||||||
@ -334,20 +339,43 @@ static int read_volatiles(jvm_agent_t* J) {
|
|||||||
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
|
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
|
||||||
CHECK_FAIL(err);
|
CHECK_FAIL(err);
|
||||||
|
|
||||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
/* CodeCache_heaps_address points to GrowableArray<CodeHeaps*>, read _data field
|
||||||
OFFSET_VirtualSpace_low, &J->CodeCache_low);
|
pointing to the first entry of type CodeCache* in the array */
|
||||||
CHECK_FAIL(err);
|
err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
|
||||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
/* Read _len field containing the number of code heaps */
|
||||||
OFFSET_VirtualSpace_high, &J->CodeCache_high);
|
err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
|
||||||
CHECK_FAIL(err);
|
&J->Number_of_heaps, sizeof(J->Number_of_heaps));
|
||||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
|
|
||||||
OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
|
|
||||||
CHECK_FAIL(err);
|
|
||||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
|
|
||||||
OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
|
|
||||||
CHECK_FAIL(err);
|
|
||||||
|
|
||||||
err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
|
/* Allocate memory for heap configurations */
|
||||||
|
J->Heap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||||
|
J->Heap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||||
|
J->Heap_segmap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||||
|
J->Heap_segmap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||||
|
|
||||||
|
/* Read code heap configurations */
|
||||||
|
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||||
|
/* Read address of heap */
|
||||||
|
err = read_pointer(J, array_data, &code_heap_address);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
|
||||||
|
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
|
||||||
|
OFFSET_VirtualSpace_low, &J->Heap_low[i]);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
|
||||||
|
OFFSET_VirtualSpace_high, &J->Heap_high[i]);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
|
||||||
|
OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
|
||||||
|
OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
|
||||||
|
/* Increment pointer to next entry */
|
||||||
|
array_data = array_data + POINTER_SIZE;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
|
||||||
&J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
|
&J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
|
||||||
CHECK_FAIL(err);
|
CHECK_FAIL(err);
|
||||||
|
|
||||||
@@ -357,46 +385,57 @@ static int read_volatiles(jvm_agent_t* J) {
   return err;
 }

+static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
+  return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
+}
+
 static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
-  /* make sure the code cache is up to date */
-  return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
+  int i;
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    if (codeheap_contains(i, J, ptr)) {
+      return 1;
+    }
+  }
+  return 0;
 }

-static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
-  return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
+static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
+  return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
 }

-static uint64_t block_at(jvm_agent_t* J, int i) {
-  return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
+static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
+  return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
 }

 static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
   int err;
+  int i;

-  *startp = 0;
-  if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
-    int32_t used;
-    uint64_t segment = segment_for(J, ptr);
-    uint64_t block = J->CodeCache_segmap_low;
-    uint8_t tag;
-    err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
-    CHECK_FAIL(err);
-    if (tag == 0xff)
-      return PS_OK;
-    while (tag > 0) {
-      err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
-      CHECK_FAIL(err);
-      segment -= tag;
-    }
-    block = block_at(J, segment);
-    err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
-    CHECK_FAIL(err);
-    if (used) {
-      *startp = block + SIZE_HeapBlockHeader;
-    }
-  }
-  return PS_OK;
+  for (i = 0; i < J->Number_of_heaps; ++i) {
+    *startp = 0;
+    if (codeheap_contains(i, J, ptr)) {
+      int32_t used;
+      uint64_t segment = segment_for(i, J, ptr);
+      uint64_t block = J->Heap_segmap_low[i];
+      uint8_t tag;
+      err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
+      CHECK_FAIL(err);
+      if (tag == 0xff)
+        return PS_OK;
+      while (tag > 0) {
+        err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
+        CHECK_FAIL(err);
+        segment -= tag;
+      }
+      block = block_at(i, J, segment);
+      err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
+      CHECK_FAIL(err);
+      if (used) {
+        *startp = block + SIZE_HeapBlockHeader;
+      }
+    }
+    return PS_OK;
+  }

 fail:
   return -1;
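The find_start() rewrite above keeps the original segment-map walk, just indexed per heap: each byte of the segmap is a tag, 0xff marks a segment with no live block, zero marks the first segment of a block, and any other value says how far to step back toward the block header. The sketch below restates that walk over an in-memory byte array instead of ps_pread calls; it is illustrative standalone C++, not the agent code.

// Illustrative sketch of the CodeHeap segment-map walk mirrored by find_start().
#include <cstdint>
#include <cstdio>
#include <vector>

// Returns the segment index that starts the block containing 'segment',
// or -1 if the segment is unused (tag 0xff), like the while (tag > 0) loop above.
static long block_start_segment(const std::vector<uint8_t>& segmap, long segment) {
  uint8_t tag = segmap[segment];
  if (tag == 0xff) return -1;
  while (tag > 0) {
    segment -= tag;
    tag = segmap[segment];
  }
  return segment;
}

int main() {
  // One block spanning segments 2..6: entries count the distance back to segment 2.
  std::vector<uint8_t> segmap = {0xff, 0xff, 0, 1, 2, 3, 4, 0xff};
  std::printf("block containing segment 5 starts at segment %ld\n",
              block_start_segment(segmap, 5));  // prints 2
  return 0;
}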
@ -82,21 +82,24 @@ StubQueue* AbstractInterpreter::_code = NULL;
|
|||||||
#endif /* ASSERT */
|
#endif /* ASSERT */
|
||||||
#endif /* COMPILER1 */
|
#endif /* COMPILER1 */
|
||||||
|
|
||||||
#define GEN_OFFS(Type,Name) \
|
#define GEN_OFFS_NAME(Type,Name,OutputType) \
|
||||||
switch(gen_variant) { \
|
switch(gen_variant) { \
|
||||||
case GEN_OFFSET: \
|
case GEN_OFFSET: \
|
||||||
printf("#define OFFSET_%-33s %d\n", \
|
printf("#define OFFSET_%-33s %d\n", \
|
||||||
#Type #Name, offset_of(Type, Name)); \
|
#OutputType #Name, offset_of(Type, Name)); \
|
||||||
break; \
|
break; \
|
||||||
case GEN_INDEX: \
|
case GEN_INDEX: \
|
||||||
printf("#define IDX_OFFSET_%-33s %d\n", \
|
printf("#define IDX_OFFSET_%-33s %d\n", \
|
||||||
#Type #Name, index++); \
|
#OutputType #Name, index++); \
|
||||||
break; \
|
break; \
|
||||||
case GEN_TABLE: \
|
case GEN_TABLE: \
|
||||||
printf("\tOFFSET_%s,\n", #Type #Name); \
|
printf("\tOFFSET_%s,\n", #OutputType #Name); \
|
||||||
break; \
|
break; \
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define GEN_OFFS(Type,Name) \
|
||||||
|
GEN_OFFS_NAME(Type,Name,Type)
|
||||||
|
|
||||||
#define GEN_SIZE(Type) \
|
#define GEN_SIZE(Type) \
|
||||||
switch(gen_variant) { \
|
switch(gen_variant) { \
|
||||||
case GEN_OFFSET: \
|
case GEN_OFFSET: \
|
||||||
@ -241,6 +244,11 @@ int generateJvmOffsets(GEN_variant gen_variant) {
|
|||||||
GEN_OFFS(VirtualSpace, _high);
|
GEN_OFFS(VirtualSpace, _high);
|
||||||
printf("\n");
|
printf("\n");
|
||||||
|
|
||||||
|
/* We need to use different names here because of the template parameter */
|
||||||
|
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _data, GrowableArray_CodeHeap);
|
||||||
|
GEN_OFFS_NAME(GrowableArray<CodeHeap*>, _len, GrowableArray_CodeHeap);
|
||||||
|
printf("\n");
|
||||||
|
|
||||||
GEN_OFFS(CodeBlob, _name);
|
GEN_OFFS(CodeBlob, _name);
|
||||||
GEN_OFFS(CodeBlob, _header_size);
|
GEN_OFFS(CodeBlob, _header_size);
|
||||||
GEN_OFFS(CodeBlob, _content_offset);
|
GEN_OFFS(CodeBlob, _content_offset);
|
||||||
|
@ -43,7 +43,9 @@
|
|||||||
|
|
||||||
extern pointer __JvmOffsets;
|
extern pointer __JvmOffsets;
|
||||||
|
|
||||||
extern pointer __1cJCodeCacheF_heap_;
|
/* GrowableArray<CodeHeaps*>* */
|
||||||
|
extern pointer __1cJCodeCacheG_heaps_;
|
||||||
|
|
||||||
extern pointer __1cIUniverseO_collectedHeap_;
|
extern pointer __1cIUniverseO_collectedHeap_;
|
||||||
|
|
||||||
extern pointer __1cHnmethodG__vtbl_;
|
extern pointer __1cHnmethodG__vtbl_;
|
||||||
@ -95,8 +97,8 @@ dtrace:helper:ustack:
|
|||||||
/!init_done && !this->done/
|
/!init_done && !this->done/
|
||||||
{
|
{
|
||||||
MARK_LINE;
|
MARK_LINE;
|
||||||
init_done = 1;
|
|
||||||
|
copyin_offset(POINTER_SIZE);
|
||||||
copyin_offset(COMPILER);
|
copyin_offset(COMPILER);
|
||||||
copyin_offset(OFFSET_CollectedHeap_reserved);
|
copyin_offset(OFFSET_CollectedHeap_reserved);
|
||||||
copyin_offset(OFFSET_MemRegion_start);
|
copyin_offset(OFFSET_MemRegion_start);
|
||||||
@ -122,6 +124,9 @@ dtrace:helper:ustack:
|
|||||||
copyin_offset(OFFSET_CodeHeap_segmap);
|
copyin_offset(OFFSET_CodeHeap_segmap);
|
||||||
copyin_offset(OFFSET_CodeHeap_log2_segment_size);
|
copyin_offset(OFFSET_CodeHeap_log2_segment_size);
|
||||||
|
|
||||||
|
copyin_offset(OFFSET_GrowableArray_CodeHeap_data);
|
||||||
|
copyin_offset(OFFSET_GrowableArray_CodeHeap_len);
|
||||||
|
|
||||||
copyin_offset(OFFSET_VirtualSpace_low);
|
copyin_offset(OFFSET_VirtualSpace_low);
|
||||||
copyin_offset(OFFSET_VirtualSpace_high);
|
copyin_offset(OFFSET_VirtualSpace_high);
|
||||||
|
|
||||||
@ -152,24 +157,13 @@ dtrace:helper:ustack:
|
|||||||
#error "Don't know architecture"
|
#error "Don't know architecture"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
this->CodeCache_heap_address = copyin_ptr(&``__1cJCodeCacheF_heap_);
|
/* Read address of GrowableArray<CodeHeaps*> */
|
||||||
|
this->code_heaps_address = copyin_ptr(&``__1cJCodeCacheG_heaps_);
|
||||||
|
/* Read address of _data array field in GrowableArray */
|
||||||
|
this->code_heaps_array_address = copyin_ptr(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_data);
|
||||||
|
this->number_of_heaps = copyin_uint32(this->code_heaps_address + OFFSET_GrowableArray_CodeHeap_len);
|
||||||
|
|
||||||
this->CodeCache_low = copyin_ptr(this->CodeCache_heap_address +
|
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
|
||||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
|
||||||
|
|
||||||
this->CodeCache_high = copyin_ptr(this->CodeCache_heap_address +
|
|
||||||
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
|
||||||
|
|
||||||
this->CodeCache_segmap_low = copyin_ptr(this->CodeCache_heap_address +
|
|
||||||
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
|
|
||||||
|
|
||||||
this->CodeCache_segmap_high = copyin_ptr(this->CodeCache_heap_address +
|
|
||||||
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_high);
|
|
||||||
|
|
||||||
this->CodeHeap_log2_segment_size = copyin_uint32(
|
|
||||||
this->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size);
|
|
||||||
|
|
||||||
this->Method_vtbl = (pointer) &``__1cGMethodG__vtbl_;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Get Java heap bounds
|
* Get Java heap bounds
|
||||||
@ -186,21 +180,152 @@ dtrace:helper:ustack:
|
|||||||
this->heap_end = this->heap_start + this->heap_size;
|
this->heap_end = this->heap_start + this->heap_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* IMPORTANT: At the moment the ustack helper supports up to 5 code heaps in
|
||||||
|
* the code cache. If more code heaps are added the following probes have to
|
||||||
|
* be extended. This is done by simply adding a probe to get the heap bounds
|
||||||
|
* and another probe to set the code heap address of the newly created heap.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* ----- BEGIN: Get bounds of code heaps -----
|
||||||
|
*/
|
||||||
dtrace:helper:ustack:
|
dtrace:helper:ustack:
|
||||||
/!this->done &&
|
/init_done < 1 && this->number_of_heaps >= 1 && !this->done/
|
||||||
this->CodeCache_low <= this->pc && this->pc < this->CodeCache_high/
|
{
|
||||||
|
MARK_LINE;
|
||||||
|
/* CodeHeap 1 */
|
||||||
|
init_done = 1;
|
||||||
|
this->code_heap1_address = copyin_ptr(this->code_heaps_array_address);
|
||||||
|
this->code_heap1_low = copyin_ptr(this->code_heap1_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||||
|
this->code_heap1_high = copyin_ptr(this->code_heap1_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||||
|
}
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/init_done < 2 && this->number_of_heaps >= 2 && !this->done/
|
||||||
|
{
|
||||||
|
MARK_LINE;
|
||||||
|
/* CodeHeap 2 */
|
||||||
|
init_done = 2;
|
||||||
|
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||||
|
this->code_heap2_address = copyin_ptr(this->code_heaps_array_address);
|
||||||
|
this->code_heap2_low = copyin_ptr(this->code_heap2_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||||
|
this->code_heap2_high = copyin_ptr(this->code_heap2_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||||
|
}
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/init_done < 3 && this->number_of_heaps >= 3 && !this->done/
|
||||||
|
{
|
||||||
|
/* CodeHeap 3 */
|
||||||
|
init_done = 3;
|
||||||
|
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||||
|
this->code_heap3_address = copyin_ptr(this->code_heaps_array_address);
|
||||||
|
this->code_heap3_low = copyin_ptr(this->code_heap3_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||||
|
this->code_heap3_high = copyin_ptr(this->code_heap3_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||||
|
}
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/init_done < 4 && this->number_of_heaps >= 4 && !this->done/
|
||||||
|
{
|
||||||
|
/* CodeHeap 4 */
|
||||||
|
init_done = 4;
|
||||||
|
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||||
|
this->code_heap4_address = copyin_ptr(this->code_heaps_array_address);
|
||||||
|
this->code_heap4_low = copyin_ptr(this->code_heap4_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||||
|
this->code_heap4_high = copyin_ptr(this->code_heap4_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||||
|
}
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/init_done < 5 && this->number_of_heaps >= 5 && !this->done/
|
||||||
|
{
|
||||||
|
/* CodeHeap 5 */
|
||||||
|
init_done = 5;
|
||||||
|
this->code_heaps_array_address = this->code_heaps_array_address + POINTER_SIZE;
|
||||||
|
this->code_heap5_address = copyin_ptr(this->code_heaps_array_address);
|
||||||
|
this->code_heap5_low = copyin_ptr(this->code_heap5_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||||
|
this->code_heap5_high = copyin_ptr(this->code_heap5_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_high);
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
* ----- END: Get bounds of code heaps -----
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* ----- BEGIN: Get address of the code heap pc points to -----
|
||||||
|
*/
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/!this->done && this->number_of_heaps >= 1 && this->code_heap1_low <= this->pc && this->pc < this->code_heap1_high/
|
||||||
{
|
{
|
||||||
MARK_LINE;
|
MARK_LINE;
|
||||||
this->codecache = 1;
|
this->codecache = 1;
|
||||||
|
this->code_heap_address = this->code_heap1_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/!this->done && this->number_of_heaps >= 2 && this->code_heap2_low <= this->pc && this->pc < this->code_heap2_high/
|
||||||
|
{
|
||||||
|
MARK_LINE;
|
||||||
|
this->codecache = 1;
|
||||||
|
this->code_heap_address = this->code_heap2_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/!this->done && this->number_of_heaps >= 3 && this->code_heap3_low <= this->pc && this->pc < this->code_heap3_high/
|
||||||
|
{
|
||||||
|
MARK_LINE;
|
||||||
|
this->codecache = 1;
|
||||||
|
this->code_heap_address = this->code_heap3_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/!this->done && this->number_of_heaps >= 4 && this->code_heap4_low <= this->pc && this->pc < this->code_heap4_high/
|
||||||
|
{
|
||||||
|
MARK_LINE;
|
||||||
|
this->codecache = 1;
|
||||||
|
this->code_heap_address = this->code_heap4_address;
|
||||||
|
}
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/!this->done && this->number_of_heaps >= 5 && this->code_heap5_low <= this->pc && this->pc < this->code_heap5_high/
|
||||||
|
{
|
||||||
|
MARK_LINE;
|
||||||
|
this->codecache = 1;
|
||||||
|
this->code_heap_address = this->code_heap5_address;
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
* ----- END: Get address of the code heap pc points to -----
|
||||||
|
*/
|
||||||
|
|
||||||
|
dtrace:helper:ustack:
|
||||||
|
/!this->done && this->codecache/
|
||||||
|
{
|
||||||
|
MARK_LINE;
|
||||||
|
/*
|
||||||
|
* Get code heap configuration
|
||||||
|
*/
|
||||||
|
this->code_heap_low = copyin_ptr(this->code_heap_address +
|
||||||
|
OFFSET_CodeHeap_memory + OFFSET_VirtualSpace_low);
|
||||||
|
this->code_heap_segmap_low = copyin_ptr(this->code_heap_address +
|
||||||
|
OFFSET_CodeHeap_segmap + OFFSET_VirtualSpace_low);
|
||||||
|
this->code_heap_log2_segment_size = copyin_uint32(
|
||||||
|
this->code_heap_address + OFFSET_CodeHeap_log2_segment_size);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Find start.
|
* Find start
|
||||||
*/
|
*/
|
||||||
this->segment = (this->pc - this->CodeCache_low) >>
|
this->segment = (this->pc - this->code_heap_low) >>
|
||||||
this->CodeHeap_log2_segment_size;
|
this->code_heap_log2_segment_size;
|
||||||
this->block = this->CodeCache_segmap_low;
|
this->block = this->code_heap_segmap_low;
|
||||||
this->tag = copyin_uchar(this->block + this->segment);
|
this->tag = copyin_uchar(this->block + this->segment);
|
||||||
"second";
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dtrace:helper:ustack:
|
dtrace:helper:ustack:
|
||||||
@ -255,8 +380,8 @@ dtrace:helper:ustack:
|
|||||||
/!this->done && this->codecache/
|
/!this->done && this->codecache/
|
||||||
{
|
{
|
||||||
MARK_LINE;
|
MARK_LINE;
|
||||||
this->block = this->CodeCache_low +
|
this->block = this->code_heap_low +
|
||||||
(this->segment << this->CodeHeap_log2_segment_size);
|
(this->segment << this->code_heap_log2_segment_size);
|
||||||
this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
|
this->used = copyin_uint32(this->block + OFFSET_HeapBlockHeader_used);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -150,16 +150,18 @@ struct jvm_agent {
|
|||||||
uint64_t Use_Compressed_Oops_address;
|
uint64_t Use_Compressed_Oops_address;
|
||||||
uint64_t Universe_narrow_oop_base_address;
|
uint64_t Universe_narrow_oop_base_address;
|
||||||
uint64_t Universe_narrow_oop_shift_address;
|
uint64_t Universe_narrow_oop_shift_address;
|
||||||
uint64_t CodeCache_heap_address;
|
uint64_t CodeCache_heaps_address;
|
||||||
|
|
||||||
/* Volatiles */
|
/* Volatiles */
|
||||||
uint8_t Use_Compressed_Oops;
|
uint8_t Use_Compressed_Oops;
|
||||||
uint64_t Universe_narrow_oop_base;
|
uint64_t Universe_narrow_oop_base;
|
||||||
uint32_t Universe_narrow_oop_shift;
|
uint32_t Universe_narrow_oop_shift;
|
||||||
uint64_t CodeCache_low;
|
// Code cache heaps
|
||||||
uint64_t CodeCache_high;
|
int32_t Number_of_heaps;
|
||||||
uint64_t CodeCache_segmap_low;
|
uint64_t* Heap_low;
|
||||||
uint64_t CodeCache_segmap_high;
|
uint64_t* Heap_high;
|
||||||
|
uint64_t* Heap_segmap_low;
|
||||||
|
uint64_t* Heap_segmap_high;
|
||||||
|
|
||||||
int32_t SIZE_CodeCache_log2_segment;
|
int32_t SIZE_CodeCache_log2_segment;
|
||||||
|
|
||||||
@ -278,8 +280,9 @@ static int parse_vmstructs(jvm_agent_t* J) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
|
if (vmp->typeName[0] == 'C' && strcmp("CodeCache", vmp->typeName) == 0) {
|
||||||
if (strcmp("_heap", vmp->fieldName) == 0) {
|
/* Read _heaps field of type GrowableArray<CodeHeaps*>* */
|
||||||
err = read_pointer(J, vmp->address, &J->CodeCache_heap_address);
|
if (strcmp("_heaps", vmp->fieldName) == 0) {
|
||||||
|
err = read_pointer(J, vmp->address, &J->CodeCache_heaps_address);
|
||||||
}
|
}
|
||||||
} else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
|
} else if (vmp->typeName[0] == 'U' && strcmp("Universe", vmp->typeName) == 0) {
|
||||||
if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
|
if (strcmp("_narrow_oop._base", vmp->fieldName) == 0) {
|
||||||
@ -318,7 +321,9 @@ static int find_symbol(jvm_agent_t* J, const char *name, uint64_t* valuep) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int read_volatiles(jvm_agent_t* J) {
|
static int read_volatiles(jvm_agent_t* J) {
|
||||||
uint64_t ptr;
|
int i;
|
||||||
|
uint64_t array_data;
|
||||||
|
uint64_t code_heap_address;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
|
err = find_symbol(J, "UseCompressedOops", &J->Use_Compressed_Oops_address);
|
||||||
@ -334,20 +339,43 @@ static int read_volatiles(jvm_agent_t* J) {
|
|||||||
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
|
err = ps_pread(J->P, J->Universe_narrow_oop_shift_address, &J->Universe_narrow_oop_shift, sizeof(uint32_t));
|
||||||
CHECK_FAIL(err);
|
CHECK_FAIL(err);
|
||||||
|
|
||||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
/* CodeCache_heaps_address points to GrowableArray<CodeHeaps*>, read _data field
|
||||||
OFFSET_VirtualSpace_low, &J->CodeCache_low);
|
pointing to the first entry of type CodeCache* in the array */
|
||||||
CHECK_FAIL(err);
|
err = read_pointer(J, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_data, &array_data);
|
||||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_memory +
|
/* Read _len field containing the number of code heaps */
|
||||||
OFFSET_VirtualSpace_high, &J->CodeCache_high);
|
err = ps_pread(J->P, J->CodeCache_heaps_address + OFFSET_GrowableArray_CodeHeap_len,
|
||||||
CHECK_FAIL(err);
|
&J->Number_of_heaps, sizeof(J->Number_of_heaps));
|
||||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
|
|
||||||
OFFSET_VirtualSpace_low, &J->CodeCache_segmap_low);
|
|
||||||
CHECK_FAIL(err);
|
|
||||||
err = read_pointer(J, J->CodeCache_heap_address + OFFSET_CodeHeap_segmap +
|
|
||||||
OFFSET_VirtualSpace_high, &J->CodeCache_segmap_high);
|
|
||||||
CHECK_FAIL(err);
|
|
||||||
|
|
||||||
err = ps_pread(J->P, J->CodeCache_heap_address + OFFSET_CodeHeap_log2_segment_size,
|
/* Allocate memory for heap configurations */
|
||||||
|
J->Heap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||||
|
J->Heap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||||
|
J->Heap_segmap_low = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||||
|
J->Heap_segmap_high = (jvm_agent_t*)calloc(J->Number_of_heaps, sizeof(uint64_t));
|
||||||
|
|
||||||
|
/* Read code heap configurations */
|
||||||
|
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||||
|
/* Read address of heap */
|
||||||
|
err = read_pointer(J, array_data, &code_heap_address);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
|
||||||
|
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
|
||||||
|
OFFSET_VirtualSpace_low, &J->Heap_low[i]);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_memory +
|
||||||
|
OFFSET_VirtualSpace_high, &J->Heap_high[i]);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
|
||||||
|
OFFSET_VirtualSpace_low, &J->Heap_segmap_low[i]);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
err = read_pointer(J, code_heap_address + OFFSET_CodeHeap_segmap +
|
||||||
|
OFFSET_VirtualSpace_high, &J->Heap_segmap_high[i]);
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
|
||||||
|
/* Increment pointer to next entry */
|
||||||
|
array_data = array_data + POINTER_SIZE;
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ps_pread(J->P, code_heap_address + OFFSET_CodeHeap_log2_segment_size,
|
||||||
&J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
|
&J->SIZE_CodeCache_log2_segment, sizeof(J->SIZE_CodeCache_log2_segment));
|
||||||
CHECK_FAIL(err);
|
CHECK_FAIL(err);
|
||||||
|
|
||||||
@ -357,46 +385,57 @@ static int read_volatiles(jvm_agent_t* J) {
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int codeheap_contains(int heap_num, jvm_agent_t* J, uint64_t ptr) {
|
||||||
|
return (J->Heap_low[heap_num] <= ptr && ptr < J->Heap_high[heap_num]);
|
||||||
|
}
|
||||||
|
|
||||||
static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
|
static int codecache_contains(jvm_agent_t* J, uint64_t ptr) {
|
||||||
/* make sure the code cache is up to date */
|
int i;
|
||||||
return (J->CodeCache_low <= ptr && ptr < J->CodeCache_high);
|
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||||
|
if (codeheap_contains(i, J, ptr)) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint64_t segment_for(jvm_agent_t* J, uint64_t p) {
|
static uint64_t segment_for(int heap_num, jvm_agent_t* J, uint64_t p) {
|
||||||
return (p - J->CodeCache_low) >> J->SIZE_CodeCache_log2_segment;
|
return (p - J->Heap_low[heap_num]) >> J->SIZE_CodeCache_log2_segment;
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint64_t block_at(jvm_agent_t* J, int i) {
|
static uint64_t block_at(int heap_num, jvm_agent_t* J, int i) {
|
||||||
return J->CodeCache_low + (i << J->SIZE_CodeCache_log2_segment);
|
return J->Heap_low[heap_num] + (i << J->SIZE_CodeCache_log2_segment);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
|
static int find_start(jvm_agent_t* J, uint64_t ptr, uint64_t *startp) {
|
||||||
int err;
|
int err;
|
||||||
|
int i;
|
||||||
|
|
||||||
*startp = 0;
|
for (i = 0; i < J->Number_of_heaps; ++i) {
|
||||||
if (J->CodeCache_low <= ptr && ptr < J->CodeCache_high) {
|
*startp = 0;
|
||||||
int32_t used;
|
if (codeheap_contains(i, J, ptr)) {
|
||||||
uint64_t segment = segment_for(J, ptr);
|
int32_t used;
|
||||||
uint64_t block = J->CodeCache_segmap_low;
|
uint64_t segment = segment_for(i, J, ptr);
|
||||||
uint8_t tag;
|
uint64_t block = J->Heap_segmap_low[i];
|
||||||
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
uint8_t tag;
|
||||||
CHECK_FAIL(err);
|
|
||||||
if (tag == 0xff)
|
|
||||||
return PS_OK;
|
|
||||||
while (tag > 0) {
|
|
||||||
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
||||||
CHECK_FAIL(err);
|
CHECK_FAIL(err);
|
||||||
segment -= tag;
|
if (tag == 0xff)
|
||||||
}
|
return PS_OK;
|
||||||
block = block_at(J, segment);
|
while (tag > 0) {
|
||||||
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
|
err = ps_pread(J->P, block + segment, &tag, sizeof(tag));
|
||||||
CHECK_FAIL(err);
|
CHECK_FAIL(err);
|
||||||
if (used) {
|
segment -= tag;
|
||||||
*startp = block + SIZE_HeapBlockHeader;
|
}
|
||||||
|
block = block_at(i, J, segment);
|
||||||
|
err = ps_pread(J->P, block + OFFSET_HeapBlockHeader_used, &used, sizeof(used));
|
||||||
|
CHECK_FAIL(err);
|
||||||
|
if (used) {
|
||||||
|
*startp = block + SIZE_HeapBlockHeader;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
return PS_OK;
|
||||||
}
|
}
|
||||||
return PS_OK;
|
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -3129,8 +3129,7 @@ bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  return true;
  return true;
}
}

char* os::reserve_memory_special(size_t size, size_t alignment, char* addr,
                                 bool exec) {
char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on Solaris.");
  fatal("os::reserve_memory_special should not be called on Solaris.");
  return NULL;
  return NULL;
}
}

@ -265,7 +265,7 @@ frame os::current_frame() {
                        CAST_FROM_FN_PTR(address, os::current_frame));
                        CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    // stack is not walkable
    return frame(NULL, NULL, NULL);
    return frame(NULL, NULL, false);
  } else {
  } else {
    return os::get_sender_for_C_frame(&myframe);
    return os::get_sender_for_C_frame(&myframe);
  }
  }
@ -327,7 +327,7 @@ void Canonicalizer::do_ShiftOp (ShiftOp* x) {
  if (t2->is_constant()) {
  if (t2->is_constant()) {
    switch (t2->tag()) {
    switch (t2->tag()) {
    case intTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return;
    case intTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return;
    case longTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return;
    case longTag : if (t2->as_LongConstant()->value() == (jlong)0) set_canonical(x->x()); return;
    default : ShouldNotReachHere();
    default : ShouldNotReachHere();
    }
    }
  }
  }
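The longTag arm above is the bug being fixed: a constant long shift amount is represented as a LongConstant, so the old code's t2->as_IntConstant() returns NULL and the subsequent ->value() call dereferences a null pointer; the replacement fetches t2->as_LongConstant() and compares its value against (jlong)0 before canonicalizing the shift away.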
@ -808,28 +808,41 @@ void Canonicalizer::do_ExceptionObject(ExceptionObject* x) {}
|
|||||||
|
|
||||||
static bool match_index_and_scale(Instruction* instr,
|
static bool match_index_and_scale(Instruction* instr,
|
||||||
Instruction** index,
|
Instruction** index,
|
||||||
int* log2_scale,
|
int* log2_scale) {
|
||||||
Instruction** instr_to_unpin) {
|
// Skip conversion ops. This works only on 32bit because of the implicit l2i that the
|
||||||
*instr_to_unpin = NULL;
|
// unsafe performs.
|
||||||
|
#ifndef _LP64
|
||||||
// Skip conversion ops
|
|
||||||
Convert* convert = instr->as_Convert();
|
Convert* convert = instr->as_Convert();
|
||||||
if (convert != NULL) {
|
if (convert != NULL && convert->op() == Bytecodes::_i2l) {
|
||||||
|
assert(convert->value()->type() == intType, "invalid input type");
|
||||||
instr = convert->value();
|
instr = convert->value();
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
ShiftOp* shift = instr->as_ShiftOp();
|
ShiftOp* shift = instr->as_ShiftOp();
|
||||||
if (shift != NULL) {
|
if (shift != NULL) {
|
||||||
if (shift->is_pinned()) {
|
if (shift->op() == Bytecodes::_lshl) {
|
||||||
*instr_to_unpin = shift;
|
assert(shift->x()->type() == longType, "invalid input type");
|
||||||
|
} else {
|
||||||
|
#ifndef _LP64
|
||||||
|
if (shift->op() == Bytecodes::_ishl) {
|
||||||
|
assert(shift->x()->type() == intType, "invalid input type");
|
||||||
|
} else {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
return false;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Constant shift value?
|
// Constant shift value?
|
||||||
Constant* con = shift->y()->as_Constant();
|
Constant* con = shift->y()->as_Constant();
|
||||||
if (con == NULL) return false;
|
if (con == NULL) return false;
|
||||||
// Well-known type and value?
|
// Well-known type and value?
|
||||||
IntConstant* val = con->type()->as_IntConstant();
|
IntConstant* val = con->type()->as_IntConstant();
|
||||||
if (val == NULL) return false;
|
assert(val != NULL, "Should be an int constant");
|
||||||
if (shift->x()->type() != intType) return false;
|
|
||||||
*index = shift->x();
|
*index = shift->x();
|
||||||
int tmp_scale = val->value();
|
int tmp_scale = val->value();
|
||||||
if (tmp_scale >= 0 && tmp_scale < 4) {
|
if (tmp_scale >= 0 && tmp_scale < 4) {
|
||||||
@ -842,31 +855,42 @@ static bool match_index_and_scale(Instruction* instr,
|
|||||||
|
|
||||||
ArithmeticOp* arith = instr->as_ArithmeticOp();
|
ArithmeticOp* arith = instr->as_ArithmeticOp();
|
||||||
if (arith != NULL) {
|
if (arith != NULL) {
|
||||||
if (arith->is_pinned()) {
|
// See if either arg is a known constant
|
||||||
*instr_to_unpin = arith;
|
Constant* con = arith->x()->as_Constant();
|
||||||
|
if (con != NULL) {
|
||||||
|
*index = arith->y();
|
||||||
|
} else {
|
||||||
|
con = arith->y()->as_Constant();
|
||||||
|
if (con == NULL) return false;
|
||||||
|
*index = arith->x();
|
||||||
}
|
}
|
||||||
|
long const_value;
|
||||||
// Check for integer multiply
|
// Check for integer multiply
|
||||||
if (arith->op() == Bytecodes::_imul) {
|
if (arith->op() == Bytecodes::_lmul) {
|
||||||
// See if either arg is a known constant
|
assert((*index)->type() == longType, "invalid input type");
|
||||||
Constant* con = arith->x()->as_Constant();
|
LongConstant* val = con->type()->as_LongConstant();
|
||||||
if (con != NULL) {
|
assert(val != NULL, "expecting a long constant");
|
||||||
*index = arith->y();
|
const_value = val->value();
|
||||||
|
} else {
|
||||||
|
#ifndef _LP64
|
||||||
|
if (arith->op() == Bytecodes::_imul) {
|
||||||
|
assert((*index)->type() == intType, "invalid input type");
|
||||||
|
IntConstant* val = con->type()->as_IntConstant();
|
||||||
|
assert(val != NULL, "expecting an int constant");
|
||||||
|
const_value = val->value();
|
||||||
} else {
|
} else {
|
||||||
con = arith->y()->as_Constant();
|
return false;
|
||||||
if (con == NULL) return false;
|
|
||||||
*index = arith->x();
|
|
||||||
}
|
|
||||||
if ((*index)->type() != intType) return false;
|
|
||||||
// Well-known type and value?
|
|
||||||
IntConstant* val = con->type()->as_IntConstant();
|
|
||||||
if (val == NULL) return false;
|
|
||||||
switch (val->value()) {
|
|
||||||
case 1: *log2_scale = 0; return true;
|
|
||||||
case 2: *log2_scale = 1; return true;
|
|
||||||
case 4: *log2_scale = 2; return true;
|
|
||||||
case 8: *log2_scale = 3; return true;
|
|
||||||
default: return false;
|
|
||||||
}
|
}
|
||||||
|
#else
|
||||||
|
return false;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
switch (const_value) {
|
||||||
|
case 1: *log2_scale = 0; return true;
|
||||||
|
case 2: *log2_scale = 1; return true;
|
||||||
|
case 4: *log2_scale = 2; return true;
|
||||||
|
case 8: *log2_scale = 3; return true;
|
||||||
|
default: return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
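Net effect of the matcher above: an unsafe raw access whose address is built as base + index * {1,2,4,8} or base + (index << {0..3}) is recognized and folded into a scaled address. A trivial self-contained illustration of the resulting address form (the function name is illustrative only):

#include <stdint.h>

// Address form produced once match_index_and_scale() succeeds:
// constant scales 1, 2, 4, 8 correspond to log2_scale 0..3.
static uint64_t unsafe_raw_address(uint64_t base, uint64_t index, int log2_scale) {
  return base + (index << log2_scale);
}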
@ -879,29 +903,37 @@ static bool match(UnsafeRawOp* x,
|
|||||||
Instruction** base,
|
Instruction** base,
|
||||||
Instruction** index,
|
Instruction** index,
|
||||||
int* log2_scale) {
|
int* log2_scale) {
|
||||||
Instruction* instr_to_unpin = NULL;
|
|
||||||
ArithmeticOp* root = x->base()->as_ArithmeticOp();
|
ArithmeticOp* root = x->base()->as_ArithmeticOp();
|
||||||
if (root == NULL) return false;
|
if (root == NULL) return false;
|
||||||
// Limit ourselves to addition for now
|
// Limit ourselves to addition for now
|
||||||
if (root->op() != Bytecodes::_ladd) return false;
|
if (root->op() != Bytecodes::_ladd) return false;
|
||||||
|
|
||||||
|
bool match_found = false;
|
||||||
// Try to find shift or scale op
|
// Try to find shift or scale op
|
||||||
if (match_index_and_scale(root->y(), index, log2_scale, &instr_to_unpin)) {
|
if (match_index_and_scale(root->y(), index, log2_scale)) {
|
||||||
*base = root->x();
|
*base = root->x();
|
||||||
} else if (match_index_and_scale(root->x(), index, log2_scale, &instr_to_unpin)) {
|
match_found = true;
|
||||||
|
} else if (match_index_and_scale(root->x(), index, log2_scale)) {
|
||||||
*base = root->y();
|
*base = root->y();
|
||||||
} else if (root->y()->as_Convert() != NULL) {
|
match_found = true;
|
||||||
|
} else if (NOT_LP64(root->y()->as_Convert() != NULL) LP64_ONLY(false)) {
|
||||||
|
// Skipping i2l works only on 32bit because of the implicit l2i that the unsafe performs.
|
||||||
|
// 64bit needs a real sign-extending conversion.
|
||||||
Convert* convert = root->y()->as_Convert();
|
Convert* convert = root->y()->as_Convert();
|
||||||
if (convert->op() == Bytecodes::_i2l && convert->value()->type() == intType) {
|
if (convert->op() == Bytecodes::_i2l) {
|
||||||
|
assert(convert->value()->type() == intType, "should be an int");
|
||||||
// pick base and index, setting scale at 1
|
// pick base and index, setting scale at 1
|
||||||
*base = root->x();
|
*base = root->x();
|
||||||
*index = convert->value();
|
*index = convert->value();
|
||||||
*log2_scale = 0;
|
*log2_scale = 0;
|
||||||
} else {
|
match_found = true;
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
} else {
|
}
|
||||||
// doesn't match any expected sequences
|
// The default solution
|
||||||
return false;
|
if (!match_found) {
|
||||||
|
*base = root->x();
|
||||||
|
*index = root->y();
|
||||||
|
*log2_scale = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the value is pinned then it will be always be computed so
|
// If the value is pinned then it will be always be computed so
|
||||||
|
@ -76,6 +76,11 @@ void Compiler::initialize() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int Compiler::code_buffer_size() {
|
||||||
|
assert(SegmentedCodeCache, "Should be only used with a segmented code cache");
|
||||||
|
return Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
|
||||||
|
}
|
||||||
|
|
||||||
BufferBlob* Compiler::init_buffer_blob() {
|
BufferBlob* Compiler::init_buffer_blob() {
|
||||||
// Allocate buffer blob once at startup since allocation for each
|
// Allocate buffer blob once at startup since allocation for each
|
||||||
// compilation seems to be too expensive (at least on Intel win32).
|
// compilation seems to be too expensive (at least on Intel win32).
|
||||||
|
@ -54,6 +54,9 @@ class Compiler: public AbstractCompiler {
|
|||||||
|
|
||||||
// Print compilation timers and statistics
|
// Print compilation timers and statistics
|
||||||
virtual void print_timers();
|
virtual void print_timers();
|
||||||
|
|
||||||
|
// Size of the code buffer
|
||||||
|
static int code_buffer_size();
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif // SHARE_VM_C1_C1_COMPILER_HPP
|
#endif // SHARE_VM_C1_C1_COMPILER_HPP
|
||||||
|
@ -2045,6 +2045,8 @@ void LIRGenerator::do_RoundFP(RoundFP* x) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Here UnsafeGetRaw may have x->base() and x->index() be int or long
|
||||||
|
// on both 64 and 32 bits. Expecting x->base() to be always long on 64bit.
|
||||||
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
|
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
|
||||||
LIRItem base(x->base(), this);
|
LIRItem base(x->base(), this);
|
||||||
LIRItem idx(this);
|
LIRItem idx(this);
|
||||||
@ -2059,50 +2061,73 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
|
|||||||
|
|
||||||
int log2_scale = 0;
|
int log2_scale = 0;
|
||||||
if (x->has_index()) {
|
if (x->has_index()) {
|
||||||
assert(x->index()->type()->tag() == intTag, "should not find non-int index");
|
|
||||||
log2_scale = x->log2_scale();
|
log2_scale = x->log2_scale();
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(!x->has_index() || idx.value() == x->index(), "should match");
|
assert(!x->has_index() || idx.value() == x->index(), "should match");
|
||||||
|
|
||||||
LIR_Opr base_op = base.result();
|
LIR_Opr base_op = base.result();
|
||||||
|
LIR_Opr index_op = idx.result();
|
||||||
#ifndef _LP64
|
#ifndef _LP64
|
||||||
if (x->base()->type()->tag() == longTag) {
|
if (x->base()->type()->tag() == longTag) {
|
||||||
base_op = new_register(T_INT);
|
base_op = new_register(T_INT);
|
||||||
__ convert(Bytecodes::_l2i, base.result(), base_op);
|
__ convert(Bytecodes::_l2i, base.result(), base_op);
|
||||||
} else {
|
|
||||||
assert(x->base()->type()->tag() == intTag, "must be");
|
|
||||||
}
|
}
|
||||||
|
if (x->has_index()) {
|
||||||
|
if (x->index()->type()->tag() == longTag) {
|
||||||
|
LIR_Opr long_index_op = index_op;
|
||||||
|
if (x->index()->type()->is_constant()) {
|
||||||
|
long_index_op = new_register(T_LONG);
|
||||||
|
__ move(index_op, long_index_op);
|
||||||
|
}
|
||||||
|
index_op = new_register(T_INT);
|
||||||
|
__ convert(Bytecodes::_l2i, long_index_op, index_op);
|
||||||
|
} else {
|
||||||
|
assert(x->index()->type()->tag() == intTag, "must be");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// At this point base and index should be all ints.
|
||||||
|
assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
|
||||||
|
assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
|
||||||
|
#else
|
||||||
|
if (x->has_index()) {
|
||||||
|
if (x->index()->type()->tag() == intTag) {
|
||||||
|
if (!x->index()->type()->is_constant()) {
|
||||||
|
index_op = new_register(T_LONG);
|
||||||
|
__ convert(Bytecodes::_i2l, idx.result(), index_op);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
assert(x->index()->type()->tag() == longTag, "must be");
|
||||||
|
if (x->index()->type()->is_constant()) {
|
||||||
|
index_op = new_register(T_LONG);
|
||||||
|
__ move(idx.result(), index_op);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// At this point base is a long non-constant
|
||||||
|
// Index is a long register or a int constant.
|
||||||
|
// We allow the constant to stay an int because that would allow us a more compact encoding by
|
||||||
|
// embedding an immediate offset in the address expression. If we have a long constant, we have to
|
||||||
|
// move it into a register first.
|
||||||
|
assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
|
||||||
|
assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
|
||||||
|
(index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
BasicType dst_type = x->basic_type();
|
BasicType dst_type = x->basic_type();
|
||||||
LIR_Opr index_op = idx.result();
|
|
||||||
|
|
||||||
LIR_Address* addr;
|
LIR_Address* addr;
|
||||||
if (index_op->is_constant()) {
|
if (index_op->is_constant()) {
|
||||||
assert(log2_scale == 0, "must not have a scale");
|
assert(log2_scale == 0, "must not have a scale");
|
||||||
|
assert(index_op->type() == T_INT, "only int constants supported");
|
||||||
addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
|
addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
|
||||||
} else {
|
} else {
|
||||||
#ifdef X86
|
#ifdef X86
|
||||||
#ifdef _LP64
|
|
||||||
if (!index_op->is_illegal() && index_op->type() == T_INT) {
|
|
||||||
LIR_Opr tmp = new_pointer_register();
|
|
||||||
__ convert(Bytecodes::_i2l, index_op, tmp);
|
|
||||||
index_op = tmp;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
|
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
|
||||||
#elif defined(ARM)
|
#elif defined(ARM)
|
||||||
addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
|
addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
|
||||||
#else
|
#else
|
||||||
if (index_op->is_illegal() || log2_scale == 0) {
|
if (index_op->is_illegal() || log2_scale == 0) {
|
||||||
#ifdef _LP64
|
|
||||||
if (!index_op->is_illegal() && index_op->type() == T_INT) {
|
|
||||||
LIR_Opr tmp = new_pointer_register();
|
|
||||||
__ convert(Bytecodes::_i2l, index_op, tmp);
|
|
||||||
index_op = tmp;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
addr = new LIR_Address(base_op, index_op, dst_type);
|
addr = new LIR_Address(base_op, index_op, dst_type);
|
||||||
} else {
|
} else {
|
||||||
LIR_Opr tmp = new_pointer_register();
|
LIR_Opr tmp = new_pointer_register();
|
||||||
@ -2129,7 +2154,6 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
|
|||||||
BasicType type = x->basic_type();
|
BasicType type = x->basic_type();
|
||||||
|
|
||||||
if (x->has_index()) {
|
if (x->has_index()) {
|
||||||
assert(x->index()->type()->tag() == intTag, "should not find non-int index");
|
|
||||||
log2_scale = x->log2_scale();
|
log2_scale = x->log2_scale();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2152,38 +2176,39 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
|
|||||||
set_no_result(x);
|
set_no_result(x);
|
||||||
|
|
||||||
LIR_Opr base_op = base.result();
|
LIR_Opr base_op = base.result();
|
||||||
|
LIR_Opr index_op = idx.result();
|
||||||
|
|
||||||
#ifndef _LP64
|
#ifndef _LP64
|
||||||
if (x->base()->type()->tag() == longTag) {
|
if (x->base()->type()->tag() == longTag) {
|
||||||
base_op = new_register(T_INT);
|
base_op = new_register(T_INT);
|
||||||
__ convert(Bytecodes::_l2i, base.result(), base_op);
|
__ convert(Bytecodes::_l2i, base.result(), base_op);
|
||||||
} else {
|
|
||||||
assert(x->base()->type()->tag() == intTag, "must be");
|
|
||||||
}
|
}
|
||||||
|
if (x->has_index()) {
|
||||||
|
if (x->index()->type()->tag() == longTag) {
|
||||||
|
index_op = new_register(T_INT);
|
||||||
|
__ convert(Bytecodes::_l2i, idx.result(), index_op);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// At this point base and index should be all ints and not constants
|
||||||
|
assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
|
||||||
|
assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
|
||||||
|
#else
|
||||||
|
if (x->has_index()) {
|
||||||
|
if (x->index()->type()->tag() == intTag) {
|
||||||
|
index_op = new_register(T_LONG);
|
||||||
|
__ convert(Bytecodes::_i2l, idx.result(), index_op);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// At this point base and index are long and non-constant
|
||||||
|
assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
|
||||||
|
assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
LIR_Opr index_op = idx.result();
|
|
||||||
if (log2_scale != 0) {
|
if (log2_scale != 0) {
|
||||||
// temporary fix (platform dependent code without shift on Intel would be better)
|
// temporary fix (platform dependent code without shift on Intel would be better)
|
||||||
index_op = new_pointer_register();
|
// TODO: ARM also allows embedded shift in the address
|
||||||
#ifdef _LP64
|
|
||||||
if(idx.result()->type() == T_INT) {
|
|
||||||
__ convert(Bytecodes::_i2l, idx.result(), index_op);
|
|
||||||
} else {
|
|
||||||
#endif
|
|
||||||
// TODO: ARM also allows embedded shift in the address
|
|
||||||
__ move(idx.result(), index_op);
|
|
||||||
#ifdef _LP64
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
__ shift_left(index_op, log2_scale, index_op);
|
__ shift_left(index_op, log2_scale, index_op);
|
||||||
}
|
}
|
||||||
#ifdef _LP64
|
|
||||||
else if(!index_op->is_illegal() && index_op->type() == T_INT) {
|
|
||||||
LIR_Opr tmp = new_pointer_register();
|
|
||||||
__ convert(Bytecodes::_i2l, index_op, tmp);
|
|
||||||
index_op = tmp;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
|
LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
|
||||||
__ move(value.result(), addr);
|
__ move(value.result(), addr);
|
||||||
|
@ -287,9 +287,6 @@
|
|||||||
develop(bool, InstallMethods, true, \
|
develop(bool, InstallMethods, true, \
|
||||||
"Install methods at the end of successful compilations") \
|
"Install methods at the end of successful compilations") \
|
||||||
\
|
\
|
||||||
product(intx, CompilationRepeat, 0, \
|
|
||||||
"Number of times to recompile method before returning result") \
|
|
||||||
\
|
|
||||||
develop(intx, NMethodSizeLimit, (64*K)*wordSize, \
|
develop(intx, NMethodSizeLimit, (64*K)*wordSize, \
|
||||||
"Maximum size of a compiled method.") \
|
"Maximum size of a compiled method.") \
|
||||||
\
|
\
|
||||||
|
@ -34,6 +34,7 @@
#include "ci/ciUtilities.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compileLog.hpp"

@ -1085,7 +1086,7 @@ void ciEnv::register_method(ciMethod* target,
  } else {
  } else {
    // The CodeCache is full. Print out warning and disable compilation.
    // The CodeCache is full. Print out warning and disable compilation.
    record_failure("code cache is full");
    record_failure("code cache is full");
    CompileBroker::handle_full_code_cache();
    CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
  }
  }
}
}
@ -1694,8 +1694,6 @@ void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_da
|
|||||||
constantPoolHandle cp (THREAD, _method->constants());
|
constantPoolHandle cp (THREAD, _method->constants());
|
||||||
|
|
||||||
for(int i = 0; i < exlength; i++) {
|
for(int i = 0; i < exlength; i++) {
|
||||||
//reacquire the table in case a GC happened
|
|
||||||
ExceptionTable exhandlers(_method());
|
|
||||||
u2 start_pc = exhandlers.start_pc(i);
|
u2 start_pc = exhandlers.start_pc(i);
|
||||||
u2 end_pc = exhandlers.end_pc(i);
|
u2 end_pc = exhandlers.end_pc(i);
|
||||||
u2 handler_pc = exhandlers.handler_pc(i);
|
u2 handler_pc = exhandlers.handler_pc(i);
|
||||||
@ -1803,8 +1801,6 @@ void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, S
|
|||||||
ExceptionTable exhandlers(_method());
|
ExceptionTable exhandlers(_method());
|
||||||
int exlength = exhandlers.length();
|
int exlength = exhandlers.length();
|
||||||
for(int i = 0; i < exlength; i++) {
|
for(int i = 0; i < exlength; i++) {
|
||||||
//reacquire the table in case a GC happened
|
|
||||||
ExceptionTable exhandlers(_method());
|
|
||||||
u2 start_pc = exhandlers.start_pc(i);
|
u2 start_pc = exhandlers.start_pc(i);
|
||||||
u2 end_pc = exhandlers.end_pc(i);
|
u2 end_pc = exhandlers.end_pc(i);
|
||||||
u2 handler_pc = exhandlers.handler_pc(i);
|
u2 handler_pc = exhandlers.handler_pc(i);
|
||||||
|
@ -229,14 +229,11 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
|
|||||||
return blob;
|
return blob;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
|
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
|
||||||
void* p = CodeCache::allocate(size, is_critical);
|
return CodeCache::allocate(size, CodeBlobType::NonMethod, is_critical);
|
||||||
return p;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void BufferBlob::free(BufferBlob *blob) {
|
||||||
void BufferBlob::free( BufferBlob *blob ) {
|
|
||||||
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
|
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
|
||||||
blob->flush();
|
blob->flush();
|
||||||
{
|
{
|
||||||
@ -299,7 +296,6 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
|
|||||||
return blob;
|
return blob;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
//----------------------------------------------------------------------------------------------------
|
//----------------------------------------------------------------------------------------------------
|
||||||
// Implementation of RuntimeStub
|
// Implementation of RuntimeStub
|
||||||
|
|
||||||
@ -340,14 +336,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
|
|||||||
|
|
||||||
|
|
||||||
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
|
void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
|
||||||
void* p = CodeCache::allocate(size, true);
|
void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
|
||||||
if (!p) fatal("Initial size of CodeCache is too small");
|
if (!p) fatal("Initial size of CodeCache is too small");
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
||||||
// operator new shared by all singletons:
|
// operator new shared by all singletons:
|
||||||
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
|
void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
|
||||||
void* p = CodeCache::allocate(size, true);
|
void* p = CodeCache::allocate(size, CodeBlobType::NonMethod, true);
|
||||||
if (!p) fatal("Initial size of CodeCache is too small");
|
if (!p) fatal("Initial size of CodeCache is too small");
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
@ -30,6 +30,18 @@
#include "runtime/frame.hpp"
#include "runtime/frame.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.hpp"

// CodeBlob Types
// Used in the CodeCache to assign CodeBlobs to different CodeHeaps
struct CodeBlobType {
  enum {
    MethodNonProfiled = 0, // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods)
    MethodProfiled    = 1, // Execution level 2 and 3 (profiled) nmethods
    NonMethod         = 2, // Non-methods like Buffers, Adapters and Runtime Stubs
    All               = 3, // All types (No code cache segmentation)
    NumTypes          = 4  // Number of CodeBlobTypes
  };
};

// CodeBlob - superclass for all entries in the CodeCache.
// CodeBlob - superclass for all entries in the CodeCache.
//
//
// Suptypes are:
// Suptypes are:
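The new constants are passed wherever code is placed into or looked up in the code cache. Two uses taken from hunks later in this change (codeBlob.cpp and vtableStubs.cpp), shown here only for orientation:

  // Non-method code (buffers, adapters, runtime stubs) is allocated in the NonMethod heap:
  return CodeCache::allocate(size, CodeBlobType::NonMethod, is_critical);
  // A failed non-method allocation is reported with the same constant:
  CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);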
@ -385,9 +397,6 @@ class DeoptimizationBlob: public SingletonBlob {
|
|||||||
return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
|
return (pc == unpack_pc || (pc + frame::pc_return_offset) == unpack_pc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// GC for args
|
// GC for args
|
||||||
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
|
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* Nothing to do */ }
|
||||||
|
|
||||||
|
File diff suppressed because it is too large
@ -26,105 +26,117 @@
|
|||||||
#define SHARE_VM_CODE_CODECACHE_HPP
|
#define SHARE_VM_CODE_CODECACHE_HPP
|
||||||
|
|
||||||
#include "code/codeBlob.hpp"
|
#include "code/codeBlob.hpp"
|
||||||
|
#include "code/nmethod.hpp"
|
||||||
#include "memory/allocation.hpp"
|
#include "memory/allocation.hpp"
|
||||||
#include "memory/heap.hpp"
|
#include "memory/heap.hpp"
|
||||||
#include "oops/instanceKlass.hpp"
|
#include "oops/instanceKlass.hpp"
|
||||||
#include "oops/oopsHierarchy.hpp"
|
#include "oops/oopsHierarchy.hpp"
|
||||||
|
#include "runtime/mutexLocker.hpp"
|
||||||
|
|
||||||
// The CodeCache implements the code cache for various pieces of generated
// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.
// The entries in the CodeCache are all CodeBlob's.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace the zone has at table for
//     locating a method given a addess of an instruction.

// -- Implementation --
// The CodeCache consists of one or more CodeHeaps, each of which contains
// CodeBlobs of a specific CodeBlobType. Currently heaps for the following
// types are available:
//  - Non-methods: Non-methods like Buffers, Adapters and Runtime Stubs
//  - Profiled nmethods: nmethods that are profiled, i.e., those
//    executed at level 2 or 3
//  - Non-Profiled nmethods: nmethods that are not profiled, i.e., those
//    executed at level 1 or 4 and native methods
//  - All: Used for code of all types if code cache segmentation is disabled.
//
// In the rare case of the non-method code heap getting full, non-method code
// will be stored in the non-profiled code heap as a fallback solution.
//
// Depending on the availability of compilers and TieredCompilation there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
// for details).
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default
// code cache segmentation is turned on if TieredCompilation is enabled and
// ReservedCodeCacheSize >= 240 MB.
//
// All methods of the CodeCache accepting a CodeBlobType only apply to
// CodeBlobs of the given type. For example, iteration over the
// CodeBlobs of a specific type can be done by using CodeCache::first_blob(..)
// and CodeCache::next_blob(..) and providing the corresponding CodeBlobType.
//
// IMPORTANT: If you add new CodeHeaps to the code cache or change the
// existing ones, make sure to adapt the dtrace scripts (jhelper.d) for
// Solaris and BSD.
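The default policy in the comment above can be restated as a small self-contained sketch (illustrative only; the authoritative logic is part of the codeCache.cpp diff that is suppressed above as too large):

#include <cstddef>

// Sketch of the stated default: segmentation is on only for a tiered,
// sufficiently large reserved code cache.
static bool use_segmented_code_cache(bool tiered_compilation,
                                     size_t reserved_code_cache_size) {
  const size_t min_size = 240 * 1024 * 1024;  // 240 MB threshold from the comment
  return tiered_compilation && reserved_code_cache_size >= min_size;
}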
class OopClosure;
|
class OopClosure;
|
||||||
class DepChange;
|
class DepChange;
|
||||||
|
|
||||||
class CodeCache : AllStatic {
|
class CodeCache : AllStatic {
|
||||||
friend class VMStructs;
|
friend class VMStructs;
|
||||||
|
friend class NMethodIterator;
|
||||||
private:
|
private:
|
||||||
// CodeHeap is malloc()'ed at startup and never deleted during shutdown,
|
// CodeHeaps of the cache
|
||||||
// so that the generated assembly code is always there when it's needed.
|
static GrowableArray<CodeHeap*>* _heaps;
|
||||||
// This may cause memory leak, but is necessary, for now. See 4423824,
|
|
||||||
// 4422213 or 4436291 for details.
|
static address _low_bound; // Lower bound of CodeHeap addresses
|
||||||
static CodeHeap * _heap;
|
static address _high_bound; // Upper bound of CodeHeap addresses
|
||||||
static int _number_of_blobs;
|
static int _number_of_blobs; // Total number of CodeBlobs in the cache
|
||||||
static int _number_of_adapters;
|
static int _number_of_adapters; // Total number of Adapters in the cache
|
||||||
static int _number_of_nmethods;
|
static int _number_of_nmethods; // Total number of nmethods in the cache
|
||||||
static int _number_of_nmethods_with_dependencies;
|
static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
|
||||||
static bool _needs_cache_clean;
|
static bool _needs_cache_clean; // True if inline caches of the nmethods needs to be flushed
|
||||||
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
|
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
|
||||||
|
static int _codemem_full_count; // Number of times a CodeHeap in the cache was full
|
||||||
|
|
||||||
static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
|
static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
|
||||||
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
|
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
|
||||||
|
|
||||||
static int _codemem_full_count;
|
// CodeHeap management
|
||||||
static size_t bytes_allocated_in_freelist() { return _heap->allocated_in_freelist(); }
|
static void initialize_heaps(); // Initializes the CodeHeaps
|
||||||
static int allocated_segments() { return _heap->allocated_segments(); }
|
// Creates a new heap with the given name and size, containing CodeBlobs of the given type
|
||||||
static size_t freelist_length() { return _heap->freelist_length(); }
|
static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
|
||||||
|
static CodeHeap* get_code_heap(CodeBlob* cb); // Returns the CodeHeap for the given CodeBlob
|
||||||
|
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
|
||||||
|
static bool heap_available(int code_blob_type); // Returns true if a CodeHeap for the given CodeBlobType is available
|
||||||
|
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
|
||||||
|
|
||||||
|
// Iteration
|
||||||
|
static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap
|
||||||
|
static CodeBlob* first_blob(int code_blob_type); // Returns the first CodeBlob of the given type
|
||||||
|
static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the first alive CodeBlob on the given CodeHeap
|
||||||
|
static CodeBlob* next_blob(CodeBlob* cb); // Returns the next CodeBlob of the given type succeeding the given CodeBlob
|
||||||
|
|
||||||
|
static size_t bytes_allocated_in_freelists();
|
||||||
|
static int allocated_segments();
|
||||||
|
static size_t freelists_length();
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
// Initialization
|
// Initialization
|
||||||
static void initialize();
|
static void initialize();
|
||||||
|
|
||||||
static void report_codemem_full();
|
|
||||||
|
|
||||||
// Allocation/administration
|
// Allocation/administration
|
||||||
static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
|
static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
|
||||||
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
|
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
|
||||||
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
|
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
|
||||||
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
|
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
|
||||||
static void free(CodeBlob* cb); // frees a CodeBlob
|
static void free(CodeBlob* cb); // frees a CodeBlob
|
||||||
static bool contains(void *p); // returns whether p is included
|
static bool contains(void *p); // returns whether p is included
|
||||||
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
|
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
|
||||||
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
|
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
|
||||||
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
|
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
|
||||||
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
|
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
|
||||||
|
|
||||||
// Lookup
|
// Lookup
|
||||||
static CodeBlob* find_blob(void* start);
|
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
|
||||||
static nmethod* find_nmethod(void* start);
|
static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method
|
||||||
|
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
|
||||||
|
|
||||||
// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
|
static int nof_blobs() { return _number_of_blobs; } // Returns the total number of CodeBlobs in the cache
|
||||||
// what you are doing)
|
static int nof_adapters() { return _number_of_adapters; } // Returns the total number of Adapters in the cache
|
||||||
static CodeBlob* find_blob_unsafe(void* start) {
|
static int nof_nmethods() { return _number_of_nmethods; } // Returns the total number of nmethods in the cache
|
||||||
// NMT can walk the stack before code cache is created
|
|
||||||
if (_heap == NULL) return NULL;
|
|
||||||
|
|
||||||
CodeBlob* result = (CodeBlob*)_heap->find_start(start);
|
|
||||||
// this assert is too strong because the heap code will return the
|
|
||||||
// heapblock containing start. That block can often be larger than
|
|
||||||
// the codeBlob itself. If you look up an address that is within
|
|
||||||
// the heapblock but not in the codeBlob you will assert.
|
|
||||||
//
|
|
||||||
// Most things will not lookup such bad addresses. However
|
|
||||||
// AsyncGetCallTrace can see intermediate frames and get that kind
|
|
||||||
// of invalid address and so can a developer using hsfind.
|
|
||||||
//
|
|
||||||
// The more correct answer is to return NULL if blob_contains() returns
|
|
||||||
// false.
|
|
||||||
// assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
|
|
||||||
|
|
||||||
if (result != NULL && !result->blob_contains((address)start)) {
|
|
||||||
result = NULL;
|
|
||||||
}
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iteration
|
|
||||||
static CodeBlob* first();
|
|
||||||
static CodeBlob* next (CodeBlob* cb);
|
|
||||||
static CodeBlob* alive(CodeBlob *cb);
|
|
||||||
static nmethod* alive_nmethod(CodeBlob *cb);
|
|
||||||
static nmethod* first_nmethod();
|
|
||||||
static nmethod* next_nmethod (CodeBlob* cb);
|
|
||||||
static int nof_blobs() { return _number_of_blobs; }
|
|
||||||
static int nof_adapters() { return _number_of_adapters; }
|
|
||||||
static int nof_nmethods() { return _number_of_nmethods; }
|
|
||||||
|
|
||||||
// GC support
|
// GC support
|
||||||
static void gc_epilogue();
|
static void gc_epilogue();
|
||||||
@ -137,7 +149,7 @@ class CodeCache : AllStatic {
|
|||||||
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
|
static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
|
||||||
static void scavenge_root_nmethods_do(CodeBlobClosure* f);
|
static void scavenge_root_nmethods_do(CodeBlobClosure* f);
|
||||||
|
|
||||||
static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
|
static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
|
||||||
static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
|
static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
|
||||||
static void add_scavenge_root_nmethod(nmethod* nm);
|
static void add_scavenge_root_nmethod(nmethod* nm);
|
||||||
static void drop_scavenge_root_nmethod(nmethod* nm);
|
static void drop_scavenge_root_nmethod(nmethod* nm);
|
||||||
@ -151,27 +163,47 @@ class CodeCache : AllStatic {
|
|||||||
static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
|
static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
|
||||||
static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
|
static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
|
||||||
static void log_state(outputStream* st);
|
static void log_state(outputStream* st);
|
||||||
|
static const char* get_code_heap_name(int code_blob_type) { return (heap_available(code_blob_type) ? get_code_heap(code_blob_type)->name() : "Unused"); }
|
||||||
|
static void report_codemem_full(int code_blob_type, bool print);
|
||||||
|
|
||||||
// Dcmd (Diagnostic commands)
|
// Dcmd (Diagnostic commands)
|
||||||
static void print_codelist(outputStream* st);
|
static void print_codelist(outputStream* st);
|
||||||
static void print_layout(outputStream* st);
|
static void print_layout(outputStream* st);
|
||||||
|
|
||||||
// The full limits of the codeCache
|
// The full limits of the codeCache
|
||||||
static address low_bound() { return (address) _heap->low_boundary(); }
|
static address low_bound() { return _low_bound; }
|
||||||
static address high_bound() { return (address) _heap->high_boundary(); }
|
static address high_bound() { return _high_bound; }
|
||||||
static address high() { return (address) _heap->high(); }
|
|
||||||
|
|
||||||
// Profiling
|
// Profiling
|
||||||
static address first_address(); // first address used for CodeBlobs
|
static size_t capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->capacity() : 0; }
|
||||||
static address last_address(); // last address used for CodeBlobs
|
static size_t capacity();
|
||||||
static size_t capacity() { return _heap->capacity(); }
|
static size_t unallocated_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->unallocated_capacity() : 0; }
|
||||||
static size_t max_capacity() { return _heap->max_capacity(); }
|
static size_t unallocated_capacity();
|
||||||
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
|
static size_t max_capacity(int code_blob_type) { return heap_available(code_blob_type) ? get_code_heap(code_blob_type)->max_capacity() : 0; }
|
||||||
static double reverse_free_ratio();
|
static size_t max_capacity();
|
||||||
|
|
||||||
static bool needs_cache_clean() { return _needs_cache_clean; }
|
static bool is_full(int* code_blob_type);
|
||||||
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
static double reverse_free_ratio(int code_blob_type);
|
||||||
static void clear_inline_caches(); // clear all inline caches
|
|
||||||
|
static bool needs_cache_clean() { return _needs_cache_clean; }
|
||||||
|
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
|
||||||
|
static void clear_inline_caches(); // clear all inline caches
|
||||||
|
|
||||||
|
  // Returns the CodeBlobType for nmethods of the given compilation level
  static int get_code_blob_type(int comp_level) {
    if (comp_level == CompLevel_none ||
        comp_level == CompLevel_simple ||
        comp_level == CompLevel_full_optimization) {
      // Non profiled methods
      return CodeBlobType::MethodNonProfiled;
    } else if (comp_level == CompLevel_limited_profile ||
               comp_level == CompLevel_full_profile) {
      // Profiled methods
      return CodeBlobType::MethodProfiled;
    }
    ShouldNotReachHere();
    return 0;
  }

  static void verify_clean_inline_caches();
  static void verify_clean_inline_caches();
  static void verify_icholder_relocations();
  static void verify_icholder_relocations();
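Usage note for get_code_blob_type() above: levels CompLevel_none, CompLevel_simple and CompLevel_full_optimization select the MethodNonProfiled heap, CompLevel_limited_profile and CompLevel_full_profile select the MethodProfiled heap, and any other level is a programming error. This is the mapping the ciEnv::register_method and nmethod::operator new hunks elsewhere in this change use to pick the target heap from comp_level.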
@ -187,10 +219,87 @@ class CodeCache : AllStatic {
  static void make_marked_nmethods_zombies();
  static void make_marked_nmethods_zombies();
  static void make_marked_nmethods_not_entrant();
  static void make_marked_nmethods_not_entrant();

  // tells how many nmethods have dependencies
  // tells how many nmethods have dependencies
  static int number_of_nmethods_with_dependencies();
  static int number_of_nmethods_with_dependencies();

  static int get_codemem_full_count() { return _codemem_full_count; }
  static int get_codemem_full_count() { return _codemem_full_count; }
};
};


// Iterator to iterate over nmethods in the CodeCache.
class NMethodIterator : public StackObj {
 private:
  CodeBlob* _code_blob;   // Current CodeBlob
  int _code_blob_type;    // Refers to current CodeHeap

 public:
  NMethodIterator() {
    initialize(NULL); // Set to NULL, initialized by first call to next()
  }

  NMethodIterator(nmethod* nm) {
    initialize(nm);
  }

  // Advance iterator to next nmethod
  bool next() {
    assert_locked_or_safepoint(CodeCache_lock);
    assert(_code_blob_type < CodeBlobType::NumTypes, "end reached");

    bool result = next_nmethod();
    while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) {
      // Advance to next code heap if segmented code cache
      _code_blob_type++;
      result = next_nmethod();
    }
    return result;
  }

  // Advance iterator to next alive nmethod
  bool next_alive() {
    bool result = next();
    while(result && !_code_blob->is_alive()) {
      result = next();
    }
    return result;
  }

  bool end() const   { return _code_blob == NULL; }
  nmethod* method() const { return (nmethod*)_code_blob; }

 private:
  // Initialize iterator to given nmethod
  void initialize(nmethod* nm) {
    _code_blob = (CodeBlob*)nm;
    if (!SegmentedCodeCache) {
      // Iterate over all CodeBlobs
      _code_blob_type = CodeBlobType::All;
    } else if (nm != NULL) {
      _code_blob_type = CodeCache::get_code_blob_type(nm->comp_level());
    } else {
      // Only iterate over method code heaps, starting with non-profiled
      _code_blob_type = CodeBlobType::MethodNonProfiled;
    }
  }

  // Advance iterator to the next nmethod in the current code heap
  bool next_nmethod() {
    // Get first method CodeBlob
    if (_code_blob == NULL) {
      _code_blob = CodeCache::first_blob(_code_blob_type);
      if (_code_blob == NULL) {
        return false;
      } else if (_code_blob->is_nmethod()) {
        return true;
      }
    }
    // Search for next method CodeBlob
    _code_blob = CodeCache::next_blob(_code_blob);
    while (_code_blob != NULL && !_code_blob->is_nmethod()) {
      _code_blob = CodeCache::next_blob(_code_blob);
    }
    return _code_blob != NULL;
  }
};

#endif // SHARE_VM_CODE_CODECACHE_HPP
#endif // SHARE_VM_CODE_CODECACHE_HPP
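Typical use of the new NMethodIterator, matching the nmethod.cpp hunk further down (sketch only; as asserted in next(), the CodeCache_lock must be held or the VM must be at a safepoint):

  NMethodIterator iter;
  while (iter.next_alive()) {     // next() visits every nmethod, next_alive() skips dead ones
    nmethod* nm = iter.method();
    // ... inspect or update nm ...
  }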
@ -500,7 +500,7 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
|
|||||||
CodeOffsets offsets;
|
CodeOffsets offsets;
|
||||||
offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
|
offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
|
||||||
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
|
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
|
||||||
nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
|
nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
|
||||||
compile_id, &offsets,
|
compile_id, &offsets,
|
||||||
code_buffer, frame_size,
|
code_buffer, frame_size,
|
||||||
basic_lock_owner_sp_offset,
|
basic_lock_owner_sp_offset,
|
||||||
@ -538,7 +538,7 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
|
|||||||
offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
|
offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
|
||||||
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
|
offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
|
||||||
|
|
||||||
nm = new (nmethod_size) nmethod(method(), nmethod_size,
|
nm = new (nmethod_size, CompLevel_none) nmethod(method(), nmethod_size,
|
||||||
&offsets, code_buffer, frame_size);
|
&offsets, code_buffer, frame_size);
|
||||||
|
|
||||||
NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
|
NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
|
||||||
@ -586,7 +586,7 @@ nmethod* nmethod::new_nmethod(methodHandle method,
|
|||||||
+ round_to(nul_chk_table->size_in_bytes(), oopSize)
|
+ round_to(nul_chk_table->size_in_bytes(), oopSize)
|
||||||
+ round_to(debug_info->data_size() , oopSize);
|
+ round_to(debug_info->data_size() , oopSize);
|
||||||
|
|
||||||
nm = new (nmethod_size)
|
nm = new (nmethod_size, comp_level)
|
||||||
nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
|
nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
|
||||||
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
|
orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
|
||||||
oop_maps,
|
oop_maps,
|
||||||
@ -803,9 +803,11 @@ nmethod::nmethod(
|
|||||||
}
|
}
|
||||||
#endif // def HAVE_DTRACE_H
|
#endif // def HAVE_DTRACE_H
|
||||||
|
|
||||||
void* nmethod::operator new(size_t size, int nmethod_size) throw() {
|
void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
|
||||||
// Not critical, may return null if there is too little continuous memory
|
// With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
|
||||||
return CodeCache::allocate(nmethod_size);
|
// with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
|
||||||
|
bool is_critical = SegmentedCodeCache;
|
||||||
|
return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
|
||||||
}
|
}
|
||||||
|
|
||||||
nmethod::nmethod(
|
nmethod::nmethod(
|
||||||
@ -1530,7 +1532,7 @@ void nmethod::flush() {
|
|||||||
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
|
Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, this);
|
||||||
if (PrintMethodFlushing) {
|
if (PrintMethodFlushing) {
|
||||||
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
|
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
|
||||||
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
|
_compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(_comp_level))/1024);
|
||||||
}
|
}
|
||||||
|
|
||||||
// We need to deallocate any ExceptionCache data.
|
// We need to deallocate any ExceptionCache data.
|
||||||
@ -1557,7 +1559,6 @@ void nmethod::flush() {
|
|||||||
CodeCache::free(this);
|
CodeCache::free(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
//
|
//
|
||||||
// Notify all classes this nmethod is dependent on that it is no
|
// Notify all classes this nmethod is dependent on that it is no
|
||||||
// longer dependent. This should only be called in two situations.
|
// longer dependent. This should only be called in two situations.
|
||||||
@ -2418,15 +2419,18 @@ void nmethod::check_all_dependencies(DepChange& changes) {
|
|||||||
// Turn off dependency tracing while actually testing dependencies.
|
// Turn off dependency tracing while actually testing dependencies.
|
||||||
NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
|
NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
|
||||||
|
|
||||||
typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
|
typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
|
||||||
&DependencySignature::equals, 11027> DepTable;
|
&DependencySignature::equals, 11027> DepTable;
|
||||||
|
|
||||||
DepTable* table = new DepTable();
|
DepTable* table = new DepTable();
|
||||||
|
|
||||||
// Iterate over live nmethods and check dependencies of all nmethods that are not
|
// Iterate over live nmethods and check dependencies of all nmethods that are not
|
||||||
// marked for deoptimization. A particular dependency is only checked once.
|
// marked for deoptimization. A particular dependency is only checked once.
|
||||||
for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
|
NMethodIterator iter;
|
||||||
if (!nm->is_marked_for_deoptimization()) {
|
while(iter.next()) {
|
||||||
|
nmethod* nm = iter.method();
|
||||||
|
// Only notify for live nmethods
|
||||||
|
if (nm->is_alive() && !nm->is_marked_for_deoptimization()) {
|
||||||
for (Dependencies::DepStream deps(nm); deps.next(); ) {
|
for (Dependencies::DepStream deps(nm); deps.next(); ) {
|
||||||
// Construct abstraction of a dependency.
|
// Construct abstraction of a dependency.
|
||||||
DependencySignature* current_sig = new DependencySignature(deps);
|
DependencySignature* current_sig = new DependencySignature(deps);
|
||||||
|
@ -288,7 +288,7 @@ class nmethod : public CodeBlob {
|
|||||||
int comp_level);
|
int comp_level);
|
||||||
|
|
||||||
// helper methods
|
// helper methods
|
||||||
void* operator new(size_t size, int nmethod_size) throw();
|
void* operator new(size_t size, int nmethod_size, int comp_level) throw();
|
||||||
|
|
||||||
const char* reloc_string_for(u_char* begin, u_char* end);
|
const char* reloc_string_for(u_char* begin, u_char* end);
|
||||||
// Returns true if this thread changed the state of the nmethod or
|
// Returns true if this thread changed the state of the nmethod or
|
||||||
|
@ -63,7 +63,7 @@ void* VtableStub::operator new(size_t size, int code_size) throw() {
|
|||||||
// If changing the name, update the other file accordingly.
|
// If changing the name, update the other file accordingly.
|
||||||
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
|
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
|
||||||
if (blob == NULL) {
|
if (blob == NULL) {
|
||||||
CompileBroker::handle_full_code_cache();
|
CompileBroker::handle_full_code_cache(CodeBlobType::NonMethod);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
_chunk = blob->content_begin();
|
_chunk = blob->content_begin();
|
||||||
|
@ -783,18 +783,22 @@ CompileQueue* CompileBroker::compile_queue(int comp_level) {
|
|||||||
|
|
||||||
|
|
||||||
void CompileBroker::print_compile_queues(outputStream* st) {
|
void CompileBroker::print_compile_queues(outputStream* st) {
|
||||||
_c1_compile_queue->print(st);
|
MutexLocker locker(MethodCompileQueue_lock);
|
||||||
_c2_compile_queue->print(st);
|
if (_c1_compile_queue != NULL) {
|
||||||
|
_c1_compile_queue->print(st);
|
||||||
|
}
|
||||||
|
if (_c2_compile_queue != NULL) {
|
||||||
|
_c2_compile_queue->print(st);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void CompileQueue::print(outputStream* st) {
|
void CompileQueue::print(outputStream* st) {
|
||||||
assert_locked_or_safepoint(lock());
|
assert(lock()->owned_by_self(), "must own lock");
|
||||||
st->print_cr("Contents of %s", name());
|
st->print_cr("Contents of %s", name());
|
||||||
st->print_cr("----------------------------");
|
st->print_cr("----------------------------");
|
||||||
CompileTask* task = _first;
|
CompileTask* task = _first;
|
||||||
if (task == NULL) {
|
if (task == NULL) {
|
||||||
st->print_cr("Empty");;
|
st->print_cr("Empty");
|
||||||
} else {
|
} else {
|
||||||
while (task != NULL) {
|
while (task != NULL) {
|
||||||
task->print_compilation(st, NULL, true, true);
|
task->print_compilation(st, NULL, true, true);
|
||||||
@ -1206,6 +1210,12 @@ void CompileBroker::compile_method_base(methodHandle method,
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (TieredCompilation) {
|
||||||
|
// Tiered policy requires MethodCounters to exist before adding a method to
|
||||||
|
// the queue. Create if we don't have them yet.
|
||||||
|
method->get_method_counters(thread);
|
||||||
|
}
|
||||||
|
|
||||||
// Outputs from the following MutexLocker block:
|
// Outputs from the following MutexLocker block:
|
||||||
CompileTask* task = NULL;
|
CompileTask* task = NULL;
|
||||||
bool blocking = false;
|
bool blocking = false;
|
||||||
@ -1747,9 +1757,11 @@ void CompileBroker::compiler_thread_loop() {
|
|||||||
// We need this HandleMark to avoid leaking VM handles.
|
// We need this HandleMark to avoid leaking VM handles.
|
||||||
HandleMark hm(thread);
|
HandleMark hm(thread);
|
||||||
|
|
||||||
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
|
// Check if the CodeCache is full
|
||||||
// the code cache is really full
|
int code_blob_type = 0;
|
||||||
handle_full_code_cache();
|
if (CodeCache::is_full(&code_blob_type)) {
|
||||||
|
// The CodeHeap for code_blob_type is really full
|
||||||
|
handle_full_code_cache(code_blob_type);
|
||||||
}
|
}
|
||||||
|
|
||||||
CompileTask* task = queue->get();
|
CompileTask* task = queue->get();
|
||||||
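CodeCache::is_full(&code_blob_type) replaces the old single-heap free-space check (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace). Its implementation is in the suppressed codeCache.cpp diff, so the following is only a plausible sketch of the contract implied by this call site: report whether some heap has run out of space and say which CodeBlobType it serves.

#include <cstddef>

// Hedged sketch only -- not the real CodeCache::is_full().
struct HeapInfo { int code_blob_type; size_t unallocated; };

static bool code_cache_is_full(const HeapInfo* heaps, int n, size_t min_free,
                               int* full_type) {
  for (int i = 0; i < n; i++) {
    if (heaps[i].unallocated < min_free) {
      *full_type = heaps[i].code_blob_type;
      return true;
    }
  }
  return false;
}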
@ -1777,22 +1789,6 @@ void CompileBroker::compiler_thread_loop() {
       if (method()->number_of_breakpoints() == 0) {
         // Compile the method.
         if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
-#ifdef COMPILER1
-          // Allow repeating compilations for the purpose of benchmarking
-          // compile speed. This is not useful for customers.
-          if (CompilationRepeat != 0) {
-            int compile_count = CompilationRepeat;
-            while (compile_count > 0) {
-              invoke_compiler_on_method(task);
-              nmethod* nm = method->code();
-              if (nm != NULL) {
-                nm->make_zombie();
-                method->clear_code();
-              }
-              compile_count--;
-            }
-          }
-#endif /* COMPILER1 */
           invoke_compiler_on_method(task);
         } else {
           // After compilation is disabled, remove remaining methods from queue
@ -2079,7 +2075,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
  * The CodeCache is full. Print out warning and disable compilation
  * or try code cache cleaning so compilation can continue later.
  */
-void CompileBroker::handle_full_code_cache() {
+void CompileBroker::handle_full_code_cache(int code_blob_type) {
   UseInterpreter = true;
   if (UseCompiler || AlwaysCompileLoopMethods ) {
     if (xtty != NULL) {
@ -2096,8 +2092,6 @@ void CompileBroker::handle_full_code_cache() {
       xtty->end_elem();
     }
 
-    CodeCache::report_codemem_full();
-
 #ifndef PRODUCT
     if (CompileTheWorld || ExitOnFullCodeCache) {
       codecache_print(/* detailed= */ true);
@ -2119,12 +2113,7 @@ void CompileBroker::handle_full_code_cache() {
       disable_compilation_forever();
     }
 
-    // Print warning only once
-    if (should_print_compiler_warning()) {
-      warning("CodeCache is full. Compiler has been disabled.");
-      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
-      codecache_print(/* detailed= */ true);
-    }
+    CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning());
   }
 }
 
@ -434,7 +434,7 @@ class CompileBroker: AllStatic {
   static bool is_compilation_disabled_forever() {
     return _should_compile_new_jobs == shutdown_compilaton;
   }
-  static void handle_full_code_cache();
+  static void handle_full_code_cache(int code_blob_type);
   // Ensures that warning is only printed once.
   static bool should_print_compiler_warning() {
     jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
@ -4167,7 +4167,7 @@ class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
 // been published), so we do not need to check for
 // uninitialized objects before pushing here.
 void Par_ConcMarkingClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@ -7226,7 +7226,7 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
 // isMarked() query is "safe".
 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
   // Ignore mark word because we are running concurrent with mutators
-  assert(p->is_oop_or_null(true), "expected an oop or null");
+  assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
   HeapWord* addr = (HeapWord*)p;
   assert(_span.contains(addr), "we are scanning the CMS generation");
   bool is_obj_array = false;
@ -7666,7 +7666,7 @@ void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
 }
 
 void PushAndMarkVerifyClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@ -7764,7 +7764,7 @@ void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
 
 void PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@ -7802,7 +7802,7 @@ void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p)
 
 void Par_PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@ -7879,7 +7879,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
   // path and may be at the end of the global overflow list (so
   // the mark word may be NULL).
   assert(obj->is_oop_or_null(true /* ignore mark word */),
-         "expected an oop or NULL");
+         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@ -7959,7 +7959,7 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
   // the debugger, is_oop_or_null(false) may subsequently start
   // to hold.
   assert(obj->is_oop_or_null(true),
-         "expected an oop or NULL");
+         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@ -73,7 +73,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
     } else {
       res = (PromotedObject*)(_next & next_mask);
     }
-    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
+    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res))));
     return res;
   }
   inline void setNext(PromotedObject* x) {
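
Every assertion change above follows the same pattern: the fixed message is replaced by an err_msg() that embeds the failing address. A minimal sketch of the pattern with an assumed closure name (only err_msg(), PTR_FORMAT and p2i() are the utilities actually used above):

    // Illustrative sketch, not part of the commit.
    void ExampleClosure::do_oop(oop obj) {
      assert(obj->is_oop_or_null(true /* ignore mark word */),
             err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
      HeapWord* addr = (HeapWord*)obj;
      // ... closure body continues; on failure the assert now names the bad pointer.
    }
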
@ -107,7 +107,7 @@ void CollectionSetChooser::verify() {
     HeapRegion *curr = regions_at(index++);
     guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
     guarantee(!curr->is_young(), "should not be young!");
-    guarantee(!curr->isHumongous(), "should not be humongous!");
+    guarantee(!curr->is_humongous(), "should not be humongous!");
     if (prev != NULL) {
       guarantee(order_regions(prev, curr) != 1,
                 err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
@ -149,7 +149,7 @@ void CollectionSetChooser::sort_regions() {
 
 
 void CollectionSetChooser::add_region(HeapRegion* hr) {
-  assert(!hr->isHumongous(),
+  assert(!hr->is_humongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
   _regions.append(hr);
@ -109,7 +109,7 @@ public:
   bool should_add(HeapRegion* hr) {
     assert(hr->is_marked(), "pre-condition");
     assert(!hr->is_young(), "should never consider young regions");
-    return !hr->isHumongous() &&
+    return !hr->is_humongous() &&
            hr->live_bytes() < _region_live_threshold_bytes;
   }
 
@ -910,7 +910,7 @@ bool ConcurrentMark::nextMarkBitmapIsClear() {
 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       r->note_start_of_marking();
     }
     return false;
@ -1288,6 +1288,22 @@ void ConcurrentMark::markFromRoots() {
   print_stats();
 }
 
+// Helper class to get rid of some boilerplate code.
+class G1CMTraceTime : public GCTraceTime {
+  static bool doit_and_prepend(bool doit) {
+    if (doit) {
+      gclog_or_tty->put(' ');
+    }
+    return doit;
+  }
+
+ public:
+  G1CMTraceTime(const char* title, bool doit)
+    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
+        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+  }
+};
+
 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
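
The helper added above only prepends a space to the log line and forwards the concurrent-mark timer and GC id to GCTraceTime, so call sites shrink to one scoped declaration. A minimal usage sketch (the phase label is made up; G1CMTraceTime and G1Log::finer() are from this change):

    // Illustrative sketch, not part of the commit.
    {
      G1CMTraceTime trace("GC example-phase", G1Log::finer());
      // work for the sub-phase runs here; timing and logging stop at scope exit
    }
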
@ -1341,9 +1357,13 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
     // marking due to overflowing the global mark stack.
     reset_marking_state();
   } else {
-    // Aggregate the per-task counting data that we have accumulated
-    // while marking.
-    aggregate_count_data();
+    {
+      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
+
+      // Aggregate the per-task counting data that we have accumulated
+      // while marking.
+      aggregate_count_data();
+    }
 
     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
     // We're done with marking.
@ -1398,10 +1418,10 @@ protected:
   // to 1 the bits on the region bitmap that correspond to its
   // associated "continues humongous" regions.
   void set_bit_for_region(HeapRegion* hr) {
-    assert(!hr->continuesHumongous(), "should have filtered those out");
+    assert(!hr->is_continues_humongous(), "should have filtered those out");
 
     BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
-    if (!hr->startsHumongous()) {
+    if (!hr->is_starts_humongous()) {
       // Normal (non-humongous) case: just set the bit.
       _region_bm->par_at_put(index, true);
     } else {
@ -1434,7 +1454,7 @@ public:
 
   bool doHeapRegion(HeapRegion* hr) {
 
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       // We will ignore these here and process them when their
       // associated "starts humongous" region is processed (see
       // set_bit_for_heap_region()). Note that we cannot rely on their
@ -1556,7 +1576,7 @@ public:
   int failures() const { return _failures; }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       // We will ignore these here and process them when their
       // associated "starts humongous" region is processed (see
       // set_bit_for_heap_region()). Note that we cannot rely on their
@ -1731,7 +1751,7 @@ class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
 
   bool doHeapRegion(HeapRegion* hr) {
 
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       // We will ignore these here and process them when their
       // associated "starts humongous" region is processed (see
       // set_bit_for_heap_region()). Note that we cannot rely on their
@ -1861,7 +1881,7 @@ public:
   const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; }
 
   bool doHeapRegion(HeapRegion *hr) {
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       return false;
     }
     // We use a claim value of zero here because all regions
@ -1875,8 +1895,8 @@ public:
     if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
       _freed_bytes += hr->used();
       hr->set_containing_set(NULL);
-      if (hr->isHumongous()) {
-        assert(hr->startsHumongous(), "we should only see starts humongous");
+      if (hr->is_humongous()) {
+        assert(hr->is_starts_humongous(), "we should only see starts humongous");
         _humongous_regions_removed.increment(1u, hr->capacity());
         _g1->free_humongous_region(hr, _local_cleanup_list, true);
       } else {
@ -2466,22 +2486,6 @@ void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool
   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
 }
 
-// Helper class to get rid of some boilerplate code.
-class G1RemarkGCTraceTime : public GCTraceTime {
-  static bool doit_and_prepend(bool doit) {
-    if (doit) {
-      gclog_or_tty->put(' ');
-    }
-    return doit;
-  }
-
- public:
-  G1RemarkGCTraceTime(const char* title, bool doit)
-    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
-        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
-  }
-};
-
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   if (has_overflown()) {
     // Skip processing the discovered references if we have
@ -2504,10 +2508,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
-    if (G1Log::finer()) {
-      gclog_or_tty->put(' ');
-    }
-    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id());
+    G1CMTraceTime t("GC ref-proc", G1Log::finer());
 
     ReferenceProcessor* rp = g1h->ref_processor_cm();
 
@ -2598,24 +2599,24 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
 
   // Unload Klasses, String, Symbols, Code Cache, etc.
   {
-    G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
+    G1CMTraceTime trace("Unloading", G1Log::finer());
 
     if (ClassUnloadingWithConcurrentMark) {
       bool purged_classes;
 
       {
-        G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
+        G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
         purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
       }
 
       {
-        G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
+        G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
       }
     }
 
     if (G1StringDedup::is_enabled()) {
-      G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
+      G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
       G1StringDedup::unlink(&g1_is_alive);
     }
   }
@ -2719,7 +2720,7 @@ void ConcurrentMark::checkpointRootsFinalWork() {
   HandleMark hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
-  G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
+  G1CMTraceTime trace("Finalize Marking", G1Log::finer());
 
   g1h->ensure_parsability(false);
 
@ -3191,7 +3192,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
     _cm_card_bm(cm_card_bm), _max_worker_id(max_worker_id) { }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       // We will ignore these here and process them when their
       // associated "starts humongous" region is processed.
       // Note that we cannot rely on their associated
@ -3334,6 +3335,7 @@ void ConcurrentMark::aggregate_count_data() {
   } else {
     g1_par_agg_task.work(0);
   }
+  _g1h->allocation_context_stats().update_at_remark();
 }
 
 // Clear the per-worker arrays used to store the per-region counting data
@ -3562,7 +3564,7 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
 void CMTask::setup_for_region(HeapRegion* hr) {
   assert(hr != NULL,
          "claim_region() should have filtered out NULL regions");
-  assert(!hr->continuesHumongous(),
+  assert(!hr->is_continues_humongous(),
          "claim_region() should have filtered out continues humongous regions");
 
   if (_cm->verbose_low()) {
@ -4287,7 +4289,7 @@ void CMTask::do_marking_step(double time_target_ms,
                              HR_FORMAT_PARAMS(_curr_region));
       }
 
-      assert(!_curr_region->isHumongous() || mr.start() == _curr_region->bottom(),
+      assert(!_curr_region->is_humongous() || mr.start() == _curr_region->bottom(),
              "humongous regions should go around loop once only");
 
       // Some special cases:
@ -4301,7 +4303,7 @@ void CMTask::do_marking_step(double time_target_ms,
       if (mr.is_empty()) {
         giveup_current_region();
         regular_clock_call();
-      } else if (_curr_region->isHumongous() && mr.start() == _curr_region->bottom()) {
+      } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
         if (_nextMarkBitMap->isMarked(mr.start())) {
           // The object is marked - apply the closure
           BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
@ -4748,7 +4750,7 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
   size_t remset_bytes = r->rem_set()->mem_size();
   size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
 
-  if (r->startsHumongous()) {
+  if (r->is_starts_humongous()) {
     assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
            _hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
            "they should have been zeroed after the last time we used them");
@ -4760,7 +4762,7 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
     get_hum_bytes(&used_bytes, &capacity_bytes,
                   &prev_live_bytes, &next_live_bytes);
     end = bottom + HeapRegion::GrainWords;
-  } else if (r->continuesHumongous()) {
+  } else if (r->is_continues_humongous()) {
     get_hum_bytes(&used_bytes, &capacity_bytes,
                   &prev_live_bytes, &next_live_bytes);
     assert(end == bottom + HeapRegion::GrainWords, "invariant");
@ -88,7 +88,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
   size_t region_size_bytes = mr.byte_size();
   uint index = hr->hrm_index();
 
-  assert(!hr->continuesHumongous(), "should not be HC region");
+  assert(!hr->is_continues_humongous(), "should not be HC region");
   assert(hr == g1h->heap_region_containing(start), "sanity");
   assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
   assert(marked_bytes_array != NULL, "pre-condition");
@ -277,7 +277,7 @@ inline void CMTask::deal_with_reference(oop obj) {
   ++_refs_reached;
 
   HeapWord* objAddr = (HeapWord*) obj;
-  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
+  assert(obj->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   if (_g1h->is_in_g1_reserved(objAddr)) {
     assert(obj != NULL, "null check is implicit");
     if (!_nextMarkBitMap->isMarked(objAddr)) {
@ -366,7 +366,7 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
   assert(hr != NULL, "sanity");
   // Given that we're looking for a region that contains an object
   // header it's impossible to get back a HC region.
-  assert(!hr->continuesHumongous(), "sanity");
+  assert(!hr->is_continues_humongous(), "sanity");
 
   // We cannot assert that word_size == obj->size() given that obj
   // might not be in a consistent state (another thread might be in
@ -129,8 +129,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
     // Note that we first perform the allocation and then we store the
     // region in _alloc_region. This is the reason why an active region
     // can never be empty.
-    _alloc_region = new_alloc_region;
-    _count += 1;
+    update_alloc_region(new_alloc_region);
     trace("region allocation successful");
     return result;
   } else {
@ -172,6 +171,19 @@ void G1AllocRegion::set(HeapRegion* alloc_region) {
   trace("set");
 }
 
+void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
+  trace("update");
+  // We explicitly check that the region is not empty to make sure we
+  // maintain the "the alloc region cannot be empty" invariant.
+  assert(alloc_region != NULL && !alloc_region->is_empty(),
+         ar_ext_msg(this, "pre-condition"));
+
+  _alloc_region = alloc_region;
+  _alloc_region->set_allocation_context(allocation_context());
+  _count += 1;
+  trace("updated");
+}
+
 HeapRegion* G1AllocRegion::release() {
   trace("releasing");
   HeapRegion* alloc_region = _alloc_region;
@ -225,5 +237,70 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
 G1AllocRegion::G1AllocRegion(const char* name,
                              bool bot_updates)
   : _name(name), _bot_updates(bot_updates),
-    _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
+    _alloc_region(NULL), _count(0), _used_bytes_before(0),
+    _allocation_context(AllocationContext::system()) { }
+
+
+HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
+                                                    bool force) {
+  return _g1h->new_mutator_alloc_region(word_size, force);
+}
+
+void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
+                                       size_t allocated_bytes) {
+  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
+}
+
+HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+                                                       bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+}
+
+void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                          size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForSurvived);
+}
+
+HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
+                                                  bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+}
+
+void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                     size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForTenured);
+}
+
+HeapRegion* OldGCAllocRegion::release() {
+  HeapRegion* cur = get();
+  if (cur != NULL) {
+    // Determine how far we are from the next card boundary. If it is smaller than
+    // the minimum object size we can allocate into, expand into the next card.
+    HeapWord* top = cur->top();
+    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
+
+    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
+
+    if (to_allocate_words != 0) {
+      // We are not at a card boundary. Fill up, possibly into the next, taking the
+      // end of the region and the minimum object size into account.
+      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
+                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
+
+      // Skip allocation if there is not enough space to allocate even the smallest
+      // possible object. In this case this region will not be retained, so the
+      // original problem cannot occur.
+      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
+        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
+        CollectedHeap::fill_with_object(dummy, to_allocate_words);
+      }
+    }
+  }
+  return G1AllocRegion::release();
+}
+
+
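
The release() override above pads the retained old region out to the next card boundary so remembered-set scanning and later allocations never share the last card. As a rough worked example under assumed sizes (512-byte cards for G1BlockOffsetSharedArray::N_bytes, 8-byte HeapWords): if top() sits 104 bytes into a card, the gap to the boundary is 512 - 104 = 408 bytes, i.e. 51 words, and since that is at least min_fill_size() a 51-word dummy object is allocated to fill it:

    // Illustrative arithmetic only; the constants are assumptions, not VM values.
    #include <cstddef>
    int main() {
      const size_t card_bytes = 512;   // assumed G1BlockOffsetSharedArray::N_bytes
      const size_t word_bytes = 8;     // assumed HeapWordSize on a 64-bit VM
      const size_t top_offset = 104;   // example: top() is 104 bytes into a card
      size_t to_allocate_words = (card_bytes - top_offset) / word_bytes;
      return (int)to_allocate_words;   // 51 words would be filled with a dummy object
    }
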
@ -57,6 +57,9 @@ private:
   // correct use of init() and release()).
   HeapRegion* volatile _alloc_region;
 
+  // Allocation context associated with this alloc region.
+  AllocationContext_t _allocation_context;
+
   // It keeps track of the distinct number of regions that are used
   // for allocation in the active interval of this object, i.e.,
   // between a call to init() and a call to release(). The count
@ -110,6 +113,10 @@ private:
   // else can allocate out of it.
   void retire(bool fill_up);
 
+  // After a region is allocated by alloc_new_region, this
+  // method is used to set it as the active alloc_region
+  void update_alloc_region(HeapRegion* alloc_region);
+
   // Allocate a new active region and use it to perform a word_size
   // allocation. The force parameter will be passed on to
   // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
@ -137,6 +144,9 @@ public:
     return (hr == _dummy_region) ? NULL : hr;
   }
 
+  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+  AllocationContext_t allocation_context() { return _allocation_context; }
+
   uint count() { return _count; }
 
   // The following two are the building blocks for the allocation method.
@ -182,6 +192,40 @@ public:
 #endif // G1_ALLOC_REGION_TRACING
 };
 
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  MutatorAllocRegion()
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+
+  // This specialization of release() makes sure that the last card that has
+  // been allocated into has been completely filled by a dummy object. This
+  // avoids races when remembered set scanning wants to update the BOT of the
+  // last card in the retained old gc alloc region, and allocation threads
+  // allocating into that card at the same time.
+  virtual HeapRegion* release();
+};
+
 class ar_ext_msg : public err_msg {
 public:
   ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
+
+#include "memory/allocation.hpp"
+
+typedef unsigned char AllocationContext_t;
+
+class AllocationContext : AllStatic {
+public:
+  // Currently used context
+  static AllocationContext_t current() {
+    return 0;
+  }
+  // System wide default context
+  static AllocationContext_t system() {
+    return 0;
+  }
+};
+
+class AllocationContextStats: public StackObj {
+public:
+  inline void clear() { }
+  inline void update(bool full_gc) { }
+  inline void update_at_remark() { }
+  inline bool available() { return false; }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
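
In this default implementation both current() and system() return context 0, so the context argument is a pass-through hook rather than a behavioral switch. A minimal sketch of how a context value is threaded through an allocation call (the wrapper function is assumed; AllocationContext_t, AllocationContext, mutator_alloc_region() and attempt_allocation() come from files in this change):

    // Illustrative sketch, not part of the commit.
    HeapWord* allocate_words(G1Allocator* allocator, size_t word_size) {
      AllocationContext_t context = AllocationContext::current();  // always 0 for now
      MutatorAllocRegion* region = allocator->mutator_alloc_region(context);
      return region->attempt_allocation(word_size, false /* bot_updates */);
    }
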
hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp (new file, 155 lines)
@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionSet.inline.hpp"
+
+void G1DefaultAllocator::init_mutator_alloc_region() {
+  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
+  _mutator_alloc_region.init();
+}
+
+void G1DefaultAllocator::release_mutator_alloc_region() {
+  _mutator_alloc_region.release();
+  assert(_mutator_alloc_region.get() == NULL, "post-condition");
+}
+
+void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
+                                            OldGCAllocRegion* old,
+                                            HeapRegion** retained_old) {
+  HeapRegion* retained_region = *retained_old;
+  *retained_old = NULL;
+
+  // We will discard the current GC alloc region if:
+  // a) it's in the collection set (it can happen!),
+  // b) it's already full (no point in using it),
+  // c) it's empty (this means that it was emptied during
+  // a cleanup and it should be on the free list now), or
+  // d) it's humongous (this means that it was emptied
+  // during a cleanup and was added to the free list, but
+  // has been subsequently used to allocate a humongous
+  // object that may be less than the region size).
+  if (retained_region != NULL &&
+      !retained_region->in_collection_set() &&
+      !(retained_region->top() == retained_region->end()) &&
+      !retained_region->is_empty() &&
+      !retained_region->is_humongous()) {
+    retained_region->record_top_and_timestamp();
+    // The retained region was added to the old region set when it was
+    // retired. We have to remove it now, since we don't allow regions
+    // we allocate to in the region sets. We'll re-add it later, when
+    // it's retired again.
+    _g1h->_old_set.remove(retained_region);
+    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+    retained_region->note_start_of_copying(during_im);
+    old->set(retained_region);
+    _g1h->_hr_printer.reuse(retained_region);
+    evacuation_info.set_alloc_regions_used_before(retained_region->used());
+  }
+}
+
+void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  _survivor_gc_alloc_region.init();
+  _old_gc_alloc_region.init();
+  reuse_retained_old_region(evacuation_info,
+                            &_old_gc_alloc_region,
+                            &_retained_old_gc_alloc_region);
+}
+
+void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+  AllocationContext_t context = AllocationContext::current();
+  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
+                                         old_gc_alloc_region(context)->count());
+  survivor_gc_alloc_region(context)->release();
+  // If we have an old GC alloc region to release, we'll save it in
+  // _retained_old_gc_alloc_region. If we don't
+  // _retained_old_gc_alloc_region will become NULL. This is what we
+  // want either way so no reason to check explicitly for either
+  // condition.
+  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+
+  if (ResizePLAB) {
+    _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+    _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+  }
+}
+
+void G1DefaultAllocator::abandon_gc_alloc_regions() {
+  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+  _retained_old_gc_alloc_region = NULL;
+}
+
+G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
+  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
+
+HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
+  HeapWord* obj = NULL;
+  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
+    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+
+    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
+    if (buf == NULL) {
+      return NULL; // Let caller handle allocation failure.
+    }
+    // Otherwise.
+    alloc_buf->set_word_size(gclab_word_size);
+    alloc_buf->set_buf(buf);
+
+    obj = alloc_buf->allocate(word_sz);
+    assert(obj != NULL, "buffer was definitely big enough...");
+  } else {
+    obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
+  }
+  return obj;
+}
+
+G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
+  G1ParGCAllocator(g1h),
+  _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+  _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
+
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
+
+}
+
+void G1DefaultParGCAllocator::retire_alloc_buffers() {
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    size_t waste = _alloc_buffers[ap]->words_remaining();
+    add_to_alloc_buffer_waste(waste);
+    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                               true /* end_of_gc */,
+                                               false /* retain */);
+  }
+}
hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp (new file, 242 lines)
@ -0,0 +1,242 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
|
||||||
|
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
|
||||||
|
|
||||||
|
#include "gc_implementation/g1/g1AllocationContext.hpp"
|
||||||
|
#include "gc_implementation/g1/g1AllocRegion.hpp"
|
||||||
|
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
|
||||||
|
|
||||||
|
enum GCAllocPurpose {
|
||||||
|
GCAllocForTenured,
|
||||||
|
GCAllocForSurvived,
|
||||||
|
GCAllocPurposeCount
|
||||||
|
};
|
||||||
|
|
||||||
|
// Base class for G1 allocators.
|
||||||
|
class G1Allocator : public CHeapObj<mtGC> {
|
||||||
|
friend class VMStructs;
|
||||||
|
protected:
|
||||||
|
G1CollectedHeap* _g1h;
|
||||||
|
|
||||||
|
// Outside of GC pauses, the number of bytes used in all regions other
|
||||||
|
// than the current allocation region.
|
||||||
|
size_t _summary_bytes_used;
|
||||||
|
|
||||||
|
public:
|
||||||
|
G1Allocator(G1CollectedHeap* heap) :
|
||||||
|
_g1h(heap), _summary_bytes_used(0) { }
|
||||||
|
|
||||||
|
static G1Allocator* create_allocator(G1CollectedHeap* g1h);
|
||||||
|
|
||||||
|
virtual void init_mutator_alloc_region() = 0;
|
||||||
|
virtual void release_mutator_alloc_region() = 0;
|
||||||
|
|
||||||
|
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
|
||||||
|
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
|
||||||
|
virtual void abandon_gc_alloc_regions() = 0;
|
||||||
|
|
||||||
|
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
|
||||||
|
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
|
||||||
|
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
|
||||||
|
virtual size_t used() = 0;
|
||||||
|
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
|
||||||
|
|
||||||
|
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
|
||||||
|
OldGCAllocRegion* old,
|
||||||
|
HeapRegion** retained);
|
||||||
|
|
||||||
|
size_t used_unlocked() const {
|
||||||
|
return _summary_bytes_used;
|
||||||
|
}
|
||||||
|
|
||||||
|
void increase_used(size_t bytes) {
|
||||||
|
_summary_bytes_used += bytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
void decrease_used(size_t bytes) {
|
||||||
|
assert(_summary_bytes_used >= bytes,
|
||||||
|
err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
|
||||||
|
_summary_bytes_used, bytes));
|
||||||
|
_summary_bytes_used -= bytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
void set_used(size_t bytes) {
|
||||||
|
_summary_bytes_used = bytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual HeapRegion* new_heap_region(uint hrs_index,
|
||||||
|
G1BlockOffsetSharedArray* sharedOffsetArray,
|
                                           MemRegion mr) {
    return new HeapRegion(hrs_index, sharedOffsetArray, mr);
  }
};

// The default allocator for G1.
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  virtual size_t used() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = _summary_bytes_used;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};

class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool _retired;

public:
  G1ParGCAllocBuffer(size_t gclab_word_size);
  virtual ~G1ParGCAllocBuffer() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

class G1ParGCAllocator : public CHeapObj<mtGC> {
  friend class G1ParScanThreadState;
protected:
  G1CollectedHeap* _g1h;

  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);

  virtual void retire_alloc_buffers() = 0;
  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;

public:
  G1ParGCAllocator(G1CollectedHeap* g1h) :
    _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
  }

  static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);

  size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
  size_t undo_waste() { return _undo_waste; }

  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
    HeapWord* obj = NULL;
    if (purpose == GCAllocForSurvived) {
      obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
    } else {
      obj = alloc_buffer(purpose, context)->allocate(word_sz);
    }
    if (obj != NULL) {
      return obj;
    }
    return allocate_slow(purpose, word_sz, context);
  }

  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
    if (alloc_buffer(purpose, context)->contains(obj)) {
      assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
};

class G1DefaultParGCAllocator : public G1ParGCAllocator {
  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];

public:
  G1DefaultParGCAllocator(G1CollectedHeap* g1h);

  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
    return _alloc_buffers[purpose];
  }

  virtual void retire_alloc_buffers();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
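Note: the G1ParGCAllocBuffer / G1ParGCAllocator pair above is the usual promotion-local-allocation-buffer (PLAB) pattern: a per-thread bump-pointer buffer serves the fast path, and only when it cannot satisfy a request does the allocator fall back to a slow path that retires the buffer and starts a new one, counting the leftover space as waste. The following is a minimal, self-contained sketch of that pattern in plain C++; the Plab/PlabAllocator names, the fixed buffer size and the waste policy are illustrative assumptions, not HotSpot code.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Stand-in for a promotion-local allocation buffer: a chunk of words
    // handed out by bump-pointer allocation until it is retired.
    class Plab {
      std::vector<uint64_t> _words;  // backing storage
      size_t _top;                   // next free index
    public:
      explicit Plab(size_t word_size) : _words(word_size), _top(0) {}

      // Fast path: bump the cursor if the request fits, otherwise fail.
      uint64_t* allocate(size_t word_sz) {
        if (_top + word_sz > _words.size()) return nullptr;
        uint64_t* result = &_words[_top];
        _top += word_sz;
        return result;
      }

      size_t remaining() const { return _words.size() - _top; }
    };

    // Allocator that owns the current PLAB and refills it on the slow path.
    class PlabAllocator {
      Plab*  _current;
      size_t _plab_word_size;
      size_t _waste;  // words thrown away when a buffer is retired early
    public:
      explicit PlabAllocator(size_t plab_word_size)
        : _current(new Plab(plab_word_size)), _plab_word_size(plab_word_size), _waste(0) {}
      ~PlabAllocator() { delete _current; }

      uint64_t* allocate(size_t word_sz) {
        // Fast path first, exactly like G1ParGCAllocator::allocate().
        if (uint64_t* obj = _current->allocate(word_sz)) return obj;
        return allocate_slow(word_sz);
      }

      size_t waste() const { return _waste; }

    private:
      uint64_t* allocate_slow(size_t word_sz) {
        // Retire the current buffer (its leftover space becomes waste) and
        // start a fresh one. Requests larger than a whole buffer fail here;
        // a real collector would allocate such objects outside the PLAB.
        if (word_sz > _plab_word_size) return nullptr;
        _waste += _current->remaining();
        delete _current;
        _current = new Plab(_plab_word_size);
        return _current->allocate(word_sz);
      }
    };

    int main() {
      PlabAllocator alloc(1024);  // 1024-word buffers (example size only)
      for (int i = 0; i < 100; i++) {
        uint64_t* p = alloc.allocate(48);
        assert(p != nullptr);
      }
      std::cout << "wasted words: " << alloc.waste() << "\n";
      return 0;
    }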
@@ -0,0 +1,35 @@
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"

G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
  return new G1DefaultAllocator(g1h);
}

G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
  return new G1DefaultParGCAllocator(g1h);
}
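Most of the g1CollectedHeap.cpp hunks that follow do one mechanical thing: they thread an AllocationContext_t argument through every allocation entry point so the new G1Allocator can hand back per-context allocation regions. The patch itself does not explain what contexts are used for; the sketch below only shows the plumbing shape with made-up ContextArena/Heap types and a two-context limit, none of which are HotSpot APIs.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    using AllocationContext = int;  // the real type is an opaque handle

    // One bump-pointer arena per allocation context.
    struct ContextArena {
      std::vector<char> storage;
      size_t top = 0;
      explicit ContextArena(size_t bytes) : storage(bytes) {}

      void* attempt_allocation(size_t bytes) {
        if (top + bytes > storage.size()) return nullptr;
        void* p = &storage[top];
        top += bytes;
        return p;
      }
    };

    // The heap no longer owns a single mutator region; it asks the allocator
    // for the arena belonging to the caller's context.
    class Heap {
      std::vector<ContextArena> _arenas;
    public:
      Heap() {
        _arenas.emplace_back(4096);  // context 0
        _arenas.emplace_back(4096);  // context 1
      }

      ContextArena* mutator_arena(AllocationContext ctx) { return &_arenas[ctx]; }

      void* mem_allocate(size_t bytes, AllocationContext ctx) {
        // Every entry point forwards the context instead of assuming a default.
        return mutator_arena(ctx)->attempt_allocation(bytes);
      }
    };

    int main() {
      Heap heap;
      void* a = heap.mem_allocate(64, /* context */ 0);
      void* b = heap.mem_allocate(64, /* context */ 1);
      std::cout << "context 0: " << a << ", context 1: " << b << "\n";
      return 0;
    }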
@@ -469,7 +469,7 @@ bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 // can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
   HeapRegion* hr = heap_region_containing(p);
-  return !hr->isHumongous();
+  return !hr->is_humongous();
 }

 void G1CollectedHeap::check_ct_logs_at_safepoint() {
@@ -560,7 +560,7 @@ G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 }

 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
-  assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
+  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");

@@ -615,9 +615,10 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 HeapWord*
 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                             uint num_regions,
-                                                            size_t word_size) {
+                                                            size_t word_size,
+                                                            AllocationContext_t context) {
   assert(first != G1_NO_HRM_INDEX, "pre-condition");
-  assert(isHumongous(word_size), "word_size should be humongous");
+  assert(is_humongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

   // Index of last region in the series + 1.
@@ -666,14 +667,15 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   // will also update the BOT covering all the regions to reflect
   // that there is a single object that starts at the bottom of the
   // first region.
-  first_hr->set_startsHumongous(new_top, new_end);
+  first_hr->set_starts_humongous(new_top, new_end);
+  first_hr->set_allocation_context(context);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
   for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
-    hr->set_continuesHumongous(first_hr);
+    hr->set_continues_humongous(first_hr);
+    hr->set_allocation_context(context);
   }
   // If we have "continues humongous" regions (hr != NULL), then the
   // end of the last one should match new_end.
@@ -711,7 +713,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   // G1. For example, the code that looks for a consecutive number
   // of empty regions will consider them empty and try to
   // re-allocate them. We can extend is_empty() to also include
-  // !continuesHumongous(), but it is easier to just update the top
+  // !is_continues_humongous(), but it is easier to just update the top
   // fields here. The way we set top for all regions (i.e., top ==
   // end for all regions but the last one, top == new_top for the
   // last one) is actually used when we will free up the humongous
@@ -740,7 +742,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
   check_bitmaps("Humongous Region Allocation", first_hr);

   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
-  _summary_bytes_used += first_hr->used();
+  _allocator->increase_used(first_hr->used());
   _humongous_set.add(first_hr);

   return new_obj;
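For reference, the humongous-object bookkeeping these hunks touch follows two simple relations: an allocation counts as humongous once it is at least half a region (`_humongous_object_threshold_in_words = HeapRegion::GrainWords / 2`, set later in this patch), and a humongous object of `word_size` words occupies `ceil(word_size / GrainWords)` consecutive regions, of which the first is "starts humongous" and the rest are "continues humongous". A small standalone check of that arithmetic follows; the 1 MiB region size is only an example value, not what G1 would pick.

    #include <cstddef>
    #include <cstdio>

    // Example region size: 1 MiB of 8-byte words (a real heap derives this
    // from the heap size; the value here is only for illustration).
    const size_t GrainWords = (1024 * 1024) / 8;
    const size_t HumongousThresholdWords = GrainWords / 2;

    bool is_humongous(size_t word_size) {
      return word_size >= HumongousThresholdWords;
    }

    // Regions needed: one "starts humongous" region plus however many
    // "continues humongous" regions are required (ceiling division).
    size_t humongous_obj_regions(size_t word_size) {
      return (word_size + GrainWords - 1) / GrainWords;
    }

    int main() {
      size_t sizes[] = { HumongousThresholdWords - 1,  // not humongous
                         HumongousThresholdWords,      // humongous, 1 region
                         GrainWords + 1,               // humongous, 2 regions
                         5 * GrainWords };             // humongous, 5 regions
      for (size_t s : sizes) {
        std::printf("%zu words: humongous=%d regions=%zu\n",
                    s, (int)is_humongous(s),
                    is_humongous(s) ? humongous_obj_regions(s) : (size_t)0);
      }
      return 0;
    }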
@@ -749,7 +751,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

   verify_region_sets_optional();
@@ -818,7 +820,8 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {

   HeapWord* result = NULL;
   if (first != G1_NO_HRM_INDEX) {
-    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
+    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
+                                                       word_size, context);
     assert(result != NULL, "it should always return a valid result");

     // A successful humongous object allocation changes the used space
@@ -834,7 +837,7 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {

 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "we do not allow humongous TLABs");
+  assert(!is_humongous(word_size), "we do not allow humongous TLABs");

   unsigned int dummy_gc_count_before;
   int dummy_gclocker_retry_count = 0;
@@ -851,7 +854,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
   unsigned int gc_count_before;

   HeapWord* result = NULL;
-  if (!isHumongous(word_size)) {
+  if (!is_humongous(word_size)) {
     result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
   } else {
     result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
@@ -862,6 +865,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,

     // Create the garbage collection operation...
     VM_G1CollectForAllocation op(gc_count_before, word_size);
+    op.set_allocation_context(AllocationContext::current());
+
     // ...and get the VM thread to execute it.
     VMThread::execute(&op);

@@ -870,7 +875,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
     // if it is NULL. If the allocation attempt failed immediately
     // after a Full GC, it's unlikely we'll be able to allocate now.
     HeapWord* result = op.result();
-    if (result != NULL && !isHumongous(word_size)) {
+    if (result != NULL && !is_humongous(word_size)) {
       // Allocations that take place on VM operations do not do any
       // card dirtying and we have to do it here. We only have to do
       // this for non-humongous allocations, though.
@@ -897,12 +902,13 @@ G1CollectedHeap::mem_allocate(size_t word_size,
 }

 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                                   unsigned int *gc_count_before_ret,
-                                                   int* gclocker_retry_count_ret) {
+                                                   AllocationContext_t context,
+                                                   unsigned int *gc_count_before_ret,
+                                                   int* gclocker_retry_count_ret) {
   // Make sure you read the note in attempt_allocation_humongous().

   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
+  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
          "be called for humongous allocation requests");

   // We should only get here after the first-level allocation attempt
@@ -919,23 +925,22 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,

     {
       MutexLockerEx x(Heap_lock);
-      result = _mutator_alloc_region.attempt_allocation_locked(word_size,
-                                                               false /* bot_updates */);
+      result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                    false /* bot_updates */);
       if (result != NULL) {
         return result;
       }

       // If we reach here, attempt_allocation_locked() above failed to
       // allocate a new region. So the mutator alloc region should be NULL.
-      assert(_mutator_alloc_region.get() == NULL, "only way to get here");
+      assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");

       if (GC_locker::is_active_and_needs_gc()) {
         if (g1_policy()->can_expand_young_list()) {
           // No need for an ergo verbose message here,
           // can_expand_young_list() does this when it returns true.
-          result = _mutator_alloc_region.attempt_allocation_force(word_size,
-                                                                  false /* bot_updates */);
+          result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
+                                                                                       false /* bot_updates */);
           if (result != NULL) {
             return result;
           }
@@ -995,8 +1000,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
     // first attempt (without holding the Heap_lock) here and the
     // follow-on attempt will be at the start of the next loop
     // iteration (after taking the Heap_lock).
-    result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                      false /* bot_updates */);
+    result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                           false /* bot_updates */);
     if (result != NULL) {
       return result;
     }
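The attempt_allocation_slow() changes above keep the existing control flow: take the Heap_lock, retry the lock-protected allocation, and only if the region really is exhausted decide between expanding and waiting for a collection; the patch only redirects each call through the context-aware allocator. A stripped-down sketch of that retry shape with a plain mutex follows; the grow() hook and SimpleSpace type are placeholders for illustration, not G1 calls.

    #include <cstddef>
    #include <iostream>
    #include <mutex>
    #include <vector>

    class SimpleSpace {
      std::vector<char> _storage;
      size_t _top = 0;
      std::mutex _heap_lock;

      void* attempt_allocation(size_t bytes) {  // caller must hold _heap_lock
        if (_top + bytes > _storage.size()) return nullptr;
        void* p = &_storage[_top];
        _top += bytes;
        return p;
      }

      bool grow(size_t bytes) {  // placeholder for "expand the heap"
        _storage.resize(_storage.size() + bytes);
        return true;
      }

    public:
      explicit SimpleSpace(size_t bytes) : _storage(bytes) {}

      // Slow path: lock, retry, expand, retry again; give up only after that.
      void* allocate_slow(size_t bytes) {
        std::lock_guard<std::mutex> x(_heap_lock);

        if (void* result = attempt_allocation(bytes)) return result;

        // The attempt failed while holding the lock, so the space is
        // genuinely full; try to expand and then retry once more.
        if (grow(bytes)) {
          if (void* result = attempt_allocation(bytes)) return result;
        }
        return nullptr;  // a real collector would now request a GC and loop
      }
    };

    int main() {
      SimpleSpace space(128);
      void* p = space.allocate_slow(256);  // forces the expand-and-retry path
      std::cout << (p != nullptr ? "allocated after expansion" : "failed") << "\n";
      return 0;
    }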
@@ -1014,8 +1019,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 }

 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         unsigned int * gc_count_before_ret,
                                                         int* gclocker_retry_count_ret) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -1028,7 +1033,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   // much as possible.

   assert_heap_not_locked_and_not_at_safepoint();
-  assert(isHumongous(word_size), "attempt_allocation_humongous() "
+  assert(is_humongous(word_size), "attempt_allocation_humongous() "
          "should only be called for humongous allocations");

   // Humongous objects can exhaust the heap quickly, so we should check if we
@@ -1056,7 +1061,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   // Given that humongous objects are not allocated in young
   // regions, we'll first try to do the allocation without doing a
   // collection hoping that there's enough space in the heap.
-  result = humongous_obj_allocate(word_size);
+  result = humongous_obj_allocate(word_size, AllocationContext::current());
   if (result != NULL) {
     return result;
   }
@@ -1132,17 +1137,18 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 }

 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
+                                                           AllocationContext_t context,
                                                            bool expect_null_mutator_alloc_region) {
   assert_at_safepoint(true /* should_be_vm_thread */);
-  assert(_mutator_alloc_region.get() == NULL ||
+  assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
          !expect_null_mutator_alloc_region,
          "the current alloc region was unexpectedly found to be non-NULL");

-  if (!isHumongous(word_size)) {
-    return _mutator_alloc_region.attempt_allocation_locked(word_size,
-                                                           false /* bot_updates */);
+  if (!is_humongous(word_size)) {
+    return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                false /* bot_updates */);
   } else {
-    HeapWord* result = humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size, context);
     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
       g1_policy()->set_initiate_conc_mark_if_possible();
     }
|
|||||||
bool doHeapRegion(HeapRegion* r) {
|
bool doHeapRegion(HeapRegion* r) {
|
||||||
HeapRegionRemSet* hrrs = r->rem_set();
|
HeapRegionRemSet* hrrs = r->rem_set();
|
||||||
|
|
||||||
if (r->continuesHumongous()) {
|
if (r->is_continues_humongous()) {
|
||||||
// We'll assert that the strong code root list and RSet is empty
|
// We'll assert that the strong code root list and RSet is empty
|
||||||
assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
|
assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
|
||||||
assert(hrrs->occupied() == 0, "RSet should be empty");
|
assert(hrrs->occupied() == 0, "RSet should be empty");
|
||||||
@ -1199,7 +1205,7 @@ public:
|
|||||||
{ }
|
{ }
|
||||||
|
|
||||||
bool doHeapRegion(HeapRegion* r) {
|
bool doHeapRegion(HeapRegion* r) {
|
||||||
if (!r->continuesHumongous()) {
|
if (!r->is_continues_humongous()) {
|
||||||
_cl.set_from(r);
|
_cl.set_from(r);
|
||||||
r->oop_iterate(&_cl);
|
r->oop_iterate(&_cl);
|
||||||
}
|
}
|
||||||
@ -1231,14 +1237,14 @@ public:
|
|||||||
assert(!hr->is_young(), "not expecting to find young regions");
|
assert(!hr->is_young(), "not expecting to find young regions");
|
||||||
if (hr->is_free()) {
|
if (hr->is_free()) {
|
||||||
// We only generate output for non-empty regions.
|
// We only generate output for non-empty regions.
|
||||||
} else if (hr->startsHumongous()) {
|
} else if (hr->is_starts_humongous()) {
|
||||||
if (hr->region_num() == 1) {
|
if (hr->region_num() == 1) {
|
||||||
// single humongous region
|
// single humongous region
|
||||||
_hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
|
_hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
|
||||||
} else {
|
} else {
|
||||||
_hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
|
_hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
|
||||||
}
|
}
|
||||||
} else if (hr->continuesHumongous()) {
|
} else if (hr->is_continues_humongous()) {
|
||||||
_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
|
_hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
|
||||||
} else if (hr->is_old()) {
|
} else if (hr->is_old()) {
|
||||||
_hr_printer->post_compaction(hr, G1HRPrinter::Old);
|
_hr_printer->post_compaction(hr, G1HRPrinter::Old);
|
||||||
@ -1342,8 +1348,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
|||||||
concurrent_mark()->abort();
|
concurrent_mark()->abort();
|
||||||
|
|
||||||
// Make sure we'll choose a new allocation region afterwards.
|
// Make sure we'll choose a new allocation region afterwards.
|
||||||
release_mutator_alloc_region();
|
_allocator->release_mutator_alloc_region();
|
||||||
abandon_gc_alloc_regions();
|
_allocator->abandon_gc_alloc_regions();
|
||||||
g1_rem_set()->cleanupHRRS();
|
g1_rem_set()->cleanupHRRS();
|
||||||
|
|
||||||
// We should call this after we retire any currently active alloc
|
// We should call this after we retire any currently active alloc
|
||||||
@ -1515,7 +1521,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
|||||||
|
|
||||||
clear_cset_fast_test();
|
clear_cset_fast_test();
|
||||||
|
|
||||||
init_mutator_alloc_region();
|
_allocator->init_mutator_alloc_region();
|
||||||
|
|
||||||
double end = os::elapsedTime();
|
double end = os::elapsedTime();
|
||||||
g1_policy()->record_full_collection_end();
|
g1_policy()->record_full_collection_end();
|
||||||
@ -1651,6 +1657,7 @@ resize_if_necessary_after_full_collection(size_t word_size) {
|
|||||||
|
|
||||||
HeapWord*
|
HeapWord*
|
||||||
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
||||||
|
AllocationContext_t context,
|
||||||
bool* succeeded) {
|
bool* succeeded) {
|
||||||
assert_at_safepoint(true /* should_be_vm_thread */);
|
assert_at_safepoint(true /* should_be_vm_thread */);
|
||||||
|
|
||||||
@ -1658,7 +1665,8 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
|||||||
// Let's attempt the allocation first.
|
// Let's attempt the allocation first.
|
||||||
HeapWord* result =
|
HeapWord* result =
|
||||||
attempt_allocation_at_safepoint(word_size,
|
attempt_allocation_at_safepoint(word_size,
|
||||||
false /* expect_null_mutator_alloc_region */);
|
context,
|
||||||
|
false /* expect_null_mutator_alloc_region */);
|
||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
assert(*succeeded, "sanity");
|
assert(*succeeded, "sanity");
|
||||||
return result;
|
return result;
|
||||||
@ -1668,7 +1676,7 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
|||||||
// incremental pauses. Therefore, at least for now, we'll favor
|
// incremental pauses. Therefore, at least for now, we'll favor
|
||||||
// expansion over collection. (This might change in the future if we can
|
// expansion over collection. (This might change in the future if we can
|
||||||
// do something smarter than full collection to satisfy a failed alloc.)
|
// do something smarter than full collection to satisfy a failed alloc.)
|
||||||
result = expand_and_allocate(word_size);
|
result = expand_and_allocate(word_size, context);
|
||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
assert(*succeeded, "sanity");
|
assert(*succeeded, "sanity");
|
||||||
return result;
|
return result;
|
||||||
@ -1685,7 +1693,8 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
|||||||
|
|
||||||
// Retry the allocation
|
// Retry the allocation
|
||||||
result = attempt_allocation_at_safepoint(word_size,
|
result = attempt_allocation_at_safepoint(word_size,
|
||||||
true /* expect_null_mutator_alloc_region */);
|
context,
|
||||||
|
true /* expect_null_mutator_alloc_region */);
|
||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
assert(*succeeded, "sanity");
|
assert(*succeeded, "sanity");
|
||||||
return result;
|
return result;
|
||||||
@ -1702,7 +1711,8 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
|
|||||||
|
|
||||||
// Retry the allocation once more
|
// Retry the allocation once more
|
||||||
result = attempt_allocation_at_safepoint(word_size,
|
result = attempt_allocation_at_safepoint(word_size,
|
||||||
true /* expect_null_mutator_alloc_region */);
|
context,
|
||||||
|
true /* expect_null_mutator_alloc_region */);
|
||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
assert(*succeeded, "sanity");
|
assert(*succeeded, "sanity");
|
||||||
return result;
|
return result;
|
||||||
@@ -1724,7 +1734,7 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
 // successful, perform the allocation and return the address of the
 // allocated block, or else "NULL".

-HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
+HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
   assert_at_safepoint(true /* should_be_vm_thread */);

   verify_region_sets_optional();
@@ -1739,7 +1749,8 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
     _hrm.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
+                                           context,
                                            false /* expect_null_mutator_alloc_region */);
   }
   return NULL;
 }
@@ -1816,7 +1827,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
   // We should only reach here at the end of a Full GC which means we
   // should not not be holding to any GC alloc regions. The method
   // below will make sure of that and do any remaining clean up.
-  abandon_gc_alloc_regions();
+  _allocator->abandon_gc_alloc_regions();

   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
@@ -1849,7 +1860,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _bot_shared(NULL),
   _evac_failure_scan_stack(NULL),
   _mark_in_progress(false),
-  _cg1r(NULL), _summary_bytes_used(0),
+  _cg1r(NULL),
   _g1mm(NULL),
   _refine_cte_cl(NULL),
   _full_collection(false),
@@ -1861,7 +1872,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
-  _retained_old_gc_alloc_region(NULL),
   _survivor_plab_stats(YoungPLABSize, PLABWeight),
   _old_plab_stats(OldPLABSize, PLABWeight),
   _expand_heap_after_alloc_failure(true),
@@ -1884,6 +1894,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
     vm_exit_during_initialization("Failed necessary allocation.");
   }

+  _allocator = G1Allocator::create_allocator(_g1h);
   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;

   int n_queues = MAX2((int)ParallelGCThreads, 1);
@@ -1960,15 +1971,10 @@ jint G1CollectedHeap::initialize() {
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                  heap_alignment);

-  // It is important to do this in a way such that concurrent readers can't
-  // temporarily think something is in the heap. (I've actually seen this
-  // happen in asserts: DLD.)
-  _reserved.set_word_size(0);
-  _reserved.set_start((HeapWord*)heap_rs.base());
-  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

   // Create the gen rem set (and barrier set) for the entire reserved region.
-  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
+  _rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
   set_barrier_set(rem_set()->bs());
   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
@@ -2052,7 +2058,7 @@ jint G1CollectedHeap::initialize() {

   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);

-  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
+  _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);

   _g1h = this;

@@ -2127,7 +2133,7 @@ jint G1CollectedHeap::initialize() {
   dummy_region->set_top(dummy_region->end());
   G1AllocRegion::setup(this, dummy_region);

-  init_mutator_alloc_region();
+  _allocator->init_mutator_alloc_region();

   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
|
|||||||
}
|
}
|
||||||
|
|
||||||
void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
|
void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
|
||||||
assert(!hr->continuesHumongous(), "pre-condition");
|
assert(!hr->is_continues_humongous(), "pre-condition");
|
||||||
hr->reset_gc_time_stamp();
|
hr->reset_gc_time_stamp();
|
||||||
if (hr->startsHumongous()) {
|
if (hr->is_starts_humongous()) {
|
||||||
uint first_index = hr->hrm_index() + 1;
|
uint first_index = hr->hrm_index() + 1;
|
||||||
uint last_index = hr->last_hc_index();
|
uint last_index = hr->last_hc_index();
|
||||||
for (uint i = first_index; i < last_index; i += 1) {
|
for (uint i = first_index; i < last_index; i += 1) {
|
||||||
HeapRegion* chr = region_at(i);
|
HeapRegion* chr = region_at(i);
|
||||||
assert(chr->continuesHumongous(), "sanity");
|
assert(chr->is_continues_humongous(), "sanity");
|
||||||
chr->reset_gc_time_stamp();
|
chr->reset_gc_time_stamp();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2301,21 +2307,12 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
|
|||||||
|
|
||||||
|
|
||||||
// Computes the sum of the storage used by the various regions.
|
// Computes the sum of the storage used by the various regions.
|
||||||
|
|
||||||
size_t G1CollectedHeap::used() const {
|
size_t G1CollectedHeap::used() const {
|
||||||
assert(Heap_lock->owner() != NULL,
|
return _allocator->used();
|
||||||
"Should be owned on this thread's behalf.");
|
|
||||||
size_t result = _summary_bytes_used;
|
|
||||||
// Read only once in case it is set to NULL concurrently
|
|
||||||
HeapRegion* hr = _mutator_alloc_region.get();
|
|
||||||
if (hr != NULL)
|
|
||||||
result += hr->used();
|
|
||||||
return result;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t G1CollectedHeap::used_unlocked() const {
|
size_t G1CollectedHeap::used_unlocked() const {
|
||||||
size_t result = _summary_bytes_used;
|
return _allocator->used_unlocked();
|
||||||
return result;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
class SumUsedClosure: public HeapRegionClosure {
|
class SumUsedClosure: public HeapRegionClosure {
|
||||||
@ -2323,7 +2320,7 @@ class SumUsedClosure: public HeapRegionClosure {
|
|||||||
public:
|
public:
|
||||||
SumUsedClosure() : _used(0) {}
|
SumUsedClosure() : _used(0) {}
|
||||||
bool doHeapRegion(HeapRegion* r) {
|
bool doHeapRegion(HeapRegion* r) {
|
||||||
if (!r->continuesHumongous()) {
|
if (!r->is_continues_humongous()) {
|
||||||
_used += r->used();
|
_used += r->used();
|
||||||
}
|
}
|
||||||
return false;
|
return false;
|
||||||
@ -2355,11 +2352,12 @@ void G1CollectedHeap::allocate_dummy_regions() {
|
|||||||
// Let's fill up most of the region
|
// Let's fill up most of the region
|
||||||
size_t word_size = HeapRegion::GrainWords - 1024;
|
size_t word_size = HeapRegion::GrainWords - 1024;
|
||||||
// And as a result the region we'll allocate will be humongous.
|
// And as a result the region we'll allocate will be humongous.
|
||||||
guarantee(isHumongous(word_size), "sanity");
|
guarantee(is_humongous(word_size), "sanity");
|
||||||
|
|
||||||
for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
|
for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
|
||||||
// Let's use the existing mechanism for the allocation
|
// Let's use the existing mechanism for the allocation
|
||||||
HeapWord* dummy_obj = humongous_obj_allocate(word_size);
|
HeapWord* dummy_obj = humongous_obj_allocate(word_size,
|
||||||
|
AllocationContext::system());
|
||||||
if (dummy_obj != NULL) {
|
if (dummy_obj != NULL) {
|
||||||
MemRegion mr(dummy_obj, word_size);
|
MemRegion mr(dummy_obj, word_size);
|
||||||
CollectedHeap::fill_with_object(mr);
|
CollectedHeap::fill_with_object(mr);
|
||||||
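Several hunks in this file replace direct reads and writes of _summary_bytes_used with used(), increase_used(), decrease_used() and set_used() calls on the allocator, so the running "used bytes" total is maintained in one place. A minimal standalone model of that accounting shape follows; the UsedAccounting class is an illustration, not the G1Allocator interface.

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    // The counter is only ever adjusted through these methods, so the
    // underflow invariant is checked in a single place.
    class UsedAccounting {
      size_t _summary_bytes_used = 0;
    public:
      void increase_used(size_t bytes) { _summary_bytes_used += bytes; }

      void decrease_used(size_t bytes) {
        assert(_summary_bytes_used >= bytes && "used counter would underflow");
        _summary_bytes_used -= bytes;
      }

      // e.g. recomputed from scratch after an evacuation failure
      void set_used(size_t bytes) { _summary_bytes_used = bytes; }

      size_t used() const { return _summary_bytes_used; }
    };

    int main() {
      UsedAccounting acct;
      acct.increase_used(8 * 1024 * 1024);  // a humongous allocation
      acct.decrease_used(1024 * 1024);      // a region freed
      std::cout << "used bytes: " << acct.used() << "\n";
      return 0;
    }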
@@ -2510,6 +2508,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
                                    true, /* should_initiate_conc_mark */
                                    g1_policy()->max_pause_time_ms(),
                                    cause);
+      op.set_allocation_context(AllocationContext::current());

       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
@@ -2581,7 +2580,7 @@ class IterateOopClosureRegionClosure: public HeapRegionClosure {
 public:
   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       r->oop_iterate(_cl);
     }
     return false;
@@ -2600,7 +2599,7 @@ class IterateObjectClosureRegionClosure: public HeapRegionClosure {
 public:
   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
   bool doHeapRegion(HeapRegion* r) {
-    if (! r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       r->object_iterate(_cl);
     }
     return false;
@@ -2682,11 +2681,11 @@ public:
                              r->claim_value(), _claim_value);
       ++_failures;
     }
-    if (!r->isHumongous()) {
+    if (!r->is_humongous()) {
       _sh_region = NULL;
-    } else if (r->startsHumongous()) {
+    } else if (r->is_starts_humongous()) {
       _sh_region = r;
-    } else if (r->continuesHumongous()) {
+    } else if (r->is_continues_humongous()) {
       if (r->humongous_start_region() != _sh_region) {
         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
@@ -2720,7 +2719,7 @@ public:

   bool doHeapRegion(HeapRegion* hr) {
     assert(hr->in_collection_set(), "how?");
-    assert(!hr->isHumongous(), "H-region in CSet");
+    assert(!hr->is_humongous(), "H-region in CSet");
     if (hr->claim_value() != _claim_value) {
       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
                              "claim value = %d, should be %d",
@@ -2859,7 +2858,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,

 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
   HeapRegion* result = _hrm.next_region_in_heap(from);
-  while (result != NULL && result->isHumongous()) {
+  while (result != NULL && result->is_humongous()) {
     result = _hrm.next_region_in_heap(result);
   }
   return result;
@@ -2910,7 +2909,7 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   // since we can't allow tlabs to grow big enough to accommodate
   // humongous objects.

-  HeapRegion* hr = _mutator_alloc_region.get();
+  HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
   size_t max_tlab = max_tlab_size() * wordSize;
   if (hr == NULL) {
     return max_tlab;
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool doHeapRegion(HeapRegion* r) {
|
bool doHeapRegion(HeapRegion* r) {
|
||||||
if (!r->continuesHumongous()) {
|
if (!r->is_continues_humongous()) {
|
||||||
bool failures = false;
|
bool failures = false;
|
||||||
r->verify(_vo, &failures);
|
r->verify(_vo, &failures);
|
||||||
if (failures) {
|
if (failures) {
|
||||||
@ -3597,7 +3596,7 @@ void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
|
void G1CollectedHeap::gc_epilogue(bool full) {
|
||||||
|
|
||||||
if (G1SummarizeRSetStats &&
|
if (G1SummarizeRSetStats &&
|
||||||
(G1SummarizeRSetStatsPeriod > 0) &&
|
(G1SummarizeRSetStatsPeriod > 0) &&
|
||||||
@ -3614,6 +3613,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
|
|||||||
// always_do_update_barrier = true;
|
// always_do_update_barrier = true;
|
||||||
|
|
||||||
resize_all_tlabs();
|
resize_all_tlabs();
|
||||||
|
allocation_context_stats().update(full);
|
||||||
|
|
||||||
// We have just completed a GC. Update the soft reference
|
// We have just completed a GC. Update the soft reference
|
||||||
// policy with the new heap occupancy
|
// policy with the new heap occupancy
|
||||||
@ -3631,6 +3631,8 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
|
|||||||
false, /* should_initiate_conc_mark */
|
false, /* should_initiate_conc_mark */
|
||||||
g1_policy()->max_pause_time_ms(),
|
g1_policy()->max_pause_time_ms(),
|
||||||
gc_cause);
|
gc_cause);
|
||||||
|
|
||||||
|
op.set_allocation_context(AllocationContext::current());
|
||||||
VMThread::execute(&op);
|
VMThread::execute(&op);
|
||||||
|
|
||||||
HeapWord* result = op.result();
|
HeapWord* result = op.result();
|
||||||
@ -3676,7 +3678,7 @@ size_t G1CollectedHeap::cards_scanned() {
|
|||||||
|
|
||||||
bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
|
bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
|
||||||
HeapRegion* region = region_at(index);
|
HeapRegion* region = region_at(index);
|
||||||
assert(region->startsHumongous(), "Must start a humongous object");
|
assert(region->is_starts_humongous(), "Must start a humongous object");
|
||||||
return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
|
return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3689,7 +3691,7 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
|
|||||||
}
|
}
|
||||||
|
|
||||||
virtual bool doHeapRegion(HeapRegion* r) {
|
virtual bool doHeapRegion(HeapRegion* r) {
|
||||||
if (!r->startsHumongous()) {
|
if (!r->is_starts_humongous()) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||||
@@ -3961,7 +3963,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

         // Forget the current alloc region (we might even choose it to be part
         // of the collection set!).
-        release_mutator_alloc_region();
+        _allocator->release_mutator_alloc_region();

         // We should call this after we retire the mutator alloc
         // region(s) so that all the ALLOC / RETIRE events are generated
@@ -4044,7 +4046,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         setup_surviving_young_words();

         // Initialize the GC alloc regions.
-        init_gc_alloc_regions(evacuation_info);
+        _allocator->init_gc_alloc_regions(evacuation_info);

         // Actually do the work...
         evacuate_collection_set(evacuation_info);
@@ -4093,7 +4095,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         _young_list->reset_auxilary_lists();

         if (evacuation_failed()) {
-          _summary_bytes_used = recalculate_used();
+          _allocator->set_used(recalculate_used());
           uint n_queues = MAX2((int)ParallelGCThreads, 1);
           for (uint i = 0; i < n_queues; i++) {
             if (_evacuation_failed_info_array[i].has_failed()) {
@@ -4103,7 +4105,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         } else {
           // The "used" of the the collection set have already been subtracted
           // when they were freed. Add in the bytes evacuated.
-          _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
+          _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
         }

         if (g1_policy()->during_initial_mark_pause()) {
@@ -4125,7 +4127,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE

-        init_mutator_alloc_region();
+        _allocator->init_mutator_alloc_region();

         {
           size_t expand_bytes = g1_policy()->expansion_amount();
@@ -4270,80 +4272,6 @@ size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
 }

-void G1CollectedHeap::init_mutator_alloc_region() {
-  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
-  _mutator_alloc_region.init();
-}
-
-void G1CollectedHeap::release_mutator_alloc_region() {
-  _mutator_alloc_region.release();
-  assert(_mutator_alloc_region.get() == NULL, "post-condition");
-}
-
-void G1CollectedHeap::use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info) {
-  HeapRegion* retained_region = _retained_old_gc_alloc_region;
-  _retained_old_gc_alloc_region = NULL;
-
-  // We will discard the current GC alloc region if:
-  // a) it's in the collection set (it can happen!),
-  // b) it's already full (no point in using it),
-  // c) it's empty (this means that it was emptied during
-  // a cleanup and it should be on the free list now), or
-  // d) it's humongous (this means that it was emptied
-  // during a cleanup and was added to the free list, but
-  // has been subsequently used to allocate a humongous
-  // object that may be less than the region size).
-  if (retained_region != NULL &&
-      !retained_region->in_collection_set() &&
-      !(retained_region->top() == retained_region->end()) &&
-      !retained_region->is_empty() &&
-      !retained_region->isHumongous()) {
-    retained_region->record_top_and_timestamp();
-    // The retained region was added to the old region set when it was
-    // retired. We have to remove it now, since we don't allow regions
-    // we allocate to in the region sets. We'll re-add it later, when
-    // it's retired again.
-    _old_set.remove(retained_region);
-    bool during_im = g1_policy()->during_initial_mark_pause();
-    retained_region->note_start_of_copying(during_im);
-    _old_gc_alloc_region.set(retained_region);
-    _hr_printer.reuse(retained_region);
-    evacuation_info.set_alloc_regions_used_before(retained_region->used());
-  }
-}
-
-void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-
-  _survivor_gc_alloc_region.init();
-  _old_gc_alloc_region.init();
-
-  use_retained_old_gc_alloc_region(evacuation_info);
-}
-
-void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
-  evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
-                                         _old_gc_alloc_region.count());
-  _survivor_gc_alloc_region.release();
-  // If we have an old GC alloc region to release, we'll save it in
-  // _retained_old_gc_alloc_region. If we don't
-  // _retained_old_gc_alloc_region will become NULL. This is what we
-  // want either way so no reason to check explicitly for either
-  // condition.
-  _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
-
-  if (ResizePLAB) {
-    _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-    _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
-  }
-}
-
-void G1CollectedHeap::abandon_gc_alloc_regions() {
-  assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
-  assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
-  _retained_old_gc_alloc_region = NULL;
-}
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
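The lines removed in the hunk above are not deleted behavior: the same entry points (init/release_mutator_alloc_region, init/release/abandon_gc_alloc_regions) are declared as virtuals on G1DefaultAllocator in the new g1Allocator.hpp earlier in this changeset, which is why the call sites now go through _allocator. One piece worth keeping in mind is the retained-old-region test, which decides whether the old GC allocation region from the previous pause can be reused. A standalone restatement of that four-way check, with a simplified Region type that is only an illustration:

    #include <cstddef>
    #include <iostream>

    // Just enough of a heap region to express the reuse test.
    struct Region {
      size_t top;                // current allocation cursor
      size_t end;                // capacity
      bool   in_collection_set;
      bool   is_humongous;
      bool is_empty() const { return top == 0; }
    };

    // Mirrors the "discard the retained region if (a)-(d)" comment in the
    // removed use_retained_old_gc_alloc_region(): reuse only when none of
    // the discard conditions hold.
    bool can_reuse_retained_region(const Region* r) {
      return r != nullptr &&
             !r->in_collection_set &&   // (a) not about to be evacuated
             !(r->top == r->end) &&     // (b) not already full
             !r->is_empty() &&          // (c) not emptied by a cleanup
             !r->is_humongous;          // (d) not reused for a humongous object
    }

    int main() {
      Region r = { /* top */ 100, /* end */ 1024,
                   /* in_collection_set */ false, /* is_humongous */ false };
      std::cout << "reuse retained region: " << can_reuse_retained_region(&r) << "\n";
      return 0;
    }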
@@ -4484,25 +4412,26 @@ void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
 }

 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                  size_t word_size) {
+                                                  size_t word_size,
+                                                  AllocationContext_t context) {
   if (purpose == GCAllocForSurvived) {
-    HeapWord* result = survivor_attempt_allocation(word_size);
+    HeapWord* result = survivor_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the old gen in case we can fit the
       // object there.
-      return old_attempt_allocation(word_size);
+      return old_attempt_allocation(word_size, context);
     }
   } else {
     assert(purpose == GCAllocForTenured, "sanity");
-    HeapWord* result = old_attempt_allocation(word_size);
+    HeapWord* result = old_attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
     } else {
       // Let's try to allocate in the survivors in case we can fit the
       // object there.
-      return survivor_attempt_allocation(word_size);
+      return survivor_attempt_allocation(word_size, context);
     }
   }

@@ -4511,9 +4440,6 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
   return NULL;
 }

-G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
-  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

||||||
@ -5087,7 +5013,11 @@ private:
|
|||||||
_num_entered_barrier(0)
|
_num_entered_barrier(0)
|
||||||
{
|
{
|
||||||
nmethod::increase_unloading_clock();
|
nmethod::increase_unloading_clock();
|
||||||
_first_nmethod = CodeCache::alive_nmethod(CodeCache::first());
|
// Get first alive nmethod
|
||||||
|
NMethodIterator iter = NMethodIterator();
|
||||||
|
if(iter.next_alive()) {
|
||||||
|
_first_nmethod = iter.method();
|
||||||
|
}
|
||||||
_claimed_nmethod = (volatile nmethod*)_first_nmethod;
|
_claimed_nmethod = (volatile nmethod*)_first_nmethod;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -5130,27 +5060,26 @@ private:

   void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
     nmethod* first;
-    nmethod* last;
+    NMethodIterator last;

     do {
       *num_claimed_nmethods = 0;

-      first = last = (nmethod*)_claimed_nmethod;
+      first = (nmethod*)_claimed_nmethod;
+      last = NMethodIterator(first);

       if (first != NULL) {
-        for (int i = 0; i < MaxClaimNmethods; i++) {
-          last = CodeCache::alive_nmethod(CodeCache::next(last));
-
-          if (last == NULL) {
+        for (int i = 0; i < MaxClaimNmethods; i++) {
+          if (!last.next_alive()) {
             break;
           }
-          claimed_nmethods[i] = last;
+          claimed_nmethods[i] = last.method();
           (*num_claimed_nmethods)++;
         }
       }

-    } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first);
+    } while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
   }

   nmethod* claim_postponed_nmethod() {
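The claiming loop above advances a single shared cursor with a compare-and-swap, so each worker thread owns the batch of nmethods it stepped over and retries only if another worker moved the cursor first. A minimal stand-alone sketch of that pattern follows; the names (Node, g_cursor, claim_batch) are hypothetical illustrations, not the HotSpot API.

    #include <atomic>
    #include <cstddef>

    struct Node { Node* next; };            // stand-in for an nmethod-like list node

    std::atomic<Node*> g_cursor{nullptr};   // shared claim cursor, like _claimed_nmethod

    // Claim up to max_batch nodes starting at the shared cursor; returns how many
    // nodes were written into out. Several threads may call this concurrently.
    size_t claim_batch(Node** out, size_t max_batch) {
      Node* first;
      Node* last;
      size_t n;
      do {
        n = 0;
        first = g_cursor.load();
        last = first;
        // Walk forward from the cursor, remembering every node passed over.
        while (last != nullptr && n < max_batch) {
          out[n++] = last;
          last = last->next;
        }
        // Publish the new cursor position; if another thread moved the cursor
        // first, the compare-and-swap fails and the whole walk is retried.
      } while (first != nullptr &&
               !g_cursor.compare_exchange_weak(first, last));
      return (first == nullptr) ? 0 : n;
    }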
@ -6008,7 +5937,7 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
     }
   }

-  release_gc_alloc_regions(n_workers, evacuation_info);
+  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();

   // Reset and re-enable the hot card cache.
@ -6075,7 +6004,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
                                             FreeRegionList* free_list,
                                             bool par) {
-  assert(hr->startsHumongous(), "this is only for starts humongous regions");
+  assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
   assert(free_list != NULL, "pre-condition");

   size_t hr_capacity = hr->capacity();
@ -6088,7 +6017,7 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
   uint i = hr->hrm_index() + 1;
   while (i < last_index) {
     HeapRegion* curr_hr = region_at(i);
-    assert(curr_hr->continuesHumongous(), "invariant");
+    assert(curr_hr->is_continues_humongous(), "invariant");
     curr_hr->clear_humongous();
     free_region(curr_hr, free_list, par);
     i += 1;
@ -6114,10 +6043,7 @@ void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
 }

 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
-  assert(_summary_bytes_used >= bytes,
-         err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
-                 _summary_bytes_used, bytes));
-  _summary_bytes_used -= bytes;
+  _allocator->decrease_used(bytes);
 }

 class G1ParCleanupCTTask : public AbstractGangTask {
@ -6259,7 +6185,7 @@ public:
   bool failures() { return _failures; }

   virtual bool doHeapRegion(HeapRegion* hr) {
-    if (hr->continuesHumongous()) return false;
+    if (hr->is_continues_humongous()) return false;

     bool result = _g1h->verify_bitmaps(_caller, hr);
     if (!result) {
@ -6438,7 +6364,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
   }

   virtual bool doHeapRegion(HeapRegion* r) {
-    if (!r->startsHumongous()) {
+    if (!r->is_starts_humongous()) {
       return false;
     }

@ -6484,7 +6410,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {

     if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
       gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
-                             r->isHumongous(),
+                             r->is_humongous(),
                              region_idx,
                              r->rem_set()->occupied(),
                              r->rem_set()->strong_code_roots_list_length(),
@ -6503,7 +6429,7 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {

     if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
       gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
-                             r->isHumongous(),
+                             r->is_humongous(),
                              r->bottom(),
                              region_idx,
                              r->region_num(),
@ -6693,7 +6619,7 @@ public:
       // We ignore young regions, we'll empty the young list afterwards.
       // We ignore humongous regions, we're not tearing down the
       // humongous regions set.
-      assert(r->is_free() || r->is_young() || r->isHumongous(),
+      assert(r->is_free() || r->is_young() || r->is_humongous(),
             "it cannot be another type");
     }
     return false;
@ -6738,18 +6664,19 @@ public:
   }

   bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous()) {
+    if (r->is_continues_humongous()) {
       return false;
     }

     if (r->is_empty()) {
       // Add free regions to the free list
       r->set_free();
+      r->set_allocation_context(AllocationContext::system());
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
       assert(!r->is_young(), "we should not come across young regions");

-      if (r->isHumongous()) {
+      if (r->is_humongous()) {
         // We ignore humongous regions, we left the humongous set unchanged
       } else {
         // Objects that were compacted would have ended up on regions
@ -6781,12 +6708,12 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
   heap_region_iterate(&cl);

   if (!free_list_only) {
-    _summary_bytes_used = cl.total_used();
+    _allocator->set_used(cl.total_used());
   }
-  assert(_summary_bytes_used == recalculate_used(),
-         err_msg("inconsistent _summary_bytes_used, "
+  assert(_allocator->used_unlocked() == recalculate_used(),
+         err_msg("inconsistent _allocator->used_unlocked(), "
                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
-                 _summary_bytes_used, recalculate_used()));
+                 _allocator->used_unlocked(), recalculate_used()));
 }

 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
@ -6826,7 +6753,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
-  _summary_bytes_used += allocated_bytes;
+  _allocator->increase_used(allocated_bytes);
   _hr_printer.retire(alloc_region);
   // We update the eden sizes here, when the region is retired,
   // instead of when it's allocated, since this is the point that its
@ -6834,11 +6761,6 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
   g1mm()->update_eden_size();
 }

-HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
-                                                    bool force) {
-  return _g1h->new_mutator_alloc_region(word_size, force);
-}
-
 void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers. Use the value previously set
   // in the workgroup.
@ -6855,11 +6777,6 @@ void G1CollectedHeap::set_par_threads() {
   set_par_threads(n_workers);
 }

-void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
-                                       size_t allocated_bytes) {
-  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
-}
-
 // Methods for the GC alloc regions

 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
@ -6910,58 +6827,6 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
   _hr_printer.retire(alloc_region);
 }

-HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
-                                                       bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
-}
-
-void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                          size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForSurvived);
-}
-
-HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
-                                                  bool force) {
-  assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
-}
-
-void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
-                                     size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForTenured);
-}
-
-HeapRegion* OldGCAllocRegion::release() {
-  HeapRegion* cur = get();
-  if (cur != NULL) {
-    // Determine how far we are from the next card boundary. If it is smaller than
-    // the minimum object size we can allocate into, expand into the next card.
-    HeapWord* top = cur->top();
-    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
-
-    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
-
-    if (to_allocate_words != 0) {
-      // We are not at a card boundary. Fill up, possibly into the next, taking the
-      // end of the region and the minimum object size into account.
-      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
-                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
-
-      // Skip allocation if there is not enough space to allocate even the smallest
-      // possible object. In this case this region will not be retained, so the
-      // original problem cannot occur.
-      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
-        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
-        CollectedHeap::fill_with_object(dummy, to_allocate_words);
-      }
-    }
-  }
-  return G1AllocRegion::release();
-}
-
 // Heap region set verification

 class VerifyRegionListsClosure : public HeapRegionClosure {
@ -6982,13 +6847,13 @@ public:
     _old_count(), _humongous_count(), _free_count(){ }

   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->continuesHumongous()) {
+    if (hr->is_continues_humongous()) {
       return false;
     }

     if (hr->is_young()) {
       // TODO
-    } else if (hr->startsHumongous()) {
+    } else if (hr->is_starts_humongous()) {
       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
       _humongous_count.increment(1u, hr->capacity());
     } else if (hr->is_empty()) {
@ -7069,7 +6934,7 @@ class RegisterNMethodOopClosure: public OopClosure {
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
-      assert(!hr->continuesHumongous(),
+      assert(!hr->is_continues_humongous(),
              err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                      " starting at "HR_FORMAT,
                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
@ -7096,7 +6961,7 @@ class UnregisterNMethodOopClosure: public OopClosure {
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
-      assert(!hr->continuesHumongous(),
+      assert(!hr->is_continues_humongous(),
              err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
                      " starting at "HR_FORMAT,
                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
@ -80,12 +82,6 @@ typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;   // needs to hold [ 0..CardsPerRegion )

-enum GCAllocPurpose {
-  GCAllocForTenured,
-  GCAllocForSurvived,
-  GCAllocPurposeCount
-};
-
 class YoungList : public CHeapObj<mtGC> {
 private:
   G1CollectedHeap* _g1h;
@ -158,40 +154,6 @@ public:
   void          print();
 };

-class MutatorAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  MutatorAllocRegion()
-    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
-};
-
-class SurvivorGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  SurvivorGCAllocRegion()
-    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
-};
-
-class OldGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  OldGCAllocRegion()
-    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
-
-  // This specialization of release() makes sure that the last card that has been
-  // allocated into has been completely filled by a dummy object.
-  // This avoids races when remembered set scanning wants to update the BOT of the
-  // last card in the retained old gc alloc region, and allocation threads
-  // allocating into that card at the same time.
-  virtual HeapRegion* release();
-};
-
 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
@ -222,6 +184,9 @@ class G1CollectedHeap : public SharedHeap {
   friend class MutatorAllocRegion;
   friend class SurvivorGCAllocRegion;
   friend class OldGCAllocRegion;
+  friend class G1Allocator;
+  friend class G1DefaultAllocator;
+  friend class G1ResManAllocator;

   // Closures used in implementation.
   template <G1Barrier barrier, G1Mark do_mark_object>
@ -232,6 +197,8 @@ class G1CollectedHeap : public SharedHeap {
   friend class G1ParScanClosureSuper;
   friend class G1ParEvacuateFollowersClosure;
   friend class G1ParTask;
+  friend class G1ParGCAllocator;
+  friend class G1DefaultParGCAllocator;
   friend class G1FreeGarbageRegionClosure;
   friend class RefineCardTableEntryClosure;
   friend class G1PrepareCompactClosure;
@ -293,44 +260,18 @@ private:
   // The sequence of all heap regions in the heap.
   HeapRegionManager _hrm;

-  // Alloc region used to satisfy mutator allocation requests.
-  MutatorAllocRegion _mutator_alloc_region;
+  // Class that handles the different kinds of allocations.
+  G1Allocator* _allocator;

-  // Alloc region used to satisfy allocation requests by the GC for
-  // survivor objects.
-  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+  // Statistics for each allocation context
+  AllocationContextStats _allocation_context_stats;

   // PLAB sizing policy for survivors.
   PLABStats _survivor_plab_stats;

-  // Alloc region used to satisfy allocation requests by the GC for
-  // old objects.
-  OldGCAllocRegion _old_gc_alloc_region;
-
   // PLAB sizing policy for tenured objects.
   PLABStats _old_plab_stats;

-  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
-    PLABStats* stats = NULL;
-
-    switch (purpose) {
-    case GCAllocForSurvived:
-      stats = &_survivor_plab_stats;
-      break;
-    case GCAllocForTenured:
-      stats = &_old_plab_stats;
-      break;
-    default:
-      assert(false, "unrecognized GCAllocPurpose");
-    }
-
-    return stats;
-  }
-
-  // The last old region we allocated to during the last GC.
-  // Typically, it is not full so we should re-use it during the next GC.
-  HeapRegion* _retained_old_gc_alloc_region;
-
   // It specifies whether we should attempt to expand the heap after a
   // region allocation failure. If heap expansion fails we set this to
   // false so that we don't re-attempt the heap expansion (it's likely
@ -348,9 +289,6 @@ private:
   // It initializes the GC alloc regions at the start of a GC.
   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);

-  // Setup the retained old gc alloc region as the currrent old gc alloc region.
-  void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
-
   // It releases the GC alloc regions at the end of a GC.
   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);

@ -361,13 +299,6 @@ private:
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;

-  // Determines PLAB size for a particular allocation purpose.
-  size_t desired_plab_sz(GCAllocPurpose purpose);
-
-  // Outside of GC pauses, the number of bytes used in all regions other
-  // than the current allocation region.
-  size_t _summary_bytes_used;
-
   // Records whether the region at the given index is kept live by roots or
   // references from the young generation.
   class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
@ -526,11 +457,12 @@ protected:
   // humongous region.
   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                       uint num_regions,
-                                                      size_t word_size);
+                                                      size_t word_size,
+                                                      AllocationContext_t context);

   // Attempt to allocate a humongous object of the given size. Return
   // NULL if unsuccessful.
-  HeapWord* humongous_obj_allocate(size_t word_size);
+  HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);

   // The following two methods, allocate_new_tlab() and
   // mem_allocate(), are the two main entry points from the runtime
@ -586,6 +518,7 @@ protected:
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
+                                    AllocationContext_t context,
                                     unsigned int* gc_count_before_ret,
                                     int* gclocker_retry_count_ret);

@ -600,7 +533,8 @@ protected:
   // specifies whether the mutator alloc region is expected to be NULL
   // or not.
   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
-                                            bool expect_null_mutator_alloc_region);
+                                            AllocationContext_t context,
+                                            bool expect_null_mutator_alloc_region);

   // It dirties the cards that cover the block so that so that the post
   // write barrier never queues anything when updating objects on this
@ -612,7 +546,9 @@ protected:
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
-  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
+  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
+                                   size_t word_size,
+                                   AllocationContext_t context);

   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                     HeapRegion* alloc_region,
@ -624,10 +560,12 @@ protected:
   void par_allocate_remaining_space(HeapRegion* r);

   // Allocation attempt during GC for a survivor object / PLAB.
-  inline HeapWord* survivor_attempt_allocation(size_t word_size);
+  inline HeapWord* survivor_attempt_allocation(size_t word_size,
+                                               AllocationContext_t context);

   // Allocation attempt during GC for an old object / PLAB.
-  inline HeapWord* old_attempt_allocation(size_t word_size);
+  inline HeapWord* old_attempt_allocation(size_t word_size,
+                                          AllocationContext_t context);

   // These methods are the "callbacks" from the G1AllocRegion class.

@ -666,13 +604,15 @@ protected:
   // Callback from VM_G1CollectForAllocation operation.
   // This function does everything necessary/possible to satisfy a
   // failed allocation request (including collection, expansion, etc.)
-  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
+  HeapWord* satisfy_failed_allocation(size_t word_size,
+                                      AllocationContext_t context,
+                                      bool* succeeded);

   // Attempting to expand the heap sufficiently
   // to support an allocation of the given "word_size". If
   // successful, perform the allocation and return the address of the
   // allocated block, or else "NULL".
-  HeapWord* expand_and_allocate(size_t word_size);
+  HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);

   // Process any reference objects discovered during
   // an incremental evacuation pause.
@ -684,6 +624,10 @@ protected:

 public:

+  G1Allocator* allocator() {
+    return _allocator;
+  }
+
   G1MonitoringSupport* g1mm() {
     assert(_g1mm != NULL, "should have been initialized");
     return _g1mm;
@ -695,6 +639,29 @@ public:
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);

+  // Returns the PLAB statistics given a purpose.
+  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+    PLABStats* stats = NULL;
+
+    switch (purpose) {
+    case GCAllocForSurvived:
+      stats = &_survivor_plab_stats;
+      break;
+    case GCAllocForTenured:
+      stats = &_old_plab_stats;
+      break;
+    default:
+      assert(false, "unrecognized GCAllocPurpose");
+    }
+
+    return stats;
+  }
+
+  // Determines PLAB size for a particular allocation purpose.
+  size_t desired_plab_sz(GCAllocPurpose purpose);
+
+  inline AllocationContextStats& allocation_context_stats();
+
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
@ -1272,7 +1239,7 @@ public:
   // Determine whether the given region is one that we are using as an
   // old GC alloc region.
   bool is_old_gc_alloc_region(HeapRegion* hr) {
-    return hr == _retained_old_gc_alloc_region;
+    return _allocator->is_retained_old_region(hr);
   }

   // Perform a collection of the heap; intended for use in implementing
@ -1283,6 +1250,11 @@ public:
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);

+  virtual void copy_allocation_context_stats(const jint* contexts,
+                                             jlong* totals,
+                                             jbyte* accuracy,
+                                             jint len);
+
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }

@ -1540,7 +1512,7 @@ public:
   virtual inline bool can_elide_initializing_store_barrier(oop new_obj);

   // Returns "true" iff the given word_size is "very large".
-  static bool isHumongous(size_t word_size) {
+  static bool is_humongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
     // are capped at the humongous threshold and we want to
     // ensure that we don't try to allocate a TLAB as
@ -1747,28 +1719,4 @@ protected:
   size_t _max_heap_capacity;
 };

-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
-private:
-  bool _retired;
-
-public:
-  G1ParGCAllocBuffer(size_t gclab_word_size);
-  virtual ~G1ParGCAllocBuffer() {
-    guarantee(_retired, "Allocation buffer has not been retired");
-  }
-
-  virtual void set_buf(HeapWord* buf) {
-    ParGCAllocBuffer::set_buf(buf);
-    _retired = false;
-  }
-
-  virtual void retire(bool end_of_gc, bool retain) {
-    if (_retired) {
-      return;
-    }
-    ParGCAllocBuffer::retire(end_of_gc, retain);
-    _retired = true;
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
@ -37,14 +37,18 @@

 // Inline functions for G1CollectedHeap

+inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
+  return _allocation_context_stats;
+}
+
 // Return the region with the given index. It assumes the index is valid.
 inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   assert(is_in_reserved(addr),
          err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
-                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
-  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
+                 p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
+  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }

 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
@ -63,7 +67,7 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) con
 template <class T>
 inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
   HeapRegion* hr = heap_region_containing_raw(addr);
-  if (hr->continuesHumongous()) {
+  if (hr->is_continues_humongous()) {
     return hr->humongous_start_region();
   }
   return hr;
@ -95,13 +99,15 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                      unsigned int* gc_count_before_ret,
                                                      int* gclocker_retry_count_ret) {
   assert_heap_not_locked_and_not_at_safepoint();
-  assert(!isHumongous(word_size), "attempt_allocation() should not "
+  assert(!is_humongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");

-  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
-                                                              false /* bot_updates */);
+  AllocationContext_t context = AllocationContext::current();
+  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
+                                                                                   false /* bot_updates */);
   if (result == NULL) {
     result = attempt_allocation_slow(word_size,
+                                     context,
                                      gc_count_before_ret,
                                      gclocker_retry_count_ret);
   }
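The mutator path above first tries an unsynchronized allocation in the current alloc region and only drops into a slower, lock-protected retry when that fails. A minimal sketch of that two-level shape follows; Region, RegionAllocator and their members are hypothetical illustrations, not the G1Allocator API.

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    struct Region {
      char* base;
      std::atomic<size_t> top{0};   // bytes already handed out
      size_t capacity;
    };

    class RegionAllocator {
      Region*    _current = nullptr;
      std::mutex _lock;

      // Lock-free fast path: bump the top offset of the current region with CAS.
      void* attempt_allocation(size_t bytes) {
        Region* r = _current;
        if (r == nullptr) return nullptr;
        size_t old_top = r->top.load();
        do {
          if (old_top + bytes > r->capacity) return nullptr;   // region is full
        } while (!r->top.compare_exchange_weak(old_top, old_top + bytes));
        return r->base + old_top;
      }

      // Slow path: retry under the lock; a real allocator would retire the full
      // region and install a fresh one here before retrying.
      void* attempt_allocation_locked(size_t bytes) {
        std::lock_guard<std::mutex> guard(_lock);
        return attempt_allocation(bytes);
      }

    public:
      void install_region(Region* r) {
        std::lock_guard<std::mutex> guard(_lock);
        _current = r;
      }

      void* allocate(size_t bytes) {
        void* result = attempt_allocation(bytes);
        if (result == nullptr) {
          result = attempt_allocation_locked(bytes);
        }
        return result;
      }
    };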
@ -112,17 +118,17 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
   return result;
 }

-inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
-                                                              word_size) {
-  assert(!isHumongous(word_size),
+inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
+                                                              AllocationContext_t context) {
+  assert(!is_humongous(word_size),
          "we should not be seeing humongous-size allocations in this path");

-  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
-                                                                  false /* bot_updates */);
+  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                       false /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                                 false /* bot_updates */);
+    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                      false /* bot_updates */);
   }
   if (result != NULL) {
     dirty_young_block(result, word_size);
@ -130,16 +136,17 @@ inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
   return result;
 }

-inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
-  assert(!isHumongous(word_size),
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
+                                                          AllocationContext_t context) {
+  assert(!is_humongous(word_size),
          "we should not be seeing humongous-size allocations in this path");

-  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
-                                                             true /* bot_updates */);
+  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
+                                                                                  true /* bot_updates */);
   if (result == NULL) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
-                                                            true /* bot_updates */);
+    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
+                                                                                 true /* bot_updates */);
   }
   return result;
 }
@ -159,7 +166,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
   assert(word_size > 0, "pre-condition");
   assert(containing_hr->is_in(start), "it should contain start");
   assert(containing_hr->is_young(), "it should be young");
-  assert(!containing_hr->isHumongous(), "it should not be humongous");
+  assert(!containing_hr->is_humongous(), "it should not be humongous");

   HeapWord* end = start + word_size;
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
+                                                    jlong* totals,
+                                                    jbyte* accuracy,
+                                                    jint len) {
+}
@ -192,7 +192,7 @@ public:
     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
     bool during_conc_mark = _g1h->mark_in_progress();

-    assert(!hr->isHumongous(), "sanity");
+    assert(!hr->is_humongous(), "sanity");
     assert(hr->in_collection_set(), "bad CS");

     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
@ -43,9 +43,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
     _hot_cache_idx = 0;

     // For refining the cards in the hot cache in parallel
-    uint n_workers = (ParallelGCThreads > 0 ?
-                        _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
+    _hot_cache_par_chunk_size = (ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
     _hot_cache_par_claimed_idx = 0;

     _card_counts.initialize(card_counts_storage);
@ -70,6 +70,9 @@ class G1HotCardCache: public CHeapObj<mtGC> {

   G1CardCounts _card_counts;

+  // The number of cached cards a thread claims when flushing the cache
+  static const int ClaimChunkSize = 32;
+
   bool default_use_cache() const {
     return (G1ConcRSLogCacheSize > 0);
   }
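The ClaimChunkSize constant above lets each worker claim a fixed-size chunk of the cached cards at a time, instead of deriving the chunk size from the worker count. A generic sketch of that chunked-claiming pattern follows; drain_in_parallel and its parameters are hypothetical illustrations, not the G1HotCardCache API.

    #include <atomic>
    #include <cstddef>

    // Drains buffer[0..size) by letting each caller repeatedly claim a chunk of
    // indices via an atomic counter; run from several threads to drain in parallel.
    template <typename T, typename Fn>
    void drain_in_parallel(T* buffer, size_t size,
                           std::atomic<size_t>& claimed_idx,
                           size_t chunk_size, Fn process) {
      while (true) {
        size_t start = claimed_idx.fetch_add(chunk_size);   // claim [start, start + chunk_size)
        if (start >= size) {
          return;                                            // nothing left to claim
        }
        size_t end = start + chunk_size;
        if (end > size) end = size;
        for (size_t i = start; i < end; ++i) {
          process(buffer[i]);
        }
      }
    }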
@ -193,76 +193,6 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
 }

-class G1PrepareCompactClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  ModRefBarrierSet* _mrbs;
-  CompactPoint _cp;
-  HeapRegionSetCount _humongous_regions_removed;
-
-  bool is_cp_initialized() const {
-    return _cp.space != NULL;
-  }
-
-  void prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
-    // If this is the first live region that we came across which we can compact,
-    // initialize the CompactPoint.
-    if (!is_cp_initialized()) {
-      _cp.space = hr;
-      _cp.threshold = hr->initialize_threshold();
-    }
-    hr->prepare_for_compaction(&_cp);
-    // Also clear the part of the card table that will be unused after
-    // compaction.
-    _mrbs->clear(MemRegion(hr->compaction_top(), end));
-  }
-
-  void free_humongous_region(HeapRegion* hr) {
-    HeapWord* end = hr->end();
-    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
-
-    assert(hr->startsHumongous(),
-           "Only the start of a humongous region should be freed.");
-
-    hr->set_containing_set(NULL);
-    _humongous_regions_removed.increment(1u, hr->capacity());
-
-    _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
-    prepare_for_compaction(hr, end);
-    dummy_free_list.remove_all();
-  }
-
-public:
-  G1PrepareCompactClosure()
-  : _g1h(G1CollectedHeap::heap()),
-    _mrbs(_g1h->g1_barrier_set()),
-    _cp(NULL),
-    _humongous_regions_removed() { }
-
-  void update_sets() {
-    // We'll recalculate total used bytes and recreate the free list
-    // at the end of the GC, so no point in updating those values here.
-    HeapRegionSetCount empty_set;
-    _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
-  }
-
-  bool doHeapRegion(HeapRegion* hr) {
-    if (hr->isHumongous()) {
-      if (hr->startsHumongous()) {
-        oop obj = oop(hr->bottom());
-        if (obj->is_gc_marked()) {
-          obj->forward_to(obj);
-        } else {
-          free_humongous_region(hr);
-        }
-      } else {
-        assert(hr->continuesHumongous(), "Invalid humongous.");
-      }
-    } else {
-      prepare_for_compaction(hr, hr->end());
-    }
-    return false;
-  }
-};
-
 void G1MarkSweep::mark_sweep_phase2() {
   // Now all live objects are marked, compute the new object addresses.
@ -271,21 +201,17 @@ void G1MarkSweep::mark_sweep_phase2() {
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.

-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
   GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
   GenMarkSweep::trace("2");

-  G1PrepareCompactClosure blk;
-  g1h->heap_region_iterate(&blk);
-  blk.update_sets();
+  prepare_compaction();
 }

 class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
-    if (r->isHumongous()) {
-      if (r->startsHumongous()) {
+    if (r->is_humongous()) {
+      if (r->is_starts_humongous()) {
         // We must adjust the pointers on the single H object.
         oop obj = oop(r->bottom());
         // point all the oops to the new location
@ -340,8 +266,8 @@ public:
   G1SpaceCompactClosure() {}

   bool doHeapRegion(HeapRegion* hr) {
-    if (hr->isHumongous()) {
-      if (hr->startsHumongous()) {
+    if (hr->is_humongous()) {
+      if (hr->is_starts_humongous()) {
         oop obj = oop(hr->bottom());
         if (obj->is_gc_marked()) {
           obj->init_mark();
@ -373,3 +299,68 @@ void G1MarkSweep::mark_sweep_phase4() {
   g1h->heap_region_iterate(&blk);

 }
+
+void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  g1h->heap_region_iterate(blk);
+  blk->update_sets();
+}
+
+void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
+  HeapWord* end = hr->end();
+  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
+
+  assert(hr->is_starts_humongous(),
+         "Only the start of a humongous region should be freed.");
+
+  hr->set_containing_set(NULL);
+  _humongous_regions_removed.increment(1u, hr->capacity());
+
+  _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
+  prepare_for_compaction(hr, end);
+  dummy_free_list.remove_all();
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
+  // If this is the first live region that we came across which we can compact,
+  // initialize the CompactPoint.
+  if (!is_cp_initialized()) {
+    _cp.space = hr;
+    _cp.threshold = hr->initialize_threshold();
+  }
+  prepare_for_compaction_work(&_cp, hr, end);
+}
+
+void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
+                                                          HeapRegion* hr,
+                                                          HeapWord* end) {
+  hr->prepare_for_compaction(cp);
+  // Also clear the part of the card table that will be unused after
+  // compaction.
+  _mrbs->clear(MemRegion(hr->compaction_top(), end));
+}
+
+void G1PrepareCompactClosure::update_sets() {
+  // We'll recalculate total used bytes and recreate the free list
+  // at the end of the GC, so no point in updating those values here.
+  HeapRegionSetCount empty_set;
+  _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
+}
+
+bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
+  if (hr->is_humongous()) {
+    if (hr->is_starts_humongous()) {
+      oop obj = oop(hr->bottom());
+      if (obj->is_gc_marked()) {
+        obj->forward_to(obj);
+      } else {
+        free_humongous_region(hr);
+      }
+    } else {
+      assert(hr->is_continues_humongous(), "Invalid humongous.");
+    }
+  } else {
+    prepare_for_compaction(hr, hr->end());
+  }
+  return false;
+}
@ -43,7 +43,7 @@ class ReferenceProcessor;
 // compaction.
 //
 // Class unloading will only occur when a full gc is invoked.
+class G1PrepareCompactClosure;

 class G1MarkSweep : AllStatic {
   friend class VM_G1MarkSweep;
@ -70,6 +70,30 @@ class G1MarkSweep : AllStatic {
   static void mark_sweep_phase4();

   static void allocate_stacks();
+  static void prepare_compaction();
+  static void prepare_compaction_work(G1PrepareCompactClosure* blk);
+};
+
+class G1PrepareCompactClosure : public HeapRegionClosure {
+ protected:
+  G1CollectedHeap* _g1h;
+  ModRefBarrierSet* _mrbs;
+  CompactPoint _cp;
+  HeapRegionSetCount _humongous_regions_removed;
+
+  virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
+  void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end);
+  void free_humongous_region(HeapRegion* hr);
+  bool is_cp_initialized() const { return _cp.space != NULL; }
+
+ public:
+  G1PrepareCompactClosure() :
+    _g1h(G1CollectedHeap::heap()),
+    _mrbs(_g1h->g1_barrier_set()),
+    _humongous_regions_removed() { }
+
+  void update_sets();
+  bool doHeapRegion(HeapRegion* hr);
 };

 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MARKSWEEP_HPP
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1MarkSweep.hpp"
+
+void G1MarkSweep::prepare_compaction() {
+  G1PrepareCompactClosure blk;
+  G1MarkSweep::prepare_compaction_work(&blk);
+}
@@ -38,11 +38,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
-    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
-    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
     _age_table(false), _scanner(g1h, rp),
-    _strong_roots_time(0), _term_time(0),
-    _alloc_buffer_waste(0), _undo_waste(0) {
+    _strong_roots_time(0), _term_time(0) {
   _scanner.set_par_scan_thread_state(this);
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
   // we "sacrifice" entry 0 to keep track of surviving bytes for
@@ -60,14 +57,14 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
 
-  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
-  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+  _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
 
   _start = os::elapsedTime();
 }
 
 G1ParScanThreadState::~G1ParScanThreadState() {
-  retire_alloc_buffers();
+  _g1_par_allocator->retire_alloc_buffers();
+  delete _g1_par_allocator;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
 }
 
@@ -90,14 +87,16 @@ G1ParScanThreadState::print_termination_stats(int i,
   const double elapsed_ms = elapsed_time() * 1000.0;
   const double s_roots_ms = strong_roots_time() * 1000.0;
   const double term_ms    = term_time() * 1000.0;
+  const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
+  const size_t undo_waste = _g1_par_allocator->undo_waste();
   st->print_cr("%3d %9.2f %9.2f %6.2f "
                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
-               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
-               alloc_buffer_waste() * HeapWordSize / K,
-               undo_waste() * HeapWordSize / K);
+               (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
+               alloc_buffer_waste * HeapWordSize / K,
+               undo_waste * HeapWordSize / K);
 }
 
 #ifdef ASSERT
@@ -164,12 +163,13 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
                            : m->age();
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
-  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
+  AllocationContext_t context = from_region->allocation_context();
+  HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
 #ifndef PRODUCT
   // Should this evacuation fail?
   if (_g1h->evacuation_should_fail()) {
     if (obj_ptr != NULL) {
-      undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
       obj_ptr = NULL;
     }
   }
@@ -246,66 +246,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
       obj->oop_iterate_backwards(&_scanner);
     }
   } else {
-    undo_allocation(alloc_purpose, obj_ptr, word_sz);
+    _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
     obj = forward_ptr;
   }
   return obj;
 }
-
-HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-  HeapWord* obj = NULL;
-  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
-  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
-
-    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
-    if (buf == NULL) {
-      return NULL; // Let caller handle allocation failure.
-    }
-    // Otherwise.
-    alloc_buf->set_word_size(gclab_word_size);
-    alloc_buf->set_buf(buf);
-
-    obj = alloc_buf->allocate(word_sz);
-    assert(obj != NULL, "buffer was definitely big enough...");
-  } else {
-    obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-  }
-  return obj;
-}
-
-void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-  if (alloc_buffer(purpose)->contains(obj)) {
-    assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-           "should contain whole object");
-    alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-  } else {
-    CollectedHeap::fill_with_object(obj, word_sz);
-    add_to_undo_waste(word_sz);
-  }
-}
-
-HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
-  HeapWord* obj = NULL;
-  if (purpose == GCAllocForSurvived) {
-    obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
-  } else {
-    obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
-  }
-  if (obj != NULL) {
-    return obj;
-  }
-  return allocate_slow(purpose, word_sz);
-}
-
-void G1ParScanThreadState::retire_alloc_buffers() {
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    size_t waste = _alloc_buffers[ap]->words_remaining();
-    add_to_alloc_buffer_waste(waste);
-    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                               true /* end_of_gc */,
-                                               false /* retain */);
-  }
-}
@@ -46,9 +46,8 @@ class G1ParScanThreadState : public StackObj {
   G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocator*   _g1_par_allocator;
+
   ageTable            _age_table;
 
   G1ParScanClosure    _scanner;
@@ -78,7 +77,6 @@ class G1ParScanThreadState : public StackObj {
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
 
   void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
 
   void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
 
   DirtyCardQueue& dirty_card_queue() { return _dcq; }
@@ -90,13 +88,6 @@ class G1ParScanThreadState : public StackObj {
 
   ageTable* age_table() { return &_age_table; }
 
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return _alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
-  size_t undo_waste() const { return _undo_waste; }
-
 #ifdef ASSERT
   bool queue_is_empty() const { return _refs->is_empty(); }
 
@@ -110,7 +101,7 @@ class G1ParScanThreadState : public StackObj {
     _refs->push(ref);
   }
 
-  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
+  template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
     // If the new value of the field points to the same region or
     // is the to-space, we don't need to include it in the Rset updates.
     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
@@ -121,12 +112,6 @@ class G1ParScanThreadState : public StackObj {
       }
     }
   }
- private:
-
-  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
-  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
-  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
-
  public:
 
   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
@@ -172,8 +157,6 @@ class G1ParScanThreadState : public StackObj {
   }
 
  private:
-  void retire_alloc_buffers();
-
 #define G1_PARTIAL_ARRAY_MASK 0x2
 
   inline bool has_partial_array_mask(oop* ref) const {
@@ -413,7 +413,7 @@ public:
     _ctbs(_g1h->g1_barrier_set()) {}
 
   bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
+    if (!r->is_continues_humongous()) {
       r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
     }
     return false;
@@ -119,7 +119,7 @@ public:
   // Record, if necessary, the fact that *p (where "p" is in region "from",
   // which is required to be non-NULL) has changed to a new non-NULL value.
   template <class T> void write_ref(HeapRegion* from, T* p);
-  template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
+  template <class T> void par_write_ref(HeapRegion* from, T* p, uint tid);
 
   // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
   // or card, respectively, such that a region or card with a corresponding
@@ -44,7 +44,7 @@ inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
 }
 
 template <class T>
-inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
+inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
   if (obj == NULL) {
     return;
@@ -263,7 +263,7 @@ public:
       current = &_free;
     } else if (r->is_young()) {
       current = &_young;
-    } else if (r->isHumongous()) {
+    } else if (r->is_humongous()) {
       current = &_humonguous;
     } else if (r->is_old()) {
       current = &_old;
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
+#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
 #include "gc_implementation/shared/liveRange.hpp"
@@ -138,32 +139,16 @@ void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
   }
 }
 
-// Minimum region size; we won't go lower than that.
-// We might want to decrease this in the future, to deal with small
-// heaps a bit more efficiently.
-#define MIN_REGION_SIZE ( 1024 * 1024 )
-
-// Maximum region size; we don't go higher than that. There's a good
-// reason for having an upper bound. We don't want regions to get too
-// large, otherwise cleanup's effectiveness would decrease as there
-// will be fewer opportunities to find totally empty regions after
-// marking.
-#define MAX_REGION_SIZE ( 32 * 1024 * 1024 )
-
-// The automatic region size calculation will try to have around this
-// many regions in the heap (based on the min heap size).
-#define TARGET_REGION_NUMBER 2048
-
 size_t HeapRegion::max_region_size() {
-  return (size_t)MAX_REGION_SIZE;
+  return HeapRegionBounds::max_size();
 }
 
 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
-    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
-                       (uintx) MIN_REGION_SIZE);
+    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
+                       (uintx) HeapRegionBounds::min_size());
   }
 
   int region_size_log = log2_long((jlong) region_size);
@@ -173,10 +158,10 @@ void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_hea
   region_size = ((uintx)1 << region_size_log);
 
   // Now make sure that we don't go over or under our limits.
-  if (region_size < MIN_REGION_SIZE) {
-    region_size = MIN_REGION_SIZE;
-  } else if (region_size > MAX_REGION_SIZE) {
-    region_size = MAX_REGION_SIZE;
+  if (region_size < HeapRegionBounds::min_size()) {
+    region_size = HeapRegionBounds::min_size();
+  } else if (region_size > HeapRegionBounds::max_size()) {
+    region_size = HeapRegionBounds::max_size();
   }
 
   // And recalculate the log.
@@ -213,11 +198,12 @@ void HeapRegion::reset_after_compaction() {
 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
-  assert(_end == _orig_end,
+  assert(_end == orig_end(),
         "we should have already filtered out humongous regions");
 
   _in_collection_set = false;
 
+  set_allocation_context(AllocationContext::system());
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
   set_free();
@@ -264,9 +250,9 @@ void HeapRegion::calc_gc_efficiency() {
   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 }
 
-void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
-  assert(!isHumongous(), "sanity / pre-condition");
-  assert(end() == _orig_end,
+void HeapRegion::set_starts_humongous(HeapWord* new_top, HeapWord* new_end) {
+  assert(!is_humongous(), "sanity / pre-condition");
+  assert(end() == orig_end(),
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
@@ -278,30 +264,30 @@ void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
   _offsets.set_for_starts_humongous(new_top);
 }
 
-void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
-  assert(!isHumongous(), "sanity / pre-condition");
-  assert(end() == _orig_end,
+void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
+  assert(!is_humongous(), "sanity / pre-condition");
+  assert(end() == orig_end(),
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
-  assert(first_hr->startsHumongous(), "pre-condition");
+  assert(first_hr->is_starts_humongous(), "pre-condition");
 
   _type.set_continues_humongous();
   _humongous_start_region = first_hr;
 }
 
 void HeapRegion::clear_humongous() {
-  assert(isHumongous(), "pre-condition");
+  assert(is_humongous(), "pre-condition");
 
-  if (startsHumongous()) {
+  if (is_starts_humongous()) {
     assert(top() <= end(), "pre-condition");
-    set_end(_orig_end);
+    set_end(orig_end());
     if (top() > end()) {
       // at least one "continues humongous" region after it
       set_top(end());
     }
   } else {
     // continues humongous
-    assert(end() == _orig_end, "sanity");
+    assert(end() == orig_end(), "sanity");
   }
 
   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
@@ -324,9 +310,10 @@ HeapRegion::HeapRegion(uint hrm_index,
                        MemRegion mr) :
     G1OffsetTableContigSpace(sharedOffsetArray, mr),
     _hrm_index(hrm_index),
+    _allocation_context(AllocationContext::system()),
     _humongous_start_region(NULL),
     _in_collection_set(false),
-    _next_in_special_set(NULL), _orig_end(NULL),
+    _next_in_special_set(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
@@ -349,10 +336,14 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
 
   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 
-  _orig_end = mr.end();
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_top_and_timestamp();
+
+  assert(mr.end() == orig_end(),
+         err_msg("Given region end address " PTR_FORMAT " should match exactly "
+                 "bottom plus one region size, i.e. " PTR_FORMAT,
+                 p2i(mr.end()), p2i(orig_end())));
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
@@ -663,7 +654,7 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
     return;
   }
 
-  if (continuesHumongous()) {
+  if (is_continues_humongous()) {
     if (strong_code_roots_length > 0) {
       gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
                              "region but has "SIZE_FORMAT" code root entries",
@@ -683,6 +674,8 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
 
 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
+  st->print("AC%4u", allocation_context());
+
   st->print(" %2s", get_short_type_str());
   if (in_collection_set())
     st->print(" CS");
@@ -788,7 +781,7 @@ public:
         HeapRegion* to = _g1h->heap_region_containing(obj);
         if (from != NULL && to != NULL &&
             from != to &&
-            !to->isHumongous()) {
+            !to->is_humongous()) {
           jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
           jbyte cv_field = *_bs->byte_for_const(p);
           const jbyte dirty = CardTableModRefBS::dirty_card_val();
@@ -842,19 +835,19 @@ void HeapRegion::verify(VerifyOption vo,
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
   VerifyLiveClosure vl_cl(g1, vo);
-  bool is_humongous = isHumongous();
+  bool is_region_humongous = is_humongous();
   size_t object_num = 0;
   while (p < top()) {
     oop obj = oop(p);
     size_t obj_size = block_size(p);
     object_num += 1;
 
-    if (is_humongous != g1->isHumongous(obj_size) &&
+    if (is_region_humongous != g1->is_humongous(obj_size) &&
         !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
       gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                              SIZE_FORMAT" words) in a %shumongous region",
-                             p, g1->isHumongous(obj_size) ? "" : "non-",
-                             obj_size, is_humongous ? "" : "non-");
+                             p, g1->is_humongous(obj_size) ? "" : "non-",
+                             obj_size, is_region_humongous ? "" : "non-");
       *failures = true;
       return;
     }
@@ -963,7 +956,7 @@ void HeapRegion::verify(VerifyOption vo,
     }
   }
 
-  if (is_humongous && object_num > 1) {
+  if (is_region_humongous && object_num > 1) {
     gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                            "but has "SIZE_FORMAT", objects",
                            bottom(), end(), object_num);
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
 #include "gc_implementation/g1/heapRegionType.hpp"
@@ -222,13 +223,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // The index of this region in the heap region sequence.
   uint _hrm_index;
 
+  AllocationContext_t _allocation_context;
+
   HeapRegionType _type;
 
   // For a humongous region, region in which it starts.
   HeapRegion* _humongous_start_region;
-  // For the start region of a humongous sequence, it's original end().
-  HeapWord* _orig_end;
 
   // True iff the region is in current collection_set.
   bool _in_collection_set;
 
@@ -417,9 +417,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
   bool is_eden() const { return _type.is_eden(); }
   bool is_survivor() const { return _type.is_survivor(); }
 
-  bool isHumongous() const { return _type.is_humongous(); }
-  bool startsHumongous() const { return _type.is_starts_humongous(); }
-  bool continuesHumongous() const { return _type.is_continues_humongous(); }
+  bool is_humongous() const { return _type.is_humongous(); }
+  bool is_starts_humongous() const { return _type.is_starts_humongous(); }
+  bool is_continues_humongous() const { return _type.is_continues_humongous(); }
 
   bool is_old() const { return _type.is_old(); }
 
@@ -431,10 +431,10 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // Return the number of distinct regions that are covered by this region:
   // 1 if the region is not humongous, >= 1 if the region is humongous.
   uint region_num() const {
-    if (!isHumongous()) {
+    if (!is_humongous()) {
      return 1U;
    } else {
-      assert(startsHumongous(), "doesn't make sense on HC regions");
+      assert(is_starts_humongous(), "doesn't make sense on HC regions");
       assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
       return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
     }
@@ -443,7 +443,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // Return the index + 1 of the last HC regions that's associated
   // with this HS region.
   uint last_hc_index() const {
-    assert(startsHumongous(), "don't call this otherwise");
+    assert(is_starts_humongous(), "don't call this otherwise");
     return hrm_index() + region_num();
   }
 
@@ -452,7 +452,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // their _end set up to be the end of the last continues region of the
   // corresponding humongous object.
   bool is_in_reserved_raw(const void* p) const {
-    return _bottom <= p && p < _orig_end;
+    return _bottom <= p && p < orig_end();
   }
 
   // Makes the current region be a "starts humongous" region, i.e.,
@@ -478,12 +478,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // humongous regions can be calculated by just looking at the
   // "starts humongous" regions and by ignoring the "continues
   // humongous" regions.
-  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
+  void set_starts_humongous(HeapWord* new_top, HeapWord* new_end);
 
   // Makes the current region be a "continues humongous'
   // region. first_hr is the "start humongous" region of the series
   // which this region will be part of.
-  void set_continuesHumongous(HeapRegion* first_hr);
+  void set_continues_humongous(HeapRegion* first_hr);
 
   // Unsets the humongous-related fields on the region.
   void clear_humongous();
@@ -513,6 +513,14 @@ class HeapRegion: public G1OffsetTableContigSpace {
     _next_in_special_set = r;
   }
 
+  void set_allocation_context(AllocationContext_t context) {
+    _allocation_context = context;
+  }
+
+  AllocationContext_t allocation_context() const {
+    return _allocation_context;
+  }
+
   // Methods used by the HeapRegionSetBase class and subclasses.
 
   // Getter and setter for the next and prev fields used to link regions into
@@ -556,7 +564,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
-  HeapWord* orig_end() const { return _orig_end; }
+  // For the start region of a humongous sequence, it's original end().
+  HeapWord* orig_end() const { return _bottom + GrainWords; }
 
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
@@ -603,7 +612,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 
   void reset_during_compaction() {
-    assert(isHumongous() && startsHumongous(),
+    assert(is_starts_humongous(),
            "should only be called for starts humongous regions");
 
     zero_marked_bytes();
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
+
+class HeapRegionBounds : public AllStatic {
+private:
+  // Minimum region size; we won't go lower than that.
+  // We might want to decrease this in the future, to deal with small
+  // heaps a bit more efficiently.
+  static const size_t MIN_REGION_SIZE = 1024 * 1024;
+
+  // Maximum region size; we don't go higher than that. There's a good
+  // reason for having an upper bound. We don't want regions to get too
+  // large, otherwise cleanup's effectiveness would decrease as there
+  // will be fewer opportunities to find totally empty regions after
+  // marking.
+  static const size_t MAX_REGION_SIZE = 32 * 1024 * 1024;
+
+  // The automatic region size calculation will try to have around this
+  // many regions in the heap (based on the min heap size).
+  static const size_t TARGET_REGION_NUMBER = 2048;
+
+public:
+  static inline size_t min_size();
+  static inline size_t max_size();
+  static inline size_t target_number();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONBOUNDS_HPP
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc_implementation/g1/heapRegionBounds.hpp"
+
+size_t HeapRegionBounds::min_size() {
+  return MIN_REGION_SIZE;
+}
+
+size_t HeapRegionBounds::max_size() {
+  return MAX_REGION_SIZE;
+}
+
+size_t HeapRegionBounds::target_number() {
+  return TARGET_REGION_NUMBER;
+}
@@ -66,10 +66,11 @@ bool HeapRegionManager::is_free(HeapRegion* hr) const {
 #endif
 
 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
-  HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrm_index);
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
   MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
   assert(reserved().contains(mr), "invariant");
-  return new HeapRegion(hrm_index, G1CollectedHeap::heap()->bot_shared(), mr);
+  return g1h->allocator()->new_heap_region(hrm_index, g1h->bot_shared(), mr);
 }
 
 void HeapRegionManager::commit_regions(uint index, size_t num_regions) {
@@ -281,7 +282,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
     // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
     // region) and regions already claimed.
-    if (r->claim_value() == claim_value || r->continuesHumongous()) {
+    if (r->claim_value() == claim_value || r->is_continues_humongous()) {
       continue;
     }
     // OK, try to claim it
@@ -289,7 +290,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
       continue;
     }
     // Success!
-    if (r->startsHumongous()) {
+    if (r->is_starts_humongous()) {
       // If the region is "starts humongous" we'll iterate over its
       // "continues humongous" first; in fact we'll do them
       // first. The order is important. In one case, calling the
@@ -301,7 +302,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
       for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
         HeapRegion* chr = _regions.get_by_index(ch_index);
 
-        assert(chr->continuesHumongous(), "Must be humongous region");
+        assert(chr->is_continues_humongous(), "Must be humongous region");
         assert(chr->humongous_start_region() == r,
                err_msg("Must work on humongous continuation of the original start region "
                        PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
@@ -311,7 +312,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
         bool claim_result = chr->claimHeapRegion(claim_value);
         // We should always be able to claim it; no one else should
         // be trying to claim this region.
-        guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
+        guarantee(claim_result, "We should always be able to claim the is_continues_humongous part of the humongous object");
 
         bool res2 = blk->doHeapRegion(chr);
         if (res2) {
@@ -322,7 +323,7 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
         // does something with "continues humongous" regions
         // clears them). We might have to weaken it in the future,
         // but let's leave these two asserts here for extra safety.
-        assert(chr->continuesHumongous(), "should still be the case");
+        assert(chr->is_continues_humongous(), "should still be the case");
         assert(chr->humongous_start_region() == r, "sanity");
       }
     }
@@ -424,7 +425,7 @@ void HeapRegionManager::verify() {
       // this method may be called, we have only completed allocation of the regions,
       // but not put into a region set.
       prev_committed = true;
-      if (hr->startsHumongous()) {
+      if (hr->is_starts_humongous()) {
         prev_end = hr->orig_end();
       } else {
         prev_end = hr->end();
@@ -419,7 +419,7 @@ void OtherRegionsTable::print_from_card_cache() {
   FromCardCache::print();
 }
 
-void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
+void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
   uint cur_hrm_ind = hr()->hrm_index();
 
   if (G1TraceHeapRegionRememberedSet) {
@@ -435,10 +435,10 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
   if (G1TraceHeapRegionRememberedSet) {
     gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
                            hr()->bottom(), from_card,
-                           FromCardCache::at((uint)tid, cur_hrm_ind));
+                           FromCardCache::at(tid, cur_hrm_ind));
   }
 
-  if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
+  if (FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
     if (G1TraceHeapRegionRememberedSet) {
       gclog_or_tty->print_cr("  from-card cache hit.");
     }
@@ -493,7 +493,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
       return;
     } else {
      if (G1TraceHeapRegionRememberedSet) {
-        gclog_or_tty->print_cr("   [tid %d] sparse table entry "
+        gclog_or_tty->print_cr("   [tid %u] sparse table entry "
                                "overflow(f: %d, t: %u)",
                                tid, from_hrm_ind, cur_hrm_ind);
       }
@@ -179,7 +179,7 @@ public:
 
   // For now. Could "expand" some tables in the future, so that this made
   // sense.
-  void add_reference(OopOrNarrowOopStar from, int tid);
+  void add_reference(OopOrNarrowOopStar from, uint tid);
 
   // Removes any entries shown by the given bitmaps to contain only dead
   // objects.
@@ -301,7 +301,7 @@ public:
   }
 
   // Used in the parallel case.
-  void add_reference(OopOrNarrowOopStar from, int tid) {
+  void add_reference(OopOrNarrowOopStar from, uint tid) {
     _other_regions.add_reference(from, tid);
   }
 
@@ -41,7 +41,7 @@ void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
 void HeapRegionSetBase::verify_region(HeapRegion* hr) {
   assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index()));
   assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions
-  assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
+  assert(hr->is_humongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name()));
   assert(hr->is_free() == regions_free(), err_msg("Wrong free state for region %u and set %s", hr->hrm_index(), name()));
   assert(!hr->is_free() || hr->is_empty(), err_msg("Free region %u is not empty for set %s", hr->hrm_index(), name()));
   assert(!hr->is_empty() || hr->is_free(), err_msg("Empty region %u is not free for set %s", hr->hrm_index(), name()));
@@ -30,8 +30,8 @@ bool HeapRegionType::is_valid(Tag tag) {
     case FreeTag:
     case EdenTag:
     case SurvTag:
-    case HumStartsTag:
-    case HumContTag:
+    case StartsHumongousTag:
+    case ContinuesHumongousTag:
     case OldTag:
       return true;
   }
@@ -41,12 +41,12 @@ bool HeapRegionType::is_valid(Tag tag) {
 const char* HeapRegionType::get_str() const {
   hrt_assert_is_valid(_tag);
   switch (_tag) {
     case FreeTag: return "FREE";
     case EdenTag: return "EDEN";
     case SurvTag: return "SURV";
-    case HumStartsTag: return "HUMS";
-    case HumContTag: return "HUMC";
+    case StartsHumongousTag: return "HUMS";
+    case ContinuesHumongousTag: return "HUMC";
     case OldTag: return "OLD";
   }
   ShouldNotReachHere();
   // keep some compilers happy
@@ -56,12 +56,12 @@ const char* HeapRegionType::get_str() const {
 const char* HeapRegionType::get_short_str() const {
   hrt_assert_is_valid(_tag);
   switch (_tag) {
     case FreeTag: return "F";
     case EdenTag: return "E";
     case SurvTag: return "S";
-    case HumStartsTag: return "HS";
-    case HumContTag: return "HC";
+    case StartsHumongousTag: return "HS";
+    case ContinuesHumongousTag: return "HC";
     case OldTag: return "O";
   }
   ShouldNotReachHere();
   // keep some compilers happy
@@ -49,22 +49,22 @@ private:
   // 0001 1 [ 3] Survivor
   //
   // 0010 0      Humongous Mask
-  // 0010 0 [ 4] Humongous Starts
-  // 0010 1 [ 5] Humongous Continues
+  // 0010 0 [ 4] Starts Humongous
+  // 0010 1 [ 5] Continues Humongous
   //
   // 01000 [ 8] Old
   typedef enum {
     FreeTag = 0,
 
     YoungMask = 2,
     EdenTag = YoungMask,
     SurvTag = YoungMask + 1,
 
-    HumMask = 4,
-    HumStartsTag = HumMask,
-    HumContTag = HumMask + 1,
+    HumongousMask = 4,
+    StartsHumongousTag = HumongousMask,
+    ContinuesHumongousTag = HumongousMask + 1,
 
     OldTag = 8
   } Tag;
 
   volatile Tag _tag;
@@ -104,9 +104,9 @@ public:
   bool is_eden() const { return get() == EdenTag; }
   bool is_survivor() const { return get() == SurvTag; }
 
-  bool is_humongous() const { return (get() & HumMask) != 0; }
-  bool is_starts_humongous() const { return get() == HumStartsTag; }
-  bool is_continues_humongous() const { return get() == HumContTag; }
+  bool is_humongous() const { return (get() & HumongousMask) != 0; }
+  bool is_starts_humongous() const { return get() == StartsHumongousTag; }
+  bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }
 
   bool is_old() const { return get() == OldTag; }
 
@@ -118,8 +118,8 @@ public:
   void set_eden_pre_gc() { set_from(EdenTag, SurvTag); }
   void set_survivor() { set_from(SurvTag, FreeTag); }
 
-  void set_starts_humongous() { set_from(HumStartsTag, FreeTag); }
-  void set_continues_humongous() { set_from(HumContTag, FreeTag); }
+  void set_starts_humongous() { set_from(StartsHumongousTag, FreeTag); }
+  void set_continues_humongous() { set_from(ContinuesHumongousTag, FreeTag); }
 
   void set_old() { set(OldTag); }
 
@ -45,11 +45,13 @@
   nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
   nonstatic_field(HeapRegionManager, _num_committed, uint) \
   \
+  nonstatic_field(G1Allocator, _summary_bytes_used, size_t) \
+  \
   nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
-  nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
   nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
   nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
   nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \
+  nonstatic_field(G1CollectedHeap, _allocator, G1Allocator*) \
   \
   nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \
   nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \

@ -72,14 +74,16 @@
   \
   declare_type(G1OffsetTableContigSpace, CompactibleSpace) \
   declare_type(HeapRegion, G1OffsetTableContigSpace) \
   declare_toplevel_type(HeapRegionManager) \
   declare_toplevel_type(HeapRegionSetBase) \
   declare_toplevel_type(HeapRegionSetCount) \
   declare_toplevel_type(G1MonitoringSupport) \
+  declare_toplevel_type(G1Allocator) \
   \
   declare_toplevel_type(G1CollectedHeap*) \
   declare_toplevel_type(HeapRegion*) \
   declare_toplevel_type(G1MonitoringSupport*) \
+  declare_toplevel_type(G1Allocator*) \


 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
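These vmStructs entries move _summary_bytes_used from G1CollectedHeap to the new G1Allocator and make the allocator reachable from external tools such as the Serviceability Agent. As a rough standalone illustration of the general pattern behind such a table (hypothetical names, not HotSpot's macros), a debugger-facing table can record a type name, a field name, and the field's offset so an out-of-process reader can locate the value without the target's headers:

// Standalone sketch of the idea behind a vmStructs-style table (hypothetical
// names, not HotSpot's macros): record {type, field, offset} so an external
// tool can find a field without linking against the target's class layout.
#include <cstddef>
#include <cstdio>

struct AllocatorSketch {
  size_t summary_bytes_used;  // stands in for a field like _summary_bytes_used
};

struct FieldEntry {
  const char* type_name;
  const char* field_name;
  size_t      offset;
};

static const FieldEntry kFieldTable[] = {
  { "AllocatorSketch", "summary_bytes_used",
    offsetof(AllocatorSketch, summary_bytes_used) },
};

int main() {
  AllocatorSketch a = { 4096 };
  // An external reader would apply the recorded offset to the object's base
  // address; here we do the same arithmetic in-process.
  const FieldEntry& e = kFieldTable[0];
  size_t value = *reinterpret_cast<const size_t*>(
      reinterpret_cast<const char*>(&a) + e.offset);
  std::printf("%s::%s = %zu\n", e.type_name, e.field_name, value);
  return 0;
}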
@ -45,7 +45,8 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(
 void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
-  _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
+
+  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
   assert(_result == NULL || _pause_succeeded,
          "if we get back a result, the pause should have succeeded");
 }

@ -99,7 +100,7 @@ void VM_G1IncCollectionPause::doit() {

   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                                    false /* expect_null_cur_alloc_region */);
     if (_result != NULL) {
       // If we can successfully allocate before we actually do the

@ -152,7 +153,7 @@ void VM_G1IncCollectionPause::doit() {
   g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
   if (_pause_succeeded && _word_size > 0) {
     // An allocation had been requested.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
+    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                                    true /* expect_null_cur_alloc_region */);
   } else {
     assert(_result == NULL, "invariant");
@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_VM_OPERATIONS_G1_HPP

+#include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"

 // VM_operations for the G1 collector.

@ -40,6 +41,7 @@ protected:
   size_t _word_size;
   HeapWord* _result;
   bool _pause_succeeded;
+  AllocationContext_t _allocation_context;

 public:
   VM_G1OperationWithAllocRequest(unsigned int gc_count_before,

@ -49,6 +51,8 @@ public:
     _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
   HeapWord* result() { return _result; }
   bool pause_succeeded() { return _pause_succeeded; }
+  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
+  AllocationContext_t allocation_context() { return _allocation_context; }
 };

 class VM_G1CollectFull: public VM_GC_Operation {
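Taken together, the .cpp and .hpp hunks above thread an allocation context through the G1 allocation VM operations: the operation object now carries the context, and both satisfy_failed_allocation and attempt_allocation_at_safepoint receive it when the operation runs. A rough standalone sketch of the carry-a-context-through-a-request pattern (hypothetical names, not HotSpot's classes):

// Standalone sketch (hypothetical names) of threading an allocation context
// through a request object so the eventual allocation call can honor it.
#include <cstddef>
#include <cstdio>

typedef unsigned char AllocationContextSketch;  // stand-in for a context id type

class AllocRequestSketch {
  size_t                  _word_size;
  AllocationContextSketch _allocation_context;

public:
  explicit AllocRequestSketch(size_t word_size)
      : _word_size(word_size), _allocation_context(0) {}

  void set_allocation_context(AllocationContextSketch context) {
    _allocation_context = context;
  }
  AllocationContextSketch allocation_context() const { return _allocation_context; }

  // The "doit" step hands the stored context to the allocator.
  void doit() {
    std::printf("allocating %zu words in context %u\n",
                _word_size, static_cast<unsigned>(_allocation_context));
    // e.g. heap->attempt_allocation(_word_size, allocation_context(), ...);
  }
};

int main() {
  AllocRequestSketch op(128);
  op.set_allocation_context(1);  // caller picks the context before execution
  op.doit();
  return 0;
}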
@ -288,7 +288,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
       while (p < to) {
         Prefetch::write(p, interval);
         oop m = oop(p);
-        assert(m->is_oop_or_null(), "check for header");
+        assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
         m->push_contents(pm);
         p += m->size();
       }

@ -296,7 +296,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
     } else {
       while (p < to) {
         oop m = oop(p);
-        assert(m->is_oop_or_null(), "check for header");
+        assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
         m->push_contents(pm);
         p += m->size();
       }
@ -74,10 +74,9 @@ jint ParallelScavengeHeap::initialize() {
     return JNI_ENOMEM;
   }

-  _reserved = MemRegion((HeapWord*)heap_rs.base(),
-                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));

-  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
   barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
@ -2882,7 +2882,7 @@ void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
         start_array->allocate_block(addr);
       }
       oop(addr)->update_contents(cm);
-      assert(oop(addr)->is_oop_or_null(), "should be an oop now");
+      assert(oop(addr)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr))));
     }
   }
 }

@ -3366,7 +3366,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {

   oop moved_oop = (oop) destination();
   moved_oop->update_contents(compaction_manager());
-  assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
+  assert(moved_oop->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop)));

   update_state(words);
   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
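The assertion changes in the hunks above replace fixed strings with err_msg(...) messages that embed the offending address via PTR_FORMAT/p2i, so a failed check reports which pointer tripped it. A minimal standalone sketch of the same idea, using a hypothetical ASSERT_MSG macro rather than HotSpot's utilities:

// Standalone sketch of a formatted assertion (hypothetical ASSERT_MSG macro,
// not HotSpot's err_msg): on failure, print the faulting pointer, then abort.
#include <cstdio>
#include <cstdlib>

#define ASSERT_MSG(cond, fmt, ...)                                        \
  do {                                                                    \
    if (!(cond)) {                                                        \
      std::fprintf(stderr, "assert(%s) failed: " fmt "\n", #cond,         \
                   __VA_ARGS__);                                          \
      std::abort();                                                       \
    }                                                                     \
  } while (0)

static bool looks_like_object(const void* p) {
  return p != nullptr;  // stand-in for a real validity check
}

int main() {
  int object = 42;
  void* addr = &object;
  // Passes: the message is only formatted when the condition is false.
  ASSERT_MSG(looks_like_object(addr),
             "Expected an object or NULL at %p", addr);
  std::puts("check passed");
  return 0;
}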
@ -582,6 +582,14 @@ void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
   }
 }

+void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
+  // It is important to do this in a way such that concurrent readers can't
+  // temporarily think something is in the heap. (Seen this happen in asserts.)
+  _reserved.set_word_size(0);
+  _reserved.set_start(start);
+  _reserved.set_end(end);
+}
+
 /////////////// Unit tests ///////////////

 #ifndef PRODUCT
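initialize_reserved_region updates _reserved in a deliberate order: the word size is zeroed before the start and end move, so a reader racing with the update sees an empty region rather than one that briefly spans a bogus address range. A rough standalone sketch of the same ordering concern (hypothetical RegionSketch, not HotSpot's MemRegion); note that real concurrent code would also need appropriate memory-ordering guarantees, which this sketch does not attempt to show:

// Standalone sketch (hypothetical names): shrink the region to empty before
// moving its bounds, so a racing reader never observes a half-updated range.
#include <cstddef>
#include <cstdio>

struct RegionSketch {
  char*  start;
  size_t byte_size;

  bool contains(const char* p) const {
    return p >= start && p < start + byte_size;
  }
};

// Re-point the region at [new_start, new_start + new_size).
void reinitialize(RegionSketch& r, char* new_start, size_t new_size) {
  r.byte_size = 0;         // step 1: readers now see an empty region
  r.start     = new_start; // step 2: move the base while the size is still 0
  r.byte_size = new_size;  // step 3: publish the new extent last
}

int main() {
  char old_block[16];
  char new_block[32];
  RegionSketch r = { old_block, sizeof(old_block) };

  reinitialize(r, new_block, sizeof(new_block));
  std::printf("contains(new_block[0]) = %d\n", r.contains(&new_block[0]));
  return 0;
}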
@ -85,6 +85,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
   friend class IsGCActiveMark; // Block structured external access to _is_gc_active

+ private:
 #ifdef ASSERT
   static int _fire_out_of_memory_count;
 #endif

@ -97,8 +98,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
   bool _defer_initial_card_mark;

- protected:
   MemRegion _reserved;
+
+ protected:
   BarrierSet* _barrier_set;
   bool _is_gc_active;
   uint _n_par_threads;

@ -211,6 +213,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Stop any onging concurrent work and prepare for exit.
   virtual void stop() {}

+  void initialize_reserved_region(HeapWord *start, HeapWord *end);
   MemRegion reserved_region() const { return _reserved; }
   address base() const { return (address)reserved_region().start(); }

@ -637,6 +640,15 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // actual number may be germane.
   static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }

+  // Copy the current allocation context statistics for the specified contexts.
+  // For each context in contexts, set the corresponding entries in the totals
+  // and accuracy arrays to the current values held by the statistics. Each
+  // array should be of length len.
+  virtual void copy_allocation_context_stats(const jint* contexts,
+                                             jlong* totals,
+                                             jbyte* accuracy,
+                                             jint len) { }
+
 /////////////// Unit tests ///////////////

 NOT_PRODUCT(static void test_is_in();)
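copy_allocation_context_stats is introduced as a virtual hook with an empty default body, so collectors that keep no per-context statistics inherit a no-op while a collector that does track them can override it. A brief standalone sketch of that default-no-op hook pattern (hypothetical names and plain C++ types in place of jint/jlong/jbyte):

// Standalone sketch (hypothetical names): a base-class hook that defaults to
// a no-op so only heaps that track per-context statistics need to override it.
#include <cstdio>

class HeapSketch {
public:
  virtual ~HeapSketch() {}
  // Default: no allocation-context statistics to report.
  virtual void copy_allocation_context_stats(const int* /*contexts*/,
                                             long long* /*totals*/,
                                             signed char* /*accuracy*/,
                                             int /*len*/) { }
};

class ContextTrackingHeapSketch : public HeapSketch {
public:
  virtual void copy_allocation_context_stats(const int* contexts,
                                             long long* totals,
                                             signed char* accuracy,
                                             int len) {
    for (int i = 0; i < len; i++) {
      totals[i]   = 1024 * (contexts[i] + 1);  // made-up per-context byte counts
      accuracy[i] = 100;                       // made-up accuracy percentage
    }
  }
};

int main() {
  ContextTrackingHeapSketch heap;
  int contexts[2] = { 0, 1 };
  long long totals[2] = { 0, 0 };
  signed char accuracy[2] = { 0, 0 };
  heap.copy_allocation_context_stats(contexts, totals, accuracy, 2);
  std::printf("context 1 totals: %lld, accuracy: %d\n", totals[1], (int)accuracy[1]);
  return 0;
}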
@ -54,6 +54,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";

+    case _update_allocation_context_stats:
+      return "Update Allocation Context Stats";
+
     case _no_gc:
       return "No GC";
Some files were not shown because too many files have changed in this diff.