Merge
commit afe805a586
.hgtags
@@ -143,3 +143,5 @@ f0eccb2946986fb9626efde7d8ed9c8192623f5c jdk8-b17
0ff7113a0882ec82d642cb9f0297b4e497807ced jdk8-b19
6561530ea757c3f3a6fb171c9cc7b3885cdeca85 jdk8-b20
b3a426170188f52981cf4573a2f14d487fddab0d jdk8-b21
e8f03541af27e38aafb619b96863e17f65ffe53b jdk8-b22
498124337041ad53cbaa7eb110f3d7acd6d4eac4 jdk8-b23

@@ -143,3 +143,5 @@ a4f28069d44a379cda99dd1d921d19f819726d22 jdk8-b15
237bc29afbfc6f56a4fe4a6008e2befb59c44bac jdk8-b19
5a5eaf6374bcbe23530899579fed17a05b7705f3 jdk8-b20
cc771d92284f71765eca14d6d08703c4af254c04 jdk8-b21
7ad075c809952e355d25030605da6af30456ed74 jdk8-b22
60d6f64a86b1e511169d264727f6d51415978df0 jdk8-b23

@@ -143,3 +143,5 @@ e59c47de1ad8982ff3b0e843773a6902b36c2337 jdk8-b14
e1366c5d84ef984095a332bcee70b3938232d07d jdk8-b19
51d8b6cb18c0978ecfa4f33e1537d35ee01b69fa jdk8-b20
f157fc2a71a38ce44007a6f18d5b011824dce705 jdk8-b21
a11d0062c445d5f36651c78650ab88aa594bcbff jdk8-b22
5218eb256658442b62b05295aafa5b5f35252972 jdk8-b23

@@ -211,3 +211,7 @@ a2fef924d8e6f37dac2a887315e3502876cc8e24 hs23-b08
fe2c8764998112b7fefcd7d41599714813ae4327 jdk8-b20
9952d1c439d64c5fd4ad1236a63a62bd5a49d4c3 jdk8-b21
513351373923f74a7c91755748b95c9771e59f96 hs23-b10
24727fb37561779077fdfa5a33342246f20e5c0f jdk8-b22
dcc292399a39113957eebbd3e487b7e05e2c79fc hs23-b11
e850d8e7ea54b91c7aa656e297f0f9f38dd4c296 jdk8-b23
9e177d44b10fe92ecffa965fef9c5ac5433c1b46 hs23-b12

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,8 +49,12 @@ public class G1CollectedHeap extends SharedHeap {
static private long g1CommittedFieldOffset;
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _g1mm
// G1MonitoringSupport* _g1mm;
static private AddressField g1mmField;
// MasterOldRegionSet _old_set;
static private long oldSetFieldOffset;
// MasterHumongousRegionSet _humongous_set;
static private long humongousSetFieldOffset;

static {
VM.registerVMInitializedObserver(new Observer() {
@@ -67,12 +71,14 @@ public class G1CollectedHeap extends SharedHeap {
g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
}

public long capacity() {
Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
MemRegion g1_committed = new MemRegion(g1CommittedAddr);
return g1_committed.byteSize();
MemRegion g1Committed = new MemRegion(g1CommittedAddr);
return g1Committed.byteSize();
}

public long used() {
@@ -94,6 +100,18 @@ public class G1CollectedHeap extends SharedHeap {
return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
}

public HeapRegionSetBase oldSet() {
Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
oldSetAddr);
}

public HeapRegionSetBase humongousSet() {
Address humongousSetAddr = addr.addOffsetTo(humongousSetFieldOffset);
return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
humongousSetAddr);
}

private Iterator<HeapRegion> heapRegionIterator() {
return hrs().heapRegionIterator();
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,6 +77,10 @@ public class G1MonitoringSupport extends VMObject {
return edenUsedField.getValue(addr);
}

public long edenRegionNum() {
return edenUsed() / HeapRegion.grainBytes();
}

public long survivorCommitted() {
return survivorCommittedField.getValue(addr);
}
@@ -85,6 +89,10 @@ public class G1MonitoringSupport extends VMObject {
return survivorUsedField.getValue(addr);
}

public long survivorRegionNum() {
return survivorUsed() / HeapRegion.grainBytes();
}

public long oldCommitted() {
return oldCommittedField.getValue(addr);
}

@@ -0,0 +1,81 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

package sun.jvm.hotspot.gc_implementation.g1;

import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;

import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;

// Mirror class for HeapRegionSetBase. Represents a group of regions.

public class HeapRegionSetBase extends VMObject {
// size_t _length;
static private CIntegerField lengthField;
// size_t _region_num;
static private CIntegerField regionNumField;
// size_t _total_used_bytes;
static private CIntegerField totalUsedBytesField;

static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}

static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionSetBase");

lengthField = type.getCIntegerField("_length");
regionNumField = type.getCIntegerField("_region_num");
totalUsedBytesField = type.getCIntegerField("_total_used_bytes");
}

public long length() {
return lengthField.getValue(addr);
}

public long regionNum() {
return regionNumField.getValue(addr);
}

public long totalUsedBytes() {
return totalUsedBytesField.getValue(addr);
}

public HeapRegionSetBase(Address addr) {
super(addr);
}
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,6 +67,7 @@ public class HeapSummary extends Tool {
printValue("SurvivorRatio = ", getFlagValue("SurvivorRatio", flagMap));
printValMB("PermSize = ", getFlagValue("PermSize", flagMap));
printValMB("MaxPermSize = ", getFlagValue("MaxPermSize", flagMap));
printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes());

System.out.println();
System.out.println("Heap Usage:");
@@ -100,11 +101,20 @@ public class HeapSummary extends Tool {
} else if (sharedHeap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
G1MonitoringSupport g1mm = g1h.g1mm();
System.out.println("G1 Young Generation");
printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted());
printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted());
printG1Space("To Space:", 0, 0);
printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted());
long edenRegionNum = g1mm.edenRegionNum();
long survivorRegionNum = g1mm.survivorRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldRegionNum = oldSet.regionNum() + humongousSet.regionNum();
printG1Space("G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
System.out.println("G1 Young Generation:");
printG1Space("Eden Space:", edenRegionNum,
g1mm.edenUsed(), g1mm.edenCommitted());
printG1Space("Survivor Space:", survivorRegionNum,
g1mm.survivorUsed(), g1mm.survivorCommitted());
printG1Space("G1 Old Generation:", oldRegionNum,
g1mm.oldUsed(), g1mm.oldCommitted());
} else {
throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
}
@@ -216,9 +226,11 @@ public class HeapSummary extends Tool {
System.out.println(alignment + (double)space.used() * 100.0 / space.capacity() + "% used");
}

private void printG1Space(String spaceName, long used, long capacity) {
private void printG1Space(String spaceName, long regionNum,
long used, long capacity) {
long free = capacity - used;
System.out.println(spaceName);
printValue("regions = ", regionNum);
printValMB("capacity = ", capacity);
printValMB("used = ", used);
printValMB("free = ", free);

@@ -367,7 +367,7 @@ endif
$(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
	$(install-file)

# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h)
# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h, jfr.h)
$(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
	$(install-file)

@@ -384,6 +384,16 @@ $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h: $(HS_JNI_ARCH_SRC)
$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/services/%
	$(install-file)

JFR_EXISTS=$(shell if [ -d $(HS_ALT_SRC) ]; then echo 1; else echo 0; fi)
# export jfr.h
ifeq ($JFR_EXISTS,1)
$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/agent/%
	$(install-file)
else
$(EXPORT_INCLUDE_DIR)/jfr.h:

endif

# Doc files (jvmti.html)
$(EXPORT_DOCS_DIR)/platform/jvmti/%: $(DOCS_DIR)/%
	$(install-file)

@@ -208,7 +208,7 @@ TARGETS_ZERO = $(addsuffix zero,$(TARGETS))
TARGETS_SHARK = $(addsuffix shark,$(TARGETS))

BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)

BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)

@ -162,20 +162,6 @@ ifndef HOTSPOT_VM_DISTRO
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(OS_VENDOR), Darwin)
|
||||
# MACOSX FIXME: we should be able to run test_gamma (see MACOSX_PORT-214)
|
||||
ifeq ($(ALWAYS_PASS_TEST_GAMMA),)
|
||||
# ALWAYS_PASS_TEST_GAMMA wasn't set so we default to true on MacOS X
|
||||
# until MACOSX_PORT-214 is fixed
|
||||
ALWAYS_PASS_TEST_GAMMA=true
|
||||
endif
|
||||
endif
|
||||
ifeq ($(ALWAYS_PASS_TEST_GAMMA), true)
|
||||
TEST_GAMMA_STATUS= echo 'exit 0';
|
||||
else
|
||||
TEST_GAMMA_STATUS=
|
||||
endif
|
||||
|
||||
BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
|
||||
|
||||
BUILDTREE = \
|
||||
@ -353,12 +339,10 @@ env.sh: $(BUILDTREE_MAKE)
|
||||
$(BUILDTREE_COMMENT); \
|
||||
[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
|
||||
{ \
|
||||
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo "DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
|
||||
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
|
||||
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
|
||||
echo "export JAVA_HOME LD_LIBRARY_PATH DYLD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
|
||||
echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
|
||||
) > $@
|
||||
|
||||
env.csh: env.sh
|
||||
@ -412,7 +396,7 @@ JAVA_FLAG/32 = -d32
|
||||
JAVA_FLAG/64 = -d64
|
||||
|
||||
WRONG_DATA_MODE_MSG = \
|
||||
echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
|
||||
echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
|
||||
|
||||
CROSS_COMPILING_MSG = \
|
||||
echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
|
||||
@ -420,20 +404,78 @@ CROSS_COMPILING_MSG = \
|
||||
test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
echo '#!/bin/sh'; \
|
||||
echo "#!/bin/sh"; \
|
||||
echo ""; \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
echo '. ./env.sh'; \
|
||||
echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \
|
||||
echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
|
||||
echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
|
||||
echo "then"; \
|
||||
echo " $(WRONG_DATA_MODE_MSG); exit 0;"; \
|
||||
echo ""; \
|
||||
echo "# Include environment settings for gamma run"; \
|
||||
echo ""; \
|
||||
echo ". ./env.sh"; \
|
||||
echo ""; \
|
||||
echo "# Do not run gamma test for cross compiles"; \
|
||||
echo ""; \
|
||||
echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
|
||||
echo " $(CROSS_COMPILING_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
|
||||
echo ""; \
|
||||
echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
|
||||
echo " $(NO_JAVA_HOME_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Check JAVA_HOME version to be used for the test"; \
|
||||
echo ""; \
|
||||
echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
|
||||
echo "if [ \$$? -ne 0 ]; then "; \
|
||||
echo " $(WRONG_DATA_MODE_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Use gamma_g if it exists"; \
|
||||
echo ""; \
|
||||
echo "GAMMA_PROG=gamma"; \
|
||||
echo "if [ -f gamma_g ]; then "; \
|
||||
echo " GAMMA_PROG=gamma_g"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
|
||||
echo " # Ensure architecture for gamma and JAVA_HOME is the same."; \
|
||||
echo " # NOTE: gamma assumes the OpenJDK directory layout."; \
|
||||
echo ""; \
|
||||
echo " GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
|
||||
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
|
||||
echo " if [ ! -f \$${JVM_LIB} ]; then"; \
|
||||
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
|
||||
echo " fi"; \
|
||||
echo " if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
|
||||
echo " $(WRONG_DATA_MODE_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo " fi"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Compile Queens program for test"; \
|
||||
echo ""; \
|
||||
echo "rm -f Queens.class"; \
|
||||
echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
|
||||
echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
|
||||
echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
|
||||
$(TEST_GAMMA_STATUS) \
|
||||
echo ""; \
|
||||
echo "# Set library path solely for gamma launcher test run"; \
|
||||
echo ""; \
|
||||
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo "export LD_LIBRARY_PATH"; \
|
||||
echo "unset LD_LIBRARY_PATH_32"; \
|
||||
echo "unset LD_LIBRARY_PATH_64"; \
|
||||
echo ""; \
|
||||
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
|
||||
echo " DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo " export DYLD_LIBRARY_PATH"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
|
||||
echo ""; \
|
||||
echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
|
||||
) > $@
|
||||
$(QUIETLY) chmod +x $@
|
||||
|
||||
|
@@ -142,6 +142,7 @@ EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
# client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client

ifndef BUILD_CLIENT_ONLY
EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
@@ -150,7 +151,6 @@ endif

ifneq ($(ZERO_BUILD), true)
ifeq ($(ARCH_DATA_MODEL), 32)
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
endif

@@ -50,7 +50,24 @@ ifeq ($(LINK_INTO),AOUT)
LIBS_LAUNCHER += $(STATIC_STDCXX) $(LIBS)
else
LAUNCHER.o = launcher.o
LFLAGS_LAUNCHER += -L`pwd`
LFLAGS_LAUNCHER += -L`pwd`

# The gamma launcher runs the JDK from $JAVA_HOME, overriding the JVM with a
# freshly built JVM at ./libjvm.{so|dylib}. This is accomplished by setting
# the library searchpath using ({DY}LD_LIBRARY_PATH) to find the local JVM
# first. Gamma dlopen()s libjava from $JAVA_HOME/jre/lib{/$arch}, which is
# statically linked with CoreFoundation framework libs. Unfortunately, gamma's
# unique searchpath results in some unresolved symbols in the framework
# libraries, because JDK libraries are inadvertently discovered first on the
# searchpath, e.g. libjpeg. On Mac OS X, filenames are case *insensitive*.
# So, the actual filename collision is libjpeg.dylib and libJPEG.dylib.
# To resolve this, gamma needs to also statically link with the CoreFoundation
# framework libraries.

ifeq ($(OS_VENDOR),Darwin)
LFLAGS_LAUNCHER += -framework CoreFoundation
endif

LIBS_LAUNCHER += -l$(JVM) $(LIBS)
endif

@ -96,6 +96,10 @@ ifdef DEFAULT_LIBPATH
|
||||
CPPFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
|
||||
endif
|
||||
|
||||
ifndef JAVASE_EMBEDDED
|
||||
CFLAGS += -DINCLUDE_TRACE
|
||||
endif
|
||||
|
||||
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
|
||||
CFLAGS += $(CFLAGS_WARN/BYFILE)
|
||||
|
||||
@ -147,6 +151,12 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
|
||||
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
|
||||
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
|
||||
|
||||
ifndef JAVASE_EMBEDDED
|
||||
SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
|
||||
find $(HS_ALT_SRC)/share/vm/jfr -type d; \
|
||||
fi)
|
||||
endif
|
||||
|
||||
CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
|
||||
CORE_PATHS+=$(GENERATED)/jvmtifiles
|
||||
|
||||
@ -327,8 +337,8 @@ ifeq ($(OS_VENDOR), Darwin)
|
||||
$(LIBJVM).dSYM: $(LIBJVM)
|
||||
dsymutil $(LIBJVM)
|
||||
|
||||
# no launcher or libjvm_db for macosx
|
||||
build: $(LIBJVM) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
|
||||
# no libjvm_db for macosx
|
||||
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
|
||||
echo "Doing vm.make build:"
|
||||
else
|
||||
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
|
||||
|
@ -294,3 +294,7 @@ EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jvmticmlr.h
|
||||
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jni.h
|
||||
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
|
||||
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
|
||||
|
||||
ifndef JAVASE_EMBEDDED
|
||||
EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
|
||||
endif
|
||||
|
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011

HS_MAJOR_VER=23
HS_MINOR_VER=0
HS_BUILD_NUMBER=10
HS_BUILD_NUMBER=12

JDK_MAJOR_VER=1
JDK_MINOR_VER=8

@ -174,6 +174,10 @@ jprt.my.linux.armsflt.ejdk6=linux_armsflt_2.6
|
||||
jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
|
||||
jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.macosx.x64.jdk8=macosx_x64_10.7
|
||||
jprt.my.macosx.x64.jdk7=macosx_x64_10.7
|
||||
jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
|
||||
|
||||
jprt.my.windows.i586.jdk8=windows_i586_5.1
|
||||
jprt.my.windows.i586.jdk7=windows_i586_5.1
|
||||
jprt.my.windows.i586.jdk7b107=windows_i586_5.0
|
||||
@ -211,6 +215,7 @@ jprt.build.targets.standard= \
|
||||
${jprt.my.solaris.x64}-{product|fastdebug|debug}, \
|
||||
${jprt.my.linux.i586}-{product|fastdebug|debug}, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug|debug}, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug|debug}, \
|
||||
${jprt.my.windows.x64}-{product|fastdebug|debug}
|
||||
|
||||
@ -416,6 +421,30 @@ jprt.my.linux.x64.test.targets = \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
|
||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
|
||||
|
||||
jprt.my.macosx.x64.test.targets = \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_default, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_default, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
|
||||
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParOldGC
|
||||
|
||||
jprt.my.windows.i586.test.targets = \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
|
||||
${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||
@ -492,6 +521,7 @@ jprt.test.targets.standard = \
|
||||
${jprt.my.solaris.x64.test.targets}, \
|
||||
${jprt.my.linux.i586.test.targets}, \
|
||||
${jprt.my.linux.x64.test.targets}, \
|
||||
${jprt.my.macosx.x64.test.targets}, \
|
||||
${jprt.my.windows.i586.test.targets}, \
|
||||
${jprt.my.windows.x64.test.targets}, \
|
||||
${jprt.test.targets.open}
|
||||
@ -538,6 +568,7 @@ jprt.make.rule.test.targets.standard.server = \
|
||||
${jprt.my.solaris.x64}-*-c2-servertest, \
|
||||
${jprt.my.linux.i586}-*-c2-servertest, \
|
||||
${jprt.my.linux.x64}-*-c2-servertest, \
|
||||
${jprt.my.macosx.x64}-*-c2-servertest, \
|
||||
${jprt.my.windows.i586}-*-c2-servertest, \
|
||||
${jprt.my.windows.x64}-*-c2-servertest
|
||||
|
||||
@ -548,6 +579,7 @@ jprt.make.rule.test.targets.standard.internalvmtests = \
|
||||
${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.macosx.x64}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.windows.i586}-fastdebug-c2-internalvmtests, \
|
||||
${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
|
||||
|
||||
|
@ -326,11 +326,10 @@ env.sh: $(BUILDTREE_MAKE)
|
||||
$(BUILDTREE_COMMENT); \
|
||||
[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
|
||||
{ \
|
||||
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
|
||||
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
|
||||
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
|
||||
echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
|
||||
echo "export JAVA_HOME CLASSPATH HOTSPOT_BUILD_USER"; \
|
||||
) > $@
|
||||
|
||||
env.csh: env.sh
|
||||
@ -384,7 +383,7 @@ JAVA_FLAG/32 = -d32
|
||||
JAVA_FLAG/64 = -d64
|
||||
|
||||
WRONG_DATA_MODE_MSG = \
|
||||
echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
|
||||
echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
|
||||
|
||||
CROSS_COMPILING_MSG = \
|
||||
echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
|
||||
@ -392,19 +391,78 @@ CROSS_COMPILING_MSG = \
|
||||
test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
echo '#!/bin/sh'; \
|
||||
echo "#!/bin/sh"; \
|
||||
echo ""; \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
echo '. ./env.sh'; \
|
||||
echo "if [ \"$(CROSS_COMPILE_ARCH)\" != \"\" ]; then { $(CROSS_COMPILING_MSG); exit 0; }; fi"; \
|
||||
echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
|
||||
echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
|
||||
echo "then"; \
|
||||
echo " $(WRONG_DATA_MODE_MSG); exit 0;"; \
|
||||
echo ""; \
|
||||
echo "# Include environment settings for gamma run"; \
|
||||
echo ""; \
|
||||
echo ". ./env.sh"; \
|
||||
echo ""; \
|
||||
echo "# Do not run gamma test for cross compiles"; \
|
||||
echo ""; \
|
||||
echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
|
||||
echo " $(CROSS_COMPILING_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
|
||||
echo ""; \
|
||||
echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
|
||||
echo " $(NO_JAVA_HOME_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Check JAVA_HOME version to be used for the test"; \
|
||||
echo ""; \
|
||||
echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
|
||||
echo "if [ \$$? -ne 0 ]; then "; \
|
||||
echo " $(WRONG_DATA_MODE_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Use gamma_g if it exists"; \
|
||||
echo ""; \
|
||||
echo "GAMMA_PROG=gamma"; \
|
||||
echo "if [ -f gamma_g ]; then "; \
|
||||
echo " GAMMA_PROG=gamma_g"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
|
||||
echo " # Ensure architecture for gamma and JAVA_HOME is the same."; \
|
||||
echo " # NOTE: gamma assumes the OpenJDK directory layout."; \
|
||||
echo ""; \
|
||||
echo " GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
|
||||
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
|
||||
echo " if [ ! -f \$${JVM_LIB} ]; then"; \
|
||||
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
|
||||
echo " fi"; \
|
||||
echo " if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
|
||||
echo " $(WRONG_DATA_MODE_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo " fi"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Compile Queens program for test"; \
|
||||
echo ""; \
|
||||
echo "rm -f Queens.class"; \
|
||||
echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
|
||||
echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
|
||||
echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
|
||||
echo ""; \
|
||||
echo "# Set library path solely for gamma launcher test run"; \
|
||||
echo ""; \
|
||||
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo "export LD_LIBRARY_PATH"; \
|
||||
echo "unset LD_LIBRARY_PATH_32"; \
|
||||
echo "unset LD_LIBRARY_PATH_64"; \
|
||||
echo ""; \
|
||||
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
|
||||
echo " DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo " export DYLD_LIBRARY_PATH"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
|
||||
echo ""; \
|
||||
echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
|
||||
) > $@
|
||||
$(QUIETLY) chmod +x $@
|
||||
|
||||
|
@ -98,6 +98,10 @@ CPPFLAGS = \
|
||||
${JRE_VERSION} \
|
||||
${VM_DISTRO}
|
||||
|
||||
ifndef JAVASE_EMBEDDED
|
||||
CFLAGS += -DINCLUDE_TRACE
|
||||
endif
|
||||
|
||||
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
|
||||
CFLAGS += $(CFLAGS_WARN/BYFILE)
|
||||
|
||||
@ -143,6 +147,12 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
|
||||
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
|
||||
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
|
||||
|
||||
ifndef JAVASE_EMBEDDED
|
||||
SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
|
||||
find $(HS_ALT_SRC)/share/vm/jfr -type d; \
|
||||
fi)
|
||||
endif
|
||||
|
||||
CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
|
||||
CORE_PATHS+=$(GENERATED)/jvmtifiles
|
||||
|
||||
|
@ -118,7 +118,7 @@ SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
|
||||
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
|
||||
|
||||
BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
|
||||
env.ksh env.csh jdkpath.sh .dbxrc test_gamma
|
||||
env.sh env.csh jdkpath.sh .dbxrc test_gamma
|
||||
|
||||
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
|
||||
ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
|
||||
@ -313,22 +313,19 @@ sa.make: $(BUILDTREE_MAKE)
|
||||
echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
|
||||
) > $@
|
||||
|
||||
env.ksh: $(BUILDTREE_MAKE)
|
||||
env.sh: $(BUILDTREE_MAKE)
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
[ -n "$$JAVA_HOME" ] && { echo ": \$${JAVA_HOME:=$${JAVA_HOME}}"; }; \
|
||||
{ \
|
||||
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo "unset LD_LIBRARY_PATH_32"; \
|
||||
echo "unset LD_LIBRARY_PATH_64"; \
|
||||
echo "CLASSPATH=$${CLASSPATH:+$$CLASSPATH:}.:\$${JAVA_HOME}/jre/lib/rt.jar:\$${JAVA_HOME}/jre/lib/i18n.jar"; \
|
||||
} | sed s:$${JAVA_HOME:--------}:\$${JAVA_HOME}:g; \
|
||||
echo "HOTSPOT_BUILD_USER=\"$${LOGNAME:-$$USER} in `basename $(GAMMADIR)`\""; \
|
||||
echo "export JAVA_HOME LD_LIBRARY_PATH CLASSPATH HOTSPOT_BUILD_USER"; \
|
||||
) > $@
|
||||
|
||||
env.csh: env.ksh
|
||||
env.csh: env.sh
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
@ -384,23 +381,86 @@ JAVA_FLAG/32 = -d32
|
||||
JAVA_FLAG/64 = -d64
|
||||
|
||||
WRONG_DATA_MODE_MSG = \
|
||||
echo "JAVA_HOME must point to $(DATA_MODE)bit JDK."
|
||||
echo "JAVA_HOME must point to a $(DATA_MODE)-bit OpenJDK."
|
||||
|
||||
CROSS_COMPILING_MSG = \
|
||||
echo "Cross compiling for ARCH $(CROSS_COMPILE_ARCH), skipping gamma run."
|
||||
|
||||
test_gamma: $(BUILDTREE_MAKE) $(GAMMADIR)/make/test/Queens.java
|
||||
@echo Creating $@ ...
|
||||
$(QUIETLY) ( \
|
||||
echo '#!/bin/ksh'; \
|
||||
echo "#!/bin/sh"; \
|
||||
echo ""; \
|
||||
$(BUILDTREE_COMMENT); \
|
||||
echo '. ./env.ksh'; \
|
||||
echo "if [ -z \$$JAVA_HOME ]; then { $(NO_JAVA_HOME_MSG); exit 0; }; fi"; \
|
||||
echo "if ! \$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion 2>&1 > /dev/null"; \
|
||||
echo "then"; \
|
||||
echo " $(WRONG_DATA_MODE_MSG); exit 0;"; \
|
||||
echo ""; \
|
||||
echo "# Include environment settings for gamma run"; \
|
||||
echo ""; \
|
||||
echo ". ./env.sh"; \
|
||||
echo ""; \
|
||||
echo "# Do not run gamma test for cross compiles"; \
|
||||
echo ""; \
|
||||
echo "if [ -n \"$(CROSS_COMPILE_ARCH)\" ]; then "; \
|
||||
echo " $(CROSS_COMPILING_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Make sure JAVA_HOME is set as it is required for gamma"; \
|
||||
echo ""; \
|
||||
echo "if [ -z \"\$${JAVA_HOME}\" ]; then "; \
|
||||
echo " $(NO_JAVA_HOME_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Check JAVA_HOME version to be used for the test"; \
|
||||
echo ""; \
|
||||
echo "\$${JAVA_HOME}/bin/java $(JAVA_FLAG) -fullversion > /dev/null 2>&1"; \
|
||||
echo "if [ \$$? -ne 0 ]; then "; \
|
||||
echo " $(WRONG_DATA_MODE_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Use gamma_g if it exists"; \
|
||||
echo ""; \
|
||||
echo "GAMMA_PROG=gamma"; \
|
||||
echo "if [ -f gamma_g ]; then "; \
|
||||
echo " GAMMA_PROG=gamma_g"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
|
||||
echo " # Ensure architecture for gamma and JAVA_HOME is the same."; \
|
||||
echo " # NOTE: gamma assumes the OpenJDK directory layout."; \
|
||||
echo ""; \
|
||||
echo " GAMMA_ARCH=\"\`file \$${GAMMA_PROG} | awk '{print \$$NF}'\`\""; \
|
||||
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/libjava.$(LIBRARY_SUFFIX)\""; \
|
||||
echo " if [ ! -f \$${JVM_LIB} ]; then"; \
|
||||
echo " JVM_LIB=\"\$${JAVA_HOME}/jre/lib/$${LIBARCH}/libjava.$(LIBRARY_SUFFIX)\""; \
|
||||
echo " fi"; \
|
||||
echo " if [ ! -f \$${JVM_LIB} ] || [ -z \"\`file \$${JVM_LIB} | grep \$${GAMMA_ARCH}\`\" ]; then "; \
|
||||
echo " $(WRONG_DATA_MODE_MSG)"; \
|
||||
echo " exit 0"; \
|
||||
echo " fi"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Compile Queens program for test"; \
|
||||
echo ""; \
|
||||
echo "rm -f Queens.class"; \
|
||||
echo "\$${JAVA_HOME}/bin/javac -d . $(GAMMADIR)/make/test/Queens.java"; \
|
||||
echo '[ -f gamma_g ] && { gamma=gamma_g; }'; \
|
||||
echo './$${gamma:-gamma} $(TESTFLAGS) Queens < /dev/null'; \
|
||||
echo ""; \
|
||||
echo "# Set library path solely for gamma launcher test run"; \
|
||||
echo ""; \
|
||||
echo "LD_LIBRARY_PATH=.:$${LD_LIBRARY_PATH:+$$LD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo "export LD_LIBRARY_PATH"; \
|
||||
echo "unset LD_LIBRARY_PATH_32"; \
|
||||
echo "unset LD_LIBRARY_PATH_64"; \
|
||||
echo ""; \
|
||||
echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
|
||||
echo " DYLD_LIBRARY_PATH=.:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/native_threads:\$${JAVA_HOME}/jre/lib:$${DYLD_LIBRARY_PATH:+$$DYLD_LIBRARY_PATH:}\$${JAVA_HOME}/jre/lib/${LIBARCH}/native_threads:\$${JAVA_HOME}/jre/lib/${LIBARCH}:${GCC_LIB}"; \
|
||||
echo " export DYLD_LIBRARY_PATH"; \
|
||||
echo "fi"; \
|
||||
echo ""; \
|
||||
echo "# Use the gamma launcher and JAVA_HOME to run the test"; \
|
||||
echo ""; \
|
||||
echo "./\$${GAMMA_PROG} $(TESTFLAGS) Queens < /dev/null"; \
|
||||
) > $@
|
||||
$(QUIETLY) chmod +x $@
|
||||
|
||||
|
@ -93,7 +93,7 @@ CFLAGS += $(CFLAGS_WARN)
|
||||
CFLAGS += $(CFLAGS/NOEX)
|
||||
|
||||
# Extra flags from gnumake's invocation or environment
|
||||
CFLAGS += $(EXTRA_CFLAGS)
|
||||
CFLAGS += $(EXTRA_CFLAGS) -DINCLUDE_TRACE
|
||||
|
||||
# Math Library (libm.so), do not use -lm.
|
||||
# There might be two versions of libm.so on the build system:
|
||||
@ -160,6 +160,10 @@ SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
|
||||
SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
|
||||
SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
|
||||
|
||||
SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
|
||||
find $(HS_ALT_SRC)/share/vm/jfr -type d; \
|
||||
fi)
|
||||
|
||||
CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
|
||||
CORE_PATHS+=$(GENERATED)/jvmtifiles
|
||||
|
||||
|
@ -35,6 +35,8 @@ cl 2>&1 | grep "IA-64" >NUL
|
||||
if %errorlevel% == 0 goto isia64
|
||||
cl 2>&1 | grep "AMD64" >NUL
|
||||
if %errorlevel% == 0 goto amd64
|
||||
cl 2>&1 | grep "x64" >NUL
|
||||
if %errorlevel% == 0 goto amd64
|
||||
set ARCH=x86
|
||||
set BUILDARCH=i486
|
||||
set Platform_arch=x86
|
||||
|
@ -73,6 +73,13 @@ done
|
||||
|
||||
BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles"
|
||||
|
||||
if [ -d "${ALTSRC}/share/vm/jfr" ]; then
|
||||
BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent"
|
||||
BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent/isolated_deps/util"
|
||||
BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/jvm"
|
||||
BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
|
||||
fi
|
||||
|
||||
CORE_PATHS="${BASE_PATHS}"
|
||||
# shared is already in BASE_PATHS. Should add vm/memory but that one is also in BASE_PATHS.
|
||||
if [ -d "${ALTSRC}/share/vm/gc_implementation" ]; then
|
||||
|
@ -58,7 +58,8 @@ ProjectCreatorIncludesPRIVATE=\
|
||||
-absoluteInclude $(HOTSPOTBUILDSPACE)/%f/generated \
|
||||
-ignorePath $(HOTSPOTBUILDSPACE)/%f/generated \
|
||||
-ignorePath src\share\vm\adlc \
|
||||
-ignorePath src\share\vm\shark
|
||||
-ignorePath src\share\vm\shark \
|
||||
-ignorePath posix
|
||||
|
||||
# This is referenced externally by both the IDE and batch builds
|
||||
ProjectCreatorOptions=
|
||||
@ -88,7 +89,7 @@ ProjectCreatorIDEOptions=\
|
||||
-jdkTargetRoot $(HOTSPOTJDKDIST) \
|
||||
-define ALIGN_STACK_FRAMES \
|
||||
-define VM_LITTLE_ENDIAN \
|
||||
-prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
|
||||
-prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \
|
||||
-postbuild "" "Building hotspot.exe..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \
|
||||
-ignoreFile jsig.c \
|
||||
-ignoreFile jvmtiEnvRecommended.cpp \
|
||||
|
@ -19,7 +19,7 @@
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
#
|
||||
#
|
||||
|
||||
# Resource file containing VERSIONINFO
|
||||
@ -30,7 +30,7 @@ Res_Files=.\version.res
|
||||
COMMONSRC=$(WorkSpace)\src
|
||||
ALTSRC=$(WorkSpace)\src\closed
|
||||
|
||||
!ifdef RELEASE
|
||||
!ifdef RELEASE
|
||||
!ifdef DEVELOP
|
||||
CPP_FLAGS=$(CPP_FLAGS) /D "DEBUG"
|
||||
!else
|
||||
@ -74,6 +74,10 @@ CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_TARGET=\"$(BUILD_FLAVOR)\""
|
||||
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
|
||||
CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
|
||||
|
||||
!ifndef JAVASE_EMBEDDED
|
||||
CPP_FLAGS=$(CPP_FLAGS) /D "INCLUDE_TRACE"
|
||||
!endif
|
||||
|
||||
CPP_FLAGS=$(CPP_FLAGS) $(CPP_INCLUDE_DIRS)
|
||||
|
||||
# Define that so jni.h is on correct side
|
||||
@ -97,7 +101,7 @@ AGCT_EXPORT=/export:AsyncGetCallTrace
|
||||
!endif
|
||||
|
||||
# If you modify exports below please do the corresponding changes in
|
||||
# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
|
||||
# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
|
||||
LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
|
||||
/export:JNI_GetDefaultJavaVMInitArgs \
|
||||
/export:JNI_CreateJavaVM \
|
||||
@ -170,6 +174,7 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/oops
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/prims
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/runtime
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/services
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/trace
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/utilities
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/libadt
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/os/windows/vm
|
||||
@ -177,6 +182,13 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm
|
||||
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto
|
||||
|
||||
!if exists($(ALTSRC)\share\vm\jfr)
|
||||
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent
|
||||
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent/isolated_deps/util
|
||||
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/jvm
|
||||
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
|
||||
!endif
|
||||
|
||||
VM_PATH={$(VM_PATH)}
|
||||
|
||||
# Special case files not using precompiled header files.
|
||||
@ -263,6 +275,9 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
|
||||
{$(COMMONSRC)\share\vm\services}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
{$(COMMONSRC)\share\vm\trace}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
{$(COMMONSRC)\share\vm\utilities}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
@ -340,6 +355,9 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
|
||||
{$(ALTSRC)\share\vm\services}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
{$(ALTSRC)\share\vm\trace}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
{$(ALTSRC)\share\vm\utilities}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
@ -371,6 +389,18 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
|
||||
{..\generated\jvmtifiles}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
{$(ALTSRC)\share\vm\jfr}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj::
|
||||
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
|
||||
|
||||
default::
|
||||
|
||||
_build_pch_file.obj:
|
||||
|
@@ -391,7 +391,7 @@ int LIR_Assembler::emit_exception_handler() {
__ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
__ delayed()->nop();
__ should_not_reach_here();
assert(code_offset() - offset <= exception_handler_size, "overflow");
guarantee(code_offset() - offset <= exception_handler_size, "overflow");
__ end_a_stub();

return offset;
@@ -474,8 +474,7 @@ int LIR_Assembler::emit_deopt_handler() {
AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
__ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
__ delayed()->nop();
assert(code_offset() - offset <= deopt_handler_size, "overflow");
debug_only(__ stop("should have gone to the caller");)
guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
__ end_a_stub();

return offset;

@@ -69,7 +69,7 @@ enum {
#else
call_stub_size = 20,
#endif // _LP64
exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(10*4),
deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(10*4) };
exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64) };

#endif // CPU_SPARC_VM_C1_LIRASSEMBLER_SPARC_HPP

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -810,7 +810,7 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
|
||||
}
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
#ifndef PRODUCT
|
||||
|
||||
#define DESCRIBE_FP_OFFSET(name) \
|
||||
values.describe(frame_no, fp() + frame::name##_offset, #name)
|
||||
@ -820,11 +820,19 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
|
||||
values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
|
||||
}
|
||||
|
||||
if (is_interpreted_frame()) {
|
||||
if (is_ricochet_frame()) {
|
||||
MethodHandles::RicochetFrame::describe(this, values, frame_no);
|
||||
} else if (is_interpreted_frame()) {
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_padding);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
|
||||
|
||||
// esp, according to Lesp (e.g. not depending on bci), if seems valid
|
||||
intptr_t* esp = *interpreter_frame_esp_addr();
|
||||
if ((esp >= sp()) && (esp < fp())) {
|
||||
values.describe(-1, esp, "*Lesp");
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_compiled_frame()) {
|
||||
@ -844,4 +852,3 @@ intptr_t *frame::initial_deoptimization_info() {
|
||||
// unused... but returns fp() to minimize changes introduced by 7087445
|
||||
return fp();
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -82,6 +82,8 @@ inline address* frame::O0_addr() const { return (address*) &younger_sp()[ I0->s
|
||||
|
||||
inline intptr_t* frame::sender_sp() const { return fp(); }
|
||||
|
||||
inline intptr_t* frame::real_fp() const { return fp(); }
|
||||
|
||||
// Used only in frame::oopmapreg_to_location
|
||||
// This return a value in VMRegImpl::slot_size
|
||||
inline int frame::pd_oop_map_offset_adjustment() const {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -177,7 +177,7 @@ void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
|
||||
BLOCK_COMMENT("ricochet_blob.bounce");
|
||||
|
||||
if (VerifyMethodHandles) RicochetFrame::verify_clean(_masm);
|
||||
trace_method_handle(_masm, "ricochet_blob.bounce");
|
||||
trace_method_handle(_masm, "return/ricochet_blob.bounce");
|
||||
|
||||
__ JMP(L1_continuation, 0);
|
||||
__ delayed()->nop();
|
||||
@ -268,14 +268,16 @@ void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
|
||||
}
|
||||
|
||||
// Emit code to verify that FP is pointing at a valid ricochet frame.
|
||||
#ifdef ASSERT
|
||||
#ifndef PRODUCT
|
||||
enum {
|
||||
ARG_LIMIT = 255, SLOP = 45,
|
||||
// use this parameter for checking for garbage stack movements:
|
||||
UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
|
||||
// the slop defends against false alarms due to fencepost errors
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef ASSERT
|
||||
void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
|
||||
// The stack should look like this:
|
||||
// ... keep1 | dest=42 | keep2 | magic | handler | magic | recursive args | [RF]
|
||||
@ -1000,32 +1002,143 @@ void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
|
||||
BLOCK_COMMENT("} move_return_value");
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) {
|
||||
RicochetFrame* rf = new RicochetFrame(*fr);
|
||||
|
||||
// ricochet slots (kept in registers for sparc)
|
||||
values.describe(frame_no, rf->register_addr(I5_savedSP), err_msg("exact_sender_sp reg for #%d", frame_no));
|
||||
values.describe(frame_no, rf->register_addr(L5_conversion), err_msg("conversion reg for #%d", frame_no));
|
||||
values.describe(frame_no, rf->register_addr(L4_saved_args_base), err_msg("saved_args_base reg for #%d", frame_no));
|
||||
values.describe(frame_no, rf->register_addr(L3_saved_args_layout), err_msg("saved_args_layout reg for #%d", frame_no));
|
||||
values.describe(frame_no, rf->register_addr(L2_saved_target), err_msg("saved_target reg for #%d", frame_no));
|
||||
values.describe(frame_no, rf->register_addr(L1_continuation), err_msg("continuation reg for #%d", frame_no));
|
||||
|
||||
// relevant ricochet targets (in caller frame)
|
||||
values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no));
|
||||
values.describe(-1, (intptr_t *)(STACK_BIAS+(uintptr_t)rf->exact_sender_sp()), err_msg("*exact_sender_sp+STACK_BIAS for #%d", frame_no));
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
#ifndef PRODUCT
|
||||
extern "C" void print_method_handle(oop mh);
|
||||
void trace_method_handle_stub(const char* adaptername,
|
||||
oopDesc* mh,
|
||||
intptr_t* saved_sp) {
|
||||
intptr_t* saved_sp,
|
||||
intptr_t* args,
|
||||
intptr_t* tracing_fp) {
|
||||
bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have mh
|
||||
tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp);
|
||||
if (has_mh)
|
||||
|
||||
tty->print_cr("MH %s mh="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, (intptr_t) mh, saved_sp, args);
|
||||
|
||||
if (Verbose) {
|
||||
// dumping last frame with frame::describe
|
||||
|
||||
JavaThread* p = JavaThread::active();
|
||||
|
||||
ResourceMark rm;
|
||||
PRESERVE_EXCEPTION_MARK; // may not be needed by safer and unexpensive here
|
||||
FrameValues values;
|
||||
|
||||
// Note: We want to allow trace_method_handle from any call site.
|
||||
// While trace_method_handle creates a frame, it may be entered
|
||||
// without a valid return PC in O7 (e.g. not just after a call).
|
||||
// Walking that frame could lead to failures due to that invalid PC.
|
||||
// => carefully detect that frame when doing the stack walking
|
||||
|
||||
// walk up to the right frame using the "tracing_fp" argument
|
||||
intptr_t* cur_sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
|
||||
frame cur_frame(cur_sp, frame::unpatchable, NULL);
|
||||
|
||||
while (cur_frame.fp() != (intptr_t *)(STACK_BIAS+(uintptr_t)tracing_fp)) {
|
||||
cur_frame = os::get_sender_for_C_frame(&cur_frame);
|
||||
}
|
||||
|
||||
// safely create a frame and call frame::describe
|
||||
intptr_t *dump_sp = cur_frame.sender_sp();
|
||||
intptr_t *dump_fp = cur_frame.link();
|
||||
|
||||
bool walkable = has_mh; // whether the traced frame shoud be walkable
|
||||
|
||||
// the sender for cur_frame is the caller of trace_method_handle
|
||||
if (walkable) {
|
||||
// The previous definition of walkable may have to be refined
|
||||
// if new call sites cause the next frame constructor to start
|
||||
// failing. Alternatively, frame constructors could be
|
||||
// modified to support the current or future non walkable
|
||||
// frames (but this is more intrusive and is not considered as
|
||||
// part of this RFE, which will instead use a simpler output).
|
||||
frame dump_frame = frame(dump_sp,
|
||||
cur_frame.sp(), // younger_sp
|
||||
false); // no adaptation
|
||||
dump_frame.describe(values, 1);
|
||||
} else {
|
||||
// Robust dump for frames which cannot be constructed from sp/younger_sp
|
||||
// Add descriptions without building a Java frame to avoid issues
|
||||
values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
|
||||
values.describe(-1, dump_sp, "sp");
|
||||
}
|
||||
|
||||
bool has_args = has_mh; // whether Gargs is meaningful
|
||||
|
||||
// mark args, if seems valid (may not be valid for some adapters)
|
||||
if (has_args) {
|
||||
if ((args >= dump_sp) && (args < dump_fp)) {
|
||||
values.describe(-1, args, "*G4_args");
|
||||
}
|
||||
}
|
||||
|
||||
// mark saved_sp, if seems valid (may not be valid for some adapters)
|
||||
intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
|
||||
if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
|
||||
values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
|
||||
}
|
||||
|
||||
// Note: the unextended_sp may not be correct
|
||||
tty->print_cr(" stack layout:");
|
||||
values.print(p);
|
||||
}
|
||||
|
||||
if (has_mh) {
|
||||
print_method_handle(mh);
|
||||
}
|
||||
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return;
BLOCK_COMMENT("trace_method_handle {");
// save: Gargs, O5_savedSP
__ save_frame(16);
__ save_frame(16); // need space for saving required FPU state
__ set((intptr_t) adaptername, O0);
__ mov(G3_method_handle, O1);
__ mov(I5_savedSP, O2);
__ mov(Gargs, O3);
__ mov(I6, O4); // frame identifier for safe stack walking
// Save scratched registers that might be needed. Robustness is more
// important than optimizing the saves for this debug only code.
// save FP result, valid at some call sites (adapter_opt_return_float, ...)
Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
__ stf(FloatRegisterImpl::D, Ftos_d, d_save);
// Safely save all globals but G2 (handled by call_VM_leaf) and G7
// (OS reserved).
__ mov(G3_method_handle, L3);
__ mov(Gargs, L4);
__ mov(G5_method_type, L5);
__ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
__ mov(G6, L6);
__ mov(G1, L1);
__ call_VM_leaf(L2 /* for G2 */, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
__ mov(L3, G3_method_handle);
__ mov(L4, Gargs);
__ mov(L5, G5_method_type);
__ mov(L6, G6);
__ mov(L1, G1);
__ ldf(FloatRegisterImpl::D, d_save, Ftos_d);
__ restore();
BLOCK_COMMENT("} trace_method_handle");
}
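The stub above locates the frame to describe by walking native sender frames until it reaches a known frame pointer passed in as an identifier. A minimal sketch of that pattern, using only the frame and os calls already shown in this changeset (find_frame_by_fp is a hypothetical helper name, not part of the change):

// Sketch: walk C frames until one whose fp matches a caller-supplied marker is found.
static frame find_frame_by_fp(intptr_t* marker_fp) {
  frame cur = os::current_frame();            // start from the current native frame
  while (cur.fp() != marker_fp) {             // compare against the known frame identifier
    cur = os::get_sender_for_C_frame(&cur);   // hop to the caller's frame
  }
  return cur;                                 // this is the frame to describe
}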
@ -1045,7 +1158,7 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
// OP_COLLECT_ARGS is below...
|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
|(!UseRicochetFrames ? 0 :
|(
java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
@ -1250,7 +1363,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
move_typed_arg(_masm, arg_type, false,
prim_value_addr,
Address(O0_argslot, 0),
O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
O2_scratch); // must be an even register for !_LP64 long moves (uses O2/O3)
}
if (direct_to_method) {
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -145,6 +145,8 @@ class RicochetFrame : public ResourceObj {
|
||||
}
|
||||
|
||||
static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
|
||||
|
||||
static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
|
||||
};
|
||||
|
||||
// Additional helper methods for MethodHandles code generation:
|
||||
|
@ -406,7 +406,7 @@ int LIR_Assembler::emit_exception_handler() {
|
||||
// search an exception handler (rax: exception oop, rdx: throwing pc)
|
||||
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
|
||||
__ should_not_reach_here();
|
||||
assert(code_offset() - offset <= exception_handler_size, "overflow");
|
||||
guarantee(code_offset() - offset <= exception_handler_size, "overflow");
|
||||
__ end_a_stub();
|
||||
|
||||
return offset;
|
||||
@ -490,8 +490,7 @@ int LIR_Assembler::emit_deopt_handler() {
|
||||
|
||||
__ pushptr(here.addr());
|
||||
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
|
||||
|
||||
assert(code_offset() - offset <= deopt_handler_size, "overflow");
|
||||
guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
|
||||
__ end_a_stub();
|
||||
|
||||
return offset;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -651,13 +651,15 @@ intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
|
||||
return &interpreter_frame_tos_address()[index];
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
#ifndef PRODUCT
|
||||
|
||||
#define DESCRIBE_FP_OFFSET(name) \
|
||||
values.describe(frame_no, fp() + frame::name##_offset, #name)
|
||||
|
||||
void frame::describe_pd(FrameValues& values, int frame_no) {
|
||||
if (is_interpreted_frame()) {
|
||||
if (is_ricochet_frame()) {
|
||||
MethodHandles::RicochetFrame::describe(this, values, frame_no);
|
||||
} else if (is_interpreted_frame()) {
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_method);
|
||||
@ -667,7 +669,6 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
|
||||
DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
|
||||
}
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -675,3 +676,21 @@ intptr_t *frame::initial_deoptimization_info() {
|
||||
// used to reset the saved FP
|
||||
return fp();
|
||||
}
|
||||
|
||||
intptr_t* frame::real_fp() const {
|
||||
if (_cb != NULL) {
|
||||
// use the frame size if valid
|
||||
int size = _cb->frame_size();
|
||||
if ((size > 0) &&
|
||||
(! is_ricochet_frame())) {
|
||||
// Work-around: ricochet explicitly excluded because frame size is not
|
||||
// constant for the ricochet blob but its frame_size could not, for
|
||||
// some reasons, be declared as <= 0. This potentially confusing
|
||||
// size declaration should be fixed as another CR.
|
||||
return unextended_sp() + size;
|
||||
}
|
||||
}
|
||||
// else rely on fp()
|
||||
assert(! is_compiled_frame(), "unknown compiled frame size");
|
||||
return fp();
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -188,6 +188,7 @@
|
||||
frame(intptr_t* sp, intptr_t* fp);
|
||||
|
||||
// accessors for the instance variables
|
||||
// Note: not necessarily the real 'frame pointer' (see real_fp)
|
||||
intptr_t* fp() const { return _fp; }
|
||||
|
||||
inline address* sender_pc_addr() const;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -279,14 +279,16 @@ void MethodHandles::RicochetFrame::leave_ricochet_frame(MacroAssembler* _masm,
|
||||
}
|
||||
|
||||
// Emit code to verify that RBP is pointing at a valid ricochet frame.
|
||||
#ifdef ASSERT
|
||||
#ifndef PRODUCT
|
||||
enum {
|
||||
ARG_LIMIT = 255, SLOP = 4,
|
||||
// use this parameter for checking for garbage stack movements:
|
||||
UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP)
|
||||
// the slop defends against false alarms due to fencepost errors
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef ASSERT
|
||||
void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
|
||||
// The stack should look like this:
|
||||
// ... keep1 | dest=42 | keep2 | RF | magic | handler | magic | recursive args |
|
||||
@ -990,6 +992,26 @@ void MethodHandles::move_return_value(MacroAssembler* _masm, BasicType type,
|
||||
BLOCK_COMMENT("} move_return_value");
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
#define DESCRIBE_RICOCHET_OFFSET(rf, name) \
|
||||
values.describe(frame_no, (intptr_t *) (((uintptr_t)rf) + MethodHandles::RicochetFrame::name##_offset_in_bytes()), #name)
|
||||
|
||||
void MethodHandles::RicochetFrame::describe(const frame* fr, FrameValues& values, int frame_no) {
|
||||
address bp = (address) fr->fp();
|
||||
RicochetFrame* rf = (RicochetFrame*)(bp - sender_link_offset_in_bytes());
|
||||
|
||||
// ricochet slots
|
||||
DESCRIBE_RICOCHET_OFFSET(rf, exact_sender_sp);
|
||||
DESCRIBE_RICOCHET_OFFSET(rf, conversion);
|
||||
DESCRIBE_RICOCHET_OFFSET(rf, saved_args_base);
|
||||
DESCRIBE_RICOCHET_OFFSET(rf, saved_args_layout);
|
||||
DESCRIBE_RICOCHET_OFFSET(rf, saved_target);
|
||||
DESCRIBE_RICOCHET_OFFSET(rf, continuation);
|
||||
|
||||
// relevant ricochet targets (in caller frame)
|
||||
values.describe(-1, rf->saved_args_base(), err_msg("*saved_args_base for #%d", frame_no));
|
||||
}
|
||||
#endif // ASSERT
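For reference, a sketch of what one DESCRIBE_RICOCHET_OFFSET use above expands to, taking the conversion slot as the example (this is a mechanical expansion of the macro defined above, not new code in the changeset):

// DESCRIBE_RICOCHET_OFFSET(rf, conversion) expands to roughly:
values.describe(frame_no,
                (intptr_t *) (((uintptr_t) rf) + MethodHandles::RicochetFrame::conversion_offset_in_bytes()),
                "conversion");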
|
||||
|
||||
#ifndef PRODUCT
|
||||
extern "C" void print_method_handle(oop mh);
|
||||
@ -1001,11 +1023,12 @@ void trace_method_handle_stub(const char* adaptername,
intptr_t* saved_bp) {
// called as a leaf from native code: do not block the JVM!
bool has_mh = (strstr(adaptername, "return/") == NULL); // return adapters don't have rcx_mh
intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
intptr_t* base_sp = last_sp;
typedef MethodHandles::RicochetFrame RicochetFrame;
RicochetFrame* rfp = (RicochetFrame*)((address)saved_bp - RicochetFrame::sender_link_offset_in_bytes());
if (!UseRicochetFrames || Universe::heap()->is_in((address) rfp->saved_args_base())) {
if (Universe::heap()->is_in((address) rfp->saved_args_base())) {
// Probably an interpreter frame.
base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
}
@ -1030,13 +1053,64 @@ void trace_method_handle_stub(const char* adaptername,
tty->cr();
if (last_sp != saved_sp && last_sp != NULL)
tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp);
int stack_dump_count = 16;
if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
stack_dump_count = (int)(saved_bp + 2 - saved_sp);
if (stack_dump_count > 64) stack_dump_count = 48;
for (i = 0; i < stack_dump_count; i += 4) {
tty->print_cr(" dump at SP[%d] "PTR_FORMAT": "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT" "PTR_FORMAT,
i, (intptr_t) &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
{
// dumping last frame with frame::describe
JavaThread* p = JavaThread::active();
ResourceMark rm;
PRESERVE_EXCEPTION_MARK; // may not be needed but safer and inexpensive here
FrameValues values;
// Note: We want to allow trace_method_handle from any call site.
// While trace_method_handle creates a frame, it may be entered
// without a PC on the stack top (e.g. not just after a call).
// Walking that frame could lead to failures due to that invalid PC.
// => carefully detect that frame when doing the stack walking
// Current C frame
frame cur_frame = os::current_frame();
// Robust search of trace_calling_frame (independent of inlining).
// Assumes saved_regs comes from a pusha in the trace_calling_frame.
assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
while (trace_calling_frame.fp() < saved_regs) {
trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
}
// safely create a frame and call frame::describe
intptr_t *dump_sp = trace_calling_frame.sender_sp();
intptr_t *dump_fp = trace_calling_frame.link();
bool walkable = has_mh; // whether the traced frame should be walkable
if (walkable) {
// The previous definition of walkable may have to be refined
// if new call sites cause the next frame constructor to start
// failing. Alternatively, frame constructors could be
// modified to support the current or future non walkable
// frames (but this is more intrusive and is not considered as
// part of this RFE, which will instead use a simpler output).
frame dump_frame = frame(dump_sp, dump_fp);
dump_frame.describe(values, 1);
} else {
// Stack may not be walkable (invalid PC above FP):
// Add descriptions without building a Java frame to avoid issues
values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
values.describe(-1, dump_sp, "sp for #1");
}
// mark saved_sp if seems valid
if (has_mh) {
if ((saved_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (saved_sp < dump_fp)) {
values.describe(-1, saved_sp, "*saved_sp");
}
}
tty->print_cr(" stack layout:");
values.print(p);
}
if (has_mh)
print_method_handle(mh);
@ -1066,26 +1140,49 @@ void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return;
BLOCK_COMMENT("trace_method_handle {");
__ push(rax);
__ lea(rax, Address(rsp, wordSize * NOT_LP64(6) LP64_ONLY(14))); // entry_sp
__ pusha();
__ mov(rbx, rsp);
__ enter();
__ andptr(rsp, -16); // align stack if needed for FPU state
__ pusha();
__ mov(rbx, rsp); // for retrieving saved_regs
// Note: saved_regs must be in the entered frame for the
// robust stack walking implemented in trace_method_handle_stub.
// save FP result, valid at some call sites (adapter_opt_return_float, ...)
__ increment(rsp, -2 * wordSize);
if (UseSSE >= 2) {
__ movdbl(Address(rsp, 0), xmm0);
} else if (UseSSE == 1) {
__ movflt(Address(rsp, 0), xmm0);
} else {
__ fst_d(Address(rsp, 0));
}
// incoming state:
// rcx: method handle
// r13 or rsi: saved sp
// To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
// Note: fix the increment below if pushing more arguments
__ push(rbp); // saved_bp
__ push(rsi); // saved_sp
__ push(rax); // entry_sp
__ push(saved_last_sp_register()); // saved_sp
__ push(rbp); // entry_sp (with extra align space)
__ push(rbx); // pusha saved_regs
__ push(rcx); // mh
__ push(rcx); // adaptername
__ push(rcx); // slot for adaptername
__ movptr(Address(rsp, 0), (intptr_t) adaptername);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
__ leave();
__ increment(rsp, 6 * wordSize); // MethodHandleStubArguments
if (UseSSE >= 2) {
__ movdbl(xmm0, Address(rsp, 0));
} else if (UseSSE == 1) {
__ movflt(xmm0, Address(rsp, 0));
} else {
__ fld_d(Address(rsp, 0));
}
__ increment(rsp, 2 * wordSize);
__ popa();
__ pop(rax);
__ leave();
BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT
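The pushes above assemble a MethodHandleStubArguments record directly on the stack so the leaf call only has to pass a single pointer, which sidesteps per-platform calling-convention differences. A minimal sketch of that idea with hypothetical names (TraceArgs, trace_wrapper); the field order mirrors the pushes, last push first:

struct TraceArgs {
  const char* adaptername;   // pushed last, so lowest address
  void*       mh;
  intptr_t*   saved_regs;
  intptr_t*   entry_sp;
  intptr_t*   saved_sp;
  intptr_t*   saved_bp;      // pushed first, so highest address
};

// The generated code fills the record with pushes and then passes rsp (== &record):
static void trace_wrapper(TraceArgs* a) {
  // unpack the fields and forward them to the real tracing routine
}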
|
||||
@ -1104,7 +1201,7 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
|
||||
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
|
||||
//OP_COLLECT_ARGS is below...
|
||||
|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
|
||||
|(!UseRicochetFrames ? 0 :
|
||||
|(
|
||||
java_lang_invoke_MethodTypeForm::vmlayout_offset_in_bytes() <= 0 ? 0 :
|
||||
((1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_REF)
|
||||
|(1<<java_lang_invoke_AdapterMethodHandle::OP_COLLECT_ARGS)
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -224,6 +224,8 @@ class RicochetFrame {
|
||||
}
|
||||
|
||||
static void verify_clean(MacroAssembler* _masm) NOT_DEBUG_RETURN;
|
||||
|
||||
static void describe(const frame* fr, FrameValues& values, int frame_no) PRODUCT_RETURN;
|
||||
};
|
||||
|
||||
// Additional helper methods for MethodHandles code generation:
|
||||
|
@ -418,7 +418,7 @@ void ZeroFrame::identify_vp_word(int frame_index,
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
#ifndef PRODUCT
|
||||
|
||||
void frame::describe_pd(FrameValues& values, int frame_no) {
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -72,6 +72,10 @@ inline intptr_t* frame::sender_sp() const {
|
||||
return fp() + 1;
|
||||
}
|
||||
|
||||
inline intptr_t* frame::real_fp() const {
|
||||
return fp();
|
||||
}
|
||||
|
||||
inline intptr_t* frame::link() const {
|
||||
ShouldNotCallThis();
|
||||
}
|
||||
|
@ -29,43 +29,3 @@ enum /* platform_dependent_constants */ {
|
||||
adapter_code_size = 0
|
||||
};
|
||||
|
||||
#define TARGET_ARCH_NYI_6939861 1
|
||||
// ..#ifdef TARGET_ARCH_NYI_6939861
|
||||
// .. // Here are some backward compatible declarations until the 6939861 ports are updated.
|
||||
// .. #define _adapter_flyby (_EK_LIMIT + 10)
|
||||
// .. #define _adapter_ricochet (_EK_LIMIT + 11)
|
||||
// .. #define _adapter_opt_spread_1 _adapter_opt_spread_1_ref
|
||||
// .. #define _adapter_opt_spread_more _adapter_opt_spread_ref
|
||||
// .. enum {
|
||||
// .. _INSERT_NO_MASK = -1,
|
||||
// .. _INSERT_REF_MASK = 0,
|
||||
// .. _INSERT_INT_MASK = 1,
|
||||
// .. _INSERT_LONG_MASK = 3
|
||||
// .. };
|
||||
// .. static void get_ek_bound_mh_info(EntryKind ek, BasicType& arg_type, int& arg_mask, int& arg_slots) {
|
||||
// .. arg_type = ek_bound_mh_arg_type(ek);
|
||||
// .. arg_mask = 0;
|
||||
// .. arg_slots = type2size[arg_type];;
|
||||
// .. }
|
||||
// .. static void get_ek_adapter_opt_swap_rot_info(EntryKind ek, int& swap_bytes, int& rotate) {
|
||||
// .. int swap_slots = ek_adapter_opt_swap_slots(ek);
|
||||
// .. rotate = ek_adapter_opt_swap_mode(ek);
|
||||
// .. swap_bytes = swap_slots * Interpreter::stackElementSize;
|
||||
// .. }
|
||||
// .. static int get_ek_adapter_opt_spread_info(EntryKind ek) {
|
||||
// .. return ek_adapter_opt_spread_count(ek);
|
||||
// .. }
|
||||
// ..
|
||||
// .. static void insert_arg_slots(MacroAssembler* _masm,
|
||||
// .. RegisterOrConstant arg_slots,
|
||||
// .. int arg_mask,
|
||||
// .. Register argslot_reg,
|
||||
// .. Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
|
||||
// ..
|
||||
// .. static void remove_arg_slots(MacroAssembler* _masm,
|
||||
// .. RegisterOrConstant arg_slots,
|
||||
// .. Register argslot_reg,
|
||||
// .. Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
|
||||
// ..
|
||||
// .. static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
|
||||
// ..#endif //TARGET_ARCH_NYI_6939861
|
||||
|
31
hotspot/src/os/bsd/vm/decoder_machO.cpp
Normal file
31
hotspot/src/os/bsd/vm/decoder_machO.cpp
Normal file
@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
|
||||
#ifdef __APPLE__
|
||||
#include "decoder_machO.hpp"
|
||||
#endif
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -22,45 +22,21 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "prims/jvm.h"
|
||||
#include "utilities/decoder.hpp"
|
||||
|
||||
#include <cxxabi.h>
|
||||
#ifndef OS_BSD_VM_DECODER_MACHO_HPP
|
||||
#define OS_BSD_VM_DECODER_MACHO_HPP
|
||||
|
||||
#ifdef __APPLE__
|
||||
|
||||
void Decoder::initialize() {
|
||||
_initialized = true;
|
||||
}
|
||||
|
||||
void Decoder::uninitialize() {
|
||||
_initialized = false;
|
||||
}
|
||||
|
||||
bool Decoder::can_decode_C_frame_in_vm() {
|
||||
return false;
|
||||
}
|
||||
|
||||
Decoder::decoder_status Decoder::decode(address addr, const char* filepath, char *buf, int buflen, int *offset) {
|
||||
return symbol_not_found;
|
||||
}
|
||||
#include "utilities/decoder.hpp"
// Just a placeholder for now
class MachODecoder: public NullDecoder {
public:
MachODecoder() { }
~MachODecoder() { }
};
#endif
bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
int status;
char* result;
size_t size = (size_t)buflen;
#endif // OS_BSD_VM_DECODER_MACHO_HPP
// Don't pass buf to __cxa_demangle. In case 'buf' is too small,
// __cxa_demangle will call system "realloc" for additional memory, which
// may use a different malloc/realloc mechanism than the one that allocated 'buf'.
if ((result = abi::__cxa_demangle(symbol, NULL, NULL, &status)) != NULL) {
jio_snprintf(buf, buflen, "%s", result);
// call c library's free
::free(result);
return true;
}
return false;
}
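A self-contained sketch of the same __cxa_demangle contract the code above relies on: pass NULL for the output buffer so the runtime allocates it, check the status, and release the result with the C library's free(). The mangled name below is only an example input:

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
  int status = 0;
  // NULL buffer/length: let __cxa_demangle allocate the result itself
  char* result = abi::__cxa_demangle("_ZN7Decoder8demangleEPKcPci", NULL, NULL, &status);
  if (result != NULL && status == 0) {
    printf("%s\n", result);  // e.g. Decoder::demangle(char const*, char*, int)
    free(result);            // must be released with the C library's free
  }
  return 0;
}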
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -301,6 +301,12 @@ static char cpu_arch[] = "sparc";
|
||||
#error Add appropriate cpu_arch setting
|
||||
#endif
|
||||
|
||||
// Compiler variant
|
||||
#ifdef COMPILER2
|
||||
#define COMPILER_VARIANT "server"
|
||||
#else
|
||||
#define COMPILER_VARIANT "client"
|
||||
#endif
|
||||
|
||||
#ifndef _ALLBSD_SOURCE
|
||||
// pid_t gettid()
|
||||
@ -1920,7 +1926,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
|
||||
return true;
|
||||
} else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
|
||||
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
|
||||
dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
|
||||
buf, buflen, offset, dlinfo.dli_fname)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -2507,7 +2513,7 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
|
||||
|
||||
static char saved_jvm_path[MAXPATHLEN] = {0};
|
||||
|
||||
// Find the full path to the current module, libjvm.so or libjvm_g.so
|
||||
// Find the full path to the current module, libjvm or libjvm_g
|
||||
void os::jvm_path(char *buf, jint buflen) {
|
||||
// Error checking.
|
||||
if (buflen < MAXPATHLEN) {
|
||||
@ -2532,11 +2538,11 @@ void os::jvm_path(char *buf, jint buflen) {
|
||||
|
||||
if (Arguments::created_by_gamma_launcher()) {
|
||||
// Support for the gamma launcher. Typical value for buf is
|
||||
// "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at
|
||||
// "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm". If "/jre/lib/" appears at
|
||||
// the right place in the string, then assume we are installed in a JDK and
|
||||
// we're done. Otherwise, check for a JAVA_HOME environment variable and fix
|
||||
// up the path so it looks like libjvm.so is installed there (append a
|
||||
// fake suffix hotspot/libjvm.so).
|
||||
// we're done. Otherwise, check for a JAVA_HOME environment variable and
|
||||
// construct a path to the JVM being overridden.
|
||||
|
||||
const char *p = buf + strlen(buf) - 1;
|
||||
for (int count = 0; p > buf && count < 5; ++count) {
|
||||
for (--p; p > buf && *p != '/'; --p)
|
||||
@ -2550,7 +2556,7 @@ void os::jvm_path(char *buf, jint buflen) {
|
||||
char* jrelib_p;
|
||||
int len;
|
||||
|
||||
// Check the current module name "libjvm.so" or "libjvm_g.so".
|
||||
// Check the current module name "libjvm" or "libjvm_g".
|
||||
p = strrchr(buf, '/');
|
||||
assert(strstr(p, "/libjvm") == p, "invalid library name");
|
||||
p = strstr(p, "_g") ? "_g" : "";
|
||||
@ -2563,19 +2569,32 @@ void os::jvm_path(char *buf, jint buflen) {
|
||||
// modules image doesn't have "jre" subdirectory
|
||||
len = strlen(buf);
|
||||
jrelib_p = buf + len;
|
||||
snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
|
||||
|
||||
// Add the appropriate library subdir
|
||||
snprintf(jrelib_p, buflen-len, "/jre/lib");
|
||||
if (0 != access(buf, F_OK)) {
|
||||
snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
|
||||
snprintf(jrelib_p, buflen-len, "/lib");
|
||||
}
|
||||
|
||||
// Add the appropriate client or server subdir
|
||||
len = strlen(buf);
|
||||
jrelib_p = buf + len;
|
||||
snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT);
|
||||
if (0 != access(buf, F_OK)) {
|
||||
snprintf(jrelib_p, buflen-len, "");
|
||||
}
|
||||
|
||||
// If the path exists within JAVA_HOME, add the JVM library name
|
||||
// to complete the path to JVM being overridden. Otherwise fallback
|
||||
// to the path to the current library.
|
||||
if (0 == access(buf, F_OK)) {
|
||||
// Use current module name "libjvm[_g].so" instead of
|
||||
// "libjvm"debug_only("_g")".so" since for fastdebug version
|
||||
// we should have "libjvm.so" but debug_only("_g") adds "_g"!
|
||||
// Use current module name "libjvm[_g]" instead of
|
||||
// "libjvm"debug_only("_g")"" since for fastdebug version
|
||||
// we should have "libjvm" but debug_only("_g") adds "_g"!
|
||||
len = strlen(buf);
|
||||
snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
|
||||
snprintf(buf + len, buflen-len, "/libjvm%s%s", p, JNI_LIB_SUFFIX);
|
||||
} else {
|
||||
// Go back to path of .so
|
||||
// Fall back to path of current library
|
||||
rp = realpath(dli_fname, buf);
|
||||
if (rp == NULL)
|
||||
return;
|
||||
@ -3570,26 +3589,28 @@ void os::loop_breaker(int attempts) {
|
||||
// It is only used when ThreadPriorityPolicy=1 and requires root privilege.
|
||||
|
||||
#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__)
|
||||
int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
int os::java_to_os_priority[CriticalPriority + 1] = {
|
||||
19, // 0 Entry should never be used
|
||||
|
||||
0, // 1 MinPriority
|
||||
3, // 2
|
||||
6, // 3
|
||||
|
||||
10, // 4
|
||||
15, // 5 NormPriority
|
||||
18, // 6
|
||||
10, // 4
|
||||
15, // 5 NormPriority
|
||||
18, // 6
|
||||
|
||||
21, // 7
|
||||
25, // 8
|
||||
28, // 9 NearMaxPriority
|
||||
21, // 7
|
||||
25, // 8
|
||||
28, // 9 NearMaxPriority
|
||||
|
||||
31 // 10 MaxPriority
|
||||
31, // 10 MaxPriority
|
||||
|
||||
31 // 11 CriticalPriority
|
||||
};
|
||||
#elif defined(__APPLE__)
|
||||
/* Using Mach high-level priority assignments */
|
||||
int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
int os::java_to_os_priority[CriticalPriority + 1] = {
|
||||
0, // 0 Entry should never be used (MINPRI_USER)
|
||||
|
||||
27, // 1 MinPriority
|
||||
@ -3604,10 +3625,12 @@ int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
34, // 8
|
||||
35, // 9 NearMaxPriority
|
||||
|
||||
36 // 10 MaxPriority
|
||||
36, // 10 MaxPriority
|
||||
|
||||
36 // 11 CriticalPriority
|
||||
};
|
||||
#else
|
||||
int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
int os::java_to_os_priority[CriticalPriority + 1] = {
|
||||
19, // 0 Entry should never be used
|
||||
|
||||
4, // 1 MinPriority
|
||||
@ -3622,7 +3645,9 @@ int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
-3, // 8
|
||||
-4, // 9 NearMaxPriority
|
||||
|
||||
-5 // 10 MaxPriority
|
||||
-5, // 10 MaxPriority
|
||||
|
||||
-5 // 11 CriticalPriority
|
||||
};
|
||||
#endif
|
||||
|
||||
@ -3638,6 +3663,9 @@ static int prio_init() {
|
||||
ThreadPriorityPolicy = 0;
|
||||
}
|
||||
}
|
||||
if (UseCriticalJavaThreadPriority) {
|
||||
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
|
||||
}
|
||||
return 0;
|
||||
}
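The priority tables in this change gain an extra row for CriticalPriority (index 11); when -XX:+UseCriticalJavaThreadPriority is set, prio_init() copies that row over the MaxPriority entry so existing lookups keep working unchanged. A minimal model of that remapping, with hypothetical names (java_to_os_priority_model, apply_critical_priority) and values taken from the BSD table above:

enum { MaxPriority = 10, CriticalPriority = 11 };
static int java_to_os_priority_model[CriticalPriority + 1] =
    { 19, 0, 3, 6, 10, 15, 18, 21, 25, 28, 31, 31 };

static void apply_critical_priority(bool use_critical) {
  if (use_critical) {
    // MaxPriority now maps to whatever the platform reserves for critical threads
    java_to_os_priority_model[MaxPriority] = java_to_os_priority_model[CriticalPriority];
  }
}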
|
||||
|
||||
|
@ -23,11 +23,11 @@
|
||||
*/
|
||||
|
||||
#include "prims/jvm.h"
|
||||
#include "utilities/decoder.hpp"
|
||||
#include "utilities/decoder_elf.hpp"
|
||||
|
||||
#include <cxxabi.h>
|
||||
|
||||
bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
|
||||
bool ElfDecoder::demangle(const char* symbol, char *buf, int buflen) {
|
||||
int status;
|
||||
char* result;
|
||||
size_t size = (size_t)buflen;
|
||||
@ -43,3 +43,4 @@ bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1732,7 +1732,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
|
||||
return true;
|
||||
} else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
|
||||
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
|
||||
dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
|
||||
buf, buflen, offset, dlinfo.dli_fname)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -3383,7 +3383,7 @@ void os::loop_breaker(int attempts) {
|
||||
// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
|
||||
// It is only used when ThreadPriorityPolicy=1 and requires root privilege.
|
||||
|
||||
int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
int os::java_to_os_priority[CriticalPriority + 1] = {
|
||||
19, // 0 Entry should never be used
|
||||
|
||||
4, // 1 MinPriority
|
||||
@ -3398,7 +3398,9 @@ int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
-3, // 8
|
||||
-4, // 9 NearMaxPriority
|
||||
|
||||
-5 // 10 MaxPriority
|
||||
-5, // 10 MaxPriority
|
||||
|
||||
-5 // 11 CriticalPriority
|
||||
};
|
||||
|
||||
static int prio_init() {
|
||||
@ -3413,6 +3415,9 @@ static int prio_init() {
|
||||
ThreadPriorityPolicy = 0;
|
||||
}
|
||||
}
|
||||
if (UseCriticalJavaThreadPriority) {
|
||||
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -701,6 +701,14 @@ GetJREPath(char *path, jint pathsize, char * arch, jboolean speculative)
|
||||
char libjava[MAXPATHLEN];
|
||||
|
||||
if (GetApplicationHome(path, pathsize)) {
|
||||
|
||||
/* Is the JRE universal, i.e. no arch dir? */
|
||||
sprintf(libjava, "%s/jre/lib/" JAVA_DLL, path);
|
||||
if (access(libjava, F_OK) == 0) {
|
||||
strcat(path, "/jre");
|
||||
goto found;
|
||||
}
|
||||
|
||||
/* Is JRE co-located with the application? */
|
||||
sprintf(libjava, "%s/lib/%s/" JAVA_DLL, path, arch);
|
||||
if (access(libjava, F_OK) == 0) {
|
||||
@ -734,7 +742,7 @@ LoadJavaVM(const char *jvmpath, InvocationFunctions *ifn)
|
||||
ifn->GetDefaultJavaVMInitArgs = JNI_GetDefaultJavaVMInitArgs;
|
||||
return JNI_TRUE;
|
||||
#else
|
||||
Dl_info dlinfo;
|
||||
Dl_info dlinfo;
|
||||
void *libjvm;
|
||||
|
||||
if (_launcher_debug) {
|
||||
|
@ -22,10 +22,11 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "utilities/decoder.hpp"
|
||||
#include "utilities/decoder_elf.hpp"
|
||||
|
||||
#include <demangle.h>
|
||||
|
||||
bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
|
||||
bool ElfDecoder::demangle(const char* symbol, char *buf, int buflen) {
|
||||
return !cplus_demangle(symbol, buf, (size_t)buflen);
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,17 +28,17 @@
|
||||
// This is embedded via include into the class OSThread
|
||||
|
||||
private:
|
||||
|
||||
thread_t _thread_id; // Solaris thread id
|
||||
unsigned int _lwp_id; // lwp ID, only used with bound threads
|
||||
sigset_t _caller_sigmask; // Caller's signal mask
|
||||
bool _vm_created_thread; // true if the VM create this thread
|
||||
// false if primary thread or attached thread
|
||||
thread_t _thread_id; // Solaris thread id
|
||||
uint _lwp_id; // lwp ID, only used with bound threads
|
||||
int _native_priority; // Saved native priority when starting
|
||||
// a bound thread
|
||||
sigset_t _caller_sigmask; // Caller's signal mask
|
||||
bool _vm_created_thread; // true if the VM created this thread,
|
||||
// false if primary thread or attached thread
|
||||
public:
|
||||
|
||||
thread_t thread_id() const { return _thread_id; }
|
||||
|
||||
unsigned int lwp_id() const { return _lwp_id; }
|
||||
thread_t thread_id() const { return _thread_id; }
|
||||
uint lwp_id() const { return _lwp_id; }
|
||||
int native_priority() const { return _native_priority; }
|
||||
|
||||
// Set and get state of _vm_created_thread flag
|
||||
void set_vm_created() { _vm_created_thread = true; }
|
||||
@ -62,8 +62,9 @@
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
void set_thread_id(thread_t id) { _thread_id = id; }
|
||||
void set_lwp_id(unsigned int id){ _lwp_id = id; }
|
||||
void set_thread_id(thread_t id) { _thread_id = id; }
|
||||
void set_lwp_id(uint id) { _lwp_id = id; }
|
||||
void set_native_priority(int prio) { _native_priority = prio; }
|
||||
|
||||
// ***************************************************************
|
||||
// interrupt support. interrupts (using signals) are used to get
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -114,6 +114,7 @@
|
||||
# include <sys/rtpriocntl.h>
|
||||
# include <sys/tspriocntl.h>
|
||||
# include <sys/iapriocntl.h>
|
||||
# include <sys/fxpriocntl.h>
|
||||
# include <sys/loadavg.h>
|
||||
# include <string.h>
|
||||
# include <stdio.h>
|
||||
@ -129,8 +130,8 @@
|
||||
#ifdef _GNU_SOURCE
|
||||
// See bug #6514594
|
||||
extern "C" int madvise(caddr_t, size_t, int);
|
||||
extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
|
||||
int attr, int mask);
|
||||
extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
|
||||
int attr, int mask);
|
||||
#endif //_GNU_SOURCE
|
||||
|
||||
/*
|
||||
@ -215,8 +216,9 @@ struct memcntl_mha {
|
||||
#define MaximumPriority 127
|
||||
|
||||
// Values for ThreadPriorityPolicy == 1
|
||||
int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
|
||||
80, 96, 112, 124, 127 };
|
||||
int prio_policy1[CriticalPriority+1] = {
|
||||
-99999, 0, 16, 32, 48, 64,
|
||||
80, 96, 112, 124, 127, 127 };
|
||||
|
||||
// System parameters used internally
|
||||
static clock_t clock_tics_per_sec = 100;
|
||||
@ -1048,15 +1050,22 @@ extern "C" void* java_start(void* thread_addr) {
|
||||
}
|
||||
|
||||
// If the creator called set priority before we started,
|
||||
// we need to call set priority now that we have an lwp.
|
||||
// Get the priority from libthread and set the priority
|
||||
// for the new Solaris lwp.
|
||||
// we need to call set_native_priority now that we have an lwp.
|
||||
// We used to get the priority from thr_getprio (we called
|
||||
// thr_setprio way back in create_thread) and pass it to
|
||||
// set_native_priority, but Solaris scales the priority
|
||||
// in java_to_os_priority, so when we read it back here,
|
||||
// we pass trash to set_native_priority instead of what's
|
||||
// in java_to_os_priority. So we save the native priority
|
||||
// in the osThread and recall it here.
|
||||
|
||||
if ( osthr->thread_id() != -1 ) {
|
||||
if ( UseThreadPriorities ) {
|
||||
thr_getprio(osthr->thread_id(), &prio);
|
||||
int prio = osthr->native_priority();
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
|
||||
osthr->thread_id(), osthr->lwp_id(), prio );
|
||||
tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
|
||||
INTPTR_FORMAT ", setting priority: %d\n",
|
||||
osthr->thread_id(), osthr->lwp_id(), prio);
|
||||
}
|
||||
os::set_native_priority(thread, prio);
|
||||
}
|
||||
@ -1353,13 +1362,12 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
|
||||
// Remember that we created this thread so we can set priority on it
|
||||
osthread->set_vm_created();
|
||||
|
||||
// Set the default thread priority otherwise use NormalPriority
|
||||
|
||||
if ( UseThreadPriorities ) {
|
||||
thr_setprio(tid, (DefaultThreadPriority == -1) ?
|
||||
// Set the default thread priority. If using bound threads, setting
|
||||
// lwp priority will be delayed until thread start.
|
||||
set_native_priority(thread,
|
||||
DefaultThreadPriority == -1 ?
|
||||
java_to_os_priority[NormPriority] :
|
||||
DefaultThreadPriority);
|
||||
}
|
||||
|
||||
// Initial thread state is INITIALIZED, not SUSPENDED
|
||||
osthread->set_state(INITIALIZED);
|
||||
@ -1997,7 +2005,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
|
||||
}
|
||||
if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
|
||||
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
|
||||
dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
|
||||
buf, buflen, offset, dlinfo.dli_fname)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -2015,7 +2023,7 @@ bool os::dll_address_to_function_name(address addr, char *buf,
|
||||
return true;
|
||||
} else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
|
||||
if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
|
||||
dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
|
||||
buf, buflen, offset, dlinfo.dli_fname)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -3728,7 +3736,7 @@ typedef struct {
|
||||
} SchedInfo;
|
||||
|
||||
|
||||
static SchedInfo tsLimits, iaLimits, rtLimits;
|
||||
static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
|
||||
|
||||
#ifdef ASSERT
|
||||
static int ReadBackValidate = 1;
|
||||
@ -3739,6 +3747,8 @@ static int myMax = 0;
|
||||
static int myCur = 0;
|
||||
static bool priocntl_enable = false;
|
||||
|
||||
static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
|
||||
static int java_MaxPriority_to_os_priority = 0; // Saved mapping
|
||||
|
||||
// Call the version of priocntl suitable for all supported versions
|
||||
// of Solaris. We need to call through this wrapper so that we can
|
||||
@ -3783,19 +3793,27 @@ int lwp_priocntl_init ()
|
||||
if (os::Solaris::T2_libthread() || UseBoundThreads) {
|
||||
// If ThreadPriorityPolicy is 1, switch tables
|
||||
if (ThreadPriorityPolicy == 1) {
|
||||
for (i = 0 ; i < MaxPriority+1; i++)
|
||||
for (i = 0 ; i < CriticalPriority+1; i++)
|
||||
os::java_to_os_priority[i] = prio_policy1[i];
|
||||
}
|
||||
if (UseCriticalJavaThreadPriority) {
|
||||
// MaxPriority always maps to the FX scheduling class and criticalPrio.
|
||||
// See set_native_priority() and set_lwp_class_and_priority().
|
||||
// Save original MaxPriority mapping in case attempt to
|
||||
// use critical priority fails.
|
||||
java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
|
||||
// Set negative to distinguish from other priorities
|
||||
os::java_to_os_priority[MaxPriority] = -criticalPrio;
|
||||
}
|
||||
}
|
||||
// Not using Bound Threads, set to ThreadPolicy 1
|
||||
else {
|
||||
for ( i = 0 ; i < MaxPriority+1; i++ ) {
|
||||
for ( i = 0 ; i < CriticalPriority+1; i++ ) {
|
||||
os::java_to_os_priority[i] = prio_policy1[i];
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
// Get IDs for a set of well-known scheduling classes.
|
||||
// TODO-FIXME: GETCLINFO returns the current # of classes in the
|
||||
// the system. We should have a loop that iterates over the
|
||||
@ -3828,24 +3846,33 @@ int lwp_priocntl_init ()
|
||||
rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
|
||||
rtLimits.minPrio = 0;
|
||||
|
||||
strcpy(ClassInfo.pc_clname, "FX");
|
||||
ClassInfo.pc_cid = -1;
|
||||
rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
|
||||
if (rslt < 0) return errno;
|
||||
assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
|
||||
fxLimits.schedPolicy = ClassInfo.pc_cid;
|
||||
fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
|
||||
fxLimits.minPrio = 0;
|
||||
|
||||
// Query our "current" scheduling class.
|
||||
// This will normally be IA,TS or, rarely, RT.
|
||||
memset (&ParmInfo, 0, sizeof(ParmInfo));
|
||||
// This will normally be IA, TS or, rarely, FX or RT.
|
||||
memset(&ParmInfo, 0, sizeof(ParmInfo));
|
||||
ParmInfo.pc_cid = PC_CLNULL;
|
||||
rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
|
||||
if ( rslt < 0 ) return errno;
|
||||
rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
|
||||
if (rslt < 0) return errno;
|
||||
myClass = ParmInfo.pc_cid;
|
||||
|
||||
// We now know our scheduling classId, get specific information
|
||||
// the class.
|
||||
// about the class.
|
||||
ClassInfo.pc_cid = myClass;
|
||||
ClassInfo.pc_clname[0] = 0;
|
||||
rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
|
||||
if ( rslt < 0 ) return errno;
|
||||
rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
|
||||
if (rslt < 0) return errno;
|
||||
|
||||
if (ThreadPriorityVerbose)
|
||||
tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
|
||||
}
|
||||
|
||||
memset(&ParmInfo, 0, sizeof(pcparms_t));
|
||||
ParmInfo.pc_cid = PC_CLNULL;
|
||||
@ -3865,6 +3892,11 @@ int lwp_priocntl_init ()
|
||||
myMin = tsLimits.minPrio;
|
||||
myMax = tsLimits.maxPrio;
|
||||
myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict
|
||||
} else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
|
||||
fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
|
||||
myMin = fxLimits.minPrio;
|
||||
myMax = fxLimits.maxPrio;
|
||||
myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict
|
||||
} else {
|
||||
// No clue - punt
|
||||
if (ThreadPriorityVerbose)
|
||||
@ -3872,8 +3904,9 @@ int lwp_priocntl_init ()
|
||||
return EINVAL; // no clue, punt
|
||||
}
|
||||
|
||||
if (ThreadPriorityVerbose)
|
||||
tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
|
||||
}
|
||||
|
||||
priocntl_enable = true; // Enable changing priorities
|
||||
return 0;
|
||||
@ -3882,6 +3915,7 @@ int lwp_priocntl_init ()
|
||||
#define IAPRI(x) ((iaparms_t *)((x).pc_clparms))
|
||||
#define RTPRI(x) ((rtparms_t *)((x).pc_clparms))
|
||||
#define TSPRI(x) ((tsparms_t *)((x).pc_clparms))
|
||||
#define FXPRI(x) ((fxparms_t *)((x).pc_clparms))
|
||||
|
||||
|
||||
// scale_to_lwp_priority
|
||||
@ -3900,13 +3934,13 @@ int scale_to_lwp_priority (int rMin, int rMax, int x)
|
||||
}
|
||||
|
||||
|
||||
// set_lwp_priority
|
||||
// set_lwp_class_and_priority
|
||||
//
|
||||
// Set the priority of the lwp. This call should only be made
|
||||
// when using bound threads (T2 threads are bound by default).
|
||||
// Set the class and priority of the lwp. This call should only
|
||||
// be made when using bound threads (T2 threads are bound by default).
|
||||
//
|
||||
int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
|
||||
{
|
||||
int set_lwp_class_and_priority(int ThreadID, int lwpid,
|
||||
int newPrio, int new_class, bool scale) {
|
||||
int rslt;
|
||||
int Actual, Expected, prv;
|
||||
pcparms_t ParmInfo; // for GET-SET
|
||||
@ -3927,19 +3961,20 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
|
||||
// If lwp hasn't started yet, just return
|
||||
// the _start routine will call us again.
|
||||
if ( lwpid <= 0 ) {
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
|
||||
tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
|
||||
INTPTR_FORMAT " to %d, lwpid not set",
|
||||
ThreadID, newPrio);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
|
||||
tty->print_cr ("set_lwp_class_and_priority("
|
||||
INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
|
||||
ThreadID, lwpid, newPrio);
|
||||
}
|
||||
|
||||
@ -3948,40 +3983,70 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
|
||||
rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
|
||||
if (rslt < 0) return errno;
|
||||
|
||||
if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
|
||||
int cur_class = ParmInfo.pc_cid;
|
||||
ParmInfo.pc_cid = (id_t)new_class;
|
||||
|
||||
if (new_class == rtLimits.schedPolicy) {
|
||||
rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms;
|
||||
rtInfo->rt_pri = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
|
||||
rtInfo->rt_pri = scale ? scale_to_lwp_priority(rtLimits.minPrio,
|
||||
rtLimits.maxPrio, newPrio)
|
||||
: newPrio;
|
||||
rtInfo->rt_tqsecs = RT_NOCHANGE;
|
||||
rtInfo->rt_tqnsecs = RT_NOCHANGE;
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
|
||||
}
|
||||
} else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
|
||||
iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
|
||||
int maxClamped = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
|
||||
iaInfo->ia_upri = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
|
||||
iaInfo->ia_uprilim = IA_NOCHANGE;
|
||||
} else if (new_class == iaLimits.schedPolicy) {
|
||||
iaparms_t* iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
|
||||
int maxClamped = MIN2(iaLimits.maxPrio,
|
||||
cur_class == new_class
|
||||
? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
|
||||
iaInfo->ia_upri = scale ? scale_to_lwp_priority(iaLimits.minPrio,
|
||||
maxClamped, newPrio)
|
||||
: newPrio;
|
||||
iaInfo->ia_uprilim = cur_class == new_class
|
||||
? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
|
||||
iaInfo->ia_mode = IA_NOCHANGE;
|
||||
iaInfo->ia_nice = cur_class == new_class ? IA_NOCHANGE : NZERO;
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr ("IA: [%d...%d] %d->%d\n",
|
||||
iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
|
||||
tty->print_cr("IA: [%d...%d] %d->%d\n",
|
||||
iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
|
||||
}
|
||||
} else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
|
||||
tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
|
||||
int maxClamped = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
|
||||
prv = tsInfo->ts_upri;
|
||||
tsInfo->ts_upri = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
|
||||
tsInfo->ts_uprilim = IA_NOCHANGE;
|
||||
} else if (new_class == tsLimits.schedPolicy) {
|
||||
tsparms_t* tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
|
||||
int maxClamped = MIN2(tsLimits.maxPrio,
|
||||
cur_class == new_class
|
||||
? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
|
||||
tsInfo->ts_upri = scale ? scale_to_lwp_priority(tsLimits.minPrio,
|
||||
maxClamped, newPrio)
|
||||
: newPrio;
|
||||
tsInfo->ts_uprilim = cur_class == new_class
|
||||
? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
|
||||
prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
|
||||
tty->print_cr("TS: [%d...%d] %d->%d\n",
|
||||
tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
|
||||
}
|
||||
} else if (new_class == fxLimits.schedPolicy) {
|
||||
fxparms_t* fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
|
||||
int maxClamped = MIN2(fxLimits.maxPrio,
|
||||
cur_class == new_class
|
||||
? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
|
||||
fxInfo->fx_upri = scale ? scale_to_lwp_priority(fxLimits.minPrio,
|
||||
maxClamped, newPrio)
|
||||
: newPrio;
|
||||
fxInfo->fx_uprilim = cur_class == new_class
|
||||
? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
|
||||
fxInfo->fx_tqsecs = FX_NOCHANGE;
|
||||
fxInfo->fx_tqnsecs = FX_NOCHANGE;
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr("FX: [%d...%d] %d->%d\n",
|
||||
fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
|
||||
}
|
||||
if (prv == tsInfo->ts_upri) return 0;
|
||||
} else {
|
||||
if ( ThreadPriorityVerbose ) {
|
||||
tty->print_cr ("Unknown scheduling class\n");
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr("Unknown new scheduling class %d\n", new_class);
|
||||
}
|
||||
return EINVAL; // no clue, punt
|
||||
return EINVAL; // no clue, punt
|
||||
}
|
||||
|
||||
rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
|
||||
@ -4016,16 +4081,20 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
|
||||
} else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
|
||||
Actual = TSPRI(ReadBack)->ts_upri;
|
||||
Expected = TSPRI(ParmInfo)->ts_upri;
|
||||
} else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
|
||||
Actual = FXPRI(ReadBack)->fx_upri;
|
||||
Expected = FXPRI(ParmInfo)->fx_upri;
|
||||
} else {
|
||||
if ( ThreadPriorityVerbose ) {
|
||||
tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
|
||||
ParmInfo.pc_cid);
|
||||
}
|
||||
}
|
||||
|
||||
if (Actual != Expected) {
|
||||
if ( ThreadPriorityVerbose ) {
|
||||
tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
|
||||
lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
|
||||
if (ThreadPriorityVerbose) {
|
||||
tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
|
||||
lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -4033,8 +4102,6 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Solaris only gives access to 128 real priorities at a time,
|
||||
// so we expand Java's ten to fill this range. This would be better
|
||||
// if we dynamically adjusted relative priorities.
|
||||
@ -4055,8 +4122,7 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
|
||||
// which do not explicitly alter their thread priorities.
|
||||
//
|
||||
|
||||
|
||||
int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
int os::java_to_os_priority[CriticalPriority + 1] = {
|
||||
-99999, // 0 Entry should never be used
|
||||
|
||||
0, // 1 MinPriority
|
||||
@ -4071,17 +4137,51 @@ int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
127, // 8
|
||||
127, // 9 NearMaxPriority
|
||||
|
||||
127 // 10 MaxPriority
|
||||
127, // 10 MaxPriority
|
||||
|
||||
-criticalPrio // 11 CriticalPriority
|
||||
};
|
||||
|
||||
|
||||
OSReturn os::set_native_priority(Thread* thread, int newpri) {
|
||||
OSThread* osthread = thread->osthread();
|
||||
|
||||
// Save requested priority in case the thread hasn't been started
|
||||
osthread->set_native_priority(newpri);
|
||||
|
||||
// Check for critical priority request
|
||||
bool fxcritical = false;
|
||||
if (newpri == -criticalPrio) {
|
||||
fxcritical = true;
|
||||
newpri = criticalPrio;
|
||||
}
|
||||
|
||||
assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
|
||||
if ( !UseThreadPriorities ) return OS_OK;
|
||||
int status = thr_setprio(thread->osthread()->thread_id(), newpri);
|
||||
if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
|
||||
status |= (set_lwp_priority (thread->osthread()->thread_id(),
|
||||
thread->osthread()->lwp_id(), newpri ));
|
||||
if (!UseThreadPriorities) return OS_OK;
|
||||
|
||||
int status = 0;
|
||||
|
||||
if (!fxcritical) {
|
||||
// Use thr_setprio only if we have a priority that thr_setprio understands
|
||||
status = thr_setprio(thread->osthread()->thread_id(), newpri);
|
||||
}
|
||||
|
||||
if (os::Solaris::T2_libthread() ||
|
||||
(UseBoundThreads && osthread->is_vm_created())) {
|
||||
int lwp_status =
|
||||
set_lwp_class_and_priority(osthread->thread_id(),
|
||||
osthread->lwp_id(),
|
||||
newpri,
|
||||
fxcritical ? fxLimits.schedPolicy : myClass,
|
||||
!fxcritical);
|
||||
if (lwp_status != 0 && fxcritical) {
|
||||
// Try again, this time without changing the scheduling class
|
||||
newpri = java_MaxPriority_to_os_priority;
|
||||
lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
|
||||
osthread->lwp_id(),
|
||||
newpri, myClass, false);
|
||||
}
|
||||
status |= lwp_status;
|
||||
}
|
||||
return (status == 0) ? OS_OK : OS_ERR;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,22 +24,24 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "prims/jvm.h"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/decoder.hpp"
|
||||
#include "decoder_windows.hpp"
|
||||
|
||||
HMODULE Decoder::_dbghelp_handle = NULL;
|
||||
bool Decoder::_can_decode_in_vm = false;
|
||||
pfn_SymGetSymFromAddr64 Decoder::_pfnSymGetSymFromAddr64 = NULL;
|
||||
pfn_UndecorateSymbolName Decoder::_pfnUndecorateSymbolName = NULL;
|
||||
WindowsDecoder::WindowsDecoder() {
|
||||
_dbghelp_handle = NULL;
|
||||
_can_decode_in_vm = false;
|
||||
_pfnSymGetSymFromAddr64 = NULL;
|
||||
_pfnUndecorateSymbolName = NULL;
|
||||
|
||||
void Decoder::initialize() {
|
||||
if (!_initialized) {
|
||||
_initialized = true;
|
||||
_decoder_status = no_error;
|
||||
initialize();
|
||||
}
|
||||
|
||||
HINSTANCE handle = os::win32::load_Windows_dll("dbghelp.dll", NULL, 0);
|
||||
void WindowsDecoder::initialize() {
|
||||
if (!has_error() && _dbghelp_handle == NULL) {
|
||||
HMODULE handle = ::LoadLibrary("dbghelp.dll");
|
||||
if (!handle) {
|
||||
_decoder_status = helper_not_found;
|
||||
return;
|
||||
return;
|
||||
}
|
||||
|
||||
_dbghelp_handle = handle;
|
||||
@ -70,32 +72,29 @@ void Decoder::initialize() {
|
||||
|
||||
// find out if jvm.dll contains private symbols, by decoding
|
||||
// current function and comparing the result
|
||||
address addr = (address)Decoder::initialize;
|
||||
address addr = (address)Decoder::decode;
|
||||
char buf[MAX_PATH];
|
||||
if (decode(addr, buf, sizeof(buf), NULL) == no_error) {
|
||||
_can_decode_in_vm = !strcmp(buf, "Decoder::initialize");
|
||||
if (decode(addr, buf, sizeof(buf), NULL)) {
|
||||
_can_decode_in_vm = !strcmp(buf, "Decoder::decode");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Decoder::uninitialize() {
|
||||
assert(_initialized, "Decoder not yet initialized");
|
||||
void WindowsDecoder::uninitialize() {
|
||||
_pfnSymGetSymFromAddr64 = NULL;
|
||||
_pfnUndecorateSymbolName = NULL;
|
||||
if (_dbghelp_handle != NULL) {
|
||||
::FreeLibrary(_dbghelp_handle);
|
||||
}
|
||||
_initialized = false;
|
||||
_dbghelp_handle = NULL;
|
||||
}
|
||||
|
||||
bool Decoder::can_decode_C_frame_in_vm() {
|
||||
initialize();
|
||||
return _can_decode_in_vm;
|
||||
bool WindowsDecoder::can_decode_C_frame_in_vm() const {
|
||||
return (!has_error() && _can_decode_in_vm);
|
||||
}
|
||||
|
||||
|
||||
Decoder::decoder_status Decoder::decode(address addr, char *buf, int buflen, int *offset) {
|
||||
assert(_initialized, "Decoder not yet initialized");
|
||||
bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath) {
|
||||
if (_pfnSymGetSymFromAddr64 != NULL) {
|
||||
PIMAGEHLP_SYMBOL64 pSymbol;
|
||||
char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
|
||||
@ -105,19 +104,20 @@ Decoder::decoder_status Decoder::decode(address addr, char *buf, int buflen, int
|
||||
DWORD64 displacement;
|
||||
if (_pfnSymGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
|
||||
if (buf != NULL) {
|
||||
if (!demangle(pSymbol->Name, buf, buflen)) {
|
||||
if (demangle(pSymbol->Name, buf, buflen)) {
|
||||
jio_snprintf(buf, buflen, "%s", pSymbol->Name);
|
||||
}
|
||||
}
|
||||
if (offset != NULL) *offset = (int)displacement;
|
||||
return no_error;
|
||||
if(offset != NULL) *offset = (int)displacement;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return helper_not_found;
|
||||
if (buf != NULL && buflen > 0) buf[0] = '\0';
|
||||
if (offset != NULL) *offset = -1;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool Decoder::demangle(const char* symbol, char *buf, int buflen) {
|
||||
assert(_initialized, "Decoder not yet initialized");
|
||||
bool WindowsDecoder::demangle(const char* symbol, char *buf, int buflen) {
|
||||
return _pfnUndecorateSymbolName != NULL &&
|
||||
_pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
|
||||
}
|
||||
|
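The reworked decode() above returns a bool and, on failure, clears the output buffer and sets the offset to -1 so callers only test the return value. A small self-contained sketch of that contract; lookup_symbol is a hypothetical stand-in, not the dbghelp API:

#include <cstddef>

// Hypothetical stand-in for the dbghelp-based symbol lookup; always fails here.
static bool lookup_symbol(const void*, char*, int, int*) { return false; }

// On success the buffer and offset are filled in; on failure the buffer is
// cleared and the offset set to -1, so callers only need to test the result.
static bool decode(const void* addr, char* buf, int buflen, int* offset) {
  if (lookup_symbol(addr, buf, buflen, offset)) {
    return true;
  }
  if (buf != nullptr && buflen > 0) buf[0] = '\0';
  if (offset != nullptr) *offset = -1;
  return false;
}

int main() {
  char name[64];
  int off = 0;
  return decode(nullptr, name, sizeof(name), &off) ? 0 : 1;
}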
61 hotspot/src/os/windows/vm/decoder_windows.hpp Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_WINDOWS_VM_DECODER_WINDOWS_HPP
|
||||
#define OS_WINDOWS_VM_DECODER_WINDOWS_HPP
|
||||
|
||||
#include <windows.h>
|
||||
#include <imagehlp.h>
|
||||
|
||||
#include "utilities/decoder.hpp"
|
||||
|
||||
// functions needed for decoding symbols
|
||||
typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
|
||||
typedef BOOL (WINAPI *pfn_SymInitialize)(HANDLE, PCTSTR, BOOL);
|
||||
typedef BOOL (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
|
||||
typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);
|
||||
|
||||
class WindowsDecoder: public NullDecoder {
|
||||
|
||||
public:
|
||||
WindowsDecoder();
|
||||
~WindowsDecoder() { uninitialize(); };
|
||||
|
||||
bool can_decode_C_frame_in_vm() const;
|
||||
bool demangle(const char* symbol, char *buf, int buflen);
|
||||
bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath = NULL);
|
||||
|
||||
private:
|
||||
void initialize();
|
||||
void uninitialize();
|
||||
|
||||
private:
|
||||
HMODULE _dbghelp_handle;
|
||||
bool _can_decode_in_vm;
|
||||
pfn_SymGetSymFromAddr64 _pfnSymGetSymFromAddr64;
|
||||
pfn_UndecorateSymbolName _pfnUndecorateSymbolName;
|
||||
};
|
||||
|
||||
#endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1391,7 +1391,7 @@ bool os::dll_address_to_library_name(address addr, char* buf,
|
||||
|
||||
bool os::dll_address_to_function_name(address addr, char *buf,
|
||||
int buflen, int *offset) {
|
||||
if (Decoder::decode(addr, buf, buflen, offset) == Decoder::no_error) {
|
||||
if (Decoder::decode(addr, buf, buflen, offset)) {
|
||||
return true;
|
||||
}
|
||||
if (offset != NULL) *offset = -1;
|
||||
@ -3296,7 +3296,7 @@ void os::yield_all(int attempts) {
|
||||
// so we compress Java's ten down to seven. It would be better
|
||||
// if we dynamically adjusted relative priorities.
|
||||
|
||||
int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
int os::java_to_os_priority[CriticalPriority + 1] = {
|
||||
THREAD_PRIORITY_IDLE, // 0 Entry should never be used
|
||||
THREAD_PRIORITY_LOWEST, // 1 MinPriority
|
||||
THREAD_PRIORITY_LOWEST, // 2
|
||||
@ -3307,10 +3307,11 @@ int os::java_to_os_priority[MaxPriority + 1] = {
|
||||
THREAD_PRIORITY_ABOVE_NORMAL, // 7
|
||||
THREAD_PRIORITY_ABOVE_NORMAL, // 8
|
||||
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority
|
||||
THREAD_PRIORITY_HIGHEST // 10 MaxPriority
|
||||
THREAD_PRIORITY_HIGHEST, // 10 MaxPriority
|
||||
THREAD_PRIORITY_HIGHEST // 11 CriticalPriority
|
||||
};
|
||||
|
||||
int prio_policy1[MaxPriority + 1] = {
|
||||
int prio_policy1[CriticalPriority + 1] = {
|
||||
THREAD_PRIORITY_IDLE, // 0 Entry should never be used
|
||||
THREAD_PRIORITY_LOWEST, // 1 MinPriority
|
||||
THREAD_PRIORITY_LOWEST, // 2
|
||||
@ -3321,17 +3322,21 @@ int prio_policy1[MaxPriority + 1] = {
|
||||
THREAD_PRIORITY_ABOVE_NORMAL, // 7
|
||||
THREAD_PRIORITY_HIGHEST, // 8
|
||||
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority
|
||||
THREAD_PRIORITY_TIME_CRITICAL // 10 MaxPriority
|
||||
THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority
|
||||
THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority
|
||||
};
|
||||
|
||||
static int prio_init() {
// If ThreadPriorityPolicy is 1, switch tables
if (ThreadPriorityPolicy == 1) {
int i;
for (i = 0; i < MaxPriority + 1; i++) {
for (i = 0; i < CriticalPriority + 1; i++) {
os::java_to_os_priority[i] = prio_policy1[i];
}
}
if (UseCriticalJavaThreadPriority) {
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
}
return 0;
}

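prio_init() above sizes both mapping tables by CriticalPriority + 1 and, when UseCriticalJavaThreadPriority is set, aliases the MaxPriority entry to the critical one. A standalone model of that table setup, with plain integers in place of the THREAD_PRIORITY_* constants:

#include <iostream>

const int MaxPriority      = 10;   // mirrors the Java priority range 1..10
const int CriticalPriority = 11;   // extra slot used for critical threads

// Plain integers stand in for THREAD_PRIORITY_* constants.
int java_to_os_priority[CriticalPriority + 1] = {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6};
int prio_policy1[CriticalPriority + 1]        = {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6};

static int prio_init(bool policy1, bool use_critical_java_priority) {
  if (policy1) {
    for (int i = 0; i < CriticalPriority + 1; i++) {
      java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (use_critical_java_priority) {
    // Java MaxPriority threads get the same OS priority as critical threads.
    java_to_os_priority[MaxPriority] = java_to_os_priority[CriticalPriority];
  }
  return 0;
}

int main() {
  prio_init(false, true);
  std::cout << java_to_os_priority[MaxPriority] << '\n';  // prints 6
}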
@ -1354,9 +1354,10 @@ class LIR_OpBranch: public LIR_Op {
|
||||
CodeStub* _stub; // if this is a branch to a stub, this is the stub
|
||||
|
||||
public:
|
||||
LIR_OpBranch(LIR_Condition cond, Label* lbl)
|
||||
LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
|
||||
: LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
|
||||
, _cond(cond)
|
||||
, _type(type)
|
||||
, _label(lbl)
|
||||
, _block(NULL)
|
||||
, _ublock(NULL)
|
||||
@ -2053,7 +2054,7 @@ class LIR_List: public CompilationResourceObj {
|
||||
void jump(CodeStub* stub) {
|
||||
append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
|
||||
}
|
||||
void branch(LIR_Condition cond, Label* lbl) { append(new LIR_OpBranch(cond, lbl)); }
|
||||
void branch(LIR_Condition cond, BasicType type, Label* lbl) { append(new LIR_OpBranch(cond, type, lbl)); }
|
||||
void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
|
||||
assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
|
||||
append(new LIR_OpBranch(cond, type, block));
|
||||
|
@ -2350,7 +2350,7 @@ void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegi
|
||||
} else {
|
||||
LabelObj* L = new LabelObj();
|
||||
__ cmp(lir_cond_less, value, low_key);
|
||||
__ branch(lir_cond_less, L->label());
|
||||
__ branch(lir_cond_less, T_INT, L->label());
|
||||
__ cmp(lir_cond_lessEqual, value, high_key);
|
||||
__ branch(lir_cond_lessEqual, T_INT, dest);
|
||||
__ branch_destination(L->label());
|
||||
|
@ -413,8 +413,9 @@ static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, meth
|
||||
}
|
||||
bci = branch_bci + offset;
|
||||
}
|
||||
|
||||
assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
|
||||
osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
|
||||
assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
|
||||
return osr_nm;
|
||||
}
|
||||
|
||||
|
@ -1347,7 +1347,13 @@ class BacktraceBuilder: public StackObj {
return _backtrace();
}

inline void push(methodOop method, short bci, TRAPS) {
inline void push(methodOop method, int bci, TRAPS) {
// Smear the -1 bci to 0 since the array only holds unsigned
// shorts. The later line number lookup would just smear the -1
// to a 0 even if it could be recorded.
if (bci == SynchronizationEntryBCI) bci = 0;
assert(bci == (jushort)bci, "doesn't fit");

if (_index >= trace_chunk_size) {
methodHandle mhandle(THREAD, method);
expand(CHECK);
@ -1574,8 +1580,13 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
int chunk_count = 0;

for (;!st.at_end(); st.next()) {
// add element
bcis->ushort_at_put(chunk_count, st.bci());
// Add entry and smear the -1 bci to 0 since the array only holds
// unsigned shorts. The later line number lookup would just smear
// the -1 to a 0 even if it could be recorded.
int bci = st.bci();
if (bci == SynchronizationEntryBCI) bci = 0;
assert(bci == (jushort)bci, "doesn't fit");
bcis->ushort_at_put(chunk_count, bci);
methods->obj_at_put(chunk_count, st.method());

chunk_count++;
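The push() change above widens the bci parameter to int and smears the SynchronizationEntryBCI sentinel (-1) to 0 before storing it in the unsigned-short backtrace array. A tiny sketch of that conversion:

#include <cstdint>
#include <iostream>

const int SynchronizationEntryBCI = -1;  // sentinel value, as in HotSpot

// Smear the -1 sentinel to 0 so the value fits in the unsigned 16-bit slot
// used by the backtrace arrays; the real code asserts that it fits.
static uint16_t bci_for_backtrace(int bci) {
  if (bci == SynchronizationEntryBCI) bci = 0;
  return static_cast<uint16_t>(bci);
}

int main() {
  std::cout << bci_for_backtrace(-1) << ' ' << bci_for_backtrace(42) << '\n';  // "0 42"
}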
@ -204,6 +204,24 @@ Symbol* SymbolTable::lookup_only(const char* name, int len,
return s;
}

// Look up the address of the literal in the SymbolTable for this Symbol*
// Do not create any new symbols
// Do not increment the reference count to keep this alive
Symbol** SymbolTable::lookup_symbol_addr(Symbol* sym){
unsigned int hash = hash_symbol((char*)sym->bytes(), sym->utf8_length());
int index = the_table()->hash_to_index(hash);

for (HashtableEntry<Symbol*>* e = the_table()->bucket(index); e != NULL; e = e->next()) {
if (e->hash() == hash) {
Symbol* literal_sym = e->literal();
if (sym == literal_sym) {
return e->literal_addr();
}
}
}
return NULL;
}

// Suggestion: Push unicode-based lookup all the way into the hashing
// and probing logic, so there is no need for convert_to_utf8 until
// an actual new Symbol* is created.
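lookup_symbol_addr() above walks the existing hash chain and returns the address of the stored literal without creating a new symbol or touching its reference count. A rough standalone model of that bucket walk, using a toy chained table rather than the HotSpot Hashtable:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Entry { std::size_t hash; const std::string* literal; Entry* next; };

struct TinyTable {
  std::vector<Entry*> buckets = std::vector<Entry*>(16, nullptr);

  void insert(const std::string* sym) {
    std::size_t h = std::hash<std::string>{}(*sym);
    std::size_t i = h % buckets.size();
    buckets[i] = new Entry{h, sym, buckets[i]};  // leaked on purpose in this sketch
  }

  // Returns the address of the stored pointer, or nullptr if the exact
  // object is not present; nothing is created and nothing is retained.
  const std::string** lookup_addr(const std::string* sym) {
    std::size_t h = std::hash<std::string>{}(*sym);
    for (Entry* e = buckets[h % buckets.size()]; e != nullptr; e = e->next) {
      if (e->hash == h && e->literal == sym) {
        return &e->literal;
      }
    }
    return nullptr;
  }
};

int main() {
  TinyTable t;
  std::string s = "java/lang/Object";
  t.insert(&s);
  std::cout << (t.lookup_addr(&s) != nullptr) << '\n';  // prints 1
}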
@ -144,6 +144,9 @@ public:
|
||||
|
||||
static void release(Symbol* sym);
|
||||
|
||||
// Look up the address of the literal in the SymbolTable for this Symbol*
|
||||
static Symbol** lookup_symbol_addr(Symbol* sym);
|
||||
|
||||
// jchar (utf16) version of lookups
|
||||
static Symbol* lookup_unicode(const jchar* name, int len, TRAPS);
|
||||
static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);
|
||||
|
@ -2131,6 +2131,12 @@ void SystemDictionary::update_dictionary(int d_index, unsigned int d_hash,
|
||||
}
|
||||
}
|
||||
|
||||
// Assign a classid if one has not already been assigned. The
|
||||
// counter does not need to be atomically incremented since this
|
||||
// is only done while holding the SystemDictionary_lock.
|
||||
// All loaded classes get a unique ID.
|
||||
TRACE_INIT_ID(k);
|
||||
|
||||
// Check for a placeholder. If there, remove it and make a
|
||||
// new system dictionary entry.
|
||||
placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -855,23 +855,23 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
|
||||
// Note that this only sets the JavaThread _priority field, which by
|
||||
// definition is limited to Java priorities and not OS priorities.
|
||||
// The os-priority is set in the CompilerThread startup code itself
|
||||
|
||||
java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
|
||||
// CLEANUP PRIORITIES: This -if- statement hids a bug whereby the compiler
|
||||
// threads never have their OS priority set. The assumption here is to
|
||||
// enable the Performance group to do flag tuning, figure out a suitable
|
||||
// CompilerThreadPriority, and then remove this 'if' statement (and
|
||||
// comment) and unconditionally set the priority.
|
||||
|
||||
// Compiler Threads should be at the highest Priority
|
||||
if ( CompilerThreadPriority != -1 )
|
||||
os::set_native_priority( compiler_thread, CompilerThreadPriority );
|
||||
else
|
||||
os::set_native_priority( compiler_thread, os::java_to_os_priority[NearMaxPriority]);
|
||||
// Note that we cannot call os::set_priority because it expects Java
|
||||
// priorities and we are *explicitly* using OS priorities so that it's
|
||||
// possible to set the compiler thread priority higher than any Java
|
||||
// thread.
|
||||
|
||||
// Note that I cannot call os::set_priority because it expects Java
|
||||
// priorities and I am *explicitly* using OS priorities so that it's
|
||||
// possible to set the compiler thread priority higher than any Java
|
||||
// thread.
|
||||
int native_prio = CompilerThreadPriority;
|
||||
if (native_prio == -1) {
|
||||
if (UseCriticalCompilerThreadPriority) {
|
||||
native_prio = os::java_to_os_priority[CriticalPriority];
|
||||
} else {
|
||||
native_prio = os::java_to_os_priority[NearMaxPriority];
|
||||
}
|
||||
}
|
||||
os::set_native_priority(compiler_thread, native_prio);
|
||||
|
||||
java_lang_Thread::set_daemon(thread_oop());
|
||||
|
||||
@ -879,6 +879,7 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
|
||||
Threads::add(compiler_thread);
|
||||
Thread::start(compiler_thread);
|
||||
}
|
||||
|
||||
// Let go of Threads_lock before yielding
|
||||
os::yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
|
||||
|
||||
@ -961,7 +962,7 @@ void CompileBroker::compile_method_base(methodHandle method,
|
||||
methodHandle hot_method,
|
||||
int hot_count,
|
||||
const char* comment,
|
||||
TRAPS) {
|
||||
Thread* thread) {
|
||||
// do nothing if compiler thread(s) is not available
|
||||
if (!_initialized ) {
|
||||
return;
|
||||
@ -1037,7 +1038,7 @@ void CompileBroker::compile_method_base(methodHandle method,
|
||||
|
||||
// Acquire our lock.
|
||||
{
|
||||
MutexLocker locker(queue->lock(), THREAD);
|
||||
MutexLocker locker(queue->lock(), thread);
|
||||
|
||||
// Make sure the method has not slipped into the queues since
|
||||
// last we checked; note that those checks were "fast bail-outs".
|
||||
@ -1119,7 +1120,7 @@ void CompileBroker::compile_method_base(methodHandle method,
|
||||
nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
|
||||
int comp_level,
|
||||
methodHandle hot_method, int hot_count,
|
||||
const char* comment, TRAPS) {
|
||||
const char* comment, Thread* THREAD) {
|
||||
// make sure arguments make sense
|
||||
assert(method->method_holder()->klass_part()->oop_is_instance(), "not an instance method");
|
||||
assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
|
||||
@ -1173,10 +1174,10 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
|
||||
assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
|
||||
// some prerequisites that are compiler specific
|
||||
if (compiler(comp_level)->is_c2() || compiler(comp_level)->is_shark()) {
|
||||
method->constants()->resolve_string_constants(CHECK_0);
|
||||
method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NULL);
|
||||
// Resolve all classes seen in the signature of the method
|
||||
// we are compiling.
|
||||
methodOopDesc::load_signature_classes(method, CHECK_0);
|
||||
methodOopDesc::load_signature_classes(method, CHECK_AND_CLEAR_NULL);
|
||||
}
|
||||
|
||||
// If the method is native, do the lookup in the thread requesting
|
||||
@ -1230,7 +1231,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, CHECK_0);
|
||||
compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD);
|
||||
}
|
||||
|
||||
// return requested nmethod
|
||||
|
@ -333,7 +333,7 @@ class CompileBroker: AllStatic {
|
||||
methodHandle hot_method,
|
||||
int hot_count,
|
||||
const char* comment,
|
||||
TRAPS);
|
||||
Thread* thread);
|
||||
static CompileQueue* compile_queue(int comp_level) {
|
||||
if (is_c2_compile(comp_level)) return _c2_method_queue;
|
||||
if (is_c1_compile(comp_level)) return _c1_method_queue;
|
||||
@ -363,7 +363,7 @@ class CompileBroker: AllStatic {
|
||||
int comp_level,
|
||||
methodHandle hot_method,
|
||||
int hot_count,
|
||||
const char* comment, TRAPS);
|
||||
const char* comment, Thread* thread);
|
||||
|
||||
static void compiler_thread_loop();
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -75,10 +75,25 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
set_name("Concurrent Mark-Sweep GC Thread");

if (os::create_thread(this, os::cgc_thread)) {
// XXX: need to set this to low priority
// unless "agressive mode" set; priority
// should be just less than that of VMThread.
os::set_priority(this, NearMaxPriority);
// An old comment here said: "Priority should be just less
// than that of VMThread". Since the VMThread runs at
// NearMaxPriority, the old comment was inaccurate, but
// changing the default priority to NearMaxPriority-1
// could change current behavior, so the default of
// NearMaxPriority stays in place.
//
// Note that there's a possibility of the VMThread
// starving if UseCriticalCMSThreadPriority is on.
// That won't happen on Solaris for various reasons,
// but may well happen on non-Solaris platforms.
int native_prio;
if (UseCriticalCMSThreadPriority) {
native_prio = os::java_to_os_priority[CriticalPriority];
} else {
native_prio = os::java_to_os_priority[NearMaxPriority];
}
os::set_native_priority(this, native_prio);

if (!DisableStartThread) {
os::start_thread(this);
}
File diff suppressed because it is too large
@ -84,8 +84,8 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
|
||||
}
|
||||
|
||||
// iteration
|
||||
bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); }
|
||||
bool iterate(BitMapClosure* cl, MemRegion mr);
|
||||
inline bool iterate(BitMapClosure* cl, MemRegion mr);
|
||||
inline bool iterate(BitMapClosure* cl);
|
||||
|
||||
// Return the address corresponding to the next marked bit at or after
|
||||
// "addr", and before "limit", if "limit" is non-NULL. If there is no
|
||||
@ -349,10 +349,62 @@ typedef enum {
|
||||
high_verbose // per object verbose
|
||||
} CMVerboseLevel;
|
||||
|
||||
class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
private:
YoungList* _young_list;
ConcurrentMark* _cm;

volatile bool _scan_in_progress;
volatile bool _should_abort;
HeapRegion* volatile _next_survivor;

public:
CMRootRegions();
// We actually do most of the initialization in this method.
void init(G1CollectedHeap* g1h, ConcurrentMark* cm);

// Reset the claiming / scanning of the root regions.
void prepare_for_scan();

// Forces get_next() to return NULL so that the iteration aborts early.
void abort() { _should_abort = true; }

// Return true if the CM threads are actively scanning root regions,
// false otherwise.
bool scan_in_progress() { return _scan_in_progress; }

// Claim the next root region to scan atomically, or return NULL if
// all have been claimed.
HeapRegion* claim_next();

// Flag that we're done with root region scanning and notify anyone
// who's waiting on it. If aborted is false, assume that all regions
// have been claimed.
void scan_finished();

// If CM threads are still scanning root regions, wait until they
// are done. Return true if we had to wait, false otherwise.
bool wait_until_scan_finished();
};

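The claim_next() / scan_finished() / wait_until_scan_finished() trio above lets marking workers claim survivor regions atomically while an evacuation pause waits for the scan to complete. A simplified model of that protocol with std::atomic and a condition variable; integer indices stand in for HeapRegion pointers:

#include <atomic>
#include <condition_variable>
#include <mutex>

// Simplified stand-in for CMRootRegions: workers claim region indices 0..n-1;
// a pause thread can wait until scanning is finished.
class RootRegionClaimer {
  int _num_regions;
  std::atomic<int> _next{0};
  std::atomic<bool> _should_abort{false};
  bool _scan_in_progress = false;
  std::mutex _mu;
  std::condition_variable _cv;

 public:
  explicit RootRegionClaimer(int n) : _num_regions(n) {}

  void prepare_for_scan() {
    std::lock_guard<std::mutex> lk(_mu);
    _next = 0;
    _should_abort = false;
    _scan_in_progress = true;
  }

  void abort() { _should_abort = true; }

  // Claim the next region index, or -1 if all are claimed or we were aborted.
  int claim_next() {
    if (_should_abort) return -1;
    int idx = _next.fetch_add(1);
    return idx < _num_regions ? idx : -1;
  }

  // Called once scanning is done; wakes anyone blocked in wait_until_scan_finished().
  void scan_finished() {
    std::lock_guard<std::mutex> lk(_mu);
    _scan_in_progress = false;
    _cv.notify_all();
  }

  // Returns true if we actually had to wait, false otherwise.
  bool wait_until_scan_finished() {
    std::unique_lock<std::mutex> lk(_mu);
    bool waited = _scan_in_progress;
    _cv.wait(lk, [this] { return !_scan_in_progress; });
    return waited;
  }
};

int main() {
  RootRegionClaimer c(4);
  c.prepare_for_scan();
  while (c.claim_next() >= 0) { /* scan the claimed region */ }
  c.scan_finished();
  return c.wait_until_scan_finished() ? 1 : 0;
}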
class ConcurrentMarkThread;
|
||||
|
||||
class ConcurrentMark: public CHeapObj {
|
||||
class ConcurrentMark : public CHeapObj {
|
||||
friend class ConcurrentMarkThread;
|
||||
friend class CMTask;
|
||||
friend class CMBitMapClosure;
|
||||
@ -386,7 +438,7 @@ protected:
|
||||
|
||||
FreeRegionList _cleanup_list;
|
||||
|
||||
// CMS marking support structures
|
||||
// Concurrent marking support structures
|
||||
CMBitMap _markBitMap1;
|
||||
CMBitMap _markBitMap2;
|
||||
CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
|
||||
@ -400,6 +452,9 @@ protected:
|
||||
HeapWord* _heap_start;
|
||||
HeapWord* _heap_end;
|
||||
|
||||
// Root region tracking and claiming.
|
||||
CMRootRegions _root_regions;
|
||||
|
||||
// For gray objects
|
||||
CMMarkStack _markStack; // Grey objects behind global finger.
|
||||
CMRegionStack _regionStack; // Grey regions behind global finger.
|
||||
@ -426,7 +481,6 @@ protected:
|
||||
WorkGangBarrierSync _first_overflow_barrier_sync;
|
||||
WorkGangBarrierSync _second_overflow_barrier_sync;
|
||||
|
||||
|
||||
// this is set by any task, when an overflow on the global data
|
||||
// structures is detected.
|
||||
volatile bool _has_overflown;
|
||||
@ -554,9 +608,9 @@ protected:
|
||||
bool has_overflown() { return _has_overflown; }
|
||||
void set_has_overflown() { _has_overflown = true; }
|
||||
void clear_has_overflown() { _has_overflown = false; }
|
||||
bool restart_for_overflow() { return _restart_for_overflow; }
|
||||
|
||||
bool has_aborted() { return _has_aborted; }
|
||||
bool restart_for_overflow() { return _restart_for_overflow; }
|
||||
|
||||
// Methods to enter the two overflow sync barriers
|
||||
void enter_first_sync_barrier(int task_num);
|
||||
@ -578,6 +632,27 @@ protected:
|
||||
}
|
||||
}
|
||||
|
||||
// Live Data Counting data structures...
|
||||
// These data structures are initialized at the start of
|
||||
// marking. They are written to while marking is active.
|
||||
// They are aggregated during remark; the aggregated values
|
||||
// are then used to populate the _region_bm, _card_bm, and
|
||||
// the total live bytes, which are then subsequently updated
|
||||
// during cleanup.
|
||||
|
||||
// An array of bitmaps (one bit map per task). Each bitmap
|
||||
// is used to record the cards spanned by the live objects
|
||||
// marked by that task/worker.
|
||||
BitMap* _count_card_bitmaps;
|
||||
|
||||
// Used to record the number of marked live bytes
|
||||
// (for each region, by worker thread).
|
||||
size_t** _count_marked_bytes;
|
||||
|
||||
// Card index of the bottom of the G1 heap. Used for biasing indices into
|
||||
// the card bitmaps.
|
||||
intptr_t _heap_bottom_card_num;
|
||||
|
||||
public:
|
||||
// Manipulation of the global mark stack.
|
||||
// Notice that the first mark_stack_push is CAS-based, whereas the
|
||||
@ -671,6 +746,8 @@ public:
|
||||
// Returns true if there are any aborted memory regions.
|
||||
bool has_aborted_regions();
|
||||
|
||||
CMRootRegions* root_regions() { return &_root_regions; }
|
||||
|
||||
bool concurrent_marking_in_progress() {
|
||||
return _concurrent_marking_in_progress;
|
||||
}
|
||||
@ -703,6 +780,7 @@ public:
|
||||
|
||||
ConcurrentMark(ReservedSpace rs, int max_regions);
|
||||
~ConcurrentMark();
|
||||
|
||||
ConcurrentMarkThread* cmThread() { return _cmThread; }
|
||||
|
||||
CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
|
||||
@ -720,8 +798,17 @@ public:
|
||||
// G1CollectedHeap
|
||||
|
||||
// This notifies CM that a root during initial-mark needs to be
|
||||
// grayed. It is MT-safe.
|
||||
inline void grayRoot(oop obj, size_t word_size);
|
||||
// grayed. It is MT-safe. word_size is the size of the object in
|
||||
// words. It is passed explicitly as sometimes we cannot calculate
|
||||
// it from the given object because it might be in an inconsistent
|
||||
// state (e.g., in to-space and being copied). So the caller is
|
||||
// responsible for dealing with this issue (e.g., get the size from
|
||||
// the from-space image when the to-space image might be
|
||||
// inconsistent) and always passing the size. hr is the region that
|
||||
// contains the object and it's passed optionally from callers who
|
||||
// might already have it (no point in recalculating it).
|
||||
inline void grayRoot(oop obj, size_t word_size,
|
||||
uint worker_id, HeapRegion* hr = NULL);
|
||||
|
||||
// It's used during evacuation pauses to gray a region, if
|
||||
// necessary, and it's MT-safe. It assumes that the caller has
|
||||
@ -772,6 +859,13 @@ public:
|
||||
void checkpointRootsInitialPre();
|
||||
void checkpointRootsInitialPost();
|
||||
|
||||
// Scan all the root regions and mark everything reachable from
|
||||
// them.
|
||||
void scanRootRegions();
|
||||
|
||||
// Scan a single root region and mark everything reachable from it.
|
||||
void scanRootRegion(HeapRegion* hr, uint worker_id);
|
||||
|
||||
// Do concurrent phase of marking, to a tentative transitive closure.
|
||||
void markFromRoots();
|
||||
|
||||
@ -781,15 +875,13 @@ public:
|
||||
|
||||
void checkpointRootsFinal(bool clear_all_soft_refs);
|
||||
void checkpointRootsFinalWork();
|
||||
void calcDesiredRegions();
|
||||
void cleanup();
|
||||
void completeCleanup();
|
||||
|
||||
// Mark in the previous bitmap. NB: this is usually read-only, so use
|
||||
// this carefully!
|
||||
inline void markPrev(oop p);
|
||||
inline void markNext(oop p);
|
||||
void clear(oop p);
|
||||
|
||||
// Clears marks for all objects in the given range, for the prev,
|
||||
// next, or both bitmaps. NB: the previous bitmap is usually
|
||||
// read-only, so use this carefully!
|
||||
@ -913,6 +1005,114 @@ public:
|
||||
bool verbose_high() {
|
||||
return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
|
||||
}
|
||||
|
||||
// Counting data structure accessors
|
||||
|
||||
// Returns the card number of the bottom of the G1 heap.
|
||||
// Used in biasing indices into accounting card bitmaps.
|
||||
intptr_t heap_bottom_card_num() const {
|
||||
return _heap_bottom_card_num;
|
||||
}
|
||||
|
||||
// Returns the card bitmap for a given task or worker id.
|
||||
BitMap* count_card_bitmap_for(uint worker_id) {
|
||||
assert(0 <= worker_id && worker_id < _max_task_num, "oob");
|
||||
assert(_count_card_bitmaps != NULL, "uninitialized");
|
||||
BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
|
||||
assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
|
||||
return task_card_bm;
|
||||
}
|
||||
|
||||
// Returns the array containing the marked bytes for each region,
|
||||
// for the given worker or task id.
|
||||
size_t* count_marked_bytes_array_for(uint worker_id) {
|
||||
assert(0 <= worker_id && worker_id < _max_task_num, "oob");
|
||||
assert(_count_marked_bytes != NULL, "uninitialized");
|
||||
size_t* marked_bytes_array = _count_marked_bytes[worker_id];
|
||||
assert(marked_bytes_array != NULL, "uninitialized");
|
||||
return marked_bytes_array;
|
||||
}
|
||||
|
||||
// Returns the index in the liveness accounting card table bitmap
|
||||
// for the given address
|
||||
inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
|
||||
|
||||
// Counts the size of the given memory region in the the given
|
||||
// marked_bytes array slot for the given HeapRegion.
|
||||
// Sets the bits in the given card bitmap that are associated with the
|
||||
// cards that are spanned by the memory region.
|
||||
inline void count_region(MemRegion mr, HeapRegion* hr,
|
||||
size_t* marked_bytes_array,
|
||||
BitMap* task_card_bm);
|
||||
|
||||
// Counts the given memory region in the task/worker counting
|
||||
// data structures for the given worker id.
|
||||
inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
|
||||
|
||||
// Counts the given memory region in the task/worker counting
|
||||
// data structures for the given worker id.
|
||||
inline void count_region(MemRegion mr, uint worker_id);
|
||||
|
||||
// Counts the given object in the given task/worker counting
|
||||
// data structures.
|
||||
inline void count_object(oop obj, HeapRegion* hr,
|
||||
size_t* marked_bytes_array,
|
||||
BitMap* task_card_bm);
|
||||
|
||||
// Counts the given object in the task/worker counting data
|
||||
// structures for the given worker id.
|
||||
inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
|
||||
|
||||
// Attempts to mark the given object and, if successful, counts
|
||||
// the object in the given task/worker counting structures.
|
||||
inline bool par_mark_and_count(oop obj, HeapRegion* hr,
|
||||
size_t* marked_bytes_array,
|
||||
BitMap* task_card_bm);
|
||||
|
||||
// Attempts to mark the given object and, if successful, counts
|
||||
// the object in the task/worker counting structures for the
|
||||
// given worker id.
|
||||
inline bool par_mark_and_count(oop obj, size_t word_size,
|
||||
HeapRegion* hr, uint worker_id);
|
||||
|
||||
// Attempts to mark the given object and, if successful, counts
|
||||
// the object in the task/worker counting structures for the
|
||||
// given worker id.
|
||||
inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
|
||||
|
||||
// Similar to the above routine but we don't know the heap region that
|
||||
// contains the object to be marked/counted, which this routine looks up.
|
||||
inline bool par_mark_and_count(oop obj, uint worker_id);
|
||||
|
||||
// Similar to the above routine but there are times when we cannot
|
||||
// safely calculate the size of obj due to races and we, therefore,
|
||||
// pass the size in as a parameter. It is the caller's responsibility
|
||||
// to ensure that the size passed in for obj is valid.
|
||||
inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
|
||||
|
||||
// Unconditionally mark the given object, and unconditionally count
|
||||
// the object in the counting structures for worker id 0.
|
||||
// Should *not* be called from parallel code.
|
||||
inline bool mark_and_count(oop obj, HeapRegion* hr);
|
||||
|
||||
// Similar to the above routine but we don't know the heap region that
|
||||
// contains the object to be marked/counted, which this routine looks up.
|
||||
// Should *not* be called from parallel code.
|
||||
inline bool mark_and_count(oop obj);
|
||||
|
||||
protected:
|
||||
// Clear all the per-task bitmaps and arrays used to store the
|
||||
// counting data.
|
||||
void clear_all_count_data();
|
||||
|
||||
// Aggregates the counting data for each worker/task
|
||||
// that was constructed while marking. Also sets
|
||||
// the amount of marked bytes for each region and
|
||||
// the top at concurrent mark count.
|
||||
void aggregate_count_data();
|
||||
|
||||
// Verification routine
|
||||
void verify_count_data();
|
||||
};
|
||||
|
||||
// A class representing a marking task.
|
||||
@ -1031,6 +1231,12 @@ private:
|
||||
|
||||
TruncatedSeq _marking_step_diffs_ms;
|
||||
|
||||
// Counting data structures. Embedding the task's marked_bytes_array
|
||||
// and card bitmap into the actual task saves having to go through
|
||||
// the ConcurrentMark object.
|
||||
size_t* _marked_bytes_array;
|
||||
BitMap* _card_bm;
|
||||
|
||||
// LOTS of statistics related with this task
|
||||
#if _MARKING_STATS_
|
||||
NumberSeq _all_clock_intervals_ms;
|
||||
@ -1196,6 +1402,7 @@ public:
|
||||
}
|
||||
|
||||
CMTask(int task_num, ConcurrentMark *cm,
|
||||
size_t* marked_bytes, BitMap* card_bm,
|
||||
CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
|
||||
|
||||
// it prints statistics associated with this task
|
||||
|
@ -28,6 +28,214 @@
|
||||
#include "gc_implementation/g1/concurrentMark.hpp"
|
||||
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
|
||||
|
||||
// Returns the index in the liveness accounting card bitmap
// for the given address
inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
// Below, the term "card num" means the result of shifting an address
// by the card shift -- address 0 corresponds to card number 0. One
// must subtract the card num of the bottom of the heap to obtain a
// card table index.

intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
return card_num - heap_bottom_card_num();
}

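card_bitmap_index_for() above biases the card number of an address by the card number of the heap bottom, so index 0 is the first card of the heap. A standalone illustration of that arithmetic; the 9-bit card shift (512-byte cards) is assumed here:

#include <cstdint>
#include <iostream>

const int kCardShift = 9;  // assumed 512-byte cards

// Bias the card number by the card number of the heap bottom so the
// first card of the heap gets index 0.
static int64_t card_index_for(uintptr_t addr, uintptr_t heap_bottom) {
  int64_t card_num        = static_cast<int64_t>(addr >> kCardShift);
  int64_t bottom_card_num = static_cast<int64_t>(heap_bottom >> kCardShift);
  return card_num - bottom_card_num;
}

int main() {
  uintptr_t bottom = 0x40000000;                           // hypothetical heap base
  std::cout << card_index_for(bottom, bottom)        << '\n';  // 0
  std::cout << card_index_for(bottom + 1024, bottom) << '\n';  // 2
}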
// Counts the given memory region in the given task/worker
|
||||
// counting data structures.
|
||||
inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
|
||||
size_t* marked_bytes_array,
|
||||
BitMap* task_card_bm) {
|
||||
G1CollectedHeap* g1h = _g1h;
|
||||
HeapWord* start = mr.start();
|
||||
HeapWord* last = mr.last();
|
||||
size_t region_size_bytes = mr.byte_size();
|
||||
size_t index = hr->hrs_index();
|
||||
|
||||
assert(!hr->continuesHumongous(), "should not be HC region");
|
||||
assert(hr == g1h->heap_region_containing(start), "sanity");
|
||||
assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
|
||||
assert(marked_bytes_array != NULL, "pre-condition");
|
||||
assert(task_card_bm != NULL, "pre-condition");
|
||||
|
||||
// Add to the task local marked bytes for this region.
|
||||
marked_bytes_array[index] += region_size_bytes;
|
||||
|
||||
BitMap::idx_t start_idx = card_bitmap_index_for(start);
|
||||
BitMap::idx_t last_idx = card_bitmap_index_for(last);
|
||||
|
||||
// The card bitmap is task/worker specific => no need to use 'par' routines.
|
||||
// Set bits in the inclusive bit range [start_idx, last_idx].
|
||||
//
|
||||
// For small ranges use a simple loop; otherwise use set_range
|
||||
// The range are the cards that are spanned by the object/region
|
||||
// so 8 cards will allow objects/regions up to 4K to be handled
|
||||
// using the loop.
|
||||
if ((last_idx - start_idx) <= 8) {
|
||||
for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
|
||||
task_card_bm->set_bit(i);
|
||||
}
|
||||
} else {
|
||||
assert(last_idx < task_card_bm->size(), "sanity");
|
||||
// Note: BitMap::set_range() is exclusive.
|
||||
task_card_bm->set_range(start_idx, last_idx+1);
|
||||
}
|
||||
}
|
||||
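count_region() above sets an inclusive range of card bits, using a short loop for spans of up to 8 cards and a bulk set_range() otherwise. A small sketch of that choice, with std::vector<bool> standing in for the task-local card BitMap:

#include <algorithm>
#include <cstddef>
#include <vector>

// Set the inclusive bit range [start_idx, last_idx]: a simple loop for small
// spans (the 8-card threshold comes from the comment above), a bulk fill otherwise.
static void set_card_range(std::vector<bool>& bm, std::size_t start_idx, std::size_t last_idx) {
  if (last_idx - start_idx <= 8) {
    for (std::size_t i = start_idx; i <= last_idx; ++i) {
      bm[i] = true;
    }
  } else {
    // The +1 converts the inclusive last_idx into the exclusive end a range fill expects.
    std::fill(bm.begin() + start_idx, bm.begin() + last_idx + 1, true);
  }
}

int main() {
  std::vector<bool> bm(64, false);
  set_card_range(bm, 3, 5);    // loop path
  set_card_range(bm, 10, 30);  // bulk path
  return bm[4] && bm[20] ? 0 : 1;
}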
|
||||
// Counts the given memory region in the task/worker counting
|
||||
// data structures for the given worker id.
|
||||
inline void ConcurrentMark::count_region(MemRegion mr,
|
||||
HeapRegion* hr,
|
||||
uint worker_id) {
|
||||
size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
|
||||
BitMap* task_card_bm = count_card_bitmap_for(worker_id);
|
||||
count_region(mr, hr, marked_bytes_array, task_card_bm);
|
||||
}
|
||||
|
||||
// Counts the given memory region, which may be a single object, in the
|
||||
// task/worker counting data structures for the given worker id.
|
||||
inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
|
||||
HeapWord* addr = mr.start();
|
||||
HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
|
||||
count_region(mr, hr, worker_id);
|
||||
}
|
||||
|
||||
// Counts the given object in the given task/worker counting data structures.
|
||||
inline void ConcurrentMark::count_object(oop obj,
|
||||
HeapRegion* hr,
|
||||
size_t* marked_bytes_array,
|
||||
BitMap* task_card_bm) {
|
||||
MemRegion mr((HeapWord*)obj, obj->size());
|
||||
count_region(mr, hr, marked_bytes_array, task_card_bm);
|
||||
}
|
||||
|
||||
// Counts the given object in the task/worker counting data
|
||||
// structures for the given worker id.
|
||||
inline void ConcurrentMark::count_object(oop obj,
|
||||
HeapRegion* hr,
|
||||
uint worker_id) {
|
||||
size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
|
||||
BitMap* task_card_bm = count_card_bitmap_for(worker_id);
|
||||
HeapWord* addr = (HeapWord*) obj;
|
||||
count_object(obj, hr, marked_bytes_array, task_card_bm);
|
||||
}
|
||||
|
||||
// Attempts to mark the given object and, if successful, counts
|
||||
// the object in the given task/worker counting structures.
|
||||
inline bool ConcurrentMark::par_mark_and_count(oop obj,
|
||||
HeapRegion* hr,
|
||||
size_t* marked_bytes_array,
|
||||
BitMap* task_card_bm) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_nextMarkBitMap->parMark(addr)) {
|
||||
// Update the task specific count data for the object.
|
||||
count_object(obj, hr, marked_bytes_array, task_card_bm);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Attempts to mark the given object and, if successful, counts
|
||||
// the object in the task/worker counting structures for the
|
||||
// given worker id.
|
||||
inline bool ConcurrentMark::par_mark_and_count(oop obj,
|
||||
size_t word_size,
|
||||
HeapRegion* hr,
|
||||
uint worker_id) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_nextMarkBitMap->parMark(addr)) {
|
||||
MemRegion mr(addr, word_size);
|
||||
count_region(mr, hr, worker_id);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
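Each par_mark_and_count() variant above marks the object with a CAS on the next marking bitmap and only updates the per-worker liveness data when that CAS wins, so a live object is counted exactly once. A minimal model of the pattern; the names are illustrative, not HotSpot's:

#include <atomic>
#include <cstddef>
#include <iostream>

struct ObjectMark { std::atomic<bool> marked{false}; };

// Only the thread whose compare-exchange flips the mark updates the counter,
// which is task/worker-local in the real code and so needs no atomics itself.
static bool par_mark_and_count(ObjectMark& m, std::size_t word_size, std::size_t* live_words) {
  bool expected = false;
  if (m.marked.compare_exchange_strong(expected, true)) {
    *live_words += word_size;
    return true;
  }
  return false;  // another worker already marked (and counted) the object
}

int main() {
  ObjectMark m;
  std::size_t live = 0;
  par_mark_and_count(m, 8, &live);
  par_mark_and_count(m, 8, &live);  // second attempt loses the race, counts nothing
  std::cout << live << '\n';        // prints 8
}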
|
||||
// Attempts to mark the given object and, if successful, counts
|
||||
// the object in the task/worker counting structures for the
|
||||
// given worker id.
|
||||
inline bool ConcurrentMark::par_mark_and_count(oop obj,
|
||||
HeapRegion* hr,
|
||||
uint worker_id) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_nextMarkBitMap->parMark(addr)) {
|
||||
// Update the task specific count data for the object.
|
||||
count_object(obj, hr, worker_id);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// As above - but we don't know the heap region containing the
|
||||
// object and so have to supply it.
|
||||
inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
|
||||
return par_mark_and_count(obj, hr, worker_id);
|
||||
}
|
||||
|
||||
// Similar to the above routine but we already know the size, in words, of
|
||||
// the object that we wish to mark/count
|
||||
inline bool ConcurrentMark::par_mark_and_count(oop obj,
|
||||
size_t word_size,
|
||||
uint worker_id) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
if (_nextMarkBitMap->parMark(addr)) {
|
||||
// Update the task specific count data for the object.
|
||||
MemRegion mr(addr, word_size);
|
||||
count_region(mr, worker_id);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Unconditionally mark the given object, and unconditionally count
|
||||
// the object in the counting structures for worker id 0.
|
||||
// Should *not* be called from parallel code.
|
||||
inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
_nextMarkBitMap->mark(addr);
|
||||
// Update the task specific count data for the object.
|
||||
count_object(obj, hr, 0 /* worker_id */);
|
||||
return true;
|
||||
}
|
||||
|
||||
// As above - but we don't have the heap region containing the
|
||||
// object, so we have to supply it.
|
||||
inline bool ConcurrentMark::mark_and_count(oop obj) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
|
||||
return mark_and_count(obj, hr);
|
||||
}
|
||||
|
||||
inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
|
||||
HeapWord* start_addr = MAX2(startWord(), mr.start());
|
||||
HeapWord* end_addr = MIN2(endWord(), mr.end());
|
||||
|
||||
if (end_addr > start_addr) {
|
||||
// Right-open interval [start-offset, end-offset).
|
||||
BitMap::idx_t start_offset = heapWordToOffset(start_addr);
|
||||
BitMap::idx_t end_offset = heapWordToOffset(end_addr);
|
||||
|
||||
start_offset = _bm.get_next_one_offset(start_offset, end_offset);
|
||||
while (start_offset < end_offset) {
|
||||
HeapWord* obj_addr = offsetToHeapWord(start_offset);
|
||||
oop obj = (oop) obj_addr;
|
||||
if (!cl->do_bit(start_offset)) {
|
||||
return false;
|
||||
}
|
||||
HeapWord* next_addr = MIN2(obj_addr + obj->size(), end_addr);
|
||||
BitMap::idx_t next_offset = heapWordToOffset(next_addr);
|
||||
start_offset = _bm.get_next_one_offset(next_offset, end_offset);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
|
||||
MemRegion mr(startWord(), sizeInWords());
|
||||
return iterate(cl, mr);
|
||||
}
|
||||
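The iterate() overloads above clip the bitmap walk to a MemRegion and, after visiting an object, resume the search past its last word instead of rescanning the interior. A compact model of that skip-by-size walk, with word-indexed vectors standing in for the bitmap and heap:

#include <cstddef>
#include <iostream>
#include <vector>

// bits[i] is true when an object starts at word i; size_at[i] is that
// object's size in words. Visit each marked object in [start, end) once,
// jumping over the object body instead of rescanning it.
static bool iterate_marked(const std::vector<bool>& bits,
                           const std::vector<std::size_t>& size_at,
                           std::size_t start, std::size_t end) {
  std::size_t i = start;
  while (i < end) {
    if (!bits[i]) { ++i; continue; }
    std::cout << "object at word " << i << ", " << size_at[i] << " words\n";
    i += size_at[i];  // resume after the object's last word
  }
  return true;        // the real closure can abort the walk by returning false
}

int main() {
  std::vector<bool> bits(16, false);
  std::vector<std::size_t> size_at(16, 0);
  bits[2] = true;  size_at[2] = 3;
  bits[7] = true;  size_at[7] = 2;
  iterate_marked(bits, size_at, 0, 16);
}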
|
||||
inline void CMTask::push(oop obj) {
|
||||
HeapWord* objAddr = (HeapWord*) obj;
|
||||
assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
|
||||
@ -84,7 +292,7 @@ inline void CMTask::deal_with_reference(oop obj) {
|
||||
|
||||
HeapWord* objAddr = (HeapWord*) obj;
|
||||
assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
|
||||
if (_g1h->is_in_g1_reserved(objAddr)) {
|
||||
if (_g1h->is_in_g1_reserved(objAddr)) {
|
||||
assert(obj != NULL, "null check is implicit");
|
||||
if (!_nextMarkBitMap->isMarked(objAddr)) {
|
||||
// Only get the containing region if the object is not marked on the
|
||||
@ -98,9 +306,9 @@ inline void CMTask::deal_with_reference(oop obj) {
|
||||
}
|
||||
|
||||
// we need to mark it first
|
||||
if (_nextMarkBitMap->parMark(objAddr)) {
|
||||
if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
|
||||
// No OrderAccess:store_load() is needed. It is implicit in the
|
||||
// CAS done in parMark(objAddr) above
|
||||
// CAS done in CMBitMap::parMark() call in the routine above.
|
||||
HeapWord* global_finger = _cm->finger();
|
||||
|
||||
#if _CHECK_BOTH_FINGERS_
|
||||
@ -160,25 +368,20 @@ inline void ConcurrentMark::markPrev(oop p) {
|
||||
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
|
||||
}
|
||||
|
||||
inline void ConcurrentMark::markNext(oop p) {
|
||||
assert(!_nextMarkBitMap->isMarked((HeapWord*) p), "sanity");
|
||||
_nextMarkBitMap->mark((HeapWord*) p);
|
||||
}
|
||||
|
||||
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
|
||||
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
|
||||
uint worker_id, HeapRegion* hr) {
|
||||
assert(obj != NULL, "pre-condition");
|
||||
HeapWord* addr = (HeapWord*) obj;
|
||||
|
||||
// Currently we don't do anything with word_size but we will use it
|
||||
// in the very near future in the liveness calculation piggy-backing
|
||||
// changes.
|
||||
|
||||
#ifdef ASSERT
|
||||
HeapRegion* hr = _g1h->heap_region_containing(addr);
|
||||
if (hr == NULL) {
|
||||
hr = _g1h->heap_region_containing_raw(addr);
|
||||
} else {
|
||||
assert(hr->is_in(addr), "pre-condition");
|
||||
}
|
||||
assert(hr != NULL, "sanity");
|
||||
assert(!hr->is_survivor(), "should not allocate survivors during IM");
|
||||
assert(addr < hr->next_top_at_mark_start(),
|
||||
err_msg("addr: "PTR_FORMAT" hr: "HR_FORMAT" NTAMS: "PTR_FORMAT,
|
||||
addr, HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start()));
|
||||
// Given that we're looking for a region that contains an object
|
||||
// header it's impossible to get back a HC region.
|
||||
assert(!hr->continuesHumongous(), "sanity");
|
||||
|
||||
// We cannot assert that word_size == obj->size() given that obj
|
||||
// might not be in a consistent state (another thread might be in
|
||||
// the process of copying it). So the best thing we can do is to
|
||||
@ -188,10 +391,11 @@ inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
|
||||
err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
|
||||
word_size * HeapWordSize, hr->capacity(),
|
||||
HR_FORMAT_PARAMS(hr)));
|
||||
#endif // ASSERT
|
||||
|
||||
if (!_nextMarkBitMap->isMarked(addr)) {
|
||||
_nextMarkBitMap->parMark(addr);
|
||||
if (addr < hr->next_top_at_mark_start()) {
|
||||
if (!_nextMarkBitMap->isMarked(addr)) {
|
||||
par_mark_and_count(obj, word_size, hr, worker_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -44,9 +44,7 @@ ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
|
||||
_started(false),
|
||||
_in_progress(false),
|
||||
_vtime_accum(0.0),
|
||||
_vtime_mark_accum(0.0),
|
||||
_vtime_count_accum(0.0)
|
||||
{
|
||||
_vtime_mark_accum(0.0) {
|
||||
create_and_start();
|
||||
}
|
||||
|
||||
@ -94,9 +92,36 @@ void ConcurrentMarkThread::run() {
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
double cycle_start = os::elapsedVTime();
|
||||
double mark_start_sec = os::elapsedTime();
|
||||
char verbose_str[128];
|
||||
|
||||
// We have to ensure that we finish scanning the root regions
|
||||
// before the next GC takes place. To ensure this we have to
|
||||
// make sure that we do not join the STS until the root regions
|
||||
// have been scanned. If we did then it's possible that a
|
||||
// subsequent GC could block us from joining the STS and proceed
|
||||
// without the root regions have been scanned which would be a
|
||||
// correctness issue.
|
||||
|
||||
double scan_start = os::elapsedTime();
|
||||
if (!cm()->has_aborted()) {
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
|
||||
}
|
||||
|
||||
_cm->scanRootRegions();
|
||||
|
||||
double scan_end = os::elapsedTime();
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||
gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf]",
|
||||
scan_end - scan_start);
|
||||
}
|
||||
}
|
||||
|
||||
double mark_start_sec = os::elapsedTime();
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||
@ -148,36 +173,12 @@ void ConcurrentMarkThread::run() {
|
||||
}
|
||||
} while (cm()->restart_for_overflow());
|
||||
|
||||
double counting_start_time = os::elapsedVTime();
|
||||
if (!cm()->has_aborted()) {
|
||||
double count_start_sec = os::elapsedTime();
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||
gclog_or_tty->print_cr("[GC concurrent-count-start]");
|
||||
}
|
||||
|
||||
_sts.join();
|
||||
_cm->calcDesiredRegions();
|
||||
_sts.leave();
|
||||
|
||||
if (!cm()->has_aborted()) {
|
||||
double count_end_sec = os::elapsedTime();
|
||||
if (PrintGC) {
|
||||
gclog_or_tty->date_stamp(PrintGCDateStamps);
|
||||
gclog_or_tty->stamp(PrintGCTimeStamps);
|
||||
gclog_or_tty->print_cr("[GC concurrent-count-end, %1.7lf]",
|
||||
count_end_sec - count_start_sec);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
double end_time = os::elapsedVTime();
|
||||
_vtime_count_accum += (end_time - counting_start_time);
|
||||
// Update the total virtual time before doing this, since it will try
|
||||
// to measure it to get the vtime for this marking. We purposely
|
||||
// neglect the presumably-short "completeCleanup" phase here.
|
||||
_vtime_accum = (end_time - _vtime_start);
|
||||
|
||||
if (!cm()->has_aborted()) {
|
||||
if (g1_policy->adaptive_young_list_length()) {
|
||||
double now = os::elapsedTime();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -40,7 +40,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
|
||||
double _vtime_accum; // Accumulated virtual time.
|
||||
|
||||
double _vtime_mark_accum;
|
||||
double _vtime_count_accum;
|
||||
|
||||
public:
|
||||
virtual void run();
|
||||
@ -69,8 +68,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
|
||||
double vtime_accum();
|
||||
// Marking virtual time so far
|
||||
double vtime_mark_accum();
|
||||
// Counting virtual time so far.
|
||||
double vtime_count_accum() { return _vtime_count_accum; }
|
||||
|
||||
ConcurrentMark* cm() { return _cm; }
|
||||
|
||||
|
@ -174,13 +174,10 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
YoungList::YoungList(G1CollectedHeap* g1h)
|
||||
: _g1h(g1h), _head(NULL),
|
||||
_length(0),
|
||||
_last_sampled_rs_lengths(0),
|
||||
_survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
|
||||
{
|
||||
guarantee( check_list_empty(false), "just making sure..." );
|
||||
YoungList::YoungList(G1CollectedHeap* g1h) :
|
||||
_g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
|
||||
_survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
|
||||
guarantee(check_list_empty(false), "just making sure...");
|
||||
}
|
||||
|
||||
void YoungList::push_region(HeapRegion *hr) {
|
||||
@ -1029,6 +1026,15 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
|
||||
assert(isHumongous(word_size), "attempt_allocation_humongous() "
|
||||
"should only be called for humongous allocations");
|
||||
|
||||
// Humongous objects can exhaust the heap quickly, so we should check if we
|
||||
// need to start a marking cycle at each humongous object allocation. We do
|
||||
// the check before we do the actual allocation. The reason for doing it
|
||||
// before the allocation is that we avoid having to keep track of the newly
|
||||
// allocated memory while we do a GC.
|
||||
if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
|
||||
collect(GCCause::_g1_humongous_allocation);
|
||||
}
|
||||
|
||||
// We will loop until a) we manage to successfully perform the
|
||||
// allocation or b) we successfully schedule a collection which
|
||||
// fails to perform the allocation. b) is the only case when we'll
|
||||
@ -1111,7 +1117,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
|
||||
return _mutator_alloc_region.attempt_allocation_locked(word_size,
|
||||
false /* bot_updates */);
|
||||
} else {
|
||||
return humongous_obj_allocate(word_size);
|
||||
HeapWord* result = humongous_obj_allocate(word_size);
|
||||
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
|
||||
g1_policy()->set_initiate_conc_mark_if_possible();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
ShouldNotReachHere();
|
||||
@ -1257,7 +1267,18 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
double start = os::elapsedTime();
|
||||
g1_policy()->record_full_collection_start();
|
||||
|
||||
// Note: When we have a more flexible GC logging framework that
|
||||
// allows us to add optional attributes to a GC log record we
|
||||
// could consider timing and reporting how long we wait in the
|
||||
// following two methods.
|
||||
wait_while_free_regions_coming();
|
||||
// If we start the compaction before the CM threads finish
|
||||
// scanning the root regions we might trip them over as we'll
|
||||
// be moving objects / updating references. So let's wait until
|
||||
// they are done. By telling them to abort, they should complete
|
||||
// early.
|
||||
_cm->root_regions()->abort();
|
||||
_cm->root_regions()->wait_until_scan_finished();
|
||||
append_secondary_free_list_if_not_empty_with_lock();
|
||||
|
||||
gc_prologue(true);
|
||||
@ -1286,7 +1307,8 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
|
||||
ref_processor_cm()->verify_no_references_recorded();
|
||||
|
||||
// Abandon current iterations of concurrent marking and concurrent
|
||||
// refinement, if any are in progress.
|
||||
// refinement, if any are in progress. We have to do this before
|
||||
// wait_until_scan_finished() below.
|
||||
concurrent_mark()->abort();
|
||||
|
||||
// Make sure we'll choose a new allocation region afterwards.
|
||||
@ -2295,7 +2317,8 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
|
||||
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
|
||||
return
|
||||
((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
|
||||
(cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
|
||||
(cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
|
||||
cause == GCCause::_g1_humongous_allocation);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
@ -3545,19 +3568,25 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
verify_region_sets_optional();
|
||||
verify_dirty_young_regions();
|
||||
|
||||
// This call will decide whether this pause is an initial-mark
|
||||
// pause. If it is, during_initial_mark_pause() will return true
|
||||
// for the duration of this pause.
|
||||
g1_policy()->decide_on_conc_mark_initiation();
|
||||
|
||||
// We do not allow initial-mark to be piggy-backed on a mixed GC.
|
||||
assert(!g1_policy()->during_initial_mark_pause() ||
|
||||
g1_policy()->gcs_are_young(), "sanity");
|
||||
|
||||
// We also do not allow mixed GCs during marking.
|
||||
assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
|
||||
|
||||
// Record whether this pause is an initial mark. When the current
|
||||
// thread has completed its logging output and it's safe to signal
|
||||
// the CM thread, the flag's value in the policy has been reset.
|
||||
bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
|
||||
|
||||
// Inner scope for scope based logging, timers, and stats collection
|
||||
{
|
||||
// This call will decide whether this pause is an initial-mark
|
||||
// pause. If it is, during_initial_mark_pause() will return true
|
||||
// for the duration of this pause.
|
||||
g1_policy()->decide_on_conc_mark_initiation();
|
||||
|
||||
// We do not allow initial-mark to be piggy-backed on a mixed GC.
|
||||
assert(!g1_policy()->during_initial_mark_pause() ||
|
||||
g1_policy()->gcs_are_young(), "sanity");
|
||||
|
||||
// We also do not allow mixed GCs during marking.
|
||||
assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
|
||||
|
||||
char verbose_str[128];
|
||||
sprintf(verbose_str, "GC pause ");
|
||||
if (g1_policy()->gcs_are_young()) {
|
||||
@ -3613,7 +3642,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
Universe::verify(/* allow dirty */ false,
|
||||
/* silent */ false,
|
||||
/* option */ VerifyOption_G1UsePrevMarking);
|
||||
|
||||
}
|
||||
|
||||
COMPILER2_PRESENT(DerivedPointerTable::clear());
|
||||
@@ -3656,6 +3684,18 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  g1_policy()->record_collection_pause_start(start_time_sec,
                                             start_used_bytes);

  double scan_wait_start = os::elapsedTime();
  // We have to wait until the CM threads finish scanning the
  // root regions as it's the only way to ensure that all the
  // objects on them have been correctly scanned before we start
  // moving them during the GC.
  bool waited = _cm->root_regions()->wait_until_scan_finished();
  if (waited) {
    double scan_wait_end = os::elapsedTime();
    double wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
    g1_policy()->record_root_region_scan_wait_time(wait_time_ms);
  }

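The lines above time how long the pause had to wait for the concurrent-mark root-region scan and record the result only when a wait actually happened. A minimal sketch of that measure-and-record pattern, using std::chrono in place of os::elapsedTime() and a placeholder for the wait call:

```cpp
#include <chrono>
#include <cstdio>

// Placeholder for _cm->root_regions()->wait_until_scan_finished(); returns
// true only if the caller really had to block.
static bool wait_until_scan_finished() { return true; }

int main() {
  auto scan_wait_start = std::chrono::steady_clock::now();
  bool waited = wait_until_scan_finished();
  if (waited) {
    auto scan_wait_end = std::chrono::steady_clock::now();
    double wait_time_ms =
        std::chrono::duration<double, std::milli>(scan_wait_end - scan_wait_start).count();
    // In the real code this value feeds g1_policy()->record_root_region_scan_wait_time().
    std::printf("Root Region Scan Waiting: %.3f ms\n", wait_time_ms);
  }
  return 0;
}
```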
#if YOUNG_LIST_VERBOSE
|
||||
gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
|
||||
_young_list->print();
|
||||
@ -3765,16 +3805,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
}
|
||||
|
||||
if (g1_policy()->during_initial_mark_pause()) {
|
||||
// We have to do this before we notify the CM threads that
|
||||
// they can start working to make sure that all the
|
||||
// appropriate initialization is done on the CM object.
|
||||
concurrent_mark()->checkpointRootsInitialPost();
|
||||
set_marking_started();
|
||||
// CAUTION: after the doConcurrentMark() call below,
|
||||
// the concurrent marking thread(s) could be running
|
||||
// concurrently with us. Make sure that anything after
|
||||
// this point does not assume that we are the only GC thread
|
||||
// running. Note: of course, the actual marking work will
|
||||
// not start until the safepoint itself is released in
|
||||
// ConcurrentGCThread::safepoint_desynchronize().
|
||||
doConcurrentMark();
|
||||
// Note that we don't actually trigger the CM thread at
|
||||
// this point. We do that later when we're sure that
|
||||
// the current thread has completed its logging output.
|
||||
}
|
||||
|
||||
allocate_dummy_regions();
|
||||
@ -3884,6 +3922,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
}
|
||||
}
|
||||
|
||||
// The closing of the inner scope, immediately above, will complete
|
||||
// the PrintGC logging output. The record_collection_pause_end() call
|
||||
// above will complete the logging output of PrintGCDetails.
|
||||
//
|
||||
// It is not yet safe, however, to tell the concurrent mark to
|
||||
// start as we have some optional output below. We don't want the
|
||||
// output from the concurrent mark thread interfering with this
|
||||
// logging output either.
|
||||
|
||||
_hrs.verify_optional();
|
||||
verify_region_sets_optional();
|
||||
|
||||
@ -3901,6 +3948,21 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
g1_rem_set()->print_summary_info();
|
||||
}
|
||||
|
||||
// It should now be safe to tell the concurrent mark thread to start
|
||||
// without its logging output interfering with the logging output
|
||||
// that came from the pause.
|
||||
|
||||
if (should_start_conc_mark) {
|
||||
// CAUTION: after the doConcurrentMark() call below,
|
||||
// the concurrent marking thread(s) could be running
|
||||
// concurrently with us. Make sure that anything after
|
||||
// this point does not assume that we are the only GC thread
|
||||
// running. Note: of course, the actual marking work will
|
||||
// not start until the safepoint itself is released in
|
||||
// ConcurrentGCThread::safepoint_desynchronize().
|
||||
doConcurrentMark();
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -4162,7 +4224,7 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
|
||||
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
|
||||
ParGCAllocBuffer(gclab_word_size), _retired(false) { }
|
||||
|
||||
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
|
||||
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
|
||||
: _g1h(g1h),
|
||||
_refs(g1h->task_queue(queue_num)),
|
||||
_dcq(&g1h->dirty_card_queue_set()),
|
||||
@ -4283,6 +4345,7 @@ G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
|
||||
G1ParScanThreadState* par_scan_state) :
|
||||
_g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
|
||||
_par_scan_state(par_scan_state),
|
||||
_worker_id(par_scan_state->queue_num()),
|
||||
_during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
|
||||
_mark_in_progress(_g1->mark_in_progress()) { }
|
||||
|
||||
@ -4294,7 +4357,7 @@ void G1ParCopyHelper::mark_object(oop obj) {
|
||||
#endif // ASSERT
|
||||
|
||||
// We know that the object is not moving so it's safe to read its size.
|
||||
_cm->grayRoot(obj, (size_t) obj->size());
|
||||
_cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
|
||||
}
|
||||
|
||||
void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
|
||||
@ -4316,7 +4379,7 @@ void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
|
||||
// worker so we cannot trust that its to-space image is
|
||||
// well-formed. So we have to read its size from its from-space
|
||||
// image which we know should not be changing.
|
||||
_cm->grayRoot(to_obj, (size_t) from_obj->size());
|
||||
_cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
|
||||
}
|
||||
|
||||
oop G1ParCopyHelper::copy_to_survivor_space(oop old) {
|
||||
@ -4406,6 +4469,8 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
|
||||
assert(barrier != G1BarrierRS || obj != NULL,
|
||||
"Precondition: G1BarrierRS implies obj is non-NULL");
|
||||
|
||||
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
|
||||
|
||||
// here the null check is implicit in the cset_fast_test() test
|
||||
if (_g1->in_cset_fast_test(obj)) {
|
||||
oop forwardee;
|
||||
@ -4424,7 +4489,7 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
|
||||
|
||||
// When scanning the RS, we only care about objs in CS.
|
||||
if (barrier == G1BarrierRS) {
|
||||
_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
|
||||
_par_scan_state->update_rs(_from, p, _worker_id);
|
||||
}
|
||||
} else {
|
||||
// The object is not in collection set. If we're a root scanning
|
||||
@ -4436,7 +4501,7 @@ void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
|
||||
}
|
||||
|
||||
if (barrier == G1BarrierEvac && obj != NULL) {
|
||||
_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
|
||||
_par_scan_state->update_rs(_from, p, _worker_id);
|
||||
}
|
||||
|
||||
if (do_gen_barrier && obj != NULL) {
|
||||
@ -5666,16 +5731,6 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
|
||||
|
||||
// And the region is empty.
|
||||
assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
|
||||
|
||||
// If marking is in progress then clear any objects marked in
|
||||
// the current region. Note mark_in_progress() returns false,
|
||||
// even during an initial mark pause, until the set_marking_started()
|
||||
// call which takes place later in the pause.
|
||||
if (mark_in_progress()) {
|
||||
assert(!g1_policy()->during_initial_mark_pause(), "sanity");
|
||||
_cm->nextMarkBitMap()->clearRange(used_mr);
|
||||
}
|
||||
|
||||
free_region(cur, &pre_used, &local_free_list, false /* par */);
|
||||
} else {
|
||||
cur->uninstall_surv_rate_group();
|
||||
@ -5742,8 +5797,9 @@ void G1CollectedHeap::set_free_regions_coming() {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::reset_free_regions_coming() {
|
||||
assert(free_regions_coming(), "pre-condition");
|
||||
|
||||
{
|
||||
assert(free_regions_coming(), "pre-condition");
|
||||
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
|
||||
_free_regions_coming = false;
|
||||
SecondaryFreeList_lock->notify_all();
|
||||
|
@ -355,6 +355,7 @@ private:
|
||||
// explicitly started if:
|
||||
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
|
||||
// (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
|
||||
// (c) cause == _g1_humongous_allocation
|
||||
bool should_do_concurrent_full_gc(GCCause::Cause cause);
|
||||
|
||||
// Keeps track of how many "full collections" (i.e., Full GCs or
|
||||
@ -1172,6 +1173,10 @@ public:
|
||||
_old_set.remove(hr);
|
||||
}
|
||||
|
||||
size_t non_young_capacity_bytes() {
|
||||
return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
|
||||
}
|
||||
|
||||
void set_free_regions_coming();
|
||||
void reset_free_regions_coming();
|
||||
bool free_regions_coming() { return _free_regions_coming; }
|
||||
@ -1904,7 +1909,7 @@ protected:
|
||||
G1ParScanPartialArrayClosure* _partial_scan_cl;
|
||||
|
||||
int _hash_seed;
|
||||
int _queue_num;
|
||||
uint _queue_num;
|
||||
|
||||
size_t _term_attempts;
|
||||
|
||||
@ -1948,7 +1953,7 @@ protected:
|
||||
}
|
||||
|
||||
public:
|
||||
G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
|
||||
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
|
||||
|
||||
~G1ParScanThreadState() {
|
||||
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
|
||||
@ -2040,7 +2045,7 @@ public:
|
||||
}
|
||||
|
||||
int* hash_seed() { return &_hash_seed; }
|
||||
int queue_num() { return _queue_num; }
|
||||
uint queue_num() { return _queue_num; }
|
||||
|
||||
size_t term_attempts() const { return _term_attempts; }
|
||||
void note_term_attempt() { _term_attempts++; }
|
||||
|
@ -141,6 +141,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
|
||||
|
||||
_cur_clear_ct_time_ms(0.0),
|
||||
_mark_closure_time_ms(0.0),
|
||||
_root_region_scan_wait_time_ms(0.0),
|
||||
|
||||
_cur_ref_proc_time_ms(0.0),
|
||||
_cur_ref_enq_time_ms(0.0),
|
||||
@ -213,8 +214,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
|
||||
_survivor_bytes_before_gc(0),
|
||||
_capacity_before_gc(0),
|
||||
|
||||
_prev_collection_pause_used_at_end_bytes(0),
|
||||
|
||||
_eden_cset_region_length(0),
|
||||
_survivor_cset_region_length(0),
|
||||
_old_cset_region_length(0),
|
||||
@ -905,19 +904,10 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
|
||||
gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
|
||||
}
|
||||
|
||||
if (!during_initial_mark_pause()) {
|
||||
// We only need to do this here as the policy will only be applied
|
||||
// to the GC we're about to start. so, no point is calculating this
|
||||
// every time we calculate / recalculate the target young length.
|
||||
update_survivors_policy();
|
||||
} else {
|
||||
// The marking phase has a "we only copy implicitly live
|
||||
// objects during marking" invariant. The easiest way to ensure it
|
||||
// holds is not to allocate any survivor regions and tenure all
|
||||
// objects. In the future we might change this and handle survivor
|
||||
// regions specially during marking.
|
||||
tenure_all_objects();
|
||||
}
|
||||
// We only need to do this here as the policy will only be applied
|
||||
// to the GC we're about to start. So, no point in calculating this
|
||||
// every time we calculate / recalculate the target young length.
|
||||
update_survivors_policy();
|
||||
|
||||
assert(_g1->used() == _g1->recalculate_used(),
|
||||
err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
|
||||
@ -969,6 +959,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
|
||||
// This is initialized to zero here and is set during
|
||||
// the evacuation pause if marking is in progress.
|
||||
_cur_satb_drain_time_ms = 0.0;
|
||||
// This is initialized to zero here and is set during the evacuation
|
||||
// pause if we actually waited for the root region scanning to finish.
|
||||
_root_region_scan_wait_time_ms = 0.0;
|
||||
|
||||
_last_gc_was_young = false;
|
||||
|
||||
@ -1140,6 +1133,50 @@ double G1CollectorPolicy::max_sum(double* data1, double* data2) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}

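Stripped of the ergonomics tracing, need_to_start_conc_mark() boils down to a single occupancy comparison against InitiatingHeapOccupancyPercent. The following standalone sketch (hypothetical function and parameter names, plain size_t arguments instead of the policy's fields; the during_cycle() early-out is omitted) shows just that arithmetic:

```cpp
#include <cstddef>
#include <cstdio>

// Minimal model of the threshold check above: request a concurrent cycle
// when non-young occupancy plus the pending allocation exceeds
// InitiatingHeapOccupancyPercent of the committed heap capacity.
static bool occupancy_wants_conc_mark(size_t capacity_bytes,
                                      size_t non_young_used_bytes,
                                      size_t alloc_bytes,
                                      unsigned initiating_occupancy_percent) {
  size_t threshold = (capacity_bytes / 100) * initiating_occupancy_percent;
  return (non_young_used_bytes + alloc_bytes) > threshold;
}

int main() {
  // 1 GB heap, 410 MB of old/humongous regions, a 200 MB humongous request, IHOP = 45.
  const size_t MB = 1024 * 1024;
  bool start = occupancy_wants_conc_mark(1024 * MB, 410 * MB, 200 * MB, 45);
  std::printf("request concurrent cycle initiation: %s\n", start ? "yes" : "no");
  return 0;
}
```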
// Anything below that is considered to be zero
|
||||
#define MIN_TIMER_GRANULARITY 0.0000001
|
||||
|
||||
@ -1166,44 +1203,16 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
|
||||
#endif // PRODUCT
|
||||
|
||||
last_pause_included_initial_mark = during_initial_mark_pause();
|
||||
if (last_pause_included_initial_mark)
|
||||
if (last_pause_included_initial_mark) {
|
||||
record_concurrent_mark_init_end(0.0);
|
||||
|
||||
size_t marking_initiating_used_threshold =
|
||||
(_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
|
||||
|
||||
if (!_g1->mark_in_progress() && !_last_young_gc) {
|
||||
assert(!last_pause_included_initial_mark, "invariant");
|
||||
if (cur_used_bytes > marking_initiating_used_threshold) {
|
||||
if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
|
||||
assert(!during_initial_mark_pause(), "we should not see this here");
|
||||
|
||||
ergo_verbose3(ErgoConcCycles,
|
||||
"request concurrent cycle initiation",
|
||||
ergo_format_reason("occupancy higher than threshold")
|
||||
ergo_format_byte("occupancy")
|
||||
ergo_format_byte_perc("threshold"),
|
||||
cur_used_bytes,
|
||||
marking_initiating_used_threshold,
|
||||
(double) InitiatingHeapOccupancyPercent);
|
||||
|
||||
// Note: this might have already been set, if during the last
|
||||
// pause we decided to start a cycle but at the beginning of
|
||||
// this pause we decided to postpone it. That's OK.
|
||||
set_initiate_conc_mark_if_possible();
|
||||
} else {
|
||||
ergo_verbose2(ErgoConcCycles,
|
||||
"do not request concurrent cycle initiation",
|
||||
ergo_format_reason("occupancy lower than previous occupancy")
|
||||
ergo_format_byte("occupancy")
|
||||
ergo_format_byte("previous occupancy"),
|
||||
cur_used_bytes,
|
||||
_prev_collection_pause_used_at_end_bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_prev_collection_pause_used_at_end_bytes = cur_used_bytes;
|
||||
if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
|
||||
// Note: this might have already been set, if during the last
|
||||
// pause we decided to start a cycle but at the beginning of
|
||||
// this pause we decided to postpone it. That's OK.
|
||||
set_initiate_conc_mark_if_possible();
|
||||
}
|
||||
|
||||
_mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
|
||||
end_time_sec, false);
|
||||
@ -1257,6 +1266,10 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
|
||||
// is in progress.
|
||||
other_time_ms -= _cur_satb_drain_time_ms;
|
||||
|
||||
// Subtract the root region scanning wait time. It's initialized to
|
||||
// zero at the start of the pause.
|
||||
other_time_ms -= _root_region_scan_wait_time_ms;
|
||||
|
||||
if (parallel) {
|
||||
other_time_ms -= _cur_collection_par_time_ms;
|
||||
} else {
|
||||
@ -1289,6 +1302,8 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
|
||||
// each other. Therefore we unconditionally record the SATB drain
|
||||
// time - even if it's zero.
|
||||
body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
|
||||
body_summary->record_root_region_scan_wait_time_ms(
|
||||
_root_region_scan_wait_time_ms);
|
||||
|
||||
body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
|
||||
body_summary->record_satb_filtering_time_ms(satb_filtering_time);
|
||||
@ -1385,6 +1400,9 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
|
||||
(last_pause_included_initial_mark) ? " (initial-mark)" : "",
|
||||
elapsed_ms / 1000.0);
|
||||
|
||||
if (_root_region_scan_wait_time_ms > 0.0) {
|
||||
print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
|
||||
}
|
||||
if (parallel) {
|
||||
print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
|
||||
print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
|
||||
@ -1988,6 +2006,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
|
||||
if (summary->get_total_seq()->num() > 0) {
|
||||
print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
|
||||
if (body_summary != NULL) {
|
||||
print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
|
||||
if (parallel) {
|
||||
print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
|
||||
print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
|
||||
@ -2029,15 +2048,17 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
|
||||
// parallel
|
||||
NumberSeq* other_parts[] = {
|
||||
body_summary->get_satb_drain_seq(),
|
||||
body_summary->get_root_region_scan_wait_seq(),
|
||||
body_summary->get_parallel_seq(),
|
||||
body_summary->get_clear_ct_seq()
|
||||
};
|
||||
calc_other_times_ms = NumberSeq(summary->get_total_seq(),
|
||||
3, other_parts);
|
||||
4, other_parts);
|
||||
} else {
|
||||
// serial
|
||||
NumberSeq* other_parts[] = {
|
||||
body_summary->get_satb_drain_seq(),
|
||||
body_summary->get_root_region_scan_wait_seq(),
|
||||
body_summary->get_update_rs_seq(),
|
||||
body_summary->get_ext_root_scan_seq(),
|
||||
body_summary->get_satb_filtering_seq(),
|
||||
@ -2045,7 +2066,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
|
||||
body_summary->get_obj_copy_seq()
|
||||
};
|
||||
calc_other_times_ms = NumberSeq(summary->get_total_seq(),
|
||||
6, other_parts);
|
||||
7, other_parts);
|
||||
}
|
||||
check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
|
||||
}
|
||||
|
@ -65,6 +65,7 @@ public:
|
||||
|
||||
class MainBodySummary: public CHeapObj {
|
||||
define_num_seq(satb_drain) // optional
|
||||
define_num_seq(root_region_scan_wait)
|
||||
define_num_seq(parallel) // parallel only
|
||||
define_num_seq(ext_root_scan)
|
||||
define_num_seq(satb_filtering)
|
||||
@ -177,7 +178,6 @@ private:
|
||||
double _cur_collection_start_sec;
|
||||
size_t _cur_collection_pause_used_at_start_bytes;
|
||||
size_t _cur_collection_pause_used_regions_at_start;
|
||||
size_t _prev_collection_pause_used_at_end_bytes;
|
||||
double _cur_collection_par_time_ms;
|
||||
double _cur_satb_drain_time_ms;
|
||||
double _cur_clear_ct_time_ms;
|
||||
@ -716,6 +716,7 @@ private:
|
||||
double _mark_remark_start_sec;
|
||||
double _mark_cleanup_start_sec;
|
||||
double _mark_closure_time_ms;
|
||||
double _root_region_scan_wait_time_ms;
|
||||
|
||||
// Update the young list target length either by setting it to the
|
||||
// desired fixed value or by calculating it using G1's pause
|
||||
@ -800,6 +801,8 @@ public:
|
||||
|
||||
GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
|
||||
|
||||
bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
|
||||
|
||||
// Update the heuristic info to record a collection pause of the given
|
||||
// start time, where the given number of bytes were used at the start.
|
||||
// This may involve changing the desired size of a collection set.
|
||||
@ -816,6 +819,10 @@ public:
|
||||
_mark_closure_time_ms = mark_closure_time_ms;
|
||||
}
|
||||
|
||||
void record_root_region_scan_wait_time(double time_ms) {
|
||||
_root_region_scan_wait_time_ms = time_ms;
|
||||
}
|
||||
|
||||
void record_concurrent_mark_remark_start();
|
||||
void record_concurrent_mark_remark_end();
|
||||
|
||||
@ -1146,11 +1153,6 @@ public:
|
||||
_survivor_surv_rate_group->stop_adding_regions();
|
||||
}
|
||||
|
||||
void tenure_all_objects() {
|
||||
_max_survivor_regions = 0;
|
||||
_tenuring_threshold = 0;
|
||||
}
|
||||
|
||||
void record_survivor_regions(size_t regions,
|
||||
HeapRegion* head,
|
||||
HeapRegion* tail) {
|
||||
|
@ -70,16 +70,20 @@ private:
|
||||
OopsInHeapRegionClosure *_update_rset_cl;
|
||||
bool _during_initial_mark;
|
||||
bool _during_conc_mark;
|
||||
uint _worker_id;
|
||||
|
||||
public:
|
||||
RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
|
||||
HeapRegion* hr,
|
||||
OopsInHeapRegionClosure* update_rset_cl,
|
||||
bool during_initial_mark,
|
||||
bool during_conc_mark) :
|
||||
bool during_conc_mark,
|
||||
uint worker_id) :
|
||||
_g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
|
||||
_update_rset_cl(update_rset_cl),
|
||||
_during_initial_mark(during_initial_mark),
|
||||
_during_conc_mark(during_conc_mark) { }
|
||||
_during_conc_mark(during_conc_mark),
|
||||
_worker_id(worker_id) { }
|
||||
|
||||
size_t marked_bytes() { return _marked_bytes; }
|
||||
|
||||
@ -123,7 +127,7 @@ public:
|
||||
// explicitly and all objects in the CSet are considered
|
||||
// (implicitly) live. So, we won't mark them explicitly and
|
||||
// we'll leave them over NTAMS.
|
||||
_cm->markNext(obj);
|
||||
_cm->grayRoot(obj, obj_size, _worker_id, _hr);
|
||||
}
|
||||
_marked_bytes += (obj_size * HeapWordSize);
|
||||
obj->set_mark(markOopDesc::prototype());
|
||||
@ -155,12 +159,14 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
|
||||
G1CollectedHeap* _g1h;
|
||||
ConcurrentMark* _cm;
|
||||
OopsInHeapRegionClosure *_update_rset_cl;
|
||||
uint _worker_id;
|
||||
|
||||
public:
|
||||
RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
|
||||
OopsInHeapRegionClosure* update_rset_cl) :
|
||||
OopsInHeapRegionClosure* update_rset_cl,
|
||||
uint worker_id) :
|
||||
_g1h(g1h), _update_rset_cl(update_rset_cl),
|
||||
_cm(_g1h->concurrent_mark()) { }
|
||||
_worker_id(worker_id), _cm(_g1h->concurrent_mark()) { }
|
||||
|
||||
bool doHeapRegion(HeapRegion *hr) {
|
||||
bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
|
||||
@ -173,7 +179,8 @@ public:
|
||||
if (hr->evacuation_failed()) {
|
||||
RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
|
||||
during_initial_mark,
|
||||
during_conc_mark);
|
||||
during_conc_mark,
|
||||
_worker_id);
|
||||
|
||||
MemRegion mr(hr->bottom(), hr->end());
|
||||
// We'll recreate the prev marking info so we'll first clear
|
||||
@ -226,7 +233,7 @@ public:
|
||||
update_rset_cl = &immediate_update;
|
||||
}
|
||||
|
||||
RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl);
|
||||
RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl, worker_id);
|
||||
|
||||
HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
|
||||
_g1h->collection_set_iterate_from(hr, &rsfp_cl);
|
||||
|
@ -89,16 +89,15 @@ class G1CollectedHeap;
|
||||
//
|
||||
// * Min Capacity
|
||||
//
|
||||
// We set this to 0 for all spaces. We could consider setting the old
|
||||
// min capacity to the min capacity of the heap (see 7078465).
|
||||
// We set this to 0 for all spaces.
|
||||
//
|
||||
// * Max Capacity
|
||||
//
|
||||
// For jstat, we set the max capacity of all spaces to heap_capacity,
|
||||
// given that we don't always have a reasonably upper bound on how big
|
||||
// each space can grow. For the memory pools, we actually make the max
|
||||
// capacity undefined. We could consider setting the old max capacity
|
||||
// to the max capacity of the heap (see 7078465).
|
||||
// given that we don't always have a reasonable upper bound on how big
|
||||
// each space can grow. For the memory pools, we make the max
|
||||
// capacity undefined with the exception of the old memory pool for
|
||||
// which we make the max capacity same as the max heap capacity.
|
||||
//
|
||||
// If we had more accurate occupancy / capacity information per
|
||||
// region set the above calculations would be greatly simplified and
|
||||
|
@ -51,6 +51,7 @@ protected:
|
||||
G1RemSet* _g1_rem;
|
||||
ConcurrentMark* _cm;
|
||||
G1ParScanThreadState* _par_scan_state;
|
||||
uint _worker_id;
|
||||
bool _during_initial_mark;
|
||||
bool _mark_in_progress;
|
||||
public:
|
||||
@ -219,6 +220,7 @@ public:
|
||||
|
||||
// Closure for iterating over object fields during concurrent marking
|
||||
class G1CMOopClosure : public OopClosure {
|
||||
private:
|
||||
G1CollectedHeap* _g1h;
|
||||
ConcurrentMark* _cm;
|
||||
CMTask* _task;
|
||||
@ -229,4 +231,92 @@ public:
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
// Closure to scan the root regions during concurrent marking
|
||||
class G1RootRegionScanClosure : public OopClosure {
|
||||
private:
|
||||
G1CollectedHeap* _g1h;
|
||||
ConcurrentMark* _cm;
|
||||
uint _worker_id;
|
||||
public:
|
||||
G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
|
||||
uint worker_id) :
|
||||
_g1h(g1h), _cm(cm), _worker_id(worker_id) { }
|
||||
template <class T> void do_oop_nv(T* p);
|
||||
virtual void do_oop( oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
// Closure that applies the given two closures in sequence.
|
||||
// Used by the RSet refinement code (when updating RSets
|
||||
// during an evacuation pause) to record cards containing
|
||||
// pointers into the collection set.
|
||||
|
||||
class G1Mux2Closure : public OopClosure {
|
||||
OopClosure* _c1;
|
||||
OopClosure* _c2;
|
||||
public:
|
||||
G1Mux2Closure(OopClosure *c1, OopClosure *c2);
|
||||
template <class T> void do_oop_nv(T* p);
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
// A closure that returns true if it is actually applied
|
||||
// to a reference
|
||||
|
||||
class G1TriggerClosure : public OopClosure {
|
||||
bool _triggered;
|
||||
public:
|
||||
G1TriggerClosure();
|
||||
bool triggered() const { return _triggered; }
|
||||
template <class T> void do_oop_nv(T* p);
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
// A closure which uses a triggering closure to determine
|
||||
// whether to apply an oop closure.
|
||||
|
||||
class G1InvokeIfNotTriggeredClosure: public OopClosure {
|
||||
G1TriggerClosure* _trigger_cl;
|
||||
OopClosure* _oop_cl;
|
||||
public:
|
||||
G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
|
||||
template <class T> void do_oop_nv(T* p);
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
class G1UpdateRSOrPushRefOopClosure: public OopClosure {
|
||||
G1CollectedHeap* _g1;
|
||||
G1RemSet* _g1_rem_set;
|
||||
HeapRegion* _from;
|
||||
OopsInHeapRegionClosure* _push_ref_cl;
|
||||
bool _record_refs_into_cset;
|
||||
int _worker_i;
|
||||
|
||||
public:
|
||||
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
|
||||
G1RemSet* rs,
|
||||
OopsInHeapRegionClosure* push_ref_cl,
|
||||
bool record_refs_into_cset,
|
||||
int worker_i = 0);
|
||||
|
||||
void set_from(HeapRegion* from) {
|
||||
assert(from != NULL, "from region must be non-NULL");
|
||||
_from = from;
|
||||
}
|
||||
|
||||
bool self_forwarded(oop obj) {
|
||||
bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
|
||||
return result;
|
||||
}
|
||||
|
||||
bool apply_to_weak_ref_discovered_field() { return true; }
|
||||
|
||||
template <class T> void do_oop_nv(T* p);
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,7 +39,8 @@
|
||||
// perf-critical inner loop.
|
||||
#define FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT 0
|
||||
|
||||
template <class T> inline void FilterIntoCSClosure::do_oop_nv(T* p) {
|
||||
template <class T>
|
||||
inline void FilterIntoCSClosure::do_oop_nv(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop) &&
|
||||
_g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
|
||||
@ -53,7 +54,8 @@ template <class T> inline void FilterIntoCSClosure::do_oop_nv(T* p) {
|
||||
|
||||
#define FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT 0
|
||||
|
||||
template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
|
||||
template <class T>
|
||||
inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
@ -67,7 +69,8 @@ template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
|
||||
}
|
||||
|
||||
// This closure is applied to the fields of the objects that have just been copied.
|
||||
template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
|
||||
template <class T>
|
||||
inline void G1ParScanClosure::do_oop_nv(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
@ -96,7 +99,8 @@ template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
|
||||
}
|
||||
}
|
||||
|
||||
template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
|
||||
template <class T>
|
||||
inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
@ -111,7 +115,8 @@ template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
|
||||
}
|
||||
}
|
||||
|
||||
template <class T> inline void G1CMOopClosure::do_oop_nv(T* p) {
|
||||
template <class T>
|
||||
inline void G1CMOopClosure::do_oop_nv(T* p) {
|
||||
assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
|
||||
assert(!_g1h->is_on_master_free_list(
|
||||
_g1h->heap_region_containing((HeapWord*) p)), "invariant");
|
||||
@ -125,4 +130,97 @@ template <class T> inline void G1CMOopClosure::do_oop_nv(T* p) {
|
||||
_task->deal_with_reference(obj);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
|
||||
T heap_oop = oopDesc::load_heap_oop(p);
|
||||
if (!oopDesc::is_null(heap_oop)) {
|
||||
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||
HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
|
||||
if (hr != NULL) {
|
||||
_cm->grayRoot(obj, obj->size(), _worker_id, hr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void G1Mux2Closure::do_oop_nv(T* p) {
|
||||
// Apply first closure; then apply the second.
|
||||
_c1->do_oop(p);
|
||||
_c2->do_oop(p);
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void G1TriggerClosure::do_oop_nv(T* p) {
|
||||
// Record that this closure was actually applied (triggered).
|
||||
_triggered = true;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
|
||||
if (!_trigger_cl->triggered()) {
|
||||
_oop_cl->do_oop(p);
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
#ifdef ASSERT
|
||||
// can't do because of races
|
||||
// assert(obj == NULL || obj->is_oop(), "expected an oop");
|
||||
|
||||
// Do the safe subset of is_oop
|
||||
if (obj != NULL) {
|
||||
#ifdef CHECK_UNHANDLED_OOPS
|
||||
oopDesc* o = obj.obj();
|
||||
#else
|
||||
oopDesc* o = obj;
|
||||
#endif // CHECK_UNHANDLED_OOPS
|
||||
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
|
||||
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
assert(_from != NULL, "from region must be non-NULL");
|
||||
|
||||
HeapRegion* to = _g1->heap_region_containing(obj);
|
||||
if (to != NULL && _from != to) {
|
||||
// The _record_refs_into_cset flag is true during the RSet
|
||||
// updating part of an evacuation pause. It is false at all
|
||||
// other times:
|
||||
// * rebuilding the remembered sets after a full GC
|
||||
// * during concurrent refinement.
|
||||
// * updating the remembered sets of regions in the collection
|
||||
// set in the event of an evacuation failure (when deferred
|
||||
// updates are enabled).
|
||||
|
||||
if (_record_refs_into_cset && to->in_collection_set()) {
|
||||
// We are recording references that point into the collection
|
||||
// set and this particular reference does exactly that...
|
||||
// If the referenced object has already been forwarded
|
||||
// to itself, we are handling an evacuation failure and
|
||||
// we have already visited/tried to copy this object
|
||||
// there is no need to retry.
|
||||
if (!self_forwarded(obj)) {
|
||||
assert(_push_ref_cl != NULL, "should not be null");
|
||||
// Push the reference in the refs queue of the G1ParScanThreadState
|
||||
// instance for this worker thread.
|
||||
_push_ref_cl->do_oop(p);
|
||||
}
|
||||
|
||||
// Deferred updates to the CSet are either discarded (in the normal case),
|
||||
// or processed (if an evacuation failure occurs) at the end
|
||||
// of the collection.
|
||||
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
|
||||
} else {
|
||||
// We either don't care about pushing references that point into the
|
||||
// collection set (i.e. we're not during an evacuation pause) _or_
|
||||
// the reference doesn't point into the collection set. Either way
|
||||
// we add the reference directly to the RSet of the region containing
|
||||
// the referenced object.
|
||||
_g1_rem_set->par_write_ref(_from, p, _worker_i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -569,40 +569,26 @@ void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
|
||||
|
||||
static IntHistogram out_of_histo(50, 50);
|
||||
|
||||
class TriggerClosure : public OopClosure {
|
||||
bool _trigger;
|
||||
public:
|
||||
TriggerClosure() : _trigger(false) { }
|
||||
bool value() const { return _trigger; }
|
||||
template <class T> void do_oop_nv(T* p) { _trigger = true; }
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
|
||||
class InvokeIfNotTriggeredClosure: public OopClosure {
|
||||
TriggerClosure* _t;
|
||||
OopClosure* _oc;
|
||||
public:
|
||||
InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
|
||||
_t(t), _oc(oc) { }
|
||||
template <class T> void do_oop_nv(T* p) {
|
||||
if (!_t->value()) _oc->do_oop(p);
|
||||
}
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
G1TriggerClosure::G1TriggerClosure() :
|
||||
_triggered(false) { }
|
||||
|
||||
class Mux2Closure : public OopClosure {
|
||||
OopClosure* _c1;
|
||||
OopClosure* _c2;
|
||||
public:
|
||||
Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
|
||||
template <class T> void do_oop_nv(T* p) {
|
||||
_c1->do_oop(p); _c2->do_oop(p);
|
||||
}
|
||||
virtual void do_oop(oop* p) { do_oop_nv(p); }
|
||||
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
|
||||
};
|
||||
G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
|
||||
OopClosure* oop_cl) :
|
||||
_trigger_cl(t_cl), _oop_cl(oop_cl) { }
|
||||
|
||||
G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
|
||||
_c1(c1), _c2(c2) { }
|
||||
|
||||
G1UpdateRSOrPushRefOopClosure::
|
||||
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
|
||||
G1RemSet* rs,
|
||||
OopsInHeapRegionClosure* push_ref_cl,
|
||||
bool record_refs_into_cset,
|
||||
int worker_i) :
|
||||
_g1(g1h), _g1_rem_set(rs), _from(NULL),
|
||||
_record_refs_into_cset(record_refs_into_cset),
|
||||
_push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
|
||||
|
||||
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
|
||||
bool check_for_refs_into_cset) {
|
||||
@ -629,17 +615,17 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
|
||||
assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
|
||||
oops_in_heap_closure = _cset_rs_update_cl[worker_i];
|
||||
}
|
||||
UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
|
||||
_g1->g1_rem_set(),
|
||||
oops_in_heap_closure,
|
||||
check_for_refs_into_cset,
|
||||
worker_i);
|
||||
G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
|
||||
_g1->g1_rem_set(),
|
||||
oops_in_heap_closure,
|
||||
check_for_refs_into_cset,
|
||||
worker_i);
|
||||
update_rs_oop_cl.set_from(r);
|
||||
|
||||
  TriggerClosure trigger_cl;
  G1TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
  G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                                                        (check_for_refs_into_cset ?
@@ -688,7 +674,7 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
    _conc_refine_cards++;
  }

  return trigger_cl.value();
  return trigger_cl.triggered();
}
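The three small closures wired together above (trigger, invoke-if-not-triggered, mux) form a reusable pattern: visit each reference, remember whether anything was visited, and stop applying the guarded part after the first hit. Below is a toy, self-contained C++ model of that composition; the do_ref(int*) interface is a deliberate simplification of the oop*/narrowOop* virtuals, and the FilterIntoCSClosure layer used in the real wiring is left out.

```cpp
#include <cstdio>

// Simplified closure interface; HotSpot's versions take oop*/narrowOop*.
struct Closure {
  virtual void do_ref(int* p) = 0;
  virtual ~Closure() {}
};

// Counterpart of G1TriggerClosure: remembers that it was applied at all.
struct Trigger : Closure {
  bool triggered = false;
  void do_ref(int*) override { triggered = true; }
};

// Counterpart of G1InvokeIfNotTriggeredClosure: forwards to _oc only while
// the trigger has not fired yet.
struct InvokeIfNotTriggered : Closure {
  Trigger* _t;
  Closure* _oc;
  InvokeIfNotTriggered(Trigger* t, Closure* oc) : _t(t), _oc(oc) {}
  void do_ref(int* p) override { if (!_t->triggered) _oc->do_ref(p); }
};

// Counterpart of G1Mux2Closure: applies two closures in sequence.
struct Mux2 : Closure {
  Closure* _c1;
  Closure* _c2;
  Mux2(Closure* c1, Closure* c2) : _c1(c1), _c2(c2) {}
  void do_ref(int* p) override { _c1->do_ref(p); _c2->do_ref(p); }
};

struct Print : Closure {
  void do_ref(int* p) override { std::printf("visited %d\n", *p); }
};

int main() {
  int refs[] = {10, 20, 30};
  Trigger trigger;
  Print print;
  InvokeIfNotTriggered guarded(&trigger, &print);
  Mux2 mux(&guarded, &trigger);      // print while untriggered, then fire the trigger
  for (int& r : refs) mux.do_ref(&r);
  std::printf("triggered: %s\n", trigger.triggered ? "true" : "false");
  return 0;
}
```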
|
||||
bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -191,44 +191,5 @@ public:
|
||||
virtual void do_oop( oop* p) { do_oop_work(p); }
|
||||
};
|
||||
|
||||
class UpdateRSOrPushRefOopClosure: public OopClosure {
|
||||
G1CollectedHeap* _g1;
|
||||
G1RemSet* _g1_rem_set;
|
||||
HeapRegion* _from;
|
||||
OopsInHeapRegionClosure* _push_ref_cl;
|
||||
bool _record_refs_into_cset;
|
||||
int _worker_i;
|
||||
|
||||
template <class T> void do_oop_work(T* p);
|
||||
|
||||
public:
|
||||
UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
|
||||
G1RemSet* rs,
|
||||
OopsInHeapRegionClosure* push_ref_cl,
|
||||
bool record_refs_into_cset,
|
||||
int worker_i = 0) :
|
||||
_g1(g1h),
|
||||
_g1_rem_set(rs),
|
||||
_from(NULL),
|
||||
_record_refs_into_cset(record_refs_into_cset),
|
||||
_push_ref_cl(push_ref_cl),
|
||||
_worker_i(worker_i) { }
|
||||
|
||||
void set_from(HeapRegion* from) {
|
||||
assert(from != NULL, "from region must be non-NULL");
|
||||
_from = from;
|
||||
}
|
||||
|
||||
bool self_forwarded(oop obj) {
|
||||
bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
|
||||
return result;
|
||||
}
|
||||
|
||||
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
|
||||
virtual void do_oop(oop* p) { do_oop_work(p); }
|
||||
|
||||
bool apply_to_weak_ref_discovered_field() { return true; }
|
||||
};
|
||||
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -85,66 +85,4 @@ inline void UpdateRSetImmediate::do_oop_work(T* p) {
|
||||
}
|
||||
}
|
||||
|
||||
template <class T>
|
||||
inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
|
||||
oop obj = oopDesc::load_decode_heap_oop(p);
|
||||
#ifdef ASSERT
|
||||
// can't do because of races
|
||||
// assert(obj == NULL || obj->is_oop(), "expected an oop");
|
||||
|
||||
// Do the safe subset of is_oop
|
||||
if (obj != NULL) {
|
||||
#ifdef CHECK_UNHANDLED_OOPS
|
||||
oopDesc* o = obj.obj();
|
||||
#else
|
||||
oopDesc* o = obj;
|
||||
#endif // CHECK_UNHANDLED_OOPS
|
||||
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
|
||||
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
|
||||
}
|
||||
#endif // ASSERT
|
||||
|
||||
assert(_from != NULL, "from region must be non-NULL");
|
||||
|
||||
HeapRegion* to = _g1->heap_region_containing(obj);
|
||||
if (to != NULL && _from != to) {
|
||||
// The _record_refs_into_cset flag is true during the RSet
|
||||
// updating part of an evacuation pause. It is false at all
|
||||
// other times:
|
||||
// * rebuilding the remembered sets after a full GC
|
||||
// * during concurrent refinement.
|
||||
// * updating the remembered sets of regions in the collection
|
||||
// set in the event of an evacuation failure (when deferred
|
||||
// updates are enabled).
|
||||
|
||||
if (_record_refs_into_cset && to->in_collection_set()) {
|
||||
// We are recording references that point into the collection
|
||||
// set and this particular reference does exactly that...
|
||||
// If the referenced object has already been forwarded
|
||||
// to itself, we are handling an evacuation failure and
|
||||
// we have already visited/tried to copy this object
|
||||
// there is no need to retry.
|
||||
if (!self_forwarded(obj)) {
|
||||
assert(_push_ref_cl != NULL, "should not be null");
|
||||
// Push the reference in the refs queue of the G1ParScanThreadState
|
||||
// instance for this worker thread.
|
||||
_push_ref_cl->do_oop(p);
|
||||
}
|
||||
|
||||
// Deferred updates to the CSet are either discarded (in the normal case),
|
||||
// or processed (if an evacuation failure occurs) at the end
|
||||
// of the collection.
|
||||
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
|
||||
} else {
|
||||
// We either don't care about pushing references that point into the
|
||||
// collection set (i.e. we're not during an evacuation pause) _or_
|
||||
// the reference doesn't point into the collection set. Either way
|
||||
// we add the reference directly to the RSet of the region containing
|
||||
// the referenced object.
|
||||
_g1_rem_set->par_write_ref(_from, p, _worker_i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,12 +32,14 @@
|
||||
|
||||
// Forward declarations.
|
||||
enum G1Barrier {
|
||||
G1BarrierNone, G1BarrierRS, G1BarrierEvac
|
||||
G1BarrierNone,
|
||||
G1BarrierRS,
|
||||
G1BarrierEvac
|
||||
};
|
||||
|
||||
template<bool do_gen_barrier, G1Barrier barrier,
|
||||
bool do_mark_object>
|
||||
template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
|
||||
class G1ParCopyClosure;
|
||||
|
||||
class G1ParScanClosure;
|
||||
class G1ParPushHeapRSClosure;
|
||||
|
||||
@ -46,6 +48,13 @@ typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
|
||||
class FilterIntoCSClosure;
|
||||
class FilterOutOfRegionClosure;
|
||||
class G1CMOopClosure;
|
||||
class G1RootRegionScanClosure;
|
||||
|
||||
// Specialized oop closures from g1RemSet.cpp
|
||||
class G1Mux2Closure;
|
||||
class G1TriggerClosure;
|
||||
class G1InvokeIfNotTriggeredClosure;
|
||||
class G1UpdateRSOrPushRefOopClosure;
|
||||
|
||||
#ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
|
||||
#error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
|
||||
@ -57,7 +66,12 @@ class G1CMOopClosure;
|
||||
f(G1ParPushHeapRSClosure,_nv) \
|
||||
f(FilterIntoCSClosure,_nv) \
|
||||
f(FilterOutOfRegionClosure,_nv) \
|
||||
f(G1CMOopClosure,_nv)
|
||||
f(G1CMOopClosure,_nv) \
|
||||
f(G1RootRegionScanClosure,_nv) \
|
||||
f(G1Mux2Closure,_nv) \
|
||||
f(G1TriggerClosure,_nv) \
|
||||
f(G1InvokeIfNotTriggeredClosure,_nv) \
|
||||
f(G1UpdateRSOrPushRefOopClosure,_nv)
|
||||
|
||||
#ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
|
||||
#error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."
|
||||
|
@ -659,7 +659,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||
// If we're within a stop-world GC, then we might look at a card in a
|
||||
// GC alloc region that extends onto a GC LAB, which may not be
|
||||
// parseable. Stop such at the "saved_mark" of the region.
|
||||
if (G1CollectedHeap::heap()->is_gc_active()) {
|
||||
if (g1h->is_gc_active()) {
|
||||
mr = mr.intersection(used_region_at_save_marks());
|
||||
} else {
|
||||
mr = mr.intersection(used_region());
|
||||
@ -688,53 +688,63 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
|
||||
OrderAccess::storeload();
|
||||
}
|
||||
|
||||
// Cache the boundaries of the memory region in some const locals
|
||||
HeapWord* const start = mr.start();
|
||||
HeapWord* const end = mr.end();
|
||||
|
||||
// We used to use "block_start_careful" here. But we're actually happy
|
||||
// to update the BOT while we do this...
|
||||
HeapWord* cur = block_start(mr.start());
|
||||
assert(cur <= mr.start(), "Postcondition");
|
||||
HeapWord* cur = block_start(start);
|
||||
assert(cur <= start, "Postcondition");
|
||||
|
||||
while (cur <= mr.start()) {
|
||||
if (oop(cur)->klass_or_null() == NULL) {
|
||||
oop obj;
|
||||
|
||||
HeapWord* next = cur;
|
||||
while (next <= start) {
|
||||
cur = next;
|
||||
obj = oop(cur);
|
||||
if (obj->klass_or_null() == NULL) {
|
||||
// Ran into an unparseable point.
|
||||
return cur;
|
||||
}
|
||||
// Otherwise...
|
||||
int sz = oop(cur)->size();
|
||||
if (cur + sz > mr.start()) break;
|
||||
// Otherwise, go on.
|
||||
cur = cur + sz;
|
||||
next = (cur + obj->size());
|
||||
}
|
||||
oop obj;
|
||||
obj = oop(cur);
|
||||
// If we finish this loop...
|
||||
assert(cur <= mr.start()
|
||||
&& obj->klass_or_null() != NULL
|
||||
&& cur + obj->size() > mr.start(),
|
||||
|
||||
// If we finish the above loop...We have a parseable object that
|
||||
// begins on or before the start of the memory region, and ends
|
||||
// inside or spans the entire region.
|
||||
|
||||
assert(obj == oop(cur), "sanity");
|
||||
assert(cur <= start &&
|
||||
obj->klass_or_null() != NULL &&
|
||||
(cur + obj->size()) > start,
|
||||
"Loop postcondition");
|
||||
|
||||
if (!g1h->is_obj_dead(obj)) {
|
||||
obj->oop_iterate(cl, mr);
|
||||
}
|
||||
|
||||
  HeapWord* next;
  while (cur < mr.end()) {
  while (cur < end) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    };

    // Otherwise:
    next = (cur + obj->size());

    if (!g1h->is_obj_dead(obj)) {
      if (next < mr.end()) {
      if (next < end || !obj->is_objArray()) {
        // This object either does not span the MemRegion
        // boundary, or if it does it's not an array.
        // Apply closure to whole object.
        obj->oop_iterate(cl);
      } else {
        // this obj spans the boundary. If it's an array, stop at the
        // boundary.
        if (obj->is_objArray()) {
          obj->oop_iterate(cl, mr);
        } else {
          obj->oop_iterate(cl);
        }
        // This obj is an array that spans the boundary.
        // Stop at the boundary.
        obj->oop_iterate(cl, mr);
      }
    }
    cur = next;

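The rewritten card-walking code above works in two phases: it first walks forward from the block start to the object that covers the card's first word, then visits every object that begins before the card's end, clipping only object arrays that span the boundary. A compact, self-contained model of that two-phase walk over a pretend object layout (sizes only; the parseability, liveness and objArray-clipping checks are left out, and all names are illustrative):

```cpp
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical back-to-back object layout within a region, sizes in words.
  std::vector<int> obj_sizes = {4, 7, 3, 5, 6};
  const int start = 9, end = 20;   // the card's [start, end) interval

  // Phase 1: advance until the current object's end goes past 'start',
  // mirroring the "while (next <= start)" loop in the hunk above.
  int cur = 0;
  size_t idx = 0;
  while (cur + obj_sizes[idx] <= start) {
    cur += obj_sizes[idx];
    ++idx;
  }
  // Postcondition: cur <= start and cur + size > start.

  // Phase 2: visit every object that begins before 'end',
  // mirroring the "while (cur < end)" loop.
  for (; idx < obj_sizes.size() && cur < end; cur += obj_sizes[idx], ++idx) {
    std::printf("apply closure to object at %d (size %d)\n", cur, obj_sizes[idx]);
  }
  return 0;
}
```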
@ -374,7 +374,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
|
||||
ParVerifyClaimValue = 4,
|
||||
RebuildRSClaimValue = 5,
|
||||
CompleteMarkCSetClaimValue = 6,
|
||||
ParEvacFailureClaimValue = 7
|
||||
ParEvacFailureClaimValue = 7,
|
||||
AggregateCountClaimValue = 8,
|
||||
VerifyCountClaimValue = 9
|
||||
};
|
||||
|
||||
inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
|
||||
|
@ -72,10 +72,11 @@ inline void HeapRegion::note_end_of_marking() {
|
||||
}
|
||||
|
||||
inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
|
||||
if (during_initial_mark) {
|
||||
if (is_survivor()) {
|
||||
assert(false, "should not allocate survivors during IM");
|
||||
} else {
|
||||
if (is_survivor()) {
|
||||
// This is how we always allocate survivors.
|
||||
assert(_next_top_at_mark_start == bottom(), "invariant");
|
||||
} else {
|
||||
if (during_initial_mark) {
|
||||
// During initial-mark we'll explicitly mark any objects on old
|
||||
// regions that are pointed to by roots. Given that explicit
|
||||
// marks only make sense under NTAMS it'd be nice if we could
|
||||
@ -84,11 +85,6 @@ inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
|
||||
// NTAMS to the end of the region so all marks will be below
|
||||
// NTAMS. We'll set it to the actual top when we retire this region.
|
||||
_next_top_at_mark_start = end();
|
||||
}
|
||||
} else {
|
||||
if (is_survivor()) {
|
||||
// This is how we always allocate survivors.
|
||||
assert(_next_top_at_mark_start == bottom(), "invariant");
|
||||
} else {
|
||||
// We could have re-used this old region as to-space over a
|
||||
// couple of GCs since the start of the concurrent marking
|
||||
@ -101,19 +97,15 @@ inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
|
||||
}
|
||||
|
||||
inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
|
||||
if (during_initial_mark) {
|
||||
if (is_survivor()) {
|
||||
assert(false, "should not allocate survivors during IM");
|
||||
} else {
|
||||
if (is_survivor()) {
|
||||
// This is how we always allocate survivors.
|
||||
assert(_next_top_at_mark_start == bottom(), "invariant");
|
||||
} else {
|
||||
if (during_initial_mark) {
|
||||
// See the comment for note_start_of_copying() for the details
|
||||
// on this.
|
||||
assert(_next_top_at_mark_start == end(), "pre-condition");
|
||||
_next_top_at_mark_start = top();
|
||||
}
|
||||
} else {
|
||||
if (is_survivor()) {
|
||||
// This is how we always allocate survivors.
|
||||
assert(_next_top_at_mark_start == bottom(), "invariant");
|
||||
} else {
|
||||
// See the comment for note_start_of_copying() for the details
|
||||
// on this.
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -59,6 +59,7 @@ class HRSPhaseSetter;
|
||||
class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
|
||||
friend class hrs_ext_msg;
|
||||
friend class HRSPhaseSetter;
|
||||
friend class VMStructs;
|
||||
|
||||
protected:
|
||||
static size_t calculate_region_num(HeapRegion* hr);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -40,6 +40,8 @@
|
||||
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
|
||||
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
|
||||
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
|
||||
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
|
||||
nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \
|
||||
\
|
||||
nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \
|
||||
nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \
|
||||
@ -47,6 +49,10 @@
|
||||
nonstatic_field(G1MonitoringSupport, _survivor_used, size_t) \
|
||||
nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \
|
||||
nonstatic_field(G1MonitoringSupport, _old_used, size_t) \
|
||||
\
|
||||
nonstatic_field(HeapRegionSetBase, _length, size_t) \
|
||||
nonstatic_field(HeapRegionSetBase, _region_num, size_t) \
|
||||
nonstatic_field(HeapRegionSetBase, _total_used_bytes, size_t) \
|
||||
|
||||
|
||||
#define VM_TYPES_G1(declare_type, declare_toplevel_type) \
|
||||
@ -55,6 +61,7 @@
|
||||
\
|
||||
declare_type(HeapRegion, ContiguousSpace) \
|
||||
declare_toplevel_type(HeapRegionSeq) \
|
||||
declare_toplevel_type(HeapRegionSetBase) \
|
||||
declare_toplevel_type(G1MonitoringSupport) \
|
||||
\
|
||||
declare_toplevel_type(G1CollectedHeap*) \
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,8 +74,9 @@ void VM_G1IncCollectionPause::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(!_should_initiate_conc_mark ||
((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
(_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
"only a GC locker or a System.gc() induced GC should start a cycle");
(_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
_gc_cause == GCCause::_g1_humongous_allocation),
"only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");

if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,6 +84,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _g1_inc_collection_pause:
return "G1 Evacuation Pause";

case _g1_humongous_allocation:
return "G1 Humongous Allocation";

case _last_ditch_collection:
return "Last ditch collection";

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,6 +66,7 @@ class GCCause : public AllStatic {
_adaptive_size_policy,

_g1_inc_collection_pause,
_g1_humongous_allocation,

_last_ditch_collection,
_last_gc_cause

@ -859,7 +859,9 @@ IRT_ENTRY(nmethod*,
const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;

assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, thread);
assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");

if (osr_nm != NULL) {
// We may need to do on-stack replacement which requires that no

@ -158,6 +158,9 @@ klassOop Klass::base_create_klass_oop(KlassHandle& klass, int size,
kl->set_next_sibling(NULL);
kl->set_alloc_count(0);
kl->set_alloc_size(0);
#ifdef TRACE_SET_KLASS_TRACE_ID
TRACE_SET_KLASS_TRACE_ID(kl, 0);
#endif

kl->set_prototype_header(markOopDesc::prototype());
kl->set_biased_lock_revocation_count(0);

@ -33,6 +33,7 @@
#include "oops/klassPS.hpp"
#include "oops/oop.hpp"
#include "runtime/orderAccess.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/accessFlags.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
@ -80,6 +81,7 @@
// [last_biased_lock_bulk_revocation_time] (64 bits)
// [prototype_header]
// [biased_lock_revocation_count]
// [trace_id]


// Forward declarations.
@ -263,6 +265,9 @@ class Klass : public Klass_vtbl {
markOop _prototype_header; // Used when biased locking is both enabled and disabled for this type
jint _biased_lock_revocation_count;

#ifdef TRACE_DEFINE_KLASS_TRACE_ID
TRACE_DEFINE_KLASS_TRACE_ID;
#endif
public:

// returns the enclosing klassOop
@ -683,6 +688,9 @@ class Klass : public Klass_vtbl {
jlong last_biased_lock_bulk_revocation_time() { return _last_biased_lock_bulk_revocation_time; }
void set_last_biased_lock_bulk_revocation_time(jlong cur_time) { _last_biased_lock_bulk_revocation_time = cur_time; }

#ifdef TRACE_DEFINE_KLASS_METHODS
TRACE_DEFINE_KLASS_METHODS;
#endif

// garbage collection support
virtual void follow_weak_klass_links(

@ -83,6 +83,7 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
m->set_max_stack(0);
m->set_max_locals(0);
m->set_intrinsic_id(vmIntrinsics::_none);
m->set_jfr_towrite(false);
m->set_method_data(NULL);
m->set_interpreter_throwout_count(0);
m->set_vtable_index(methodOopDesc::garbage_vtable_index);

@ -77,7 +77,7 @@
// | method_size | max_stack |
// | max_locals | size_of_parameters |
// |------------------------------------------------------|
// | intrinsic_id, (unused) | throwout_count |
// |intrinsic_id| flags | throwout_count |
// |------------------------------------------------------|
// | num_breakpoints | (unused) |
// |------------------------------------------------------|
@ -124,6 +124,8 @@ class methodOopDesc : public oopDesc {
u2 _max_locals; // Number of local variables used by this method
u2 _size_of_parameters; // size of the parameter block (receiver + arguments) in words
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
u1 _jfr_towrite : 1, // Flags
: 7;
u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
u2 _number_of_breakpoints; // fullspeed debugging support
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
@ -225,6 +227,7 @@ class methodOopDesc : public oopDesc {
void clear_number_of_breakpoints() { _number_of_breakpoints = 0; }

// index into instanceKlass methods() array
// note: also used by jfr
u2 method_idnum() const { return constMethod()->method_idnum(); }
void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }

@ -650,6 +653,9 @@ class methodOopDesc : public oopDesc {
void init_intrinsic_id(); // updates from _none if a match
static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);

bool jfr_towrite() { return _jfr_towrite; }
void set_jfr_towrite(bool towrite) { _jfr_towrite = towrite; }

// On-stack replacement support
bool has_osr_nmethod(int level, bool match_level) {
return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;

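The methodOopDesc hunk above turns the byte the layout comment previously marked (unused) into a flags byte, packing the new JFR flag into a single bit (u1 _jfr_towrite : 1, followed by 7 spare bits). A tiny standalone sketch of that bit-field layout, with u1 spelled as unsigned char and a stand-in struct name:

#include <cstdio>

typedef unsigned char u1;   // HotSpot-style one-byte unsigned type

struct MethodFlags {        // hypothetical holder, not methodOopDesc itself
  u1 _jfr_towrite : 1,      // single flag bit, as in the patched layout
     : 7;                   // remaining bits left free for future flags
};

int main() {
  MethodFlags f = {};
  f._jfr_towrite = 1;
  // The whole flags field still occupies one byte.
  std::printf("sizeof(MethodFlags)=%zu jfr_towrite=%u\n",
              sizeof(MethodFlags), (unsigned)f._jfr_towrite);
  return 0;
}
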
@ -284,13 +284,13 @@ class Block : public CFGElement {
// helper function that adds caller save registers to MachProjNode
void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
// Schedule a call next in the block
uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);
uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);

// Perform basic-block local scheduling
Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
// Cleanup if any code lands between a Call and his Catch
void call_catch_cleanup(Block_Array &bbs);
// Detect implicit-null-check opportunities. Basically, find NULL checks

@ -1344,8 +1344,8 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_

// Schedule locally. Right now a simple topological sort.
// Later, do a real latency aware scheduler.
int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique());
memset( ready_cnt, -1, C->unique() * sizeof(int) );
uint max_idx = C->unique();
GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
visited.Clear();
for (i = 0; i < _num_blocks; i++) {
if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {

@ -404,7 +404,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the most number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot) {
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {

// If only a single entry on the stack, use it
uint cnt = worklist.size();
@ -465,7 +465,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSe

// More than this instruction pending for successor to be ready,
// don't choose this if other opportunities are ready
if (ready_cnt[use->_idx] > 1)
if (ready_cnt.at(use->_idx) > 1)
n_choice = 1;
}

@ -565,7 +565,7 @@ void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_p


//------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
RegMask regs;

// Schedule all the users of the call right now. All the users are
@ -574,8 +574,9 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
Node* n = mcall->fast_out(i);
assert( n->is_MachProj(), "" );
--ready_cnt[n->_idx];
assert( !ready_cnt[n->_idx], "" );
int n_cnt = ready_cnt.at(n->_idx)-1;
ready_cnt.at_put(n->_idx, n_cnt);
assert( n_cnt == 0, "" );
// Schedule next to call
_nodes.map(node_cnt++, n);
// Collect defined registers
@ -590,7 +591,9 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
Node* m = n->fast_out(j); // Get user
if( bbs[m->_idx] != this ) continue;
if( m->is_Phi() ) continue;
if( !--ready_cnt[m->_idx] )
int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt);
if( m_cnt == 0 )
worklist.push(m);
}

@ -655,7 +658,7 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_

//------------------------------schedule_local---------------------------------
// Topological sort within a block. Someday become a real scheduler.
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, VectorSet &next_call) {
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
// Already "sorted" are the block start Node (as the first entry), and
// the block-ending Node and any trailing control projections. We leave
// these alone. PhiNodes and ParmNodes are made to follow the block start
@ -695,7 +698,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
local++; // One more block-local input
}
ready_cnt[n->_idx] = local; // Count em up
ready_cnt.at_put(n->_idx, local); // Count em up

#ifdef ASSERT
if( UseConcMarkSweepGC || UseG1GC ) {
@ -729,7 +732,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
}
}
for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
ready_cnt[_nodes[i2]->_idx] = 0;
ready_cnt.at_put(_nodes[i2]->_idx, 0);

// All the prescheduled guys do not hold back internal nodes
uint i3;
@ -737,8 +740,10 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
Node *n = _nodes[i3]; // Get pre-scheduled
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j);
if( cfg->_bbs[m->_idx] ==this ) // Local-block user
ready_cnt[m->_idx]--; // Fix ready count
if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count
}
}
}

@ -747,7 +752,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
Node_List worklist;
for(uint i4=i3; i4<node_cnt; i4++ ) { // Put ready guys on worklist
Node *m = _nodes[i4];
if( !ready_cnt[m->_idx] ) { // Zero ready count?
if( !ready_cnt.at(m->_idx) ) { // Zero ready count?
if (m->is_iteratively_computed()) {
// Push induction variable increments last to allow other uses
// of the phi to be scheduled first. The select() method breaks
@ -775,14 +780,14 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
for (uint j=0; j<_nodes.size(); j++) {
Node *n = _nodes[j];
int idx = n->_idx;
tty->print("# ready cnt:%3d ", ready_cnt[idx]);
tty->print("# ready cnt:%3d ", ready_cnt.at(idx));
tty->print("latency:%3d ", cfg->_node_latency->at_grow(idx));
tty->print("%4d: %s\n", idx, n->Name());
}
}
#endif

uint max_idx = matcher.C->unique();
uint max_idx = (uint)ready_cnt.length();
// Pull from worklist and schedule
while( worklist.size() ) { // Worklist is not ready

@ -840,11 +845,13 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
Node* m = n->fast_out(i5); // Get user
if( cfg->_bbs[m->_idx] != this ) continue;
if( m->is_Phi() ) continue;
if (m->_idx > max_idx) { // new node, skip it
if (m->_idx >= max_idx) { // new node, skip it
assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
continue;
}
if( !--ready_cnt[m->_idx] )
int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt);
if( m_cnt == 0 )
worklist.push(m);
}
}

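As context for the ready_cnt hunks above: the local scheduler's per-node ready counts move from a raw int* sized once up front to a GrowableArray<int> addressed through at()/at_put(), so nodes created after sizing can be detected by index instead of read past the end of the array. A minimal standalone sketch of that access pattern, using a stand-in container (std::vector) rather than HotSpot's GrowableArray:

#include <cassert>
#include <cstdio>
#include <vector>

// Stand-in for GrowableArray<int>(max_idx, max_idx, -1): fixed logical
// length, every slot pre-filled with -1.
struct ReadyCounts {
  std::vector<int> data;
  explicit ReadyCounts(size_t max_idx) : data(max_idx, -1) {}
  int  at(size_t idx) const          { assert(idx < data.size()); return data[idx]; }
  void at_put(size_t idx, int value) { assert(idx < data.size()); data[idx] = value; }
  size_t length() const              { return data.size(); }
};

int main() {
  ReadyCounts ready_cnt(4);
  ready_cnt.at_put(2, 3);            // "count em up" for the node with index 2

  size_t use_idx = 2;
  size_t max_idx = ready_cnt.length();
  if (use_idx >= max_idx) {
    return 0;                        // node created after sizing: skip it, as the patched loop does
  }
  int cnt = ready_cnt.at(use_idx) - 1;  // decrement-and-test, replacing --ready_cnt[idx]
  ready_cnt.at_put(use_idx, cnt);
  if (cnt == 0) {
    std::printf("node %zu is ready\n", use_idx);
  }
  return 0;
}
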
@ -1718,8 +1718,10 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
if (ReduceFieldZeroing || is_instance) {
Node* value = can_see_stored_value(mem,phase);
if (value != NULL && value->is_Con())
if (value != NULL && value->is_Con()) {
assert(value->bottom_type()->higher_equal(_type),"sanity");
return value->bottom_type();
}
}

if (is_instance) {
@ -1759,6 +1761,20 @@ Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadBNode::Value(PhaseTransform *phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
if (value != NULL && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
// it must be truncated. We can't delay until Ideal call since
// a singleton Value is needed for split_thru_phi optimization.
int con = value->get_int();
return TypeInt::make((con << 24) >> 24);
}
return LoadNode::Value(phase);
}

//--------------------------LoadUBNode::Ideal-------------------------------------
//
// If the previous store is to the same address as this load,
@ -1775,6 +1791,20 @@ Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadUBNode::Value(PhaseTransform *phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
if (value != NULL && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
// it must be truncated. We can't delay until Ideal call since
// a singleton Value is needed for split_thru_phi optimization.
int con = value->get_int();
return TypeInt::make(con & 0xFF);
}
return LoadNode::Value(phase);
}

//--------------------------LoadUSNode::Ideal-------------------------------------
//
// If the previous store is to the same address as this load,
@ -1791,6 +1821,20 @@ Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadUSNode::Value(PhaseTransform *phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
if (value != NULL && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
// it must be truncated. We can't delay until Ideal call since
// a singleton Value is needed for split_thru_phi optimization.
int con = value->get_int();
return TypeInt::make(con & 0xFFFF);
}
return LoadNode::Value(phase);
}

//--------------------------LoadSNode::Ideal--------------------------------------
//
// If the previous store is to the same address as this load,
@ -1809,6 +1853,20 @@ Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return LoadNode::Ideal(phase, can_reshape);
}

const Type* LoadSNode::Value(PhaseTransform *phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
if (value != NULL && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
// it must be truncated. We can't delay until Ideal call since
// a singleton Value is needed for split_thru_phi optimization.
int con = value->get_int();
return TypeInt::make((con << 16) >> 16);
}
return LoadNode::Value(phase);
}

//=============================================================================
//----------------------------LoadKlassNode::make------------------------------
// Polymorphic factory method:

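The Value() overrides added above all follow one pattern: when the constant seen through the store is wider than the load, fold it down to the load's width. The truncations are ordinary integer arithmetic; a small standalone program, reusing the same expressions with an arbitrary example constant, shows what each one yields:

#include <cstdio>

int main() {
  int con = 0x1234ABCD;  // example stored constant, wider than any of the loads below

  int as_byte   = (con << 24) >> 24;  // LoadB  (signed 8-bit):    -51  (0xCD sign-extended)
  int as_ubyte  = con & 0xFF;         // LoadUB (unsigned 8-bit):  205  (0xCD)
  int as_ushort = con & 0xFFFF;       // LoadUS (unsigned 16-bit): 43981 (0xABCD)
  int as_short  = (con << 16) >> 16;  // LoadS  (signed 16-bit):   -21555 (0xABCD sign-extended)

  std::printf("B=%d UB=%d US=%d S=%d\n", as_byte, as_ubyte, as_ushort, as_short);
  return 0;
}
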
@ -215,6 +215,7 @@ public:
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
virtual int store_Opcode() const { return Op_StoreB; }
virtual BasicType memory_type() const { return T_BYTE; }
};
@ -228,6 +229,7 @@ public:
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
virtual int store_Opcode() const { return Op_StoreB; }
virtual BasicType memory_type() const { return T_BYTE; }
};
@ -241,10 +243,25 @@ public:
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
virtual int store_Opcode() const { return Op_StoreC; }
virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
: LoadNode(c,mem,adr,at,ti) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type *Value(PhaseTransform *phase) const;
virtual int store_Opcode() const { return Op_StoreC; }
virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
@ -433,19 +450,6 @@ public:
};


//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
: LoadNode(c,mem,adr,at,ti) {}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual int store_Opcode() const { return Op_StoreC; }
virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {

@ -71,14 +71,14 @@ void Parse::do_checkcast() {
// Throw uncommon trap if class is not loaded or the value we are casting
// _from_ is not loaded, and value is not null. If the value _is_ NULL,
// then the checkcast does nothing.
const TypeInstPtr *tp = _gvn.type(obj)->isa_instptr();
if (!will_link || (tp && !tp->is_loaded())) {
const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
if (C->log() != NULL) {
if (!will_link) {
C->log()->elem("assert_null reason='checkcast' klass='%d'",
C->log()->identify(klass));
}
if (tp && !tp->is_loaded()) {
if (tp && tp->klass() && !tp->klass()->is_loaded()) {
// %%% Cannot happen?
C->log()->elem("assert_null reason='checkcast source' klass='%d'",
C->log()->identify(tp->klass()));

@ -48,6 +48,7 @@
#include "oops/typeArrayOop.hpp"
#include "prims/jni.h"
#include "prims/jniCheck.hpp"
#include "prims/jniExport.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
@ -66,6 +67,8 @@
#include "runtime/signature.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
#include "trace/traceEventTypes.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
@ -5139,6 +5142,11 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
if (JvmtiExport::should_post_thread_life()) {
JvmtiExport::post_thread_start(thread);
}

EVENT_BEGIN(TraceEventThreadStart, event);
EVENT_COMMIT(event,
EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));

// Check if we should compile all classes on bootclasspath
NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();)
// Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
@ -5337,6 +5345,10 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae
JvmtiExport::post_thread_start(thread);
}

EVENT_BEGIN(TraceEventThreadStart, event);
EVENT_COMMIT(event,
EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));

*(JNIEnv**)penv = thread->jni_environment();

// Now leaving the VM, so change thread_state. This is normally automatically taken care
@ -5464,8 +5476,7 @@ jint JNICALL jni_GetEnv(JavaVM *vm, void **penv, jint version) {
return ret;
}

if (JvmtiExport::is_jvmti_version(version)) {
ret = JvmtiExport::get_jvmti_interface(vm, penv, version);
if (JniExportedInterface::GetExportedInterface(vm, penv, version, &ret)) {
return ret;
}

hotspot/src/share/vm/prims/jniExport.hpp (new file, 42 lines)
@ -0,0 +1,42 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_VM_PRIMS_JNI_EXPORT_HPP
#define SHARE_VM_PRIMS_JNI_EXPORT_HPP

#include "prims/jni.h"
#include "prims/jvmtiExport.hpp"

class JniExportedInterface {
public:
static bool GetExportedInterface(JavaVM* vm, void** penv, jint version, jint* iface) {
if (JvmtiExport::is_jvmti_version(version)) {
*iface = JvmtiExport::get_jvmti_interface(vm, penv, version);
return true;
}
return false;
}
};

#endif // SHARE_VM_PRIMS_JNI_EXPORT_HPP

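A rough standalone sketch of how a GetEnv-style entry point can be routed through a helper shaped like the new JniExportedInterface, matching the jni.cpp hunk earlier in this diff; every type and function below is a simplified stand-in, not the real JNI/JVMTI API:

#include <cstdio>

// Simplified stand-ins for the JNI/JVMTI types involved.
typedef int jint;
struct JavaVM {};

namespace FakeJvmtiExport {              // stand-in for JvmtiExport
  bool is_jvmti_version(jint version)    { return (version & 0x30000000) != 0; }  // assumed version check
  jint get_jvmti_interface(JavaVM*, void** penv, jint) { *penv = nullptr; return 0; /* OK */ }
}

// Same structure as the new helper: recognize the requested version,
// fill in the interface pointer, report whether it was handled.
struct ExportedInterface {
  static bool GetExportedInterface(JavaVM* vm, void** penv, jint version, jint* iface) {
    if (FakeJvmtiExport::is_jvmti_version(version)) {
      *iface = FakeJvmtiExport::get_jvmti_interface(vm, penv, version);
      return true;
    }
    return false;
  }
};

// Caller in the style of the patched jni_GetEnv: try exported interfaces first,
// then fall through to the plain JNIEnv cases.
jint GetEnv(JavaVM* vm, void** penv, jint version) {
  jint ret = -3; // stand-in for JNI_EVERSION
  if (ExportedInterface::GetExportedInterface(vm, penv, version, &ret)) {
    return ret;
  }
  // ... handle ordinary JNI versions here ...
  return ret;
}

int main() {
  JavaVM vm;
  void* env = nullptr;
  std::printf("ret=%d\n", GetEnv(&vm, &env, 0x30010200 /* jvmti-style version */));
  return 0;
}
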
@ -319,6 +319,15 @@ void JvmtiThreadState::process_pending_step_for_popframe() {
// clearing the flag indicates we are done with the PopFrame() dance
clr_pending_step_for_popframe();

// If exception was thrown in this frame, need to reset jvmti thread state.
// Single stepping may not get enabled correctly by the agent since
// exception state is passed in MethodExit event which may be sent at some
// time in the future. JDWP agent ignores MethodExit events if caused by
// an exception.
//
if (is_exception_detected()) {
clear_exception_detected();
}
// If step is pending for popframe then it may not be
// a repeat step. The new_bci and method_id is same as current_bci
// and current method_id after pop and step for recursive calls.
@ -385,6 +394,15 @@ void JvmtiThreadState::process_pending_step_for_earlyret() {
// the ForceEarlyReturn() dance
clr_pending_step_for_earlyret();

// If exception was thrown in this frame, need to reset jvmti thread state.
// Single stepping may not get enabled correctly by the agent since
// exception state is passed in MethodExit event which may be sent at some
// time in the future. JDWP agent ignores MethodExit events if caused by
// an exception.
//
if (is_exception_detected()) {
clear_exception_detected();
}
// If step is pending for earlyret then it may not be a repeat step.
// The new_bci and method_id is same as current_bci and current
// method_id after earlyret and step for recursive calls.
Some files were not shown because too many files have changed in this diff.