Merge
commit be7804bbf5

@@ -0,0 +1,119 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

package sun.jvm.hotspot.gc_implementation.g1;

import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;

import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;

// Mirror class for G1HeapRegionTable. It's essentially an index -> HeapRegion map.

public class G1HeapRegionTable extends VMObject {
    // HeapRegion** _base;
    static private AddressField baseField;
    // uint _length;
    static private CIntegerField lengthField;
    // HeapRegion** _biased_base
    static private AddressField biasedBaseField;
    // size_t _bias
    static private CIntegerField biasField;
    // uint _shift_by
    static private CIntegerField shiftByField;

    static {
        VM.registerVMInitializedObserver(new Observer() {
                public void update(Observable o, Object data) {
                    initialize(VM.getVM().getTypeDataBase());
                }
            });
    }

    static private synchronized void initialize(TypeDataBase db) {
        Type type = db.lookupType("G1HeapRegionTable");

        baseField = type.getAddressField("_base");
        lengthField = type.getCIntegerField("_length");
        biasedBaseField = type.getAddressField("_biased_base");
        biasField = type.getCIntegerField("_bias");
        shiftByField = type.getCIntegerField("_shift_by");
    }

    private HeapRegion at(long index) {
        Address arrayAddr = baseField.getValue(addr);
        // Offset of &_base[index]
        long offset = index * VM.getVM().getAddressSize();
        Address regionAddr = arrayAddr.getAddressAt(offset);
        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
                                                      regionAddr);
    }

    public long length() {
        return lengthField.getValue(addr);
    }

    public long bias() {
        return biasField.getValue(addr);
    }

    public long shiftBy() {
        return shiftByField.getValue(addr);
    }

    private class HeapRegionIterator implements Iterator<HeapRegion> {
        private long index;
        private long length;

        @Override
        public boolean hasNext() { return index < length; }

        @Override
        public HeapRegion next() { return at(index++); }

        @Override
        public void remove() { /* not supported */ }

        HeapRegionIterator(Address addr) {
            index = 0;
            length = length();
        }
    }

    public Iterator<HeapRegion> heapRegionIterator() {
        return new HeapRegionIterator(addr);
    }

    public G1HeapRegionTable(Address addr) {
        super(addr);
    }
}
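For orientation: this new mirror class is what lets a Serviceability Agent tool walk G1's region table from outside the target VM. The following is a minimal sketch of how client code might consume it; the attach/bootstrap plumbing and the way tableAddr is obtained are assumptions for illustration, not part of this commit, and G1RegionTableDump is a hypothetical helper class.

    import java.util.Iterator;

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.gc_implementation.g1.G1HeapRegionTable;
    import sun.jvm.hotspot.gc_implementation.g1.HeapRegion;

    public class G1RegionTableDump {
        // 'tableAddr' is assumed to be the address of the target VM's
        // G1HeapRegionTable, found via the usual SA attach machinery.
        static void dump(Address tableAddr) {
            G1HeapRegionTable table = new G1HeapRegionTable(tableAddr);
            System.out.println("length=" + table.length()
                               + " bias=" + table.bias()
                               + " shiftBy=" + table.shiftBy());
            for (Iterator<HeapRegion> it = table.heapRegionIterator(); it.hasNext(); ) {
                HeapRegion hr = it.next(); // one mirror object per HeapRegion*
                System.out.println(hr);
            }
        }
    }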
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -37,13 +37,11 @@ import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;

// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.

public class HeapRegionSeq extends VMObject {
    // HeapRegion** _regions;
    static private AddressField regionsField;
    // uint _length;
    static private CIntegerField lengthField;
    // G1HeapRegionTable _regions
    static private long regionsFieldOffset;

    static {
        VM.registerVMInitializedObserver(new Observer() {
@@ -56,44 +54,21 @@ public class HeapRegionSeq extends VMObject {
    static private synchronized void initialize(TypeDataBase db) {
        Type type = db.lookupType("HeapRegionSeq");

        regionsField = type.getAddressField("_regions");
        lengthField = type.getCIntegerField("_length");
        regionsFieldOffset = type.getField("_regions").getOffset();
    }

    private HeapRegion at(long index) {
        Address arrayAddr = regionsField.getValue(addr);
        // Offset of &_region[index]
        long offset = index * VM.getVM().getAddressSize();
        Address regionAddr = arrayAddr.getAddressAt(offset);
        return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
                                                      regionAddr);
    private G1HeapRegionTable regions() {
        Address regionsAddr = addr.addOffsetTo(regionsFieldOffset);
        return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class,
                                                             regionsAddr);
    }

    public long length() {
        return lengthField.getValue(addr);
    }

    private class HeapRegionIterator implements Iterator<HeapRegion> {
        private long index;
        private long length;

        @Override
        public boolean hasNext() { return index < length; }

        @Override
        public HeapRegion next() { return at(index++); }

        @Override
        public void remove() { /* not supported */ }

        HeapRegionIterator(Address addr) {
            index = 0;
            length = length();
        }
        return regions().length();
    }

    public Iterator<HeapRegion> heapRegionIterator() {
        return new HeapRegionIterator(addr);
        return regions().heapRegionIterator();
    }

    public HeapRegionSeq(Address addr) {
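The interesting move in this rewrite is how the SA mirrors an embedded (by-value) C++ member: _regions is now a G1HeapRegionTable stored inline in HeapRegionSeq, so there is no pointer to load — the mirror computes the member's address as base plus field offset. Below is a hedged sketch of the general idiom; Outer, InnerMirror and "_inner" are illustrative names, not from this commit.

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.runtime.VMObject;
    import sun.jvm.hotspot.runtime.VMObjectFactory;
    import sun.jvm.hotspot.types.Type;
    import sun.jvm.hotspot.types.TypeDataBase;

    class InnerMirror extends VMObject {
        public InnerMirror(Address addr) { super(addr); }
    }

    public class Outer extends VMObject {
        static private long innerFieldOffset;

        static private synchronized void initialize(TypeDataBase db) {
            Type type = db.lookupType("Outer");
            // For an inline member we record its offset rather than an
            // AddressField: the member's bytes live inside Outer itself,
            // so there is no pointer to dereference.
            innerFieldOffset = type.getField("_inner").getOffset();
        }

        private InnerMirror inner() {
            // base address of the C++ object + offset of the inline member
            Address innerAddr = addr.addOffsetTo(innerFieldOffset);
            return (InnerMirror) VMObjectFactory.newObject(InnerMirror.class, innerAddr);
        }

        public Outer(Address addr) { super(addr); }
    }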
@@ -334,6 +334,11 @@ $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_BUILD_DIR)/%.diz
    $(install-file)
$(EXPORT_SERVER_DIR)/64/%.diz: $(C2_BUILD_DIR)/%.diz
    $(install-file)
# MacOS X
$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
    $(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(C2_BUILD_DIR)/%.dSYM
    $(install-dir)
endif

# Client (C1)
@@ -379,6 +384,11 @@ $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_BUILD_DIR)/%.diz
    $(install-file)
$(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_BUILD_DIR)/%.diz
    $(install-file)
# MacOS X
$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM
    $(install-dir)
$(EXPORT_CLIENT_DIR)/%.dSYM: $(C1_BUILD_DIR)/%.dSYM
    $(install-dir)
endif

# Minimal1
@@ -424,6 +434,7 @@ $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
    $(install-file)
$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_BUILD_DIR)/%.diz
    $(install-file)
# MacOS X does not support Minimal1 config
endif

# Zero
@@ -446,6 +457,11 @@ $(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_BUILD_DIR)/%.debuginfo
    $(install-file)
$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_BUILD_DIR)/%.diz
    $(install-file)
# MacOS X
$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
    $(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(ZERO_BUILD_DIR)/%.dSYM
    $(install-dir)
endif

# Shark
@@ -468,6 +484,11 @@ $(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_BUILD_DIR)/%.debuginfo
    $(install-file)
$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_BUILD_DIR)/%.diz
    $(install-file)
# MacOS X
$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM
    $(install-dir)
$(EXPORT_SERVER_DIR)/%.dSYM: $(SHARK_BUILD_DIR)/%.dSYM
    $(install-dir)
endif

$(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
@@ -204,6 +204,7 @@ TARGETS_MINIMAL1 = $(addsuffix minimal1,$(TARGETS))
BUILDTREE_MAKE = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
BUILDTREE_VARS += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE)

BUILDTREE = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)

@@ -337,9 +338,11 @@ treeminimal1: $(SUBDIRS_MINIMAL1)

# Doc target. This is the same for all build options.
# Hence create a docs directory beside ...$(ARCH)_[...]
# We specify 'BUILD_FLAVOR=product' so that the proper
# ENABLE_FULL_DEBUG_SYMBOLS value is used.
docs: checks
    $(QUIETLY) mkdir -p $(SUBDIR_DOCS)
    $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs
    $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs

# Synonyms for win32-like targets.
compiler2: debug product
@@ -261,6 +261,16 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
    echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
    [ -n "$(CFLAGS_BROWSE)" ] && \
        echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
    [ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \
        echo && echo "ENABLE_FULL_DEBUG_SYMBOLS = $(ENABLE_FULL_DEBUG_SYMBOLS)"; \
    [ -n "$(OBJCOPY)" ] && \
        echo && echo "OBJCOPY = $(OBJCOPY)"; \
    [ -n "$(STRIP_POLICY)" ] && \
        echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
    [ -n "$(ZIP_DEBUGINFO_FILES)" ] && \
        echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
    [ -n "$(ZIPEXE)" ] && \
        echo && echo "ZIPEXE = $(ZIPEXE)"; \
    [ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
        echo && \
        echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
@@ -136,10 +136,127 @@ ifeq ($(JVM_VARIANTS),)
  endif
endif

OS_VENDOR:=$(shell uname -s)

# determine if HotSpot is being built in JDK6 or earlier version
JDK6_OR_EARLIER=0
ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
  # if the longer variable names (newer build style) are set, then check those
  ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
    JDK6_OR_EARLIER=1
  endif
else
  # the longer variables aren't set so check the shorter variable names
  ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
    JDK6_OR_EARLIER=1
  endif
endif

ifeq ($(JDK6_OR_EARLIER),0)
  # Full Debug Symbols is supported on JDK7 or newer.
  # The Full Debug Symbols (FDS) default for BUILD_FLAVOR == product
  # builds is enabled with debug info files ZIP'ed to save space. For
  # BUILD_FLAVOR != product builds, FDS is always enabled, after all a
  # debug build without debug info isn't very useful.
  # The ZIP_DEBUGINFO_FILES option only has meaning when FDS is enabled.
  #
  # If you invoke a build with FULL_DEBUG_SYMBOLS=0, then FDS will be
  # disabled for a BUILD_FLAVOR == product build.
  #
  # Note: Use of a different variable name for the FDS override option
  # versus the FDS enabled check is intentional (FULL_DEBUG_SYMBOLS
  # versus ENABLE_FULL_DEBUG_SYMBOLS). For auto build systems that pass
  # in options via environment variables, use of distinct variables
  # prevents strange behaviours. For example, in a BUILD_FLAVOR !=
  # product build, the FULL_DEBUG_SYMBOLS environment variable will be
  # 0, but the ENABLE_FULL_DEBUG_SYMBOLS make variable will be 1. If
  # the same variable name is used, then different values can be picked
  # up by different parts of the build. Just to be clear, we only need
  # two variable names because the incoming option value can be
  # overridden in some situations, e.g., a BUILD_FLAVOR != product
  # build.

  # Due to the multiple sub-make processes that occur this logic gets
  # executed multiple times. We reduce the noise by at least checking that
  # BUILD_FLAVOR has been set.
  ifneq ($(BUILD_FLAVOR),)
    ifeq ($(BUILD_FLAVOR), product)
      FULL_DEBUG_SYMBOLS ?= 1
      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
    else
      # debug variants always get Full Debug Symbols (if available)
      ENABLE_FULL_DEBUG_SYMBOLS = 1
    endif
    _JUNK_ := $(shell \
      echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
    # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later

    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
      ifeq ($(OS_VENDOR), Darwin)
        # MacOS X doesn't use OBJCOPY or STRIP_POLICY
        OBJCOPY=
        STRIP_POLICY=
        ZIP_DEBUGINFO_FILES ?= 1
      else
        # Default OBJCOPY comes from GNU Binutils on BSD
        ifeq ($(CROSS_COMPILE_ARCH),)
          DEF_OBJCOPY=/usr/bin/objcopy
        else
          # Assume objcopy is part of the cross-compilation toolset
          ifneq ($(ALT_COMPILER_PATH),)
            DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy
          endif
        endif
        OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
        ifneq ($(ALT_OBJCOPY),)
          _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
          OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
        endif

        ifeq ($(OBJCOPY),)
          _JUNK_ := $(shell \
            echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo" \
              "files. You may need to set ALT_OBJCOPY.")
          ENABLE_FULL_DEBUG_SYMBOLS=0
          _JUNK_ := $(shell \
            echo >&2 "INFO:" \
              "ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
        else
          _JUNK_ := $(shell \
            echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo" \
              "files.")

          # Library stripping policies for .debuginfo configs:
          #   all_strip - strips everything from the library
          #   min_strip - strips most stuff from the library; leaves
          #               minimum symbols
          #   no_strip  - does not strip the library at all
          #
          # Oracle security policy requires "all_strip". A waiver was
          # granted on 2011.09.01 that permits using "min_strip" in the
          # Java JDK and Java JRE.
          #
          # Currently, STRIP_POLICY is only used when Full Debug Symbols
          # is enabled.
          #
          STRIP_POLICY ?= min_strip

          _JUNK_ := $(shell \
            echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")

          ZIP_DEBUGINFO_FILES ?= 1
        endif

        _JUNK_ := $(shell \
          echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
      endif
    endif # ENABLE_FULL_DEBUG_SYMBOLS=1
  endif # BUILD_FLAVOR
endif # JDK_6_OR_EARLIER

JDK_INCLUDE_SUBDIR=bsd

# Library suffix
OS_VENDOR:=$(shell uname -s)
ifeq ($(OS_VENDOR),Darwin)
  LIBRARY_SUFFIX=dylib
else
@@ -150,6 +267,19 @@ EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html

# client and server subdirectories have symbolic links to ../libjsig.so
EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)

ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(ZIP_DEBUGINFO_FILES),1)
    EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
  else
    ifeq ($(OS_VENDOR), Darwin)
      EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
    else
      EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
    endif
  endif
endif

EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
@@ -157,34 +287,76 @@ EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
  EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
  EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)

  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
      EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
    else
      ifeq ($(OS_VENDOR), Darwin)
        EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM
      else
        EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
      endif
    endif
  endif
endif

ifeq ($(JVM_VARIANT_CLIENT),true)
  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)

  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
    else
      ifeq ($(OS_VENDOR), Darwin)
        EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM
      else
        EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
      endif
    endif
  endif
endif

ifeq ($(JVM_VARIANT_MINIMAL1),true)
  EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt
  EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX)

  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
      EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.diz
    else
      EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
    endif
  endif
endif

# Serviceability Binaries
# No SA Support for PPC, IA64, ARM or zero
ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                      $(EXPORT_LIB_DIR)/sa-jdi.jar

ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(ZIP_DEBUGINFO_FILES),1)
    ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
  else
    ifeq ($(OS_VENDOR), Darwin)
      ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
    else
      ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
    endif
  endif
endif

ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                        $(EXPORT_LIB_DIR)/sa-jdi.jar
ADD_SA_BINARIES/universal = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                            $(EXPORT_LIB_DIR)/sa-jdi.jar

ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(ZIP_DEBUGINFO_FILES),1)
    ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
  else
    ifeq ($(OS_VENDOR), Darwin)
      ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
    else
      ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
    endif
  endif
endif

ADD_SA_BINARIES/ppc =
ADD_SA_BINARIES/ia64 =
ADD_SA_BINARIES/arm =
@@ -225,6 +397,19 @@ ifeq ($(OS_VENDOR), Darwin)
  # Files to simply copy in place
  UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
  UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.diz
      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.diz
      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.diz
      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.diz
    else
      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM
      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM
      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
    endif
  endif

endif
endif
@@ -39,9 +39,15 @@ DtraceOutDir = $(GENERATED)/dtracefiles
JVM_DB = libjvm_db
LIBJVM_DB = libjvm_db.dylib

LIBJVM_DB_DEBUGINFO = libjvm_db.dylib.dSYM
LIBJVM_DB_DIZ       = libjvm_db.diz

JVM_DTRACE = jvm_dtrace
LIBJVM_DTRACE = libjvm_dtrace.dylib

LIBJVM_DTRACE_DEBUGINFO = libjvm_dtrace.dylib.dSYM
LIBJVM_DTRACE_DIZ       = libjvm_dtrace.diz

JVMOFFS = JvmOffsets
JVMOFFS.o = $(JVMOFFS).o
GENOFFS = generate$(JVMOFFS)
@@ -76,21 +82,87 @@ ISA = $(subst i386,i486,$(BUILDARCH))
# Making 64/libjvm_db.so: 64-bit version of libjvm_db.so which handles 32-bit libjvm.so
ifneq ("${ISA}","${BUILDARCH}")

XLIBJVM_DB = 64/$(LIBJVM_DB)
XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
XLIBJVM_DIR = 64
XLIBJVM_DB = $(XLIBJVM_DIR)/$(LIBJVM_DB)
XLIBJVM_DTRACE = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE)
XARCH = $(subst sparcv9,v9,$(shell echo $(ISA)))

XLIBJVM_DB_DEBUGINFO     = $(XLIBJVM_DIR)/$(LIBJVM_DB_DEBUGINFO)
XLIBJVM_DB_DIZ           = $(XLIBJVM_DIR)/$(LIBJVM_DB_DIZ)
XLIBJVM_DTRACE_DEBUGINFO = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO)
XLIBJVM_DTRACE_DIZ       = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ)

$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
    @echo Making $@
    $(QUIETLY) mkdir -p 64/ ; \
    $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
    $(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. -I$(GENERATED) \
        $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c #-lc
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(OS_VENDOR), Darwin)
    $(DSYMUTIL) $@
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    # is not in the archived name:
    ( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
    $(RM) -r $(XLIBJVM_DB_DEBUGINFO)
    endif
  else
    $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO)
    # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    # is not in the link name:
    $(QUIETLY) ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
    ifeq ($(STRIP_POLICY),all_strip)
      $(QUIETLY) $(STRIP) $@
    else
      ifeq ($(STRIP_POLICY),min_strip)
        $(QUIETLY) $(STRIP) -x $@
      # implied else here is no stripping at all
      endif
    endif
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    # is not in the archived name:
    ( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
    $(RM) $(XLIBJVM_DB_DEBUGINFO)
    endif
  endif
endif

$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
    @echo Making $@
    $(QUIETLY) mkdir -p 64/ ; \
    $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
    $(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. \
        $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(OS_VENDOR), Darwin)
    $(DSYMUTIL) $@
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    # is not in the archived name:
    ( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
    $(RM) -r $(XLIBJVM_DTRACE_DEBUGINFO)
    endif
  else
    $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
    # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    # is not in the link name:
    ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
    ifeq ($(STRIP_POLICY),all_strip)
      $(QUIETLY) $(STRIP) $@
    else
      ifeq ($(STRIP_POLICY),min_strip)
        $(QUIETLY) $(STRIP) -x $@
      # implied else here is no stripping at all
      endif
    endif
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    # is not in the archived name:
    ( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
    $(RM) $(XLIBJVM_DTRACE_DEBUGINFO)
    endif
  endif
endif

endif # ifneq ("${ISA}","${BUILDARCH}")

@@ -134,11 +206,59 @@ $(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_D
    @echo Making $@
    $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
        $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -Wall # -lc
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(OS_VENDOR), Darwin)
    $(DSYMUTIL) $@
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
    $(RM) -r $(LIBJVM_DB_DEBUGINFO)
    endif
  else
    $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
    $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
    ifeq ($(STRIP_POLICY),all_strip)
      $(QUIETLY) $(STRIP) $@
    else
      ifeq ($(STRIP_POLICY),min_strip)
        $(QUIETLY) $(STRIP) -x $@
      # implied else here is no stripping at all
      endif
    endif
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
    $(RM) $(LIBJVM_DB_DEBUGINFO)
    endif
  endif
endif

$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
    @echo Making $@
    $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \
        $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(OS_VENDOR), Darwin)
    $(DSYMUTIL) $@
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO)
    $(RM) -r $(LIBJVM_DTRACE_DEBUGINFO)
    endif
  else
    $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
    $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
    ifeq ($(STRIP_POLICY),all_strip)
      $(QUIETLY) $(STRIP) $@
    else
      ifeq ($(STRIP_POLICY),min_strip)
        $(QUIETLY) $(STRIP) -x $@
      # implied else here is no stripping at all
      endif
    endif
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO)
    $(RM) $(LIBJVM_DTRACE_DEBUGINFO)
    endif
  endif
endif

#$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
#             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
@@ -83,6 +83,11 @@ ifeq ($(SPEC),)
    AS = $(CC) -c
  endif

  ifeq ($(OS_VENDOR), Darwin)
    ifeq ($(DSYMUTIL),)
      DSYMUTIL=dsymutil
    endif
  endif

  ifeq ($(USE_CLANG), true)
    CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
@@ -434,6 +439,36 @@ else
  ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
    DEBUG_CFLAGS += -gstabs
  endif

  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    FASTDEBUG_CFLAGS/ia64  = -g
    FASTDEBUG_CFLAGS/amd64 = -g
    FASTDEBUG_CFLAGS/arm   = -g
    FASTDEBUG_CFLAGS/ppc   = -g
    FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
    ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
      ifeq ($(USE_CLANG), true)
        # Clang doesn't understand -gstabs
        FASTDEBUG_CFLAGS += -g
      else
        FASTDEBUG_CFLAGS += -gstabs
      endif
    endif

    OPT_CFLAGS/ia64  = -g
    OPT_CFLAGS/amd64 = -g
    OPT_CFLAGS/arm   = -g
    OPT_CFLAGS/ppc   = -g
    OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
    ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
      ifeq ($(USE_CLANG), true)
        # Clang doesn't understand -gstabs
        OPT_CFLAGS += -g
      else
        OPT_CFLAGS += -gstabs
      endif
    endif
  endif
endif

# If we are building HEADLESS, pass on to VM
@@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -29,13 +29,21 @@ JSIG = jsig

ifeq ($(OS_VENDOR), Darwin)
  LIBJSIG = lib$(JSIG).dylib

  LIBJSIG_DEBUGINFO = lib$(JSIG).dylib.dSYM
  LIBJSIG_DIZ       = lib$(JSIG).diz
else
  LIBJSIG = lib$(JSIG).so

  LIBJSIG_DEBUGINFO = lib$(JSIG).debuginfo
  LIBJSIG_DIZ       = lib$(JSIG).diz
endif

JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm

DEST_JSIG           = $(JDK_LIBDIR)/$(LIBJSIG)
DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
DEST_JSIG_DIZ       = $(JDK_LIBDIR)/$(LIBJSIG_DIZ)

LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig

@@ -55,9 +63,42 @@ $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
    @echo Making signal interposition lib...
    $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
        $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $<
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(OS_VENDOR), Darwin)
    $(DSYMUTIL) $@
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -r -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
    $(RM) -r $(LIBJSIG_DEBUGINFO)
    endif
  else
    $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
    $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
    ifeq ($(STRIP_POLICY),all_strip)
      $(QUIETLY) $(STRIP) $@
    else
      ifeq ($(STRIP_POLICY),min_strip)
        $(QUIETLY) $(STRIP) -g $@
      # implied else here is no stripping at all
      endif
    endif
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
    $(RM) $(LIBJSIG_DEBUGINFO)
    endif
  endif
endif

install_jsig: $(LIBJSIG)
    @echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
ifeq ($(OS_VENDOR), Darwin)
    $(QUIETLY) test -d $(LIBJSIG_DEBUGINFO) && \
        cp -f -r $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
else
    $(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
        cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
endif
    $(QUIETLY) test -f $(LIBJSIG_DIZ) && \
        cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
    $(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"

.PHONY: install_jsig
@@ -1,5 +1,5 @@
#
# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -43,15 +43,17 @@ MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-product
SYSDEFS += -DPRODUCT
VERSION = optimized

# use -g to strip library as -x will discard its symbol table; -x is fine for
# executables.
ifdef CROSS_COMPILE_ARCH
  STRIP = $(ALT_COMPILER_PATH)/strip
else
  STRIP = strip
endif
STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
STRIP_AOUT = $(STRIP) -x $@ || exit 1;
ifneq ($(OS_VENDOR), Darwin)
  # use -g to strip library as -x will discard its symbol table; -x is fine for
  # executables.
  ifdef CROSS_COMPILE_ARCH
    STRIP = $(ALT_COMPILER_PATH)/strip
  else
    STRIP = strip
  endif
  STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
  STRIP_AOUT = $(STRIP) -x $@ || exit 1;

  # Don't strip in VM build; JDK build will strip libraries later
  # LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
endif
@@ -28,9 +28,15 @@
SAPROC = saproc

ifeq ($(OS_VENDOR), Darwin)
  LIBSAPROC = lib$(SAPROC).dylib

  LIBSAPROC_DEBUGINFO = lib$(SAPROC).dylib.dSYM
  LIBSAPROC_DIZ       = lib$(SAPROC).diz
else
  LIBSAPROC = lib$(SAPROC).so

  LIBSAPROC_DEBUGINFO = lib$(SAPROC).debuginfo
  LIBSAPROC_DIZ       = lib$(SAPROC).diz
endif

AGENT_DIR = $(GAMMADIR)/agent
@@ -70,7 +76,9 @@ endif

SAMAPFILE = $(SASRCDIR)/mapfile

DEST_SAPROC           = $(JDK_LIBDIR)/$(LIBSAPROC)
DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
DEST_SAPROC_DIZ       = $(JDK_LIBDIR)/$(LIBSAPROC_DIZ)

# DEBUG_BINARIES overrides everything, use full -g debug information
ifeq ($(DEBUG_BINARIES), true)
@@ -117,11 +125,42 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
        $(SA_DEBUG_CFLAGS) \
        -o $@ \
        $(SALIBS)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(OS_VENDOR), Darwin)
    $(DSYMUTIL) $@
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -r -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
    $(RM) -r $(LIBSAPROC_DEBUGINFO)
    endif
  else
    $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
    $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
    ifeq ($(STRIP_POLICY),all_strip)
      $(QUIETLY) $(STRIP) $@
    else
      ifeq ($(STRIP_POLICY),min_strip)
        $(QUIETLY) $(STRIP) -g $@
      # implied else here is no stripping at all
      endif
    endif
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
    $(RM) $(LIBSAPROC_DEBUGINFO)
    endif
  endif
endif

install_saproc: $(BUILDLIBSAPROC)
    $(QUIETLY) if [ -e $(LIBSAPROC) ] ; then \
      echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"; \
      cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"; \
    fi
    @echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"
ifeq ($(OS_VENDOR), Darwin)
    $(QUIETLY) test -d $(LIBSAPROC_DEBUGINFO) && \
        cp -f -r $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
else
    $(QUIETLY) test -f $(LIBSAPROC_DEBUGINFO) && \
        cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
endif
    $(QUIETLY) test -f $(LIBSAPROC_DIZ) && \
        cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ)
    $(QUIETLY) cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"

.PHONY: install_saproc
@@ -1,5 +1,5 @@
#
# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -19,7 +19,7 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
#

# macosx universal builds
@@ -35,15 +35,15 @@ universal_debug:
all_product_universal:
#   $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
    $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
    $(QUIETLY) $(MAKE) EXPORT_SUBDIR= universalize
    $(QUIETLY) $(MAKE) BUILD_FLAVOR=product EXPORT_SUBDIR= universalize
all_fastdebug_universal:
#   $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
    $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
    $(QUIETLY) $(MAKE) EXPORT_SUBDIR=/fastdebug universalize
    $(QUIETLY) $(MAKE) BUILD_FLAVOR=fastdebug EXPORT_SUBDIR=/fastdebug universalize
all_debug_universal:
#   $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_DEBUG_TARGETS)
    $(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_DEBUG_TARGETS)
    $(QUIETLY) $(MAKE) EXPORT_SUBDIR=/debug universalize
    $(QUIETLY) $(MAKE) BUILD_FLAVOR=debug EXPORT_SUBDIR=/debug universalize


# Consolidate architecture builds into a single Universal binary
@@ -57,18 +57,18 @@ $(UNIVERSAL_LIPO_LIST):
    if [ -n "$${BUILT_LIPO_FILES}" ]; then \
      $(MKDIR) -p $(shell dirname $@); \
      lipo -create -output $@ $${BUILT_LIPO_FILES}; \
    fi


# Copy built non-universal binaries in place
# - copies directories; including empty dirs
# - copies files, symlinks, other non-directory files
$(UNIVERSAL_COPY_LIST):
    BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
    BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`"; \
    if [ -n "$${BUILT_COPY_FILES}" ]; then \
      for i in $${BUILT_COPY_FILES}; do \
        if [ -f $${i} ]; then \
          $(MKDIR) -p $(shell dirname $@); \
          $(CP) $${i} $@; \
        fi; \
        $(MKDIR) -p $(shell dirname $@); \
        $(CP) -R $${i} $@; \
      done; \
    fi
@@ -60,10 +60,16 @@ Src_Dirs_I += $(GENERATED)
# The order is important for the precompiled headers to work.
INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%)

ifeq (${VERSION}, debug)
# SYMFLAG is used by {jsig,saproc}.make
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  # always build with debug info when we can create .dSYM/.debuginfo files
  SYMFLAG = -g
else
  SYMFLAG =
  ifeq (${VERSION}, debug)
    SYMFLAG = -g
  else
    SYMFLAG =
  endif
endif

# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
@@ -147,8 +153,14 @@ ifeq ($(OS_VENDOR), Darwin)
  ifeq (${VERSION}, $(filter ${VERSION}, debug fastdebug))
    CFLAGS += -DALLOW_OPERATOR_NEW_USAGE
  endif

  LIBJVM_DEBUGINFO = lib$(JVM).dylib.dSYM
  LIBJVM_DIZ       = lib$(JVM).diz
else
  LIBJVM = lib$(JVM).so

  LIBJVM_DEBUGINFO = lib$(JVM).debuginfo
  LIBJVM_DIZ       = lib$(JVM).diz
endif

SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
@@ -322,10 +334,47 @@ $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
        rm -f $@.1; ln -s $@ $@.1; \
    }

DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  ifeq ($(OS_VENDOR), Darwin)
    $(DSYMUTIL) $@
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -r -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
    $(RM) -r $(LIBJVM_DEBUGINFO)
    endif
  else
    $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
    $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
    ifeq ($(STRIP_POLICY),all_strip)
      $(QUIETLY) $(STRIP) $@
    else
      ifeq ($(STRIP_POLICY),min_strip)
        $(QUIETLY) $(STRIP) -g $@
      # implied else here is no stripping at all
      endif
    endif
    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    $(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
    $(RM) $(LIBJVM_DEBUGINFO)
    endif
  endif
endif

DEST_SUBDIR        = $(JDK_LIBDIR)/$(VM_SUBDIR)
DEST_JVM           = $(DEST_SUBDIR)/$(LIBJVM)
DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
DEST_JVM_DIZ       = $(DEST_SUBDIR)/$(LIBJVM_DIZ)

install_jvm: $(LIBJVM)
    @echo "Copying $(LIBJVM) to $(DEST_JVM)"
ifeq ($(OS_VENDOR), Darwin)
    $(QUIETLY) test -d $(LIBJVM_DEBUGINFO) && \
        cp -f -r $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
else
    $(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
        cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
endif
    $(QUIETLY) test -f $(LIBJVM_DIZ) && \
        cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
    $(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"

#----------------------------------------------------------------------
@@ -340,11 +389,8 @@ include $(MAKEFILES_DIR)/saproc.make
#----------------------------------------------------------------------

ifeq ($(OS_VENDOR), Darwin)
$(LIBJVM).dSYM: $(LIBJVM)
    dsymutil $(LIBJVM)

# no libjvm_db for macosx
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck
    echo "Doing vm.make build:"
else
build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
@@ -77,6 +77,16 @@ define install-file
    @$(RM) $@
    $(CP) $< $@
endef

# MacOS X strongly discourages 'cp -r' and provides 'cp -R' instead.
# May need to have a MacOS X specific definition of install-dir
# sometime in the future.
define install-dir
    @$(MKDIR) -p $(@D)
    @$(RM) -r $@
    $(CP) -r $< $@
endef

define prep-target
    @$(MKDIR) -p $(@D)
    @$(RM) $@
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013

HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=54
HS_BUILD_NUMBER=55

JDK_MAJOR_VER=1
JDK_MINOR_VER=8
@@ -3100,6 +3100,10 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  fatal("Type profiling not implemented on this platform");
}

void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}
@@ -1076,6 +1076,25 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {

  __ verify_not_null_oop(Oexception);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
  __ br_null(scratch, false, Assembler::pt, oop_empty);
  __ delayed()->nop();
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
  __ br_null(scratch, false, Assembler::pt, pc_empty);
  __ delayed()->nop();
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
@@ -76,6 +76,8 @@ define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread

define_pd_global(uintx, TypeProfileLevel, 0);

#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
                                                                            \
  product(intx, UseVIS, 99,                                                 \
@@ -3581,6 +3581,7 @@ void SharedRuntime::generate_deopt_blob() {
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
@@ -3632,6 +3632,161 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    __ stop("unexpect null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jccb(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cmpptr(mdo_addr, 0);
          __ jccb(Assembler::equal, none);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jccb(Assembler::equal, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push(tmp);
          __ cmpptr(mdo_addr, 0);
          __ jcc(Assembler::equal, ok);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jcc(Assembler::equal, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }

    __ bind(next);
  }
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
@@ -79,6 +79,8 @@ define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread

define_pd_global(uintx, TypeProfileLevel, 11);

#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
                                                                            \
  develop(bool, IEEEPrecision, true,                                        \
@@ -1046,6 +1046,158 @@ void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  }
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  Label update, next, none;

  verify_oop(obj);

  testptr(obj, obj);
  jccb(Assembler::notZero, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  jmpb(next);

  bind(update);
  load_klass(obj, obj);

  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next); // klass seen before, nothing to
                               // do. The unknown bit may have been
                               // set already but no need to check.

  testptr(obj, TypeEntries::type_unknown);
  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

  cmpptr(mdo_addr, 0);
  jccb(Assembler::equal, none);
  cmpptr(mdo_addr, TypeEntries::null_seen);
  jccb(Assembler::equal, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next);

  // different than before. Cannot keep accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  jmpb(next);

  bind(none);
  // first time here. Set profile type.
  movptr(mdo_addr, obj);

  bind(next);
}
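For readers who don't speak x86 assembler, the update rule that profile_obj_type implements can be modeled in plain Java. This is an explanatory model only: the constants below mirror the roles of TypeEntries::null_seen, type_unknown and type_klass_mask, not their actual HotSpot encodings, and klassBitsOf is an illustrative stand-in for load_klass.

    // Explanatory model of one type-profile cell. The low bits carry flags;
    // the remaining bits cache a klass pointer. Values are illustrative.
    final class TypeProfileCell {
        static final long NULL_SEEN    = 1L << 0;
        static final long TYPE_UNKNOWN = 1L << 1;
        static final long KLASS_MASK   = ~(NULL_SEEN | TYPE_UNKNOWN);

        long cell; // the word living in the MethodData, initially 0

        void record(Object obj) {
            if (obj == null) {                  // null path: just set the flag
                cell |= NULL_SEEN;
                return;
            }
            long klass = klassBitsOf(obj);      // stand-in for load_klass
            if ((cell & TYPE_UNKNOWN) != 0) return;  // already polluted
            long seen = cell & KLASS_MASK;
            if (seen == 0) {                    // first time here: cache the klass
                cell = (cell & ~KLASS_MASK) | klass;
            } else if (seen != klass) {         // conflict: give up on precision
                cell |= TYPE_UNKNOWN;
            }                                   // same klass as before: nothing to do
        }

        private static long klassBitsOf(Object obj) {
            // Illustrative only; the real code loads the klass word.
            return ((long) System.identityHashCode(obj.getClass())) << 2;
        }

        public static void main(String[] args) {
            TypeProfileCell cell = new TypeProfileCell();
            cell.record("a string");  // caches String's klass bits
            cell.record("another");   // same klass: profile stays precise
            cell.record(42);          // Integer: conflict -> TYPE_UNKNOWN
            cell.record(null);        // null path: NULL_SEEN flag
            System.out.printf("null_seen=%b unknown=%b%n",
                              (cell.cell & NULL_SEEN) != 0,
                              (cell.cell & TYPE_UNKNOWN) != 0);
        }
    }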
void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    jcc(Assembler::notEqual, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      addptr(mdp, off_to_args);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
          jcc(Assembler::less, done);
        }
        movptr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list
        subl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
        subl(tmp, 1);
        Address arg_addr = argument_address(tmp);
        movptr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        addptr(mdp, to_add);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        shll(tmp, exact_log2(DataLayout::cell_size));
        addptr(mdp, tmp);
      }
      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}
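To make the slot translation in profile_arguments_type concrete: the MDO records each profiled argument's stack slot o counted from the start of the argument list, while argument_address() indexes from the other end. For a callee with n = 3 parameter slots, slot o = 0 translates to n - o - 1 = 2 and slot o = 2 translates to 0, which is exactly the value the two subl instructions above leave in tmp before argument_address(tmp) is formed.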

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, rsi);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
      jcc(Assembler::equal, do_profile);
      cmpb(Address(rsi, 0), Bytecodes::_invokehandle);
      jcc(Assembler::equal, do_profile);
      get_method(tmp);
      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
      jcc(Assembler::notEqual, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {

@@ -215,6 +215,9 @@ class InterpreterMacroAssembler: public MacroAssembler {

  void profile_taken_branch(Register mdp, Register bumped_count);
  void profile_not_taken_branch(Register mdp);
  void profile_obj_type(Register obj, const Address& mdo_addr);
  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
  void profile_return_type(Register mdp, Register ret, Register tmp);
  void profile_call(Register mdp);
  void profile_final_call(Register mdp);
  void profile_virtual_call(Register receiver, Register mdp, Register scratch2,

@@ -1067,6 +1067,159 @@ void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  }
}

void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  Label update, next, none;

  verify_oop(obj);

  testptr(obj, obj);
  jccb(Assembler::notZero, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  jmpb(next);

  bind(update);
  load_klass(obj, obj);

  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next); // klass seen before, nothing to
                               // do. The unknown bit may have been
                               // set already but no need to check.

  testptr(obj, TypeEntries::type_unknown);
  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

  // There is a chance that by the time we do these checks (re-reading
  // profiling data from memory) another thread has set the profiling
  // to this obj's klass and we would set the profiling as unknown
  // erroneously
  cmpptr(mdo_addr, 0);
  jccb(Assembler::equal, none);
  cmpptr(mdo_addr, TypeEntries::null_seen);
  jccb(Assembler::equal, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  xorptr(obj, mdo_addr);
  testptr(obj, TypeEntries::type_klass_mask);
  jccb(Assembler::zero, next);

  // different from before. Cannot keep an accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  jmpb(next);

  bind(none);
  // first time here. Set profile type.
  movptr(mdo_addr, obj);

  bind(next);
}
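The assembly above maintains a single MDO cell that packs the last observed klass pointer together with two low status bits. A minimal single-threaded C++ sketch of the same update rule — the constant values and names here are illustrative stand-ins for the TypeEntries definitions in methodData.hpp, not the VM's actual bit assignments:

    #include <cstdint>

    const uintptr_t null_seen       = 1;                // a null was observed (assumed bit)
    const uintptr_t type_unknown    = 2;                // conflicting klasses observed (assumed bit)
    const uintptr_t status_bits     = null_seen | type_unknown;
    const uintptr_t type_klass_mask = ~status_bits;     // klass pointers are aligned, low bits free

    void profile_obj_type(uintptr_t klass, bool is_null, uintptr_t& cell) {
      if (is_null) {
        cell |= null_seen;                              // only the null bit is recorded
      } else if ((cell & type_unknown) != 0) {
        // already polluted: nothing more to learn
      } else if ((cell & type_klass_mask) == 0) {
        cell = klass | (cell & status_bits);            // first klass seen
      } else if ((cell & type_klass_mask) != klass) {
        cell |= type_unknown;                           // a different klass: give up
      }
    }

The generated code performs the klass comparison with a single xorptr/testptr pair, and re-reads the cell once more before setting type_unknown, to narrow the window in which a concurrent update by another thread could be misread as a conflict.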

void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    jcc(Assembler::notEqual, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      addptr(mdp, off_to_args);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
          jcc(Assembler::less, done);
        }
        movptr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        subq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
        subl(tmp, 1);
        Address arg_addr = argument_address(tmp);
        movptr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        addptr(mdp, to_add);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        shll(tmp, exact_log2(DataLayout::cell_size));
        addptr(mdp, tmp);
      }
      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, r13);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length
      Label do_profile;
      cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
      jcc(Assembler::equal, do_profile);
      cmpb(Address(r13, 0), Bytecodes::_invokehandle);
      jcc(Assembler::equal, do_profile);
      get_method(tmp);
      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
      jcc(Assembler::notEqual, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {

@@ -224,6 +224,9 @@ class InterpreterMacroAssembler: public MacroAssembler {

  void profile_taken_branch(Register mdp, Register bumped_count);
  void profile_not_taken_branch(Register mdp);
  void profile_obj_type(Register obj, const Address& mdo_addr);
  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
  void profile_return_type(Register mdp, Register ret, Register tmp);
  void profile_call(Register mdp);
  void profile_final_call(Register mdp);
  void profile_virtual_call(Register receiver, Register mdp,

@@ -773,6 +773,7 @@ class MacroAssembler: public Assembler {
  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

@@ -194,6 +194,12 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
  __ restore_bcp();
  __ restore_locals();

  if (incoming_state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);

@@ -177,6 +177,12 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  Label L_got_cache, L_giant_index;
  if (EnableInvokeDynamic) {
    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);

@@ -2970,6 +2970,7 @@ void TemplateTable::invokevirtual_helper(Register index,

  // profile this call
  __ profile_final_call(rax);
  __ profile_arguments_type(rax, method, rsi, true);

  __ jump_from_interpreted(method, rax);

@@ -2984,6 +2985,7 @@ void TemplateTable::invokevirtual_helper(Register index,

  // get target Method* & entry point
  __ lookup_virtual_method(rax, index, method);
  __ profile_arguments_type(rdx, method, rsi, true);
  __ jump_from_interpreted(method, rdx);
}

@@ -3013,6 +3015,7 @@ void TemplateTable::invokespecial(int byte_no) {
  __ null_check(rcx);
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, rsi, false);
  __ jump_from_interpreted(rbx, rax);
}

@@ -3023,6 +3026,7 @@ void TemplateTable::invokestatic(int byte_no) {
  prepare_invoke(byte_no, rbx);  // get f1 Method*
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, rsi, false);
  __ jump_from_interpreted(rbx, rax);
}

@@ -3082,6 +3086,8 @@ void TemplateTable::invokeinterface(int byte_no) {
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, no_such_method);

  __ profile_arguments_type(rdx, rbx, rsi, true);

  // do the call
  // rcx: receiver
  // rbx,: Method*

@@ -3138,6 +3144,7 @@ void TemplateTable::invokehandle(int byte_no) {

  // FIXME: profile the LambdaForm also
  __ profile_final_call(rax);
  __ profile_arguments_type(rdx, rbx_method, rsi, true);

  __ jump_from_interpreted(rbx_method, rdx);
}

@@ -3171,6 +3178,7 @@ void TemplateTable::invokedynamic(int byte_no) {
  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(rsi);
  __ profile_arguments_type(rdx, rbx, rsi, false);

  __ verify_oop(rax_callsite);

@@ -3026,6 +3026,7 @@ void TemplateTable::invokevirtual_helper(Register index,

  // profile this call
  __ profile_final_call(rax);
  __ profile_arguments_type(rax, method, r13, true);

  __ jump_from_interpreted(method, rax);

@@ -3040,6 +3041,7 @@ void TemplateTable::invokevirtual_helper(Register index,

  // get target Method* & entry point
  __ lookup_virtual_method(rax, index, method);
  __ profile_arguments_type(rdx, method, r13, true);
  __ jump_from_interpreted(method, rdx);
}

@@ -3069,6 +3071,7 @@ void TemplateTable::invokespecial(int byte_no) {
  __ null_check(rcx);
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, r13, false);
  __ jump_from_interpreted(rbx, rax);
}

@@ -3079,6 +3082,7 @@ void TemplateTable::invokestatic(int byte_no) {
  prepare_invoke(byte_no, rbx);  // get f1 Method*
  // do the call
  __ profile_call(rax);
  __ profile_arguments_type(rax, rbx, r13, false);
  __ jump_from_interpreted(rbx, rax);
}

@@ -3136,6 +3140,8 @@ void TemplateTable::invokeinterface(int byte_no) {
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, no_such_method);

  __ profile_arguments_type(rdx, rbx, r13, true);

  // do the call
  // rcx: receiver
  // rbx,: Method*

@@ -3193,6 +3199,7 @@ void TemplateTable::invokehandle(int byte_no) {

  // FIXME: profile the LambdaForm also
  __ profile_final_call(rax);
  __ profile_arguments_type(rdx, rbx_method, r13, true);

  __ jump_from_interpreted(rbx_method, rdx);
}

@@ -3226,6 +3233,7 @@ void TemplateTable::invokedynamic(int byte_no) {
  // %%% should make a type profile for any invokedynamic that takes a ref argument
  // profile this call
  __ profile_call(r13);
  __ profile_arguments_type(rdx, rbx_method, r13, false);

  __ verify_oop(rax_callsite);

@@ -159,9 +159,21 @@ julong os::available_memory() {
  return Bsd::available_memory();
}

// available here means free
julong os::Bsd::available_memory() {
  // XXXBSD: this is just a stopgap implementation
  return physical_memory() >> 2;
  uint64_t available = physical_memory() >> 2;
#ifdef __APPLE__
  mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
  vm_statistics64_data_t vmstat;
  kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
                                         (host_info64_t)&vmstat, &count);
  assert(kerr == KERN_SUCCESS,
         "host_statistics64 failed - check mach_host_self() and count");
  if (kerr == KERN_SUCCESS) {
    available = vmstat.free_count * os::vm_page_size();
  }
#endif
  return available;
}
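The Mach calls used above can be exercised outside the VM. A self-contained sketch for macOS, relying only on the same <mach/mach.h> interfaces the patch itself uses:

    #include <mach/mach.h>
    #include <stdio.h>

    int main(void) {
      mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
      vm_statistics64_data_t vmstat;
      kern_return_t kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
                                           (host_info64_t)&vmstat, &count);
      if (kr != KERN_SUCCESS) {
        fprintf(stderr, "host_statistics64 failed: %d\n", kr);
        return 1;
      }
      // free_count is a page count; vm_page_size is exported by the Mach headers.
      unsigned long long free_bytes =
          (unsigned long long)vmstat.free_count * vm_page_size;
      printf("free memory: %llu bytes\n", free_bytes);
      return 0;
    }

Note that the physical_memory() >> 2 estimate is kept as the fallback for the non-Apple BSDs, and for product builds where the assert compiles away and the Mach call could still fail at runtime.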

julong os::physical_memory() {

@@ -935,6 +935,7 @@ void Canonicalizer::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {}
void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {}
void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
void Canonicalizer::do_ProfileReturnType(ProfileReturnType* x) {}
void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}

@@ -104,6 +104,7 @@ class Canonicalizer: InstructionVisitor {
  virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
  virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
  virtual void do_ProfileCall (ProfileCall* x);
  virtual void do_ProfileReturnType (ProfileReturnType* x);
  virtual void do_ProfileInvoke (ProfileInvoke* x);
  virtual void do_RuntimeCall (RuntimeCall* x);
  virtual void do_MemBar (MemBar* x);

@@ -601,6 +601,17 @@ void Compilation::bailout(const char* msg) {
  }
}

ciKlass* Compilation::cha_exact_type(ciType* type) {
  if (type != NULL && type->is_loaded() && type->is_instance_klass()) {
    ciInstanceKlass* ik = type->as_instance_klass();
    assert(ik->exact_klass() == NULL, "no cha for final klass");
    if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
      dependency_recorder()->assert_leaf_type(ik);
      return ik;
    }
  }
  return NULL;
}

void Compilation::print_timers() {
  // tty->print_cr("  Native methods : %6.3f s, Average : %2.3f", CompileBroker::_t_native_compilation.seconds(), CompileBroker::_t_native_compilation.seconds() / CompileBroker::_total_native_compile_count);

@@ -246,6 +246,8 @@ class Compilation: public StackObj {
           (RangeCheckElimination || UseLoopInvariantCodeMotion) &&
           method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
  }

  ciKlass* cha_exact_type(ciType* type);
};

@@ -42,26 +42,16 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"

volatile int Compiler::_runtimes = uninitialized;

Compiler::Compiler() {
}
Compiler::Compiler () {}

Compiler::~Compiler() {
  Unimplemented();
}

void Compiler::initialize_all() {
void Compiler::init_c1_runtime() {
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  Arena* arena = new (mtCompiler) Arena();
  Runtime1::initialize(buffer_blob);
  FrameMap::initialize();
  // initialize data structures
  ValueType::initialize(arena);
  // Instruction::initialize();
  // BlockBegin::initialize();
  GraphBuilder::initialize();
  // note: to use more than one instance of LinearScan at a time this function call has to
  //       be moved somewhere outside of this constructor:

@@ -70,32 +60,33 @@ void Compiler::initialize_all() {

void Compiler::initialize() {
  if (_runtimes != initialized) {
    initialize_runtimes( initialize_all, &_runtimes);
  // Buffer blob must be allocated per C1 compiler thread at startup
  BufferBlob* buffer_blob = init_buffer_blob();

  if (should_perform_init()) {
    if (buffer_blob == NULL) {
      // When we come here we are in state 'initializing'; entire C1 compilation
      // can be shut down.
      set_state(failed);
    } else {
      init_c1_runtime();
      set_state(initialized);
    }
  }
  mark_initialized();
}

BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {
BufferBlob* Compiler::init_buffer_blob() {
  // Allocate buffer blob once at startup since allocation for each
  // compilation seems to be too expensive (at least on Intel win32).
  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  if (buffer_blob != NULL) {
    return buffer_blob;
  }
  assert(CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");

  // setup CodeBuffer. Preallocate a BufferBlob of size
  // NMethodSizeLimit plus some extra space for constants.
  int code_buffer_size = Compilation::desired_max_code_buffer_size() +
    Compilation::desired_max_constant_size();

  buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
                                   code_buffer_size);
  if (buffer_blob == NULL) {
    CompileBroker::handle_full_code_cache();
    env->record_failure("CodeCache is full");
  } else {
  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size);
  if (buffer_blob != NULL) {
    CompilerThread::current()->set_buffer_blob(buffer_blob);
  }

@@ -104,15 +95,8 @@ BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {

void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
  BufferBlob* buffer_blob = Compiler::get_buffer_blob(env);
  if (buffer_blob == NULL) {
    return;
  }

  if (!is_initialized()) {
    initialize();
  }

  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
  assert(buffer_blob != NULL, "Must exist");
  // invoke compilation
  {
    // We are nested here because we need for the destructor

@@ -30,11 +30,9 @@
// There is one instance of the Compiler per CompilerThread.

class Compiler: public AbstractCompiler {

 private:

  // Tracks whether runtime has been initialized
  static volatile int _runtimes;
  static void init_c1_runtime();
  BufferBlob* init_buffer_blob();

 public:
  // Creation

@@ -46,19 +44,12 @@ class Compiler: public AbstractCompiler {

  virtual bool is_c1() { return true; };

  BufferBlob* get_buffer_blob(ciEnv* env);

  // Missing feature tests
  virtual bool supports_native()  { return true; }
  virtual bool supports_osr   ()  { return true; }

  // Customization
  virtual bool needs_adapters ()  { return false; }
  virtual bool needs_stubs    ()  { return false; }

  // Initialization
  virtual void initialize();
  static void initialize_all();

  // Compilation entry point for methods
  virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);

@@ -1466,9 +1466,22 @@ void GraphBuilder::method_return(Value x) {
  // State at end of inlined method is the state of the caller
  // without the method parameters on stack, including the
  // return value, if any, of the inlined method on operand stack.
  int invoke_bci = state()->caller_state()->bci();
  set_state(state()->caller_state()->copy_for_parsing());
  if (x != NULL) {
    state()->push(x->type(), x);
    if (profile_calls() && MethodData::profile_return() && x->type()->is_object_kind()) {
      ciMethod* caller = state()->scope()->method();
      ciMethodData* md = caller->method_data_or_null();
      ciProfileData* data = md->bci_to_data(invoke_bci);
      if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
        bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
        // May not be true in case of an inlined call through a method handle intrinsic.
        if (has_return) {
          profile_return_type(x, method(), caller, invoke_bci);
        }
      }
    }
  }
  Goto* goto_callee = new Goto(continuation(), false);

@@ -1658,6 +1671,42 @@ Dependencies* GraphBuilder::dependency_recorder() const {
  return compilation()->dependency_recorder();
}

// How many arguments do we want to profile?
Values* GraphBuilder::args_list_for_profiling(int& start, bool may_have_receiver) {
  int n = 0;
  assert(start == 0, "should be initialized");
  if (MethodData::profile_arguments()) {
    ciProfileData* data = method()->method_data()->bci_to_data(bci());
    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
      n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
      bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
      start = has_receiver ? 1 : 0;
    }
  }
  if (n > 0) {
    return new Values(n);
  }
  return NULL;
}

// Collect arguments that we want to profile in a list
Values* GraphBuilder::collect_args_for_profiling(Values* args, bool may_have_receiver) {
  int start = 0;
  Values* obj_args = args_list_for_profiling(start, may_have_receiver);
  if (obj_args == NULL) {
    return NULL;
  }
  int s = obj_args->size();
  for (int i = start, j = 0; j < s; i++) {
    if (args->at(i)->type()->is_object_kind()) {
      obj_args->push(args->at(i));
      j++;
    }
  }
  assert(s == obj_args->length(), "missed on arg?");
  return obj_args;
}
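As an illustration of the two helpers above: for an invokevirtual with a signature taking (int, String, long, Object), start is set to 1 so the receiver is skipped, and only the object-kind values (the String and the Object) are pushed, with i and j advancing independently past the primitives. A hypothetical freestanding analogue of the filtering loop, for illustration only:

    #include <vector>

    struct Arg { bool is_object; };

    // Keep the first `want` object-kind arguments, starting at `start`
    // (start == 1 skips a receiver slot), passing over primitives in between.
    std::vector<Arg> collect(const std::vector<Arg>& args, int start, int want) {
      std::vector<Arg> out;
      for (size_t i = start; (int)out.size() < want && i < args.size(); i++) {
        if (args[i].is_object) {
          out.push_back(args[i]);
        }
      }
      return out;
    }

Unlike this bounded sketch, the builder's loop trusts the count the MDO reserved and asserts that enough object arguments were actually found.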

void GraphBuilder::invoke(Bytecodes::Code code) {
  bool will_link;

@@ -1957,7 +2006,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
      } else if (exact_target != NULL) {
        target_klass = exact_target->holder();
      }
      profile_call(target, recv, target_klass);
      profile_call(target, recv, target_klass, collect_args_for_profiling(args, false), false);
    }
  }

@@ -1972,6 +2021,9 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
      push(result_type, result);
    }
  }
  if (profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
    profile_return_type(result, target);
  }
}

@@ -3509,7 +3561,7 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
        recv = args->at(0);
        null_check(recv);
      }
      profile_call(callee, recv, NULL);
      profile_call(callee, recv, NULL, collect_args_for_profiling(args, true), true);
    }
  }
}

@@ -3520,6 +3572,10 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
  Value value = append_split(result);
  if (result_type != voidType) push(result_type, value);

  if (callee != method() && profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
    profile_return_type(result, callee);
  }

  // done
  return true;
}

@@ -3763,7 +3819,28 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
    compilation()->set_would_profile(true);

    if (profile_calls()) {
      profile_call(callee, recv, holder_known ? callee->holder() : NULL);
      int start = 0;
      Values* obj_args = args_list_for_profiling(start, has_receiver);
      if (obj_args != NULL) {
        int s = obj_args->size();
        // if called through method handle invoke, some arguments may have been popped
        for (int i = args_base+start, j = 0; j < obj_args->size() && i < state()->stack_size(); ) {
          Value v = state()->stack_at_inc(i);
          if (v->type()->is_object_kind()) {
            obj_args->push(v);
            j++;
          }
        }
#ifdef ASSERT
        {
          bool ignored_will_link;
          ciSignature* declared_signature = NULL;
          ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
          assert(s == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
        }
#endif
      }
      profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true);
    }
  }

@@ -4251,8 +4328,23 @@ void GraphBuilder::print_stats() {
}
#endif // PRODUCT

void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) {
  append(new ProfileCall(method(), bci(), callee, recv, known_holder));
void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
  append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
}

void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
  assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
  if (m == NULL) {
    m = method();
  }
  if (invoke_bci < 0) {
    invoke_bci = bci();
  }
  ciMethodData* md = m->method_data_or_null();
  ciProfileData* data = md->bci_to_data(invoke_bci);
  if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
    append(new ProfileReturnType(m, invoke_bci, callee, ret));
  }
}

void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {

@@ -374,7 +374,8 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {

  void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);

  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder);
  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
  void profile_invocation(ciMethod* inlinee, ValueStack* state);

  // Shortcuts to profiling control.

@@ -386,6 +387,9 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
  bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
  bool profile_checkcasts()    { return _compilation->profile_checkcasts(); }

  Values* args_list_for_profiling(int& start, bool may_have_receiver);
  Values* collect_args_for_profiling(Values* args, bool may_have_receiver);

 public:
  NOT_PRODUCT(void print_stats();)

@@ -104,6 +104,14 @@ void Instruction::state_values_do(ValueVisitor* f) {
  }
}

ciType* Instruction::exact_type() const {
  ciType* t = declared_type();
  if (t != NULL && t->is_klass()) {
    return t->as_klass()->exact_klass();
  }
  return NULL;
}

#ifndef PRODUCT
void Instruction::check_state(ValueStack* state) {

@@ -135,9 +143,7 @@ void Instruction::print(InstructionPrinter& ip) {

// perform constant and interval tests on index value
bool AccessIndexed::compute_needs_range_check() {

  if (length()) {

    Constant* clength = length()->as_Constant();
    Constant* cindex = index()->as_Constant();
    if (clength && cindex) {

@@ -157,34 +163,8 @@ bool AccessIndexed::compute_needs_range_check() {
}

ciType* Local::exact_type() const {
  ciType* type = declared_type();

  // for primitive arrays, the declared type is the exact type
  if (type->is_type_array_klass()) {
    return type;
  } else if (type->is_instance_klass()) {
    ciInstanceKlass* ik = (ciInstanceKlass*)type;
    if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) {
      return type;
    }
  } else if (type->is_obj_array_klass()) {
    ciObjArrayKlass* oak = (ciObjArrayKlass*)type;
    ciType* base = oak->base_element_type();
    if (base->is_instance_klass()) {
      ciInstanceKlass* ik = base->as_instance_klass();
      if (ik->is_loaded() && ik->is_final()) {
        return type;
      }
    } else if (base->is_primitive_type()) {
      return type;
    }
  }
  return NULL;
}

ciType* Constant::exact_type() const {
  if (type()->is_object()) {
  if (type()->is_object() && type()->as_ObjectType()->is_loaded()) {
    return type()->as_ObjectType()->exact_type();
  }
  return NULL;

@@ -192,19 +172,18 @@ ciType* Constant::exact_type() const {

ciType* LoadIndexed::exact_type() const {
  ciType* array_type = array()->exact_type();
  if (array_type == NULL) {
    return NULL;
  }
  assert(array_type->is_array_klass(), "what else?");
  ciArrayKlass* ak = (ciArrayKlass*)array_type;
  if (array_type != NULL) {
    assert(array_type->is_array_klass(), "what else?");
    ciArrayKlass* ak = (ciArrayKlass*)array_type;

  if (ak->element_type()->is_instance_klass()) {
    ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
    if (ik->is_loaded() && ik->is_final()) {
      return ik;
    if (ak->element_type()->is_instance_klass()) {
      ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
      if (ik->is_loaded() && ik->is_final()) {
        return ik;
      }
    }
  }
  return NULL;
  return Instruction::exact_type();
}

@@ -224,22 +203,6 @@ ciType* LoadField::declared_type() const {
}

ciType* LoadField::exact_type() const {
  ciType* type = declared_type();
  // for primitive arrays, the declared type is the exact type
  if (type->is_type_array_klass()) {
    return type;
  }
  if (type->is_instance_klass()) {
    ciInstanceKlass* ik = (ciInstanceKlass*)type;
    if (ik->is_loaded() && ik->is_final()) {
      return type;
    }
  }
  return NULL;
}

ciType* NewTypeArray::exact_type() const {
  return ciTypeArrayKlass::make(elt_type());
}

@@ -264,16 +227,6 @@ ciType* CheckCast::declared_type() const {
  return klass();
}

ciType* CheckCast::exact_type() const {
  if (klass()->is_instance_klass()) {
    ciInstanceKlass* ik = (ciInstanceKlass*)klass();
    if (ik->is_loaded() && ik->is_final()) {
      return ik;
    }
  }
  return NULL;
}

// Implementation of ArithmeticOp

bool ArithmeticOp::is_commutative() const {

@@ -107,6 +107,7 @@ class UnsafePrefetch;
class UnsafePrefetchRead;
class UnsafePrefetchWrite;
class ProfileCall;
class ProfileReturnType;
class ProfileInvoke;
class RuntimeCall;
class MemBar;

@@ -211,6 +212,7 @@ class InstructionVisitor: public StackObj {
  virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x) = 0;
  virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
  virtual void do_ProfileCall    (ProfileCall*      x) = 0;
  virtual void do_ProfileReturnType (ProfileReturnType* x) = 0;
  virtual void do_ProfileInvoke  (ProfileInvoke*    x) = 0;
  virtual void do_RuntimeCall    (RuntimeCall*      x) = 0;
  virtual void do_MemBar         (MemBar*           x) = 0;

@@ -322,6 +324,36 @@ class Instruction: public CompilationResourceObj {
    _type = type;
  }

  // Helper class to keep track of which arguments need a null check
  class ArgsNonNullState {
   private:
    int _nonnull_state; // mask identifying which args are nonnull
   public:
    ArgsNonNullState()
      : _nonnull_state(AllBits) {}

    // Does argument number i need a null check?
    bool arg_needs_null_check(int i) const {
      // No data is kept for arguments starting at position 33 so
      // conservatively assume that they need a null check.
      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
        return is_set_nth_bit(_nonnull_state, i);
      }
      return true;
    }

    // Set whether argument number i needs a null check or not
    void set_arg_needs_null_check(int i, bool check) {
      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
        if (check) {
          _nonnull_state |= nth_bit(i);
        } else {
          _nonnull_state &= ~(nth_bit(i));
        }
      }
    }
  };
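A short usage sketch of the helper class above (assuming HotSpot's nth_bit(i) yields a word with bit i set, as defined in globalDefinitions.hpp):

    ArgsNonNullState s;                    // every argument conservatively needs a check
    s.set_arg_needs_null_check(0, false);  // an explicit null check proved arg 0 nonnull
    assert(!s.arg_needs_null_check(0), "recorded");
    assert(s.arg_needs_null_check(1), "untouched bits stay conservative");
    assert(s.arg_needs_null_check(40), "beyond the 32 tracked bits: always conservative");

Starting from AllBits rather than zero makes the conservative answer the default, so forgetting to record a proof can only cost an extra null check, never soundness.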

 public:
  void* operator new(size_t size) throw() {
    Compilation* c = Compilation::current();

@@ -566,7 +598,7 @@ class Instruction: public CompilationResourceObj {
  virtual void other_values_do(ValueVisitor* f)   { /* usually no other - override on demand */ }
  void values_do(ValueVisitor* f)                 { input_values_do(f); state_values_do(f); other_values_do(f); }

  virtual ciType* exact_type() const             { return NULL; }
  virtual ciType* exact_type() const;
  virtual ciType* declared_type() const          { return NULL; }

  // hashing

@@ -689,7 +721,6 @@ LEAF(Local, Instruction)
  int java_index() const                         { return _java_index; }

  virtual ciType* declared_type() const          { return _declared_type; }
  virtual ciType* exact_type() const;

  // generic
  virtual void input_values_do(ValueVisitor* f)  { /* no values */ }

@@ -806,7 +837,6 @@ LEAF(LoadField, AccessField)
  {}

  ciType* declared_type() const;
  ciType* exact_type() const;

  // generic
  HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset())  // cannot be eliminated if needs patching or if volatile

@@ -1299,6 +1329,7 @@ BASE(NewArray, StateSplit)

  virtual bool needs_exception_state() const     { return false; }

  ciType* exact_type() const                     { return NULL; }
  ciType* declared_type() const;

  // generic

@@ -1422,7 +1453,6 @@ LEAF(CheckCast, TypeCheck)
  }

  ciType* declared_type() const;
  ciType* exact_type() const;
};

@@ -1490,7 +1520,7 @@ LEAF(Intrinsic, StateSplit)
  vmIntrinsics::ID _id;
  Values*          _args;
  Value            _recv;
  int              _nonnull_state; // mask identifying which args are nonnull
  ArgsNonNullState _nonnull_state;

 public:
  // preserves_state can be set to true for Intrinsics

@@ -1511,7 +1541,6 @@ LEAF(Intrinsic, StateSplit)
  , _id(id)
  , _args(args)
  , _recv(NULL)
  , _nonnull_state(AllBits)
  {
    assert(args != NULL, "args must exist");
    ASSERT_VALUES

@@ -1537,21 +1566,12 @@ LEAF(Intrinsic, StateSplit)
  Value receiver() const                         { assert(has_receiver(), "must have receiver"); return _recv; }
  bool preserves_state() const                   { return check_flag(PreservesStateFlag); }

  bool arg_needs_null_check(int i) {
    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
      return is_set_nth_bit(_nonnull_state, i);
    }
    return true;
  bool arg_needs_null_check(int i) const {
    return _nonnull_state.arg_needs_null_check(i);
  }

  void set_arg_needs_null_check(int i, bool check) {
    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
      if (check) {
        _nonnull_state |= nth_bit(i);
      } else {
        _nonnull_state &= ~(nth_bit(i));
      }
    }
    _nonnull_state.set_arg_needs_null_check(i, check);
  }

  // generic

@@ -2450,34 +2470,87 @@ LEAF(UnsafePrefetchWrite, UnsafePrefetch)

LEAF(ProfileCall, Instruction)
 private:
  ciMethod* _method;
  int       _bci_of_invoke;
  ciMethod* _callee;        // the method that is called at the given bci
  Value     _recv;
  ciKlass*  _known_holder;
  ciMethod*        _method;
  int              _bci_of_invoke;
  ciMethod*        _callee;         // the method that is called at the given bci
  Value            _recv;
  ciKlass*         _known_holder;
  Values*          _obj_args;       // arguments for type profiling
  ArgsNonNullState _nonnull_state;  // Do we know whether some arguments are never null?
  bool             _inlined;        // Are we profiling a call that is inlined

 public:
  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder)
  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined)
    : Instruction(voidType)
    , _method(method)
    , _bci_of_invoke(bci)
    , _callee(callee)
    , _recv(recv)
    , _known_holder(known_holder)
    , _obj_args(obj_args)
    , _inlined(inlined)
  {
    // The ProfileCall has side-effects and must occur precisely where located
    pin();
  }

  ciMethod* method()       { return _method; }
  int bci_of_invoke()      { return _bci_of_invoke; }
  ciMethod* callee()       { return _callee; }
  Value recv()             { return _recv; }
  ciKlass* known_holder()  { return _known_holder; }
  ciMethod* method()             const { return _method; }
  int bci_of_invoke()            const { return _bci_of_invoke; }
  ciMethod* callee()             const { return _callee; }
  Value recv()                   const { return _recv; }
  ciKlass* known_holder()        const { return _known_holder; }
  int nb_profiled_args()         const { return _obj_args == NULL ? 0 : _obj_args->length(); }
  Value profiled_arg_at(int i)   const { return _obj_args->at(i); }
  bool arg_needs_null_check(int i) const {
    return _nonnull_state.arg_needs_null_check(i);
  }
  bool inlined()                 const { return _inlined; }

  virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
  void set_arg_needs_null_check(int i, bool check) {
    _nonnull_state.set_arg_needs_null_check(i, check);
  }

  virtual void input_values_do(ValueVisitor* f) {
    if (_recv != NULL) {
      f->visit(&_recv);
    }
    for (int i = 0; i < nb_profiled_args(); i++) {
      f->visit(_obj_args->adr_at(i));
    }
  }
};

LEAF(ProfileReturnType, Instruction)
 private:
  ciMethod* _method;
  ciMethod* _callee;
  int       _bci_of_invoke;
  Value     _ret;

 public:
  ProfileReturnType(ciMethod* method, int bci, ciMethod* callee, Value ret)
    : Instruction(voidType)
    , _method(method)
    , _callee(callee)
    , _bci_of_invoke(bci)
    , _ret(ret)
  {
    set_needs_null_check(true);
    // The ProfileType has side-effects and must occur precisely where located
    pin();
  }

  ciMethod* method()   const { return _method; }
  ciMethod* callee()   const { return _callee; }
  int bci_of_invoke()  const { return _bci_of_invoke; }
  Value ret()          const { return _ret; }

  virtual void input_values_do(ValueVisitor* f) {
    if (_ret != NULL) {
      f->visit(&_ret);
    }
  }
};

// Call some C runtime function that doesn't safepoint,
// optionally passing the current thread as the first argument.

@@ -892,10 +892,24 @@ void InstructionPrinter::do_ProfileCall(ProfileCall* x) {
  if (x->known_holder() != NULL) {
    output()->print(", ");
    print_klass(x->known_holder());
    output()->print(" ");
  }
  for (int i = 0; i < x->nb_profiled_args(); i++) {
    if (i > 0) output()->print(", ");
    print_value(x->profiled_arg_at(i));
    if (x->arg_needs_null_check(i)) {
      output()->print(" [NC]");
    }
  }
  output()->put(')');
}

void InstructionPrinter::do_ProfileReturnType(ProfileReturnType* x) {
  output()->print("profile ret type ");
  print_value(x->ret());
  output()->print(" %s.%s", x->method()->holder()->name()->as_utf8(), x->method()->name()->as_utf8());
  output()->put(')');
}
void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
  output()->print("profile_invoke ");
  output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());

@@ -132,6 +132,7 @@ class InstructionPrinter: public InstructionVisitor {
  virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
  virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
  virtual void do_ProfileCall    (ProfileCall*      x);
  virtual void do_ProfileReturnType (ProfileReturnType* x);
  virtual void do_ProfileInvoke  (ProfileInvoke*    x);
  virtual void do_RuntimeCall    (RuntimeCall*      x);
  virtual void do_MemBar         (MemBar*           x);

@@ -1001,6 +1001,17 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
      assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
      break;
    }

    // LIR_OpProfileType:
    case lir_profile_type: {
      assert(op->as_OpProfileType() != NULL, "must be");
      LIR_OpProfileType* opProfileType = (LIR_OpProfileType*)op;

      do_input(opProfileType->_mdp); do_temp(opProfileType->_mdp);
      do_input(opProfileType->_obj);
      do_temp(opProfileType->_tmp);
      break;
    }
    default:
      ShouldNotReachHere();
  }

@@ -1151,6 +1162,10 @@ void LIR_OpProfileCall::emit_code(LIR_Assembler* masm) {
  masm->emit_profile_call(this);
}

void LIR_OpProfileType::emit_code(LIR_Assembler* masm) {
  masm->emit_profile_type(this);
}

// LIR_List
LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
  : _operations(8)

@@ -1803,6 +1818,8 @@ const char * LIR_Op::name() const {
     case lir_cas_int:               s = "cas_int";       break;
     // LIR_OpProfileCall
     case lir_profile_call:          s = "profile_call";  break;
     // LIR_OpProfileType
     case lir_profile_type:          s = "profile_type";  break;
     // LIR_OpAssert
#ifdef ASSERT
     case lir_assert:                s = "assert";        break;

@@ -2086,6 +2103,15 @@ void LIR_OpProfileCall::print_instr(outputStream* out) const {
  tmp1()->print(out);      out->print(" ");
}

// LIR_OpProfileType
void LIR_OpProfileType::print_instr(outputStream* out) const {
  out->print("exact = ");   exact_klass()->print_name_on(out);
  out->print("current = "); ciTypeEntries::print_ciklass(out, current_klass());
  mdp()->print(out);        out->print(" ");
  obj()->print(out);        out->print(" ");
  tmp()->print(out);        out->print(" ");
}

#endif // PRODUCT

// Implementation of LIR_InsertionBuffer

@@ -882,6 +882,7 @@ class LIR_OpLock;
class LIR_OpTypeCheck;
class LIR_OpCompareAndSwap;
class LIR_OpProfileCall;
class LIR_OpProfileType;
#ifdef ASSERT
class LIR_OpAssert;
#endif

@@ -1005,6 +1006,7 @@ enum LIR_Code {
  , end_opCompareAndSwap
  , begin_opMDOProfile
    , lir_profile_call
    , lir_profile_type
  , end_opMDOProfile
  , begin_opAssert
    , lir_assert

@@ -1145,6 +1147,7 @@ class LIR_Op: public CompilationResourceObj {
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
#ifdef ASSERT
  virtual LIR_OpAssert* as_OpAssert() { return NULL; }
#endif

@@ -1925,8 +1928,8 @@ class LIR_OpProfileCall : public LIR_Op {

 public:
  // Destroys recv
  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
  : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
  LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
  : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
  , _profiled_method(profiled_method)
  , _profiled_bci(profiled_bci)
  , _profiled_callee(profiled_callee)

@@ -1948,6 +1951,45 @@ class LIR_OpProfileCall : public LIR_Op {
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};

// LIR_OpProfileType
class LIR_OpProfileType : public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr  _mdp;
  LIR_Opr  _obj;
  LIR_Opr  _tmp;
  ciKlass* _exact_klass;   // non NULL if we know the klass statically (no need to load it from _obj)
  intptr_t _current_klass; // what the profiling currently reports
  bool     _not_null;      // true if we know statically that _obj cannot be null
  bool     _no_conflict;   // true if we're profiling parameters, _exact_klass is not NULL and we know
                           // _exact_klass is the only possible type for this parameter in any context.

 public:
  // Destroys recv
  LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
  : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
  , _mdp(mdp)
  , _obj(obj)
  , _exact_klass(exact_klass)
  , _current_klass(current_klass)
  , _tmp(tmp)
  , _not_null(not_null)
  , _no_conflict(no_conflict) { }

  LIR_Opr  mdp()           const { return _mdp; }
  LIR_Opr  obj()           const { return _obj; }
  LIR_Opr  tmp()           const { return _tmp; }
  ciKlass* exact_klass()   const { return _exact_klass; }
  intptr_t current_klass() const { return _current_klass; }
  bool     not_null()      const { return _not_null; }
  bool     no_conflict()   const { return _no_conflict; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpProfileType* as_OpProfileType() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};

class LIR_InsertionBuffer;

//--------------------------------LIR_List---------------------------------------------------

@@ -2247,7 +2289,10 @@ class LIR_List: public CompilationResourceObj {
                  ciMethod* profiled_method, int profiled_bci);
  // MethodData* profiling
  void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
    append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
    append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
  }
  void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
    append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
  }

  void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }

@@ -208,6 +208,7 @@ class LIR_Assembler: public CompilationResourceObj {
  void emit_call(LIR_OpJavaCall* op);
  void emit_rtcall(LIR_OpRTCall* op);
  void emit_profile_call(LIR_OpProfileCall* op);
  void emit_profile_type(LIR_OpProfileType* op);
  void emit_delay(LIR_OpDelay* op);

  void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);

@@ -2571,6 +2571,78 @@ void LIRGenerator::do_Goto(Goto* x) {
}

ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) {
  ciKlass* result = NULL;
  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
  // known not to be null or null bit already set and already set to
  // unknown: nothing we can do to improve profiling
  if (!do_null && !do_update) {
    return result;
  }

  ciKlass* exact_klass = NULL;
  Compilation* comp = Compilation::current();
  if (do_update) {
    // try to find exact type, using CHA if possible, so that loading
    // the klass from the object can be avoided
    ciType* type = arg->exact_type();
    if (type == NULL) {
      type = arg->declared_type();
      type = comp->cha_exact_type(type);
    }
    assert(type == NULL || type->is_klass(), "type should be class");
    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;

    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
  }

  if (!do_null && !do_update) {
    return result;
  }

  ciKlass* exact_signature_k = NULL;
  if (do_update) {
    // Is the type from the signature exact (the only one possible)?
    exact_signature_k = signature_k->exact_klass();
    if (exact_signature_k == NULL) {
      exact_signature_k = comp->cha_exact_type(signature_k);
    } else {
      result = exact_signature_k;
      do_update = false;
      // Known statically. No need to emit any code: prevent
      // LIR_Assembler::emit_profile_type() from emitting useless code
      profiled_k = ciTypeEntries::with_status(result, profiled_k);
    }
    if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
      assert(exact_klass == NULL, "arg and signature disagree?");
      // sometimes the type of the signature is better than the best type
      // the compiler has
      exact_klass = exact_signature_k;
      do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
    }
  }

  if (!do_null && !do_update) {
    return result;
  }

  if (mdp == LIR_OprFact::illegalOpr) {
    mdp = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), mdp);
    if (md_base_offset != 0) {
      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
      mdp = new_pointer_register();
      __ leal(LIR_OprFact::address(base_type_address), mdp);
    }
  }
  LIRItem value(arg, this);
  value.load_item();
  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
  return result;
}
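The early exits above all test the same pair of flags, which only ever go from true to false. A hypothetical miniature of the gating decision, with the two status bits modelled directly (bit values are illustrative, not the VM's):

    #include <stdint.h>

    const intptr_t null_seen = 1, type_unknown = 2;

    // Emit profiling LIR only if there is still something to learn:
    // either a possible null to record, or a klass to record/compare.
    bool needs_profiling_code(intptr_t profiled_k, bool not_null, bool statically_known) {
      bool do_null   = !not_null && (profiled_k & null_seen) == 0;
      bool do_update = (profiled_k & type_unknown) == 0 && !statically_known;
      return do_null || do_update;
    }

When both flags drop, profile_arg_type emits nothing and simply returns the statically known exact klass (if any) for the caller to record in the MDO at compile time.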
|
||||
|
||||
void LIRGenerator::do_Base(Base* x) {
|
||||
__ std_entry(LIR_OprFact::illegalOpr);
|
||||
// Emit moves from physical registers / stack slots to virtual registers
|
||||
@ -3004,12 +3076,52 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
|
||||
}
|
||||
}
|
||||
|
||||
void LIRGenerator::profile_arguments(ProfileCall* x) {
|
||||
if (MethodData::profile_arguments()) {
|
||||
int bci = x->bci_of_invoke();
|
||||
ciMethodData* md = x->method()->method_data_or_null();
|
||||
ciProfileData* data = md->bci_to_data(bci);
|
||||
if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
|
||||
ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
|
||||
int base_offset = md->byte_offset_of_slot(data, extra);
|
||||
LIR_Opr mdp = LIR_OprFact::illegalOpr;
|
||||
ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
|
||||
|
||||
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
|
||||
int start = 0;
|
||||
int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
|
||||
if (x->nb_profiled_args() < stop) {
|
||||
// if called through method handle invoke, some arguments may have been popped
|
||||
stop = x->nb_profiled_args();
|
||||
}
|
||||
ciSignature* sig = x->callee()->signature();
|
||||
// method handle call to virtual method
|
||||
bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
|
||||
ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
|
||||
for (int i = 0; i < stop; i++) {
|
||||
int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
|
||||
ciKlass* exact = profile_arg_type(md, base_offset, off,
|
||||
args->type(i), x->profiled_arg_at(i+start), mdp,
|
||||
!x->arg_needs_null_check(i+start), sig_stream.next_klass());
|
||||
if (exact != NULL) {
|
||||
md->set_argument_type(bci, i, exact);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
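The loop above never hard-codes absolute slot positions: it takes the offset of the i-th argument's type cell and subtracts args_data_offset(), so the result stays valid wherever the profile block lands relative to the mdp base register. A standalone sketch of that arithmetic; the cell layout used here (one count cell, then {stack slot, type} pairs) is a hypothetical stand-in, not the real MethodData layout:

#include <cstdio>
#include <cstdint>

static const int cell_size = (int) sizeof(intptr_t);

// Assumed layout for illustration only: one header cell holding the
// entry count, then two cells ({stack slot, type tag}) per argument.
static int args_data_offset() { return cell_size; }
static int argument_type_offset(int i) {
  return args_data_offset() + (2 * i + 1) * cell_size;
}

int main() {
  // Same subtraction as in profile_arguments(): relative offsets can be
  // added to whatever base register ends up holding the profile data.
  for (int i = 0; i < 3; i++) {
    printf("argument %d -> relative offset %d bytes\n",
           i, argument_type_offset(i) - args_data_offset());
  }
  return 0;
}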
void LIRGenerator::do_ProfileCall(ProfileCall* x) {
// Need recv in a temporary register so it interferes with the other temporaries
LIR_Opr recv = LIR_OprFact::illegalOpr;
LIR_Opr mdo = new_register(T_OBJECT);
// tmp is used to hold the counters on SPARC
LIR_Opr tmp = new_pointer_register();

if (x->nb_profiled_args() > 0) {
profile_arguments(x);
}

if (x->recv() != NULL) {
LIRItem value(x->recv(), this);
value.load_item();
@ -3019,6 +3131,21 @@ void LIRGenerator::do_ProfileCall(ProfileCall* x) {
__ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}

void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
int bci = x->bci_of_invoke();
ciMethodData* md = x->method()->method_data_or_null();
ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
LIR_Opr mdp = LIR_OprFact::illegalOpr;
ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
ret->type(), x->ret(), mdp,
!x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
if (exact != NULL) {
md->set_return_type(bci, exact);
}
}

void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
// We can safely ignore accessors here, since c2 will inline them anyway,
// accessors are also always mature.
@ -3053,7 +3180,11 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
int offset = -1;
LIR_Opr counter_holder;
if (level == CompLevel_limited_profile) {
address counters_adr = method->ensure_method_counters();
MethodCounters* counters_adr = method->ensure_method_counters();
if (counters_adr == NULL) {
bailout("method counters allocation failed");
return;
}
counter_holder = new_pointer_register();
__ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :

@ -434,6 +434,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
void do_ThreadIDIntrinsic(Intrinsic* x);
void do_ClassIDIntrinsic(Intrinsic* x);
#endif
ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
void profile_arguments(ProfileCall* x);

public:
Compilation* compilation() const { return _compilation; }
@ -534,6 +536,7 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
virtual void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
virtual void do_ProfileCall (ProfileCall* x);
virtual void do_ProfileReturnType (ProfileReturnType* x);
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);

@ -531,6 +531,7 @@ public:
void do_UnsafePrefetchRead (UnsafePrefetchRead* x);
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
void do_ProfileCall (ProfileCall* x);
void do_ProfileReturnType (ProfileReturnType* x);
void do_ProfileInvoke (ProfileInvoke* x);
void do_RuntimeCall (RuntimeCall* x);
void do_MemBar (MemBar* x);
@ -657,6 +658,8 @@ class NullCheckEliminator: public ValueVisitor {
void handle_Intrinsic (Intrinsic* x);
void handle_ExceptionObject (ExceptionObject* x);
void handle_Phi (Phi* x);
void handle_ProfileCall (ProfileCall* x);
void handle_ProfileReturnType (ProfileReturnType* x);
};


@ -715,7 +718,9 @@ void NullCheckVisitor::do_UnsafePutObject(UnsafePutObject* x) {}
void NullCheckVisitor::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {}
void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead* x) {}
void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check(); }
void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_last_explicit_null_check();
nce()->handle_ProfileCall(x); }
void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); }
void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {}
void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {}
void NullCheckVisitor::do_MemBar (MemBar* x) {}
@ -1134,6 +1139,15 @@ void NullCheckEliminator::handle_Phi(Phi* x) {
}
}

void NullCheckEliminator::handle_ProfileCall(ProfileCall* x) {
for (int i = 0; i < x->nb_profiled_args(); i++) {
x->set_arg_needs_null_check(i, !set_contains(x->profiled_arg_at(i)));
}
}

void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) {
x->set_needs_null_check(!set_contains(x->ret()));
}
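Both handlers reduce to one question: is the profiled value already in the eliminator's set of values proven non-null on this path? If so, the null check in the emitted profiling code can be skipped. A toy standalone model of that decision (std::set stands in for C1's value set):

#include <cstdio>
#include <set>

typedef int Value; // stands in for a C1 Value (instruction id)

// A value needs a null check at the profile point exactly when it is
// not already proven non-null.
static bool needs_null_check(const std::set<Value>& non_null, Value v) {
  return non_null.count(v) == 0;
}

int main() {
  std::set<Value> non_null;
  non_null.insert(42); // e.g. a value that was just explicitly null-checked
  printf("value 42 needs check: %d\n", (int) needs_null_check(non_null, 42)); // 0
  printf("value 7 needs check: %d\n", (int) needs_null_check(non_null, 7));   // 1
  return 0;
}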

void Optimizer::eliminate_null_checks() {
ResourceMark rm;

@ -162,7 +162,8 @@ public:
void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ };
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
void do_ProfileCall (ProfileCall* x) { /* nothing to do */ };
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_ProfileReturnType (ProfileReturnType* x) { /* nothing to do */ };
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
void do_MemBar (MemBar* x) { /* nothing to do */ };
void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };

@ -542,8 +542,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// exception handler can cause class loading, which might throw an
// exception and those fields are expected to be clear during
// normal bytecode execution.
thread->set_exception_oop(NULL);
thread->set_exception_pc(NULL);
thread->clear_exception_oop_and_pc();

continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
// If an exception was thrown during exception dispatch, the exception oop may have changed

@ -203,6 +203,7 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ }
void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
void do_ProfileCall (ProfileCall* x) { /* nothing to do */ }
void do_ProfileReturnType (ProfileReturnType* x) { /* nothing to do */ }
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
void do_MemBar (MemBar* x) { /* nothing to do */ };

@ -102,6 +102,7 @@ friend class ciMethodData; \
friend class ciMethodHandle; \
friend class ciMethodType; \
friend class ciReceiverTypeData; \
friend class ciTypeEntries; \
friend class ciSymbol; \
friend class ciArray; \
friend class ciObjArray; \

@ -1154,9 +1154,12 @@ ciInstance* ciEnv::unloaded_ciinstance() {
GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
}

void ciEnv::dump_replay_data(outputStream* out) {
VM_ENTRY_MARK;
MutexLocker ml(Compile_lock);
// ------------------------------------------------------------------
// ciEnv::dump_replay_data*

// Don't change thread state or acquire any locks.
// Safe to call from VM error reporter.
void ciEnv::dump_replay_data_unsafe(outputStream* out) {
ResourceMark rm;
#if INCLUDE_JVMTI
out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables);
@ -1181,3 +1184,10 @@ void ciEnv::dump_replay_data(outputStream* out) {
entry_bci, comp_level);
out->flush();
}

void ciEnv::dump_replay_data(outputStream* out) {
GUARDED_VM_ENTRY(
MutexLocker ml(Compile_lock);
dump_replay_data_unsafe(out);
)
}
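The split above is a small but useful pattern: the core dump routine takes no locks and performs no thread-state transitions (so the VM error reporter can call it), and the ordinary entry point becomes a thin wrapper that adds the locking. A minimal standalone sketch of the same shape (std::mutex stands in for Compile_lock):

#include <cstdio>
#include <mutex>

static std::mutex compile_lock;

// Core routine: no locks, no state changes, callable from an error path.
static void dump_replay_data_unsafe() {
  printf("replay data ...\n");
}

// Normal-path wrapper: take the lock, then reuse the unsafe core.
static void dump_replay_data() {
  std::lock_guard<std::mutex> g(compile_lock);
  dump_replay_data_unsafe();
}

int main() { dump_replay_data(); return 0; }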

@ -452,6 +452,7 @@ public:

// Dump the compilation replay data for the ciEnv to the stream.
void dump_replay_data(outputStream* out);
void dump_replay_data_unsafe(outputStream* out);
};

#endif // SHARE_VM_CI_CIENV_HPP

@ -671,7 +671,6 @@ class StaticFinalFieldPrinter : public FieldClosure {


void ciInstanceKlass::dump_replay_data(outputStream* out) {
ASSERT_IN_VM;
ResourceMark rm;

InstanceKlass* ik = get_instanceKlass();

@ -235,6 +235,13 @@ public:
bool is_instance_klass() const { return true; }
bool is_java_klass() const { return true; }

virtual ciKlass* exact_klass() {
if (is_loaded() && is_final() && !is_interface()) {
return this;
}
return NULL;
}

// Dump the current state of this klass for compilation replay.
virtual void dump_replay_data(outputStream* out);
};

@ -66,7 +66,9 @@ ciKlass::ciKlass(ciSymbol* name, BasicType bt) : ciType(bt) {
// ------------------------------------------------------------------
// ciKlass::is_subtype_of
bool ciKlass::is_subtype_of(ciKlass* that) {
assert(is_loaded() && that->is_loaded(), "must be loaded");
assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));

// Check to see if the klasses are identical.
if (this == that) {
return true;
@ -83,8 +85,8 @@ bool ciKlass::is_subtype_of(ciKlass* that) {
// ------------------------------------------------------------------
// ciKlass::is_subclass_of
bool ciKlass::is_subclass_of(ciKlass* that) {
assert(is_loaded() && that->is_loaded(), "must be loaded");
// Check to see if the klasses are identical.
assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));

VM_ENTRY_MARK;
Klass* this_klass = get_Klass();

@ -41,6 +41,7 @@ class ciKlass : public ciType {
friend class ciEnv;
friend class ciField;
friend class ciMethod;
friend class ciMethodData;
friend class ciObjArrayKlass;

private:
@ -121,6 +122,8 @@ public:
// What kind of ciObject is this?
bool is_klass() const { return true; }

virtual ciKlass* exact_klass() = 0;

void print_name_on(outputStream* st);
};


@ -846,7 +846,9 @@ bool ciMethod::has_member_arg() const {
// Return true if allocation was successful or no MDO is required.
bool ciMethod::ensure_method_data(methodHandle h_m) {
EXCEPTION_CONTEXT;
if (is_native() || is_abstract() || h_m()->is_accessor()) return true;
if (is_native() || is_abstract() || h_m()->is_accessor()) {
return true;
}
if (h_m()->method_data() == NULL) {
Method::build_interpreter_method_data(h_m, THREAD);
if (HAS_PENDING_EXCEPTION) {
@ -903,22 +905,21 @@ ciMethodData* ciMethod::method_data() {
// NULL otherwise.
ciMethodData* ciMethod::method_data_or_null() {
ciMethodData *md = method_data();
if (md->is_empty()) return NULL;
if (md->is_empty()) {
return NULL;
}
return md;
}

// ------------------------------------------------------------------
// ciMethod::ensure_method_counters
//
address ciMethod::ensure_method_counters() {
MethodCounters* ciMethod::ensure_method_counters() {
check_is_loaded();
VM_ENTRY_MARK;
methodHandle mh(THREAD, get_Method());
MethodCounters *counter = mh->method_counters();
if (counter == NULL) {
counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL);
}
return (address)counter;
MethodCounters* method_counters = mh->get_method_counters(CHECK_NULL);
return method_counters;
}
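The rewritten ensure_method_counters() returns the MethodCounters* directly and pushes the allocate-on-first-use logic down into get_method_counters(). The underlying get-or-create idiom in a standalone sketch (names hypothetical, std::mutex in place of VM-internal synchronization):

#include <cstdio>
#include <mutex>
#include <new>

struct Counters { long invocations; Counters() : invocations(0) {} };

class MethodModel {
  Counters* _counters;
  std::mutex _lock;
public:
  MethodModel() : _counters(NULL) {}
  ~MethodModel() { delete _counters; }

  // Allocate the side structure on first use; later callers see the same
  // instance. NULL means allocation failed, which callers (such as
  // increment_event_counter_impl above) must be prepared to handle.
  Counters* get_counters() {
    std::lock_guard<std::mutex> g(_lock);
    if (_counters == NULL) {
      _counters = new (std::nothrow) Counters();
    }
    return _counters;
  }
};

int main() {
  MethodModel m;
  Counters* c = m.get_counters();
  if (c == NULL) { printf("counters allocation failed\n"); return 1; }
  c->invocations++;
  printf("ok, invocations=%ld\n", c->invocations);
  return 0;
}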

// ------------------------------------------------------------------
@ -1247,7 +1248,6 @@ ciMethodBlocks *ciMethod::get_method_blocks() {
#undef FETCH_FLAG_FROM_VM

void ciMethod::dump_replay_data(outputStream* st) {
ASSERT_IN_VM;
ResourceMark rm;
Method* method = get_Method();
MethodCounters* mcs = method->method_counters();

@ -265,7 +265,7 @@ class ciMethod : public ciMetadata {
bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const;
bool check_call(int refinfo_index, bool is_static) const;
bool ensure_method_data(); // make sure it exists in the VM also
address ensure_method_counters();
MethodCounters* ensure_method_counters();
int instructions_size();
int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC


@ -78,7 +78,9 @@ ciMethodData::ciMethodData() : ciMetadata(NULL) {

void ciMethodData::load_data() {
MethodData* mdo = get_MethodData();
if (mdo == NULL) return;
if (mdo == NULL) {
return;
}

// To do: don't copy the data if it is not "ripe" -- require a minimum #
// of invocations.
@ -123,7 +125,7 @@ void ciMethodData::load_data() {
#endif
}

void ciReceiverTypeData::translate_receiver_data_from(ProfileData* data) {
void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
for (uint row = 0; row < row_limit(); row++) {
Klass* k = data->as_ReceiverTypeData()->receiver(row);
if (k != NULL) {
@ -134,6 +136,18 @@ void ciReceiverTypeData::translate_receiver_data_from(ProfileData* data) {
}


void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) {
for (int i = 0; i < _number_of_entries; i++) {
intptr_t k = entries->type(i);
TypeStackSlotEntries::set_type(i, translate_klass(k));
}
}

void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
intptr_t k = ret->type();
set_type(translate_klass(k));
}

// Get the data at an arbitrary (sort of) data index.
ciProfileData* ciMethodData::data_at(int data_index) {
if (out_of_bounds(data_index)) {
@ -164,6 +178,10 @@ ciProfileData* ciMethodData::data_at(int data_index) {
return new ciMultiBranchData(data_layout);
case DataLayout::arg_info_data_tag:
return new ciArgInfoData(data_layout);
case DataLayout::call_type_data_tag:
return new ciCallTypeData(data_layout);
case DataLayout::virtual_call_type_data_tag:
return new ciVirtualCallTypeData(data_layout);
};
}

@ -286,6 +304,34 @@ void ciMethodData::set_would_profile(bool p) {
}
}

void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
VM_ENTRY_MARK;
MethodData* mdo = get_MethodData();
if (mdo != NULL) {
ProfileData* data = mdo->bci_to_data(bci);
if (data->is_CallTypeData()) {
data->as_CallTypeData()->set_argument_type(i, k->get_Klass());
} else {
assert(data->is_VirtualCallTypeData(), "no arguments!");
data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass());
}
}
}

void ciMethodData::set_return_type(int bci, ciKlass* k) {
VM_ENTRY_MARK;
MethodData* mdo = get_MethodData();
if (mdo != NULL) {
ProfileData* data = mdo->bci_to_data(bci);
if (data->is_CallTypeData()) {
data->as_CallTypeData()->set_return_type(k->get_Klass());
} else {
assert(data->is_VirtualCallTypeData(), "no arguments!");
data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
}
}
}

bool ciMethodData::has_escape_info() {
return eflag_set(MethodData::estimated);
}
@ -373,7 +419,6 @@ void ciMethodData::print_impl(outputStream* st) {
}

void ciMethodData::dump_replay_data(outputStream* out) {
ASSERT_IN_VM;
ResourceMark rm;
MethodData* mdo = get_MethodData();
Method* method = mdo->method();
@ -477,7 +522,50 @@ void ciMethodData::print_data_on(outputStream* st) {
}
}

void ciReceiverTypeData::print_receiver_data_on(outputStream* st) {
void ciTypeEntries::print_ciklass(outputStream* st, intptr_t k) {
if (TypeEntries::is_type_none(k)) {
st->print("none");
} else if (TypeEntries::is_type_unknown(k)) {
st->print("unknown");
} else {
valid_ciklass(k)->print_name_on(st);
}
if (TypeEntries::was_null_seen(k)) {
st->print(" (null seen)");
}
}

void ciTypeStackSlotEntries::print_data_on(outputStream* st) const {
for (int i = 0; i < _number_of_entries; i++) {
_pd->tab(st);
st->print("%d: stack (%u) ", i, stack_slot(i));
print_ciklass(st, type(i));
st->cr();
}
}

void ciReturnTypeEntry::print_data_on(outputStream* st) const {
_pd->tab(st);
st->print("ret ");
print_ciklass(st, type());
st->cr();
}

void ciCallTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ciCallTypeData");
if (has_arguments()) {
tab(st, true);
st->print("argument types");
args()->print_data_on(st);
}
if (has_return()) {
tab(st, true);
st->print("return type");
ret()->print_data_on(st);
}
}

void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const {
uint row;
int entries = 0;
for (row = 0; row < row_limit(); row++) {
@ -493,13 +581,28 @@ void ciReceiverTypeData::print_receiver_data_on(outputStream* st) {
}
}

void ciReceiverTypeData::print_data_on(outputStream* st) {
void ciReceiverTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ciReceiverTypeData");
print_receiver_data_on(st);
}

void ciVirtualCallData::print_data_on(outputStream* st) {
void ciVirtualCallData::print_data_on(outputStream* st) const {
print_shared(st, "ciVirtualCallData");
rtd_super()->print_receiver_data_on(st);
}

void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ciVirtualCallTypeData");
rtd_super()->print_receiver_data_on(st);
if (has_arguments()) {
tab(st, true);
st->print("argument types");
args()->print_data_on(st);
}
if (has_return()) {
tab(st, true);
st->print("return type");
ret()->print_data_on(st);
}
}
#endif

@ -41,6 +41,8 @@ class ciBranchData;
class ciArrayData;
class ciMultiBranchData;
class ciArgInfoData;
class ciCallTypeData;
class ciVirtualCallTypeData;

typedef ProfileData ciProfileData;

@ -59,6 +61,103 @@ public:
ciJumpData(DataLayout* layout) : JumpData(layout) {};
};

class ciTypeEntries {
protected:
static intptr_t translate_klass(intptr_t k) {
Klass* v = TypeEntries::valid_klass(k);
if (v != NULL) {
ciKlass* klass = CURRENT_ENV->get_klass(v);
return with_status(klass, k);
}
return with_status(NULL, k);
}

public:
static ciKlass* valid_ciklass(intptr_t k) {
if (!TypeEntries::is_type_none(k) &&
!TypeEntries::is_type_unknown(k)) {
return (ciKlass*)TypeEntries::klass_part(k);
} else {
return NULL;
}
}

static intptr_t with_status(ciKlass* k, intptr_t in) {
return TypeEntries::with_status((intptr_t)k, in);
}

#ifndef PRODUCT
static void print_ciklass(outputStream* st, intptr_t k);
#endif
};
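with_status() and valid_ciklass() only make sense together: the profile entry is a ciKlass pointer with status bits folded into its low bits (pointers are aligned, so those bits are free), and klass_part() masks the bits back off. The tagging idea in a standalone sketch; the bit assignments here are made up for illustration:

#include <cstdio>
#include <cstdint>

// Hypothetical status bits; with at least 4-byte alignment the two low
// bits of a pointer are available for tags.
static const intptr_t null_seen_bit = 1;
static const intptr_t status_mask = 3;

static intptr_t with_status(void* klass, intptr_t in) {
  return (intptr_t) klass | (in & status_mask); // keep the old status bits
}
static void* klass_part(intptr_t entry) {
  return (void*) (entry & ~status_mask);
}

int main() {
  static int dummy_klass; // an aligned object standing in for a Klass
  intptr_t entry = with_status(&dummy_klass, null_seen_bit);
  printf("klass recovered: %d, null seen: %d\n",
         (int) (klass_part(entry) == &dummy_klass),
         (int) (entry & null_seen_bit));
  return 0;
}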

class ciTypeStackSlotEntries : public TypeStackSlotEntries, ciTypeEntries {
public:
void translate_type_data_from(const TypeStackSlotEntries* args);

ciKlass* valid_type(int i) const {
return valid_ciklass(type(i));
}

#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
};

class ciReturnTypeEntry : public ReturnTypeEntry, ciTypeEntries {
public:
void translate_type_data_from(const ReturnTypeEntry* ret);

ciKlass* valid_type() const {
return valid_ciklass(type());
}

#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
};

class ciCallTypeData : public CallTypeData {
public:
ciCallTypeData(DataLayout* layout) : CallTypeData(layout) {}

ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); }
ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); }

void translate_type_data_from(const ProfileData* data) {
if (has_arguments()) {
args()->translate_type_data_from(data->as_CallTypeData()->args());
}
if (has_return()) {
ret()->translate_type_data_from(data->as_CallTypeData()->ret());
}
}

intptr_t argument_type(int i) const {
assert(has_arguments(), "no arg type profiling data");
return args()->type(i);
}

ciKlass* valid_argument_type(int i) const {
assert(has_arguments(), "no arg type profiling data");
return args()->valid_type(i);
}

intptr_t return_type() const {
assert(has_return(), "no ret type profiling data");
return ret()->type();
}

ciKlass* valid_return_type() const {
assert(has_return(), "no ret type profiling data");
return ret()->valid_type();
}

#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
};

class ciReceiverTypeData : public ReceiverTypeData {
public:
ciReceiverTypeData(DataLayout* layout) : ReceiverTypeData(layout) {};
@ -69,7 +168,7 @@ public:
(intptr_t) recv);
}

ciKlass* receiver(uint row) {
ciKlass* receiver(uint row) const {
assert((uint)row < row_limit(), "oob");
ciKlass* recv = (ciKlass*)intptr_at(receiver0_offset + row * receiver_type_row_cell_count);
assert(recv == NULL || recv->is_klass(), "wrong type");
@ -77,19 +176,19 @@ public:
}

// Copy & translate from oop based ReceiverTypeData
virtual void translate_from(ProfileData* data) {
virtual void translate_from(const ProfileData* data) {
translate_receiver_data_from(data);
}
void translate_receiver_data_from(ProfileData* data);
void translate_receiver_data_from(const ProfileData* data);
#ifndef PRODUCT
void print_data_on(outputStream* st);
void print_receiver_data_on(outputStream* st);
void print_data_on(outputStream* st) const;
void print_receiver_data_on(outputStream* st) const;
#endif
};

class ciVirtualCallData : public VirtualCallData {
// Fake multiple inheritance... It's a ciReceiverTypeData also.
ciReceiverTypeData* rtd_super() { return (ciReceiverTypeData*) this; }
ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }

public:
ciVirtualCallData(DataLayout* layout) : VirtualCallData(layout) {};
@ -103,11 +202,65 @@ public:
}

// Copy & translate from oop based VirtualCallData
virtual void translate_from(ProfileData* data) {
virtual void translate_from(const ProfileData* data) {
rtd_super()->translate_receiver_data_from(data);
}
#ifndef PRODUCT
void print_data_on(outputStream* st);
void print_data_on(outputStream* st) const;
#endif
};

class ciVirtualCallTypeData : public VirtualCallTypeData {
private:
// Fake multiple inheritance... It's a ciReceiverTypeData also.
ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
public:
ciVirtualCallTypeData(DataLayout* layout) : VirtualCallTypeData(layout) {}

void set_receiver(uint row, ciKlass* recv) {
rtd_super()->set_receiver(row, recv);
}

ciKlass* receiver(uint row) const {
return rtd_super()->receiver(row);
}

ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); }
ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)VirtualCallTypeData::ret(); }

// Copy & translate from oop based VirtualCallData
virtual void translate_from(const ProfileData* data) {
rtd_super()->translate_receiver_data_from(data);
if (has_arguments()) {
args()->translate_type_data_from(data->as_VirtualCallTypeData()->args());
}
if (has_return()) {
ret()->translate_type_data_from(data->as_VirtualCallTypeData()->ret());
}
}

intptr_t argument_type(int i) const {
assert(has_arguments(), "no arg type profiling data");
return args()->type(i);
}

ciKlass* valid_argument_type(int i) const {
assert(has_arguments(), "no arg type profiling data");
return args()->valid_type(i);
}

intptr_t return_type() const {
assert(has_return(), "no ret type profiling data");
return ret()->type();
}

ciKlass* valid_return_type() const {
assert(has_return(), "no ret type profiling data");
return ret()->valid_type();
}

#ifndef PRODUCT
void print_data_on(outputStream* st) const;
#endif
};

@ -232,8 +385,6 @@ private:
public:
bool is_method_data() const { return true; }

void set_mature() { _state = mature_state; }

bool is_empty() { return _state == empty_state; }
bool is_mature() { return _state == mature_state; }

@ -249,6 +400,10 @@ public:
// Also set the number of loops and blocks in the method.
// Again, this is used to determine if a method is trivial.
void set_compilation_stats(short loops, short blocks);
// If the compiler finds a profiled type that is known statically
// for sure, set it in the MethodData
void set_argument_type(int bci, int i, ciKlass* k);
void set_return_type(int bci, ciKlass* k);

void load_data();


@ -179,3 +179,16 @@ ciObjArrayKlass* ciObjArrayKlass::make_impl(ciKlass* element_klass) {
ciObjArrayKlass* ciObjArrayKlass::make(ciKlass* element_klass) {
GUARDED_VM_ENTRY(return make_impl(element_klass);)
}

ciKlass* ciObjArrayKlass::exact_klass() {
ciType* base = base_element_type();
if (base->is_instance_klass()) {
ciInstanceKlass* ik = base->as_instance_klass();
if (ik->exact_klass() != NULL) {
return this;
}
} else if (base->is_primitive_type()) {
return this;
}
return NULL;
}
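exact_klass() answers: is this static type the only concrete type a value can have at runtime? For object arrays the question reduces to the base element type, since int[] is always exactly int[] while Object[] might really be a String[]. A toy standalone model of that rule (deliberately simplified):

#include <cstdio>

struct ElementType {
  bool is_primitive;
  bool is_final_loaded_class;
  bool exact() const { return is_primitive || is_final_loaded_class; }
};

// An array type is exact iff its base element type is exact.
static bool array_exact(const ElementType& base) { return base.exact(); }

int main() {
  ElementType int_t = { true, false };     // int[]
  ElementType object_t = { false, false }; // Object[]
  ElementType string_t = { false, true };  // String[] (String is final)
  printf("int[] exact: %d\n", (int) array_exact(int_t));       // 1
  printf("Object[] exact: %d\n", (int) array_exact(object_t)); // 0
  printf("String[] exact: %d\n", (int) array_exact(string_t)); // 1
  return 0;
}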

@ -73,6 +73,8 @@ public:
bool is_obj_array_klass() const { return true; }

static ciObjArrayKlass* make(ciKlass* element_klass);

virtual ciKlass* exact_klass();
};

#endif // SHARE_VM_CI_CIOBJARRAYKLASS_HPP

@ -965,14 +965,12 @@ void ciReplay::initialize(ciMethod* m) {
tty->cr();
} else {
EXCEPTION_CONTEXT;
MethodCounters* mcs = method->method_counters();
// m->_instructions_size = rec->instructions_size;
m->_instructions_size = -1;
m->_interpreter_invocation_count = rec->interpreter_invocation_count;
m->_interpreter_throwout_count = rec->interpreter_throwout_count;
if (mcs == NULL) {
mcs = Method::build_method_counters(method, CHECK_AND_CLEAR);
}
MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR);
guarantee(mcs != NULL, "method counters allocation failed");
mcs->invocation_counter()->_counter = rec->invocation_counter;
mcs->backedge_counter()->_counter = rec->backedge_counter;
}

@ -277,11 +277,14 @@ public:
class ciSignatureStream : public StackObj {
private:
ciSignature* _sig;
int _pos;
int _pos;
// holder is a method's holder
ciKlass* _holder;
public:
ciSignatureStream(ciSignature* signature) {
ciSignatureStream(ciSignature* signature, ciKlass* holder = NULL) {
_sig = signature;
_pos = 0;
_holder = holder;
}

bool at_return_type() { return _pos == _sig->count(); }
@ -301,6 +304,23 @@ public:
return _sig->type_at(_pos);
}
}

// next klass in the signature
ciKlass* next_klass() {
ciKlass* sig_k;
if (_holder != NULL) {
sig_k = _holder;
_holder = NULL;
} else {
while (!type()->is_klass()) {
next();
}
assert(!at_return_type(), "passed end of signature");
sig_k = type()->as_klass();
next();
}
return sig_k;
}
};
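next_klass() hands out the method holder first (for the implicit receiver) and then walks the signature, skipping primitive entries. The same skipping logic over a raw JVM method descriptor, as a standalone sketch; simplified in that arrays are reported whole and no validation is done:

#include <cstdio>
#include <string>
#include <vector>

// Collect the reference-type entries from a method descriptor such as
// "(I[Ljava/lang/String;JLfoo/Bar;[I)V", skipping primitives.
static std::vector<std::string> reference_args(const std::string& desc) {
  std::vector<std::string> out;
  size_t i = desc.find('(') + 1;
  while (i < desc.size() && desc[i] != ')') {
    size_t start = i;
    while (desc[i] == '[') i++;            // array dimensions
    if (desc[i] == 'L') {                  // class type: L...;
      while (desc[i] != ';') i++;
      i++;
      out.push_back(desc.substr(start, i - start));
    } else {
      i++;                                 // primitive: a single character
      if (i - start > 1) {                 // an array of primitives is still a reference type
        out.push_back(desc.substr(start, i - start));
      }
    }
  }
  return out;
}

int main() {
  std::vector<std::string> refs = reference_args("(I[Ljava/lang/String;JLfoo/Bar;[I)V");
  for (size_t k = 0; k < refs.size(); k++) {
    printf("%s\n", refs[k].c_str());
  }
  return 0;
}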


@ -57,6 +57,10 @@ public:

// Make an array klass corresponding to the specified primitive type.
static ciTypeArrayKlass* make(BasicType type);

virtual ciKlass* exact_klass() {
return this;
}
};

#endif // SHARE_VM_CI_CITYPEARRAYKLASS_HPP

@ -857,7 +857,6 @@ static Method* new_method(
m->set_max_locals(params);
m->constMethod()->set_stackmap_data(NULL);
m->set_code(code_start);
m->set_force_inline(true);

return m;
}

@ -2439,19 +2439,19 @@ void ClassVerifier::verify_invoke_instructions(
&& !ref_class_type.equals(current_type())
&& !ref_class_type.equals(VerificationType::reference_type(
current_class()->super()->name()))) {
bool subtype = ref_class_type.is_assignable_from(
current_type(), this, CHECK_VERIFY(this));
bool subtype = false;
if (!current_class()->is_anonymous()) {
subtype = ref_class_type.is_assignable_from(
current_type(), this, CHECK_VERIFY(this));
} else {
subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
}
if (!subtype) {
if (current_class()->is_anonymous()) {
subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
}
if (!subtype) {
verify_error(ErrorContext::bad_code(bci),
"Bad invokespecial instruction: "
"current class isn't assignable to reference class.");
return;
}
verify_error(ErrorContext::bad_code(bci),
"Bad invokespecial instruction: "
"current class isn't assignable to reference class.");
return;
}
}
// Match method descriptor with operand stack
@ -2470,17 +2470,13 @@ void ClassVerifier::verify_invoke_instructions(
if (!current_class()->is_anonymous()) {
current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
} else {
// anonymous class invokespecial calls: either the
// operand stack/objectref is a subtype of the current class OR
// the objectref is a subtype of the host_klass of the current class
// anonymous class invokespecial calls: check if the
// objectref is a subtype of the host_klass of the current class
// to allow an anonymous class to reference methods in the host_klass
VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
bool subtype = current_type().is_assignable_from(top, this, CHECK_VERIFY(this));
if (!subtype) {
VerificationType hosttype =
VerificationType::reference_type(current_class()->host_klass()->name());
subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
}
VerificationType hosttype =
VerificationType::reference_type(current_class()->host_klass()->name());
bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
if (!subtype) {
verify_error( ErrorContext::bad_type(current_frame->offset(),
current_frame->stack_top_ctx(),

@ -245,8 +245,8 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
}


void* BufferBlob::operator new(size_t s, unsigned size) throw() {
void* p = CodeCache::allocate(size);
void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
void* p = CodeCache::allocate(size, is_critical);
return p;
}

@ -277,7 +277,10 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) AdapterBlob(size, cb);
// The parameter 'true' indicates a critical memory allocation.
// This means that CodeCacheMinimumFreeSpace is used, if necessary
const bool is_critical = true;
blob = new (size, is_critical) AdapterBlob(size, cb);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -299,7 +302,10 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
size += round_to(buffer_size, oopSize);
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) MethodHandlesAdapterBlob(size);
// The parameter 'true' indicates a critical memory allocation.
// This means that CodeCacheMinimumFreeSpace is used, if necessary
const bool is_critical = true;
blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
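The is_critical flag travels through a class-specific operator new with extra parameters, which is what the new (size, is_critical) syntax above selects. A minimal standalone version of that shape (malloc stands in for the real code-cache allocator; compiles as C++11):

#include <cstdio>
#include <cstdlib>
#include <new>

struct Blob {
  int _size;
  Blob(int size) : _size(size) {}

  // Extra-argument operator new, chosen by 'new (size, is_critical) Blob(...)'.
  void* operator new(size_t, unsigned size, bool is_critical = false) throw() {
    printf("allocating %u bytes (critical=%d)\n", size, (int) is_critical);
    return malloc(size);
  }
  // Matching placement delete (used if the constructor throws) plus the
  // ordinary delete for normal destruction.
  void operator delete(void* p, unsigned, bool) { free(p); }
  void operator delete(void* p) { free(p); }
};

int main() {
  Blob* b = new (256, true) Blob(256);
  if (b != NULL) {
    printf("blob size %d\n", b->_size);
    delete b;
  }
  return 0;
}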

@ -209,7 +209,7 @@ class BufferBlob: public CodeBlob {
BufferBlob(const char* name, int size);
BufferBlob(const char* name, int size, CodeBuffer* cb);

void* operator new(size_t s, unsigned size) throw();
void* operator new(size_t s, unsigned size, bool is_critical = false) throw();

public:
// Creation
@ -253,7 +253,6 @@ public:
class MethodHandlesAdapterBlob: public BufferBlob {
private:
MethodHandlesAdapterBlob(int size) : BufferBlob("MethodHandles adapters", size) {}
MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}

public:
// Creation

@ -24,41 +24,42 @@

#include "precompiled.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "runtime/mutexLocker.hpp"
void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
if (*state != initialized) {

// We are thread in native here...
CompilerThread* thread = CompilerThread::current();
bool do_initialization = false;
{
ThreadInVMfromNative tv(thread);
ResetNoHandleMark rnhm;
MutexLocker only_one(CompileThread_lock, thread);
if ( *state == uninitialized) {
do_initialization = true;
*state = initializing;
} else {
while (*state == initializing ) {
CompileThread_lock->wait();
}
bool AbstractCompiler::should_perform_init() {
if (_compiler_state != initialized) {
MutexLocker only_one(CompileThread_lock);

if (_compiler_state == uninitialized) {
_compiler_state = initializing;
return true;
} else {
while (_compiler_state == initializing) {
CompileThread_lock->wait();
}
}
if (do_initialization) {
// We can not hold any locks here since JVMTI events may call agents

// Compiler(s) run as native

(*f)();

// To in_vm so we can use the lock

ThreadInVMfromNative tv(thread);
ResetNoHandleMark rnhm;
MutexLocker only_one(CompileThread_lock, thread);
assert(*state == initializing, "wrong state");
*state = initialized;
CompileThread_lock->notify_all();
}
}
return false;
}

bool AbstractCompiler::should_perform_shutdown() {
// Since this method can be called by multiple threads, the lock ensures atomicity of
// decrementing '_num_compiler_threads' and the following operations.
MutexLocker only_one(CompileThread_lock);
_num_compiler_threads--;
assert (CompileBroker::is_compilation_disabled_forever(), "Must be set, otherwise thread waits forever");

// Only the last thread will perform shutdown operations
if (_num_compiler_threads == 0) {
return true;
}
return false;
}

void AbstractCompiler::set_state(int state) {
// Ensure that state is only set by one thread at a time
MutexLocker only_one(CompileThread_lock);
_compiler_state = state;
CompileThread_lock->notify_all();
}
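should_perform_init() elects exactly one initializer per compiler object and parks every other thread until the state flips to initialized; should_perform_shutdown() is the mirror image, electing the last thread out. The election-and-wait protocol in portable C++ (std::mutex and std::condition_variable in place of CompileThread_lock):

#include <cstdio>
#include <mutex>
#include <condition_variable>
#include <thread>

enum State { uninitialized, initializing, initialized };

static State state = uninitialized;
static std::mutex m;
static std::condition_variable cv;

// True for exactly one caller, which must do the one-time work and then
// publish it via set_initialized(); all other callers block until then.
static bool should_perform_init() {
  std::unique_lock<std::mutex> lock(m);
  if (state == uninitialized) {
    state = initializing;
    return true;
  }
  cv.wait(lock, []{ return state == initialized; });
  return false;
}

static void set_initialized() {
  { std::lock_guard<std::mutex> g(m); state = initialized; }
  cv.notify_all();
}

static void worker(int id) {
  if (should_perform_init()) {
    printf("thread %d performs the one-time initialization\n", id);
    set_initialized();
  } else {
    printf("thread %d sees initialization already done\n", id);
  }
}

int main() {
  std::thread t1(worker, 1), t2(worker, 2), t3(worker, 3);
  t1.join(); t2.join(); t3.join();
  return 0;
}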

@ -27,22 +27,25 @@

#include "ci/compilerInterface.hpp"

typedef void (*initializer)(void);

class AbstractCompiler : public CHeapObj<mtCompiler> {
private:
bool _is_initialized; // Mark whether compiler object is initialized
volatile int _num_compiler_threads;

protected:
volatile int _compiler_state;
// Used for tracking global state of compiler runtime initialization
enum { uninitialized, initializing, initialized };
enum { uninitialized, initializing, initialized, failed, shut_down };

// This method will call the initialization method "f" once (per compiler class/subclass)
// and do so without holding any locks
void initialize_runtimes(initializer f, volatile int* state);
// This method returns true for the first compiler thread that reaches this method.
// This thread will initialize the compiler runtime.
bool should_perform_init();

public:
AbstractCompiler() : _is_initialized(false) {}
AbstractCompiler() : _compiler_state(uninitialized), _num_compiler_threads(0) {}

// This function determines the compiler thread that will perform the
// shutdown of the corresponding compiler runtime.
bool should_perform_shutdown();

// Name of this compiler
virtual const char* name() = 0;
@ -74,17 +77,18 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
#endif // TIERED

// Customization
virtual bool needs_stubs () = 0;
virtual void initialize () = 0;

void mark_initialized() { _is_initialized = true; }
bool is_initialized() { return _is_initialized; }

virtual void initialize() = 0;
void set_num_compiler_threads(int num) { _num_compiler_threads = num; }
int num_compiler_threads() { return _num_compiler_threads; }

// Get/set state of compiler objects
bool is_initialized() { return _compiler_state == initialized; }
bool is_failed () { return _compiler_state == failed; }
void set_state (int state);
void set_shut_down () { set_state(shut_down); }
// Compilation entry point for methods
virtual void compile_method(ciEnv* env,
ciMethod* target,
int entry_bci) {
virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
ShouldNotReachHere();
}

@ -186,7 +186,7 @@ CompileQueue* CompileBroker::_c2_method_queue = NULL;
CompileQueue* CompileBroker::_c1_method_queue = NULL;
CompileTask* CompileBroker::_task_free_list = NULL;

GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;


class CompilationLog : public StringEventLog {
@ -587,9 +587,6 @@ void CompileTask::log_task_done(CompileLog* log) {


// ------------------------------------------------------------------
// CompileQueue::add
//
// Add a CompileTask to a CompileQueue
void CompileQueue::add(CompileTask* task) {
assert(lock()->owned_by_self(), "must own lock");
@ -626,6 +623,16 @@ void CompileQueue::add(CompileTask* task) {
lock()->notify_all();
}

void CompileQueue::delete_all() {
assert(lock()->owned_by_self(), "must own lock");
if (_first != NULL) {
for (CompileTask* task = _first; task != NULL; task = task->next()) {
delete task;
}
_first = NULL;
}
}
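One detail worth flagging in delete_all() as committed: the for-loop advances with task->next() after delete task has already freed the node, which only works if the freed memory happens to stay intact. The conventional defensive form caches the successor first; a standalone sketch:

#include <cstdio>
#include <cstddef>

struct Task { int id; Task* next; };

// Free every node of a singly linked list without touching freed memory:
// read the successor pointer before deleting the node.
static void delete_all(Task*& first) {
  for (Task* t = first; t != NULL; ) {
    Task* next = t->next; // read BEFORE the node is freed
    delete t;
    t = next;
  }
  first = NULL;
}

int main() {
  Task* head = new Task{1, new Task{2, new Task{3, NULL}}};
  delete_all(head);
  printf("list emptied: %d\n", (int) (head == NULL));
  return 0;
}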

// ------------------------------------------------------------------
// CompileQueue::get
//
@ -640,6 +647,11 @@ CompileTask* CompileQueue::get() {
// case we perform code cache sweeps to free memory such that we can re-enable
// compilation.
while (_first == NULL) {
// Exit loop if compilation is disabled forever
if (CompileBroker::is_compilation_disabled_forever()) {
return NULL;
}

if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
// Wait a certain amount of time to possibly do another sweep.
// We must wait until stack scanning has happened so that we can
@ -664,9 +676,17 @@ CompileTask* CompileQueue::get() {
// remains unchanged. This behavior is desired, since we want to keep
// the stable state, i.e., we do not want to evict methods from the
// code cache if it is unnecessary.
lock()->wait();
// We need a timed wait here, since compiler threads can exit if compilation
// is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
// is not critical and we do not want idle compiler threads to wake up too often.
lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
}
}

if (CompileBroker::is_compilation_disabled_forever()) {
return NULL;
}

CompileTask* task = CompilationPolicy::policy()->select_task(this);
remove(task);
return task;
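Replacing the untimed lock()->wait() with a 5-second timed wait is what lets idle compiler threads notice is_compilation_disabled_forever() and exit even if no task ever arrives. The same wake-up-and-recheck loop in portable C++ (std::condition_variable::wait_for):

#include <cstdio>
#include <chrono>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include <thread>

static std::mutex m;
static std::condition_variable cv;
static bool has_work = false;
static std::atomic<bool> disabled_forever(false);

// Returns true when work arrived, false when the caller should exit.
// The 5 second timeout mirrors lock()->wait(..., 5*1000) above: even
// without a notification, the exit flag is rechecked periodically.
static bool wait_for_work() {
  std::unique_lock<std::mutex> lock(m);
  while (!has_work) {
    if (disabled_forever.load()) return false;
    cv.wait_for(lock, std::chrono::seconds(5));
  }
  has_work = false;
  return true;
}

int main() {
  std::thread t([]{ printf("worker exits: %d\n", (int) !wait_for_work()); });
  disabled_forever.store(true);
  cv.notify_all(); // wake the waiter so it can observe the flag promptly
  t.join();
  return 0;
}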
@ -891,10 +911,8 @@ void CompileBroker::compilation_init() {
}


// ------------------------------------------------------------------
// CompileBroker::make_compiler_thread
CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS) {
CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
AbstractCompiler* comp, TRAPS) {
CompilerThread* compiler_thread = NULL;

Klass* k =
@ -961,6 +979,7 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
java_lang_Thread::set_daemon(thread_oop());

compiler_thread->set_threadObj(thread_oop());
compiler_thread->set_compiler(comp);
Threads::add(compiler_thread);
Thread::start(compiler_thread);
}
@ -972,25 +991,24 @@ CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQue
}


// ------------------------------------------------------------------
// CompileBroker::init_compiler_threads
//
// Initialize the compilation queue
void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
EXCEPTION_MARK;
#if !defined(ZERO) && !defined(SHARK)
assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
#endif // !ZERO && !SHARK
// Initialize the compilation queue
if (c2_compiler_count > 0) {
_c2_method_queue = new CompileQueue("C2MethodQueue", MethodCompileQueue_lock);
_compilers[1]->set_num_compiler_threads(c2_compiler_count);
}
if (c1_compiler_count > 0) {
_c1_method_queue = new CompileQueue("C1MethodQueue", MethodCompileQueue_lock);
_compilers[0]->set_num_compiler_threads(c1_compiler_count);
}

int compiler_count = c1_compiler_count + c2_compiler_count;

_method_threads =
_compiler_threads =
new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);

char name_buffer[256];
@ -998,21 +1016,22 @@ void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler
// Create a name for our thread.
sprintf(name_buffer, "C2 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
_method_threads->append(new_thread);
// Shark and C2
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
_compiler_threads->append(new_thread);
}

for (int i = c2_compiler_count; i < compiler_count; i++) {
// Create a name for our thread.
sprintf(name_buffer, "C1 CompilerThread%d", i);
CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
_method_threads->append(new_thread);
// C1
CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
_compiler_threads->append(new_thread);
}

if (UsePerfData) {
PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
compiler_count, CHECK);
PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
}
}

@ -1028,27 +1047,6 @@ void CompileBroker::mark_on_stack() {
}
}

// ------------------------------------------------------------------
// CompileBroker::is_idle
bool CompileBroker::is_idle() {
if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
return false;
} else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
return false;
} else {
int num_threads = _method_threads->length();
for (int i=0; i<num_threads; i++) {
if (_method_threads->at(i)->task() != NULL) {
return false;
}
}

// No pending or active compilations.
return true;
}
}


// ------------------------------------------------------------------
// CompileBroker::compile_method
//
@ -1551,6 +1549,101 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
free_task(task);
}

// Initialize compiler thread(s) + compiler object(s). The postcondition
// of this function is that the compiler runtimes are initialized and that
// compiler threads can start compiling.
bool CompileBroker::init_compiler_runtime() {
CompilerThread* thread = CompilerThread::current();
AbstractCompiler* comp = thread->compiler();
// Final sanity check - the compiler object must exist
guarantee(comp != NULL, "Compiler object must exist");

int system_dictionary_modification_counter;
{
MutexLocker locker(Compile_lock, thread);
system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
}

{
// Must switch to native to allocate ci_env
ThreadToNativeFromVM ttn(thread);
ciEnv ci_env(NULL, system_dictionary_modification_counter);
// Cache Jvmti state
ci_env.cache_jvmti_state();
// Cache DTrace flags
ci_env.cache_dtrace_flags();

// Switch back to VM state to do compiler initialization
ThreadInVMfromNative tv(thread);
ResetNoHandleMark rnhm;


if (!comp->is_shark()) {
// Perform per-thread and global initializations
comp->initialize();
}
}

if (comp->is_failed()) {
disable_compilation_forever();
// If compiler initialization failed, no compiler thread that is specific to a
// particular compiler runtime will ever start to compile methods.

shutdown_compiler_runtime(comp, thread);
return false;
}

// C1 specific check
if (comp->is_c1() && (thread->get_buffer_blob() == NULL)) {
warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
return false;
}

return true;
}

// If C1 and/or C2 initialization failed, we shut down all compilation.
// We do this to keep things simple. This can be changed if it ever turns out to be
// a problem.
void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
// Free buffer blob, if allocated
if (thread->get_buffer_blob() != NULL) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(thread->get_buffer_blob());
}

if (comp->should_perform_shutdown()) {
// There are two reasons for shutting down the compiler
// 1) compiler runtime initialization failed
// 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
warning("Shutting down compiler %s (no space to run compilers)", comp->name());

// Only one thread per compiler runtime object enters here
// Set state to shut down
comp->set_shut_down();

MutexLocker mu(MethodCompileQueue_lock, thread);
CompileQueue* queue;
if (_c1_method_queue != NULL) {
_c1_method_queue->delete_all();
queue = _c1_method_queue;
_c1_method_queue = NULL;
delete _c1_method_queue;
}

if (_c2_method_queue != NULL) {
_c2_method_queue->delete_all();
queue = _c2_method_queue;
_c2_method_queue = NULL;
delete _c2_method_queue;
}

// We could delete compiler runtimes also. However, there are references to
// the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then
// fail. This can be done later if necessary.
}
}

// ------------------------------------------------------------------
// CompileBroker::compiler_thread_loop
//
@ -1558,7 +1651,6 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
void CompileBroker::compiler_thread_loop() {
CompilerThread* thread = CompilerThread::current();
CompileQueue* queue = thread->queue();

// For the thread that initializes the ciObjectFactory
// this resource mark holds all the shared objects
ResourceMark rm;
@ -1587,64 +1679,77 @@ void CompileBroker::compiler_thread_loop() {
log->end_elem();
}

while (true) {
{
// We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread);
// If compiler thread/runtime initialization fails, exit the compiler thread
if (!init_compiler_runtime()) {
return;
}

if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
// the code cache is really full
handle_full_code_cache();
}
// Poll for new compilation tasks as long as the JVM runs. Compilation
// should only be disabled if something went wrong while initializing the
// compiler runtimes. This, in turn, should not happen. The only known case
// when compiler runtime initialization fails is if there is not enough free
// space in the code cache to generate the necessary stubs, etc.
while (!is_compilation_disabled_forever()) {
// We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread);

CompileTask* task = queue->get();
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
// the code cache is really full
handle_full_code_cache();
}

// Give compiler threads an extra quanta. They tend to be bursty and
// this helps the compiler to finish up the job.
if( CompilerThreadHintNoPreempt )
os::hint_no_preempt();
CompileTask* task = queue->get();
if (task == NULL) {
continue;
}

// trace per thread time and compile statistics
CompilerCounters* counters = ((CompilerThread*)thread)->counters();
PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
// Give compiler threads an extra quanta. They tend to be bursty and
// this helps the compiler to finish up the job.
if( CompilerThreadHintNoPreempt )
os::hint_no_preempt();

// Assign the task to the current thread. Mark this compilation
// thread as active for the profiler.
CompileTaskWrapper ctw(task);
nmethodLocker result_handle; // (handle for the nmethod produced by this task)
task->set_code_handle(&result_handle);
methodHandle method(thread, task->method());
// trace per thread time and compile statistics
CompilerCounters* counters = ((CompilerThread*)thread)->counters();
PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());

// Never compile a method if breakpoints are present in it
if (method()->number_of_breakpoints() == 0) {
// Compile the method.
if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
// Assign the task to the current thread. Mark this compilation
// thread as active for the profiler.
CompileTaskWrapper ctw(task);
nmethodLocker result_handle; // (handle for the nmethod produced by this task)
task->set_code_handle(&result_handle);
methodHandle method(thread, task->method());

// Never compile a method if breakpoints are present in it
if (method()->number_of_breakpoints() == 0) {
// Compile the method.
if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
#ifdef COMPILER1
// Allow repeating compilations for the purpose of benchmarking
// compile speed. This is not useful for customers.
if (CompilationRepeat != 0) {
int compile_count = CompilationRepeat;
while (compile_count > 0) {
invoke_compiler_on_method(task);
nmethod* nm = method->code();
if (nm != NULL) {
nm->make_zombie();
method->clear_code();
}
compile_count--;
// Allow repeating compilations for the purpose of benchmarking
// compile speed. This is not useful for customers.
if (CompilationRepeat != 0) {
int compile_count = CompilationRepeat;
while (compile_count > 0) {
invoke_compiler_on_method(task);
nmethod* nm = method->code();
if (nm != NULL) {
nm->make_zombie();
method->clear_code();
}
compile_count--;
}
#endif /* COMPILER1 */
invoke_compiler_on_method(task);
} else {
// After compilation is disabled, remove remaining methods from queue
method->clear_queued_for_compilation();
}
#endif /* COMPILER1 */
invoke_compiler_on_method(task);
} else {
// After compilation is disabled, remove remaining methods from queue
method->clear_queued_for_compilation();
}
}
}
}

// Shut down compiler runtime
shutdown_compiler_runtime(thread->compiler(), thread);
}
|
||||
|
||||
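The hunk above restructures compiler_thread_loop() from a single while (true) poll into an outer phase that first initializes the compiler runtime, an inner loop that polls until compilation is disabled forever, and a final runtime shutdown. A minimal standalone sketch of that two-phase shape, with illustrative names rather than HotSpot's own APIs:

#include <atomic>
#include <cstdio>

// Hypothetical stand-ins for the broker state; names are illustrative only.
enum CompilerActivity { stop_compilation = 0, run_compilation = 1, shutdown_compilation = 2 };
static std::atomic<int> g_activity(run_compilation);

static bool init_runtime()      { std::puts("init runtime");     return true; }
static void shutdown_runtime()  { std::puts("shutdown runtime"); }
static bool disabled_forever()  { return g_activity.load() == shutdown_compilation; }

// Two-phase worker loop: initialize once, poll until compilation is
// disabled forever, then release the runtime's resources.
void compiler_thread_loop_sketch() {
  if (!init_runtime()) return;               // failed init: exit the thread
  while (!disabled_forever()) {
    // ... fetch a task from the queue and compile it ...
    g_activity.store(shutdown_compilation);  // demo: stop after one pass
  }
  shutdown_runtime();
}

int main() { compiler_thread_loop_sketch(); }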
// ------------------------------------------------------------------
// CompileBroker::init_compiler_thread_log
@ -1953,11 +2058,14 @@ void CompileBroker::handle_full_code_cache() {
// Since code cache is full, immediately stop new compiles
if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
NMethodSweeper::log_sweep("disable_compiler");

// Switch to 'vm_state'. This ensures that possibly_sweep() can be called
// without having to consider the state in which the current thread is.
ThreadInVMfromUnknown in_vm;
NMethodSweeper::possibly_sweep();
}
} else {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
disable_compilation_forever();
}
}
codecache_print(/* detailed= */ true);

@ -213,8 +213,12 @@ class CompileQueue : public CHeapObj<mtCompiler> {

// Redefine Classes support
void mark_on_stack();

void delete_all();
void print();

~CompileQueue() {
assert (is_empty(), " Compile Queue must be empty");
}
};

// CompileTaskWrapper
@ -266,7 +270,7 @@ class CompileBroker: AllStatic {
static CompileQueue* _c1_method_queue;
static CompileTask* _task_free_list;

static GrowableArray<CompilerThread*>* _method_threads;
static GrowableArray<CompilerThread*>* _compiler_threads;

// performance counters
static PerfCounter* _perf_total_compilation;
@ -311,7 +315,7 @@ class CompileBroker: AllStatic {
static int _sum_nmethod_code_size;
static long _peak_compilation_time;

static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level);
static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
@ -351,6 +355,9 @@ class CompileBroker: AllStatic {
if (is_c1_compile(comp_level)) return _c1_method_queue;
return NULL;
}
static bool init_compiler_runtime();
static void shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread);

public:
enum {
// The entry bci used for non-OSR compilations.
@ -378,9 +385,7 @@ class CompileBroker: AllStatic {
const char* comment, Thread* thread);

static void compiler_thread_loop();

static uint get_compilation_id() { return _compilation_id; }
static bool is_idle();

// Set _should_block.
// Call this from the VM, with Threads_lock held and a safepoint requested.
@ -391,8 +396,9 @@ class CompileBroker: AllStatic {

enum {
// Flags for toggling compiler activity
stop_compilation = 0,
run_compilation = 1
stop_compilation = 0,
run_compilation = 1,
shutdown_compilaton = 2
};

static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
@ -401,6 +407,16 @@ class CompileBroker: AllStatic {
jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
return (old == (1-new_state));
}

static void disable_compilation_forever() {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
Atomic::xchg(shutdown_compilaton, &_should_compile_new_jobs);
}

static bool is_compilation_disabled_forever() {
return _should_compile_new_jobs == shutdown_compilaton;
}
static void handle_full_code_cache();

// Return total compilation ticks

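The added shutdown state turns _should_compile_new_jobs into a small atomic state machine: set_should_compile_new_jobs() flips between run and stop with a compare-exchange so exactly one caller observes the transition, while disable_compilation_forever() swaps in a terminal value unconditionally. A self-contained sketch using std::atomic in place of HotSpot's Atomic class (an assumption; the semantics follow the hunk above):

#include <atomic>
#include <cassert>

enum { stop_compilation = 0, run_compilation = 1, shutdown_compilation = 2 };
static std::atomic<int> should_compile(run_compilation);

// Returns true only for the caller whose CAS performed the 0<->1 transition,
// mirroring Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state).
bool set_should_compile_new_jobs(int new_state) {
  int expected = 1 - new_state;
  return should_compile.compare_exchange_strong(expected, new_state);
}

// Terminal state: no CAS needed, any thread may force shutdown.
void disable_compilation_forever() { should_compile.exchange(shutdown_compilation); }
bool is_compilation_disabled_forever() { return should_compile.load() == shutdown_compilation; }

int main() {
  assert(set_should_compile_new_jobs(stop_compilation));   // 1 -> 0 succeeds
  assert(!set_should_compile_new_jobs(stop_compilation));  // already 0, fails
  disable_compilation_forever();
  assert(is_compilation_disabled_forever());
}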
@ -344,6 +344,10 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(
}
}

if (FLAG_IS_CMDLINE(NewSize) && FLAG_IS_CMDLINE(MaxNewSize) && NewSize > MaxNewSize) {
vm_exit_during_initialization("Initial young gen size set larger than the maximum young gen size");
}

if (FLAG_IS_CMDLINE(NewSize)) {
_min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
1U);

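Note that the added guard only rejects NewSize > MaxNewSize when both values came from the command line; a default for either flag is adjusted elsewhere. A tiny illustrative check, with a hypothetical Flag struct standing in for the real flag machinery:

#include <cstdio>
#include <cstdlib>
#include <cstddef>

// Hypothetical stand-in: a flag value plus whether it was set on the command line.
struct Flag { size_t value; bool is_cmdline; };

void check_young_gen_flags(const Flag& NewSize, const Flag& MaxNewSize) {
  // Only a user-supplied contradiction is a hard error.
  if (NewSize.is_cmdline && MaxNewSize.is_cmdline && NewSize.value > MaxNewSize.value) {
    std::fputs("Initial young gen size set larger than the maximum young gen size\n", stderr);
    std::exit(1);
  }
}

int main() {
  Flag n = {64, true}, m = {128, true};
  check_young_gen_flags(n, m);   // fine: 64 <= 128
}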
@ -211,7 +211,7 @@ void VM_CollectForMetadataAllocation::doit() {
// a GC that freed space for the allocation.
if (!MetadataAllocationFailALot) {
_result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
}
}

if (_result == NULL) {
if (UseConcMarkSweepGC) {
@ -223,9 +223,7 @@ void VM_CollectForMetadataAllocation::doit() {
_loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
}
if (_result == NULL) {
// Don't clear the soft refs. This GC is for reclaiming metadata
// and is unrelated to the fullness of the Java heap which should
// be the criteria for clearing SoftReferences.
// Don't clear the soft refs yet.
if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
}
@ -235,7 +233,7 @@ void VM_CollectForMetadataAllocation::doit() {
_result =
_loader_data->metaspace_non_null()->allocate(_size, _mdtype);
}
if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
if (_result == NULL) {
// If still failing, allow the Metaspace to expand.
// See delta_capacity_until_GC() for explanation of the
// amount of the expansion.
@ -243,7 +241,16 @@ void VM_CollectForMetadataAllocation::doit() {
// or a MaxMetaspaceSize has been specified on the command line.
_result =
_loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);

if (_result == NULL) {
// If expansion failed, do a last-ditch collection and try allocating
// again. A last-ditch collection will clear softrefs. This
// behavior is similar to the last-ditch collection done for perm
// gen when it was full and a collection for failed allocation
// did not free perm gen space.
heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
_result =
_loader_data->metaspace_non_null()->allocate(_size, _mdtype);
}
}
if (Verbose && PrintGCDetails && _result == NULL) {
gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "

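The reshuffled doit() forms a retry ladder: allocate, collect without clearing soft references, allocate again, expand the metaspace, and only then run a last-ditch collection that also clears soft references. A compilable sketch of that ordering; every function here is an illustrative stand-in for the real steps:

#include <cstdio>

// Pretend state: the metaspace frees up only once soft refs are cleared.
static bool soft_refs_cleared = false;

static void* try_allocate()        { return soft_refs_cleared ? &soft_refs_cleared : nullptr; }
static void* expand_and_allocate() { return nullptr; }   // pretend MaxMetaspaceSize is hit
static void  collect(bool clear_soft_refs) { soft_refs_cleared = clear_soft_refs; }

void* allocate_metadata_with_retries() {
  void* result = try_allocate();
  if (result != nullptr) return result;

  collect(false);                   // ordinary GC: don't clear soft refs yet
  result = try_allocate();
  if (result != nullptr) return result;

  result = expand_and_allocate();   // grow the metaspace before giving up
  if (result != nullptr) return result;

  collect(true);                    // last-ditch collection clears soft refs
  return try_allocate();
}

int main() {
  std::printf("allocated: %s\n", allocate_metadata_with_retries() ? "yes" : "no");
}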
@ -1,4 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -158,6 +159,22 @@ CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
index = vt->index_of_miranda(resolved_method->name(),
resolved_method->signature());
kind = CallInfo::vtable_call;
} else if (resolved_method->has_vtable_index()) {
// Can occur if an interface redeclares a method of Object.

#ifdef ASSERT
// Ensure that this is really the case.
KlassHandle object_klass = SystemDictionary::Object_klass();
Method * object_resolved_method = object_klass()->vtable()->method_at(index);
assert(object_resolved_method->name() == resolved_method->name(),
err_msg("Object and interface method names should match at vtable index %d, %s != %s",
index, object_resolved_method->name()->as_C_string(), resolved_method->name()->as_C_string()));
assert(object_resolved_method->signature() == resolved_method->signature(),
err_msg("Object and interface method signatures should match at vtable index %d, %s != %s",
index, object_resolved_method->signature()->as_C_string(), resolved_method->signature()->as_C_string()));
#endif // ASSERT

kind = CallInfo::vtable_call;
} else {
// A regular interface call.
kind = CallInfo::itable_call;
@ -454,7 +471,7 @@ void LinkResolver::resolve_method_statically(methodHandle& resolved_method, Klas
Symbol* method_name = vmSymbols::invoke_name();
Symbol* method_signature = pool->signature_ref_at(index);
KlassHandle current_klass(THREAD, pool->pool_holder());
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
return;
}

@ -476,22 +493,34 @@ void LinkResolver::resolve_method_statically(methodHandle& resolved_method, Klas

if (code == Bytecodes::_invokeinterface) {
resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
} else if (code == Bytecodes::_invokevirtual) {
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
} else {
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
}
}

void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass,
Symbol* method_name, Symbol* method_signature,
KlassHandle current_klass, bool check_access, TRAPS) {
KlassHandle current_klass, bool check_access,
bool require_methodref, TRAPS) {

Handle nested_exception;

// 1. lookup method in resolved klass and its super klasses
// 1. check if methodref required, that resolved_klass is not interfacemethodref
if (require_methodref && resolved_klass->is_interface()) {
ResourceMark rm(THREAD);
char buf[200];
jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
resolved_klass()->external_name());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}

// 2. lookup method in resolved klass and its super klasses
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);

if (resolved_method.is_null()) { // not found in the class hierarchy
// 2. lookup method in all the interfaces implemented by the resolved klass
// 3. lookup method in all the interfaces implemented by the resolved klass
lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);

if (resolved_method.is_null()) {
@ -505,7 +534,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
}

if (resolved_method.is_null()) {
// 3. method lookup failed
// 4. method lookup failed
ResourceMark rm(THREAD);
THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
Method::name_and_sig_as_C_string(resolved_klass(),
@ -515,15 +544,6 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
}
}

// 4. check if klass is not interface
if (resolved_klass->is_interface() && resolved_method->is_abstract()) {
ResourceMark rm(THREAD);
char buf[200];
jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
resolved_klass()->external_name());
THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
}

// 5. check if method is concrete
if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
ResourceMark rm(THREAD);
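The new require_methodref parameter moves the interface check to the front of resolve_method(), so a CONSTANT_Methodref that names an interface now fails with IncompatibleClassChangeError before any lookup happens, and the remaining steps renumber to 2-4. A standalone sketch of that ordering, with illustrative types rather than HotSpot's:

#include <stdexcept>
#include <string>
#include <cstdio>

struct Klass { std::string name; bool is_interface; };

// Stand-ins for lookup_method_in_klasses / lookup_method_in_interfaces.
static bool found_in_hierarchy(const Klass&, const std::string&)  { return false; }
static bool found_in_interfaces(const Klass&, const std::string&) { return true; }

std::string resolve_method(const Klass& k, const std::string& name, bool require_methodref) {
  // 1. methodref required: the symbolic reference must not name an interface
  if (require_methodref && k.is_interface)
    throw std::runtime_error("Found interface " + k.name + ", but class was expected");
  // 2. lookup in the resolved klass and its super klasses
  if (found_in_hierarchy(k, name)) return name;
  // 3. lookup in all the interfaces implemented by the resolved klass
  if (found_in_interfaces(k, name)) return name;
  // 4. method lookup failed
  throw std::runtime_error("NoSuchMethodError: " + name);
}

int main() {
  Klass c = {"C", false};
  std::printf("resolved %s\n", resolve_method(c, "m", true).c_str());
}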
@ -833,7 +853,7 @@ void LinkResolver::linktime_resolve_static_method(methodHandle& resolved_method,
Symbol* method_name, Symbol* method_signature,
KlassHandle current_klass, bool check_access, TRAPS) {

resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
assert(resolved_method->name() != vmSymbols::class_initializer_name(), "should have been checked in verifier");

// check if static
@ -867,7 +887,7 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
// and the selected method is recalculated relative to the direct superclass
// superinterface.method, which explicitly does not check shadowing

resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);

// check if method name is <init>, that it is found in same klass as static type
if (resolved_method->name() == vmSymbols::object_initializer_name() &&
@ -1013,7 +1033,7 @@ void LinkResolver::linktime_resolve_virtual_method(methodHandle &resolved_method
Symbol* method_name, Symbol* method_signature,
KlassHandle current_klass, bool check_access, TRAPS) {
// normal method resolution
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, true, CHECK);

assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");

@ -136,7 +136,7 @@ class LinkResolver: AllStatic {
static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);

static void resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool require_methodref, TRAPS);

static void linktime_resolve_static_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
static void linktime_resolve_special_method (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);

@ -28,7 +28,6 @@
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "runtime/globals.hpp"
#include "utilities/ostream.hpp"

@ -28,7 +28,6 @@
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#endif // INCLUDE_ALL_GCS
#include "memory/freeBlockDictionary.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/macros.hpp"

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"

@ -1,68 +0,0 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/metablock.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunk are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspaces from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk. The current
// chunk is the chunk from which allocations are done. Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.
//
// Future modification
//
// The Metachunk can conceivable be replaced by the Chunk in
// allocation.hpp. Note that the latter Chunk is the space for
// allocation (allocations from the chunk are out of the space in
// the Chunk after the header for the Chunk) where as Metachunks
// point to space in a VirtualSpace. To replace Metachunks with
// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
size_t Metablock::_min_block_byte_size = sizeof(Metablock);

// New blocks returned by the Metaspace are zero initialized.
// We should fix the constructors to not assume this instead.
Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
if (p == NULL) {
return NULL;
}

Metablock* result = (Metablock*) p;

// Clear the memory
Copy::fill_to_aligned_words((HeapWord*)result, word_size);
#ifdef ASSERT
result->set_word_size(word_size);
#endif
return result;
}
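The deleted Metablock::initialize both zeroed the payload and stamped the word size, so metaspace callers could rely on zero-initialized storage. A minimal standalone equivalent, with a stand-in Block type rather than HotSpot's Metablock:

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

// Treat raw words as a block header and zero the payload.
struct Block { size_t word_size; };

Block* initialize_block(void* p, size_t word_size) {
  if (p == nullptr) return nullptr;
  Block* result = static_cast<Block*>(p);
  std::memset(p, 0, word_size * sizeof(void*));  // Copy::fill_to_aligned_words equivalent
  result->word_size = word_size;                 // debug-only in the original
  return result;
}

int main() {
  void* mem = std::malloc(16 * sizeof(void*));
  Block* b = initialize_block(mem, 16);
  assert(b != nullptr && b->word_size == 16);
  std::free(mem);
}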
@ -1,101 +0,0 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METABLOCK_HPP
#define SHARE_VM_MEMORY_METABLOCK_HPP

// Metablock are the unit of allocation from a Chunk. It is initialized
// with the size of the requested allocation. That size is overwritten
// once the allocation returns.
//
// A Metablock may be reused by its SpaceManager but are never moved between
// SpaceManagers. There is no explicit link to the Metachunk
// from which it was allocated. Metablock may be deallocated and
// put on a freelist but the space is never freed, rather
// the Metachunk it is a part of will be deallocated when it's
// associated class loader is collected.

class Metablock VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
// Used to align the allocation (see below).
union block_t {
void* _data[3];
struct header_t {
size_t _word_size;
Metablock* _next;
Metablock* _prev;
} _header;
} _block;
static size_t _min_block_byte_size;

typedef union block_t Block;
typedef struct header_t Header;
const Block* block() const { return &_block; }
const Block::header_t* header() const { return &(block()->_header); }
public:

static Metablock* initialize(MetaWord* p, size_t word_size);

// This places the body of the block at a 2 word boundary
// because every block starts on a 2 word boundary. Work out
// how to make the body on a 2 word boundary if the block
// starts on a arbitrary boundary. JJJ

size_t word_size() const { return header()->_word_size; }
void set_word_size(size_t v) { _block._header._word_size = v; }
size_t size() const volatile { return _block._header._word_size; }
void set_size(size_t v) { _block._header._word_size = v; }
Metablock* next() const { return header()->_next; }
void set_next(Metablock* v) { _block._header._next = v; }
Metablock* prev() const { return header()->_prev; }
void set_prev(Metablock* v) { _block._header._prev = v; }

static size_t min_block_byte_size() { return _min_block_byte_size; }

bool is_free() { return header()->_word_size != 0; }
void clear_next() { set_next(NULL); }
void link_prev(Metablock* ptr) { set_prev(ptr); }
uintptr_t* end() { return ((uintptr_t*) this) + size(); }
bool cantCoalesce() const { return false; }
void link_next(Metablock* ptr) { set_next(ptr); }
void link_after(Metablock* ptr){
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}

// Should not be needed in a free list of Metablocks
void markNotFree() { ShouldNotReachHere(); }

// Debug support
#ifdef ASSERT
void* prev_addr() const { return (void*)&_block._header._prev; }
void* next_addr() const { return (void*)&_block._header._next; }
void* size_addr() const { return (void*)&_block._header._word_size; }
#endif
bool verify_chunk_in_free_list(Metablock* tc) const { return true; }
bool verify_par_locked() { return true; }

void assert_is_mangled() const {/* Don't check "\*/}
};
#endif // SHARE_VM_MEMORY_METABLOCK_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,42 +29,39 @@
#include "utilities/debug.hpp"

class VirtualSpaceNode;
//
// Future modification
//
// The Metachunk can conceivable be replaced by the Chunk in
// allocation.hpp. Note that the latter Chunk is the space for
// allocation (allocations from the chunk are out of the space in
// the Chunk after the header for the Chunk) where as Metachunks
// point to space in a VirtualSpace. To replace Metachunks with
// Chunks, change Chunks so that they can be allocated out of a VirtualSpace.

const size_t metadata_chunk_initialize = 0xf7f7f7f7;

size_t Metachunk::_overhead =
Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
size_t Metachunk::object_alignment() {
// Must align pointers and sizes to 8,
// so that 64 bit types get correctly aligned.
const size_t alignment = 8;

// Make sure that the Klass alignment also agree.
STATIC_ASSERT(alignment == (size_t)KlassAlignmentInBytes);

return alignment;
}

size_t Metachunk::overhead() {
return align_size_up(sizeof(Metachunk), object_alignment()) / BytesPerWord;
}

// Metachunk methods

Metachunk::Metachunk(size_t word_size,
VirtualSpaceNode* container) :
_word_size(word_size),
_bottom(NULL),
_end(NULL),
VirtualSpaceNode* container)
: Metabase<Metachunk>(word_size),
_top(NULL),
_next(NULL),
_prev(NULL),
_container(container)
{
_bottom = (MetaWord*)this;
_top = (MetaWord*)this + _overhead;
_end = (MetaWord*)this + word_size;
_top = initial_top();
#ifdef ASSERT
set_is_free(false);
set_is_tagged_free(false);
size_t data_word_size = pointer_delta(end(),
top(),
_top,
sizeof(MetaWord));
Copy::fill_to_words((HeapWord*) top(),
Copy::fill_to_words((HeapWord*)_top,
data_word_size,
metadata_chunk_initialize);
#endif
@ -82,22 +79,18 @@ MetaWord* Metachunk::allocate(size_t word_size) {

// _bottom points to the start of the chunk including the overhead.
size_t Metachunk::used_word_size() const {
return pointer_delta(_top, _bottom, sizeof(MetaWord));
return pointer_delta(_top, bottom(), sizeof(MetaWord));
}

size_t Metachunk::free_word_size() const {
return pointer_delta(_end, _top, sizeof(MetaWord));
}

size_t Metachunk::capacity_word_size() const {
return pointer_delta(_end, _bottom, sizeof(MetaWord));
return pointer_delta(end(), _top, sizeof(MetaWord));
}

void Metachunk::print_on(outputStream* st) const {
st->print_cr("Metachunk:"
" bottom " PTR_FORMAT " top " PTR_FORMAT
" end " PTR_FORMAT " size " SIZE_FORMAT,
bottom(), top(), end(), word_size());
bottom(), _top, end(), word_size());
if (Verbose) {
st->print_cr("    used " SIZE_FORMAT " free " SIZE_FORMAT,
used_word_size(), free_word_size());
@ -109,8 +102,8 @@ void Metachunk::mangle() {
// Mangle the payload of the chunk and not the links that
// maintain list of chunks.
HeapWord* start = (HeapWord*)(bottom() + overhead());
size_t word_size = capacity_word_size() - overhead();
Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
size_t size = word_size() - overhead();
Copy::fill_to_words(start, size, metadata_chunk_initialize);
}
#endif // PRODUCT

@ -118,9 +111,68 @@ void Metachunk::verify() {
#ifdef ASSERT
// Cannot walk through the blocks unless the blocks have
// headers with sizes.
assert(_bottom <= _top &&
_top <= _end,
assert(bottom() <= _top &&
_top <= (MetaWord*)end(),
"Chunk has been smashed");
#endif
return;
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

class TestMetachunk {
public:
static void test() {
size_t size = 2 * 1024 * 1024;
void* memory = malloc(size);
assert(memory != NULL, "Failed to malloc 2MB");

Metachunk* metachunk = ::new (memory) Metachunk(size / BytesPerWord, NULL);

assert(metachunk->bottom() == (MetaWord*)metachunk, "assert");
assert(metachunk->end() == (uintptr_t*)metachunk + metachunk->size(), "assert");

// Check sizes
assert(metachunk->size() == metachunk->word_size(), "assert");
assert(metachunk->word_size() == pointer_delta(metachunk->end(), metachunk->bottom(),
sizeof(MetaWord*)), "assert");

// Check usage
assert(metachunk->used_word_size() == metachunk->overhead(), "assert");
assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
assert(metachunk->top() == metachunk->initial_top(), "assert");
assert(metachunk->is_empty(), "assert");

// Allocate
size_t alloc_size = 64; // Words
assert(is_size_aligned(alloc_size, Metachunk::object_alignment()), "assert");

MetaWord* mem = metachunk->allocate(alloc_size);

// Check post alloc
assert(mem == metachunk->initial_top(), "assert");
assert(mem + alloc_size == metachunk->top(), "assert");
assert(metachunk->used_word_size() == metachunk->overhead() + alloc_size, "assert");
assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
assert(!metachunk->is_empty(), "assert");

// Clear chunk
metachunk->reset_empty();

// Check post clear
assert(metachunk->used_word_size() == metachunk->overhead(), "assert");
assert(metachunk->free_word_size() == metachunk->word_size() - metachunk->used_word_size(), "assert");
assert(metachunk->top() == metachunk->initial_top(), "assert");
assert(metachunk->is_empty(), "assert");

free(memory);
}
};

void TestMetachunk_test() {
TestMetachunk::test();
}

#endif

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,89 +24,44 @@
#ifndef SHARE_VM_MEMORY_METACHUNK_HPP
#define SHARE_VM_MEMORY_METACHUNK_HPP

// Metachunk - Quantum of allocation from a Virtualspace
// Metachunks are reused (when freed are put on a global freelist) and
// have no permanent association to a SpaceManager.

// +--------------+ <- end
// | | --+ ---+
// | | | free |
// | | | |
// | | | | capacity
// | | | |
// | | <- top --+ |
// | | ---+ |
// | | | used |
// | | | |
// | | | |
// +--------------+ <- bottom ---+ ---+
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

class VirtualSpaceNode;

class Metachunk VALUE_OBJ_CLASS_SPEC {
// link to support lists of chunks
Metachunk* _next;
Metachunk* _prev;
VirtualSpaceNode* _container;

MetaWord* _bottom;
MetaWord* _end;
MetaWord* _top;
// Super class of Metablock and Metachunk to allow them to
// be put on the FreeList and in the BinaryTreeDictionary.
template <class T>
class Metabase VALUE_OBJ_CLASS_SPEC {
size_t _word_size;
// Used in a guarantee() so included in the Product builds
// even through it is only for debugging.
bool _is_free;
T* _next;
T* _prev;

// Metachunks are allocated out of a MetadataVirtualSpace and
// and use some of its space to describe itself (plus alignment
// considerations). Metadata is allocated in the rest of the chunk.
// This size is the overhead of maintaining the Metachunk within
// the space.
static size_t _overhead;
protected:
Metabase(size_t word_size) : _word_size(word_size), _next(NULL), _prev(NULL) {}

public:
Metachunk(size_t word_size , VirtualSpaceNode* container);
T* next() const { return _next; }
T* prev() const { return _prev; }
void set_next(T* v) { _next = v; assert(v != this, "Boom");}
void set_prev(T* v) { _prev = v; assert(v != this, "Boom");}
void clear_next() { set_next(NULL); }
void clear_prev() { set_prev(NULL); }

// Used to add a Metachunk to a list of Metachunks
void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
void set_container(VirtualSpaceNode* v) { _container = v; }

MetaWord* allocate(size_t word_size);

// Accessors
Metachunk* next() const { return _next; }
Metachunk* prev() const { return _prev; }
VirtualSpaceNode* container() const { return _container; }
MetaWord* bottom() const { return _bottom; }
MetaWord* end() const { return _end; }
MetaWord* top() const { return _top; }
size_t word_size() const { return _word_size; }
size_t size() const volatile { return _word_size; }
void set_size(size_t v) { _word_size = v; }
bool is_free() { return _is_free; }
void set_is_free(bool v) { _is_free = v; }
static size_t overhead() { return _overhead; }
void clear_next() { set_next(NULL); }
void link_prev(Metachunk* ptr) { set_prev(ptr); }
uintptr_t* end() { return ((uintptr_t*) this) + size(); }
bool cantCoalesce() const { return false; }
void link_next(Metachunk* ptr) { set_next(ptr); }
void link_after(Metachunk* ptr){

void link_next(T* ptr) { set_next(ptr); }
void link_prev(T* ptr) { set_prev(ptr); }
void link_after(T* ptr) {
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
if (ptr != NULL) ptr->link_prev((T*)this);
}

// Reset top to bottom so chunk can be reused.
void reset_empty() { _top = (_bottom + _overhead); _next = NULL; _prev = NULL; }
bool is_empty() { return _top == (_bottom + _overhead); }
uintptr_t* end() const { return ((uintptr_t*) this) + size(); }

// used (has been allocated)
// free (available for future allocations)
// capacity (total size of chunk)
size_t used_word_size() const;
size_t free_word_size() const;
size_t capacity_word_size()const;
bool cantCoalesce() const { return false; }

// Debug support
#ifdef ASSERT
@ -114,14 +69,99 @@ class Metachunk VALUE_OBJ_CLASS_SPEC {
void* next_addr() const { return (void*)&_next; }
void* size_addr() const { return (void*)&_word_size; }
#endif
bool verify_chunk_in_free_list(Metachunk* tc) const { return true; }
bool verify_chunk_in_free_list(T* tc) const { return true; }
bool verify_par_locked() { return true; }

void assert_is_mangled() const {/* Don't check "\*/}

bool is_free() { return true; }
};

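Metabase is a CRTP base: each subclass instantiates Metabase<T> with itself, so next()/prev() return the subclass type without casts and one implementation of the link helpers serves both Metablock and Metachunk. A self-contained sketch of the pattern (simplified, outside HotSpot):

#include <cassert>
#include <cstddef>

template <class T>
class Metabase {
  size_t _word_size;
  T* _next;
  T* _prev;
protected:
  explicit Metabase(size_t word_size)
      : _word_size(word_size), _next(nullptr), _prev(nullptr) {}
public:
  size_t size() const { return _word_size; }
  T* next() const { return _next; }
  T* prev() const { return _prev; }
  void set_next(T* v) { assert(v != static_cast<const T*>(this)); _next = v; }
  void set_prev(T* v) { assert(v != static_cast<const T*>(this)); _prev = v; }
  void link_after(T* ptr) {                 // mirrors Metabase::link_after
    set_next(ptr);
    if (ptr != nullptr) ptr->set_prev(static_cast<T*>(this));
  }
};

struct Node : Metabase<Node> {              // stand-in for Metablock/Metachunk
  explicit Node(size_t ws) : Metabase<Node>(ws) {}
};

int main() {
  Node a(4), b(8);
  a.link_after(&b);
  assert(a.next() == &b && b.prev() == &a);
}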
// Metachunk - Quantum of allocation from a Virtualspace
// Metachunks are reused (when freed are put on a global freelist) and
// have no permanent association to a SpaceManager.

// +--------------+ <- end --+ --+
// | | | |
// | | | free |
// | | | |
// | | | | size | capacity
// | | | |
// | | <- top -- + |
// | | | |
// | | | used |
// | | | |
// | | | |
// +--------------+ <- bottom --+ --+

class Metachunk : public Metabase<Metachunk> {
friend class TestMetachunk;
// The VirtualSpaceNode containing this chunk.
VirtualSpaceNode* _container;

// Current allocation top.
MetaWord* _top;

DEBUG_ONLY(bool _is_tagged_free;)

MetaWord* initial_top() const { return (MetaWord*)this + overhead(); }
MetaWord* top() const { return _top; }

public:
// Metachunks are allocated out of a MetadataVirtualSpace and
// and use some of its space to describe itself (plus alignment
// considerations). Metadata is allocated in the rest of the chunk.
// This size is the overhead of maintaining the Metachunk within
// the space.

// Alignment of each allocation in the chunks.
static size_t object_alignment();

// Size of the Metachunk header, including alignment.
static size_t overhead();

Metachunk(size_t word_size , VirtualSpaceNode* container);

MetaWord* allocate(size_t word_size);

VirtualSpaceNode* container() const { return _container; }

MetaWord* bottom() const { return (MetaWord*) this; }

// Reset top to bottom so chunk can be reused.
void reset_empty() { _top = initial_top(); clear_next(); clear_prev(); }
bool is_empty() { return _top == initial_top(); }

// used (has been allocated)
// free (available for future allocations)
size_t word_size() const { return size(); }
size_t used_word_size() const;
size_t free_word_size() const;

#ifdef ASSERT
bool is_tagged_free() { return _is_tagged_free; }
void set_is_tagged_free(bool v) { _is_tagged_free = v; }
#endif

NOT_PRODUCT(void mangle();)

void print_on(outputStream* st) const;
void verify();
};

// Metablock is the unit of allocation from a Chunk.
//
// A Metablock may be reused by its SpaceManager but are never moved between
// SpaceManagers. There is no explicit link to the Metachunk
// from which it was allocated. Metablock may be deallocated and
// put on a freelist but the space is never freed, rather
// the Metachunk it is a part of will be deallocated when it's
// associated class loader is collected.

class Metablock : public Metabase<Metablock> {
friend class VMStructs;
public:
Metablock(size_t word_size) : Metabase<Metablock>(word_size) {}
};

#endif // SHARE_VM_MEMORY_METACHUNK_HPP

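The slimmed-down Metachunk drops the _bottom/_end fields: bottom() is the chunk's own address, end() is bottom() plus the stored word size, and allocate() bump-allocates from _top, which starts at initial_top() just past the header. A compilable toy version of that layout, with simplified types rather than the real class:

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

typedef uintptr_t Word;

class Chunk {
  size_t _word_size;                         // total chunk size, header included
  Word*  _top;                               // next free word
public:
  Word* bottom() const { return (Word*)this; }
  Word* end()    const { return bottom() + _word_size; }
  static size_t overhead() { return (sizeof(Chunk) + sizeof(Word) - 1) / sizeof(Word); }
  Word* initial_top() const { return bottom() + overhead(); }

  explicit Chunk(size_t word_size) : _word_size(word_size), _top(initial_top()) {}

  Word* allocate(size_t word_size) {
    if (word_size > (size_t)(end() - _top)) return nullptr;  // chunk exhausted
    Word* result = _top;
    _top += word_size;
    return result;
  }
  size_t used_word_size() const { return (size_t)(_top - bottom()); }
  size_t free_word_size() const { return (size_t)(end() - _top); }
};

int main() {
  void* memory = std::malloc(1024 * sizeof(Word));
  Chunk* c = ::new (memory) Chunk(1024);     // header lives inside the chunk
  Word* p = c->allocate(64);
  assert(p == c->initial_top());
  assert(c->used_word_size() == Chunk::overhead() + 64);
  assert(c->free_word_size() == 1024 - c->used_word_size());
  std::free(memory);
}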
@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,6 +65,7 @@ class MetadataFactory : AllStatic {
static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
if (data != NULL) {
assert(loader_data != NULL, "shouldn't pass null");
assert(!data->is_shared(), "cannot deallocate array in shared spaces");
int size = data->size();
if (DumpSharedSpaces) {
loader_data->ro_metaspace()->deallocate((MetaWord*)data, size, false);
@ -83,6 +84,7 @@ class MetadataFactory : AllStatic {
// Call metadata's deallocate function which will call deallocate fields
assert(!DumpSharedSpaces, "cannot deallocate metadata when dumping CDS archive");
assert(!md->on_stack(), "can't deallocate things on stack");
assert(!md->is_shared(), "cannot deallocate if in shared spaces");
md->deallocate_contents(loader_data);
loader_data->metaspace_non_null()->deallocate((MetaWord*)md, size, md->is_klass());
}

@ -30,7 +30,6 @@
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp"
@ -49,13 +48,10 @@

typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
// Define this macro to enable slow integrity checking of
// the free chunk lists

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

// Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10;
const uint metadata_deallocate_a_lock_chunk = 3;
size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;
@ -92,24 +88,9 @@ volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// Blocks of space for metadata are allocated out of Metachunks.
//
// Metachunk are allocated out of MetadataVirtualspaces and once
// allocated there is no explicit link between a Metachunk and
// the MetadataVirtualspaces from which it was allocated.
//
// Each SpaceManager maintains a
// list of the chunks it is using and the current chunk. The current
// chunk is the chunk from which allocations are done. Space freed in
// a chunk is placed on the free list of blocks (BlockFreelist) and
// reused from there.

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
// Has three lists of free chunks, and a total size and
// count that includes all three

class ChunkManager : public CHeapObj<mtInternal> {

// Free list of chunks of different sizes.
@ -119,7 +100,6 @@ class ChunkManager : public CHeapObj<mtInternal> {
// HumongousChunk
ChunkList _free_chunks[NumberOfFreeLists];


// HumongousChunk
ChunkTreeDictionary _humongous_dictionary;

@ -166,7 +146,6 @@ class ChunkManager : public CHeapObj<mtInternal> {

// add or delete (return) a chunk to the global freelist.
Metachunk* chunk_freelist_allocate(size_t word_size);
void chunk_freelist_deallocate(Metachunk* chunk);

// Map a size to a list index assuming that there are lists
// for special, small, medium, and humongous chunks.
@ -200,9 +179,7 @@ class ChunkManager : public CHeapObj<mtInternal> {
// Returns the list for the given chunk word size.
ChunkList* find_free_chunks_list(size_t word_size);

// Add and remove from a list by size. Selects
// list based on size of chunk.
void free_chunks_put(Metachunk* chuck);
// Remove from a list by size. Selects list based on size of chunk.
Metachunk* free_chunks_get(size_t chunk_word_size);

// Debug support
@ -230,7 +207,6 @@ class ChunkManager : public CHeapObj<mtInternal> {
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
BlockTreeDictionary* _dictionary;
static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);

// Only allocate and split from freelist if the size of the allocation
// is at least 1/4th the size of the available block.
@ -258,6 +234,7 @@ class BlockFreelist VALUE_OBJ_CLASS_SPEC {
void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
friend class VirtualSpaceList;

@ -414,13 +391,13 @@ void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
Metachunk* chunk = first_chunk();
Metachunk* invalid_chunk = (Metachunk*) top();
while (chunk < invalid_chunk ) {
assert(chunk->is_free(), "Should be marked free");
MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
chunk_manager->remove_chunk(chunk);
assert(chunk->next() == NULL &&
chunk->prev() == NULL,
"Was not removed from its list");
chunk = (Metachunk*) next;
assert(chunk->is_tagged_free(), "Should be tagged free");
MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
chunk_manager->remove_chunk(chunk);
assert(chunk->next() == NULL &&
chunk->prev() == NULL,
"Was not removed from its list");
chunk = (Metachunk*) next;
}
}

@ -434,7 +411,7 @@ uint VirtualSpaceNode::container_count_slow() {
// Don't count the chunks on the free lists. Those are
// still part of the VirtualSpaceNode but not currently
// counted.
if (!chunk->is_free()) {
if (!chunk->is_tagged_free()) {
count++;
}
chunk = (Metachunk*) next;
@ -550,44 +527,16 @@ class VirtualSpaceList : public CHeapObj<mtClass> {

class Metadebug : AllStatic {
// Debugging support for Metaspaces
static int _deallocate_block_a_lot_count;
static int _deallocate_chunk_a_lot_count;
static int _allocation_fail_alot_count;

public:
static int deallocate_block_a_lot_count() {
return _deallocate_block_a_lot_count;
}
static void set_deallocate_block_a_lot_count(int v) {
_deallocate_block_a_lot_count = v;
}
static void inc_deallocate_block_a_lot_count() {
_deallocate_block_a_lot_count++;
}
static int deallocate_chunk_a_lot_count() {
return _deallocate_chunk_a_lot_count;
}
static void reset_deallocate_chunk_a_lot_count() {
_deallocate_chunk_a_lot_count = 1;
}
static void inc_deallocate_chunk_a_lot_count() {
_deallocate_chunk_a_lot_count++;
}

static void init_allocation_fail_alot_count();
#ifdef ASSERT
static bool test_metadata_failure();
#endif

static void deallocate_chunk_a_lot(SpaceManager* sm,
size_t chunk_word_size);
static void deallocate_block_a_lot(SpaceManager* sm,
size_t chunk_word_size);

};

int Metadebug::_deallocate_block_a_lot_count = 0;
int Metadebug::_deallocate_chunk_a_lot_count = 0;
int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
@ -753,14 +702,11 @@ class SpaceManager : public CHeapObj<mtClass> {
#endif

size_t get_raw_word_size(size_t word_size) {
// If only the dictionary is going to be used (i.e., no
// indexed free list), then there is a minimum size requirement.
// MinChunkSize is a placeholder for the real minimum size JJJ
size_t byte_size = word_size * BytesPerWord;

size_t raw_bytes_size = MAX2(byte_size,
Metablock::min_block_byte_size());
raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

size_t raw_word_size = raw_bytes_size / BytesPerWord;
assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

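The rewritten get_raw_word_size() rounds a request up to at least sizeof(Metablock) and then to Metachunk::object_alignment() via align_size_up(), replacing the ARENA_ALIGN path. The same arithmetic in isolation; min_block_bytes is a stand-in for sizeof(Metablock):

#include <cassert>
#include <cstddef>

static const size_t BytesPerWord = sizeof(void*);

// HotSpot-style align_size_up; alignment must be a power of two.
size_t align_size_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

size_t get_raw_word_size(size_t word_size, size_t min_block_bytes) {
  size_t byte_size = word_size * BytesPerWord;
  size_t raw_bytes_size = byte_size > min_block_bytes ? byte_size : min_block_bytes;
  raw_bytes_size = align_size_up(raw_bytes_size, 8);  // Metachunk::object_alignment()
  assert(raw_bytes_size % BytesPerWord == 0);
  return raw_bytes_size / BytesPerWord;
}

int main() {
  // A 1-word request still reserves room for the Metablock links (say, 24 bytes).
  assert(get_raw_word_size(1, 24) * BytesPerWord == 24);
}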
@ -813,17 +759,8 @@ BlockFreelist::~BlockFreelist() {
}
}

Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
Metablock* block = (Metablock*) p;
block->set_word_size(word_size);
block->set_prev(NULL);
block->set_next(NULL);

return block;
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
Metablock* free_chunk = initialize_free_chunk(p, word_size);
Metablock* free_chunk = ::new (p) Metablock(word_size);
if (dictionary() == NULL) {
_dictionary = new BlockTreeDictionary();
}
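return_block() now constructs the freelist node with placement new instead of casting raw memory and poking fields through initialize_free_chunk(). The idiom in isolation, with a stand-in Metablock:

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <new>

struct Metablock {
  size_t word_size;
  Metablock* next;
  Metablock* prev;
  explicit Metablock(size_t ws) : word_size(ws), next(nullptr), prev(nullptr) {}
};

int main() {
  void* p = std::malloc(sizeof(Metablock));
  // Construct the freelist node in the returned storage itself.
  Metablock* free_chunk = ::new (p) Metablock(64);
  assert(free_chunk->word_size == 64 && free_chunk->next == nullptr);
  std::free(p);
}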
@ -1069,7 +1006,7 @@ void ChunkManager::remove_chunk(Metachunk* chunk) {
|
||||
}
|
||||
|
||||
// Chunk is being removed from the chunks free list.
|
||||
dec_free_chunks_total(chunk->capacity_word_size());
|
||||
dec_free_chunks_total(chunk->word_size());
|
||||
}
|
||||
|
||||
// Walk the list of VirtualSpaceNodes and delete
|
||||
@ -1563,54 +1500,6 @@ void MetaspaceGC::compute_new_size() {
|
||||
|
||||
// Metadebug methods
|
||||
|
||||
void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
|
||||
size_t chunk_word_size){
|
||||
#ifdef ASSERT
|
||||
VirtualSpaceList* vsl = sm->vs_list();
|
||||
if (MetaDataDeallocateALot &&
|
||||
Metadebug::deallocate_chunk_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
|
||||
Metadebug::reset_deallocate_chunk_a_lot_count();
|
||||
for (uint i = 0; i < metadata_deallocate_a_lock_chunk; i++) {
|
||||
Metachunk* dummy_chunk = vsl->current_virtual_space()->take_from_committed(chunk_word_size);
|
||||
if (dummy_chunk == NULL) {
|
||||
break;
|
||||
}
|
||||
sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
|
||||
|
||||
if (TraceMetadataChunkAllocation && Verbose) {
|
||||
gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
|
||||
sm->sum_count_in_chunks_in_use());
|
||||
dummy_chunk->print_on(gclog_or_tty);
|
||||
gclog_or_tty->print_cr(" Free chunks total %d count %d",
|
||||
sm->chunk_manager()->free_chunks_total_words(),
|
||||
sm->chunk_manager()->free_chunks_count());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Metadebug::inc_deallocate_chunk_a_lot_count();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
|
||||
size_t raw_word_size){
|
||||
#ifdef ASSERT
|
||||
if (MetaDataDeallocateALot &&
|
||||
Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
|
||||
Metadebug::set_deallocate_block_a_lot_count(0);
|
||||
for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
|
||||
MetaWord* dummy_block = sm->allocate_work(raw_word_size);
|
||||
if (dummy_block == 0) {
|
||||
break;
|
||||
}
|
||||
sm->deallocate(dummy_block, raw_word_size);
|
||||
}
|
||||
} else {
|
||||
Metadebug::inc_deallocate_block_a_lot_count();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void Metadebug::init_allocation_fail_alot_count() {
|
||||
if (MetadataAllocationFailALot) {
|
||||
_allocation_fail_alot_count =
|
||||
@ -1754,31 +1643,6 @@ ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) {
|
||||
return free_chunks(index);
|
||||
}
|
||||
|
||||
void ChunkManager::free_chunks_put(Metachunk* chunk) {
|
||||
assert_lock_strong(SpaceManager::expand_lock());
|
||||
ChunkList* free_list = find_free_chunks_list(chunk->word_size());
|
||||
chunk->set_next(free_list->head());
|
||||
free_list->set_head(chunk);
|
||||
// chunk is being returned to the chunk free list
|
||||
inc_free_chunks_total(chunk->capacity_word_size());
|
||||
slow_locked_verify();
|
||||
}
|
||||
|
||||
void ChunkManager::chunk_freelist_deallocate(Metachunk* chunk) {
|
||||
// The deallocation of a chunk originates in the freelist
|
||||
// manangement code for a Metaspace and does not hold the
|
||||
// lock.
|
||||
assert(chunk != NULL, "Deallocating NULL");
|
||||
assert_lock_strong(SpaceManager::expand_lock());
|
||||
slow_locked_verify();
|
||||
if (TraceMetadataChunkAllocation) {
|
||||
gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
|
||||
PTR_FORMAT " size " SIZE_FORMAT,
|
||||
chunk, chunk->word_size());
|
||||
}
|
||||
free_chunks_put(chunk);
|
||||
}

Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
assert_lock_strong(SpaceManager::expand_lock());

@@ -1822,7 +1686,7 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
}

// Chunk is being removed from the chunks free list.
dec_free_chunks_total(chunk->capacity_word_size());
dec_free_chunks_total(chunk->word_size());

// Remove it from the links to this freelist
chunk->set_next(NULL);
@@ -1830,7 +1694,7 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
#ifdef ASSERT
// Chunk is no longer on any freelist. Setting to false makes container_count_slow()
// work.
chunk->set_is_free(false);
chunk->set_is_tagged_free(false);
#endif
chunk->container()->inc_container_count();

@@ -1962,7 +1826,7 @@ size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
Metachunk* chunk = chunks_in_use(i);
while (chunk != NULL) {
sum += chunk->capacity_word_size();
sum += chunk->word_size();
chunk = chunk->next();
}
}
@@ -2098,10 +1962,6 @@ MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
size_t grow_chunks_by_words = calc_chunk_size(word_size);
Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);

if (next != NULL) {
Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
}

MetaWord* mem = NULL;

// If a chunk was available, add it to the in-use chunk list
@@ -2210,7 +2070,7 @@ void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
// Capture the next link before it is changed
// by the call to return_chunk_at_head();
Metachunk* next = cur->next();
cur->set_is_free(true);
DEBUG_ONLY(cur->set_is_tagged_free(true);)
list->return_chunk_at_head(cur);
cur = next;
}
@@ -2282,7 +2142,7 @@ SpaceManager::~SpaceManager() {

while (humongous_chunks != NULL) {
#ifdef ASSERT
humongous_chunks->set_is_free(true);
humongous_chunks->set_is_tagged_free(true);
#endif
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ",
@@ -2446,7 +2306,6 @@ MetaWord* SpaceManager::allocate(size_t word_size) {
if (p == NULL) {
p = allocate_work(raw_word_size);
}
Metadebug::deallocate_block_a_lot(this, raw_word_size);

return p;
}
@@ -2545,7 +2404,7 @@ void SpaceManager::dump(outputStream* const out) const {
curr->print_on(out);
curr_total += curr->word_size();
used += curr->used_word_size();
capacity += curr->capacity_word_size();
capacity += curr->word_size();
waste += curr->free_word_size() + curr->overhead();
}
}
@@ -3396,7 +3255,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
}


Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
bool read_only, MetaspaceObj::Type type, TRAPS) {
if (HAS_PENDING_EXCEPTION) {
assert(false, "Should not allocate with exception pending");
@@ -3415,10 +3274,14 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
MetaWord* result = space->allocate(word_size, NonClassType);
if (result == NULL) {
report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
} else {
space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
}
return Metablock::initialize(result, word_size);

space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));

// Zero initialize.
Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

return result;
}

MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
@@ -3443,7 +3306,10 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
return NULL;
}

return Metablock::initialize(result, word_size);
// Zero initialize.
Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

return result;
}
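
The rewritten allocate() returns the raw MetaWord* and zero-fills it directly instead of wrapping it in a Metablock header. A minimal stand-in for the word-aligned fill that Copy::fill_to_aligned_words performs; HeapWord as a machine word and the alignment precondition are the assumptions here:

#include <cstddef>
#include <cstdint>

using HeapWord = uintptr_t;  // stand-in for HotSpot's word type

// Zero word_size words starting at result, as the new allocate()
// does via Copy::fill_to_aligned_words(result, word_size, 0).
inline void fill_words(HeapWord* result, size_t word_size, HeapWord value) {
  for (size_t i = 0; i < word_size; i++) {
    result[i] = value;  // word-at-a-time store; assumes an aligned buffer
  }
}
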

void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {

@@ -139,7 +139,6 @@ class Metaspace : public CHeapObj<mtClass> {
// Allocate space for metadata of type mdtype. This is space
// within a Metachunk and is used by
// allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
// which returns a Metablock.
MetaWord* allocate(size_t word_size, MetadataType mdtype);

// Virtual Space lists for both classes and other metadata
@@ -217,8 +216,8 @@ class Metaspace : public CHeapObj<mtClass> {
size_t used_bytes_slow(MetadataType mdtype) const;
size_t capacity_bytes_slow(MetadataType mdtype) const;

static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
bool read_only, MetaspaceObj::Type type, TRAPS);
static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
bool read_only, MetaspaceObj::Type type, TRAPS);
void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);

MetaWord* expand_and_allocate(size_t size,

@@ -869,18 +869,9 @@ void ConstantPool::unreference_symbols() {
bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
int index2, TRAPS) {

jbyte t1 = tag_at(index1).value();
jbyte t2 = cp2->tag_at(index2).value();


// JVM_CONSTANT_UnresolvedClassInError is equal to JVM_CONSTANT_UnresolvedClass
// when comparing
if (t1 == JVM_CONSTANT_UnresolvedClassInError) {
t1 = JVM_CONSTANT_UnresolvedClass;
}
if (t2 == JVM_CONSTANT_UnresolvedClassInError) {
t2 = JVM_CONSTANT_UnresolvedClass;
}
// The error tags are equivalent to non-error tags when comparing
jbyte t1 = tag_at(index1).non_error_value();
jbyte t2 = cp2->tag_at(index2).non_error_value();

if (t1 != t2) {
// Not the same entry type so there is nothing else to check. Note
@@ -1001,8 +992,8 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,

case JVM_CONSTANT_MethodType:
{
int k1 = method_type_index_at(index1);
int k2 = cp2->method_type_index_at(index2);
int k1 = method_type_index_at_error_ok(index1);
int k2 = cp2->method_type_index_at_error_ok(index2);
bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
if (match) {
return true;
@@ -1011,11 +1002,11 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,

case JVM_CONSTANT_MethodHandle:
{
int k1 = method_handle_ref_kind_at(index1);
int k2 = cp2->method_handle_ref_kind_at(index2);
int k1 = method_handle_ref_kind_at_error_ok(index1);
int k2 = cp2->method_handle_ref_kind_at_error_ok(index2);
if (k1 == k2) {
int i1 = method_handle_index_at(index1);
int i2 = cp2->method_handle_index_at(index2);
int i1 = method_handle_index_at_error_ok(index1);
int i2 = cp2->method_handle_index_at_error_ok(index2);
bool match = compare_entry_to(i1, cp2, i2, CHECK_false);
if (match) {
return true;
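
The rewritten comparison folds each *InError tag back onto its base tag in one place (non_error_value) instead of special-casing error tags at every call site. A compact sketch of that mapping; the tag values below are illustrative stand-ins, the real constants live in jvm.h:

#include <cstdint>

// Illustrative tag constants; the real values come from jvm.h.
enum : uint8_t {
  TAG_UNRESOLVED_CLASS          = 100,
  TAG_METHOD_HANDLE             = 15,
  TAG_METHOD_TYPE               = 16,
  TAG_UNRESOLVED_CLASS_IN_ERROR = 103,
  TAG_METHOD_HANDLE_IN_ERROR    = 104,
  TAG_METHOD_TYPE_IN_ERROR      = 105,
};

// Sketch of constantTag::non_error_value(): an error tag compares
// equal to its non-error counterpart.
inline uint8_t non_error_value(uint8_t tag) {
  switch (tag) {
    case TAG_UNRESOLVED_CLASS_IN_ERROR: return TAG_UNRESOLVED_CLASS;
    case TAG_METHOD_HANDLE_IN_ERROR:    return TAG_METHOD_HANDLE;
    case TAG_METHOD_TYPE_IN_ERROR:      return TAG_METHOD_TYPE;
    default:                            return tag;
  }
}
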
@@ -1329,14 +1320,6 @@ void ConstantPool::copy_entry_to(constantPoolHandle from_cp, int from_i,
}
} break;

case JVM_CONSTANT_UnresolvedClassInError:
{
Symbol* k = from_cp->unresolved_klass_at(from_i);
to_cp->unresolved_klass_at_put(to_i, k);
to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError);
} break;


case JVM_CONSTANT_String:
{
Symbol* s = from_cp->unresolved_string_at(from_i);
@@ -1352,15 +1335,17 @@ void ConstantPool::copy_entry_to(constantPoolHandle from_cp, int from_i,
} break;

case JVM_CONSTANT_MethodType:
case JVM_CONSTANT_MethodTypeInError:
{
jint k = from_cp->method_type_index_at(from_i);
jint k = from_cp->method_type_index_at_error_ok(from_i);
to_cp->method_type_index_at_put(to_i, k);
} break;

case JVM_CONSTANT_MethodHandle:
case JVM_CONSTANT_MethodHandleInError:
{
int k1 = from_cp->method_handle_ref_kind_at(from_i);
int k2 = from_cp->method_handle_index_at(from_i);
int k1 = from_cp->method_handle_ref_kind_at_error_ok(from_i);
int k2 = from_cp->method_handle_index_at_error_ok(from_i);
to_cp->method_handle_index_at_put(to_i, k1, k2);
} break;


@@ -320,7 +320,8 @@ InstanceKlass::InstanceKlass(int vtable_len,

void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,
Array<Method*>* methods) {
if (methods != NULL && methods != Universe::the_empty_method_array()) {
if (methods != NULL && methods != Universe::the_empty_method_array() &&
!methods->is_shared()) {
for (int i = 0; i < methods->length(); i++) {
Method* method = methods->at(i);
if (method == NULL) continue; // maybe null if error processing
@@ -344,13 +345,14 @@ void InstanceKlass::deallocate_interfaces(ClassLoaderData* loader_data,
// check that the interfaces don't come from super class
Array<Klass*>* sti = (super_klass == NULL) ? NULL :
InstanceKlass::cast(super_klass)->transitive_interfaces();
if (ti != sti) {
if (ti != sti && ti != NULL && !ti->is_shared()) {
MetadataFactory::free_array<Klass*>(loader_data, ti);
}
}

// local interfaces can be empty
if (local_interfaces != Universe::the_empty_klass_array()) {
if (local_interfaces != Universe::the_empty_klass_array() &&
local_interfaces != NULL && !local_interfaces->is_shared()) {
MetadataFactory::free_array<Klass*>(loader_data, local_interfaces);
}
}
@@ -380,21 +382,25 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
deallocate_methods(loader_data, methods());
set_methods(NULL);

if (method_ordering() != Universe::the_empty_int_array()) {
if (method_ordering() != NULL &&
method_ordering() != Universe::the_empty_int_array() &&
!method_ordering()->is_shared()) {
MetadataFactory::free_array<int>(loader_data, method_ordering());
}
set_method_ordering(NULL);

// default methods can be empty
if (default_methods() != NULL &&
default_methods() != Universe::the_empty_method_array()) {
default_methods() != Universe::the_empty_method_array() &&
!default_methods()->is_shared()) {
MetadataFactory::free_array<Method*>(loader_data, default_methods());
}
// Do NOT deallocate the default methods, they are owned by superinterfaces.
set_default_methods(NULL);

// default methods vtable indices can be empty
if (default_vtable_indices() != NULL) {
if (default_vtable_indices() != NULL &&
!default_vtable_indices()->is_shared()) {
MetadataFactory::free_array<int>(loader_data, default_vtable_indices());
}
set_default_vtable_indices(NULL);
@@ -403,8 +409,10 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
// This array is in Klass, but remove it with the InstanceKlass since
// this place would be the only caller and it can share memory with transitive
// interfaces.
if (secondary_supers() != Universe::the_empty_klass_array() &&
secondary_supers() != transitive_interfaces()) {
if (secondary_supers() != NULL &&
secondary_supers() != Universe::the_empty_klass_array() &&
secondary_supers() != transitive_interfaces() &&
!secondary_supers()->is_shared()) {
MetadataFactory::free_array<Klass*>(loader_data, secondary_supers());
}
set_secondary_supers(NULL);
@@ -413,24 +421,32 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
set_transitive_interfaces(NULL);
set_local_interfaces(NULL);

MetadataFactory::free_array<jushort>(loader_data, fields());
if (fields() != NULL && !fields()->is_shared()) {
MetadataFactory::free_array<jushort>(loader_data, fields());
}
set_fields(NULL, 0);

// If a method from a redefined class is using this constant pool, don't
// delete it, yet. The new class's previous version will point to this.
if (constants() != NULL) {
assert (!constants()->on_stack(), "shouldn't be called if anything is onstack");
MetadataFactory::free_metadata(loader_data, constants());
if (!constants()->is_shared()) {
MetadataFactory::free_metadata(loader_data, constants());
}
set_constants(NULL);
}

if (inner_classes() != Universe::the_empty_short_array()) {
if (inner_classes() != NULL &&
inner_classes() != Universe::the_empty_short_array() &&
!inner_classes()->is_shared()) {
MetadataFactory::free_array<jushort>(loader_data, inner_classes());
}
set_inner_classes(NULL);

// We should deallocate the Annotations instance
MetadataFactory::free_metadata(loader_data, annotations());
// We should deallocate the Annotations instance if it's not in shared spaces.
if (annotations() != NULL && !annotations()->is_shared()) {
MetadataFactory::free_metadata(loader_data, annotations());
}
set_annotations(NULL);
}
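
Every deallocation site above now repeats the same guard: the array must be non-NULL, must not be one of the canonical empty singletons in Universe, and must not live in the CDS shared archive. A hedged sketch of that ownership rule as a reusable helper; HotSpot itself keeps the checks inline, and MetaArray, free_if_owned, and the shared flag are illustrative:

#include <cstddef>

// Illustrative stand-in for the metadata arrays in the diff.
template <typename T>
struct MetaArray {
  bool shared;  // lives in the CDS shared archive
  bool is_shared() const { return shared; }
};

// Free 'arr' only if this class loader actually owns it: it must be
// non-NULL, must not be a canonical empty singleton, and must not be
// read-only data mapped from the shared archive.
template <typename T>
void free_if_owned(MetaArray<T>* arr, const MetaArray<T>* empty_singleton) {
  if (arr != nullptr && arr != empty_singleton && !arr->is_shared()) {
    delete arr;  // MetadataFactory::free_array in the real code
  }
}
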

@@ -805,6 +805,7 @@ class Method : public Metadata {
private:
void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

public:
MethodCounters* get_method_counters(TRAPS) {
if (_method_counters == NULL) {
build_method_counters(this, CHECK_AND_CLEAR_NULL);
@@ -812,7 +813,6 @@ class Method : public Metadata {
return _method_counters;
}

public:
bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); }

@@ -56,6 +56,11 @@ void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
if (needs_array_len(tag)) {
set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
}
if (tag == call_type_data_tag) {
CallTypeData::initialize(this, cell_count);
} else if (tag == virtual_call_type_data_tag) {
VirtualCallTypeData::initialize(this, cell_count);
}
}

void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) {
@@ -76,7 +81,7 @@ ProfileData::ProfileData() {
}

#ifndef PRODUCT
void ProfileData::print_shared(outputStream* st, const char* name) {
void ProfileData::print_shared(outputStream* st, const char* name) const {
st->print("bci: %d", bci());
st->fill_to(tab_width_one);
st->print("%s", name);
@@ -91,8 +96,8 @@ void ProfileData::print_shared(outputStream* st, const char* name) {
st->print("flags(%d) ", flags);
}

void ProfileData::tab(outputStream* st) {
st->fill_to(tab_width_two);
void ProfileData::tab(outputStream* st, bool first) const {
st->fill_to(first ? tab_width_one : tab_width_two);
}
#endif // !PRODUCT

@@ -104,7 +109,7 @@ void ProfileData::tab(outputStream* st) {


#ifndef PRODUCT
void BitData::print_data_on(outputStream* st) {
void BitData::print_data_on(outputStream* st) const {
print_shared(st, "BitData");
}
#endif // !PRODUCT
@@ -115,7 +120,7 @@ void BitData::print_data_on(outputStream* st) {
// A CounterData corresponds to a simple counter.

#ifndef PRODUCT
void CounterData::print_data_on(outputStream* st) {
void CounterData::print_data_on(outputStream* st) const {
print_shared(st, "CounterData");
st->print_cr("count(%u)", count());
}
@@ -145,12 +150,207 @@ void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
}

#ifndef PRODUCT
void JumpData::print_data_on(outputStream* st) {
void JumpData::print_data_on(outputStream* st) const {
print_shared(st, "JumpData");
st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}
#endif // !PRODUCT

int TypeStackSlotEntries::compute_cell_count(Symbol* signature, int max) {
ResourceMark rm;
SignatureStream ss(signature);
int args_count = MIN2(ss.reference_parameter_count(), max);
return args_count * per_arg_cell_count;
}

int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
Bytecode_invoke inv(stream->method(), stream->bci());
int args_cell = 0;
if (arguments_profiling_enabled()) {
args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), TypeProfileArgsLimit);
}
int ret_cell = 0;
if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
ret_cell = ReturnTypeEntry::static_cell_count();
}
int header_cell = 0;
if (args_cell + ret_cell > 0) {
header_cell = header_cell_count();
}

return header_cell + args_cell + ret_cell;
}
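
As a concrete reading of compute_cell_count(): for an invoke with signature (Ljava/lang/String;I[B)Ljava/lang/Object; there are two reference parameters (the String and the byte array; the int is not type-profiled) plus a reference return. A hedged re-computation with assumed per-cell constants, not values read out of HotSpot:

#include <cstdio>

int main() {
  const int per_arg_cell_count = 2;  // {stack slot, type} per argument (assumed)
  const int header_cell_count  = 2;  // header cells for the type block (assumed)
  const int ret_cell_count     = 1;  // one type cell for the return (assumed)

  // (Ljava/lang/String;I[B)Ljava/lang/Object; -> 2 reference params, ref return
  int args_cell = 2 * per_arg_cell_count;
  int ret_cell  = ret_cell_count;
  int total = (args_cell + ret_cell > 0 ? header_cell_count : 0) + args_cell + ret_cell;
  std::printf("cells = %d\n", total);  // prints: cells = 7
  return 0;
}
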

class ArgumentOffsetComputer : public SignatureInfo {
private:
int _max;
GrowableArray<int> _offsets;

void set(int size, BasicType type) { _size += size; }
void do_object(int begin, int end) {
if (_offsets.length() < _max) {
_offsets.push(_size);
}
SignatureInfo::do_object(begin, end);
}
void do_array (int begin, int end) {
if (_offsets.length() < _max) {
_offsets.push(_size);
}
SignatureInfo::do_array(begin, end);
}

public:
ArgumentOffsetComputer(Symbol* signature, int max)
: SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
}

int total() { lazy_iterate_parameters(); return _size; }

int off_at(int i) const { return _offsets.at(i); }
};
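
ArgumentOffsetComputer walks a method signature and records the stack-slot offset of each reference argument; longs and doubles occupy two slots, so later references shift. A self-contained sketch of the same walk over a simplified one-character-per-type signature encoding, illustrative rather than HotSpot's SignatureStream:

#include <string>
#include <vector>

// Record the slot offset of each reference ('L' or '[') argument in a
// simplified signature string where J/D take two slots and everything
// else takes one. Mirrors what ArgumentOffsetComputer collects.
std::vector<int> reference_arg_offsets(const std::string& sig, int max) {
  std::vector<int> offsets;
  int slot = 0;
  for (char c : sig) {
    if ((c == 'L' || c == '[') && (int)offsets.size() < max) {
      offsets.push_back(slot);
    }
    slot += (c == 'J' || c == 'D') ? 2 : 1;
  }
  return offsets;
}

// reference_arg_offsets("LJL", 8) -> {0, 3}: the second reference sits
// after a two-slot long.
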

void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver) {
ResourceMark rm;
ArgumentOffsetComputer aos(signature, _number_of_entries);
aos.total();
for (int i = 0; i < _number_of_entries; i++) {
set_stack_slot(i, aos.off_at(i) + (has_receiver ? 1 : 0));
set_type(i, type_none());
}
}

void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
Bytecode_invoke inv(stream->method(), stream->bci());

SignatureStream ss(inv.signature());
if (has_arguments()) {
#ifdef ASSERT
ResourceMark rm;
int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
assert(count > 0, "room for args type but none found?");
check_number_of_arguments(count);
#endif
_args.post_initialize(inv.signature(), inv.has_receiver());
}

if (has_return()) {
assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
_ret.post_initialize();
}
}

void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
Bytecode_invoke inv(stream->method(), stream->bci());

if (has_arguments()) {
#ifdef ASSERT
ResourceMark rm;
SignatureStream ss(inv.signature());
int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
assert(count > 0, "room for args type but none found?");
check_number_of_arguments(count);
#endif
_args.post_initialize(inv.signature(), inv.has_receiver());
}

if (has_return()) {
assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
_ret.post_initialize();
}
}

bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
return !is_type_none(p) &&
!((Klass*)klass_part(p))->is_loader_alive(is_alive_cl);
}

void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
for (int i = 0; i < _number_of_entries; i++) {
intptr_t p = type(i);
if (is_loader_alive(is_alive_cl, p)) {
set_type(i, type_none());
}
}
}

void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
intptr_t p = type();
if (is_loader_alive(is_alive_cl, p)) {
set_type(type_none());
}
}

bool TypeEntriesAtCall::return_profiling_enabled() {
return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
return MethodData::profile_arguments();
}

#ifndef PRODUCT
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
if (is_type_none(k)) {
st->print("none");
} else if (is_type_unknown(k)) {
st->print("unknown");
} else {
valid_klass(k)->print_value_on(st);
}
if (was_null_seen(k)) {
st->print(" (null seen)");
}
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
for (int i = 0; i < _number_of_entries; i++) {
_pd->tab(st);
st->print("%d: stack(%u) ", i, stack_slot(i));
print_klass(st, type(i));
st->cr();
}
}

void ReturnTypeEntry::print_data_on(outputStream* st) const {
_pd->tab(st);
print_klass(st, type());
st->cr();
}

void CallTypeData::print_data_on(outputStream* st) const {
CounterData::print_data_on(st);
if (has_arguments()) {
tab(st, true);
st->print("argument types");
_args.print_data_on(st);
}
if (has_return()) {
tab(st, true);
st->print("return type");
_ret.print_data_on(st);
}
}

void VirtualCallTypeData::print_data_on(outputStream* st) const {
VirtualCallData::print_data_on(st);
if (has_arguments()) {
tab(st, true);
st->print("argument types");
_args.print_data_on(st);
}
if (has_return()) {
tab(st, true);
st->print("return type");
_ret.print_data_on(st);
}
}
#endif

// ==================================================================
// ReceiverTypeData
//
@@ -169,7 +369,7 @@ void ReceiverTypeData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
}

#ifndef PRODUCT
void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
uint row;
int entries = 0;
for (row = 0; row < row_limit(); row++) {
@@ -190,11 +390,11 @@ void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
}
}
}
void ReceiverTypeData::print_data_on(outputStream* st) {
void ReceiverTypeData::print_data_on(outputStream* st) const {
print_shared(st, "ReceiverTypeData");
print_receiver_data_on(st);
}
void VirtualCallData::print_data_on(outputStream* st) {
void VirtualCallData::print_data_on(outputStream* st) const {
print_shared(st, "VirtualCallData");
print_receiver_data_on(st);
}
@@ -246,7 +446,7 @@ address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {


#ifndef PRODUCT
void RetData::print_data_on(outputStream* st) {
void RetData::print_data_on(outputStream* st) const {
print_shared(st, "RetData");
uint row;
int entries = 0;
@@ -281,7 +481,7 @@ void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
}

#ifndef PRODUCT
void BranchData::print_data_on(outputStream* st) {
void BranchData::print_data_on(outputStream* st) const {
print_shared(st, "BranchData");
st->print_cr("taken(%u) displacement(%d)",
taken(), displacement());
@@ -355,7 +555,7 @@ void MultiBranchData::post_initialize(BytecodeStream* stream,
}

#ifndef PRODUCT
void MultiBranchData::print_data_on(outputStream* st) {
void MultiBranchData::print_data_on(outputStream* st) const {
print_shared(st, "MultiBranchData");
st->print_cr("default_count(%u) displacement(%d)",
default_count(), default_displacement());
@@ -369,7 +569,7 @@ void MultiBranchData::print_data_on(outputStream* st) {
#endif

#ifndef PRODUCT
void ArgInfoData::print_data_on(outputStream* st) {
void ArgInfoData::print_data_on(outputStream* st) const {
print_shared(st, "ArgInfoData");
int nargs = number_of_args();
for (int i = 0; i < nargs; i++) {
@@ -407,7 +607,11 @@ int MethodData::bytecode_cell_count(Bytecodes::Code code) {
}
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
return CounterData::static_cell_count();
if (MethodData::profile_arguments() || MethodData::profile_return()) {
return variable_cell_count;
} else {
return CounterData::static_cell_count();
}
case Bytecodes::_goto:
case Bytecodes::_goto_w:
case Bytecodes::_jsr:
@@ -415,9 +619,17 @@ int MethodData::bytecode_cell_count(Bytecodes::Code code) {
return JumpData::static_cell_count();
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
return VirtualCallData::static_cell_count();
if (MethodData::profile_arguments() || MethodData::profile_return()) {
return variable_cell_count;
} else {
return VirtualCallData::static_cell_count();
}
case Bytecodes::_invokedynamic:
return CounterData::static_cell_count();
if (MethodData::profile_arguments() || MethodData::profile_return()) {
return variable_cell_count;
} else {
return CounterData::static_cell_count();
}
case Bytecodes::_ret:
return RetData::static_cell_count();
case Bytecodes::_ifeq:
@@ -453,7 +665,36 @@ int MethodData::compute_data_size(BytecodeStream* stream) {
return 0;
}
if (cell_count == variable_cell_count) {
cell_count = MultiBranchData::compute_cell_count(stream);
switch (stream->code()) {
case Bytecodes::_lookupswitch:
case Bytecodes::_tableswitch:
cell_count = MultiBranchData::compute_cell_count(stream);
break;
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokedynamic:
assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = CallTypeData::compute_cell_count(stream);
} else {
cell_count = CounterData::static_cell_count();
}
break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface: {
assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = VirtualCallTypeData::compute_cell_count(stream);
} else {
cell_count = VirtualCallData::static_cell_count();
}
break;
}
default:
fatal("unexpected bytecode for var length profile data");
}
}
// Note: cell_count might be zero, meaning that there is just
// a DataLayout header, with no extra cells.
@@ -499,6 +740,7 @@ int MethodData::compute_allocation_size_in_bytes(methodHandle method) {
// Add a cell to record information about modified arguments.
int arg_size = method->size_of_parameters();
object_size += DataLayout::compute_size_in_bytes(arg_size+1);

return object_size;
}

@@ -534,10 +776,21 @@ int MethodData::initialize_data(BytecodeStream* stream,
}
break;
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
cell_count = CounterData::static_cell_count();
tag = DataLayout::counter_data_tag;
case Bytecodes::_invokestatic: {
int counter_data_cell_count = CounterData::static_cell_count();
if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = CallTypeData::compute_cell_count(stream);
} else {
cell_count = counter_data_cell_count;
}
if (cell_count > counter_data_cell_count) {
tag = DataLayout::call_type_data_tag;
} else {
tag = DataLayout::counter_data_tag;
}
break;
}
case Bytecodes::_goto:
case Bytecodes::_goto_w:
case Bytecodes::_jsr:
@@ -546,15 +799,37 @@ int MethodData::initialize_data(BytecodeStream* stream,
tag = DataLayout::jump_data_tag;
break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokeinterface:
cell_count = VirtualCallData::static_cell_count();
tag = DataLayout::virtual_call_data_tag;
case Bytecodes::_invokeinterface: {
int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = VirtualCallTypeData::compute_cell_count(stream);
} else {
cell_count = virtual_call_data_cell_count;
}
if (cell_count > virtual_call_data_cell_count) {
tag = DataLayout::virtual_call_type_data_tag;
} else {
tag = DataLayout::virtual_call_data_tag;
}
break;
case Bytecodes::_invokedynamic:
}
case Bytecodes::_invokedynamic: {
// %%% should make a type profile for any invokedynamic that takes a ref argument
cell_count = CounterData::static_cell_count();
tag = DataLayout::counter_data_tag;
int counter_data_cell_count = CounterData::static_cell_count();
if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
profile_return_for_invoke(stream->method(), stream->bci())) {
cell_count = CallTypeData::compute_cell_count(stream);
} else {
cell_count = counter_data_cell_count;
}
if (cell_count > counter_data_cell_count) {
tag = DataLayout::call_type_data_tag;
} else {
tag = DataLayout::counter_data_tag;
}
break;
}
case Bytecodes::_ret:
cell_count = RetData::static_cell_count();
tag = DataLayout::ret_data_tag;
@@ -585,6 +860,11 @@ int MethodData::initialize_data(BytecodeStream* stream,
break;
}
assert(tag == DataLayout::multi_branch_data_tag ||
((MethodData::profile_arguments() || MethodData::profile_return()) &&
(tag == DataLayout::call_type_data_tag ||
tag == DataLayout::counter_data_tag ||
tag == DataLayout::virtual_call_type_data_tag ||
tag == DataLayout::virtual_call_data_tag)) ||
cell_count == bytecode_cell_count(c), "cell counts must agree");
if (cell_count >= 0) {
assert(tag != DataLayout::no_tag, "bad tag");
@@ -631,6 +911,10 @@ ProfileData* DataLayout::data_in() {
return new MultiBranchData(this);
case DataLayout::arg_info_data_tag:
return new ArgInfoData(this);
case DataLayout::call_type_data_tag:
return new CallTypeData(this);
case DataLayout::virtual_call_type_data_tag:
return new VirtualCallTypeData(this);
};
}

@@ -898,3 +1182,70 @@ void MethodData::verify_data_on(outputStream* st) {
NEEDS_CLEANUP;
// not yet implemented.
}

bool MethodData::profile_jsr292(methodHandle m, int bci) {
if (m->is_compiled_lambda_form()) {
return true;
}

Bytecode_invoke inv(m, bci);
return inv.is_invokedynamic() || inv.is_invokehandle();
}

int MethodData::profile_arguments_flag() {
return TypeProfileLevel % 10;
}

bool MethodData::profile_arguments() {
return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
}

bool MethodData::profile_arguments_jsr292_only() {
return profile_arguments_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_arguments() {
return profile_arguments_flag() == type_profile_all;
}

bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) {
if (!profile_arguments()) {
return false;
}

if (profile_all_arguments()) {
return true;
}

assert(profile_arguments_jsr292_only(), "inconsistent");
return profile_jsr292(m, bci);
}

int MethodData::profile_return_flag() {
return TypeProfileLevel / 10;
}

bool MethodData::profile_return() {
return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
}

bool MethodData::profile_return_jsr292_only() {
return profile_return_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_return() {
return profile_return_flag() == type_profile_all;
}

bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
if (!profile_return()) {
return false;
}

if (profile_all_return()) {
return true;
}

assert(profile_return_jsr292_only(), "inconsistent");
return profile_jsr292(m, bci);
}
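
The new TypeProfileLevel flag packs two independent settings into one decimal number: the ones digit controls argument profiling and the tens digit controls return profiling. Judging from the predicates above, each digit means 0 = off, 1 = jsr292 call sites only, 2 = all invokes (the exact enum values are an assumption here). A quick decoding sketch:

#include <cstdio>

// Decode TypeProfileLevel the way profile_arguments_flag() and
// profile_return_flag() do: ones digit = arguments, tens digit = return.
void decode(int type_profile_level) {
  const char* names[] = {"off", "jsr292 only", "all invokes"};
  int args = type_profile_level % 10;  // profile_arguments_flag()
  int ret  = type_profile_level / 10;  // profile_return_flag()
  std::printf("TypeProfileLevel=%02d: arguments=%s, return=%s\n",
              type_profile_level, names[args], names[ret]);
}

int main() {
  decode(0);   // arguments=off,         return=off
  decode(2);   // arguments=all invokes, return=off
  decode(12);  // arguments=all invokes, return=jsr292 only
  decode(22);  // arguments=all invokes, return=all invokes
  return 0;
}
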

File diff suppressed because it is too large
@@ -197,6 +197,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
// negative filter: should callee NOT be inlined?
bool InlineTree::should_not_inline(ciMethod *callee_method,
ciMethod* caller_method,
JVMState* jvms,
WarmCallInfo* wci_result) {

const char* fail_msg = NULL;
@@ -226,7 +227,7 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
// don't inline exception code unless the top method belongs to an
// exception class
if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
ciMethod* top_method = caller_jvms() ? caller_jvms()->of_depth(1)->method() : method();
ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
wci_result->set_profit(wci_result->profit() * 0.1);
}
@@ -328,7 +329,7 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
// return true if ok
// Relocated from "InliningClosure::try_to_inline"
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
int caller_bci, ciCallProfile& profile,
int caller_bci, JVMState* jvms, ciCallProfile& profile,
WarmCallInfo* wci_result, bool& should_delay) {

// Old algorithm had funny accumulating BC-size counters
@@ -346,7 +347,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
wci_result)) {
return false;
}
if (should_not_inline(callee_method, caller_method, wci_result)) {
if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
return false;
}

@@ -397,24 +398,35 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
}

// detect direct and indirect recursive inlining
if (!callee_method->is_compiled_lambda_form()) {
{
// count the current method and the callee
int inline_level = (method() == callee_method) ? 1 : 0;
if (inline_level > MaxRecursiveInlineLevel) {
set_msg("recursively inlining too deep");
return false;
const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
int inline_level = 0;
if (!is_compiled_lambda_form) {
if (method() == callee_method) {
inline_level++;
}
}
// count callers of current method and callee
JVMState* jvms = caller_jvms();
while (jvms != NULL && jvms->has_method()) {
if (jvms->method() == callee_method) {
inline_level++;
if (inline_level > MaxRecursiveInlineLevel) {
set_msg("recursively inlining too deep");
return false;
Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
if (j->method() == callee_method) {
if (is_compiled_lambda_form) {
// Since compiled lambda forms are heavily reused we allow recursive inlining. If it is truly
// a recursion (using the same "receiver"), we limit inlining; otherwise we can easily blow the
// compiler stack.
Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
if (caller_argument0 == callee_argument0) {
inline_level++;
}
} else {
inline_level++;
}
}
jvms = jvms->caller();
}
if (inline_level > MaxRecursiveInlineLevel) {
set_msg("recursive inlining is too deep");
return false;
}
}
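
The reworked check walks the caller JVMState chain once and counts how often the callee already appears; for compiled lambda forms, which are heavily reused across call sites, a frame only counts as recursion when its argument 0 (the "receiver") is the same node. A hedged standalone sketch of that counting rule over a simplified call-stack model; Frame and the pointer-identity comparison are illustrative:

#include <cstddef>

struct Frame {
  const void* method;     // which method this frame is executing
  const void* argument0;  // stand-in for jvms->map()->argument(jvms, 0)->uncast()
  const Frame* caller;
};

// Count how often 'callee' already appears up the stack. For compiled
// lambda forms only frames with the same argument0 count, mirroring the
// receiver comparison in InlineTree::try_to_inline().
int recursion_depth(const Frame* top, const void* callee,
                    bool is_compiled_lambda_form, const void* callee_arg0) {
  int level = 0;
  for (const Frame* f = top; f != nullptr; f = f->caller) {
    if (f->method != callee) continue;
    if (!is_compiled_lambda_form || f->argument0 == callee_arg0) {
      level++;
    }
  }
  return level;
}
// The caller would then reject inlining when
// recursion_depth(...) > MaxRecursiveInlineLevel.
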

@@ -536,7 +548,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
// Check if inlining policy says no.
WarmCallInfo wci = *(initial_wci);
bool success = try_to_inline(callee_method, caller_method, caller_bci,
profile, &wci, should_delay);
jvms, profile, &wci, should_delay);

#ifndef PRODUCT
if (UseOldInlining && InlineWarmCalls

@@ -44,9 +44,6 @@
# include "adfiles/ad_ppc.hpp"
#endif


volatile int C2Compiler::_runtimes = uninitialized;

// register information defined by ADLC
extern const char register_save_policy[];
extern const int register_save_type[];
@@ -57,7 +54,7 @@ const char* C2Compiler::retry_no_subsuming_loads() {
const char* C2Compiler::retry_no_escape_analysis() {
return "retry without escape analysis";
}
void C2Compiler::initialize_runtime() {
bool C2Compiler::init_c2_runtime() {

// Check assumptions used while running ADLC
Compile::adlc_verification();
@@ -90,41 +87,31 @@ void C2Compiler::initialize_runtime() {

CompilerThread* thread = CompilerThread::current();

HandleMark handle_mark(thread);

OptoRuntime::generate(thread->env());

HandleMark handle_mark(thread);
return OptoRuntime::generate(thread->env());
}


void C2Compiler::initialize() {

// This method can only be called once per C2Compiler object
// The first compiler thread that gets here will initialize the
// small amount of global state (and runtime stubs) that c2 needs.
// small amount of global state (and runtime stubs) that C2 needs.

// There is a race possible once at startup and then we're fine

// Note that this is being called from a compiler thread, not the
// main startup thread.

if (_runtimes != initialized) {
initialize_runtimes( initialize_runtime, &_runtimes);
if (should_perform_init()) {
bool successful = C2Compiler::init_c2_runtime();
int new_state = (successful) ? initialized : failed;
set_state(new_state);
}

// Mark this compiler object as ready to roll
mark_initialized();
}
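
The initialize() rewrite replaces the hand-rolled _runtimes flag with the AbstractCompiler should_perform_init()/set_state() protocol: exactly one compiler thread runs the expensive runtime setup and publishes initialized or failed for the rest. A minimal sketch of that protocol using standard C++ primitives; HotSpot uses its own monitor rather than std::mutex, and the names here are illustrative:

#include <mutex>

enum State { uninitialized, initializing, initialized, failed };

class OnceInitializer {
 public:
  // Returns true for exactly one caller, which must then set_state().
  // (The real protocol also lets other threads wait; this sketch omits that.)
  bool should_perform_init() {
    std::lock_guard<std::mutex> lock(_mu);
    if (_state == uninitialized) {
      _state = initializing;
      return true;
    }
    return false;
  }
  void set_state(State s) {
    std::lock_guard<std::mutex> lock(_mu);
    _state = s;  // publish initialized or failed to later callers
  }
 private:
  std::mutex _mu;
  State _state = uninitialized;
};
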

void C2Compiler::compile_method(ciEnv* env,
ciMethod* target,
int entry_bci) {
if (!is_initialized()) {
initialize();
}
void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
assert(is_initialized(), "Compiler thread must be initialized");

bool subsume_loads = SubsumeLoads;
bool do_escape_analysis = DoEscapeAnalysis &&
!env->jvmti_can_access_local_variables();
bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
bool eliminate_boxing = EliminateAutoBox;
while (!env->failing()) {
// Attempt to compile while subsuming loads into machine instructions.

@@ -28,24 +28,17 @@
#include "compiler/abstractCompiler.hpp"

class C2Compiler : public AbstractCompiler {
private:

static void initialize_runtime();
private:
static bool init_c2_runtime();

public:
// Name
const char *name() { return "C2"; }

static volatile int _runtimes;

#ifdef TIERED
virtual bool is_c2() { return true; };
#endif // TIERED

// Customization
bool needs_adapters () { return true; }
bool needs_stubs () { return true; }

void initialize();

// Compilation entry point for methods

@@ -52,6 +52,7 @@ class PhaseChaitin;
class LRG : public ResourceObj {
friend class VMStructs;
public:
static const uint AllStack_size = 0xFFFFF; // This mask size is used to tell that the mask of this LRG supports stack positions
enum { SPILL_REG=29999 }; // Register number of a spilled LRG

double _cost; // 2 for loads/1 for stores times block freq
@@ -80,14 +81,21 @@ public:
private:
uint _eff_degree; // Effective degree: Sum of neighbors _num_regs
public:
int degree() const { assert( _degree_valid, "" ); return _eff_degree; }
int degree() const { assert( _degree_valid , "" ); return _eff_degree; }
// Degree starts not valid and any change to the IFG neighbor
// set makes it not valid.
void set_degree( uint degree ) { _eff_degree = degree; debug_only(_degree_valid = 1;) }
void set_degree( uint degree ) {
_eff_degree = degree;
debug_only(_degree_valid = 1;)
assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
}
// Made a change that hammered degree
void invalid_degree() { debug_only(_degree_valid=0;) }
// Incrementally modify degree. If it was correct, it should remain correct
void inc_degree( uint mod ) { _eff_degree += mod; }
void inc_degree( uint mod ) {
_eff_degree += mod;
assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
}
// Compute the degree between 2 live ranges
int compute_degree( LRG &l ) const;

@@ -95,9 +103,9 @@ private:
RegMask _mask; // Allowed registers for this LRG
uint _mask_size; // cache of _mask.Size();
public:
int compute_mask_size() const { return _mask.is_AllStack() ? 65535 : _mask.Size(); }
int compute_mask_size() const { return _mask.is_AllStack() ? AllStack_size : _mask.Size(); }
void set_mask_size( int size ) {
assert((size == 65535) || (size == (int)_mask.Size()), "");
assert((size == (int)AllStack_size) || (size == (int)_mask.Size()), "");
_mask_size = size;
#ifdef ASSERT
_msize_valid=1;
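
The chaitin.hpp change replaces the magic number 65535 with a named AllStack_size (0xFFFFF) and asserts that the effective degree stays representable when a live range's mask admits stack positions. A small sketch of the mask-size rule; RegMask here is a stand-in, not HotSpot's register mask:

#include <cstdint>

// Named constant instead of the old magic 65535: the pseudo mask size
// reported when a live range's mask allows arbitrary stack positions.
static const uint32_t AllStack_size = 0xFFFFF;

struct RegMask {
  bool all_stack;   // mask admits any stack slot
  uint32_t size;    // number of concrete registers otherwise
  bool is_AllStack() const { return all_stack; }
  uint32_t Size() const { return size; }
};

// Mirrors LRG::compute_mask_size(): an AllStack mask reports the
// sentinel size rather than counting registers.
uint32_t compute_mask_size(const RegMask& m) {
  return m.is_AllStack() ? AllStack_size : m.Size();
}
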

@@ -47,6 +47,7 @@
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/node.hpp"
@@ -2986,6 +2987,32 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
n->set_req(MemBarNode::Precedent, top());
}
break;
// Must set a control edge on all nodes that produce a FlagsProj
// so they can't escape the block that consumes the flags.
// Must also set the non-throwing branch as the control for all
// nodes that depend on the result, unless the node already has a
// control that isn't the control of the flag producer.
case Op_FlagsProj:
{
MathExactNode* math = (MathExactNode*) n->in(0);
Node* ctrl = math->control_node();
Node* non_throwing = math->non_throwing_branch();
math->set_req(0, ctrl);

Node* result = math->result_node();
if (result != NULL) {
for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
Node* out = result->fast_out(j);
if (out->in(0) == NULL) {
out->set_req(0, non_throwing);
} else if (out->in(0) == ctrl) {
out->set_req(0, non_throwing);
}
}
}
}
break;
default:
assert( !n->is_Call(), "" );
assert( !n->is_Mem(), "" );