diff --git a/.hgignore b/.hgignore
index ca1b0b21ee2..0092bd4ff5a 100644
--- a/.hgignore
+++ b/.hgignore
@@ -1,3 +1,3 @@
^build/
^dist/
-^nbproject/private/
+/nbproject/private/
diff --git a/.hgtags b/.hgtags
index 68872664ece..fdf92c01ecc 100644
--- a/.hgtags
+++ b/.hgtags
@@ -51,3 +51,7 @@ ce74bd35ce948d629a356e168797f44b593b1578 jdk7-b73
4e7661eaa211e186674f6cbefec4aef1144ac2a0 jdk7-b74
946518568340c4e511549318f19f47f06b7f5f9b jdk7-b75
09e0b33177af2b98a03c9ca19eedf61440bd1cf6 jdk7-b76
+1d0121b741f029dc4b828e4b36ba6fda92907dd7 jdk7-b77
+4061c66ba1af1a2e27c2c839ba887407dd3ce050 jdk7-b78
+e9c98378f6b9256c0595ef2985ca5899f0c0e274 jdk7-b79
+e6abd38682d237306d6c147c17538ec9e7f8e3a7 jdk7-b80
diff --git a/.hgtags-top-repo b/.hgtags-top-repo
index a82460bf48f..7f37898d353 100644
--- a/.hgtags-top-repo
+++ b/.hgtags-top-repo
@@ -51,3 +51,7 @@ e1b972ff53cd58f825791f8ed9b2deffd16e768c jdk7-b68
2c88089b6e1c053597418099a14232182c387edc jdk7-b74
d1516b9f23954b29b8e76e6f4efc467c08c78133 jdk7-b75
c8b63075403d53a208104a8a6ea5072c1cb66aab jdk7-b76
+1f17ca8353babb13f4908c1f87d11508232518c8 jdk7-b77
+ab4ae8f4514693a9fe17ca2fec0239d8f8450d2c jdk7-b78
+20aeeb51713990dbea6929a2e100a8bbf5df70d4 jdk7-b79
+a3242906c7747b5d9bcc3d118c7c3c69aa40f4b7 jdk7-b80
diff --git a/Makefile b/Makefile
index a0b2dc384f9..1131c8b251a 100644
--- a/Makefile
+++ b/Makefile
@@ -51,7 +51,7 @@ endif
# For start and finish echo lines
TITLE_TEXT = Control $(PLATFORM) $(ARCH) $(RELEASE)
-DAYE_STAMP = `$(DATE) '+%y-%m-%d %H:%M'`
+DATE_STAMP = `$(DATE) '+%y-%m-%d %H:%M'`
START_ECHO = echo "$(TITLE_TEXT) $@ build started: $(DATE_STAMP)"
FINISH_ECHO = echo "$(TITLE_TEXT) $@ build finished: $(DATE_STAMP)"
@@ -188,7 +188,7 @@ FRESH_DEBUG_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)-$(DEBUG_NAME)/j2sdk-image
create_fresh_product_bootdir: FRC
@$(START_ECHO)
$(MAKE) ALT_OUTPUTDIR=$(ABS_BOOTDIR_OUTPUTDIR) \
- NO_DOCS=true \
+ GENERATE_DOCS=false \
BOOT_CYCLE_SETTINGS= \
build_product_image
@$(FINISH_ECHO)
@@ -196,7 +196,7 @@ create_fresh_product_bootdir: FRC
create_fresh_debug_bootdir: FRC
@$(START_ECHO)
$(MAKE) ALT_OUTPUTDIR=$(ABS_BOOTDIR_OUTPUTDIR) \
- NO_DOCS=true \
+ GENERATE_DOCS=false \
BOOT_CYCLE_DEBUG_SETTINGS= \
build_debug_image
@$(FINISH_ECHO)
@@ -204,7 +204,7 @@ create_fresh_debug_bootdir: FRC
create_fresh_fastdebug_bootdir: FRC
@$(START_ECHO)
$(MAKE) ALT_OUTPUTDIR=$(ABS_BOOTDIR_OUTPUTDIR) \
- NO_DOCS=true \
+ GENERATE_DOCS=false \
BOOT_CYCLE_DEBUG_SETTINGS= \
build_fastdebug_image
@$(FINISH_ECHO)
@@ -253,7 +253,7 @@ generic_debug_build:
$(MAKE) \
ALT_OUTPUTDIR=$(ABS_OUTPUTDIR)-$(DEBUG_NAME) \
DEBUG_NAME=$(DEBUG_NAME) \
- NO_DOCS=true \
+ GENERATE_DOCS=false \
$(BOOT_CYCLE_DEBUG_SETTINGS) \
generic_build_repo_series
@$(FINISH_ECHO)
@@ -323,7 +323,7 @@ openjdk_build:
$(MKDIR) -p $(OPENJDK_OUTPUTDIR)
($(CD) $(OPENJDK_BUILDDIR) && $(MAKE) \
OPENJDK=true \
- NO_DOCS=true \
+ GENERATE_DOCS=false \
ALT_JDK_DEVTOOLS_DIR=$(JDK_DEVTOOLS_DIR) \
ALT_OUTPUTDIR=$(OPENJDK_OUTPUTDIR) \
ALT_BINARY_PLUGS_PATH=$(OPENJDK_PLUGS) \
diff --git a/corba/.hgignore b/corba/.hgignore
index ca1b0b21ee2..0092bd4ff5a 100644
--- a/corba/.hgignore
+++ b/corba/.hgignore
@@ -1,3 +1,3 @@
^build/
^dist/
-^nbproject/private/
+/nbproject/private/
diff --git a/corba/.hgtags b/corba/.hgtags
index 9e8067101da..2472579cb11 100644
--- a/corba/.hgtags
+++ b/corba/.hgtags
@@ -51,3 +51,7 @@ b751c528c55560cf2adeaeef24b39ca1f4d1cbf7 jdk7-b73
5d0cf59a3203b9f57aceebc33ae656b884987955 jdk7-b74
0fb137085952c8e47878e240d1cb40f14de463c4 jdk7-b75
937144222e2219939101b0129d26a872a7956b13 jdk7-b76
+6881f0383f623394b5ec73f27a5f329ff55d0467 jdk7-b77
+a7f7276b48cd74d8eb1baa83fbf3d1ef4a2603c8 jdk7-b78
+ec0421b5703b677e2226cf4bf7ae4eaafd8061c5 jdk7-b79
+0336e70ca0aeabc783cc01658f36cb6e27ea7934 jdk7-b80
diff --git a/corba/src/share/classes/com/sun/tools/corba/se/idl/constExpr/Expression.java b/corba/src/share/classes/com/sun/tools/corba/se/idl/constExpr/Expression.java
index 1dc4583e568..99d41510b02 100644
--- a/corba/src/share/classes/com/sun/tools/corba/se/idl/constExpr/Expression.java
+++ b/corba/src/share/classes/com/sun/tools/corba/se/idl/constExpr/Expression.java
@@ -123,7 +123,7 @@ public abstract class Expression
/**
* Coerces a number to the target type of this expression.
- * @parm number The number to coerce.
+ * @param obj The number to coerce.
* @return the value of number coerced to the (target) type of
* this expression.
**/
@@ -142,7 +142,7 @@ public abstract class Expression
/**
* Coerces an integral value (BigInteger) to its corresponding unsigned
* representation, if the target type of this expression is unsigned.
- * @parm b The BigInteger to be coerced.
+ * @param b The BigInteger to be coerced.
* @return the value of an integral type coerced to its corresponding
* unsigned integral type, if the target type of this expression is
* unsigned.
@@ -170,7 +170,7 @@ public abstract class Expression
/**
* Coerces an integral value (BigInteger) to its corresponding signed
* representation, if the target type of this expression is signed.
- * @parm b The BigInteger to be coerced.
+ * @param b The BigInteger to be coerced.
* @return the value of an integral type coerced to its corresponding
* signed integral type, if the target type of this expression is
* signed.
diff --git a/corba/src/share/classes/javax/rmi/PortableRemoteObject.java b/corba/src/share/classes/javax/rmi/PortableRemoteObject.java
index 432eb4a6fc8..10c3b95200e 100644
--- a/corba/src/share/classes/javax/rmi/PortableRemoteObject.java
+++ b/corba/src/share/classes/javax/rmi/PortableRemoteObject.java
@@ -161,7 +161,7 @@ public class PortableRemoteObject {
* happens implicitly when the object is sent or received as an argument
* on a remote method call, but in some circumstances it is useful to
* perform this action by making an explicit call. See the
- * {@link Stub#connect} method for more information.
+ * {@link javax.rmi.CORBA.Stub#connect} method for more information.
* @param target the object to connect.
* @param source a previously connected object.
* @throws RemoteException if source is not connected
diff --git a/corba/src/share/classes/org/omg/CORBA/SetOverrideType.java b/corba/src/share/classes/org/omg/CORBA/SetOverrideType.java
index acf946871ee..a1e1acc595f 100644
--- a/corba/src/share/classes/org/omg/CORBA/SetOverrideType.java
+++ b/corba/src/share/classes/org/omg/CORBA/SetOverrideType.java
@@ -31,7 +31,7 @@ package org.omg.CORBA;
* indicate whether policies should replace the
* existing policies of an Object or be added to them.
*
- * The method {@link omg.org.CORBA.Object._set_policy_override} takes
+ * The method {@link org.omg.CORBA.Object#_set_policy_override} takes
* either SetOverrideType.SET_OVERRIDE or
* SetOverrideType.ADD_OVERRIDE as its second argument.
* The method _set_policy_override
diff --git a/corba/src/share/classes/org/omg/CORBA/TCKind.java b/corba/src/share/classes/org/omg/CORBA/TCKind.java
index bb3a748f26a..8aa33032da7 100644
--- a/corba/src/share/classes/org/omg/CORBA/TCKind.java
+++ b/corba/src/share/classes/org/omg/CORBA/TCKind.java
@@ -545,8 +545,6 @@ public class TCKind {
* @param _value the int to convert. It must be one of
* the int constants in the class
* TCKind.
- * @return a new TCKind instance whose value
- * field matches the given int
*/
@Deprecated
protected TCKind(int _value){
diff --git a/corba/src/share/classes/org/omg/CORBA/UnknownUserException.java b/corba/src/share/classes/org/omg/CORBA/UnknownUserException.java
index 8509b2d5146..c66bbc99f1d 100644
--- a/corba/src/share/classes/org/omg/CORBA/UnknownUserException.java
+++ b/corba/src/share/classes/org/omg/CORBA/UnknownUserException.java
@@ -56,7 +56,7 @@ public final class UnknownUserException extends UserException {
* Constructs an UnknownUserException object that contains the given
* Any object.
*
- * @ param a an Any object that contains a user exception returned
+ * @param a an Any object that contains a user exception returned
* by the server
*/
public UnknownUserException(Any a) {
diff --git a/corba/src/share/classes/org/omg/CORBA/portable/ServantObject.java b/corba/src/share/classes/org/omg/CORBA/portable/ServantObject.java
index c2c04e11cab..dd1ed8d0e26 100644
--- a/corba/src/share/classes/org/omg/CORBA/portable/ServantObject.java
+++ b/corba/src/share/classes/org/omg/CORBA/portable/ServantObject.java
@@ -43,7 +43,6 @@ public class ServantObject
/** The real servant. The local stub may cast this field to the expected type, and then
* invoke the operation directly. Note, the object may or may not be the actual servant
* instance.
- * @return The real servant
*/
public java.lang.Object servant;
}
diff --git a/corba/src/share/classes/org/omg/CosNaming/nameservice.idl b/corba/src/share/classes/org/omg/CosNaming/nameservice.idl
index 226454d0fbd..7543484b0ed 100644
--- a/corba/src/share/classes/org/omg/CosNaming/nameservice.idl
+++ b/corba/src/share/classes/org/omg/CosNaming/nameservice.idl
@@ -256,7 +256,7 @@ module CosNaming
*
* @param n Name of the object
*
- * @parm obj The Object to rebind with the given name
+ * @param obj The Object to rebind with the given name
*
* @exception org.omg.CosNaming.NamingContextPackage.NotFound Indicates the name does not identify a binding.
*
diff --git a/corba/src/share/classes/org/omg/PortableInterceptor/Interceptors.idl b/corba/src/share/classes/org/omg/PortableInterceptor/Interceptors.idl
index ba94343d73d..8edefdf16ae 100644
--- a/corba/src/share/classes/org/omg/PortableInterceptor/Interceptors.idl
+++ b/corba/src/share/classes/org/omg/PortableInterceptor/Interceptors.idl
@@ -1730,7 +1730,7 @@ module PortableInterceptor {
*
* Any number of components may exist with the same component ID.
*
- * @param a_component The IOP.TaggedComponent to add.
+ * @param tagged_component The IOP.TaggedComponent to add.
*/
void add_ior_component
(in IOP::TaggedComponent tagged_component);
@@ -1744,7 +1744,7 @@ module PortableInterceptor {
*
* Any number of components may exist with the same component ID.
*
- * @param a_component The IOP.TaggedComponent to add.
+ * @param tagged_component The IOP.TaggedComponent to add.
* @param profile_id The profile id of the profile to
* which this component will be added.
* @exception BAD_PARAM thrown, with a standard minor code of 29, if the
diff --git a/hotspot/.hgignore b/hotspot/.hgignore
index fec499bf63f..9818ff1af6e 100644
--- a/hotspot/.hgignore
+++ b/hotspot/.hgignore
@@ -1,6 +1,6 @@
^build/
^dist/
-^nbproject/private/
+/nbproject/private/
^src/share/tools/hsdis/build/
^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
^src/share/tools/IdealGraphVisualizer/build/
diff --git a/hotspot/.hgtags b/hotspot/.hgtags
index d91cec8c3fe..84c4c8bc4c3 100644
--- a/hotspot/.hgtags
+++ b/hotspot/.hgtags
@@ -51,3 +51,7 @@ faf94d94786b621f8e13cbcc941ca69c6d967c3f jdk7-b73
f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 jdk7-b74
d8dd291a362acb656026a9c0a9da48501505a1e7 jdk7-b75
9174bb32e934965288121f75394874eeb1fcb649 jdk7-b76
+455105fc81d941482f8f8056afaa7aa0949c9300 jdk7-b77
+e703499b4b51e3af756ae77c3d5e8b3058a14e4e jdk7-b78
+a5a6adfca6ecefb5894a848debabfe442ff50e25 jdk7-b79
+3003ddd1d4330b06cb4691ae74d600d3685899eb jdk7-b80
diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java
index 629d23c54bd..0a157356207 100644
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java
@@ -63,12 +63,12 @@ public class SystemDictionary {
javaSystemLoaderField = type.getOopField("_java_system_loader");
nofBuckets = db.lookupIntConstant("SystemDictionary::_nof_buckets").intValue();
- objectKlassField = type.getOopField(WK_KLASS("object_klass"));
- classLoaderKlassField = type.getOopField(WK_KLASS("classloader_klass"));
- stringKlassField = type.getOopField(WK_KLASS("string_klass"));
- systemKlassField = type.getOopField(WK_KLASS("system_klass"));
- threadKlassField = type.getOopField(WK_KLASS("thread_klass"));
- threadGroupKlassField = type.getOopField(WK_KLASS("threadGroup_klass"));
+ objectKlassField = type.getOopField(WK_KLASS("Object_klass"));
+ classLoaderKlassField = type.getOopField(WK_KLASS("ClassLoader_klass"));
+ stringKlassField = type.getOopField(WK_KLASS("String_klass"));
+ systemKlassField = type.getOopField(WK_KLASS("System_klass"));
+ threadKlassField = type.getOopField(WK_KLASS("Thread_klass"));
+ threadGroupKlassField = type.getOopField(WK_KLASS("ThreadGroup_klass"));
}
// This WK functions must follow the definitions in systemDictionary.hpp:
diff --git a/hotspot/make/hotspot_version b/hotspot/make/hotspot_version
index 5da770b9f1e..4dfc3baf752 100644
--- a/hotspot/make/hotspot_version
+++ b/hotspot/make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2009
HS_MAJOR_VER=17
HS_MINOR_VER=0
-HS_BUILD_NUMBER=05
+HS_BUILD_NUMBER=08
JDK_MAJOR_VER=1
JDK_MINOR_VER=7
diff --git a/hotspot/make/linux/makefiles/debug.make b/hotspot/make/linux/makefiles/debug.make
index 4743745228d..1837d97311f 100644
--- a/hotspot/make/linux/makefiles/debug.make
+++ b/hotspot/make/linux/makefiles/debug.make
@@ -38,7 +38,7 @@ _JUNK_ := $(shell echo -e >&2 ""\
"Please use 'make jvmg' to build debug JVM. \n" \
"----------------------------------------------------------------------\n")
-G_SUFFIX =
+G_SUFFIX = _g
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT
diff --git a/hotspot/make/linux/makefiles/fastdebug.make b/hotspot/make/linux/makefiles/fastdebug.make
index 740b7584364..c875123ea88 100644
--- a/hotspot/make/linux/makefiles/fastdebug.make
+++ b/hotspot/make/linux/makefiles/fastdebug.make
@@ -58,7 +58,7 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
# Linker mapfile
MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
-G_SUFFIX =
+G_SUFFIX = _g
VERSION = optimized
SYSDEFS += -DASSERT -DFASTDEBUG
PICFLAGS = DEFAULT
diff --git a/hotspot/make/linux/makefiles/jsig.make b/hotspot/make/linux/makefiles/jsig.make
index 6dcff6aa72a..611793967d8 100644
--- a/hotspot/make/linux/makefiles/jsig.make
+++ b/hotspot/make/linux/makefiles/jsig.make
@@ -25,9 +25,12 @@
# Rules to build signal interposition library, used by vm.make
# libjsig[_g].so: signal interposition library
-JSIG = jsig$(G_SUFFIX)
+JSIG = jsig
LIBJSIG = lib$(JSIG).so
+JSIG_G = $(JSIG)$(G_SUFFIX)
+LIBJSIG_G = lib$(JSIG_G).so
+
JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
DEST_JSIG = $(JDK_LIBDIR)/$(LIBJSIG)
@@ -50,6 +53,7 @@ $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
@echo Making signal interposition lib...
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
+ $(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
install_jsig: $(LIBJSIG)
@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
diff --git a/hotspot/make/linux/makefiles/jvmg.make b/hotspot/make/linux/makefiles/jvmg.make
index 4b09db64d21..edd0d9c5476 100644
--- a/hotspot/make/linux/makefiles/jvmg.make
+++ b/hotspot/make/linux/makefiles/jvmg.make
@@ -35,7 +35,7 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE)
# Linker mapfile
MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
-G_SUFFIX =
+G_SUFFIX = _g
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT
diff --git a/hotspot/make/linux/makefiles/launcher.make b/hotspot/make/linux/makefiles/launcher.make
index e367409f00e..e7f813c2df6 100644
--- a/hotspot/make/linux/makefiles/launcher.make
+++ b/hotspot/make/linux/makefiles/launcher.make
@@ -25,7 +25,9 @@
# Rules to build gamma launcher, used by vm.make
# gamma[_g]: launcher
-LAUNCHER = gamma$(G_SUFFIX)
+
+LAUNCHER = gamma
+LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
LAUNCHERDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
LAUNCHERFLAGS = $(ARCHFLAG) \
@@ -70,4 +72,5 @@ $(LAUNCHER): $(LAUNCHER.o) $(LIBJVM) $(LAUNCHER_MAPFILE)
$(LINK_LAUNCHER/PRE_HOOK) \
$(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
$(LINK_LAUNCHER/POST_HOOK) \
+ [ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
}
diff --git a/hotspot/make/linux/makefiles/saproc.make b/hotspot/make/linux/makefiles/saproc.make
index 696157969ea..4cbd9cb9b2b 100644
--- a/hotspot/make/linux/makefiles/saproc.make
+++ b/hotspot/make/linux/makefiles/saproc.make
@@ -25,9 +25,13 @@
# Rules to build serviceability agent library, used by vm.make
# libsaproc[_g].so: serviceability agent
-SAPROC = saproc$(G_SUFFIX)
+
+SAPROC = saproc
LIBSAPROC = lib$(SAPROC).so
+SAPROC_G = $(SAPROC)$(G_SUFFIX)
+LIBSAPROC_G = lib$(SAPROC_G).so
+
AGENT_DIR = $(GAMMADIR)/agent
SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
@@ -75,6 +79,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
$(SA_DEBUG_CFLAGS) \
-o $@ \
-lthread_db
+ $(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
install_saproc: checkAndBuildSA
$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then \
diff --git a/hotspot/make/linux/makefiles/vm.make b/hotspot/make/linux/makefiles/vm.make
index 641d0d79514..fb847e31922 100644
--- a/hotspot/make/linux/makefiles/vm.make
+++ b/hotspot/make/linux/makefiles/vm.make
@@ -113,8 +113,9 @@ include $(MAKEFILES_DIR)/dtrace.make
#----------------------------------------------------------------------
# JVM
-JVM = jvm$(G_SUFFIX)
-LIBJVM = lib$(JVM).so
+JVM = jvm
+LIBJVM = lib$(JVM).so
+LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
JVM_OBJ_FILES = $(Obj_Files)
@@ -201,6 +202,7 @@ $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
$(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
$(LINK_LIB.CC/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
+ [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
if [ -x /usr/sbin/selinuxenabled ] ; then \
/usr/sbin/selinuxenabled; \
if [ $$? = 0 ] ; then \
diff --git a/hotspot/make/solaris/makefiles/debug.make b/hotspot/make/solaris/makefiles/debug.make
index 4fdf4a7463e..ba312ffeb99 100644
--- a/hotspot/make/solaris/makefiles/debug.make
+++ b/hotspot/make/solaris/makefiles/debug.make
@@ -54,7 +54,7 @@ _JUNK_ := $(shell echo >&2 ""\
"Please use 'gnumake jvmg' to build debug JVM. \n" \
"-------------------------------------------------------------------------\n")
-G_SUFFIX =
+G_SUFFIX = _g
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT
diff --git a/hotspot/make/solaris/makefiles/dtrace.make b/hotspot/make/solaris/makefiles/dtrace.make
index 0ba875dc58e..4482f26fffa 100644
--- a/hotspot/make/solaris/makefiles/dtrace.make
+++ b/hotspot/make/solaris/makefiles/dtrace.make
@@ -24,8 +24,8 @@
# Rules to build jvm_db/dtrace, used by vm.make
-# we build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
-# but not for CORE configuration
+# We build libjvm_dtrace/libjvm_db/dtrace for COMPILER1 and COMPILER2
+# but not for CORE or KERNEL configurations.
ifneq ("${TYPE}", "CORE")
ifneq ("${TYPE}", "KERNEL")
@@ -37,12 +37,13 @@ dtraceCheck:
else
-
JVM_DB = libjvm_db
-LIBJVM_DB = libjvm$(G_SUFFIX)_db.so
+LIBJVM_DB = libjvm_db.so
+LIBJVM_DB_G = libjvm$(G_SUFFIX)_db.so
JVM_DTRACE = jvm_dtrace
-LIBJVM_DTRACE = libjvm$(G_SUFFIX)_dtrace.so
+LIBJVM_DTRACE = libjvm_dtrace.so
+LIBJVM_DTRACE_G = libjvm$(G_SUFFIX)_dtrace.so
JVMOFFS = JvmOffsets
JVMOFFS.o = $(JVMOFFS).o
@@ -77,7 +78,7 @@ LFLAGS_JVM_DB += -D_REENTRANT $(PICFLAG)
LFLAGS_JVM_DTRACE += -D_REENTRANT $(PICFLAG)
else
LFLAGS_JVM_DB += -mt $(PICFLAG) -xnolib
-LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib
+LFLAGS_JVM_DTRACE += -mt $(PICFLAG) -xnolib -ldl
endif
ISA = $(subst i386,i486,$(shell isainfo -n))
@@ -86,18 +87,24 @@ ISA = $(subst i386,i486,$(shell isainfo -n))
ifneq ("${ISA}","${BUILDARCH}")
XLIBJVM_DB = 64/$(LIBJVM_DB)
+XLIBJVM_DB_G = 64/$(LIBJVM_DB_G)
XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
+XLIBJVM_DTRACE_G = 64/$(LIBJVM_DTRACE_G)
$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
@echo Making $@
$(QUIETLY) mkdir -p 64/ ; \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
+ [ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
+
$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo Making $@
$(QUIETLY) mkdir -p 64/ ; \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+ [ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
+
endif # ifneq ("${ISA}","${BUILDARCH}")
ifdef USE_GCC
@@ -142,11 +149,13 @@ $(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_D
@echo Making $@
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
+ [ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo Making $@
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
+ [ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
$(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
diff --git a/hotspot/make/solaris/makefiles/fastdebug.make b/hotspot/make/solaris/makefiles/fastdebug.make
index 084814d6758..4edeb373f9e 100644
--- a/hotspot/make/solaris/makefiles/fastdebug.make
+++ b/hotspot/make/solaris/makefiles/fastdebug.make
@@ -90,7 +90,6 @@ endif # Platform_compiler == sparcWorks
# for this method for now. (fix this when dtrace bug 6258412 is fixed)
OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
-
# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
@@ -115,8 +114,7 @@ MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
# and mustn't be otherwise.
MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
-
-G_SUFFIX =
+G_SUFFIX = _g
VERSION = optimized
SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS
PICFLAGS = DEFAULT
diff --git a/hotspot/make/solaris/makefiles/jsig.make b/hotspot/make/solaris/makefiles/jsig.make
index e4a4aef771a..81497767202 100644
--- a/hotspot/make/solaris/makefiles/jsig.make
+++ b/hotspot/make/solaris/makefiles/jsig.make
@@ -25,8 +25,11 @@
# Rules to build signal interposition library, used by vm.make
# libjsig[_g].so: signal interposition library
-JSIG = jsig$(G_SUFFIX)
-LIBJSIG = lib$(JSIG).so
+JSIG = jsig
+LIBJSIG = lib$(JSIG).so
+
+JSIG_G = $(JSIG)$(G_SUFFIX)
+LIBJSIG_G = lib$(JSIG_G).so
JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
@@ -46,6 +49,7 @@ $(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
@echo Making signal interposition lib...
$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
$(LFLAGS_JSIG) -o $@ $< -ldl
+ [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
install_jsig: $(LIBJSIG)
@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
diff --git a/hotspot/make/solaris/makefiles/jvmg.make b/hotspot/make/solaris/makefiles/jvmg.make
index e6603005fdd..160ea5df1d8 100644
--- a/hotspot/make/solaris/makefiles/jvmg.make
+++ b/hotspot/make/solaris/makefiles/jvmg.make
@@ -51,7 +51,7 @@ MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
# and mustn't be otherwise.
MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
-G_SUFFIX =
+G_SUFFIX = _g
VERSION = debug
SYSDEFS += -DASSERT -DDEBUG
PICFLAGS = DEFAULT
diff --git a/hotspot/make/solaris/makefiles/launcher.make b/hotspot/make/solaris/makefiles/launcher.make
index 2224d58017d..bf32444c5c3 100644
--- a/hotspot/make/solaris/makefiles/launcher.make
+++ b/hotspot/make/solaris/makefiles/launcher.make
@@ -25,7 +25,8 @@
# Rules to build gamma launcher, used by vm.make
# gamma[_g]: launcher
-LAUNCHER = gamma$(G_SUFFIX)
+LAUNCHER = gamma
+LAUNCHER_G = $(LAUNCHER)$(G_SUFFIX)
LAUNCHERDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/launcher
LAUNCHERFLAGS = $(ARCHFLAG) \
@@ -88,5 +89,6 @@ $(LAUNCHER): $(LAUNCHER.o) $(LIBJVM) $(LAUNCHER_MAPFILE)
$(LINK_LAUNCHER/PRE_HOOK) \
$(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
$(LINK_LAUNCHER/POST_HOOK) \
+ [ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
;; \
esac
diff --git a/hotspot/make/solaris/makefiles/saproc.make b/hotspot/make/solaris/makefiles/saproc.make
index 906cd9d998e..38751e8f1bf 100644
--- a/hotspot/make/solaris/makefiles/saproc.make
+++ b/hotspot/make/solaris/makefiles/saproc.make
@@ -25,9 +25,13 @@
# Rules to build serviceability agent library, used by vm.make
# libsaproc[_g].so: serviceability agent
-SAPROC = saproc$(G_SUFFIX)
+
+SAPROC = saproc
LIBSAPROC = lib$(SAPROC).so
+SAPROC_G = $(SAPROC)$(G_SUFFIX)
+LIBSAPROC_G = lib$(SAPROC_G).so
+
AGENT_DIR = $(GAMMADIR)/agent
SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)/proc
@@ -69,6 +73,7 @@ $(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
$(SA_LFLAGS) \
-o $@ \
-ldl -ldemangle -lthread -lc
+ [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
install_saproc: checkAndBuildSA
$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then \
diff --git a/hotspot/make/solaris/makefiles/sparcWorks.make b/hotspot/make/solaris/makefiles/sparcWorks.make
index d1648383857..acab5192183 100644
--- a/hotspot/make/solaris/makefiles/sparcWorks.make
+++ b/hotspot/make/solaris/makefiles/sparcWorks.make
@@ -281,8 +281,6 @@ else
OPT_CFLAGS=-xO4 $(EXTRA_OPT_CFLAGS)
endif
-CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_sparc/vm/solaris_sparc.il
-
endif # sparc
ifeq ("${Platform_arch_model}", "x86_32")
@@ -293,13 +291,14 @@ OPT_CFLAGS=-xtarget=pentium $(EXTRA_OPT_CFLAGS)
# [phh] Is this still true for 6.1?
OPT_CFLAGS+=-xO3
-CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_x86/vm/solaris_x86_32.il
-
endif # 32bit x86
# no more exceptions
CFLAGS/NOEX=-noex
+# Inline functions
+CFLAGS += $(GAMMADIR)/src/os_cpu/solaris_${Platform_arch}/vm/solaris_${Platform_arch_model}.il
+
# Reduce code bloat by reverting back to 5.0 behavior for static initializers
CFLAGS += -Qoption ccfe -one_static_init
@@ -312,6 +311,15 @@ PICFLAG/DEFAULT = $(PICFLAG)
PICFLAG/BETTER = $(PICFLAG/DEFAULT)
PICFLAG/BYFILE = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@))
+# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
+MAPFLAG = -M FILENAME
+
+# Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj
+SONAMEFLAG = -h SONAME
+
+# Build shared library
+SHARED_FLAG = -G
+
# Would be better if these weren't needed, since we link with CC, but
# at present removing them causes run-time errors
LFLAGS += -library=Crun
diff --git a/hotspot/make/solaris/makefiles/vm.make b/hotspot/make/solaris/makefiles/vm.make
index 32850b18bbd..058bb6bd7e4 100644
--- a/hotspot/make/solaris/makefiles/vm.make
+++ b/hotspot/make/solaris/makefiles/vm.make
@@ -108,11 +108,16 @@ ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1)
# older libm before libCrun, just to make sure it's found and used first.
LIBS += -lsocket -lsched -ldl $(LIBM) -lCrun -lthread -ldoor -lc
else
+ifeq ($(COMPILER_REV_NUMERIC), 502)
+# SC6.1 has its own libm.so: specifying anything else provokes a name conflict.
+LIBS += -ldl -lthread -lsocket -lm -lsched -ldoor
+else
LIBS += -ldl -lthread -lsocket $(LIBM) -lsched -ldoor
-endif
+endif # 502
+endif # 505
else
LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc
-endif
+endif # sparcWorks
# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM
@@ -126,8 +131,9 @@ include $(MAKEFILES_DIR)/dtrace.make
#----------------------------------------------------------------------
# JVM
-JVM = jvm$(G_SUFFIX)
-LIBJVM = lib$(JVM).so
+JVM = jvm
+LIBJVM = lib$(JVM).so
+LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS)
@@ -173,11 +179,12 @@ $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE)
-sbfast|-xsbfast) \
;; \
*) \
- echo Linking vm...; \
- $(LINK_LIB.CC/PRE_HOOK) \
- $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
- $(LINK_LIB.CC/POST_HOOK) \
- rm -f $@.1; ln -s $@ $@.1; \
+ echo Linking vm...; \
+ $(LINK_LIB.CC/PRE_HOOK) \
+ $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
+ $(LINK_LIB.CC/POST_HOOK) \
+ rm -f $@.1; ln -s $@ $@.1; \
+ [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
;; \
esac
diff --git a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
index 2583f7cee23..2b054b43e65 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
@@ -189,14 +189,17 @@ void LIR_Assembler::osr_entry() {
Register OSR_buf = osrBufferPointer()->as_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
int monitor_offset = BytesPerWord * method()->max_locals() +
- (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+ (2 * BytesPerWord) * (number_of_locks - 1);
+ // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+ // the OSR buffer using 2 word entries: first the lock and then
+ // the oop.
for (int i = 0; i < number_of_locks; i++) {
- int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+ int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
- __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+ __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
__ cmp(G0, O7);
__ br(Assembler::notEqual, false, Assembler::pt, L);
__ delayed()->nop();
@@ -205,9 +208,9 @@ void LIR_Assembler::osr_entry() {
}
#endif // ASSERT
// Copy the lock field into the compiled activation.
- __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
+ __ ld_ptr(OSR_buf, slot_offset + 0, O7);
__ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
- __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+ __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
__ st_ptr(O7, frame_map()->address_for_monitor_object(i));
}
}
@@ -953,9 +956,11 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
} else {
#ifdef _LP64
assert(base != to_reg->as_register_lo(), "can't handle this");
+ assert(O7 != to_reg->as_register_lo(), "can't handle this");
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
+ __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
__ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
- __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
+ __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
if (base == to_reg->as_register_lo()) {
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
@@ -976,8 +981,8 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
FloatRegister reg = to_reg->as_double_reg();
// split unaligned loads
if (unaligned || PatchALot) {
- __ ldf(FloatRegisterImpl::S, base, offset + BytesPerWord, reg->successor());
- __ ldf(FloatRegisterImpl::S, base, offset, reg);
+ __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
+ __ ldf(FloatRegisterImpl::S, base, offset, reg);
} else {
__ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
}
@@ -2200,6 +2205,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register len = O2;
__ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
+ LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
if (shift == 0) {
__ add(src_ptr, src_pos, src_ptr);
} else {
@@ -2208,6 +2214,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}
__ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
+ LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
if (shift == 0) {
__ add(dst_ptr, dst_pos, dst_ptr);
} else {
diff --git a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
index 2a69ade2156..87b47124352 100644
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
@@ -144,17 +144,17 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
if (index->is_register()) {
// apply the shift and accumulate the displacement
if (shift > 0) {
- LIR_Opr tmp = new_register(T_INT);
+ LIR_Opr tmp = new_pointer_register();
__ shift_left(index, shift, tmp);
index = tmp;
}
if (disp != 0) {
- LIR_Opr tmp = new_register(T_INT);
+ LIR_Opr tmp = new_pointer_register();
if (Assembler::is_simm13(disp)) {
- __ add(tmp, LIR_OprFact::intConst(disp), tmp);
+ __ add(tmp, LIR_OprFact::intptrConst(disp), tmp);
index = tmp;
} else {
- __ move(LIR_OprFact::intConst(disp), tmp);
+ __ move(LIR_OprFact::intptrConst(disp), tmp);
__ add(tmp, index, tmp);
index = tmp;
}
@@ -162,8 +162,8 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
}
} else if (disp != 0 && !Assembler::is_simm13(disp)) {
// index is illegal so replace it with the displacement loaded into a register
- index = new_register(T_INT);
- __ move(LIR_OprFact::intConst(disp), index);
+ index = new_pointer_register();
+ __ move(LIR_OprFact::intptrConst(disp), index);
disp = 0;
}
diff --git a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp
index cb2bab6ea93..d47f7ce41d5 100644
--- a/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/c1_globals_sparc.hpp
@@ -22,10 +22,9 @@
*
*/
-//
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
-//
+
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, CICompileOSR, true );
@@ -48,27 +47,24 @@ define_pd_global(intx, OnStackReplacePercentage, 1400 );
define_pd_global(bool, UseTLAB, true );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, FreqInlineSize, 325 );
-define_pd_global(intx, NewRatio, 8 ); // Design center runs on 1.3.1
define_pd_global(bool, ResizeTLAB, true );
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
-define_pd_global(uintx, PermSize, 12*M );
-define_pd_global(uintx, MaxPermSize, 64*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true);
+define_pd_global(uintx,PermSize, 12*M );
+define_pd_global(uintx,MaxPermSize, 64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(intx, NewSizeThreadIncrease, 16*K );
-define_pd_global(uintx, DefaultMaxRAM, 1*G);
+define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(intx, InitialCodeCacheSize, 160*K);
-#endif // TIERED
+#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, false);
-
-define_pd_global(bool, LIRFillDelaySlots, true);
+define_pd_global(bool, LIRFillDelaySlots, true );
define_pd_global(bool, OptimizeSinglePrecision, false);
-define_pd_global(bool, CSEArrayLength, true);
+define_pd_global(bool, CSEArrayLength, true );
define_pd_global(bool, TwoOperandLIRForm, false);
-
-define_pd_global(intx, SafepointPollOffset, 0);
+define_pd_global(intx, SafepointPollOffset, 0 );
diff --git a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
index 92f69be26bb..2df4dbd8ede 100644
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
@@ -59,7 +59,6 @@ define_pd_global(intx, FLOATPRESSURE, 52); // C2 on V9 gets to u
define_pd_global(intx, FreqInlineSize, 175);
define_pd_global(intx, INTPRESSURE, 48); // large register set
define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment
-define_pd_global(intx, NewRatio, 2);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
// The default setting 16/16 seems to work best.
// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
@@ -83,25 +82,25 @@ define_pd_global(bool, OptoScheduling, true);
// sequence of instructions to load a 64 bit pointer.
//
// InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize, 48*M);
-define_pd_global(intx, CodeCacheExpansionSize, 64*K);
+define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize, 48*M);
+define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 32*G);
+define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
// InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize, 32*M);
-define_pd_global(intx, CodeCacheExpansionSize, 32*K);
+define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize, 32*M);
+define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 1*G);
+define_pd_global(uint64_t,MaxRAM, 4ULL*G);
#endif
-define_pd_global(uintx,CodeCacheMinBlockLength, 4);
+define_pd_global(uintx,CodeCacheMinBlockLength, 4);
// Heap related flags
-define_pd_global(uintx, PermSize, ScaleForWordSize(16*M));
-define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,PermSize, ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
index ff28c96cfc7..e115ef2a9e0 100644
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp
@@ -22,10 +22,8 @@
*
*/
-//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
-//
// For sparc we do not do call backs when a thread is in the interpreter, because the
// interpreter dispatch needs at least two instructions - first to load the dispatch address
@@ -41,26 +39,23 @@ define_pd_global(bool, NeedsDeoptSuspend, true); // register window ma
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
-define_pd_global(intx, CodeEntryAlignment, 32);
-define_pd_global(uintx, TLABSize, 0);
-define_pd_global(uintx, NewSize, ScaleForWordSize((2048 * K) + (2 * (64 * K))));
-define_pd_global(intx, SurvivorRatio, 8);
-define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
-define_pd_global(intx, InlineSmallCode, 1500);
+define_pd_global(intx, CodeEntryAlignment, 32);
+define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
+define_pd_global(intx, InlineSmallCode, 1500);
#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
-define_pd_global(intx, ThreadStackSize, 1024);
-define_pd_global(intx, VMThreadStackSize, 1024);
+define_pd_global(intx, ThreadStackSize, 1024);
+define_pd_global(intx, VMThreadStackSize, 1024);
#else
-define_pd_global(intx, ThreadStackSize, 512);
-define_pd_global(intx, VMThreadStackSize, 512);
+define_pd_global(intx, ThreadStackSize, 512);
+define_pd_global(intx, VMThreadStackSize, 512);
#endif
define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
-define_pd_global(intx, PreInflateSpin, 40); // Determined by running design center
+define_pd_global(intx, PreInflateSpin, 40); // Determined by running design center
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
diff --git a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp
index 9f0dd7166fd..d68d2b7702c 100644
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -394,6 +394,11 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
}
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+ // No special entry points that preclude compilation
+ return true;
+}
+
// This method tells the deoptimizer how big an interpreted frame must be:
int AbstractInterpreter::size_activation(methodOop method,
int tempcount,
diff --git a/hotspot/src/cpu/sparc/vm/sparc.ad b/hotspot/src/cpu/sparc/vm/sparc.ad
index 2c56575e09f..73e94021152 100644
--- a/hotspot/src/cpu/sparc/vm/sparc.ad
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad
@@ -1,5 +1,5 @@
//
-// Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
+// Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1885,6 +1885,10 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+ return RegMask();
+}
+
%}
@@ -6664,7 +6668,7 @@ instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
ins_pipe(ialu_imm);
%}
-instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
+instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
ins_cost(150);
size(4);
@@ -6673,7 +6677,7 @@ instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
ins_pipe(ialu_reg);
%}
-instruct cmovII_U_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
+instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
ins_cost(140);
size(4);
@@ -6719,6 +6723,16 @@ instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
ins_pipe(ialu_reg);
%}
+// This instruction also works with CmpN so we don't need cmovNN_reg.
+instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
+ match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+ size(4);
+ format %{ "MOV$cmp $icc,$src,$dst" %}
+ ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(ialu_reg);
+%}
+
instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
ins_cost(150);
@@ -6756,6 +6770,16 @@ instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
ins_pipe(ialu_reg);
%}
+instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
+ match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+
+ size(4);
+ format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
+ ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(ialu_reg);
+%}
+
instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
ins_cost(140);
@@ -6766,6 +6790,16 @@ instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
ins_pipe(ialu_imm);
%}
+instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
+ match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+ ins_cost(140);
+
+ size(4);
+ format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
+ ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(ialu_imm);
+%}
+
instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
ins_cost(150);
@@ -6805,6 +6839,17 @@ instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
ins_pipe(int_conditional_float_move);
%}
+instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
+ match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+
+ size(4);
+ format %{ "FMOVS$cmp $icc,$src,$dst" %}
+ opcode(0x101);
+ ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(int_conditional_float_move);
+%}
+
// Conditional move,
instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
@@ -6838,6 +6883,17 @@ instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
ins_pipe(int_conditional_double_move);
%}
+instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
+ match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+
+ size(4);
+ format %{ "FMOVD$cmp $icc,$src,$dst" %}
+ opcode(0x102);
+ ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(int_conditional_double_move);
+%}
+
// Conditional move,
instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
@@ -6877,6 +6933,17 @@ instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
%}
+instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
+ match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+
+ size(4);
+ format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
+ ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(ialu_reg);
+%}
+
+
instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
ins_cost(150);
diff --git a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
index a2dc1501462..66c5a218dcc 100644
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2862,6 +2862,9 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
+
+ // Don't initialize the platform math functions since sparc
+ // doesn't have intrinsics for these operations.
}
diff --git a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
index b83ed82cf13..ada795d7ea1 100644
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp
@@ -150,8 +150,7 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
}
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
- assert(!unbox, "NYI");//6815692//
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
address compiled_entry = __ pc();
Label cont;
diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.cpp b/hotspot/src/cpu/x86/vm/assembler_x86.cpp
index aa93bc8f23c..c22a8c1d01a 100644
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp
@@ -2251,6 +2251,7 @@ void Assembler::popf() {
emit_byte(0x9D);
}
+#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::popl(Address dst) {
// NOTE: this will adjust stack by 8byte on 64bits
InstructionMark im(this);
@@ -2258,6 +2259,7 @@ void Assembler::popl(Address dst) {
emit_byte(0x8F);
emit_operand(rax, dst);
}
+#endif
void Assembler::prefetch_prefix(Address src) {
prefix(src);
@@ -2428,6 +2430,7 @@ void Assembler::pushf() {
emit_byte(0x9C);
}
+#ifndef _LP64 // no 32bit push/pop on amd64
void Assembler::pushl(Address src) {
// Note this will push 64bit on 64bit
InstructionMark im(this);
@@ -2435,6 +2438,7 @@ void Assembler::pushl(Address src) {
emit_byte(0xFF);
emit_operand(rsi, src);
}
+#endif
void Assembler::pxor(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -5591,7 +5595,12 @@ void MacroAssembler::align(int modulus) {
}
void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
- andpd(dst, as_Address(src));
+ if (reachable(src)) {
+ andpd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ andpd(dst, Address(rscratch1, 0));
+ }
}
void MacroAssembler::andptr(Register dst, int32_t imm32) {
@@ -6078,11 +6087,21 @@ void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
}
void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
- comisd(dst, as_Address(src));
+ if (reachable(src)) {
+ comisd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ comisd(dst, Address(rscratch1, 0));
+ }
}
void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
- comiss(dst, as_Address(src));
+ if (reachable(src)) {
+ comiss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ comiss(dst, Address(rscratch1, 0));
+ }
}
@@ -7647,7 +7666,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
#ifdef ASSERT
Label L;
- testl(tmp, tmp);
+ testptr(tmp, tmp);
jccb(Assembler::notZero, L);
hlt();
bind(L);
diff --git a/hotspot/src/cpu/x86/vm/assembler_x86.hpp b/hotspot/src/cpu/x86/vm/assembler_x86.hpp
index 7aa0d0877a4..f44ae2dde48 100644
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp
@@ -1244,7 +1244,9 @@ private:
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
+#ifndef _LP64 // no 32bit push/pop on amd64
void popl(Address dst);
+#endif
#ifdef _LP64
void popq(Address dst);
@@ -1285,7 +1287,9 @@ private:
// Interleave Low Bytes
void punpcklbw(XMMRegister dst, XMMRegister src);
+#ifndef _LP64 // no 32bit push/pop on amd64
void pushl(Address src);
+#endif
void pushq(Address src);
diff --git a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index f8cdb23ee82..2fae5406861 100644
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -301,22 +301,25 @@ void LIR_Assembler::osr_entry() {
Register OSR_buf = osrBufferPointer()->as_pointer_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
int monitor_offset = BytesPerWord * method()->max_locals() +
- (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+ (2 * BytesPerWord) * (number_of_locks - 1);
+ // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+ // the OSR buffer using 2 word entries: first the lock and then
+ // the oop.
for (int i = 0; i < number_of_locks; i++) {
- int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+ int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
- __ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
+ __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("locked object is NULL");
__ bind(L);
}
#endif
- __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
+ __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
__ movptr(frame_map()->address_for_monitor_lock(i), rbx);
- __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
+ __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
__ movptr(frame_map()->address_for_monitor_object(i), rbx);
}
}
@@ -785,7 +788,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
ShouldNotReachHere();
__ movoop(as_Address(addr, noreg), c->as_jobject());
} else {
+#ifdef _LP64
+ __ movoop(rscratch1, c->as_jobject());
+ null_check_here = code_offset();
+ __ movptr(as_Address_lo(addr), rscratch1);
+#else
__ movoop(as_Address(addr), c->as_jobject());
+#endif
}
}
break;
@@ -1118,8 +1127,14 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
} else {
+#ifndef _LP64
__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
__ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
+#else
+ //no pushl on 64bits
+ __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
+ __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
+#endif
}
} else if (src->is_double_stack()) {
@@ -3136,8 +3151,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
#ifdef _LP64
assert_different_registers(c_rarg0, dst, dst_pos, length);
+ __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
__ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
assert_different_registers(c_rarg1, length);
+ __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
__ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
__ mov(c_rarg2, length);
diff --git a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
index 2e2c1364717..f98bfaa8ea3 100644
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
@@ -755,8 +755,19 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
}
LIR_Opr addr = new_pointer_register();
- __ move(obj.result(), addr);
- __ add(addr, offset.result(), addr);
+ LIR_Address* a;
+ if(offset.result()->is_constant()) {
+ a = new LIR_Address(obj.result(),
+ NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()),
+ as_BasicType(type));
+ } else {
+ a = new LIR_Address(obj.result(),
+ offset.result(),
+ LIR_Address::times_1,
+ 0,
+ as_BasicType(type));
+ }
+ __ leal(LIR_OprFact::address(a), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
// Do the pre-write barrier, if any.
diff --git a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp
index 659dcca4732..bbf96cba1f2 100644
--- a/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/c1_globals_x86.hpp
@@ -22,10 +22,8 @@
*
*/
-//
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
-//
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true );
@@ -48,27 +46,24 @@ define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, FreqInlineSize, 325 );
-define_pd_global(intx, NewRatio, 12 );
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
-define_pd_global(uintx, PermSize, 12*M );
-define_pd_global(uintx, MaxPermSize, 64*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true);
-define_pd_global(uintx, DefaultMaxRAM, 1*G);
+define_pd_global(uintx,PermSize, 12*M );
+define_pd_global(uintx,MaxPermSize, 64*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
-#endif // TIERED
+#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, true );
-
define_pd_global(bool, LIRFillDelaySlots, false);
-define_pd_global(bool, OptimizeSinglePrecision, true);
+define_pd_global(bool, OptimizeSinglePrecision, true );
define_pd_global(bool, CSEArrayLength, false);
-define_pd_global(bool, TwoOperandLIRForm, true);
+define_pd_global(bool, TwoOperandLIRForm, true );
-
-define_pd_global(intx, SafepointPollOffset, 256);
+define_pd_global(intx, SafepointPollOffset, 256 );
diff --git a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp
index 6b3d7250442..b299e5a5480 100644
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp
@@ -22,7 +22,6 @@
*
*/
-//
// Sets the default values for platform dependent flags used by the server compiler.
// (see c2_globals.hpp). Alpha-sorted.
@@ -46,8 +45,8 @@ define_pd_global(intx, CompileThreshold, 1000);
define_pd_global(intx, CompileThreshold, 10000);
#endif // TIERED
define_pd_global(intx, Tier2CompileThreshold, 10000);
-define_pd_global(intx, Tier3CompileThreshold, 20000 );
-define_pd_global(intx, Tier4CompileThreshold, 40000 );
+define_pd_global(intx, Tier3CompileThreshold, 20000);
+define_pd_global(intx, Tier4CompileThreshold, 40000);
define_pd_global(intx, BackEdgeThreshold, 100000);
define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
@@ -61,7 +60,6 @@ define_pd_global(intx, FreqInlineSize, 325);
#ifdef AMD64
define_pd_global(intx, INTPRESSURE, 13);
define_pd_global(intx, InteriorEntryAlignment, 16);
-define_pd_global(intx, NewRatio, 2);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
// InitialCodeCacheSize derived from specjbb2000 run.
@@ -69,19 +67,18 @@ define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multip
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 32*G);
+define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
define_pd_global(intx, INTPRESSURE, 6);
define_pd_global(intx, InteriorEntryAlignment, 4);
-define_pd_global(intx, NewRatio, 8); // Design center runs on 1.3.1
define_pd_global(intx, NewSizeThreadIncrease, 4*K);
-define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
+define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
-define_pd_global(uintx, DefaultMaxRAM, 1*G);
+define_pd_global(uint64_t,MaxRAM, 4ULL*G);
#endif // AMD64
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, RegisterCostAreaRatio, 16000);
@@ -97,8 +94,8 @@ define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
// Heap related flags
-define_pd_global(uintx, PermSize, ScaleForWordSize(16*M));
-define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,PermSize, ScaleForWordSize(16*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
diff --git a/hotspot/src/cpu/x86/vm/frame_x86.cpp b/hotspot/src/cpu/x86/vm/frame_x86.cpp
index 8ec4ba76295..7bbd7311dfa 100644
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp
@@ -330,6 +330,14 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
// This is the sp before any possible extension (adapter/locals).
intptr_t* unextended_sp = interpreter_frame_sender_sp();
+ address sender_pc = this->sender_pc();
+ CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
+ assert(sender_cb, "sanity");
+ nmethod* sender_nm = sender_cb->as_nmethod_or_null();
+ if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
+ unextended_sp = (intptr_t*) at(link_offset);
+ }
+
// The interpreter and compiler(s) always save EBP/RBP in a known
// location on entry. We must record where that location is
// so this if EBP/RBP was live on callout from c2 we can find
@@ -352,7 +360,7 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
#endif // AMD64
}
#endif /* COMPILER2 */
- return frame(sp, unextended_sp, link(), sender_pc());
+ return frame(sp, unextended_sp, link(), sender_pc);
}
@@ -375,6 +383,18 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
+ intptr_t* unextended_sp = sender_sp;
+ // If we are returning to a compiled method handle call site,
+ // the saved_fp will in fact be a saved value of the unextended SP.
+ // The simplest way to tell whether we are returning to such a call
+ // site is as follows:
+ CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
+ assert(sender_cb, "sanity");
+ nmethod* sender_nm = sender_cb->as_nmethod_or_null();
+ if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
+ unextended_sp = saved_fp;
+ }
+
if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
@@ -399,7 +419,7 @@ frame frame::sender_for_compiled_frame(RegisterMap* map) const {
}
assert(sender_sp != sp(), "must have changed");
- return frame(sender_sp, saved_fp, sender_pc);
+ return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
}
frame frame::sender(RegisterMap* map) const {
diff --git a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
index c3bfdae6d01..1f2065ba449 100644
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp
@@ -225,11 +225,12 @@ inline methodOop* frame::interpreter_frame_method_addr() const {
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp();
- if (last_sp == NULL ) {
+ if (last_sp == NULL) {
return sp();
} else {
- // sp() may have been extended by an adapter
- assert(last_sp < fp() && last_sp >= sp(), "bad tos");
+ // sp() may have been extended or shrunk by an adapter. At least
+ // check that we don't fall behind the legal region.
+ assert(last_sp < (intptr_t*) interpreter_frame_monitor_begin(), "bad tos");
return last_sp;
}
}
diff --git a/hotspot/src/cpu/x86/vm/globals_x86.hpp b/hotspot/src/cpu/x86/vm/globals_x86.hpp
index f5586c1ee56..764e7ef284a 100644
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp
@@ -22,17 +22,16 @@
*
*/
-//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
-//
-define_pd_global(bool, ConvertSleepToYield, true);
-define_pd_global(bool, ShareVtableStubs, true);
-define_pd_global(bool, CountInterpCalls, true);
+define_pd_global(bool, ConvertSleepToYield, true);
+define_pd_global(bool, ShareVtableStubs, true);
+define_pd_global(bool, CountInterpCalls, true);
+define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
-define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
-define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
+define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
+define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
// assign a different value for C2 without touching a number of files. Use
@@ -42,29 +41,24 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NUL
// the uep and the vep doesn't get real alignment but just slops on by
// only assured that the entry instruction meets the 5 byte size requirement.
#ifdef COMPILER2
-define_pd_global(intx, CodeEntryAlignment, 32);
+define_pd_global(intx, CodeEntryAlignment, 32);
#else
-define_pd_global(intx, CodeEntryAlignment, 16);
+define_pd_global(intx, CodeEntryAlignment, 16);
#endif // COMPILER2
+define_pd_global(intx, InlineFrequencyCount, 100);
+define_pd_global(intx, InlineSmallCode, 1000);
-define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
-
-define_pd_global(uintx, TLABSize, 0);
+define_pd_global(intx, StackYellowPages, 2);
+define_pd_global(intx, StackRedPages, 1);
#ifdef AMD64
-define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
// Very large C++ stack frames using solaris-amd64 optimized builds
// due to lack of optimization caused by C++ compiler bugs
define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
#else
-define_pd_global(uintx, NewSize, 1024 * K);
define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
#endif // AMD64
-define_pd_global(intx, InlineFrequencyCount, 100);
-define_pd_global(intx, InlineSmallCode, 1000);
-define_pd_global(intx, PreInflateSpin, 10);
-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
+define_pd_global(intx, PreInflateSpin, 10);
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp
index e06e423a43a..a30092523a8 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp
@@ -196,6 +196,9 @@ void InterpreterMacroAssembler::get_cache_index_at_bcp(Register reg, int bcp_off
} else {
assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
movl(reg, Address(rsi, bcp_offset));
+ // Check if the secondary index definition is still ~x, otherwise
+ // we have to change the following assembler code to calculate the
+ // plain index.
assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
notl(reg); // convert to plain index
}
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
index ad449c8bdd2..9418540dc09 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
@@ -185,12 +185,30 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
}
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
+ int bcp_offset,
+ bool giant_index) {
+ assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+ if (!giant_index) {
+ load_unsigned_short(index, Address(r13, bcp_offset));
+ } else {
+ assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
+ movl(index, Address(r13, bcp_offset));
+ // Check if the secondary index definition is still ~x, otherwise
+ // we have to change the following assembler code to calculate the
+ // plain index.
+ assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+ notl(index); // convert to plain index
+ }
+}
+
+
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
Register index,
- int bcp_offset) {
- assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+ int bcp_offset,
+ bool giant_index) {
assert(cache != index, "must use different registers");
- load_unsigned_short(index, Address(r13, bcp_offset));
+ get_cache_index_at_bcp(index, bcp_offset, giant_index);
movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
@@ -200,10 +218,10 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
Register tmp,
- int bcp_offset) {
- assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+ int bcp_offset,
+ bool giant_index) {
assert(cache != tmp, "must use different register");
- load_unsigned_short(tmp, Address(r13, bcp_offset));
+ get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
@@ -1236,7 +1254,8 @@ void InterpreterMacroAssembler::profile_final_call(Register mdp) {
void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
Register mdp,
- Register reg2) {
+ Register reg2,
+ bool receiver_can_be_null) {
if (ProfileInterpreter) {
Label profile_continue;
@@ -1246,8 +1265,15 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
// We are making a call. Increment the count.
increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+ Label skip_receiver_profile;
+ if (receiver_can_be_null) {
+ testptr(receiver, receiver);
+ jcc(Assembler::zero, skip_receiver_profile);
+ }
+
// Record the receiver type.
record_klass_in_profile(receiver, mdp, reg2);
+ bind(skip_receiver_profile);
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_constant(mdp,
diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp
index c35cb3a1940..0cfc9bf7fb8 100644
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp
@@ -95,9 +95,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index,
- int bcp_offset);
+ int bcp_offset, bool giant_index = false);
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
- int bcp_offset);
+ int bcp_offset, bool giant_index = false);
+ void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
void pop_ptr(Register r = rax);
@@ -236,7 +237,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_call(Register mdp);
void profile_final_call(Register mdp);
void profile_virtual_call(Register receiver, Register mdp,
- Register scratch2);
+ Register scratch2,
+ bool receiver_can_be_null = false);
void profile_ret(Register return_bci, Register mdp);
void profile_null_seen(Register mdp);
void profile_typecheck(Register mdp, Register klass, Register scratch);
diff --git a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp
index c3cbf56cab8..6c234a33a18 100644
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp
@@ -277,12 +277,11 @@ address InterpreterGenerator::generate_abstract_entry(void) {
address entry_point = __ pc();
// abstract method entry
- // remove return address. Not really needed, since exception
- // handling throws away expression stack
- __ pop(rbx);
- // adjust stack to what a normal return would do
- __ mov(rsp, r13);
+ // pop return address, reset last_sp to NULL
+ __ empty_expression_stack();
+ __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
+ __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
@@ -300,7 +299,10 @@ address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableMethodHandles) {
return generate_abstract_entry();
}
- return generate_abstract_entry(); //6815692//
+
+ address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);
+
+ return entry_point;
}
diff --git a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
index a682a81b833..fc11240e030 100644
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
@@ -65,9 +65,9 @@ static void verify_argslot(MacroAssembler* _masm, Register rax_argslot,
// Verify that argslot lies within (rsp, rbp].
Label L_ok, L_bad;
__ cmpptr(rax_argslot, rbp);
- __ jcc(Assembler::above, L_bad);
+ __ jccb(Assembler::above, L_bad);
__ cmpptr(rsp, rax_argslot);
- __ jcc(Assembler::below, L_ok);
+ __ jccb(Assembler::below, L_ok);
__ bind(L_bad);
__ stop(error_message);
__ bind(L_ok);
@@ -136,9 +136,9 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
- __ jcc(Assembler::greater, L_bad);
+ __ jccb(Assembler::greater, L_bad);
__ testl(arg_slots.as_register(), -stack_move_unit() - 1);
- __ jcc(Assembler::zero, L_ok);
+ __ jccb(Assembler::zero, L_ok);
__ bind(L_bad);
__ stop("assert arg_slots <= 0 and clear low bits");
__ bind(L_ok);
@@ -173,7 +173,7 @@ void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
__ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
__ addptr(rdx_temp, wordSize);
__ cmpptr(rdx_temp, rax_argslot);
- __ jcc(Assembler::less, loop);
+ __ jccb(Assembler::less, loop);
}
// Now move the argslot down, to point to the opened-up space.
@@ -211,9 +211,9 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
Label L_ok, L_bad;
__ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
__ cmpptr(rbx_temp, rbp);
- __ jcc(Assembler::above, L_bad);
+ __ jccb(Assembler::above, L_bad);
__ cmpptr(rsp, rax_argslot);
- __ jcc(Assembler::below, L_ok);
+ __ jccb(Assembler::below, L_ok);
__ bind(L_bad);
__ stop("deleted argument(s) must fall within current frame");
__ bind(L_ok);
@@ -221,9 +221,9 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
- __ jcc(Assembler::less, L_bad);
+ __ jccb(Assembler::less, L_bad);
__ testl(arg_slots.as_register(), -stack_move_unit() - 1);
- __ jcc(Assembler::zero, L_ok);
+ __ jccb(Assembler::zero, L_ok);
__ bind(L_bad);
__ stop("assert arg_slots >= 0 and clear low bits");
__ bind(L_ok);
@@ -258,7 +258,7 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
__ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
__ addptr(rdx_temp, -wordSize);
__ cmpptr(rdx_temp, rsp);
- __ jcc(Assembler::greaterEqual, loop);
+ __ jccb(Assembler::greaterEqual, loop);
}
// Now move the argslot up, to point to the just-copied block.
@@ -268,8 +268,9 @@ void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
}
#ifndef PRODUCT
+extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
- oopDesc* mh,
+ oop mh,
intptr_t* entry_sp,
intptr_t* saved_sp,
intptr_t* saved_bp) {
@@ -280,6 +281,7 @@ void trace_method_handle_stub(const char* adaptername,
adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
if (last_sp != saved_sp)
printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
+ if (Verbose) print_method_handle(mh);
}
#endif //PRODUCT
@@ -382,11 +384,11 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
__ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
__ testptr(rbx_method, rbx_method);
- __ jcc(Assembler::zero, no_method);
+ __ jccb(Assembler::zero, no_method);
int jobject_oop_offset = 0;
__ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
__ testptr(rbx_method, rbx_method);
- __ jcc(Assembler::zero, no_method);
+ __ jccb(Assembler::zero, no_method);
__ verify_oop(rbx_method);
__ push(rdi_pc); // and restore caller PC
__ jmp(rbx_method_fie);
@@ -448,7 +450,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
rbx_index, Address::times_ptr,
base + vtableEntry::method_offset_in_bytes());
Register rbx_method = rbx_temp;
- __ movl(rbx_method, vtable_entry_addr);
+ __ movptr(rbx_method, vtable_entry_addr);
__ verify_oop(rbx_method);
__ jmp(rbx_method_fie);
@@ -533,16 +535,15 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
if (arg_type == T_OBJECT) {
__ movptr(Address(rax_argslot, 0), rbx_temp);
} else {
- __ load_sized_value(rbx_temp, prim_value_addr,
+ __ load_sized_value(rdx_temp, prim_value_addr,
type2aelembytes(arg_type), is_signed_subword_type(arg_type));
- __ movptr(Address(rax_argslot, 0), rbx_temp);
+ __ movptr(Address(rax_argslot, 0), rdx_temp);
#ifndef _LP64
if (arg_slots == 2) {
- __ movl(rbx_temp, prim_value_addr.plus_disp(wordSize));
- __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rbx_temp);
+ __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
+ __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
}
#endif //_LP64
- break;
}
if (direct_to_method) {
@@ -584,7 +585,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Label done;
__ movptr(rdx_temp, vmarg);
__ testl(rdx_temp, rdx_temp);
- __ jcc(Assembler::zero, done); // no cast if null
+ __ jccb(Assembler::zero, done); // no cast if null
__ load_klass(rdx_temp, rdx_temp);
// live at this point:
@@ -675,24 +676,24 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// (now we are done with the old MH)
// original 32-bit vmdata word must be of this form:
- // | MBZ:16 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
- __ xchgl(rcx, rbx_vminfo); // free rcx for shifts
+ // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
+ __ xchgptr(rcx, rbx_vminfo); // free rcx for shifts
__ shll(rdx_temp /*, rcx*/);
Label zero_extend, done;
__ testl(rcx, CONV_VMINFO_SIGN_FLAG);
- __ jcc(Assembler::zero, zero_extend);
+ __ jccb(Assembler::zero, zero_extend);
// this path is taken for int->byte, int->short
__ sarl(rdx_temp /*, rcx*/);
- __ jmp(done);
+ __ jmpb(done);
__ bind(zero_extend);
// this is taken for int->char
__ shrl(rdx_temp /*, rcx*/);
__ bind(done);
- __ movptr(vmarg, rdx_temp);
- __ xchgl(rcx, rbx_vminfo); // restore rcx_recv
+ __ movl(vmarg, rdx_temp);
+ __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv
__ jump_to_method_handle_entry(rcx_recv, rdx_temp);
}
@@ -861,7 +862,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Verify that argslot > destslot, by at least swap_bytes.
Label L_ok;
__ cmpptr(rax_argslot, rbx_destslot);
- __ jcc(Assembler::aboveEqual, L_ok);
+ __ jccb(Assembler::aboveEqual, L_ok);
__ stop("source must be above destination (upward rotation)");
__ bind(L_ok);
}
@@ -877,7 +878,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
__ addptr(rax_argslot, -wordSize);
__ cmpptr(rax_argslot, rbx_destslot);
- __ jcc(Assembler::aboveEqual, loop);
+ __ jccb(Assembler::aboveEqual, loop);
} else {
__ addptr(rax_argslot, swap_bytes);
#ifdef ASSERT
@@ -885,7 +886,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Verify that argslot < destslot, by at least swap_bytes.
Label L_ok;
__ cmpptr(rax_argslot, rbx_destslot);
- __ jcc(Assembler::belowEqual, L_ok);
+ __ jccb(Assembler::belowEqual, L_ok);
__ stop("source must be below destination (downward rotation)");
__ bind(L_ok);
}
@@ -901,7 +902,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
__ addptr(rax_argslot, wordSize);
__ cmpptr(rax_argslot, rbx_destslot);
- __ jcc(Assembler::belowEqual, loop);
+ __ jccb(Assembler::belowEqual, loop);
}
// pop the original first chunk into the destination slot, now free
@@ -967,7 +968,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ addptr(rax_argslot, wordSize);
__ addptr(rdx_newarg, wordSize);
__ cmpptr(rdx_newarg, rbx_oldarg);
- __ jcc(Assembler::less, loop);
+ __ jccb(Assembler::less, loop);
__ pop(rdi); // restore temp
@@ -1119,7 +1120,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
}
__ addptr(rax_argslot, Interpreter::stackElementSize());
__ cmpptr(rax_argslot, rdx_argslot_limit);
- __ jcc(Assembler::less, loop);
+ __ jccb(Assembler::less, loop);
} else if (length_constant == 0) {
__ bind(skip_array_check);
// nothing to copy
diff --git a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
index 5a0de22f475..428d239d494 100644
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp
@@ -43,11 +43,11 @@ ExceptionBlob* OptoRuntime::_exception_blob;
// This code is entered with a jmp.
//
// Arguments:
-// rax,: exception oop
+// rax: exception oop
// rdx: exception pc
//
// Results:
-// rax,: exception oop
+// rax: exception oop
// rdx: exception pc in caller or ???
// destination: exception handler of caller
//
@@ -113,17 +113,17 @@ void OptoRuntime::generate_exception_blob() {
__ addptr(rsp, return_off * wordSize); // Epilog!
__ pop(rdx); // Exception pc
   + // rax: exception handler for given <exception oop/exception pc>
   - // rax,: exception handler for given <exception oop/exception pc>
+ // Restore SP from BP if the exception PC is a MethodHandle call.
+ __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
+ __ cmovptr(Assembler::notEqual, rsp, rbp);
// We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it.
__ push(rax);
- // rcx contains handler address
-
- __ get_thread(rcx); // TLS
// Get the exception
__ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
// Get the exception pc in case we are deoptimized
@@ -137,7 +137,7 @@ void OptoRuntime::generate_exception_blob() {
__ pop(rcx);
- // rax,: exception oop
+ // rax: exception oop
// rcx: exception handler
// rdx: exception pc
__ jmp (rcx);
diff --git a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index 68cb61979db..269f71d989f 100644
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -638,6 +638,10 @@ static void gen_i2c_adapter(MacroAssembler *masm,
__ movptr(rax, Address(rsp, 0));
+ // Must preserve original SP for loading incoming arguments because
+ // we need to align the outgoing SP for compiled code.
+ __ movptr(r11, rsp);
+
// Cut-out for having no stack args. Since up to 2 int/oop args are passed
// in registers, we will occasionally have no stack args.
int comp_words_on_stack = 0;
@@ -661,6 +665,10 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// as far as the placement of the call instruction
__ push(rax);
+ // Put saved SP in another register
+ const Register saved_sp = rax;
+ __ movptr(saved_sp, r11);
+
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
__ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
@@ -680,11 +688,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
- // int ld_off = (total_args_passed + comp_words_on_stack -i)*wordSize;
- // base ld_off on r13 (sender_sp) as the stack alignment makes offsets from rsp
- // unpredictable
- int ld_off = ((total_args_passed - 1) - i)*Interpreter::stackElementSize();
-
+ int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize();
//
@@ -699,10 +703,14 @@ static void gen_i2c_adapter(MacroAssembler *masm,
if (r_1->is_stack()) {
// Convert stack slot to an SP offset (+ wordSize to account for return address )
int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
+
+ // We can use r13 as a temp here because compiled code doesn't need r13 as an input
+ // and if we end up going thru a c2i because of a miss a reasonable value of r13
+ // will be generated.
if (!r_2->is_valid()) {
// sign extend???
- __ movl(rax, Address(r13, ld_off));
- __ movptr(Address(rsp, st_off), rax);
+ __ movl(r13, Address(saved_sp, ld_off));
+ __ movptr(Address(rsp, st_off), r13);
} else {
//
// We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
@@ -715,9 +723,9 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// ld_off is MSW so get LSW
const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
next_off : ld_off;
- __ movq(rax, Address(r13, offset));
+ __ movq(r13, Address(saved_sp, offset));
// st_off is LSW (i.e. reg.first())
- __ movq(Address(rsp, st_off), rax);
+ __ movq(Address(rsp, st_off), r13);
}
} else if (r_1->is_Register()) { // Register argument
Register r = r_1->as_Register();
@@ -732,16 +740,16 @@ static void gen_i2c_adapter(MacroAssembler *masm,
next_off : ld_off;
// this can be a misaligned move
- __ movq(r, Address(r13, offset));
+ __ movq(r, Address(saved_sp, offset));
} else {
// sign extend and use a full word?
- __ movl(r, Address(r13, ld_off));
+ __ movl(r, Address(saved_sp, ld_off));
}
} else {
if (!r_2->is_valid()) {
- __ movflt(r_1->as_XMMRegister(), Address(r13, ld_off));
+ __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
} else {
- __ movdbl(r_1->as_XMMRegister(), Address(r13, next_off));
+ __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
}
}
}
@@ -3319,6 +3327,10 @@ void OptoRuntime::generate_exception_blob() {
// rax: exception handler
+ // Restore SP from BP if the exception PC is a MethodHandle call.
+ __ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
+ __ cmovptr(Assembler::notEqual, rsp, rbp);
+
// We have a handler in rax (could be deopt blob).
__ mov(r8, rax);
diff --git a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index 3d6ca91a0ea..ad4b745ab42 100644
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2030,6 +2030,54 @@ class StubGenerator: public StubCodeGenerator {
entry_checkcast_arraycopy);
}
+ void generate_math_stubs() {
+ {
+ StubCodeMark mark(this, "StubRoutines", "log");
+ StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
+
+ __ fld_d(Address(rsp, 4));
+ __ flog();
+ __ ret(0);
+ }
+ {
+ StubCodeMark mark(this, "StubRoutines", "log10");
+ StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
+
+ __ fld_d(Address(rsp, 4));
+ __ flog10();
+ __ ret(0);
+ }
+ {
+ StubCodeMark mark(this, "StubRoutines", "sin");
+ StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
+
+ __ fld_d(Address(rsp, 4));
+ __ trigfunc('s');
+ __ ret(0);
+ }
+ {
+ StubCodeMark mark(this, "StubRoutines", "cos");
+ StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
+
+ __ fld_d(Address(rsp, 4));
+ __ trigfunc('c');
+ __ ret(0);
+ }
+ {
+ StubCodeMark mark(this, "StubRoutines", "tan");
+ StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
+
+ __ fld_d(Address(rsp, 4));
+ __ trigfunc('t');
+ __ ret(0);
+ }
+
+ // The intrinsic version of these seem to return the same value as
+ // the strict version.
+ StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
+ StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
+ }
+
public:
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
@@ -2228,6 +2276,8 @@ class StubGenerator: public StubCodeGenerator {
MethodHandles::generate_method_handle_stub(_masm, ek);
}
}
+
+ generate_math_stubs();
}
diff --git a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index 6b0731490fd..70620836653 100644
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2731,6 +2731,79 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
}
+ void generate_math_stubs() {
+ {
+ StubCodeMark mark(this, "StubRoutines", "log");
+ StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
+
+ __ subq(rsp, 8);
+ __ movdbl(Address(rsp, 0), xmm0);
+ __ fld_d(Address(rsp, 0));
+ __ flog();
+ __ fstp_d(Address(rsp, 0));
+ __ movdbl(xmm0, Address(rsp, 0));
+ __ addq(rsp, 8);
+ __ ret(0);
+ }
+ {
+ StubCodeMark mark(this, "StubRoutines", "log10");
+ StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
+
+ __ subq(rsp, 8);
+ __ movdbl(Address(rsp, 0), xmm0);
+ __ fld_d(Address(rsp, 0));
+ __ flog10();
+ __ fstp_d(Address(rsp, 0));
+ __ movdbl(xmm0, Address(rsp, 0));
+ __ addq(rsp, 8);
+ __ ret(0);
+ }
+ {
+ StubCodeMark mark(this, "StubRoutines", "sin");
+ StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
+
+ __ subq(rsp, 8);
+ __ movdbl(Address(rsp, 0), xmm0);
+ __ fld_d(Address(rsp, 0));
+ __ trigfunc('s');
+ __ fstp_d(Address(rsp, 0));
+ __ movdbl(xmm0, Address(rsp, 0));
+ __ addq(rsp, 8);
+ __ ret(0);
+ }
+ {
+ StubCodeMark mark(this, "StubRoutines", "cos");
+ StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
+
+ __ subq(rsp, 8);
+ __ movdbl(Address(rsp, 0), xmm0);
+ __ fld_d(Address(rsp, 0));
+ __ trigfunc('c');
+ __ fstp_d(Address(rsp, 0));
+ __ movdbl(xmm0, Address(rsp, 0));
+ __ addq(rsp, 8);
+ __ ret(0);
+ }
+ {
+ StubCodeMark mark(this, "StubRoutines", "tan");
+ StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
+
+ __ subq(rsp, 8);
+ __ movdbl(Address(rsp, 0), xmm0);
+ __ fld_d(Address(rsp, 0));
+ __ trigfunc('t');
+ __ fstp_d(Address(rsp, 0));
+ __ movdbl(xmm0, Address(rsp, 0));
+ __ addq(rsp, 8);
+ __ ret(0);
+ }
+
+ // The intrinsic version of these seem to return the same value as
+ // the strict version.
+ StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
+ StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
+ }
+
#undef __
#define __ masm->
@@ -2935,6 +3008,18 @@ class StubGenerator: public StubCodeGenerator {
// arraycopy stubs used by compilers
generate_arraycopy_stubs();
+
+ // generic method handle stubs
+ if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
+ for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
+ ek < MethodHandles::_EK_LIMIT;
+ ek = MethodHandles::EntryKind(1 + (int)ek)) {
+ StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+ MethodHandles::generate_method_handle_stub(_masm, ek);
+ }
+ }
+
+ generate_math_stubs();
}
public:
diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
index b78f0e2a066..eecfb3fd114 100644
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -155,15 +155,8 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
}
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
TosState incoming_state = state;
- if (EnableInvokeDynamic) {
- if (unbox) {
- incoming_state = atos;
- }
- } else {
- assert(!unbox, "old behavior");
- }
Label interpreter_entry;
address compiled_entry = __ pc();
@@ -216,46 +209,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ restore_bcp();
__ restore_locals();
- Label L_fail;
-
- if (unbox && state != atos) {
- // cast and unbox
- BasicType type = as_BasicType(state);
- if (type == T_BYTE) type = T_BOOLEAN; // FIXME
- KlassHandle boxk = SystemDictionaryHandles::box_klass(type);
- __ mov32(rbx, ExternalAddress((address) boxk.raw_value()));
- __ testl(rax, rax);
- Label L_got_value, L_get_value;
- // convert nulls to zeroes (avoid NPEs here)
- if (!(type == T_FLOAT || type == T_DOUBLE)) {
- // if rax already contains zero bits, forge ahead
- __ jcc(Assembler::zero, L_got_value);
- } else {
- __ jcc(Assembler::notZero, L_get_value);
- __ fldz();
- __ jmp(L_got_value);
- }
- __ bind(L_get_value);
- __ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
- __ jcc(Assembler::notEqual, L_fail);
- int offset = java_lang_boxing_object::value_offset_in_bytes(type);
- // Cf. TemplateTable::getfield_or_static
- switch (type) {
- case T_BYTE: // fall through:
- case T_BOOLEAN: __ load_signed_byte(rax, Address(rax, offset)); break;
- case T_CHAR: __ load_unsigned_short(rax, Address(rax, offset)); break;
- case T_SHORT: __ load_signed_short(rax, Address(rax, offset)); break;
- case T_INT: __ movl(rax, Address(rax, offset)); break;
- case T_FLOAT: __ fld_s(Address(rax, offset)); break;
- case T_DOUBLE: __ fld_d(Address(rax, offset)); break;
- // Access to java.lang.Double.value does not need to be atomic:
- case T_LONG: { __ movl(rdx, Address(rax, offset + 4));
- __ movl(rax, Address(rax, offset + 0)); } break;
- default: ShouldNotReachHere();
- }
- __ bind(L_got_value);
- }
-
Label L_got_cache, L_giant_index;
if (EnableInvokeDynamic) {
__ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
@@ -263,32 +216,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
}
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
__ bind(L_got_cache);
- if (unbox && state == atos) {
- // insert a casting conversion, to keep verifier sane
- Label L_ok, L_ok_pops;
- __ testl(rax, rax);
- __ jcc(Assembler::zero, L_ok);
- __ push(rax); // save the object to check
- __ push(rbx); // save CP cache reference
- __ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
- __ movl(rbx, Address(rbx, rcx,
- Address::times_4, constantPoolCacheOopDesc::base_offset() +
- ConstantPoolCacheEntry::f1_offset()));
- __ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx)));
- __ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx)));
- __ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx)));
- __ check_klass_subtype(rdx, rax, rbx, L_ok_pops);
- __ pop(rcx); // pop and discard CP cache
- __ mov(rbx, rax); // target supertype into rbx for L_fail
- __ pop(rax); // failed object into rax for L_fail
- __ jmp(L_fail);
-
- __ bind(L_ok_pops);
- // restore pushed temp regs:
- __ pop(rbx);
- __ pop(rax);
- __ bind(L_ok);
- }
__ movl(rbx, Address(rbx, rcx,
Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
@@ -301,14 +228,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
__ jmp(L_got_cache);
-
- if (unbox) {
- __ bind(L_fail);
- __ push(rbx); // missed klass (required)
- __ push(rax); // bad object (actual)
- __ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry));
- __ call(rdx);
- }
}
return entry;
@@ -1512,6 +1431,23 @@ address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter:
}
+// These should never be compiled since the interpreter will prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+ switch (method_kind(m)) {
+ case Interpreter::java_lang_math_sin : // fall thru
+ case Interpreter::java_lang_math_cos : // fall thru
+ case Interpreter::java_lang_math_tan : // fall thru
+ case Interpreter::java_lang_math_abs : // fall thru
+ case Interpreter::java_lang_math_log : // fall thru
+ case Interpreter::java_lang_math_log10 : // fall thru
+ case Interpreter::java_lang_math_sqrt :
+ return false;
+ default:
+ return true;
+ }
+}
+
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
@@ -1569,7 +1505,10 @@ int AbstractInterpreter::layout_activation(methodOop method,
if (interpreter_frame != NULL) {
#ifdef ASSERT
- assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
+ if (!EnableMethodHandles)
+ // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
+ // Probably, since deoptimization doesn't work yet.
+ assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
index 4f0c3c9f779..44225441635 100644
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,21 +100,26 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
return entry;
}
-// Arguments are: required type in rarg1, failing object (or NULL) in rarg2
+// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
address entry = __ pc();
__ pop(c_rarg2); // failing object is at TOS
__ pop(c_rarg1); // required type is at TOS+8
- // expression stack must be empty before entering the VM if an
- // exception happened
+ __ verify_oop(c_rarg1);
+ __ verify_oop(c_rarg2);
+
+ // Various method handle types use interpreter registers as temps.
+ __ restore_bcp();
+ __ restore_locals();
+
+ // Expression stack must be empty before entering the VM for an exception.
__ empty_expression_stack();
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
- InterpreterRuntime::
- throw_WrongMethodTypeException),
+ InterpreterRuntime::throw_WrongMethodTypeException),
// pass required type, failing object (or NULL)
c_rarg1, c_rarg2);
return entry;
@@ -166,8 +171,7 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
- int step, bool unbox) {
- assert(!unbox, "NYI");//6815692//
+ int step) {
// amd64 doesn't need to do anything special about compiled returns
// to the interpreter so the code that exists on x86 to place a sentinel
@@ -183,15 +187,29 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ restore_bcp();
__ restore_locals();
- __ get_cache_and_index_at_bcp(rbx, rcx, 1);
+ Label L_got_cache, L_giant_index;
+ if (EnableInvokeDynamic) {
+ __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
+ __ jcc(Assembler::equal, L_giant_index);
+ }
+ __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+ __ bind(L_got_cache);
__ movl(rbx, Address(rbx, rcx,
- Address::times_8,
+ Address::times_ptr,
in_bytes(constantPoolCacheOopDesc::base_offset()) +
3 * wordSize));
__ andl(rbx, 0xFF);
if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
__ lea(rsp, Address(rsp, rbx, Address::times_8));
__ dispatch_next(state, step);
+
+ // out of the main line of code...
+ if (EnableInvokeDynamic) {
+ __ bind(L_giant_index);
+ __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+ __ jmp(L_got_cache);
+ }
+
return entry;
}
@@ -431,8 +449,12 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
__ addptr(rax, stack_base);
__ subptr(rax, stack_size);
+ // Use the maximum number of pages we might bang.
+ const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
+ (StackRedPages+StackYellowPages);
+
// add in the red and yellow zone sizes
- __ addptr(rax, (StackRedPages + StackYellowPages) * page_size);
+ __ addptr(rax, max_pages * page_size);
// check against the current stack bottom
__ cmpptr(rsp, rax);
@@ -1434,6 +1456,23 @@ address AbstractInterpreterGenerator::generate_method_entry(
generate_normal_entry(synchronized);
}
+// These should never be compiled since the interpreter will prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+ switch (method_kind(m)) {
+ case Interpreter::java_lang_math_sin : // fall thru
+ case Interpreter::java_lang_math_cos : // fall thru
+ case Interpreter::java_lang_math_tan : // fall thru
+ case Interpreter::java_lang_math_abs : // fall thru
+ case Interpreter::java_lang_math_log : // fall thru
+ case Interpreter::java_lang_math_log10 : // fall thru
+ case Interpreter::java_lang_math_sqrt :
+ return false;
+ default:
+ return true;
+ }
+}
+
// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
const int entry_size = frame::interpreter_frame_monitor_size();
@@ -1484,8 +1523,10 @@ int AbstractInterpreter::layout_activation(methodOop method,
tempcount* Interpreter::stackElementWords() + popframe_extra_args;
if (interpreter_frame != NULL) {
#ifdef ASSERT
- assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(),
- "Frame not properly walkable");
+ if (!EnableMethodHandles)
+ // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
+ // Probably, since deoptimization doesn't work yet.
+ assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
index 50ae3190953..8959b341023 100644
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp
@@ -2890,9 +2890,6 @@ void TemplateTable::count_calls(Register method, Register temp) {
void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
- bool is_invdyn_bootstrap = (byte_no < 0);
- if (is_invdyn_bootstrap) byte_no = -byte_no;
-
// determine flags
Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
@@ -2907,8 +2904,6 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
const Register flags = rdx;
assert_different_registers(method, index, recv, flags);
- assert(!is_invdyn_bootstrap || is_invokedynamic, "byte_no<0 hack only for invdyn");
-
// save 'interpreter return address'
__ save_bcp();
@@ -2944,9 +2939,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// load return address
{
address table_addr;
- if (is_invdyn_bootstrap)
- table_addr = (address)Interpreter::return_5_unbox_addrs_by_index_table();
- else if (is_invokeinterface || is_invokedynamic)
+ if (is_invokeinterface || is_invokedynamic)
table_addr = (address)Interpreter::return_5_addrs_by_index_table();
else
table_addr = (address)Interpreter::return_3_addrs_by_index_table();
@@ -3153,54 +3146,10 @@ void TemplateTable::invokedynamic(int byte_no) {
__ profile_call(rsi);
}
- Label handle_unlinked_site;
- __ movptr(rcx, Address(rax, __ delayed_value(sun_dyn_CallSiteImpl::target_offset_in_bytes, rcx)));
- __ testptr(rcx, rcx);
- __ jcc(Assembler::zero, handle_unlinked_site);
-
+ __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+ __ null_check(rcx);
__ prepare_to_jump_from_interpreted();
__ jump_to_method_handle_entry(rcx, rdx);
-
- // Initial calls come here...
- __ bind(handle_unlinked_site);
- __ pop(rcx); // remove return address pushed by prepare_invoke
-
- // box stacked arguments into an array for the bootstrap method
- address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::bootstrap_invokedynamic);
- __ restore_bcp(); // rsi must be correct for call_VM
- __ call_VM(rax, entry, rax);
- __ movl(rdi, rax); // protect bootstrap MH from prepare_invoke
-
- // recompute return address
- __ restore_bcp(); // rsi must be correct for prepare_invoke
- prepare_invoke(rax, rbx, -byte_no); // smashes rcx, rdx
- // rax: CallSite object (f1)
- // rbx: unused (f2)
- // rdi: bootstrap MH
- // rdx: flags
-
- // now load up the arglist, which has been neatly boxed
- __ get_thread(rcx);
- __ movptr(rdx, Address(rcx, JavaThread::vm_result_2_offset()));
- __ movptr(Address(rcx, JavaThread::vm_result_2_offset()), NULL_WORD);
- __ verify_oop(rdx);
- // rdx = arglist
-
- // save SP now, before we add the bootstrap call to the stack
- // We must preserve a fiction that the original arguments are outgoing,
- // because the return sequence will reset the stack to this point
- // and then pop all those arguments. It seems error-prone to use
- // a different argument list size just for bootstrapping.
- __ prepare_to_jump_from_interpreted();
-
- // Now let's play adapter, pushing the real arguments on the stack.
- __ pop(rbx); // return PC
- __ push(rdi); // boot MH
- __ push(rax); // call site
- __ push(rdx); // arglist
- __ push(rbx); // return PC, again
- __ mov(rcx, rdi);
- __ jump_to_method_handle_entry(rcx, rdx);
}
//----------------------------------------------------------------------------------------------------
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
index 1180227b59f..f461b10e451 100644
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp
@@ -203,18 +203,15 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
__ jcc(Assembler::notEqual, fast_patch);
__ get_method(scratch);
// Let breakpoint table handling rewrite to quicker bytecode
- __ call_VM(noreg,
- CAST_FROM_FN_PTR(address,
- InterpreterRuntime::set_original_bytecode_at),
- scratch, r13, bc);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
#ifndef ASSERT
__ jmpb(patch_done);
- __ bind(fast_patch);
- }
#else
__ jmp(patch_done);
+#endif
__ bind(fast_patch);
}
+#ifdef ASSERT
Label okay;
__ load_unsigned_byte(scratch, at_bcp(0));
__ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
@@ -2054,26 +2051,28 @@ void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
}
}
-void TemplateTable::resolve_cache_and_index(int byte_no,
- Register Rcache,
- Register index) {
+void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
+ bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
const Register temp = rbx;
assert_different_registers(Rcache, index, temp);
const int shift_count = (1 + byte_no) * BitsPerByte;
Label resolved;
- __ get_cache_and_index_at_bcp(Rcache, index, 1);
- __ movl(temp, Address(Rcache,
- index, Address::times_8,
- constantPoolCacheOopDesc::base_offset() +
- ConstantPoolCacheEntry::indices_offset()));
- __ shrl(temp, shift_count);
- // have we resolved this bytecode?
- __ andl(temp, 0xFF);
- __ cmpl(temp, (int) bytecode());
- __ jcc(Assembler::equal, resolved);
+ __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+ if (is_invokedynamic) {
+ // we are resolved if the f1 field contains a non-null CallSite object
+ __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
+ __ jcc(Assembler::notEqual, resolved);
+ } else {
+ __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ __ shrl(temp, shift_count);
+ // have we resolved this bytecode?
+ __ andl(temp, 0xFF);
+ __ cmpl(temp, (int) bytecode());
+ __ jcc(Assembler::equal, resolved);
+ }
// resolve first time through
address entry;
@@ -2090,6 +2089,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_invokeinterface:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
break;
+ case Bytecodes::_invokedynamic:
+ entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
+ break;
default:
ShouldNotReachHere();
break;
@@ -2098,7 +2100,7 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
__ call_VM(noreg, entry, temp);
// Update registers with resolved info
- __ get_cache_and_index_at_bcp(Rcache, index, 1);
+ __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
__ bind(resolved);
}
@@ -2832,15 +2834,14 @@ void TemplateTable::count_calls(Register method, Register temp) {
ShouldNotReachHere();
}
-void TemplateTable::prepare_invoke(Register method,
- Register index,
- int byte_no,
- Bytecodes::Code code) {
+void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
// determine flags
+ Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
+ const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
const bool is_invokespecial = code == Bytecodes::_invokespecial;
- const bool load_receiver = code != Bytecodes::_invokestatic;
+ const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
const bool receiver_null_check = is_invokespecial;
const bool save_flags = is_invokeinterface || is_invokevirtual;
// setup registers & access constant pool cache
@@ -2858,9 +2859,13 @@ void TemplateTable::prepare_invoke(Register method,
__ movl(recv, flags);
__ andl(recv, 0xFF);
if (TaggedStackInterpreter) __ shll(recv, 1); // index*2
- __ movptr(recv, Address(rsp, recv, Address::times_8,
- -Interpreter::expr_offset_in_bytes(1)));
- __ verify_oop(recv);
+ Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
+ if (is_invokedynamic) {
+ __ lea(recv, recv_addr);
+ } else {
+ __ movptr(recv, recv_addr);
+ __ verify_oop(recv);
+ }
}
// do null check if needed
@@ -2878,10 +2883,14 @@ void TemplateTable::prepare_invoke(Register method,
ConstantPoolCacheEntry::verify_tosBits();
// load return address
{
- ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
- ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
- __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
- __ movptr(flags, Address(rscratch1, flags, Address::times_8));
+ address table_addr;
+ if (is_invokeinterface || is_invokedynamic)
+ table_addr = (address)Interpreter::return_5_addrs_by_index_table();
+ else
+ table_addr = (address)Interpreter::return_3_addrs_by_index_table();
+ ExternalAddress table(table_addr);
+ __ lea(rscratch1, table);
+ __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
}
// push return address
@@ -2947,7 +2956,7 @@ void TemplateTable::invokevirtual_helper(Register index,
void TemplateTable::invokevirtual(int byte_no) {
transition(vtos, vtos);
- prepare_invoke(rbx, noreg, byte_no, bytecode());
+ prepare_invoke(rbx, noreg, byte_no);
// rbx: index
// rcx: receiver
@@ -2959,7 +2968,7 @@ void TemplateTable::invokevirtual(int byte_no) {
void TemplateTable::invokespecial(int byte_no) {
transition(vtos, vtos);
- prepare_invoke(rbx, noreg, byte_no, bytecode());
+ prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
__ profile_call(rax);
@@ -2969,7 +2978,7 @@ void TemplateTable::invokespecial(int byte_no) {
void TemplateTable::invokestatic(int byte_no) {
transition(vtos, vtos);
- prepare_invoke(rbx, noreg, byte_no, bytecode());
+ prepare_invoke(rbx, noreg, byte_no);
// do the call
__ verify_oop(rbx);
__ profile_call(rax);
@@ -2983,7 +2992,7 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
void TemplateTable::invokeinterface(int byte_no) {
transition(vtos, vtos);
- prepare_invoke(rax, rbx, byte_no, bytecode());
+ prepare_invoke(rax, rbx, byte_no);
// rax: Interface
// rbx: index
@@ -3072,7 +3081,24 @@ void TemplateTable::invokedynamic(int byte_no) {
return;
}
- __ stop("invokedynamic NYI");//6815692//
+ prepare_invoke(rax, rbx, byte_no);
+
+ // rax: CallSite object (f1)
+ // rbx: unused (f2)
+ // rcx: receiver address
+ // rdx: flags (unused)
+
+ if (ProfileInterpreter) {
+ Label L;
+ // %%% should make a type profile for any invokedynamic that takes a ref argument
+ // profile this call
+ __ profile_call(r13);
+ }
+
+ __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+ __ null_check(rcx);
+ __ prepare_to_jump_from_interpreted();
+ __ jump_to_method_handle_entry(rcx, rdx);
}
diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp b/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp
index ec531a7bc1a..6a9fdf9ed90 100644
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.hpp
@@ -22,8 +22,7 @@
*
*/
- static void prepare_invoke(Register method, Register index, int byte_no,
- Bytecodes::Code code);
+ static void prepare_invoke(Register method, Register index, int byte_no);
static void invokevirtual_helper(Register index, Register recv,
Register flags);
static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);
diff --git a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
index ae8efb86939..5965fd3ead8 100644
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp
@@ -255,6 +255,8 @@ void VM_Version::get_processor_features() {
if (!VM_Version::supports_sse2()) {
vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
}
+ // in 64 bit the use of SSE2 is the minimum
+ if (UseSSE < 2) UseSSE = 2;
#endif
// If the OS doesn't support SSE, we can't use this feature even if the HW does
diff --git a/hotspot/src/cpu/x86/vm/x86_32.ad b/hotspot/src/cpu/x86/vm/x86_32.ad
index 71657a809bf..72cb4175ab2 100644
--- a/hotspot/src/cpu/x86/vm/x86_32.ad
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad
@@ -268,22 +268,36 @@ static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CON
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
+// Offset hacking within calls.
+static int pre_call_FPU_size() {
+ if (Compile::current()->in_24_bit_fp_mode())
+ return 6; // fldcw
+ return 0;
+}
+
+static int preserve_SP_size() {
+ return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
+}
+
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
- return 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0); // 5 bytes from start of call to where return address points
+ int offset = 5 + pre_call_FPU_size(); // 5 bytes from start of call to where return address points
+ if (_method_handle_invoke)
+ offset += preserve_SP_size();
+ return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
- return 10 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0); // 10 bytes from start of call to where return address points
+ return 10 + pre_call_FPU_size(); // 10 bytes from start of call to where return address points
}
static int sizeof_FFree_Float_Stack_All = -1;
int MachCallRuntimeNode::ret_addr_offset() {
assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
- return sizeof_FFree_Float_Stack_All + 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);
+ return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size();
}
// Indicate if the safepoint node needs the polling page as an input.
@@ -299,8 +313,16 @@ bool SafePointNode::needs_polling_address_input() {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
- if (Compile::current()->in_24_bit_fp_mode())
- current_offset += 6; // skip fldcw in pre_call_FPU, if any
+ current_offset += pre_call_FPU_size(); // skip fldcw, if any
+ current_offset += 1; // skip call opcode byte
+ return round_to(current_offset, alignment_required()) - current_offset;
+}
+
+// The address of the call instruction needs to be 4-byte aligned to
+// ensure that it does not span a cache line so that it can be patched.
+int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
+ current_offset += pre_call_FPU_size(); // skip fldcw, if any
+ current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@@ -308,8 +330,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
- if (Compile::current()->in_24_bit_fp_mode())
- current_offset += 6; // skip fldcw in pre_call_FPU, if any
+ current_offset += pre_call_FPU_size(); // skip fldcw, if any
current_offset += 5; // skip MOV instruction
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
@@ -1460,6 +1481,10 @@ RegMask Matcher::modL_proj_mask() {
return RegMask();
}
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+ return EBP_REG_mask;
+}
+
%}
//----------ENCODING BLOCK-----------------------------------------------------
@@ -1772,10 +1797,13 @@ encode %{
enc_class pre_call_FPU %{
// If method sets FPU control word restore it here
+ debug_only(int off0 = cbuf.code_size());
if( Compile::current()->in_24_bit_fp_mode() ) {
MacroAssembler masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
+ debug_only(int off1 = cbuf.code_size());
+ assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
%}
enc_class post_call_FPU %{
@@ -1786,6 +1814,21 @@ encode %{
}
%}
+ enc_class preserve_SP %{
+ debug_only(int off0 = cbuf.code_size());
+ MacroAssembler _masm(&cbuf);
+ // RBP is preserved across all calls, even compiled calls.
+ // Use it to preserve RSP in places where the callee might change the SP.
+ __ movptr(rbp, rsp);
+ debug_only(int off1 = cbuf.code_size());
+ assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+ %}
+
+ enc_class restore_SP %{
+ MacroAssembler _masm(&cbuf);
+ __ movptr(rsp, rbp);
+ %}
+
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
@@ -13406,6 +13449,7 @@ instruct cmovXX_reg_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, regX dst,
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
+ predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@@ -13420,6 +13464,30 @@ instruct CallStaticJavaDirect(method meth) %{
ins_alignment(4);
%}
+// Call Java Static Instruction (method handle version)
+// Note: If this code changes, the corresponding ret_addr_offset() and
+// compute_padding() functions will have to be adjusted.
+instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
+ match(CallStaticJava);
+ predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
+ effect(USE meth);
+ // EBP is saved by all callees (for interpreter stack correction).
+ // We use it here for a similar purpose, in {preserve,restore}_SP.
+
+ ins_cost(300);
+ format %{ "CALL,static/MethodHandle " %}
+ opcode(0xE8); /* E8 cd */
+ ins_encode( pre_call_FPU,
+ preserve_SP,
+ Java_Static_Call( meth ),
+ restore_SP,
+ call_epilog,
+ post_call_FPU );
+ ins_pipe( pipe_slow );
+ ins_pc_relative(1);
+ ins_alignment(4);
+%}
+
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
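
A small self-contained sketch of the byte accounting that the new pre_call_FPU_size() and preserve_SP_size() helpers centralize, assuming the instruction sizes stated in the comments above: 6 bytes for the optional fldcw, 2 bytes for mov ebp,esp on a 32-bit build, and 5 bytes for a call rel32.

#include <cstdio>

static int pre_call_FPU_size(bool in_24_bit_fp_mode) {
  return in_24_bit_fp_mode ? 6 : 0;       // size of the fldcw, if emitted
}

static int preserve_SP_size() {
  return 2;                               // mov ebp, esp: opcode + modrm (no REX on 32-bit)
}

static int static_call_ret_addr_offset(bool in_24_bit_fp_mode,
                                        bool method_handle_invoke) {
  int offset = 5 + pre_call_FPU_size(in_24_bit_fp_mode);  // E8 rel32 call
  if (method_handle_invoke)
    offset += preserve_SP_size();         // account for the extra mov before the call
  return offset;
}

int main() {
  std::printf("plain static call:          %d\n",
              static_call_ret_addr_offset(false, false));   // 5
  std::printf("method handle call, 24-bit: %d\n",
              static_call_ret_addr_offset(true, true));     // 13
  return 0;
}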
diff --git a/hotspot/src/cpu/x86/vm/x86_64.ad b/hotspot/src/cpu/x86/vm/x86_64.ad
index 5927b5081a7..86e28eed7ea 100644
--- a/hotspot/src/cpu/x86/vm/x86_64.ad
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad
@@ -551,12 +551,19 @@ source %{
#define __ _masm.
+static int preserve_SP_size() {
+ return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
+}
+
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset()
{
- return 5; // 5 bytes from start of call to where return address points
+ int offset = 5; // 5 bytes from start of call to where return address points
+ if (_method_handle_invoke)
+ offset += preserve_SP_size();
+ return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset()
@@ -587,6 +594,15 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
return round_to(current_offset, alignment_required()) - current_offset;
}
+// The address of the call instruction needs to be 4-byte aligned to
+// ensure that it does not span a cache line so that it can be patched.
+int CallStaticJavaHandleNode::compute_padding(int current_offset) const
+{
+ current_offset += preserve_SP_size(); // skip mov rbp, rsp
+ current_offset += 1; // skip call opcode byte
+ return round_to(current_offset, alignment_required()) - current_offset;
+}
+
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
@@ -2113,6 +2129,10 @@ RegMask Matcher::modL_proj_mask() {
return LONG_RDX_REG_mask;
}
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+ return PTR_RBP_REG_mask;
+}
+
static Address build_address(int b, int i, int s, int d) {
Register index = as_Register(i);
Address::ScaleFactor scale = (Address::ScaleFactor)s;
@@ -2608,6 +2628,21 @@ encode %{
RELOC_DISP32);
%}
+ enc_class preserve_SP %{
+ debug_only(int off0 = cbuf.code_size());
+ MacroAssembler _masm(&cbuf);
+ // RBP is preserved across all calls, even compiled calls.
+ // Use it to preserve RSP in places where the callee might change the SP.
+ __ movptr(rbp, rsp);
+ debug_only(int off1 = cbuf.code_size());
+ assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+ %}
+
+ enc_class restore_SP %{
+ MacroAssembler _masm(&cbuf);
+ __ movptr(rsp, rbp);
+ %}
+
enc_class Java_Static_Call(method meth)
%{
// JAVA STATIC CALL
@@ -12526,9 +12561,9 @@ instruct safePoint_poll(rFlagsReg cr)
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaDirect(method meth)
-%{
+instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
+ predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@@ -12540,6 +12575,28 @@ instruct CallStaticJavaDirect(method meth)
ins_alignment(4);
%}
+// Call Java Static Instruction (method handle version)
+// Note: If this code changes, the corresponding ret_addr_offset() and
+// compute_padding() functions will have to be adjusted.
+instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
+ match(CallStaticJava);
+ predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
+ effect(USE meth);
+ // RBP is saved by all callees (for interpreter stack correction).
+ // We use it here for a similar purpose, in {preserve,restore}_SP.
+
+ ins_cost(300);
+ format %{ "call,static/MethodHandle " %}
+ opcode(0xE8); /* E8 cd */
+ ins_encode(preserve_SP,
+ Java_Static_Call(meth),
+ restore_SP,
+ call_epilog);
+ ins_pipe(pipe_slow);
+ ins_pc_relative(1);
+ ins_alignment(4);
+%}
+
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
diff --git a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
index 8c99fbf4556..8fcb75c8b36 100644
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp
@@ -204,6 +204,20 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
goto unwind_and_return;
}
+ // Update the invocation counter
+ if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
+ thread->set_do_not_unlock();
+ InvocationCounter *counter = method->invocation_counter();
+ counter->increment();
+ if (counter->reached_InvocationLimit()) {
+ CALL_VM_NOCHECK(
+ InterpreterRuntime::frequency_counter_overflow(thread, NULL));
+ if (HAS_PENDING_EXCEPTION)
+ goto unwind_and_return;
+ }
+ thread->clr_do_not_unlock();
+ }
+
// Lock if necessary
BasicObjectLock *monitor;
monitor = NULL;
@@ -231,7 +245,7 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
if (handlerAddr == NULL) {
CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
if (HAS_PENDING_EXCEPTION)
- goto unwind_and_return;
+ goto unlock_unwind_and_return;
handlerAddr = method->signature_handler();
assert(handlerAddr != NULL, "eh?");
@@ -240,7 +254,7 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
CALL_VM_NOCHECK(handlerAddr =
InterpreterRuntime::slow_signature_handler(thread, method, NULL,NULL));
if (HAS_PENDING_EXCEPTION)
- goto unwind_and_return;
+ goto unlock_unwind_and_return;
}
handler = \
InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
@@ -351,10 +365,10 @@ void CppInterpreter::native_entry(methodOop method, intptr_t UNUSED, TRAPS) {
// Reset handle block
thread->active_handles()->clear();
- // Unlock if necessary. It seems totally wrong that this
- // is skipped in the event of an exception but apparently
- // the template interpreter does this so we do too.
- if (monitor && !HAS_PENDING_EXCEPTION) {
+ unlock_unwind_and_return:
+
+ // Unlock if necessary
+ if (monitor) {
BasicLock *lock = monitor->lock();
markOop header = lock->displaced_header();
oop rcvr = monitor->obj();
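
A minimal standalone model of the invocation-counter check added to native_entry above: bump the method's counter on every call and request compilation once a limit is crossed. The threshold and names here are illustrative stand-ins, not the real InvocationCounter interface.

#include <cstdio>

struct InvocationCounter {
  int count;
  enum { limit = 3 };                     // stand-in for the real InvocationLimit
  void increment()           { ++count; }
  bool reached_limit() const { return count >= limit; }
};

static void frequency_counter_overflow(const char* method) {
  std::printf("requesting compilation of %s\n", method);  // InterpreterRuntime call in HotSpot
}

int main() {
  InvocationCounter counter = { 0 };
  for (int i = 0; i < 5; i++) {
    counter.increment();                  // counter->increment()
    if (counter.reached_limit()) {        // counter->reached_InvocationLimit()
      frequency_counter_overflow("native_entry");
      break;
    }
  }
  return 0;
}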
diff --git a/hotspot/src/cpu/zero/vm/frame_zero.cpp b/hotspot/src/cpu/zero/vm/frame_zero.cpp
index 1b3cafdc589..323912e1cb3 100644
--- a/hotspot/src/cpu/zero/vm/frame_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/frame_zero.cpp
@@ -36,11 +36,8 @@ bool frame::is_interpreted_frame() const {
return zeroframe()->is_interpreter_frame();
}
-bool frame::is_fake_stub_frame() const {
- return zeroframe()->is_fake_stub_frame();
-}
-
frame frame::sender_for_entry_frame(RegisterMap *map) const {
+ assert(zeroframe()->is_entry_frame(), "wrong type of frame");
assert(map != NULL, "map must be set");
assert(!entry_frame_is_first(), "next Java fp must be non zero");
assert(entry_frame_call_wrapper()->anchor()->last_Java_sp() == sender_sp(),
@@ -50,15 +47,10 @@ frame frame::sender_for_entry_frame(RegisterMap *map) const {
return frame(sender_sp(), sp() + 1);
}
-frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
- return frame(sender_sp(), sp() + 1);
-}
-
-frame frame::sender_for_compiled_frame(RegisterMap *map) const {
- return frame(sender_sp(), sp() + 1);
-}
-
-frame frame::sender_for_fake_stub_frame(RegisterMap *map) const {
+frame frame::sender_for_nonentry_frame(RegisterMap *map) const {
+ assert(zeroframe()->is_interpreter_frame() ||
+ zeroframe()->is_shark_frame() ||
+ zeroframe()->is_fake_stub_frame(), "wrong type of frame");
return frame(sender_sp(), sp() + 1);
}
@@ -69,17 +61,8 @@ frame frame::sender(RegisterMap* map) const {
if (is_entry_frame())
return sender_for_entry_frame(map);
-
- if (is_interpreted_frame())
- return sender_for_interpreter_frame(map);
-
- if (is_compiled_frame())
- return sender_for_compiled_frame(map);
-
- if (is_fake_stub_frame())
- return sender_for_fake_stub_frame(map);
-
- ShouldNotReachHere();
+ else
+ return sender_for_nonentry_frame(map);
}
#ifdef CC_INTERP
diff --git a/hotspot/src/cpu/zero/vm/frame_zero.hpp b/hotspot/src/cpu/zero/vm/frame_zero.hpp
index 81b6314571d..84d248fe0c4 100644
--- a/hotspot/src/cpu/zero/vm/frame_zero.hpp
+++ b/hotspot/src/cpu/zero/vm/frame_zero.hpp
@@ -65,10 +65,7 @@
}
public:
- bool is_fake_stub_frame() const;
-
- public:
- frame sender_for_fake_stub_frame(RegisterMap* map) const;
+ frame sender_for_nonentry_frame(RegisterMap* map) const;
public:
void zero_print_on_error(int index,
diff --git a/hotspot/src/cpu/zero/vm/globals_zero.hpp b/hotspot/src/cpu/zero/vm/globals_zero.hpp
index 89cf7077dee..e8a32f1d47a 100644
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp
@@ -23,10 +23,8 @@
*
*/
-//
// Set the default values for platform dependent flags used by the
// runtime system. See globals.hpp for details of what they do.
-//
define_pd_global(bool, ConvertSleepToYield, true);
define_pd_global(bool, ShareVtableStubs, true);
@@ -37,14 +35,7 @@ define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, UncommonNullCast, true);
define_pd_global(intx, CodeEntryAlignment, 32);
-define_pd_global(uintx, TLABSize, 0);
-#ifdef _LP64
-define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
-#else
-define_pd_global(uintx, NewSize, ScaleForWordSize(1024 * K));
-#endif // _LP64
define_pd_global(intx, InlineFrequencyCount, 100);
-define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(intx, PreInflateSpin, 10);
define_pd_global(intx, StackYellowPages, 2);
diff --git a/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp b/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp
index 7bb4614980f..5adb87aef70 100644
--- a/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp
+++ b/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp
@@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
BasicType *in_sig_bt,
VMRegPair *in_regs,
BasicType ret_type) {
+#ifdef SHARK
+ return SharkCompiler::compiler()->generate_native_wrapper(masm,
+ method,
+ in_sig_bt,
+ ret_type);
+#else
ShouldNotCallThis();
+#endif // SHARK
}
int Deoptimization::last_frame_adjust(int callee_parameters,
diff --git a/hotspot/src/cpu/zero/vm/sharkFrame_zero.hpp b/hotspot/src/cpu/zero/vm/sharkFrame_zero.hpp
index 3337219a370..2473ebf7ae6 100644
--- a/hotspot/src/cpu/zero/vm/sharkFrame_zero.hpp
+++ b/hotspot/src/cpu/zero/vm/sharkFrame_zero.hpp
@@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
- * Copyright 2008 Red Hat, Inc.
+ * Copyright 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
// | ... |
class SharkFrame : public ZeroFrame {
- friend class SharkFunction;
+ friend class SharkStack;
private:
SharkFrame() : ZeroFrame() {
diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp
index 281e81c264f..a4c54456504 100644
--- a/hotspot/src/os/linux/vm/os_linux.cpp
+++ b/hotspot/src/os/linux/vm/os_linux.cpp
@@ -223,8 +223,8 @@ static const char *unstable_chroot_error = "/proc file system not found.\n"
"environment on Linux when /proc filesystem is not mounted.";
void os::Linux::initialize_system_info() {
- _processor_count = sysconf(_SC_NPROCESSORS_CONF);
- if (_processor_count == 1) {
+ set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
+ if (processor_count() == 1) {
pid_t pid = os::Linux::gettid();
char fname[32];
jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
@@ -236,7 +236,7 @@ void os::Linux::initialize_system_info() {
}
}
_physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
- assert(_processor_count > 0, "linux error");
+ assert(processor_count() > 0, "linux error");
}
void os::init_system_properties_values() {
@@ -4683,6 +4683,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// Return immediately if a permit is available.
if (_counter > 0) {
_counter = 0 ;
+ OrderAccess::fence();
return ;
}
@@ -4725,6 +4726,7 @@ void Parker::park(bool isAbsolute, jlong time) {
_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
+ OrderAccess::fence();
return;
}
@@ -4765,6 +4767,7 @@ void Parker::park(bool isAbsolute, jlong time) {
jt->java_suspend_self();
}
+ OrderAccess::fence();
}
void Parker::unpark() {
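
The OrderAccess::fence() calls added above give the plain store that clears _counter full ordering before park() returns. A minimal sketch of the pattern, assuming the intent is a StoreLoad barrier between consuming the permit and whatever the caller loads next; std::atomic_thread_fence(seq_cst) plays the role of OrderAccess::fence() here.

#include <atomic>

static std::atomic<int> _counter(0);            // the park permit
static std::atomic<bool> _shared_state(false);  // state published by the unparking thread

static bool park_fast_path() {
  if (_counter.load(std::memory_order_relaxed) > 0) {
    _counter.store(0, std::memory_order_relaxed);          // consume the permit
    std::atomic_thread_fence(std::memory_order_seq_cst);   // OrderAccess::fence()
    // Loads issued after this point are not reordered ahead of the store
    // to _counter, so the caller sees the state the unparker published.
    return _shared_state.load(std::memory_order_relaxed);
  }
  return false;
}

int main() {
  _shared_state.store(true, std::memory_order_relaxed);
  _counter.store(1, std::memory_order_relaxed);
  return park_fast_path() ? 0 : 1;
}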
diff --git a/hotspot/src/os/solaris/dtrace/libjvm_db.c b/hotspot/src/os/solaris/dtrace/libjvm_db.c
index b162f057b5b..7db194a517e 100644
--- a/hotspot/src/os/solaris/dtrace/libjvm_db.c
+++ b/hotspot/src/os/solaris/dtrace/libjvm_db.c
@@ -937,54 +937,56 @@ scope_desc_at(Nmethod_t *N, int32_t decode_offset, Vframe_t *vf)
return err;
}
-static int
-scopeDesc_chain(Nmethod_t *N)
-{
+static int scopeDesc_chain(Nmethod_t *N) {
int32_t decode_offset = 0;
int32_t err;
- if (debug > 2)
- fprintf(stderr, "\t scopeDesc_chain: BEGIN\n");
+ if (debug > 2) {
+ fprintf(stderr, "\t scopeDesc_chain: BEGIN\n");
+ }
err = ps_pread(N->J->P, N->pc_desc + OFFSET_PcDesc_scope_decode_offset,
&decode_offset, SZ32);
CHECK_FAIL(err);
while (decode_offset > 0) {
- if (debug > 2)
- fprintf(stderr, "\t scopeDesc_chain: decode_offset: %#x\n", decode_offset);
+ Vframe_t *vf = &N->vframes[N->vf_cnt];
- Vframe_t *vf = &N->vframes[N->vf_cnt];
+ if (debug > 2) {
+ fprintf(stderr, "\t scopeDesc_chain: decode_offset: %#x\n", decode_offset);
+ }
- err = scope_desc_at(N, decode_offset, vf);
+ err = scope_desc_at(N, decode_offset, vf);
+ CHECK_FAIL(err);
+
+ if (vf->methodIdx > N->oops_len) {
+ fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
+ return -1;
+ }
+ err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
+ &vf->methodOop);
+ CHECK_FAIL(err);
+
+ if (vf->methodOop) {
+ N->vf_cnt++;
+ err = line_number_from_bci(N->J, vf);
CHECK_FAIL(err);
-
- if (vf->methodIdx > N->oops_len) {
- fprintf(stderr, "\t scopeDesc_chain: (methodIdx > oops_len) !\n");
- return -1;
+ if (debug > 2) {
+ fprintf(stderr, "\t scopeDesc_chain: methodOop: %#8llx, line: %ld\n",
+ vf->methodOop, vf->line);
}
- err = read_pointer(N->J, N->nm + N->oops_beg + (vf->methodIdx-1)*POINTER_SIZE,
- &vf->methodOop);
- CHECK_FAIL(err);
-
- if (vf->methodOop) {
- N->vf_cnt++;
- err = line_number_from_bci(N->J, vf);
- CHECK_FAIL(err);
- if (debug > 2) {
- fprintf(stderr, "\t scopeDesc_chain: methodOop: %#8llx, line: %ld\n",
- vf->methodOop, vf->line);
- }
- }
- decode_offset = vf->sender_decode_offset;
+ }
+ decode_offset = vf->sender_decode_offset;
+ }
+ if (debug > 2) {
+ fprintf(stderr, "\t scopeDesc_chain: END \n\n");
}
- if (debug > 2)
- fprintf(stderr, "\t scopeDesc_chain: END \n\n");
return PS_OK;
fail:
- if (debug)
- fprintf(stderr, "\t scopeDesc_chain: FAIL \n\n");
+ if (debug) {
+ fprintf(stderr, "\t scopeDesc_chain: FAIL \n\n");
+ }
return err;
}
diff --git a/hotspot/src/os/solaris/vm/os_solaris.cpp b/hotspot/src/os/solaris/vm/os_solaris.cpp
index 13abbf9d4b9..2ef21349881 100644
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp
@@ -457,7 +457,7 @@ static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB
void os::Solaris::initialize_system_info() {
- _processor_count = sysconf(_SC_NPROCESSORS_CONF);
+ set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
_processors_online = sysconf (_SC_NPROCESSORS_ONLN);
_physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}
@@ -5803,6 +5803,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// Return immediately if a permit is available.
if (_counter > 0) {
_counter = 0 ;
+ OrderAccess::fence();
return ;
}
@@ -5846,6 +5847,7 @@ void Parker::park(bool isAbsolute, jlong time) {
_counter = 0;
status = os::Solaris::mutex_unlock(_mutex);
assert (status == 0, "invariant") ;
+ OrderAccess::fence();
return;
}
@@ -5892,6 +5894,7 @@ void Parker::park(bool isAbsolute, jlong time) {
jt->java_suspend_self();
}
+ OrderAccess::fence();
}
void Parker::unpark() {
diff --git a/hotspot/src/os/windows/vm/os_windows.cpp b/hotspot/src/os/windows/vm/os_windows.cpp
index 0c941d66db9..40f7bf5a95b 100644
--- a/hotspot/src/os/windows/vm/os_windows.cpp
+++ b/hotspot/src/os/windows/vm/os_windows.cpp
@@ -3150,7 +3150,7 @@ void os::win32::initialize_system_info() {
_vm_allocation_granularity = si.dwAllocationGranularity;
_processor_type = si.dwProcessorType;
_processor_level = si.wProcessorLevel;
- _processor_count = si.dwNumberOfProcessors;
+ set_processor_count(si.dwNumberOfProcessors);
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
diff --git a/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp b/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp
index a1cb9732ace..708cc3e085c 100644
--- a/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp
+++ b/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp
@@ -22,10 +22,9 @@
*
*/
-//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
-//
+
define_pd_global(bool, DontYieldALot, false);
#ifdef AMD64
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
@@ -39,11 +38,10 @@ define_pd_global(intx, VMThreadStackSize, 512);
#endif // AMD64
define_pd_global(intx, CompilerThreadStackSize, 0);
-define_pd_global(intx, SurvivorRatio, 8);
-define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
+define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
// Only used on 64 bit platforms
-define_pd_global(uintx, HeapBaseMinAddress, 2*G);
+define_pd_global(uintx,HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
diff --git a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
index 572702e7d6d..b9a140a5136 100644
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp
@@ -1,6 +1,6 @@
/*
* Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -239,7 +239,21 @@ void os::Linux::set_fpu_control_word(int fpu) {
}
bool os::is_allocatable(size_t bytes) {
- ShouldNotCallThis();
+#ifdef _LP64
+ return true;
+#else
+ if (bytes < 2 * G) {
+ return true;
+ }
+
+ char* addr = reserve_memory(bytes, NULL);
+
+ if (addr != NULL) {
+ release_memory(addr, bytes);
+ }
+
+ return addr != NULL;
+#endif // _LP64
}
///////////////////////////////////////////////////////////////////////////////
diff --git a/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp b/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp
index 4b51eef5ee0..4b2749bc193 100644
--- a/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp
+++ b/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp
@@ -22,31 +22,25 @@
*
*/
-//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
-//
+
define_pd_global(bool, DontYieldALot, true); // Determined in the design center
#ifdef AMD64
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 1024);
-define_pd_global(intx, SurvivorRatio, 6);
-define_pd_global(uintx, JVMInvokeMethodSlack, 8*K);
+define_pd_global(uintx,JVMInvokeMethodSlack, 8*K);
#else
-// UseStackBanging is not pd
-// define_pd_global(bool, UseStackBanging, true);
-
// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
// to run while keeping the number of threads that can be created high.
define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 512);
-define_pd_global(intx, SurvivorRatio, 8);
-define_pd_global(uintx, JVMInvokeMethodSlack, 10*K);
+define_pd_global(uintx,JVMInvokeMethodSlack, 10*K);
#endif // AMD64
define_pd_global(intx, CompilerThreadStackSize, 0);
// Only used on 64 bit platforms
-define_pd_global(uintx, HeapBaseMinAddress, 256*M);
+define_pd_global(uintx,HeapBaseMinAddress, 256*M);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
diff --git a/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp b/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp
index 300541e0e96..e5cf1dfe371 100644
--- a/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp
+++ b/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp
@@ -22,10 +22,9 @@
*
*/
-//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
-//
+
define_pd_global(bool, DontYieldALot, false);
// Default stack size on Windows is determined by the executable (java.exe
@@ -35,8 +34,6 @@ define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 0); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 0); // 0 => use system default
-define_pd_global(intx, SurvivorRatio, 8);
-
#ifdef ASSERT
define_pd_global(intx, CompilerThreadStackSize, 1024);
#else
diff --git a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
index caa99ded618..f567d6e120d 100644
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
@@ -365,7 +365,7 @@ void BlockListBuilder::make_loop_header(BlockBegin* block) {
if (_next_loop_index < 31) _next_loop_index++;
} else {
// block already marked as loop header
- assert(is_power_of_2(_loop_map.at(block->block_id())), "exactly one bit must be set");
+ assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
}
}
diff --git a/hotspot/src/share/vm/c1/c1_IR.hpp b/hotspot/src/share/vm/c1/c1_IR.hpp
index f7bbea2ff3b..e1af926aef1 100644
--- a/hotspot/src/share/vm/c1/c1_IR.hpp
+++ b/hotspot/src/share/vm/c1/c1_IR.hpp
@@ -251,8 +251,9 @@ class IRScopeDebugInfo: public CompilationResourceObj {
DebugToken* expvals = recorder->create_scope_values(expressions());
DebugToken* monvals = recorder->create_monitor_values(monitors());
// reexecute allowed only for the topmost frame
- bool reexecute = topmost ? should_reexecute() : false;
- recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, locvals, expvals, monvals);
+ bool reexecute = topmost ? should_reexecute() : false;
+ bool is_method_handle_invoke = false;
+ recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, locvals, expvals, monvals);
}
};
diff --git a/hotspot/src/share/vm/c1/c1_LIR.hpp b/hotspot/src/share/vm/c1/c1_LIR.hpp
index c3da44e2329..fb51de4ca2d 100644
--- a/hotspot/src/share/vm/c1/c1_LIR.hpp
+++ b/hotspot/src/share/vm/c1/c1_LIR.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2000,7 +2000,7 @@ class LIR_OpVisitState: public StackObj {
typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
enum {
- maxNumberOfOperands = 14,
+ maxNumberOfOperands = 16,
maxNumberOfInfos = 4
};
diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
index 8eb667dda29..a393028792d 100644
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -1855,12 +1855,26 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
} else {
#ifdef X86
+#ifdef _LP64
+ if (!index_op->is_illegal() && index_op->type() == T_INT) {
+ LIR_Opr tmp = new_pointer_register();
+ __ convert(Bytecodes::_i2l, index_op, tmp);
+ index_op = tmp;
+ }
+#endif
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#else
if (index_op->is_illegal() || log2_scale == 0) {
+#ifdef _LP64
+ if (!index_op->is_illegal() && index_op->type() == T_INT) {
+ LIR_Opr tmp = new_pointer_register();
+ __ convert(Bytecodes::_i2l, index_op, tmp);
+ index_op = tmp;
+ }
+#endif
addr = new LIR_Address(base_op, index_op, dst_type);
} else {
- LIR_Opr tmp = new_register(T_INT);
+ LIR_Opr tmp = new_pointer_register();
__ shift_left(index_op, log2_scale, tmp);
addr = new LIR_Address(base_op, tmp, dst_type);
}
@@ -1915,10 +1929,25 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
LIR_Opr index_op = idx.result();
if (log2_scale != 0) {
// temporary fix (platform dependent code without shift on Intel would be better)
- index_op = new_register(T_INT);
- __ move(idx.result(), index_op);
+ index_op = new_pointer_register();
+#ifdef _LP64
+ if(idx.result()->type() == T_INT) {
+ __ convert(Bytecodes::_i2l, idx.result(), index_op);
+ } else {
+#endif
+ __ move(idx.result(), index_op);
+#ifdef _LP64
+ }
+#endif
__ shift_left(index_op, log2_scale, index_op);
}
+#ifdef _LP64
+ else if(!index_op->is_illegal() && index_op->type() == T_INT) {
+ LIR_Opr tmp = new_pointer_register();
+ __ convert(Bytecodes::_i2l, index_op, tmp);
+ index_op = tmp;
+ }
+#endif
LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
__ move(value.result(), addr);
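
The _LP64 branches added above all exist for the same reason: a 32-bit index must be widened (Bytecodes::_i2l) before it is scaled and added to a 64-bit base, otherwise a negative or large index is effectively zero-extended and the computed address is wrong. A plain C++ illustration of the failure mode, with made-up values:

#include <cstdint>
#include <cstdio>

int main() {
  std::uint64_t base = 0x100000000ULL;   // some 64-bit base address value
  std::int32_t  idx  = -1;               // raw 32-bit index from the bytecode
  // Scaling by 8 (log2_scale == 3), as for long/double elements.

  // Wrong: scale in 32 bits, then widen; the negative result zero-extends.
  std::uint64_t wrong = base + (std::uint64_t)(std::uint32_t)(idx * 8);
  // Right: widen first (the i2l conversion), then scale in 64 bits.
  std::uint64_t right = base + (std::uint64_t)((std::int64_t)idx * 8);

  std::printf("wrong: 0x%llx\n", (unsigned long long)wrong);  // 0x1fffffff8
  std::printf("right: 0x%llx\n", (unsigned long long)right);  // 0xfffffff8
  return 0;
}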
diff --git a/hotspot/src/share/vm/c1/c1_LinearScan.cpp b/hotspot/src/share/vm/c1/c1_LinearScan.cpp
index bab43e2ad21..ab049832121 100644
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp
@@ -2464,6 +2464,10 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
+#ifdef _LP64
+      scope_values->append(&_int_0_scope_value);
+ scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
+#else
if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
@@ -2471,7 +2475,7 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
      } else {
        scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
}
-
+#endif
return 2;
}
@@ -2503,17 +2507,18 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
  } else if (opr->is_single_cpu()) {
bool is_oop = opr->is_oop_register();
int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
+ Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);
ScopeValue* sv = _scope_value_cache.at(cache_idx);
if (sv == NULL) {
- Location::Type loc_type = is_oop ? Location::oop : Location::normal;
+ Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
VMReg rname = frame_map()->regname(opr);
sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
_scope_value_cache.at_put(cache_idx, sv);
}
// check if cached value is correct
- DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : Location::normal, frame_map()->regname(opr)))));
+ DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));
scope_values->append(sv);
return 1;
diff --git a/hotspot/src/share/vm/c1/c1_Runtime1.cpp b/hotspot/src/share/vm/c1/c1_Runtime1.cpp
index 986cfd28561..9093885ce45 100644
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp
@@ -425,7 +425,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
assert(exception->is_oop(), "just checking");
// Check that exception is a subclass of Throwable, otherwise we have a VerifyError
- if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+ if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
if (ExitVMOnVerifyError) vm_exit(-1);
ShouldNotReachHere();
}
diff --git a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp
index 8c5122c1486..86de9695cc5 100644
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp
+++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp
@@ -61,9 +61,11 @@ class BCEscapeAnalyzer : public ResourceObj {
BCEscapeAnalyzer* _parent;
int _level;
+ public:
class ArgumentMap;
class StateInfo;
+ private:
// helper functions
bool is_argument(int i) { return i >= 0 && i < _arg_size; }
diff --git a/hotspot/src/share/vm/ci/ciCPCache.cpp b/hotspot/src/share/vm/ci/ciCPCache.cpp
new file mode 100644
index 00000000000..87bd409a615
--- /dev/null
+++ b/hotspot/src/share/vm/ci/ciCPCache.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciCPCache.cpp.incl"
+
+// ciCPCache
+
+// ------------------------------------------------------------------
+// ciCPCache::get_f1_offset
+size_t ciCPCache::get_f1_offset(int index) {
+ // Calculate the offset from the constantPoolCacheOop to the f1
+ // field.
+ ByteSize f1_offset =
+ constantPoolCacheOopDesc::entry_offset(index) +
+ ConstantPoolCacheEntry::f1_offset();
+
+ return in_bytes(f1_offset);
+}
+
+
+// ------------------------------------------------------------------
+// ciCPCache::print
+//
+// Print debugging information about the cache.
+void ciCPCache::print() {
+ Unimplemented();
+}
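
A rough standalone model of what get_f1_offset() is for, under the assumption that the compiler adds the returned byte offset to the constantPoolCacheOop in order to load the f1 word, which for invokedynamic holds the CallSite. The struct layout below is invented for illustration; the real offsets come from constantPoolCacheOopDesc::entry_offset() and ConstantPoolCacheEntry::f1_offset().

#include <cstddef>
#include <cstdio>

struct CacheEntry { void* f1; void* f2; long flags; long indices; };  // illustrative layout
struct CPCache    { long header[2]; CacheEntry entries[4]; };         // illustrative layout

static std::size_t get_f1_offset(int index) {
  return offsetof(CPCache, entries) + index * sizeof(CacheEntry)
       + offsetof(CacheEntry, f1);
}

int main() {
  CPCache cache = {};
  int index = 2;
  int call_site_stand_in = 42;
  cache.entries[index].f1 = &call_site_stand_in;

  // Generated code would do the equivalent of: load [cpcache + get_f1_offset(index)].
  char* raw = reinterpret_cast<char*>(&cache);
  void* loaded = *reinterpret_cast<void**>(raw + get_f1_offset(index));
  std::printf("loaded f1 matches: %d\n", loaded == &call_site_stand_in);
  return 0;
}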
diff --git a/hotspot/src/share/vm/ci/ciCPCache.hpp b/hotspot/src/share/vm/ci/ciCPCache.hpp
new file mode 100644
index 00000000000..48e0c3b8fe7
--- /dev/null
+++ b/hotspot/src/share/vm/ci/ciCPCache.hpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciCPCache
+//
+// This class represents a constant pool cache.
+//
+// Note: This class is called ciCPCache as ciConstantPoolCache is used
+// for something different.
+class ciCPCache : public ciObject {
+public:
+ ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {}
+
+ // What kind of ciObject is this?
+ bool is_cpcache() const { return true; }
+
+ // Get the offset in bytes from the oop to the f1 field of the
+ // requested entry.
+ size_t get_f1_offset(int index);
+
+ void print();
+};
diff --git a/hotspot/src/share/vm/ci/ciCallSite.cpp b/hotspot/src/share/vm/ci/ciCallSite.cpp
new file mode 100644
index 00000000000..541432b914b
--- /dev/null
+++ b/hotspot/src/share/vm/ci/ciCallSite.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciCallSite.cpp.incl"
+
+// ciCallSite
+
+// ------------------------------------------------------------------
+// ciCallSite::get_target
+//
+// Return the target MethodHandle of this CallSite.
+ciMethodHandle* ciCallSite::get_target() const {
+ VM_ENTRY_MARK;
+ oop method_handle_oop = java_dyn_CallSite::target(get_oop());
+ return CURRENT_ENV->get_object(method_handle_oop)->as_method_handle();
+}
+
+// ------------------------------------------------------------------
+// ciCallSite::print
+//
+// Print debugging information about the CallSite.
+void ciCallSite::print() {
+ Unimplemented();
+}
diff --git a/hotspot/src/share/vm/ci/ciCallSite.hpp b/hotspot/src/share/vm/ci/ciCallSite.hpp
new file mode 100644
index 00000000000..3700ad54430
--- /dev/null
+++ b/hotspot/src/share/vm/ci/ciCallSite.hpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciCallSite
+//
+// The class represents a java.dyn.CallSite object.
+class ciCallSite : public ciInstance {
+public:
+ ciCallSite(instanceHandle h_i) : ciInstance(h_i) {}
+
+ // What kind of ciObject is this?
+ bool is_call_site() const { return true; }
+
+ // Return the target MethodHandle of this CallSite.
+ ciMethodHandle* get_target() const;
+
+ void print();
+};
diff --git a/hotspot/src/share/vm/ci/ciClassList.hpp b/hotspot/src/share/vm/ci/ciClassList.hpp
index f6a534477e2..5dc67b2c779 100644
--- a/hotspot/src/share/vm/ci/ciClassList.hpp
+++ b/hotspot/src/share/vm/ci/ciClassList.hpp
@@ -25,6 +25,7 @@
class ciEnv;
class ciObjectFactory;
class ciConstantPoolCache;
+class ciCPCache;
class ciField;
class ciConstant;
@@ -42,6 +43,8 @@ class ciTypeFlow;
class ciObject;
class ciNullObject;
class ciInstance;
+class ciCallSite;
+class ciMethodHandle;
class ciMethod;
class ciMethodData;
class ciReceiverTypeData; // part of ciMethodData
@@ -78,6 +81,7 @@ friend class ciObjectFactory;
// Any more access must be given explicitly.
#define CI_PACKAGE_ACCESS_TO \
friend class ciObjectFactory; \
+friend class ciCallSite; \
friend class ciConstantPoolCache; \
friend class ciField; \
friend class ciConstant; \
@@ -93,6 +97,7 @@ friend class ciNullObject; \
friend class ciInstance; \
friend class ciMethod; \
friend class ciMethodData; \
+friend class ciMethodHandle; \
friend class ciReceiverTypeData; \
friend class ciSymbol; \
friend class ciArray; \
diff --git a/hotspot/src/share/vm/ci/ciEnv.cpp b/hotspot/src/share/vm/ci/ciEnv.cpp
index b0a17b35c2e..e09c66a74dd 100644
--- a/hotspot/src/share/vm/ci/ciEnv.cpp
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp
@@ -38,14 +38,9 @@ ciInstanceKlassKlass* ciEnv::_instance_klass_klass_instance;
ciTypeArrayKlassKlass* ciEnv::_type_array_klass_klass_instance;
ciObjArrayKlassKlass* ciEnv::_obj_array_klass_klass_instance;
-ciInstanceKlass* ciEnv::_ArrayStoreException;
-ciInstanceKlass* ciEnv::_Class;
-ciInstanceKlass* ciEnv::_ClassCastException;
-ciInstanceKlass* ciEnv::_Object;
-ciInstanceKlass* ciEnv::_Throwable;
-ciInstanceKlass* ciEnv::_Thread;
-ciInstanceKlass* ciEnv::_OutOfMemoryError;
-ciInstanceKlass* ciEnv::_String;
+#define WK_KLASS_DEFN(name, ignore_s, ignore_o) ciInstanceKlass* ciEnv::_##name = NULL;
+WK_KLASSES_DO(WK_KLASS_DEFN)
+#undef WK_KLASS_DEFN
ciSymbol* ciEnv::_unloaded_cisymbol = NULL;
ciInstanceKlass* ciEnv::_unloaded_ciinstance_klass = NULL;
@@ -110,6 +105,8 @@ ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
_ArrayIndexOutOfBoundsException_instance = NULL;
_ArrayStoreException_instance = NULL;
_ClassCastException_instance = NULL;
+ _the_null_string = NULL;
+ _the_min_jint_string = NULL;
}
ciEnv::ciEnv(Arena* arena) {
@@ -163,6 +160,8 @@ ciEnv::ciEnv(Arena* arena) {
_ArrayIndexOutOfBoundsException_instance = NULL;
_ArrayStoreException_instance = NULL;
_ClassCastException_instance = NULL;
+ _the_null_string = NULL;
+ _the_min_jint_string = NULL;
}
ciEnv::~ciEnv() {
@@ -248,6 +247,22 @@ ciInstance* ciEnv::ClassCastException_instance() {
return _ClassCastException_instance;
}
+ciInstance* ciEnv::the_null_string() {
+ if (_the_null_string == NULL) {
+ VM_ENTRY_MARK;
+ _the_null_string = get_object(Universe::the_null_string())->as_instance();
+ }
+ return _the_null_string;
+}
+
+ciInstance* ciEnv::the_min_jint_string() {
+ if (_the_min_jint_string == NULL) {
+ VM_ENTRY_MARK;
+ _the_min_jint_string = get_object(Universe::the_min_jint_string())->as_instance();
+ }
+ return _the_min_jint_string;
+}
+
// ------------------------------------------------------------------
// ciEnv::get_method_from_handle
ciMethod* ciEnv::get_method_from_handle(jobject method) {
@@ -419,12 +434,11 @@ ciKlass* ciEnv::get_klass_by_name(ciKlass* accessing_klass,
// ciEnv::get_klass_by_index_impl
//
// Implementation of get_klass_by_index.
-ciKlass* ciEnv::get_klass_by_index_impl(ciInstanceKlass* accessor,
+ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool,
int index,
- bool& is_accessible) {
- assert(accessor->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
+ bool& is_accessible,
+ ciInstanceKlass* accessor) {
EXCEPTION_CONTEXT;
- constantPoolHandle cpool(THREAD, accessor->get_instanceKlass()->constants());
KlassHandle klass (THREAD, constantPoolOopDesc::klass_at_if_loaded(cpool, index));
symbolHandle klass_name;
if (klass.is_null()) {
@@ -486,22 +500,21 @@ ciKlass* ciEnv::get_klass_by_index_impl(ciInstanceKlass* accessor,
// ciEnv::get_klass_by_index
//
// Get a klass from the constant pool.
-ciKlass* ciEnv::get_klass_by_index(ciInstanceKlass* accessor,
+ciKlass* ciEnv::get_klass_by_index(constantPoolHandle cpool,
int index,
- bool& is_accessible) {
- GUARDED_VM_ENTRY(return get_klass_by_index_impl(accessor, index, is_accessible);)
+ bool& is_accessible,
+ ciInstanceKlass* accessor) {
+ GUARDED_VM_ENTRY(return get_klass_by_index_impl(cpool, index, is_accessible, accessor);)
}
// ------------------------------------------------------------------
// ciEnv::get_constant_by_index_impl
//
// Implementation of get_constant_by_index().
-ciConstant ciEnv::get_constant_by_index_impl(ciInstanceKlass* accessor,
- int index) {
+ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
+ int index,
+ ciInstanceKlass* accessor) {
EXCEPTION_CONTEXT;
- instanceKlass* ik_accessor = accessor->get_instanceKlass();
- assert(ik_accessor->is_linked(), "must be linked before accessing constant pool");
- constantPoolOop cpool = ik_accessor->constants();
constantTag tag = cpool->tag_at(index);
if (tag.is_int()) {
return ciConstant(T_INT, (jint)cpool->int_at(index));
@@ -529,7 +542,7 @@ ciConstant ciEnv::get_constant_by_index_impl(ciInstanceKlass* accessor,
} else if (tag.is_klass() || tag.is_unresolved_klass()) {
// 4881222: allow ldc to take a class type
bool ignore;
- ciKlass* klass = get_klass_by_index_impl(accessor, index, ignore);
+ ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore, accessor);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
record_out_of_memory_failure();
@@ -538,6 +551,11 @@ ciConstant ciEnv::get_constant_by_index_impl(ciInstanceKlass* accessor,
assert (klass->is_instance_klass() || klass->is_array_klass(),
"must be an instance or array klass ");
return ciConstant(T_OBJECT, klass);
+ } else if (tag.is_object()) {
+ oop obj = cpool->object_at(index);
+ assert(obj->is_instance(), "must be an instance");
+ ciObject* ciobj = get_object(obj);
+ return ciConstant(T_OBJECT, ciobj);
} else {
ShouldNotReachHere();
return ciConstant();
@@ -574,9 +592,10 @@ bool ciEnv::is_unresolved_klass_impl(instanceKlass* accessor, int index) const {
// Pull a constant out of the constant pool. How appropriate.
//
// Implementation note: this query is currently in no way cached.
-ciConstant ciEnv::get_constant_by_index(ciInstanceKlass* accessor,
- int index) {
- GUARDED_VM_ENTRY(return get_constant_by_index_impl(accessor, index); )
+ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool,
+ int index,
+ ciInstanceKlass* accessor) {
+ GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, index, accessor);)
}
// ------------------------------------------------------------------
@@ -586,7 +605,7 @@ ciConstant ciEnv::get_constant_by_index(ciInstanceKlass* accessor,
//
// Implementation note: this query is currently in no way cached.
bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor,
- int index) const {
+ int index) const {
GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); )
}
@@ -597,7 +616,7 @@ bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor,
//
// Implementation note: this query is currently in no way cached.
bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor,
- int index) const {
+ int index) const {
GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); )
}
@@ -678,22 +697,17 @@ methodOop ciEnv::lookup_method(instanceKlass* accessor,
// ------------------------------------------------------------------
// ciEnv::get_method_by_index_impl
-ciMethod* ciEnv::get_method_by_index_impl(ciInstanceKlass* accessor,
- int index, Bytecodes::Code bc) {
- // Get the method's declared holder.
-
- assert(accessor->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
- constantPoolHandle cpool = accessor->get_instanceKlass()->constants();
+ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
+ int index, Bytecodes::Code bc,
+ ciInstanceKlass* accessor) {
int holder_index = cpool->klass_ref_index_at(index);
bool holder_is_accessible;
- ciKlass* holder = get_klass_by_index_impl(accessor, holder_index, holder_is_accessible);
+ ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
// Get the method's name and signature.
- int nt_index = cpool->name_and_type_ref_index_at(index);
- int sig_index = cpool->signature_ref_index_at(nt_index);
symbolOop name_sym = cpool->name_ref_at(index);
- symbolOop sig_sym = cpool->symbol_at(sig_index);
+ symbolOop sig_sym = cpool->signature_ref_at(index);
if (holder_is_accessible) { // Our declared holder is loaded.
instanceKlass* lookup = declared_holder->get_instanceKlass();
@@ -714,6 +728,33 @@ ciMethod* ciEnv::get_method_by_index_impl(ciInstanceKlass* accessor,
}
+// ------------------------------------------------------------------
+// ciEnv::get_fake_invokedynamic_method_impl
+ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
+ int index, Bytecodes::Code bc) {
+ assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
+
+ // Get the CallSite from the constant pool cache.
+ ConstantPoolCacheEntry* cpc_entry = cpool->cache()->secondary_entry_at(index);
+ assert(cpc_entry != NULL && cpc_entry->is_secondary_entry(), "sanity");
+ Handle call_site = cpc_entry->f1();
+
+ // Call site might not be linked yet.
+ if (call_site.is_null()) {
+ ciInstanceKlass* mh_klass = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
+ ciSymbol* sig_sym = get_object(cpool->signature_ref_at(index))->as_symbol();
+ return get_unloaded_method(mh_klass, ciSymbol::invoke_name(), sig_sym);
+ }
+
+ // Get the methodOop from the CallSite.
+ methodOop method_oop = (methodOop) java_dyn_CallSite::vmmethod(call_site());
+ assert(method_oop != NULL, "sanity");
+ assert(method_oop->is_method_handle_invoke(), "consistent");
+
+ return get_object(method_oop)->as_method();
+}
+
+
// ------------------------------------------------------------------
// ciEnv::get_instance_klass_for_declared_method_holder
ciInstanceKlass* ciEnv::get_instance_klass_for_declared_method_holder(ciKlass* method_holder) {
@@ -736,15 +777,19 @@ ciInstanceKlass* ciEnv::get_instance_klass_for_declared_method_holder(ciKlass* m
}
-
-
// ------------------------------------------------------------------
// ciEnv::get_method_by_index
-ciMethod* ciEnv::get_method_by_index(ciInstanceKlass* accessor,
- int index, Bytecodes::Code bc) {
- GUARDED_VM_ENTRY(return get_method_by_index_impl(accessor, index, bc);)
+ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool,
+ int index, Bytecodes::Code bc,
+ ciInstanceKlass* accessor) {
+ if (bc == Bytecodes::_invokedynamic) {
+ GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc);)
+ } else {
+ GUARDED_VM_ENTRY(return get_method_by_index_impl(cpool, index, bc, accessor);)
+ }
}
+
// ------------------------------------------------------------------
// ciEnv::name_buffer
char *ciEnv::name_buffer(int req_len) {
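
A standalone model of the lookup order introduced by get_fake_invokedynamic_method_impl above: if the call site has not been linked yet, hand back a placeholder unloaded MethodHandle::invoke method; otherwise use the method already recorded on the CallSite. Names and types are simplified stand-ins, not the ci interfaces.

#include <cstdio>

struct Method   { const char* name; bool unloaded; };
struct CallSite { Method* vmmethod; };

static Method* lookup_invokedynamic(CallSite* call_site) {
  if (call_site == nullptr) {                      // call_site.is_null()
    static Method placeholder = { "MethodHandle.invoke (unloaded)", true };
    return &placeholder;                           // get_unloaded_method(mh_klass, invoke_name, sig)
  }
  return call_site->vmmethod;                      // java_dyn_CallSite::vmmethod(call_site())
}

int main() {
  Method target = { "linked target", false };
  CallSite linked = { &target };
  std::printf("%s\n", lookup_invokedynamic(nullptr)->name);
  std::printf("%s\n", lookup_invokedynamic(&linked)->name);
  return 0;
}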
diff --git a/hotspot/src/share/vm/ci/ciEnv.hpp b/hotspot/src/share/vm/ci/ciEnv.hpp
index e855dbf9e4f..63b5ffe57bf 100644
--- a/hotspot/src/share/vm/ci/ciEnv.hpp
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp
@@ -74,14 +74,9 @@ private:
static ciTypeArrayKlassKlass* _type_array_klass_klass_instance;
static ciObjArrayKlassKlass* _obj_array_klass_klass_instance;
- static ciInstanceKlass* _ArrayStoreException;
- static ciInstanceKlass* _Class;
- static ciInstanceKlass* _ClassCastException;
- static ciInstanceKlass* _Object;
- static ciInstanceKlass* _Throwable;
- static ciInstanceKlass* _Thread;
- static ciInstanceKlass* _OutOfMemoryError;
- static ciInstanceKlass* _String;
+#define WK_KLASS_DECL(name, ignore_s, ignore_o) static ciInstanceKlass* _##name;
+ WK_KLASSES_DO(WK_KLASS_DECL)
+#undef WK_KLASS_DECL
static ciSymbol* _unloaded_cisymbol;
static ciInstanceKlass* _unloaded_ciinstance_klass;
@@ -97,6 +92,9 @@ private:
ciInstance* _ArrayStoreException_instance;
ciInstance* _ClassCastException_instance;
+ ciInstance* _the_null_string; // The Java string "null"
+ ciInstance* _the_min_jint_string; // The Java string "-2147483648"
+
// Look up a klass by name from a particular class loader (the accessor's).
// If require_local, result must be defined in that class loader, or NULL.
// If !require_local, a result from remote class loader may be reported,
@@ -114,37 +112,45 @@ private:
bool require_local);
// Constant pool access.
- ciKlass* get_klass_by_index(ciInstanceKlass* loading_klass,
+ ciKlass* get_klass_by_index(constantPoolHandle cpool,
int klass_index,
- bool& is_accessible);
- ciConstant get_constant_by_index(ciInstanceKlass* loading_klass,
- int constant_index);
+ bool& is_accessible,
+ ciInstanceKlass* loading_klass);
+ ciConstant get_constant_by_index(constantPoolHandle cpool,
+ int constant_index,
+ ciInstanceKlass* accessor);
bool is_unresolved_string(ciInstanceKlass* loading_klass,
int constant_index) const;
bool is_unresolved_klass(ciInstanceKlass* loading_klass,
int constant_index) const;
ciField* get_field_by_index(ciInstanceKlass* loading_klass,
int field_index);
- ciMethod* get_method_by_index(ciInstanceKlass* loading_klass,
- int method_index, Bytecodes::Code bc);
+ ciMethod* get_method_by_index(constantPoolHandle cpool,
+ int method_index, Bytecodes::Code bc,
+ ciInstanceKlass* loading_klass);
// Implementation methods for loading and constant pool access.
ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
ciSymbol* klass_name,
bool require_local);
- ciKlass* get_klass_by_index_impl(ciInstanceKlass* loading_klass,
+ ciKlass* get_klass_by_index_impl(constantPoolHandle cpool,
int klass_index,
- bool& is_accessible);
- ciConstant get_constant_by_index_impl(ciInstanceKlass* loading_klass,
- int constant_index);
+ bool& is_accessible,
+ ciInstanceKlass* loading_klass);
+ ciConstant get_constant_by_index_impl(constantPoolHandle cpool,
+ int constant_index,
+ ciInstanceKlass* loading_klass);
bool is_unresolved_string_impl (instanceKlass* loading_klass,
int constant_index) const;
bool is_unresolved_klass_impl (instanceKlass* loading_klass,
int constant_index) const;
ciField* get_field_by_index_impl(ciInstanceKlass* loading_klass,
int field_index);
- ciMethod* get_method_by_index_impl(ciInstanceKlass* loading_klass,
- int method_index, Bytecodes::Code bc);
+ ciMethod* get_method_by_index_impl(constantPoolHandle cpool,
+ int method_index, Bytecodes::Code bc,
+ ciInstanceKlass* loading_klass);
+ ciMethod* get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
+ int index, Bytecodes::Code bc);
// Helper methods
bool check_klass_accessibility(ciKlass* accessing_klass,
@@ -286,30 +292,13 @@ public:
// Access to certain well known ciObjects.
- ciInstanceKlass* ArrayStoreException_klass() {
- return _ArrayStoreException;
- }
- ciInstanceKlass* Class_klass() {
- return _Class;
- }
- ciInstanceKlass* ClassCastException_klass() {
- return _ClassCastException;
- }
- ciInstanceKlass* Object_klass() {
- return _Object;
- }
- ciInstanceKlass* Throwable_klass() {
- return _Throwable;
- }
- ciInstanceKlass* Thread_klass() {
- return _Thread;
- }
- ciInstanceKlass* OutOfMemoryError_klass() {
- return _OutOfMemoryError;
- }
- ciInstanceKlass* String_klass() {
- return _String;
+#define WK_KLASS_FUNC(name, ignore_s, ignore_o) \
+ ciInstanceKlass* name() { \
+ return _##name;\
}
+ WK_KLASSES_DO(WK_KLASS_FUNC)
+#undef WK_KLASS_FUNC
+
ciInstance* NullPointerException_instance() {
assert(_NullPointerException_instance != NULL, "initialization problem");
return _NullPointerException_instance;
@@ -324,6 +313,9 @@ public:
ciInstance* ArrayStoreException_instance();
ciInstance* ClassCastException_instance();
+ ciInstance* the_null_string();
+ ciInstance* the_min_jint_string();
+
static ciSymbol* unloaded_cisymbol() {
return _unloaded_cisymbol;
}
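The two macro blocks above replace the hand-maintained list of well-known klasses with an X-macro: WK_KLASSES_DO is invoked once with WK_KLASS_DECL to declare the cached fields and once with WK_KLASS_FUNC to generate the accessors, so adding a klass to the central list updates both at once. A simplified, self-contained illustration of the pattern (one macro argument instead of the three the real WK_KLASSES_DO passes):

#include <cstdio>

struct Klass { const char* name; };

#define DEMO_KLASSES_DO(f) f(Object) f(String) f(Throwable)

struct WellKnown {
  // One cached field per well-known klass ...
#define WK_DECL(name) static Klass* _##name;
  DEMO_KLASSES_DO(WK_DECL)
#undef WK_DECL
  // ... and one accessor per klass, generated from the same list.
#define WK_FUNC(name) static Klass* name() { return _##name; }
  DEMO_KLASSES_DO(WK_FUNC)
#undef WK_FUNC
};

#define WK_DEF(name) Klass* WellKnown::_##name = nullptr;
DEMO_KLASSES_DO(WK_DEF)
#undef WK_DEF

int main() {
  Klass str = { "java/lang/String" };
  WellKnown::_String = &str;
  std::printf("%s\n", WellKnown::String()->name);
}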
diff --git a/hotspot/src/share/vm/ci/ciExceptionHandler.cpp b/hotspot/src/share/vm/ci/ciExceptionHandler.cpp
index 209f00e5b86..79f6ccec50f 100644
--- a/hotspot/src/share/vm/ci/ciExceptionHandler.cpp
+++ b/hotspot/src/share/vm/ci/ciExceptionHandler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,12 +34,16 @@
//
// Get the exception klass that this handler catches.
ciInstanceKlass* ciExceptionHandler::catch_klass() {
+ VM_ENTRY_MARK;
assert(!is_catch_all(), "bad index");
if (_catch_klass == NULL) {
bool will_link;
- ciKlass* k = CURRENT_ENV->get_klass_by_index(_loading_klass,
+ assert(_loading_klass->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
+ constantPoolHandle cpool(_loading_klass->get_instanceKlass()->constants());
+ ciKlass* k = CURRENT_ENV->get_klass_by_index(cpool,
_catch_klass_index,
- will_link);
+ will_link,
+ _loading_klass);
if (!will_link && k->is_loaded()) {
GUARDED_VM_ENTRY(
k = CURRENT_ENV->get_unloaded_klass(_loading_klass, k->name());
diff --git a/hotspot/src/share/vm/ci/ciField.cpp b/hotspot/src/share/vm/ci/ciField.cpp
index ed66c1781c4..39a5651dc68 100644
--- a/hotspot/src/share/vm/ci/ciField.cpp
+++ b/hotspot/src/share/vm/ci/ciField.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,7 +86,7 @@ ciField::ciField(ciInstanceKlass* klass, int index): _known_to_link_with(NULL) {
bool ignore;
// This is not really a class reference; the index always refers to the
// field's type signature, as a symbol. Linkage checks do not apply.
- _type = ciEnv::current(thread)->get_klass_by_index(klass, sig_index, ignore);
+ _type = ciEnv::current(thread)->get_klass_by_index(cpool, sig_index, ignore, klass);
} else {
_type = ciType::make(field_type);
}
@@ -100,9 +100,9 @@ ciField::ciField(ciInstanceKlass* klass, int index): _known_to_link_with(NULL) {
int holder_index = cpool->klass_ref_index_at(index);
bool holder_is_accessible;
ciInstanceKlass* declared_holder =
- ciEnv::current(thread)->get_klass_by_index(klass, holder_index,
- holder_is_accessible)
- ->as_instance_klass();
+ ciEnv::current(thread)->get_klass_by_index(cpool, holder_index,
+ holder_is_accessible,
+ klass)->as_instance_klass();
// The declared holder of this field may not have been loaded.
// Bail out with partial field information.
@@ -161,6 +161,18 @@ ciField::ciField(fieldDescriptor *fd): _known_to_link_with(NULL) {
"bootstrap classes must not create & cache unshared fields");
}
+static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
+ if (holder == NULL)
+ return false;
+ if (holder->name() == ciSymbol::java_lang_System())
+ // Never trust strangely unstable finals: System.out, etc.
+ return false;
+ // Even if general trusting is disabled, trust system-built closures in these packages.
+ if (holder->is_in_package("java/dyn") || holder->is_in_package("sun/dyn"))
+ return true;
+ return TrustFinalNonStaticFields;
+}
+
void ciField::initialize_from(fieldDescriptor* fd) {
// Get the flags, offset, and canonical holder of the field.
_flags = ciFlags(fd->access_flags());
@@ -168,8 +180,18 @@ void ciField::initialize_from(fieldDescriptor* fd) {
_holder = CURRENT_ENV->get_object(fd->field_holder())->as_instance_klass();
// Check to see if the field is constant.
- if (_holder->is_initialized() &&
- this->is_final() && this->is_static()) {
+ if (_holder->is_initialized() && this->is_final()) {
+ if (!this->is_static()) {
+ // A field can be constant if it's a final static field or if it's
+ // a final non-static field of a trusted class ({java,sun}.dyn).
+ if (trust_final_non_static_fields(_holder)) {
+ _is_constant = true;
+ return;
+ }
+ _is_constant = false;
+ return;
+ }
+
// This field just may be constant. The only cases where it will
// not be constant are:
//
@@ -182,8 +204,8 @@ void ciField::initialize_from(fieldDescriptor* fd) {
// java.lang.System.out, and java.lang.System.err.
klassOop k = _holder->get_klassOop();
- assert( SystemDictionary::system_klass() != NULL, "Check once per vm");
- if( k == SystemDictionary::system_klass() ) {
+ assert( SystemDictionary::System_klass() != NULL, "Check once per vm");
+ if( k == SystemDictionary::System_klass() ) {
// Check offsets for case 2: System.in, System.out, or System.err
if( _offset == java_lang_System::in_offset_in_bytes() ||
_offset == java_lang_System::out_offset_in_bytes() ||
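The new trust_final_non_static_fields() predicate above decides when the compiler may treat a final instance field as a constant: never for java.lang.System (whose final streams are reset through the VM), always for the java/dyn and sun/dyn method-handle implementation packages, and otherwise only when the TrustFinalNonStaticFields flag is set. A plain-string restatement of that policy, offered as a sketch rather than the HotSpot code:

#include <cstring>
#include <cstdio>

static bool starts_with(const char* s, const char* prefix) {
  return std::strncmp(s, prefix, std::strlen(prefix)) == 0;
}

// 'trust_all' stands in for the TrustFinalNonStaticFields flag.
bool trust_final_non_static(const char* holder_name, bool trust_all) {
  if (holder_name == nullptr) return false;
  // System.in / System.out / System.err are final but mutated after init.
  if (std::strcmp(holder_name, "java/lang/System") == 0) return false;
  // Method-handle implementation classes are always trusted.
  if (starts_with(holder_name, "java/dyn/") || starts_with(holder_name, "sun/dyn/"))
    return true;
  return trust_all;
}

int main() {
  std::printf("%d %d\n",
              trust_final_non_static("java/dyn/MethodHandle", false),   // 1
              trust_final_non_static("java/lang/System", true));        // 0
}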
diff --git a/hotspot/src/share/vm/ci/ciField.hpp b/hotspot/src/share/vm/ci/ciField.hpp
index 193d848d41c..ffe1f925e2c 100644
--- a/hotspot/src/share/vm/ci/ciField.hpp
+++ b/hotspot/src/share/vm/ci/ciField.hpp
@@ -138,10 +138,18 @@ public:
// Get the constant value of this field.
ciConstant constant_value() {
- assert(is_constant(), "illegal call to constant_value()");
+ assert(is_static() && is_constant(), "illegal call to constant_value()");
return _constant_value;
}
+ // Get the constant value of non-static final field in the given
+ // object.
+ ciConstant constant_value_of(ciObject* object) {
+ assert(!is_static() && is_constant(), "only if field is non-static constant");
+ assert(object->is_instance(), "must be instance");
+ return object->as_instance()->field_value(this);
+ }
+
// Check for link time errors. Accessing a field from a
// certain class via a certain bytecode may or may not be legal.
// This call checks to see if an exception may be raised by
diff --git a/hotspot/src/share/vm/ci/ciInstance.cpp b/hotspot/src/share/vm/ci/ciInstance.cpp
index 9d07a4a6229..c377a739ba3 100644
--- a/hotspot/src/share/vm/ci/ciInstance.cpp
+++ b/hotspot/src/share/vm/ci/ciInstance.cpp
@@ -36,7 +36,7 @@ ciType* ciInstance::java_mirror_type() {
VM_ENTRY_MARK;
oop m = get_oop();
// Return NULL if it is not java.lang.Class.
- if (m == NULL || m->klass() != SystemDictionary::class_klass()) {
+ if (m == NULL || m->klass() != SystemDictionary::Class_klass()) {
return NULL;
}
// Return either a primitive type or a klass.
diff --git a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp
index 1053727a93f..60fabd0bac8 100644
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp
@@ -75,7 +75,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
_java_mirror = NULL;
if (is_shared()) {
- if (h_k() != SystemDictionary::object_klass()) {
+ if (h_k() != SystemDictionary::Object_klass()) {
super();
}
java_mirror();
@@ -232,8 +232,48 @@ bool ciInstanceKlass::is_java_lang_Object() {
// ------------------------------------------------------------------
// ciInstanceKlass::uses_default_loader
bool ciInstanceKlass::uses_default_loader() {
- VM_ENTRY_MARK;
- return loader() == NULL;
+ // Note: We do not need to resolve the handle or enter the VM
+ // in order to test null-ness.
+ return _loader == NULL;
+}
+
+// ------------------------------------------------------------------
+// ciInstanceKlass::is_in_package
+//
+// Is this klass in the given package?
+bool ciInstanceKlass::is_in_package(const char* packagename, int len) {
+ // To avoid class loader mischief, this test always rejects application classes.
+ if (!uses_default_loader())
+ return false;
+ GUARDED_VM_ENTRY(
+ return is_in_package_impl(packagename, len);
+ )
+}
+
+bool ciInstanceKlass::is_in_package_impl(const char* packagename, int len) {
+ ASSERT_IN_VM;
+
+ // If packagename contains trailing '/' exclude it from the
+ // prefix-test since we test for it explicitly.
+ if (packagename[len - 1] == '/')
+ len--;
+
+ if (!name()->starts_with(packagename, len))
+ return false;
+
+ // Test if the class name is something like "java/lang".
+ if ((len + 1) > name()->utf8_length())
+ return false;
+
+ // Test for trailing '/'
+ if ((char) name()->byte_at(len) != '/')
+ return false;
+
+ // Make sure it's not actually in a subpackage:
+ if (name()->index_of_at(len+1, "/", 1) >= 0)
+ return false;
+
+ return true;
}
// ------------------------------------------------------------------
@@ -340,6 +380,20 @@ ciField* ciInstanceKlass::get_field_by_offset(int field_offset, bool is_static)
return field;
}
+// ------------------------------------------------------------------
+// ciInstanceKlass::get_field_by_name
+ciField* ciInstanceKlass::get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static) {
+ VM_ENTRY_MARK;
+ instanceKlass* k = get_instanceKlass();
+ fieldDescriptor fd;
+ klassOop def = k->find_field(name->get_symbolOop(), signature->get_symbolOop(), is_static, &fd);
+ if (def == NULL) {
+ return NULL;
+ }
+ ciField* field = new (CURRENT_THREAD_ENV->arena()) ciField(&fd);
+ return field;
+}
+
// ------------------------------------------------------------------
// ciInstanceKlass::non_static_fields.
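is_in_package_impl() above accepts a class only when its name begins with the package prefix, the next character is a '/', and no further '/' follows, so subpackages are rejected; is_in_package() additionally refuses anything not loaded by the bootstrap loader. A standalone sketch of the string test (plain helpers, not the ciSymbol API):

#include <cstring>
#include <cstdio>

bool class_is_in_package(const char* class_name, const char* pkg) {
  std::size_t len = std::strlen(pkg);
  if (len > 0 && pkg[len - 1] == '/') len--;                   // tolerate trailing '/'
  if (std::strncmp(class_name, pkg, len) != 0) return false;   // prefix must match
  if (class_name[len] != '/') return false;                    // must end at a package boundary
  return std::strchr(class_name + len + 1, '/') == nullptr;    // and not be a subpackage
}

int main() {
  std::printf("%d\n", class_is_in_package("java/dyn/MethodHandle", "java/dyn"));  // 1
  std::printf("%d\n", class_is_in_package("java/dyn/impl/Foo", "java/dyn"));      // 0
}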
diff --git a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp
index a60020adc34..29aeffa01f3 100644
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp
@@ -29,10 +29,11 @@
// be loaded.
class ciInstanceKlass : public ciKlass {
CI_PACKAGE_ACCESS
+ friend class ciBytecodeStream;
friend class ciEnv;
+ friend class ciExceptionHandler;
friend class ciMethod;
friend class ciField;
- friend class ciBytecodeStream;
private:
jobject _loader;
@@ -78,6 +79,8 @@ protected:
const char* type_string() { return "ciInstanceKlass"; }
+ bool is_in_package_impl(const char* packagename, int len);
+
void print_impl(outputStream* st);
ciConstantPoolCache* field_cache();
@@ -148,6 +151,7 @@ public:
ciInstanceKlass* get_canonical_holder(int offset);
ciField* get_field_by_offset(int field_offset, bool is_static);
+ ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
GrowableArray<ciField*>* non_static_fields();
@@ -195,6 +199,12 @@ public:
bool is_java_lang_Object();
+ // Is this klass in the given package?
+ bool is_in_package(const char* packagename) {
+ return is_in_package(packagename, (int) strlen(packagename));
+ }
+ bool is_in_package(const char* packagename, int len);
+
// What kind of ciObject is this?
bool is_instance_klass() { return true; }
bool is_java_klass() { return true; }
diff --git a/hotspot/src/share/vm/ci/ciKlass.cpp b/hotspot/src/share/vm/ci/ciKlass.cpp
index ac5da354422..b0f28620ab2 100644
--- a/hotspot/src/share/vm/ci/ciKlass.cpp
+++ b/hotspot/src/share/vm/ci/ciKlass.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
diff --git a/hotspot/src/share/vm/ci/ciKlass.hpp b/hotspot/src/share/vm/ci/ciKlass.hpp
index 1f2571718bc..3f1c6d7aa45 100644
--- a/hotspot/src/share/vm/ci/ciKlass.hpp
+++ b/hotspot/src/share/vm/ci/ciKlass.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@ public:
ciKlass(KlassHandle k_h);
// What is the name of this klass?
- ciSymbol* name() { return _name; }
+ ciSymbol* name() const { return _name; }
// What is its layout helper value?
jint layout_helper() { return _layout_helper; }
diff --git a/hotspot/src/share/vm/ci/ciMethod.cpp b/hotspot/src/share/vm/ci/ciMethod.cpp
index f83429c1adf..3a271b3f226 100644
--- a/hotspot/src/share/vm/ci/ciMethod.cpp
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -687,7 +687,7 @@ int ciMethod::scale_count(int count, float prof_factor) {
// ------------------------------------------------------------------
// invokedynamic support
//
-bool ciMethod::is_method_handle_invoke() {
+bool ciMethod::is_method_handle_invoke() const {
check_is_loaded();
bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
#ifdef ASSERT
@@ -700,6 +700,12 @@ bool ciMethod::is_method_handle_invoke() {
return flag;
}
+bool ciMethod::is_method_handle_adapter() const {
+ check_is_loaded();
+ VM_ENTRY_MARK;
+ return get_methodOop()->is_method_handle_adapter();
+}
+
ciInstance* ciMethod::method_handle_type() {
check_is_loaded();
VM_ENTRY_MARK;
diff --git a/hotspot/src/share/vm/ci/ciMethod.hpp b/hotspot/src/share/vm/ci/ciMethod.hpp
index 1b65bc90c50..d574fa07b71 100644
--- a/hotspot/src/share/vm/ci/ciMethod.hpp
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,8 @@ class ciMethod : public ciObject {
CI_PACKAGE_ACCESS
friend class ciEnv;
friend class ciExceptionHandlerStream;
+ friend class ciBytecodeStream;
+ friend class ciMethodHandle;
private:
// General method information.
@@ -213,7 +215,10 @@ class ciMethod : public ciObject {
bool check_call(int refinfo_index, bool is_static) const;
void build_method_data(); // make sure it exists in the VM also
int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC
- bool is_method_handle_invoke();
+
+ // JSR 292 support
+ bool is_method_handle_invoke() const;
+ bool is_method_handle_adapter() const;
ciInstance* method_handle_type();
// What kind of ciObject is this?
@@ -251,4 +256,10 @@ class ciMethod : public ciObject {
// Print the name of this method in various incarnations.
void print_name(outputStream* st = tty);
void print_short_name(outputStream* st = tty);
+
+ methodOop get_method_handle_target() {
+ klassOop receiver_limit_oop = NULL;
+ int flags = 0;
+ return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags);
+ }
};
diff --git a/hotspot/src/share/vm/ci/ciMethodHandle.cpp b/hotspot/src/share/vm/ci/ciMethodHandle.cpp
new file mode 100644
index 00000000000..d9612192bf8
--- /dev/null
+++ b/hotspot/src/share/vm/ci/ciMethodHandle.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciMethodHandle.cpp.incl"
+
+// ciMethodHandle
+
+// ------------------------------------------------------------------
+// ciMethodHandle::get_adapter
+//
+// Return an adapter for this MethodHandle.
+ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
+ VM_ENTRY_MARK;
+
+ Handle h(get_oop());
+ methodHandle callee(_callee->get_methodOop());
+ MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD);
+ methodHandle m = mhc.compile(CHECK_NULL);
+ return CURRENT_ENV->get_object(m())->as_method();
+}
+
+
+// ------------------------------------------------------------------
+// ciMethodHandle::print_impl
+//
+// Implementation of the print method.
+void ciMethodHandle::print_impl(outputStream* st) {
+ st->print(" type=");
+ get_oop()->print();
+}
diff --git a/hotspot/src/share/vm/ci/ciMethodHandle.hpp b/hotspot/src/share/vm/ci/ciMethodHandle.hpp
new file mode 100644
index 00000000000..26d317f248d
--- /dev/null
+++ b/hotspot/src/share/vm/ci/ciMethodHandle.hpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciMethodHandle
+//
+// The class represents a java.dyn.MethodHandle object.
+class ciMethodHandle : public ciInstance {
+private:
+ ciMethod* _callee;
+
+ // Return an adapter for this MethodHandle.
+ ciMethod* get_adapter(bool is_invokedynamic) const;
+
+protected:
+ void print_impl(outputStream* st);
+
+public:
+ ciMethodHandle(instanceHandle h_i) : ciInstance(h_i) {};
+
+ // What kind of ciObject is this?
+ bool is_method_handle() const { return true; }
+
+ ciMethod* callee() const { return _callee; }
+ void set_callee(ciMethod* m) { _callee = m; }
+
+ // Return an adapter for a MethodHandle call.
+ ciMethod* get_method_handle_adapter() const {
+ return get_adapter(false);
+ }
+
+ // Return an adapter for an invokedynamic call.
+ ciMethod* get_invokedynamic_adapter() const {
+ return get_adapter(true);
+ }
+};
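ciMethodHandle (the new files above) wraps a java.dyn.MethodHandle: the compiler records the callee signature it sees at the call site with set_callee(), then asks get_method_handle_adapter() or get_invokedynamic_adapter() for a synthetic ciMethod spun by MethodHandleCompiler that it can compile like any other method. A hypothetical caller-side sketch with stub types (names and flow are illustrative, not the actual C2 call path):

#include <cstdio>

struct ciMethod { const char* name; };

struct ciMethodHandleStub {
  ciMethod* _callee = nullptr;
  void set_callee(ciMethod* m) { _callee = m; }
  // Stands in for get_method_handle_adapter()/get_invokedynamic_adapter(),
  // which both funnel into get_adapter(bool is_invokedynamic).
  ciMethod* get_adapter(bool is_invokedynamic) {
    static ciMethod adapter = { "synthetic adapter method" };
    (void)is_invokedynamic;           // true when the site is an invokedynamic
    return _callee ? &adapter : nullptr;
  }
};

int main() {
  ciMethod callee = { "signature method at the call site" };
  ciMethodHandleStub mh;
  mh.set_callee(&callee);
  std::printf("%s\n", mh.get_adapter(/*is_invokedynamic=*/true)->name);
}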
diff --git a/hotspot/src/share/vm/ci/ciObject.hpp b/hotspot/src/share/vm/ci/ciObject.hpp
index 8d5e6b7f4dd..1f38e9c7109 100644
--- a/hotspot/src/share/vm/ci/ciObject.hpp
+++ b/hotspot/src/share/vm/ci/ciObject.hpp
@@ -131,9 +131,12 @@ public:
// What kind of ciObject is this?
virtual bool is_null_object() const { return false; }
+ virtual bool is_call_site() const { return false; }
+ virtual bool is_cpcache() const { return false; }
virtual bool is_instance() { return false; }
virtual bool is_method() { return false; }
virtual bool is_method_data() { return false; }
+ virtual bool is_method_handle() const { return false; }
virtual bool is_array() { return false; }
virtual bool is_obj_array() { return false; }
virtual bool is_type_array() { return false; }
@@ -185,6 +188,14 @@ public:
assert(is_null_object(), "bad cast");
return (ciNullObject*)this;
}
+ ciCallSite* as_call_site() {
+ assert(is_call_site(), "bad cast");
+ return (ciCallSite*) this;
+ }
+ ciCPCache* as_cpcache() {
+ assert(is_cpcache(), "bad cast");
+ return (ciCPCache*) this;
+ }
ciInstance* as_instance() {
assert(is_instance(), "bad cast");
return (ciInstance*)this;
@@ -197,6 +208,10 @@ public:
assert(is_method_data(), "bad cast");
return (ciMethodData*)this;
}
+ ciMethodHandle* as_method_handle() {
+ assert(is_method_handle(), "bad cast");
+ return (ciMethodHandle*) this;
+ }
ciArray* as_array() {
assert(is_array(), "bad cast");
return (ciArray*)this;
diff --git a/hotspot/src/share/vm/ci/ciObjectFactory.cpp b/hotspot/src/share/vm/ci/ciObjectFactory.cpp
index 6fb4edc4d9c..cfbdf1659eb 100644
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp
@@ -144,30 +144,13 @@ void ciObjectFactory::init_shared_objects() {
ciEnv::_obj_array_klass_klass_instance =
get(Universe::objArrayKlassKlassObj())
->as_obj_array_klass_klass();
- ciEnv::_ArrayStoreException =
- get(SystemDictionary::ArrayStoreException_klass())
- ->as_instance_klass();
- ciEnv::_Class =
- get(SystemDictionary::class_klass())
- ->as_instance_klass();
- ciEnv::_ClassCastException =
- get(SystemDictionary::ClassCastException_klass())
- ->as_instance_klass();
- ciEnv::_Object =
- get(SystemDictionary::object_klass())
- ->as_instance_klass();
- ciEnv::_Throwable =
- get(SystemDictionary::throwable_klass())
- ->as_instance_klass();
- ciEnv::_Thread =
- get(SystemDictionary::thread_klass())
- ->as_instance_klass();
- ciEnv::_OutOfMemoryError =
- get(SystemDictionary::OutOfMemoryError_klass())
- ->as_instance_klass();
- ciEnv::_String =
- get(SystemDictionary::string_klass())
- ->as_instance_klass();
+
+#define WK_KLASS_DEFN(name, ignore_s, opt) \
+ if (SystemDictionary::name() != NULL) \
+ ciEnv::_##name = get(SystemDictionary::name())->as_instance_klass();
+
+ WK_KLASSES_DO(WK_KLASS_DEFN)
+#undef WK_KLASS_DEFN
for (int len = -1; len != _ci_objects->length(); ) {
len = _ci_objects->length();
@@ -324,13 +307,21 @@ ciObject* ciObjectFactory::create_new_object(oop o) {
return new (arena()) ciMethodData(h_md);
} else if (o->is_instance()) {
instanceHandle h_i(THREAD, (instanceOop)o);
- return new (arena()) ciInstance(h_i);
+ if (java_dyn_CallSite::is_instance(o))
+ return new (arena()) ciCallSite(h_i);
+ else if (java_dyn_MethodHandle::is_instance(o))
+ return new (arena()) ciMethodHandle(h_i);
+ else
+ return new (arena()) ciInstance(h_i);
} else if (o->is_objArray()) {
objArrayHandle h_oa(THREAD, (objArrayOop)o);
return new (arena()) ciObjArray(h_oa);
} else if (o->is_typeArray()) {
typeArrayHandle h_ta(THREAD, (typeArrayOop)o);
return new (arena()) ciTypeArray(h_ta);
+ } else if (o->is_constantPoolCache()) {
+ constantPoolCacheHandle h_cpc(THREAD, (constantPoolCacheOop) o);
+ return new (arena()) ciCPCache(h_cpc);
}
// The oop is of some type not supported by the compiler interface.
@@ -567,7 +558,7 @@ ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
if (key->is_perm() && _non_perm_count == 0) {
return emptyBucket;
} else if (key->is_instance()) {
- if (key->klass() == SystemDictionary::class_klass()) {
+ if (key->klass() == SystemDictionary::Class_klass()) {
// class mirror instances are always perm
return emptyBucket;
}
diff --git a/hotspot/src/share/vm/ci/ciStreams.cpp b/hotspot/src/share/vm/ci/ciStreams.cpp
index d343ab8446d..52f17c33b47 100644
--- a/hotspot/src/share/vm/ci/ciStreams.cpp
+++ b/hotspot/src/share/vm/ci/ciStreams.cpp
@@ -186,8 +186,9 @@ int ciBytecodeStream::get_klass_index() const {
// If this bytecode is a new, newarray, multianewarray, instanceof,
// or checkcast, get the referenced klass.
ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
- return CURRENT_ENV->get_klass_by_index(_holder, get_klass_index(),
- will_link);
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
+ return CURRENT_ENV->get_klass_by_index(cpool, get_klass_index(), will_link, _holder);
}
// ------------------------------------------------------------------
@@ -213,7 +214,9 @@ int ciBytecodeStream::get_constant_index() const {
// If this bytecode is one of the ldc variants, get the referenced
// constant.
ciConstant ciBytecodeStream::get_constant() {
- return CURRENT_ENV->get_constant_by_index(_holder, get_constant_index());
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
+ return CURRENT_ENV->get_constant_by_index(cpool, get_constant_index(), _holder);
}
// ------------------------------------------------------------------
@@ -264,9 +267,11 @@ ciField* ciBytecodeStream::get_field(bool& will_link) {
// There is no "will_link" result passed back. The user is responsible
// for checking linkability when retrieving the associated field.
ciInstanceKlass* ciBytecodeStream::get_declared_field_holder() {
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
int holder_index = get_field_holder_index();
bool ignore;
- return CURRENT_ENV->get_klass_by_index(_holder, holder_index, ignore)
+ return CURRENT_ENV->get_klass_by_index(cpool, holder_index, ignore, _holder)
->as_instance_klass();
}
@@ -277,9 +282,10 @@ ciInstanceKlass* ciBytecodeStream::get_declared_field_holder() {
// referenced by the current bytecode. Used for generating
// deoptimization information.
int ciBytecodeStream::get_field_holder_index() {
- VM_ENTRY_MARK;
- constantPoolOop cpool = _holder->get_instanceKlass()->constants();
- return cpool->klass_ref_index_at(get_field_index());
+ GUARDED_VM_ENTRY(
+ constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ return cpool->klass_ref_index_at(get_field_index());
+ )
}
// ------------------------------------------------------------------
@@ -321,7 +327,9 @@ int ciBytecodeStream::get_method_index() {
//
// If this is a method invocation bytecode, get the invoked method.
ciMethod* ciBytecodeStream::get_method(bool& will_link) {
- ciMethod* m = CURRENT_ENV->get_method_by_index(_holder, get_method_index(),cur_bc());
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
+ ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
will_link = m->is_loaded();
return m;
}
@@ -338,11 +346,13 @@ ciMethod* ciBytecodeStream::get_method(bool& will_link) {
// There is no "will_link" result passed back. The user is responsible
// for checking linkability when retrieving the associated method.
ciKlass* ciBytecodeStream::get_declared_method_holder() {
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
bool ignore;
- // report as Dynamic for invokedynamic, which is syntactically classless
+ // report as InvokeDynamic for invokedynamic, which is syntactically classless
if (cur_bc() == Bytecodes::_invokedynamic)
- return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_Dynamic(), false);
- return CURRENT_ENV->get_klass_by_index(_holder, get_method_holder_index(), ignore);
+ return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_InvokeDynamic(), false);
+ return CURRENT_ENV->get_klass_by_index(cpool, get_method_holder_index(), ignore, _holder);
}
// ------------------------------------------------------------------
@@ -352,8 +362,7 @@ ciKlass* ciBytecodeStream::get_declared_method_holder() {
// referenced by the current bytecode. Used for generating
// deoptimization information.
int ciBytecodeStream::get_method_holder_index() {
- VM_ENTRY_MARK;
- constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ constantPoolOop cpool = _method->get_methodOop()->constants();
return cpool->klass_ref_index_at(get_method_index());
}
@@ -370,3 +379,31 @@ int ciBytecodeStream::get_method_signature_index() {
int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
return cpool->signature_ref_index_at(name_and_type_index);
}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_cpcache
+ciCPCache* ciBytecodeStream::get_cpcache() {
+ VM_ENTRY_MARK;
+ // Get the constant pool.
+ constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ constantPoolCacheOop cpcache = cpool->cache();
+
+ return CURRENT_ENV->get_object(cpcache)->as_cpcache();
+}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_call_site
+ciCallSite* ciBytecodeStream::get_call_site() {
+ VM_ENTRY_MARK;
+ // Get the constant pool.
+ constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ constantPoolCacheOop cpcache = cpool->cache();
+
+ // Get the CallSite from the constant pool cache.
+ int method_index = get_method_index();
+ ConstantPoolCacheEntry* cpcache_entry = cpcache->secondary_entry_at(method_index);
+ oop call_site_oop = cpcache_entry->f1();
+
+ // Create a CallSite object and return it.
+ return CURRENT_ENV->get_object(call_site_oop)->as_call_site();
+}
diff --git a/hotspot/src/share/vm/ci/ciStreams.hpp b/hotspot/src/share/vm/ci/ciStreams.hpp
index 448e27cb16a..97a046f132f 100644
--- a/hotspot/src/share/vm/ci/ciStreams.hpp
+++ b/hotspot/src/share/vm/ci/ciStreams.hpp
@@ -232,6 +232,9 @@ public:
int get_method_holder_index();
int get_method_signature_index();
+ ciCPCache* get_cpcache();
+ ciCallSite* get_call_site();
+
private:
void assert_index_size(int required_size) const {
#ifdef ASSERT
diff --git a/hotspot/src/share/vm/ci/ciSymbol.cpp b/hotspot/src/share/vm/ci/ciSymbol.cpp
index 7284893e81d..e534f04c3de 100644
--- a/hotspot/src/share/vm/ci/ciSymbol.cpp
+++ b/hotspot/src/share/vm/ci/ciSymbol.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,22 @@ int ciSymbol::byte_at(int i) {
GUARDED_VM_ENTRY(return get_symbolOop()->byte_at(i);)
}
+// ------------------------------------------------------------------
+// ciSymbol::starts_with
+//
+// Tests if the symbol starts with the given prefix.
+bool ciSymbol::starts_with(const char* prefix, int len) const {
+ GUARDED_VM_ENTRY(return get_symbolOop()->starts_with(prefix, len);)
+}
+
+// ------------------------------------------------------------------
+// ciSymbol::index_of
+//
+// Determines where the symbol contains the given substring.
+int ciSymbol::index_of_at(int i, const char* str, int len) const {
+ GUARDED_VM_ENTRY(return get_symbolOop()->index_of_at(i, str, len);)
+}
+
// ------------------------------------------------------------------
// ciSymbol::utf8_length
int ciSymbol::utf8_length() {
diff --git a/hotspot/src/share/vm/ci/ciSymbol.hpp b/hotspot/src/share/vm/ci/ciSymbol.hpp
index 701fb8023d7..abb3088edbf 100644
--- a/hotspot/src/share/vm/ci/ciSymbol.hpp
+++ b/hotspot/src/share/vm/ci/ciSymbol.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2001 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
// machine.
class ciSymbol : public ciObject {
CI_PACKAGE_ACCESS
+ // These friends all make direct use of get_symbolOop:
friend class ciEnv;
friend class ciInstanceKlass;
friend class ciSignature;
@@ -38,13 +39,13 @@ private:
ciSymbol(symbolOop s) : ciObject(s) {}
ciSymbol(symbolHandle s); // for use with vmSymbolHandles
- symbolOop get_symbolOop() { return (symbolOop)get_oop(); }
+ symbolOop get_symbolOop() const { return (symbolOop)get_oop(); }
const char* type_string() { return "ciSymbol"; }
void print_impl(outputStream* st);
- int byte_at(int i);
+ // This is public in symbolOop but private here, because the base can move:
jbyte* base();
// Make a ciSymbol from a C string (implementation).
@@ -55,6 +56,15 @@ public:
const char* as_utf8();
int utf8_length();
+ // Return the i-th utf8 byte, where i < utf8_length
+ int byte_at(int i);
+
+ // Tests if the symbol starts with the given prefix.
+ bool starts_with(const char* prefix, int len) const;
+
+ // Determines where the symbol contains the given substring.
+ int index_of_at(int i, const char* str, int len) const;
+
// What kind of ciObject is this?
bool is_symbol() { return true; }
diff --git a/hotspot/src/share/vm/ci/ciType.cpp b/hotspot/src/share/vm/ci/ciType.cpp
index ca2c79a102a..e94af7d89a4 100644
--- a/hotspot/src/share/vm/ci/ciType.cpp
+++ b/hotspot/src/share/vm/ci/ciType.cpp
@@ -111,7 +111,7 @@ ciType* ciType::make(BasicType t) {
// short, etc.
// Note: Bare T_ADDRESS means a raw pointer type, not a return_address.
assert((uint)t < T_CONFLICT+1, "range check");
- if (t == T_OBJECT) return ciEnv::_Object; // java/lang/Object
+ if (t == T_OBJECT) return ciEnv::_Object_klass; // java/lang/Object
assert(_basic_types[t] != NULL, "domain check");
return _basic_types[t];
}
diff --git a/hotspot/src/share/vm/ci/ciTypeFlow.cpp b/hotspot/src/share/vm/ci/ciTypeFlow.cpp
index d21ea761a4f..4aceca8410a 100644
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp
@@ -635,8 +635,15 @@ void ciTypeFlow::StateVector::do_invoke(ciBytecodeStream* str,
ciMethod* method = str->get_method(will_link);
if (!will_link) {
// We weren't able to find the method.
- ciKlass* unloaded_holder = method->holder();
- trap(str, unloaded_holder, str->get_method_holder_index());
+ if (str->cur_bc() == Bytecodes::_invokedynamic) {
+ trap(str, NULL,
+ Deoptimization::make_trap_request
+ (Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_reinterpret));
+ } else {
+ ciKlass* unloaded_holder = method->holder();
+ trap(str, unloaded_holder, str->get_method_holder_index());
+ }
} else {
ciSignature* signature = method->signature();
ciSignatureStream sigstr(signature);
@@ -1292,8 +1299,8 @@ bool ciTypeFlow::StateVector::apply_one_bytecode(ciBytecodeStream* str) {
case Bytecodes::_invokeinterface: do_invoke(str, true); break;
case Bytecodes::_invokespecial: do_invoke(str, true); break;
case Bytecodes::_invokestatic: do_invoke(str, false); break;
-
case Bytecodes::_invokevirtual: do_invoke(str, true); break;
+ case Bytecodes::_invokedynamic: do_invoke(str, false); break;
case Bytecodes::_istore: store_local_int(str->get_index()); break;
case Bytecodes::_istore_0: store_local_int(0); break;
diff --git a/hotspot/src/share/vm/ci/ciUtilities.hpp b/hotspot/src/share/vm/ci/ciUtilities.hpp
index 163e8089b65..709752d0d14 100644
--- a/hotspot/src/share/vm/ci/ciUtilities.hpp
+++ b/hotspot/src/share/vm/ci/ciUtilities.hpp
@@ -79,7 +79,7 @@
THREAD); \
if (HAS_PENDING_EXCEPTION) { \
if (PENDING_EXCEPTION->klass() == \
- SystemDictionary::threaddeath_klass()) { \
+ SystemDictionary::ThreadDeath_klass()) { \
/* Kill the compilation. */ \
fatal("unhandled ci exception"); \
return (result); \
diff --git a/hotspot/src/share/vm/classfile/classFileParser.cpp b/hotspot/src/share/vm/classfile/classFileParser.cpp
index 8235db6f082..8671af37de6 100644
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp
@@ -430,7 +430,7 @@ void ClassFileParser::patch_constant_pool(constantPoolHandle cp, int index, Hand
case JVM_CONSTANT_UnresolvedClass :
// Patching a class means pre-resolving it.
// The name in the constant pool is ignored.
- if (patch->klass() == SystemDictionary::class_klass()) { // %%% java_lang_Class::is_instance
+ if (patch->klass() == SystemDictionary::Class_klass()) { // %%% java_lang_Class::is_instance
guarantee_property(!java_lang_Class::is_primitive(patch()),
"Illegal class patch at %d in class file %s",
index, CHECK);
@@ -643,7 +643,7 @@ void ClassFileParser::verify_constantvalue(int constantvalue_index, int signatur
guarantee_property(value_type.is_int(), "Inconsistent constant value type in class file %s", CHECK);
break;
case T_OBJECT:
- guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;", 18)
+ guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;")
&& (value_type.is_string() || value_type.is_unresolved_string())),
"Bad string initial value in class file %s", CHECK);
break;
@@ -1718,9 +1718,7 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
m->set_exception_table(exception_handlers());
// Copy byte codes
- if (code_length > 0) {
- memcpy(m->code_base(), code_start, code_length);
- }
+ m->set_code(code_start);
// Copy line number table
if (linenumber_table != NULL) {
@@ -2511,23 +2509,12 @@ void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
fac_ptr->nonstatic_byte_count -= 1;
(*fields_ptr)->ushort_at_put(i + instanceKlass::signature_index_offset,
word_sig_index);
- if (wordSize == jintSize) {
- fac_ptr->nonstatic_word_count += 1;
- } else {
- fac_ptr->nonstatic_double_count += 1;
- }
+ fac_ptr->nonstatic_word_count += 1;
- FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i+4);
+ FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i + instanceKlass::low_offset);
assert(atype == NONSTATIC_BYTE, "");
FieldAllocationType new_atype = NONSTATIC_WORD;
- if (wordSize > jintSize) {
- if (Universe::field_type_should_be_aligned(T_LONG)) {
- atype = NONSTATIC_ALIGNED_DOUBLE;
- } else {
- atype = NONSTATIC_DOUBLE;
- }
- }
- (*fields_ptr)->ushort_at_put(i+4, new_atype);
+ (*fields_ptr)->ushort_at_put(i + instanceKlass::low_offset, new_atype);
found_vmentry = true;
break;
@@ -3085,7 +3072,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
int len = fields->length();
for (int i = 0; i < len; i += instanceKlass::next_offset) {
int real_offset;
- FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i+4);
+ FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i + instanceKlass::low_offset);
switch (atype) {
case STATIC_OOP:
real_offset = next_static_oop_offset;
@@ -3173,8 +3160,8 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
default:
ShouldNotReachHere();
}
- fields->short_at_put(i+4, extract_low_short_from_int(real_offset) );
- fields->short_at_put(i+5, extract_high_short_from_int(real_offset) );
+ fields->short_at_put(i + instanceKlass::low_offset, extract_low_short_from_int(real_offset));
+ fields->short_at_put(i + instanceKlass::high_offset, extract_high_short_from_int(real_offset));
}
// Size of instances
@@ -3482,8 +3469,8 @@ void ClassFileParser::set_precomputed_flags(instanceKlassHandle k) {
#endif
// Check if this klass supports the java.lang.Cloneable interface
- if (SystemDictionary::cloneable_klass_loaded()) {
- if (k->is_subtype_of(SystemDictionary::cloneable_klass())) {
+ if (SystemDictionary::Cloneable_klass_loaded()) {
+ if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) {
k->set_is_cloneable();
}
}
@@ -3766,8 +3753,9 @@ bool ClassFileParser::has_illegal_visibility(jint flags) {
}
bool ClassFileParser::is_supported_version(u2 major, u2 minor) {
- u2 max_version = JDK_Version::is_gte_jdk17x_version() ?
- JAVA_MAX_SUPPORTED_VERSION : JAVA_6_VERSION;
+ u2 max_version =
+ JDK_Version::is_gte_jdk17x_version() ? JAVA_MAX_SUPPORTED_VERSION :
+ (JDK_Version::is_gte_jdk16x_version() ? JAVA_6_VERSION : JAVA_1_5_VERSION);
return (major >= JAVA_MIN_SUPPORTED_VERSION) &&
(major <= max_version) &&
((major != max_version) ||
@@ -4188,7 +4176,7 @@ char* ClassFileParser::skip_over_field_name(char* name, bool slash_ok, unsigned
// Check if ch is Java identifier start or is Java identifier part
// 4672820: call java.lang.Character methods directly without generating separate tables.
EXCEPTION_MARK;
- instanceKlassHandle klass (THREAD, SystemDictionary::char_klass());
+ instanceKlassHandle klass (THREAD, SystemDictionary::Character_klass());
// return value
JavaValue result(T_BOOLEAN);
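The is_supported_version() change above makes the accepted class-file major version depend on the JDK compatibility level: 1.7 accepts up to JAVA_MAX_SUPPORTED_VERSION, 1.6 up to JAVA_6_VERSION, and anything older only up to JAVA_1_5_VERSION. A standalone sketch of the gate (standard class-file major numbers; the real code also checks the minor version at the maximum major, which is omitted here):

#include <cstdio>

const int JAVA_MIN_SUPPORTED_VERSION = 45;   // JDK 1.0.2 class files
const int JAVA_1_5_VERSION           = 49;
const int JAVA_6_VERSION             = 50;
const int JAVA_MAX_SUPPORTED_VERSION = 51;   // class files produced for JDK 7

// gte_jdk17 / gte_jdk16 stand in for the JDK_Version::is_gte_jdk1?x_version() checks.
bool is_supported_version(int major, bool gte_jdk17, bool gte_jdk16) {
  int max_version = gte_jdk17 ? JAVA_MAX_SUPPORTED_VERSION
                  : gte_jdk16 ? JAVA_6_VERSION
                              : JAVA_1_5_VERSION;
  return major >= JAVA_MIN_SUPPORTED_VERSION && major <= max_version;
}

int main() {
  std::printf("%d %d\n",
              is_supported_version(50, /*gte_jdk17=*/false, /*gte_jdk16=*/true),   // 1
              is_supported_version(51, /*gte_jdk17=*/false, /*gte_jdk16=*/true));  // 0
}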
diff --git a/hotspot/src/share/vm/classfile/classLoader.cpp b/hotspot/src/share/vm/classfile/classLoader.cpp
index 669beb7ff32..2fb9de039bc 100644
--- a/hotspot/src/share/vm/classfile/classLoader.cpp
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp
@@ -819,7 +819,7 @@ objArrayOop ClassLoader::get_system_packages(TRAPS) {
_package_hash_table->copy_pkgnames(packages);
}
// Allocate objArray and fill with java.lang.String
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
nof_entries, CHECK_0);
objArrayHandle result(THREAD, r);
for (int i = 0; i < nof_entries; i++) {
diff --git a/hotspot/src/share/vm/classfile/javaAssertions.cpp b/hotspot/src/share/vm/classfile/javaAssertions.cpp
index 8f318e6a426..551dd2acd86 100644
--- a/hotspot/src/share/vm/classfile/javaAssertions.cpp
+++ b/hotspot/src/share/vm/classfile/javaAssertions.cpp
@@ -95,14 +95,14 @@ oop JavaAssertions::createAssertionStatusDirectives(TRAPS) {
int len;
typeArrayOop t;
len = OptionList::count(_packages);
- objArrayOop pn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
+ objArrayOop pn = oopFactory::new_objArray(SystemDictionary::String_klass(), len, CHECK_NULL);
objArrayHandle pkgNames (THREAD, pn);
t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
typeArrayHandle pkgEnabled(THREAD, t);
fillJavaArrays(_packages, len, pkgNames, pkgEnabled, CHECK_NULL);
len = OptionList::count(_classes);
- objArrayOop cn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
+ objArrayOop cn = oopFactory::new_objArray(SystemDictionary::String_klass(), len, CHECK_NULL);
objArrayHandle classNames (THREAD, cn);
t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
typeArrayHandle classEnabled(THREAD, t);
diff --git a/hotspot/src/share/vm/classfile/javaClasses.cpp b/hotspot/src/share/vm/classfile/javaClasses.cpp
index cc0db7a173b..173f2e26e5d 100644
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp
@@ -68,9 +68,9 @@ Handle java_lang_String::basic_create(int length, bool tenured, TRAPS) {
// and the char array it points to end up in the same cache line.
oop obj;
if (tenured) {
- obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_permanent_instance(CHECK_NH);
+ obj = instanceKlass::cast(SystemDictionary::String_klass())->allocate_permanent_instance(CHECK_NH);
} else {
- obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_instance(CHECK_NH);
+ obj = instanceKlass::cast(SystemDictionary::String_klass())->allocate_instance(CHECK_NH);
}
// Create the char array. The String object must be handlized here
@@ -293,7 +293,7 @@ char* java_lang_String::as_utf8_string(oop java_string, int start, int len) {
bool java_lang_String::equals(oop java_string, jchar* chars, int len) {
assert(SharedSkipVerify ||
- java_string->klass() == SystemDictionary::string_klass(),
+ java_string->klass() == SystemDictionary::String_klass(),
"must be java_string");
typeArrayOop value = java_lang_String::value(java_string);
int offset = java_lang_String::offset(java_string);
@@ -311,7 +311,7 @@ bool java_lang_String::equals(oop java_string, jchar* chars, int len) {
void java_lang_String::print(Handle java_string, outputStream* st) {
oop obj = java_string();
- assert(obj->klass() == SystemDictionary::string_klass(), "must be java_string");
+ assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
typeArrayOop value = java_lang_String::value(obj);
int offset = java_lang_String::offset(obj);
int length = java_lang_String::length(obj);
@@ -339,9 +339,9 @@ oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
// class is put into the system dictionary.
int computed_modifiers = k->compute_modifier_flags(CHECK_0);
k->set_modifier_flags(computed_modifiers);
- if (SystemDictionary::class_klass_loaded()) {
+ if (SystemDictionary::Class_klass_loaded()) {
// Allocate mirror (java.lang.Class instance)
- Handle mirror = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
+ Handle mirror = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
// Setup indirections
mirror->obj_field_put(klass_offset, k());
k->set_java_mirror(mirror());
@@ -378,7 +378,7 @@ oop java_lang_Class::create_mirror(KlassHandle k, TRAPS) {
oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
// This should be improved by adding a field at the Java level or by
// introducing a new VM klass (see comment in ClassFileParser)
- oop java_class = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
+ oop java_class = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
if (type != T_VOID) {
klassOop aklass = Universe::typeArrayKlassObj(type);
assert(aklass != NULL, "correct bootstrap");
@@ -502,7 +502,7 @@ BasicType java_lang_Class::as_BasicType(oop java_class, klassOop* reference_klas
oop java_lang_Class::primitive_mirror(BasicType t) {
oop mirror = Universe::java_mirror(t);
- assert(mirror != NULL && mirror->is_a(SystemDictionary::class_klass()), "must be a Class");
+ assert(mirror != NULL && mirror->is_a(SystemDictionary::Class_klass()), "must be a Class");
assert(java_lang_Class::is_primitive(mirror), "must be primitive");
return mirror;
}
@@ -515,14 +515,14 @@ void java_lang_Class::compute_offsets() {
assert(!offsets_computed, "offsets should be initialized only once");
offsets_computed = true;
- klassOop k = SystemDictionary::class_klass();
+ klassOop k = SystemDictionary::Class_klass();
// The classRedefinedCount field is only present starting in 1.5,
// so don't go fatal.
compute_optional_offset(classRedefinedCount_offset,
k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
// The field indicating parallelCapable (parallelLockMap) is only present starting in 7,
- klassOop k1 = SystemDictionary::classloader_klass();
+ klassOop k1 = SystemDictionary::ClassLoader_klass();
compute_optional_offset(parallelCapable_offset,
k1, vmSymbols::parallelCapable_name(), vmSymbols::concurrenthashmap_signature());
}
@@ -588,7 +588,7 @@ int java_lang_Thread::_park_event_offset = 0 ;
void java_lang_Thread::compute_offsets() {
assert(_group_offset == 0, "offsets should be initialized only once");
- klassOop k = SystemDictionary::thread_klass();
+ klassOop k = SystemDictionary::Thread_klass();
compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::char_array_signature());
compute_offset(_group_offset, k, vmSymbols::group_name(), vmSymbols::threadgroup_signature());
compute_offset(_contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature());
@@ -847,7 +847,7 @@ bool java_lang_ThreadGroup::is_vmAllowSuspension(oop java_thread_group) {
void java_lang_ThreadGroup::compute_offsets() {
assert(_parent_offset == 0, "offsets should be initialized only once");
- klassOop k = SystemDictionary::threadGroup_klass();
+ klassOop k = SystemDictionary::ThreadGroup_klass();
compute_offset(_parent_offset, k, vmSymbols::parent_name(), vmSymbols::threadgroup_signature());
compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
@@ -1124,8 +1124,7 @@ class BacktraceBuilder: public StackObj {
if (_dirty && _methods != NULL) {
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
- bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
- _methods->array_size()));
+ bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
_dirty = false;
}
}
@@ -1345,7 +1344,7 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
// No-op if stack trace is disabled
if (!StackTraceInThrowable) return;
- assert(throwable->is_a(SystemDictionary::throwable_klass()), "sanity check");
+ assert(throwable->is_a(SystemDictionary::Throwable_klass()), "sanity check");
oop backtrace = java_lang_Throwable::backtrace(throwable());
assert(backtrace != NULL, "backtrace not preallocated");
@@ -1450,7 +1449,7 @@ oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
// Allocate java.lang.StackTraceElement instance
- klassOop k = SystemDictionary::stackTraceElement_klass();
+ klassOop k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
instanceKlassHandle ik (THREAD, k);
if (ik->should_be_initialized()) {
@@ -1488,7 +1487,7 @@ oop java_lang_StackTraceElement::create(methodHandle method, int bci, TRAPS) {
void java_lang_reflect_AccessibleObject::compute_offsets() {
- klassOop k = SystemDictionary::reflect_accessible_object_klass();
+ klassOop k = SystemDictionary::reflect_AccessibleObject_klass();
compute_offset(override_offset, k, vmSymbols::override_name(), vmSymbols::bool_signature());
}
@@ -1503,7 +1502,7 @@ void java_lang_reflect_AccessibleObject::set_override(oop reflect, jboolean valu
}
void java_lang_reflect_Method::compute_offsets() {
- klassOop k = SystemDictionary::reflect_method_klass();
+ klassOop k = SystemDictionary::reflect_Method_klass();
compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
compute_offset(returnType_offset, k, vmSymbols::returnType_name(), vmSymbols::class_signature());
@@ -1524,7 +1523,7 @@ void java_lang_reflect_Method::compute_offsets() {
Handle java_lang_reflect_Method::create(TRAPS) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
- klassOop klass = SystemDictionary::reflect_method_klass();
+ klassOop klass = SystemDictionary::reflect_Method_klass();
// This class is eagerly initialized during VM initialization, since we keep a refence
// to one of the methods
assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized");
@@ -1666,7 +1665,7 @@ void java_lang_reflect_Method::set_annotation_default(oop method, oop value) {
}
void java_lang_reflect_Constructor::compute_offsets() {
- klassOop k = SystemDictionary::reflect_constructor_klass();
+ klassOop k = SystemDictionary::reflect_Constructor_klass();
compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
compute_offset(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
compute_offset(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
@@ -1790,7 +1789,7 @@ void java_lang_reflect_Constructor::set_parameter_annotations(oop method, oop va
}
void java_lang_reflect_Field::compute_offsets() {
- klassOop k = SystemDictionary::reflect_field_klass();
+ klassOop k = SystemDictionary::reflect_Field_klass();
compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
compute_offset(type_offset, k, vmSymbols::type_name(), vmSymbols::class_signature());
@@ -1897,7 +1896,7 @@ void java_lang_reflect_Field::set_annotations(oop field, oop value) {
void sun_reflect_ConstantPool::compute_offsets() {
- klassOop k = SystemDictionary::reflect_constant_pool_klass();
+ klassOop k = SystemDictionary::reflect_ConstantPool_klass();
// This null test can be removed post beta
if (k != NULL) {
compute_offset(_cp_oop_offset, k, vmSymbols::constantPoolOop_name(), vmSymbols::object_signature());
@@ -1907,7 +1906,7 @@ void sun_reflect_ConstantPool::compute_offsets() {
Handle sun_reflect_ConstantPool::create(TRAPS) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
- klassOop k = SystemDictionary::reflect_constant_pool_klass();
+ klassOop k = SystemDictionary::reflect_ConstantPool_klass();
instanceKlassHandle klass (THREAD, k);
// Ensure it is initialized
klass->initialize(CHECK_NH);
@@ -1927,7 +1926,7 @@ void sun_reflect_ConstantPool::set_cp_oop(oop reflect, oop value) {
}
void sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() {
- klassOop k = SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass();
+ klassOop k = SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass();
// This null test can be removed post beta
if (k != NULL) {
compute_offset(_base_offset, k,
@@ -2073,7 +2072,7 @@ void java_lang_boxing_object::print(BasicType type, jvalue* value, outputStream*
// Support for java_lang_ref_Reference
oop java_lang_ref_Reference::pending_list_lock() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
if (UseCompressedOops) {
return oopDesc::load_decode_heap_oop((narrowOop *)addr);
@@ -2083,7 +2082,7 @@ oop java_lang_ref_Reference::pending_list_lock() {
}
HeapWord *java_lang_ref_Reference::pending_list_addr() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
// XXX This might not be HeapWord aligned, almost rather be char *.
return (HeapWord*)addr;
@@ -2106,17 +2105,17 @@ jlong java_lang_ref_SoftReference::timestamp(oop ref) {
}
jlong java_lang_ref_SoftReference::clock() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
int offset = ik->offset_of_static_fields() + static_clock_offset;
- return SystemDictionary::soft_reference_klass()->long_field(offset);
+ return SystemDictionary::SoftReference_klass()->long_field(offset);
}
void java_lang_ref_SoftReference::set_clock(jlong value) {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
int offset = ik->offset_of_static_fields() + static_clock_offset;
- SystemDictionary::soft_reference_klass()->long_field_put(offset, value);
+ SystemDictionary::SoftReference_klass()->long_field_put(offset, value);
}
@@ -2404,6 +2403,10 @@ oop java_dyn_MethodType::ptype(oop mt, int idx) {
return ptypes(mt)->obj_at(idx);
}
+int java_dyn_MethodType::ptype_count(oop mt) {
+ return ptypes(mt)->length();
+}
+
// Support for java_dyn_MethodTypeForm
@@ -2430,15 +2433,15 @@ oop java_dyn_MethodTypeForm::erasedType(oop mtform) {
}
-// Support for sun_dyn_CallSiteImpl
+// Support for java_dyn_CallSite
-int sun_dyn_CallSiteImpl::_type_offset;
-int sun_dyn_CallSiteImpl::_target_offset;
-int sun_dyn_CallSiteImpl::_vmmethod_offset;
+int java_dyn_CallSite::_type_offset;
+int java_dyn_CallSite::_target_offset;
+int java_dyn_CallSite::_vmmethod_offset;
-void sun_dyn_CallSiteImpl::compute_offsets() {
+void java_dyn_CallSite::compute_offsets() {
if (!EnableInvokeDynamic) return;
- klassOop k = SystemDictionary::CallSiteImpl_klass();
+ klassOop k = SystemDictionary::CallSite_klass();
if (k != NULL) {
compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_dyn_MethodType_signature(), true);
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
@@ -2446,23 +2449,23 @@ void sun_dyn_CallSiteImpl::compute_offsets() {
}
}
-oop sun_dyn_CallSiteImpl::type(oop site) {
+oop java_dyn_CallSite::type(oop site) {
return site->obj_field(_type_offset);
}
-oop sun_dyn_CallSiteImpl::target(oop site) {
+oop java_dyn_CallSite::target(oop site) {
return site->obj_field(_target_offset);
}
-void sun_dyn_CallSiteImpl::set_target(oop site, oop target) {
+void java_dyn_CallSite::set_target(oop site, oop target) {
site->obj_field_put(_target_offset, target);
}
-oop sun_dyn_CallSiteImpl::vmmethod(oop site) {
+oop java_dyn_CallSite::vmmethod(oop site) {
return site->obj_field(_vmmethod_offset);
}
-void sun_dyn_CallSiteImpl::set_vmmethod(oop site, oop ref) {
+void java_dyn_CallSite::set_vmmethod(oop site, oop ref) {
site->obj_field_put(_vmmethod_offset, ref);
}
@@ -2535,7 +2538,7 @@ oop java_lang_ClassLoader::non_reflection_class_loader(oop loader) {
// the generated bytecodes for reflection, and if so, "magically"
// delegate to its parent to prevent class loading from occurring
// in places where applications using reflection didn't expect it.
- klassOop delegating_cl_class = SystemDictionary::reflect_delegating_classloader_klass();
+ klassOop delegating_cl_class = SystemDictionary::reflect_DelegatingClassLoader_klass();
// This might be null in non-1.4 JDKs
if (delegating_cl_class != NULL && loader->is_a(delegating_cl_class)) {
return parent(loader);
@@ -2550,7 +2553,7 @@ oop java_lang_ClassLoader::non_reflection_class_loader(oop loader) {
void java_lang_System::compute_offsets() {
assert(offset_of_static_fields == 0, "offsets should be initialized only once");
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::system_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::System_klass());
offset_of_static_fields = ik->offset_of_static_fields();
}
@@ -2811,7 +2814,7 @@ void JavaClasses::compute_offsets() {
java_dyn_MethodTypeForm::compute_offsets();
}
if (EnableInvokeDynamic) {
- sun_dyn_CallSiteImpl::compute_offsets();
+ java_dyn_CallSite::compute_offsets();
}
java_security_AccessControlContext::compute_offsets();
// Initialize reflection classes. The layouts of these classes
diff --git a/hotspot/src/share/vm/classfile/javaClasses.hpp b/hotspot/src/share/vm/classfile/javaClasses.hpp
index 048fba8d4b0..af78d5e58b3 100644
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp
@@ -111,7 +111,7 @@ class java_lang_String : AllStatic {
// Testers
static bool is_instance(oop obj) {
- return obj != NULL && obj->klass() == SystemDictionary::string_klass();
+ return obj != NULL && obj->klass() == SystemDictionary::String_klass();
}
// Debugging
@@ -161,7 +161,7 @@ class java_lang_Class : AllStatic {
static void print_signature(oop java_class, outputStream *st);
// Testing
static bool is_instance(oop obj) {
- return obj != NULL && obj->klass() == SystemDictionary::class_klass();
+ return obj != NULL && obj->klass() == SystemDictionary::Class_klass();
}
static bool is_primitive(oop java_class);
static BasicType primitive_type(oop java_class);
@@ -1027,6 +1027,7 @@ class java_dyn_MethodType: AllStatic {
static oop form(oop mt);
static oop ptype(oop mt, int index);
+ static int ptype_count(oop mt);
static symbolOop as_signature(oop mt, bool intern_if_not_found, TRAPS);
static void print_signature(oop mt, outputStream* st);
@@ -1061,9 +1062,9 @@ class java_dyn_MethodTypeForm: AllStatic {
};
-// Interface to sun.dyn.CallSiteImpl objects
+// Interface to java.dyn.CallSite objects
-class sun_dyn_CallSiteImpl: AllStatic {
+class java_dyn_CallSite: AllStatic {
friend class JavaClasses;
private:
@@ -1083,6 +1084,14 @@ public:
static oop vmmethod(oop site);
static void set_vmmethod(oop site, oop ref);
+ // Testers
+ static bool is_subclass(klassOop klass) {
+ return Klass::cast(klass)->is_subclass_of(SystemDictionary::CallSite_klass());
+ }
+ static bool is_instance(oop obj) {
+ return obj != NULL && is_subclass(obj->klass());
+ }
+
// Accessors for code generation:
static int target_offset_in_bytes() { return _target_offset; }
static int type_offset_in_bytes() { return _type_offset; }
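// Illustrative sketch (not part of this changeset): the is_subclass /
// is_instance testers added for java_dyn_CallSite above. Unlike the exact
// klass comparison used by java_lang_String::is_instance, the CallSite tester
// accepts subclasses, i.e. it walks up the super chain. KlassNode and
// ObjectLike are hypothetical stand-ins for the real Klass/oop types.
#include <cstddef>

struct KlassNode {
  const KlassNode* super;                   // NULL for the root of the hierarchy
};

inline bool is_subclass_of(const KlassNode* k, const KlassNode* expected) {
  for (; k != NULL; k = k->super) {
    if (k == expected) return true;
  }
  return false;
}

struct ObjectLike {
  const KlassNode* klass;
};

inline bool is_instance(const ObjectLike* obj, const KlassNode* expected) {
  return obj != NULL && is_subclass_of(obj->klass, expected);
}

int main() {
  KlassNode callsite = { NULL };
  KlassNode subclass = { &callsite };
  ObjectLike site    = { &subclass };
  return (is_instance(&site, &callsite) && !is_instance(NULL, &callsite)) ? 0 : 1;
}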
diff --git a/hotspot/src/share/vm/classfile/systemDictionary.cpp b/hotspot/src/share/vm/classfile/systemDictionary.cpp
index 5598c9fcfb3..fb22282e97c 100644
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp
@@ -60,10 +60,10 @@ oop SystemDictionary::java_system_loader() {
}
void SystemDictionary::compute_java_system_loader(TRAPS) {
- KlassHandle system_klass(THREAD, WK_KLASS(classloader_klass));
+ KlassHandle system_klass(THREAD, WK_KLASS(ClassLoader_klass));
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
- KlassHandle(THREAD, WK_KLASS(classloader_klass)),
+ KlassHandle(THREAD, WK_KLASS(ClassLoader_klass)),
vmSymbolHandles::getSystemClassLoader_name(),
vmSymbolHandles::void_classloader_signature(),
CHECK);
@@ -99,6 +99,15 @@ bool SystemDictionary::is_parallelCapable(Handle class_loader) {
return java_lang_Class::parallelCapable(class_loader());
}
// ----------------------------------------------------------------------------
+// The ParallelDefineClass flag does not apply to the bootstrap class loader
+bool SystemDictionary::is_parallelDefine(Handle class_loader) {
+ if (class_loader.is_null()) return false;
+ if (AllowParallelDefineClass && java_lang_Class::parallelCapable(class_loader())) {
+ return true;
+ }
+ return false;
+}
+// ----------------------------------------------------------------------------
// Resolving of classes
// Forwards to resolve_or_null
@@ -119,7 +128,7 @@ klassOop SystemDictionary::handle_resolution_exception(symbolHandle class_name,
// in which case we have to check whether the pending exception is a ClassNotFoundException,
// and if so convert it to a NoClassDefFoundError
// And chain the original ClassNotFoundException
- if (throw_error && PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass())) {
+ if (throw_error && PENDING_EXCEPTION->is_a(SystemDictionary::ClassNotFoundException_klass())) {
ResourceMark rm(THREAD);
assert(klass_h() == NULL, "Should not have result with exception pending");
Handle e(THREAD, PENDING_EXCEPTION);
@@ -350,7 +359,7 @@ void SystemDictionary::validate_protection_domain(instanceKlassHandle klass,
assert(class_loader() != NULL, "should not have non-null protection domain for null classloader");
- KlassHandle system_loader(THREAD, SystemDictionary::classloader_klass());
+ KlassHandle system_loader(THREAD, SystemDictionary::ClassLoader_klass());
JavaCalls::call_special(&result,
class_loader,
system_loader,
@@ -724,17 +733,17 @@ klassOop SystemDictionary::resolve_instance_class_or_null(symbolHandle class_nam
// Do actual loading
k = load_instance_class(name, class_loader, THREAD);
- // For UnsyncloadClass and AllowParallelDefineClass only:
+ // For UnsyncloadClass only
// If they got a linkageError, check if a parallel class load succeeded.
// If it did, then for bytecode resolution the specification requires
// that we return the same result we did for the other thread, i.e. the
// successfully loaded instanceKlass
// Should not get here for classloaders that support parallelism
- // with the new cleaner mechanism
+ // with the new cleaner mechanism, even with AllowParallelDefineClass
// Bootstrap goes through here to allow for an extra guarantee check
if (UnsyncloadClass || (class_loader.is_null())) {
if (k.is_null() && HAS_PENDING_EXCEPTION
- && PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
+ && PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
MutexLocker mu(SystemDictionary_lock, THREAD);
klassOop check = find_class(d_index, d_hash, name, class_loader);
if (check != NULL) {
@@ -1358,7 +1367,7 @@ instanceKlassHandle SystemDictionary::load_instance_class(symbolHandle class_nam
JavaValue result(T_OBJECT);
- KlassHandle spec_klass (THREAD, SystemDictionary::classloader_klass());
+ KlassHandle spec_klass (THREAD, SystemDictionary::ClassLoader_klass());
// Call public unsynchronized loadClass(String) directly for all class loaders
// for parallelCapable class loaders. JDK >=7, loadClass(String, boolean) will
@@ -1483,14 +1492,17 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
}
// Support parallel classloading
-// Initial implementation for bootstrap classloader
-// For custom class loaders that support parallel classloading,
+// All parallel class loaders, including the bootstrap class loader,
+// lock a placeholder entry for this class/class_loader pair
+// to allow parallel defines of different classes for this class loader.
// With AllowParallelDefine flag==true, in case they do not synchronize around
// FindLoadedClass/DefineClass, calls, we check for parallel
// loading for them, wait if a defineClass is in progress
// and return the initial requestor's results
+// This flag does not apply to the bootstrap classloader.
// With AllowParallelDefine flag==false, call through to define_instance_class
// which will throw LinkageError: duplicate class definition.
+// False is the requested default.
// For better performance, the class loaders should synchronize
// findClass(), i.e. FindLoadedClass/DefineClassIfAbsent or they
// potentially waste time reading and parsing the bytestream.
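// Illustrative sketch (not part of this changeset): the find-or-define
// discipline described in the comments above and enforced by the re-check that
// the next hunk adds under SystemDictionary_lock. Registry, Klass and
// define_if_absent are hypothetical stand-ins for the SystemDictionary and
// placeholder machinery; only the control flow is the point -- re-check the
// table under the lock and, if a parallel definer already won, hand back its
// result instead of raising a duplicate-definition error.
#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Klass { std::string name; };

class Registry {
  std::mutex _lock;
  std::map<std::string, std::shared_ptr<Klass> > _defined;
 public:
  // Returns the winning definition for 'name', whether it is 'candidate'
  // or one installed earlier by a parallel definer.
  std::shared_ptr<Klass> define_if_absent(const std::string& name,
                                          std::shared_ptr<Klass> candidate) {
    std::lock_guard<std::mutex> guard(_lock);
    auto it = _defined.find(name);
    if (it != _defined.end()) return it->second;   // someone else defined it first
    _defined[name] = candidate;                    // we won the race; install ours
    return candidate;
  }
};

int main() {
  Registry registry;
  std::shared_ptr<Klass> first  = registry.define_if_absent("Foo", std::make_shared<Klass>());
  std::shared_ptr<Klass> second = registry.define_if_absent("Foo", std::make_shared<Klass>());
  return first == second ? 0 : 1;                  // the second definer reuses the first result
}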
@@ -1511,9 +1523,11 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle
{
MutexLocker mu(SystemDictionary_lock, THREAD);
// First check if class already defined
- klassOop check = find_class(d_index, d_hash, name_h, class_loader);
- if (check != NULL) {
- return(instanceKlassHandle(THREAD, check));
+ if (UnsyncloadClass || (is_parallelDefine(class_loader))) {
+ klassOop check = find_class(d_index, d_hash, name_h, class_loader);
+ if (check != NULL) {
+ return(instanceKlassHandle(THREAD, check));
+ }
}
// Acquire define token for this class/classloader
@@ -1529,7 +1543,7 @@ instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle
// Only special cases allow parallel defines and can use other thread's results
// Other cases fall through, and may run into duplicate defines
// caught by finding an entry in the SystemDictionary
- if ((UnsyncloadClass || AllowParallelDefineClass) && (probe->instanceKlass() != NULL)) {
+ if ((UnsyncloadClass || is_parallelDefine(class_loader)) && (probe->instanceKlass() != NULL)) {
probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
placeholders()->find_and_remove(p_index, p_hash, name_h, class_loader, THREAD);
SystemDictionary_lock->notify_all();
@@ -1930,13 +1944,13 @@ void SystemDictionary::initialize_wk_klasses_until(WKID limit_id, WKID &start_id
void SystemDictionary::initialize_preloaded_classes(TRAPS) {
- assert(WK_KLASS(object_klass) == NULL, "preloaded classes should only be initialized once");
+ assert(WK_KLASS(Object_klass) == NULL, "preloaded classes should only be initialized once");
// Preload commonly used klasses
WKID scan = FIRST_WKID;
// first do Object, String, Class
- initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(class_klass), scan, CHECK);
+ initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Class_klass), scan, CHECK);
- debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(WK_KLASS(class_klass)));
+ debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(WK_KLASS(Class_klass)));
// Fixup mirrors for classes loaded before java.lang.Class.
// These calls iterate over the objects currently in the perm gen
@@ -1947,17 +1961,17 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
Universe::fixup_mirrors(CHECK);
// do a bunch more:
- initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(reference_klass), scan, CHECK);
+ initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Reference_klass), scan, CHECK);
// Preload ref klasses and set reference types
- instanceKlass::cast(WK_KLASS(reference_klass))->set_reference_type(REF_OTHER);
- instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(reference_klass));
+ instanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
+ instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
- initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(phantom_reference_klass), scan, CHECK);
- instanceKlass::cast(WK_KLASS(soft_reference_klass))->set_reference_type(REF_SOFT);
- instanceKlass::cast(WK_KLASS(weak_reference_klass))->set_reference_type(REF_WEAK);
- instanceKlass::cast(WK_KLASS(final_reference_klass))->set_reference_type(REF_FINAL);
- instanceKlass::cast(WK_KLASS(phantom_reference_klass))->set_reference_type(REF_PHANTOM);
+ initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
+ instanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
+ instanceKlass::cast(WK_KLASS(WeakReference_klass))->set_reference_type(REF_WEAK);
+ instanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
+ instanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM);
WKID meth_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
WKID meth_group_end = WK_KLASS_ENUM_NAME(WrongMethodTypeException_klass);
@@ -1970,10 +1984,10 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
scan = WKID(meth_group_end+1);
}
WKID indy_group_start = WK_KLASS_ENUM_NAME(Linkage_klass);
- WKID indy_group_end = WK_KLASS_ENUM_NAME(Dynamic_klass);
+ WKID indy_group_end = WK_KLASS_ENUM_NAME(InvokeDynamic_klass);
initialize_wk_klasses_until(indy_group_start, scan, CHECK);
if (EnableInvokeDynamic) {
- initialize_wk_klasses_through(indy_group_start, scan, CHECK);
+ initialize_wk_klasses_through(indy_group_end, scan, CHECK);
}
if (_well_known_klasses[indy_group_start] == NULL) {
// Skip the rest of the dynamic typing classes, if Linkage is not loaded.
@@ -1982,14 +1996,14 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK);
- _box_klasses[T_BOOLEAN] = WK_KLASS(boolean_klass);
- _box_klasses[T_CHAR] = WK_KLASS(char_klass);
- _box_klasses[T_FLOAT] = WK_KLASS(float_klass);
- _box_klasses[T_DOUBLE] = WK_KLASS(double_klass);
- _box_klasses[T_BYTE] = WK_KLASS(byte_klass);
- _box_klasses[T_SHORT] = WK_KLASS(short_klass);
- _box_klasses[T_INT] = WK_KLASS(int_klass);
- _box_klasses[T_LONG] = WK_KLASS(long_klass);
+ _box_klasses[T_BOOLEAN] = WK_KLASS(Boolean_klass);
+ _box_klasses[T_CHAR] = WK_KLASS(Character_klass);
+ _box_klasses[T_FLOAT] = WK_KLASS(Float_klass);
+ _box_klasses[T_DOUBLE] = WK_KLASS(Double_klass);
+ _box_klasses[T_BYTE] = WK_KLASS(Byte_klass);
+ _box_klasses[T_SHORT] = WK_KLASS(Short_klass);
+ _box_klasses[T_INT] = WK_KLASS(Integer_klass);
+ _box_klasses[T_LONG] = WK_KLASS(Long_klass);
//_box_klasses[T_OBJECT] = WK_KLASS(object_klass);
//_box_klasses[T_ARRAY] = WK_KLASS(object_klass);
@@ -2000,11 +2014,11 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
#endif // KERNEL
{ // Compute whether we should use loadClass or loadClassInternal when loading classes.
- methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
+ methodOop method = instanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
_has_loadClassInternal = (method != NULL);
}
{ // Compute whether we should use checkPackageAccess or NOT
- methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
+ methodOop method = instanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
_has_checkPackageAccess = (method != NULL);
}
}
@@ -2326,6 +2340,8 @@ methodOop SystemDictionary::find_method_handle_invoke(symbolHandle signature,
SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature);
if (spe == NULL || spe->property_oop() == NULL) {
// Must create lots of stuff here, but outside of the SystemDictionary lock.
+ if (THREAD->is_Compiler_thread())
+ return NULL; // do not attempt from within compiler
Handle mt = compute_method_handle_type(signature(),
class_loader, protection_domain,
CHECK_NULL);
@@ -2358,7 +2374,7 @@ Handle SystemDictionary::compute_method_handle_type(symbolHandle signature,
TRAPS) {
Handle empty;
int npts = ArgumentCount(signature()).size();
- objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::class_klass(), npts, CHECK_(empty));
+ objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty));
int arg = 0;
Handle rt; // the return type from the signature
for (SignatureStream ss(signature()); !ss.is_done(); ss.next()) {
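// Illustrative sketch (not part of this changeset): a simplified stand-in for
// what ArgumentCount(signature()).size() supplies to the code above -- the
// number of parameter types in a JVM method descriptor, which is then used to
// size the Class[] array of parameter types. count_parameters is hypothetical;
// the real SignatureStream does considerably more.
#include <cassert>
#include <string>

int count_parameters(const std::string& descriptor) {
  int count = 0;
  size_t i = descriptor.find('(') + 1;
  while (i < descriptor.size() && descriptor[i] != ')') {
    while (descriptor[i] == '[') i++;        // array dimensions prefix the element type
    if (descriptor[i] == 'L') {              // class type: skip to the terminating ';'
      i = descriptor.find(';', i) + 1;
    } else {
      i++;                                   // primitive type: a single character
    }
    count++;
  }
  return count;
}

int main() {
  assert(count_parameters("()V") == 0);
  assert(count_parameters("(ILjava/lang/String;[J)V") == 3);
  assert(count_parameters("([[D[Ljava/lang/Object;)I") == 2);
  return 0;
}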
@@ -2404,7 +2420,7 @@ Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
methodHandle mh_invdyn,
TRAPS) {
Handle empty;
- // call sun.dyn.CallSiteImpl::makeSite(caller, name, mtype, cmid, cbci)
+ // call java.dyn.CallSite::makeSite(caller, name, mtype, cmid, cbci)
oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle!
JavaCallArguments args(Handle(THREAD, caller->java_mirror()));
args.push_oop(name_str_oop);
@@ -2413,17 +2429,19 @@ Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
args.push_int(caller_bci);
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
- SystemDictionary::CallSiteImpl_klass(),
+ SystemDictionary::CallSite_klass(),
vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
&args, CHECK_(empty));
oop call_site_oop = (oop) result.get_jobject();
assert(call_site_oop->is_oop()
- /*&& sun_dyn_CallSiteImpl::is_instance(call_site_oop)*/, "must be sane");
- sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn());
+ /*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane");
+ java_dyn_CallSite::set_vmmethod(call_site_oop, mh_invdyn());
if (TraceMethodHandles) {
+#ifndef PRODUCT
tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
call_site_oop->print();
tty->cr();
+#endif //PRODUCT
}
return call_site_oop;
}
@@ -2436,9 +2454,17 @@ Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
instanceKlassHandle ik(THREAD, caller());
- if (ik->bootstrap_method() != NULL) {
- return Handle(THREAD, ik->bootstrap_method());
+ oop boot_method_oop = ik->bootstrap_method();
+ if (boot_method_oop != NULL) {
+ if (TraceMethodHandles) {
+ tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
+ }
+ NOT_PRODUCT(if (!boot_method_oop->is_oop()) { tty->print_cr("*** boot MH of "PTR_FORMAT" = "PTR_FORMAT, ik(), boot_method_oop); ik()->print(); });
+ assert(boot_method_oop->is_oop()
+ && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
+ return Handle(THREAD, boot_method_oop);
}
+ boot_method_oop = NULL; // GC safety
// call java.dyn.Linkage::findBootstrapMethod(caller, sbk)
JavaCallArguments args(Handle(THREAD, ik->java_mirror()));
@@ -2452,9 +2478,18 @@ Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
vmSymbols::findBootstrapMethod_name(),
vmSymbols::findBootstrapMethod_signature(),
&args, CHECK_(empty));
- oop boot_method_oop = (oop) result.get_jobject();
+ boot_method_oop = (oop) result.get_jobject();
if (boot_method_oop != NULL) {
+ if (TraceMethodHandles) {
+#ifndef PRODUCT
+ tty->print_cr("--------");
+ tty->print_cr("bootstrap method for "PTR_FORMAT" computed as "PTR_FORMAT":", ik(), boot_method_oop);
+ ik()->print();
+ boot_method_oop->print();
+ tty->print_cr("========");
+#endif //PRODUCT
+ }
assert(boot_method_oop->is_oop()
&& java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
// probably no race conditions, but let's be careful:
@@ -2463,6 +2498,14 @@ Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
else
boot_method_oop = ik->bootstrap_method();
} else {
+ if (TraceMethodHandles) {
+#ifndef PRODUCT
+ tty->print_cr("--------");
+ tty->print_cr("bootstrap method for "PTR_FORMAT" computed as NULL:", ik());
+ ik()->print();
+ tty->print_cr("========");
+#endif //PRODUCT
+ }
boot_method_oop = ik->bootstrap_method();
}
diff --git a/hotspot/src/share/vm/classfile/systemDictionary.hpp b/hotspot/src/share/vm/classfile/systemDictionary.hpp
index b7c82033628..03b2aeb6b42 100644
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,55 +82,55 @@ class SymbolPropertyTable;
#define WK_KLASSES_DO(template) \
/* well-known classes */ \
- template(object_klass, java_lang_Object, Pre) \
- template(string_klass, java_lang_String, Pre) \
- template(class_klass, java_lang_Class, Pre) \
- template(cloneable_klass, java_lang_Cloneable, Pre) \
- template(classloader_klass, java_lang_ClassLoader, Pre) \
- template(serializable_klass, java_io_Serializable, Pre) \
- template(system_klass, java_lang_System, Pre) \
- template(throwable_klass, java_lang_Throwable, Pre) \
- template(error_klass, java_lang_Error, Pre) \
- template(threaddeath_klass, java_lang_ThreadDeath, Pre) \
- template(exception_klass, java_lang_Exception, Pre) \
- template(runtime_exception_klass, java_lang_RuntimeException, Pre) \
- template(protectionDomain_klass, java_security_ProtectionDomain, Pre) \
+ template(Object_klass, java_lang_Object, Pre) \
+ template(String_klass, java_lang_String, Pre) \
+ template(Class_klass, java_lang_Class, Pre) \
+ template(Cloneable_klass, java_lang_Cloneable, Pre) \
+ template(ClassLoader_klass, java_lang_ClassLoader, Pre) \
+ template(Serializable_klass, java_io_Serializable, Pre) \
+ template(System_klass, java_lang_System, Pre) \
+ template(Throwable_klass, java_lang_Throwable, Pre) \
+ template(Error_klass, java_lang_Error, Pre) \
+ template(ThreadDeath_klass, java_lang_ThreadDeath, Pre) \
+ template(Exception_klass, java_lang_Exception, Pre) \
+ template(RuntimeException_klass, java_lang_RuntimeException, Pre) \
+ template(ProtectionDomain_klass, java_security_ProtectionDomain, Pre) \
template(AccessControlContext_klass, java_security_AccessControlContext, Pre) \
- template(classNotFoundException_klass, java_lang_ClassNotFoundException, Pre) \
- template(noClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre) \
- template(linkageError_klass, java_lang_LinkageError, Pre) \
+ template(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre) \
+ template(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre) \
+ template(LinkageError_klass, java_lang_LinkageError, Pre) \
template(ClassCastException_klass, java_lang_ClassCastException, Pre) \
template(ArrayStoreException_klass, java_lang_ArrayStoreException, Pre) \
- template(virtualMachineError_klass, java_lang_VirtualMachineError, Pre) \
+ template(VirtualMachineError_klass, java_lang_VirtualMachineError, Pre) \
template(OutOfMemoryError_klass, java_lang_OutOfMemoryError, Pre) \
template(StackOverflowError_klass, java_lang_StackOverflowError, Pre) \
template(IllegalMonitorStateException_klass, java_lang_IllegalMonitorStateException, Pre) \
- template(reference_klass, java_lang_ref_Reference, Pre) \
+ template(Reference_klass, java_lang_ref_Reference, Pre) \
\
/* Preload ref klasses and set reference types */ \
- template(soft_reference_klass, java_lang_ref_SoftReference, Pre) \
- template(weak_reference_klass, java_lang_ref_WeakReference, Pre) \
- template(final_reference_klass, java_lang_ref_FinalReference, Pre) \
- template(phantom_reference_klass, java_lang_ref_PhantomReference, Pre) \
- template(finalizer_klass, java_lang_ref_Finalizer, Pre) \
+ template(SoftReference_klass, java_lang_ref_SoftReference, Pre) \
+ template(WeakReference_klass, java_lang_ref_WeakReference, Pre) \
+ template(FinalReference_klass, java_lang_ref_FinalReference, Pre) \
+ template(PhantomReference_klass, java_lang_ref_PhantomReference, Pre) \
+ template(Finalizer_klass, java_lang_ref_Finalizer, Pre) \
\
- template(thread_klass, java_lang_Thread, Pre) \
- template(threadGroup_klass, java_lang_ThreadGroup, Pre) \
- template(properties_klass, java_util_Properties, Pre) \
- template(reflect_accessible_object_klass, java_lang_reflect_AccessibleObject, Pre) \
- template(reflect_field_klass, java_lang_reflect_Field, Pre) \
- template(reflect_method_klass, java_lang_reflect_Method, Pre) \
- template(reflect_constructor_klass, java_lang_reflect_Constructor, Pre) \
+ template(Thread_klass, java_lang_Thread, Pre) \
+ template(ThreadGroup_klass, java_lang_ThreadGroup, Pre) \
+ template(Properties_klass, java_util_Properties, Pre) \
+ template(reflect_AccessibleObject_klass, java_lang_reflect_AccessibleObject, Pre) \
+ template(reflect_Field_klass, java_lang_reflect_Field, Pre) \
+ template(reflect_Method_klass, java_lang_reflect_Method, Pre) \
+ template(reflect_Constructor_klass, java_lang_reflect_Constructor, Pre) \
\
/* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
- template(reflect_magic_klass, sun_reflect_MagicAccessorImpl, Opt) \
- template(reflect_method_accessor_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
- template(reflect_constructor_accessor_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
- template(reflect_delegating_classloader_klass, sun_reflect_DelegatingClassLoader, Opt) \
- template(reflect_constant_pool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15) \
- template(reflect_unsafe_static_field_accessor_impl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
+ template(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt) \
+ template(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
+ template(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
+ template(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt) \
+ template(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15) \
+ template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
template(MethodHandle_klass, java_dyn_MethodHandle, Opt) \
@@ -144,16 +144,14 @@ class SymbolPropertyTable;
template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \
template(Linkage_klass, java_dyn_Linkage, Opt) \
template(CallSite_klass, java_dyn_CallSite, Opt) \
- template(CallSiteImpl_klass, sun_dyn_CallSiteImpl, Opt) \
- template(Dynamic_klass, java_dyn_Dynamic, Opt) \
- /* Note: MethodHandle must be first, and Dynamic last in group */ \
+ template(InvokeDynamic_klass, java_dyn_InvokeDynamic, Opt) \
+ /* Note: MethodHandle must be first, and InvokeDynamic last in group */ \
\
- template(vector_klass, java_util_Vector, Pre) \
- template(hashtable_klass, java_util_Hashtable, Pre) \
- template(stringBuffer_klass, java_lang_StringBuffer, Pre) \
+ template(StringBuffer_klass, java_lang_StringBuffer, Pre) \
+ template(StringBuilder_klass, java_lang_StringBuilder, Pre) \
\
/* It's NULL in non-1.4 JDKs. */ \
- template(stackTraceElement_klass, java_lang_StackTraceElement, Opt) \
+ template(StackTraceElement_klass, java_lang_StackTraceElement, Opt) \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
template(java_nio_Buffer_klass, java_nio_Buffer, Opt) \
@@ -164,14 +162,14 @@ class SymbolPropertyTable;
template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
\
/* Preload boxing klasses */ \
- template(boolean_klass, java_lang_Boolean, Pre) \
- template(char_klass, java_lang_Character, Pre) \
- template(float_klass, java_lang_Float, Pre) \
- template(double_klass, java_lang_Double, Pre) \
- template(byte_klass, java_lang_Byte, Pre) \
- template(short_klass, java_lang_Short, Pre) \
- template(int_klass, java_lang_Integer, Pre) \
- template(long_klass, java_lang_Long, Pre) \
+ template(Boolean_klass, java_lang_Boolean, Pre) \
+ template(Character_klass, java_lang_Character, Pre) \
+ template(Float_klass, java_lang_Float, Pre) \
+ template(Double_klass, java_lang_Double, Pre) \
+ template(Byte_klass, java_lang_Byte, Pre) \
+ template(Short_klass, java_lang_Short, Pre) \
+ template(Integer_klass, java_lang_Integer, Pre) \
+ template(Long_klass, java_lang_Long, Pre) \
/*end*/
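// Illustrative sketch (not part of this changeset): the X-macro pattern behind
// WK_KLASSES_DO above. One list macro is expanded several times with different
// 'template' arguments -- here once to build an enum of well-known-klass ids
// and once to build a parallel table of class names. The real macro entries
// carry a third option argument (Pre/Opt/...) which this demo omits; all
// DEMO_* names are hypothetical.
#include <cassert>
#include <cstring>

#define DEMO_KLASSES_DO(template_)                  \
  template_(Object_klass, "java/lang/Object")       \
  template_(String_klass, "java/lang/String")       \
  template_(Class_klass,  "java/lang/Class")

#define AS_ENUM(id, name) id,
enum DemoWKID { DEMO_KLASSES_DO(AS_ENUM) DEMO_WKID_LIMIT };
#undef AS_ENUM

#define AS_NAME(id, name) name,
static const char* demo_names[] = { DEMO_KLASSES_DO(AS_NAME) };
#undef AS_NAME

int main() {
  assert(DEMO_WKID_LIMIT == 3);
  assert(strcmp(demo_names[String_klass], "java/lang/String") == 0);
  return 0;
}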
@@ -438,8 +436,8 @@ public:
// Tells whether ClassLoader.checkPackageAccess is present
static bool has_checkPackageAccess() { return _has_checkPackageAccess; }
- static bool class_klass_loaded() { return WK_KLASS(class_klass) != NULL; }
- static bool cloneable_klass_loaded() { return WK_KLASS(cloneable_klass) != NULL; }
+ static bool Class_klass_loaded() { return WK_KLASS(Class_klass) != NULL; }
+ static bool Cloneable_klass_loaded() { return WK_KLASS(Cloneable_klass) != NULL; }
// Returns default system loader
static oop java_system_loader();
@@ -578,6 +576,7 @@ private:
static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
static void check_loader_lock_contention(Handle loader_lock, TRAPS);
static bool is_parallelCapable(Handle class_loader);
+ static bool is_parallelDefine(Handle class_loader);
static klassOop find_shared_class(symbolHandle class_name);
diff --git a/hotspot/src/share/vm/classfile/verifier.cpp b/hotspot/src/share/vm/classfile/verifier.cpp
index dd947d19cfb..3a9136c7b9b 100644
--- a/hotspot/src/share/vm/classfile/verifier.cpp
+++ b/hotspot/src/share/vm/classfile/verifier.cpp
@@ -143,7 +143,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
symbolOop name = klass->name();
- klassOop refl_magic_klass = SystemDictionary::reflect_magic_klass();
+ klassOop refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
return (should_verify_for(klass->class_loader(), should_verify_class) &&
// return if the class is a bootstrapping class
@@ -1903,17 +1903,8 @@ void ClassVerifier::verify_invoke_instructions(
verify_cp_type(index, cp, types, CHECK_VERIFY(this));
// Get method name and signature
- symbolHandle method_name;
- symbolHandle method_sig;
- if (opcode == Bytecodes::_invokedynamic) {
- int name_index = cp->name_ref_index_at(index);
- int sig_index = cp->signature_ref_index_at(index);
- method_name = symbolHandle(THREAD, cp->symbol_at(name_index));
- method_sig = symbolHandle(THREAD, cp->symbol_at(sig_index));
- } else {
- method_name = symbolHandle(THREAD, cp->name_ref_at(index));
- method_sig = symbolHandle(THREAD, cp->signature_ref_at(index));
- }
+ symbolHandle method_name(THREAD, cp->name_ref_at(index));
+ symbolHandle method_sig(THREAD, cp->signature_ref_at(index));
if (!SignatureVerifier::is_valid_method_signature(method_sig)) {
class_format_error(
diff --git a/hotspot/src/share/vm/classfile/vmSymbols.cpp b/hotspot/src/share/vm/classfile/vmSymbols.cpp
index c805af344e7..6cc7f3c2d33 100644
--- a/hotspot/src/share/vm/classfile/vmSymbols.cpp
+++ b/hotspot/src/share/vm/classfile/vmSymbols.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,7 @@ static const char* vm_symbol_bodies = VM_SYMBOLS_DO(VM_SYMBOL_BODY, VM_ALIAS_IGN
void vmSymbols::initialize(TRAPS) {
assert((int)SID_LIMIT <= (1< (1<find_method(mname, msig);
+}
+
#define VM_INTRINSIC_INITIALIZE(id, klass, name, sig, flags) #id "\0"
static const char* vm_intrinsic_name_bodies =
@@ -303,6 +356,11 @@ inline bool match_F_R(jshort flags) {
const int neg = JVM_ACC_STATIC | JVM_ACC_SYNCHRONIZED;
return (flags & (req | neg)) == req;
}
+inline bool match_F_Y(jshort flags) {
+ const int req = JVM_ACC_SYNCHRONIZED;
+ const int neg = JVM_ACC_STATIC;
+ return (flags & (req | neg)) == req;
+}
inline bool match_F_RN(jshort flags) {
const int req = JVM_ACC_NATIVE;
const int neg = JVM_ACC_STATIC | JVM_ACC_SYNCHRONIZED;
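// Illustrative sketch (not part of this changeset): how the req/neg mask idiom
// shared by match_F_R, the new match_F_Y and match_F_RN behaves. The constants
// are the class-file access-flag values from the JVM specification; the
// function mirrors match_F_Y -- synchronized required, static forbidden, any
// other bit (e.g. native) ignored.
#include <cassert>

const int ACC_STATIC       = 0x0008;
const int ACC_SYNCHRONIZED = 0x0020;
const int ACC_NATIVE       = 0x0100;

inline bool matches_F_Y_like(int flags) {
  const int req = ACC_SYNCHRONIZED;              // must be set
  const int neg = ACC_STATIC;                    // must be clear
  return (flags & (req | neg)) == req;           // all other bits are ignored
}

int main() {
  assert( matches_F_Y_like(ACC_SYNCHRONIZED));                 // plain synchronized method
  assert( matches_F_Y_like(ACC_SYNCHRONIZED | ACC_NATIVE));    // extra bits do not matter
  assert(!matches_F_Y_like(ACC_SYNCHRONIZED | ACC_STATIC));    // forbidden bit present
  assert(!matches_F_Y_like(0));                                // required bit missing
  return 0;
}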
@@ -325,15 +383,15 @@ inline bool match_F_RNY(jshort flags) {
}
// These are for forming case labels:
-#define ID3(x, y, z) (( jint)(z) + \
- ((jint)(y) << vmSymbols::log2_SID_LIMIT) + \
- ((jint)(x) << (2*vmSymbols::log2_SID_LIMIT)) )
+#define ID3(x, y, z) (( jlong)(z) + \
+ ((jlong)(y) << vmSymbols::log2_SID_LIMIT) + \
+ ((jlong)(x) << (2*vmSymbols::log2_SID_LIMIT)) )
#define SID_ENUM(n) vmSymbols::VM_SYMBOL_ENUM_NAME(n)
-vmIntrinsics::ID vmIntrinsics::find_id(vmSymbols::SID holder,
- vmSymbols::SID name,
- vmSymbols::SID sig,
- jshort flags) {
+vmIntrinsics::ID vmIntrinsics::find_id_impl(vmSymbols::SID holder,
+ vmSymbols::SID name,
+ vmSymbols::SID sig,
+ jshort flags) {
assert((int)vmSymbols::SID_LIMIT <= (1<> shift) & mask) == 1021, "");
+ return vmSymbols::SID( (info >> shift) & mask );
}
vmSymbols::SID vmIntrinsics::name_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
- case id: return SID_ENUM(name);
-
- switch (id) {
- VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
- VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
- }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
- return vmSymbols::NO_SID;
+ jlong info = intrinsic_info(id);
+ int shift = vmSymbols::log2_SID_LIMIT + log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
+ assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1022, "");
+ return vmSymbols::SID( (info >> shift) & mask );
}
vmSymbols::SID vmIntrinsics::signature_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
- case id: return SID_ENUM(sig);
-
- switch (id) {
- VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
- VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
- }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
- return vmSymbols::NO_SID;
+ jlong info = intrinsic_info(id);
+ int shift = log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
+ assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1023, "");
+ return vmSymbols::SID( (info >> shift) & mask );
}
vmIntrinsics::Flags vmIntrinsics::flags_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
- case id: return fcode;
-
- switch (id) {
- VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
- VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
- }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
- return F_none;
+ jlong info = intrinsic_info(id);
+ int shift = 0, mask = right_n_bits(log2_FLAG_LIMIT);
+ assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 15, "");
+ return Flags( (info >> shift) & mask );
}
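// Illustrative sketch (not part of this changeset): the ID4-style bit packing
// that the rewritten class_for/name_for/signature_for/flags_for decode from
// intrinsic_info(id). LOG2_SID_LIMIT = 11 and LOG2_FLAG_LIMIT = 4 are
// assumptions chosen to match the shifts visible above; three 11-bit symbol
// ids already exceed the 31 value bits of a jint, and the flags field adds
// four more, which is why the case-label packing was widened to jlong.
#include <cassert>
#include <cstdint>

const int LOG2_SID_LIMIT  = 11;   // assumed width of one symbol-id field
const int LOG2_FLAG_LIMIT = 4;    // assumed width of the flags field

inline int64_t pack_id4(int64_t klass, int64_t name, int64_t sig, int64_t flags) {
  return (((klass << LOG2_SID_LIMIT | name) << LOG2_SID_LIMIT | sig)
            << LOG2_FLAG_LIMIT) | flags;
}

inline int field(int64_t info, int shift, int width) {
  return (int)((info >> shift) & ((1 << width) - 1));    // right_n_bits(width)
}

int main() {
  int64_t info = pack_id4(1021, 1022, 1023, 15);  // same probe values as the asserts above
  assert(field(info, 2*LOG2_SID_LIMIT + LOG2_FLAG_LIMIT, LOG2_SID_LIMIT) == 1021);  // class
  assert(field(info,   LOG2_SID_LIMIT + LOG2_FLAG_LIMIT, LOG2_SID_LIMIT) == 1022);  // name
  assert(field(info,                    LOG2_FLAG_LIMIT, LOG2_SID_LIMIT) == 1023);  // signature
  assert(field(info,                                 0,  LOG2_FLAG_LIMIT) == 15);   // flags
  return 0;
}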
diff --git a/hotspot/src/share/vm/classfile/vmSymbols.hpp b/hotspot/src/share/vm/classfile/vmSymbols.hpp
index 04bb9369205..0b4652157d1 100644
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp
@@ -84,6 +84,7 @@
template(java_lang_reflect_Field, "java/lang/reflect/Field") \
template(java_lang_reflect_Array, "java/lang/reflect/Array") \
template(java_lang_StringBuffer, "java/lang/StringBuffer") \
+ template(java_lang_StringBuilder, "java/lang/StringBuilder") \
template(java_lang_CharSequence, "java/lang/CharSequence") \
template(java_security_AccessControlContext, "java/security/AccessControlContext") \
template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \
@@ -104,6 +105,7 @@
template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \
template(sun_jkernel_DownloadManager, "sun/jkernel/DownloadManager") \
template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
+ template(setBootClassLoaderHook_name, "setBootClassLoaderHook") \
\
/* class file format tags */ \
template(tag_source_file, "SourceFile") \
@@ -217,7 +219,7 @@
template(base_name, "base") \
\
/* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \
- template(java_dyn_Dynamic, "java/dyn/Dynamic") \
+ template(java_dyn_InvokeDynamic, "java/dyn/InvokeDynamic") \
template(java_dyn_Linkage, "java/dyn/Linkage") \
template(java_dyn_CallSite, "java/dyn/CallSite") \
template(java_dyn_MethodHandle, "java/dyn/MethodHandle") \
@@ -233,10 +235,9 @@
template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") \
template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") \
template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") \
- template(sun_dyn_CallSiteImpl, "sun/dyn/CallSiteImpl") \
template(makeImpl_name, "makeImpl") /*MethodType::makeImpl*/ \
template(makeImpl_signature, "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \
- template(makeSite_name, "makeSite") /*CallSiteImpl::makeImpl*/ \
+ template(makeSite_name, "makeSite") /*CallSite::makeSite*/ \
template(makeSite_signature, "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \
template(findBootstrapMethod_name, "findBootstrapMethod") \
template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \
@@ -335,6 +336,7 @@
template(ptypes_name, "ptypes") \
template(form_name, "form") \
template(erasedType_name, "erasedType") \
+ template(append_name, "append") \
\
/* non-intrinsic name/signature pairs: */ \
template(register_method_name, "register") \
@@ -345,9 +347,14 @@
\
/* common signatures names */ \
template(void_method_signature, "()V") \
+ template(void_boolean_signature, "()Z") \
+ template(void_byte_signature, "()B") \
+ template(void_char_signature, "()C") \
+ template(void_short_signature, "()S") \
template(void_int_signature, "()I") \
template(void_long_signature, "()J") \
- template(void_boolean_signature, "()Z") \
+ template(void_float_signature, "()F") \
+ template(void_double_signature, "()D") \
template(int_void_signature, "(I)V") \
template(int_int_signature, "(I)I") \
template(int_bool_signature, "(I)Z") \
@@ -416,6 +423,13 @@
template(string_signature, "Ljava/lang/String;") \
template(reference_signature, "Ljava/lang/ref/Reference;") \
template(concurrenthashmap_signature, "Ljava/util/concurrent/ConcurrentHashMap;") \
+ template(String_StringBuilder_signature, "(Ljava/lang/String;)Ljava/lang/StringBuilder;") \
+ template(int_StringBuilder_signature, "(I)Ljava/lang/StringBuilder;") \
+ template(char_StringBuilder_signature, "(C)Ljava/lang/StringBuilder;") \
+ template(String_StringBuffer_signature, "(Ljava/lang/String;)Ljava/lang/StringBuffer;") \
+ template(int_StringBuffer_signature, "(I)Ljava/lang/StringBuffer;") \
+ template(char_StringBuffer_signature, "(C)Ljava/lang/StringBuffer;") \
+ template(int_String_signature, "(I)Ljava/lang/String;") \
/* signature symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE) \
\
@@ -815,12 +829,76 @@
/*the compiler does have special inlining code for these; bytecode inline is just fine */ \
\
do_intrinsic(_fillInStackTrace, java_lang_Throwable, fillInStackTrace_name, void_throwable_signature, F_RNY) \
- \
- do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
- /* (symbol object_initializer_name defined above) */ \
- \
+ \
+ do_intrinsic(_StringBuilder_void, java_lang_StringBuilder, object_initializer_name, void_method_signature, F_R) \
+ do_intrinsic(_StringBuilder_int, java_lang_StringBuilder, object_initializer_name, int_void_signature, F_R) \
+ do_intrinsic(_StringBuilder_String, java_lang_StringBuilder, object_initializer_name, string_void_signature, F_R) \
+ \
+ do_intrinsic(_StringBuilder_append_char, java_lang_StringBuilder, append_name, char_StringBuilder_signature, F_R) \
+ do_intrinsic(_StringBuilder_append_int, java_lang_StringBuilder, append_name, int_StringBuilder_signature, F_R) \
+ do_intrinsic(_StringBuilder_append_String, java_lang_StringBuilder, append_name, String_StringBuilder_signature, F_R) \
+ \
+ do_intrinsic(_StringBuilder_toString, java_lang_StringBuilder, toString_name, void_string_signature, F_R) \
+ \
+ do_intrinsic(_StringBuffer_void, java_lang_StringBuffer, object_initializer_name, void_method_signature, F_R) \
+ do_intrinsic(_StringBuffer_int, java_lang_StringBuffer, object_initializer_name, int_void_signature, F_R) \
+ do_intrinsic(_StringBuffer_String, java_lang_StringBuffer, object_initializer_name, string_void_signature, F_R) \
+ \
+ do_intrinsic(_StringBuffer_append_char, java_lang_StringBuffer, append_name, char_StringBuffer_signature, F_Y) \
+ do_intrinsic(_StringBuffer_append_int, java_lang_StringBuffer, append_name, int_StringBuffer_signature, F_Y) \
+ do_intrinsic(_StringBuffer_append_String, java_lang_StringBuffer, append_name, String_StringBuffer_signature, F_Y) \
+ \
+ do_intrinsic(_StringBuffer_toString, java_lang_StringBuffer, toString_name, void_string_signature, F_Y) \
+ \
+ do_intrinsic(_Integer_toString, java_lang_Integer, toString_name, int_String_signature, F_S) \
+ \
+ do_intrinsic(_String_String, java_lang_String, object_initializer_name, string_void_signature, F_R) \
+ \
+ do_intrinsic(_Object_init, java_lang_Object, object_initializer_name, void_method_signature, F_R) \
+ /* (symbol object_initializer_name defined above) */ \
+ \
do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
/* (symbols invoke_name and invoke_signature defined above) */ \
+ do_intrinsic(_checkSpreadArgument, sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
+ do_name( checkSpreadArgument_name, "checkSpreadArgument") \
+ do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \
+ \
+ /* unboxing methods: */ \
+ do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \
+ do_name( booleanValue_name, "booleanValue") \
+ do_intrinsic(_byteValue, java_lang_Byte, byteValue_name, void_byte_signature, F_R) \
+ do_name( byteValue_name, "byteValue") \
+ do_intrinsic(_charValue, java_lang_Character, charValue_name, void_char_signature, F_R) \
+ do_name( charValue_name, "charValue") \
+ do_intrinsic(_shortValue, java_lang_Short, shortValue_name, void_short_signature, F_R) \
+ do_name( shortValue_name, "shortValue") \
+ do_intrinsic(_intValue, java_lang_Integer, intValue_name, void_int_signature, F_R) \
+ do_name( intValue_name, "intValue") \
+ do_intrinsic(_longValue, java_lang_Long, longValue_name, void_long_signature, F_R) \
+ do_name( longValue_name, "longValue") \
+ do_intrinsic(_floatValue, java_lang_Float, floatValue_name, void_float_signature, F_R) \
+ do_name( floatValue_name, "floatValue") \
+ do_intrinsic(_doubleValue, java_lang_Double, doubleValue_name, void_double_signature, F_R) \
+ do_name( doubleValue_name, "doubleValue") \
+ \
+ /* boxing methods: */ \
+ do_name( valueOf_name, "valueOf") \
+ do_intrinsic(_Boolean_valueOf, java_lang_Boolean, valueOf_name, Boolean_valueOf_signature, F_S) \
+ do_name( Boolean_valueOf_signature, "(Z)Ljava/lang/Boolean;") \
+ do_intrinsic(_Byte_valueOf, java_lang_Byte, valueOf_name, Byte_valueOf_signature, F_S) \
+ do_name( Byte_valueOf_signature, "(B)Ljava/lang/Byte;") \
+ do_intrinsic(_Character_valueOf, java_lang_Character, valueOf_name, Character_valueOf_signature, F_S) \
+ do_name( Character_valueOf_signature, "(C)Ljava/lang/Character;") \
+ do_intrinsic(_Short_valueOf, java_lang_Short, valueOf_name, Short_valueOf_signature, F_S) \
+ do_name( Short_valueOf_signature, "(S)Ljava/lang/Short;") \
+ do_intrinsic(_Integer_valueOf, java_lang_Integer, valueOf_name, Integer_valueOf_signature, F_S) \
+ do_name( Integer_valueOf_signature, "(I)Ljava/lang/Integer;") \
+ do_intrinsic(_Long_valueOf, java_lang_Long, valueOf_name, Long_valueOf_signature, F_S) \
+ do_name( Long_valueOf_signature, "(J)Ljava/lang/Long;") \
+ do_intrinsic(_Float_valueOf, java_lang_Float, valueOf_name, Float_valueOf_signature, F_S) \
+ do_name( Float_valueOf_signature, "(F)Ljava/lang/Float;") \
+ do_intrinsic(_Double_valueOf, java_lang_Double, valueOf_name, Double_valueOf_signature, F_S) \
+ do_name( Double_valueOf_signature, "(D)Ljava/lang/Double;") \
\
/*end*/
@@ -946,11 +1024,17 @@ class vmIntrinsics: AllStatic {
enum Flags {
// AccessFlags syndromes relevant to intrinsics.
F_none = 0,
- F_R, // !static !synchronized (R="regular")
- F_S, // static !synchronized
- F_RN, // !static native !synchronized
- F_SN, // static native !synchronized
- F_RNY // !static native synchronized
+ F_R, // !static ?native !synchronized (R="regular")
+ F_S, // static ?native !synchronized
+ F_Y, // !static ?native synchronized
+ F_RN, // !static native !synchronized
+ F_SN, // static native !synchronized
+ F_RNY, // !static native synchronized
+
+ FLAG_LIMIT
+ };
+ enum {
+ log2_FLAG_LIMIT = 4 // checked by an assert at start-up
};
public:
@@ -962,15 +1046,32 @@ public:
static const char* name_at(ID id);
+private:
+ static ID find_id_impl(vmSymbols::SID holder,
+ vmSymbols::SID name,
+ vmSymbols::SID sig,
+ jshort flags);
+
+public:
// Given a method's class, name, signature, and access flags, report its ID.
static ID find_id(vmSymbols::SID holder,
vmSymbols::SID name,
vmSymbols::SID sig,
- jshort flags);
+ jshort flags) {
+ ID id = find_id_impl(holder, name, sig, flags);
+#ifdef ASSERT
+ // ID _none does not hold the following asserts.
+ if (id == _none) return id;
+#endif
+ assert( class_for(id) == holder, "correct id");
+ assert( name_for(id) == name, "correct id");
+ assert(signature_for(id) == sig, "correct id");
+ return id;
+ }
static void verify_method(ID actual_id, methodOop m) PRODUCT_RETURN;
- // No need for these in the product:
+ // Find out the symbols behind an intrinsic:
static vmSymbols::SID class_for(ID id);
static vmSymbols::SID name_for(ID id);
static vmSymbols::SID signature_for(ID id);
@@ -980,4 +1081,11 @@ public:
// Access to intrinsic methods:
static methodOop method_for(ID id);
+
+ // Wrapper object methods:
+ static ID for_boxing(BasicType type);
+ static ID for_unboxing(BasicType type);
+
+ // Raw conversion:
+ static ID for_raw_conversion(BasicType src, BasicType dest);
};
diff --git a/hotspot/src/share/vm/code/codeBlob.hpp b/hotspot/src/share/vm/code/codeBlob.hpp
index 81acc81fcac..bbd430a14c6 100644
--- a/hotspot/src/share/vm/code/codeBlob.hpp
+++ b/hotspot/src/share/vm/code/codeBlob.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -102,6 +102,9 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
virtual bool is_compiled_by_c2() const { return false; }
virtual bool is_compiled_by_c1() const { return false; }
+ // Casting
+ nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }
+
// Boundaries
address header_begin() const { return (address) this; }
address header_end() const { return ((address) this) + _header_size; };
@@ -201,7 +204,8 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
virtual void print_value_on(outputStream* st) const PRODUCT_RETURN;
// Print the comment associated with offset on stream, if there is one
- void print_block_comment(outputStream* stream, intptr_t offset) {
+ virtual void print_block_comment(outputStream* stream, address block_begin) {
+ intptr_t offset = (intptr_t)(block_begin - instructions_begin());
_comments.print_block_comment(stream, offset);
}
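// Illustrative sketch (not part of this changeset): the checked-downcast helper
// pattern added above as as_nmethod_or_null() -- query the object's kind and
// return NULL instead of performing an unchecked cast. CodeBlobLike and
// NMethodLike are hypothetical stand-ins for CodeBlob/nmethod.
struct CodeBlobLike {
  virtual ~CodeBlobLike() {}
  virtual bool is_nmethod() const { return false; }
};

struct NMethodLike : CodeBlobLike {
  virtual bool is_nmethod() const { return true; }
};

inline NMethodLike* as_nmethod_or_null(CodeBlobLike* cb) {
  return cb->is_nmethod() ? static_cast<NMethodLike*>(cb) : 0;
}

int main() {
  NMethodLike nm;
  CodeBlobLike blob;
  return (as_nmethod_or_null(&nm) != 0 && as_nmethod_or_null(&blob) == 0) ? 0 : 1;
}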
diff --git a/hotspot/src/share/vm/code/debugInfoRec.cpp b/hotspot/src/share/vm/code/debugInfoRec.cpp
index fa24eb7c4c8..a1cac29439f 100644
--- a/hotspot/src/share/vm/code/debugInfoRec.cpp
+++ b/hotspot/src/share/vm/code/debugInfoRec.cpp
@@ -281,6 +281,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
ciMethod* method,
int bci,
bool reexecute,
+ bool is_method_handle_invoke,
DebugToken* locals,
DebugToken* expressions,
DebugToken* monitors) {
@@ -292,8 +293,9 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
int stream_offset = stream()->position();
last_pd->set_scope_decode_offset(stream_offset);
- // Record reexecute bit into pcDesc
+ // Record flags into pcDesc.
last_pd->set_should_reexecute(reexecute);
+ last_pd->set_is_method_handle_invoke(is_method_handle_invoke);
// serialize sender stream offset
stream()->write_int(sender_stream_offset);
diff --git a/hotspot/src/share/vm/code/debugInfoRec.hpp b/hotspot/src/share/vm/code/debugInfoRec.hpp
index bb896adeae2..c67efa09b25 100644
--- a/hotspot/src/share/vm/code/debugInfoRec.hpp
+++ b/hotspot/src/share/vm/code/debugInfoRec.hpp
@@ -88,6 +88,7 @@ class DebugInformationRecorder: public ResourceObj {
ciMethod* method,
int bci,
bool reexecute,
+ bool is_method_handle_invoke = false,
DebugToken* locals = NULL,
DebugToken* expressions = NULL,
DebugToken* monitors = NULL);
diff --git a/hotspot/src/share/vm/code/dependencies.cpp b/hotspot/src/share/vm/code/dependencies.cpp
index 0d38dc7c2f6..aa476fd7c4e 100644
--- a/hotspot/src/share/vm/code/dependencies.cpp
+++ b/hotspot/src/share/vm/code/dependencies.cpp
@@ -1528,19 +1528,23 @@ void DepChange::print() {
int nsup = 0, nint = 0;
for (ContextStream str(*this); str.next(); ) {
klassOop k = str.klass();
- switch (str._change_type) {
+ switch (str.change_type()) {
case Change_new_type:
tty->print_cr(" dependee = %s", instanceKlass::cast(k)->external_name());
break;
case Change_new_sub:
- if (!WizardMode)
- ++nsup;
- else tty->print_cr(" context super = %s", instanceKlass::cast(k)->external_name());
+ if (!WizardMode) {
+ ++nsup;
+ } else {
+ tty->print_cr(" context super = %s", instanceKlass::cast(k)->external_name());
+ }
break;
case Change_new_impl:
- if (!WizardMode)
- ++nint;
- else tty->print_cr(" context interface = %s", instanceKlass::cast(k)->external_name());
+ if (!WizardMode) {
+ ++nint;
+ } else {
+ tty->print_cr(" context interface = %s", instanceKlass::cast(k)->external_name());
+ }
break;
}
}
diff --git a/hotspot/src/share/vm/code/dependencies.hpp b/hotspot/src/share/vm/code/dependencies.hpp
index faf98b36537..ae3c077b4a8 100644
--- a/hotspot/src/share/vm/code/dependencies.hpp
+++ b/hotspot/src/share/vm/code/dependencies.hpp
@@ -470,7 +470,7 @@ class Dependencies: public ResourceObj {
// super types can be context types for a relevant dependency, which the
// new type could invalidate.
class DepChange : public StackObj {
- private:
+ public:
enum ChangeType {
NO_CHANGE = 0, // an uninvolved klass
Change_new_type, // a newly loaded type
@@ -480,6 +480,7 @@ class DepChange : public StackObj {
Start_Klass = CHANGE_LIMIT // internal indicator for ContextStream
};
+ private:
// each change set is rooted in exactly one new type (at present):
KlassHandle _new_type;
@@ -510,15 +511,15 @@ class DepChange : public StackObj {
// }
class ContextStream : public StackObj {
private:
- DepChange& _changes;
+ DepChange& _changes;
friend class DepChange;
// iteration variables:
- ChangeType _change_type;
- klassOop _klass;
- objArrayOop _ti_base; // i.e., transitive_interfaces
- int _ti_index;
- int _ti_limit;
+ ChangeType _change_type;
+ klassOop _klass;
+ objArrayOop _ti_base; // i.e., transitive_interfaces
+ int _ti_index;
+ int _ti_limit;
// start at the beginning:
void start() {
@@ -530,11 +531,11 @@ class DepChange : public StackObj {
_ti_limit = 0;
}
+ public:
ContextStream(DepChange& changes)
: _changes(changes)
{ start(); }
- public:
ContextStream(DepChange& changes, No_Safepoint_Verifier& nsv)
: _changes(changes)
// the nsv argument makes it safe to hold oops like _klass
@@ -542,6 +543,7 @@ class DepChange : public StackObj {
bool next();
+ ChangeType change_type() { return _change_type; }
klassOop klass() { return _klass; }
};
friend class DepChange::ContextStream;
diff --git a/hotspot/src/share/vm/code/nmethod.cpp b/hotspot/src/share/vm/code/nmethod.cpp
index 7f7ca11750f..82cfc7631d1 100644
--- a/hotspot/src/share/vm/code/nmethod.cpp
+++ b/hotspot/src/share/vm/code/nmethod.cpp
@@ -56,13 +56,13 @@ HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
#endif
bool nmethod::is_compiled_by_c1() const {
+ if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
if (is_native_method()) return false;
- assert(compiler() != NULL, "must be");
return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
+ if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
if (is_native_method()) return false;
- assert(compiler() != NULL, "must be");
return compiler()->is_c2();
}
@@ -414,9 +414,8 @@ int nmethod::total_size() const {
}
const char* nmethod::compile_kind() const {
- if (method() == NULL) return "unloaded";
- if (is_native_method()) return "c2n";
if (is_osr_method()) return "osr";
+ if (method() != NULL && is_native_method()) return "c2n";
return NULL;
}
@@ -1127,6 +1126,9 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
}
flags.state = unloaded;
+ // Log the unloading.
+ log_state_change();
+
// The methodOop is gone at this point
assert(_method == NULL, "Tautology");
@@ -1137,8 +1139,6 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
void nmethod::invalidate_osr_method() {
assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
- if (_entry_bci != InvalidOSREntryBci)
- inc_decompile_count();
// Remove from list of active nmethods
if (method() != NULL)
instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
@@ -1146,59 +1146,63 @@ void nmethod::invalidate_osr_method() {
_entry_bci = InvalidOSREntryBci;
}
-void nmethod::log_state_change(int state) const {
+void nmethod::log_state_change() const {
if (LogCompilation) {
if (xtty != NULL) {
ttyLocker ttyl; // keep the following output all in one block
- xtty->begin_elem("make_not_entrant %sthread='" UINTX_FORMAT "'",
- (state == zombie ? "zombie='1' " : ""),
- os::current_thread_id());
+ if (flags.state == unloaded) {
+ xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
+ os::current_thread_id());
+ } else {
+ xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
+ os::current_thread_id(),
+ (flags.state == zombie ? " zombie='1'" : ""));
+ }
log_identity(xtty);
xtty->stamp();
xtty->end_elem();
}
}
- if (PrintCompilation) {
- print_on(tty, state == zombie ? "made zombie " : "made not entrant ");
+ if (PrintCompilation && flags.state != unloaded) {
+ print_on(tty, flags.state == zombie ? "made zombie " : "made not entrant ");
tty->cr();
}
}
// Common functionality for both make_not_entrant and make_zombie
-void nmethod::make_not_entrant_or_zombie(int state) {
+bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
- // Code for an on-stack-replacement nmethod is removed when a class gets unloaded.
- // They never become zombie/non-entrant, so the nmethod sweeper will never remove
- // them. Instead the entry_bci is set to InvalidOSREntryBci, so the osr nmethod
- // will never be used anymore. That the nmethods only gets removed when class unloading
- // happens, make life much simpler, since the nmethods are not just going to disappear
- // out of the blue.
- if (is_osr_method()) {
- if (osr_entry_bci() != InvalidOSREntryBci) {
- // only log this once
- log_state_change(state);
- }
- invalidate_osr_method();
- return;
+ // If the method is already zombie there is nothing to do
+ if (is_zombie()) {
+ return false;
}
- // If the method is already zombie or set to the state we want, nothing to do
- if (is_zombie() || (state == not_entrant && is_not_entrant())) {
- return;
- }
-
- log_state_change(state);
-
// Make sure the nmethod is not flushed in case of a safepoint in code below.
nmethodLocker nml(this);
{
+ // invalidate osr nmethod before acquiring the patching lock since
+ // they both acquire leaf locks and we don't want a deadlock.
+ // This logic is equivalent to the logic below for patching the
+ // verified entry point of regular methods.
+ if (is_osr_method()) {
+ // this effectively makes the osr nmethod not entrant
+ invalidate_osr_method();
+ }
+
// Enter critical section. Does not block for safepoint.
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+
+ if (flags.state == state) {
+ // another thread already performed this transition so nothing
+ // to do, but return false to indicate this.
+ return false;
+ }
+
// The caller can be calling the method statically or through an inline
// cache call.
- if (!is_not_entrant()) {
+ if (!is_osr_method() && !is_not_entrant()) {
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub());
assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
@@ -1217,6 +1221,10 @@ void nmethod::make_not_entrant_or_zombie(int state) {
// Change state
flags.state = state;
+
+ // Log the transition once
+ log_state_change();
+
} // leave critical region under Patching_lock
if (state == not_entrant) {
@@ -1240,7 +1248,6 @@ void nmethod::make_not_entrant_or_zombie(int state) {
// It's a true state change, so mark the method as decompiled.
inc_decompile_count();
-
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earlier if the GC marked it for unloading).
@@ -1268,7 +1275,7 @@ void nmethod::make_not_entrant_or_zombie(int state) {
// Check whether method got unloaded at a safepoint before this,
// if so we can skip the flushing steps below
- if (method() == NULL) return;
+ if (method() == NULL) return true;
// Remove nmethod from method.
// We need to check if both the _code and _from_compiled_code_entry_point
@@ -1282,6 +1289,8 @@ void nmethod::make_not_entrant_or_zombie(int state) {
HandleMark hm;
method()->clear_code();
}
+
+ return true;
}
@@ -1715,9 +1724,9 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
if (!method()->is_native()) {
SimpleScopeDesc ssd(this, fr.pc());
Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci());
- bool is_static = call->is_invokestatic();
+ bool has_receiver = call->has_receiver();
symbolOop signature = call->signature();
- fr.oops_compiled_arguments_do(signature, is_static, reg_map, f);
+ fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
}
}
@@ -1754,6 +1763,14 @@ void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
"must end with a sentinel");
#endif //ASSERT
+ // Search for MethodHandle invokes and tag the nmethod.
+ for (int i = 0; i < count; i++) {
+ if (pcs[i].is_method_handle_invoke()) {
+ set_has_method_handle_invokes(true);
+ break;
+ }
+ }
+
int size = count * sizeof(PcDesc);
assert(scopes_pcs_size() >= size, "oob");
memcpy(scopes_pcs_begin(), pcs, size);
@@ -2020,6 +2037,18 @@ bool nmethod::is_deopt_pc(address pc) {
}
+// -----------------------------------------------------------------------------
+// MethodHandle
+
+bool nmethod::is_method_handle_return(address return_pc) {
+ if (!has_method_handle_invokes()) return false;
+ PcDesc* pd = pc_desc_at(return_pc);
+ if (pd == NULL)
+ return false;
+ return pd->is_method_handle_invoke();
+}
+
+
// -----------------------------------------------------------------------------
// Verification
@@ -2370,6 +2399,107 @@ ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
return NULL;
}
+void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) {
+ if (block_begin == entry_point()) stream->print_cr("[Entry Point]");
+ if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]");
+ if (block_begin == exception_begin()) stream->print_cr("[Exception Handler]");
+ if (block_begin == stub_begin()) stream->print_cr("[Stub Code]");
+ if (block_begin == consts_begin()) stream->print_cr("[Constants]");
+ if (block_begin == entry_point()) {
+ methodHandle m = method();
+ if (m.not_null()) {
+ stream->print(" # ");
+ m->print_value_on(stream);
+ stream->cr();
+ }
+ if (m.not_null() && !is_osr_method()) {
+ ResourceMark rm;
+ int sizeargs = m->size_of_parameters();
+ BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
+ VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
+ {
+ int sig_index = 0;
+ if (!m->is_static())
+ sig_bt[sig_index++] = T_OBJECT; // 'this'
+ for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
+ BasicType t = ss.type();
+ sig_bt[sig_index++] = t;
+ if (type2size[t] == 2) {
+ sig_bt[sig_index++] = T_VOID;
+ } else {
+ assert(type2size[t] == 1, "size is 1 or 2");
+ }
+ }
+ assert(sig_index == sizeargs, "");
+ }
+ const char* spname = "sp"; // make arch-specific?
+ intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
+ int stack_slot_offset = this->frame_size() * wordSize;
+ int tab1 = 14, tab2 = 24;
+ int sig_index = 0;
+ int arg_index = (m->is_static() ? 0 : -1);
+ bool did_old_sp = false;
+ for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
+ bool at_this = (arg_index == -1);
+ bool at_old_sp = false;
+ BasicType t = (at_this ? T_OBJECT : ss.type());
+ assert(t == sig_bt[sig_index], "sigs in sync");
+ if (at_this)
+ stream->print(" # this: ");
+ else
+ stream->print(" # parm%d: ", arg_index);
+ stream->move_to(tab1);
+ VMReg fst = regs[sig_index].first();
+ VMReg snd = regs[sig_index].second();
+ if (fst->is_reg()) {
+ stream->print("%s", fst->name());
+ if (snd->is_valid()) {
+ stream->print(":%s", snd->name());
+ }
+ } else if (fst->is_stack()) {
+ int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
+ if (offset == stack_slot_offset) at_old_sp = true;
+ stream->print("[%s+0x%x]", spname, offset);
+ } else {
+ stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
+ }
+ stream->print(" ");
+ stream->move_to(tab2);
+ stream->print("= ");
+ if (at_this) {
+ m->method_holder()->print_value_on(stream);
+ } else {
+ bool did_name = false;
+ if (!at_this && ss.is_object()) {
+ symbolOop name = ss.as_symbol_or_null();
+ if (name != NULL) {
+ name->print_value_on(stream);
+ did_name = true;
+ }
+ }
+ if (!did_name)
+ stream->print("%s", type2name(t));
+ }
+ if (at_old_sp) {
+ stream->print(" (%s of caller)", spname);
+ did_old_sp = true;
+ }
+ stream->cr();
+ sig_index += type2size[t];
+ arg_index += 1;
+ if (!at_this) ss.next();
+ }
+ if (!did_old_sp) {
+ stream->print(" # ");
+ stream->move_to(tab1);
+ stream->print("[%s+0x%x]", spname, stack_slot_offset);
+ stream->print(" (%s of caller)", spname);
+ stream->cr();
+ }
+ }
+ }
+}
+
void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
// First, find an oopmap in (begin, end].
// We use the odd half-closed interval so that oop maps and scope descs
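For readers following the nmethod state-machine change above: the new make_not_entrant_or_zombie contract (only the thread that actually performs the transition returns true; a racer that finds the work already done returns false) can be illustrated with a minimal standalone C++ sketch. The types below are hypothetical stand-ins, not HotSpot code.

#include <mutex>

enum State { in_use, not_entrant, zombie, unloaded };

struct FakeMethod {
  State state = in_use;
  std::mutex patching_lock;

  // Returns true if this call changed the state, false if another
  // thread had already performed the same transition.
  bool make_not_entrant_or_zombie(State target) {
    std::lock_guard<std::mutex> guard(patching_lock);
    if (state == zombie) return false;   // terminal state, nothing to do
    if (state == target) return false;   // lost the race; report it to the caller
    state = target;                      // perform the transition
    return true;                         // we did the work (and would log it here)
  }
};

int main() {
  FakeMethod m;
  bool first  = m.make_not_entrant_or_zombie(not_entrant);  // true: we transitioned
  bool second = m.make_not_entrant_or_zombie(not_entrant);  // false: already done
  return (first && !second) ? 0 : 1;
}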
diff --git a/hotspot/src/share/vm/code/nmethod.hpp b/hotspot/src/share/vm/code/nmethod.hpp
index c7abdea89c8..26a7edaac81 100644
--- a/hotspot/src/share/vm/code/nmethod.hpp
+++ b/hotspot/src/share/vm/code/nmethod.hpp
@@ -81,18 +81,19 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
struct nmFlags {
friend class VMStructs;
- unsigned int version:8; // version number (0 = first version)
- unsigned int level:4; // optimization level
- unsigned int age:4; // age (in # of sweep steps)
+ unsigned int version:8; // version number (0 = first version)
+ unsigned int level:4; // optimization level
+ unsigned int age:4; // age (in # of sweep steps)
- unsigned int state:2; // {alive, zombie, unloaded)
+ unsigned int state:2; // {alive, zombie, unloaded}
- unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
- unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
- unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
- unsigned int markedForReclamation:1; // Used by NMethodSweeper
+ unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
+ unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
+ unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
+ unsigned int markedForReclamation:1; // Used by NMethodSweeper
- unsigned int has_unsafe_access:1; // May fault due to unsafe access.
+ unsigned int has_unsafe_access:1; // May fault due to unsafe access.
+ unsigned int has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
void clear();
};
@@ -252,7 +253,9 @@ class nmethod : public CodeBlob {
void* operator new(size_t size, int nmethod_size);
const char* reloc_string_for(u_char* begin, u_char* end);
- void make_not_entrant_or_zombie(int state);
+ // Returns true if this thread changed the state of the nmethod or
+ // false if another thread performed the transition.
+ bool make_not_entrant_or_zombie(unsigned int state);
void inc_decompile_count();
// used to check that writes to nmFlags are done consistently.
@@ -375,10 +378,12 @@ class nmethod : public CodeBlob {
bool is_zombie() const { return flags.state == zombie; }
bool is_unloaded() const { return flags.state == unloaded; }
- // Make the nmethod non entrant. The nmethod will continue to be alive.
- // It is used when an uncommon trap happens.
- void make_not_entrant() { make_not_entrant_or_zombie(not_entrant); }
- void make_zombie() { make_not_entrant_or_zombie(zombie); }
+ // Make the nmethod non entrant. The nmethod will continue to be
+ // alive. It is used when an uncommon trap happens. Returns true
+ // if this thread changed the state of the nmethod or false if
+ // another thread performed the transition.
+ bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
+ bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
// used by jvmti to track if the unload event has been reported
bool unload_reported() { return _unload_reported; }
@@ -405,6 +410,9 @@ class nmethod : public CodeBlob {
bool has_unsafe_access() const { return flags.has_unsafe_access; }
void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
+ bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
+ void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
+
int level() const { return flags.level; }
void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
@@ -537,6 +545,9 @@ class nmethod : public CodeBlob {
address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
+ // MethodHandle
+ bool is_method_handle_return(address return_pc);
+
// jvmti support:
void post_compiled_method_load_event();
@@ -563,7 +574,14 @@ class nmethod : public CodeBlob {
// Logging
void log_identity(xmlStream* log) const;
void log_new_nmethod() const;
- void log_state_change(int state) const;
+ void log_state_change() const;
+
+ // Prints block-level comments, including nmethod specific block labels:
+ virtual void print_block_comment(outputStream* stream, address block_begin) {
+ print_nmethod_labels(stream, block_begin);
+ CodeBlob::print_block_comment(stream, block_begin);
+ }
+ void print_nmethod_labels(outputStream* stream, address block_begin);
// Prints a comment for one native instruction (reloc info, pc desc)
void print_code_comment_on(outputStream* st, int column, address begin, address end);
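The has_method_handle_invokes bit plus the per-PcDesc flag added above amount to a two-level lookup: a cheap nmethod-wide check first, then a per-PC search only when the bit is set. A minimal sketch of that pattern, using hypothetical types rather than the real nmethod/PcDesc classes:

#include <cstdint>
#include <vector>

struct PcEntry {
  uintptr_t return_pc;
  bool is_method_handle_invoke;
};

struct FakeCode {
  bool has_method_handle_invokes = false;
  std::vector<PcEntry> pcs;

  // Called once when the PC table is filled in: tag the method if any
  // entry is a MethodHandle invoke (mirrors the loop in copy_scopes_pcs).
  void scan_pcs() {
    for (const PcEntry& e : pcs)
      if (e.is_method_handle_invoke) { has_method_handle_invokes = true; break; }
  }

  bool is_method_handle_return(uintptr_t pc) const {
    if (!has_method_handle_invokes) return false;   // cheap common case
    for (const PcEntry& e : pcs)                    // only then consult the table
      if (e.return_pc == pc) return e.is_method_handle_invoke;
    return false;                                   // no descriptor for this pc
  }
};

int main() {
  FakeCode code;
  code.pcs = { {0x100, false}, {0x140, true} };
  code.scan_pcs();
  return (code.is_method_handle_return(0x140) && !code.is_method_handle_return(0x100)) ? 0 : 1;
}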
diff --git a/hotspot/src/share/vm/code/pcDesc.hpp b/hotspot/src/share/vm/code/pcDesc.hpp
index de9334b4cee..74d3baaf2f7 100644
--- a/hotspot/src/share/vm/code/pcDesc.hpp
+++ b/hotspot/src/share/vm/code/pcDesc.hpp
@@ -38,6 +38,7 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
int word;
struct {
unsigned int reexecute: 1;
+ unsigned int is_method_handle_invoke: 1;
} bits;
bool operator ==(const PcDescFlags& other) { return word == other.word; }
} _flags;
@@ -72,6 +73,9 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
_flags == pd->_flags;
}
+ bool is_method_handle_invoke() const { return _flags.bits.is_method_handle_invoke; }
+ void set_is_method_handle_invoke(bool z) { _flags.bits.is_method_handle_invoke = z; }
+
// Returns the real pc
address real_pc(const nmethod* code) const;
diff --git a/hotspot/src/share/vm/compiler/compileBroker.cpp b/hotspot/src/share/vm/compiler/compileBroker.cpp
index 799d9f89202..41d963a253e 100644
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp
@@ -1820,9 +1820,11 @@ void CompileBroker::print_times() {
CompileBroker::_t_standard_compilation.seconds(),
CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count);
tty->print_cr(" On stack replacement : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count);
- compiler(CompLevel_fast_compile)->print_timers();
- if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier)) {
- compiler(CompLevel_highest_tier)->print_timers();
+
+ if (compiler(CompLevel_fast_compile)) {
+ compiler(CompLevel_fast_compile)->print_timers();
+ if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier))
+ compiler(CompLevel_highest_tier)->print_timers();
}
tty->cr();
diff --git a/hotspot/src/share/vm/compiler/compilerOracle.cpp b/hotspot/src/share/vm/compiler/compilerOracle.cpp
index 1829e044a06..73f36d7016b 100644
--- a/hotspot/src/share/vm/compiler/compilerOracle.cpp
+++ b/hotspot/src/share/vm/compiler/compilerOracle.cpp
@@ -392,18 +392,18 @@ static const char* patterns[] = {
};
static MethodMatcher::Mode check_mode(char name[], const char*& error_msg) {
- if (strcmp(name, "*") == 0) return MethodMatcher::Any;
-
int match = MethodMatcher::Exact;
- if (name[0] == '*') {
+ while (name[0] == '*') {
match |= MethodMatcher::Suffix;
strcpy(name, name + 1);
}
+ if (strcmp(name, "*") == 0) return MethodMatcher::Any;
+
size_t len = strlen(name);
- if (len > 0 && name[len - 1] == '*') {
+ while (len > 0 && name[len - 1] == '*') {
match |= MethodMatcher::Prefix;
- name[len - 1] = '\0';
+ name[--len] = '\0';
}
if (strstr(name, "*") != NULL) {
@@ -610,6 +610,14 @@ void compilerOracle_init() {
CompilerOracle::parse_from_string(CompileCommand, CompilerOracle::parse_from_line);
CompilerOracle::parse_from_string(CompileOnly, CompilerOracle::parse_compile_only);
CompilerOracle::parse_from_file();
+ if (lists[PrintCommand] != NULL) {
+ if (PrintAssembly) {
+ warning("CompileCommand and/or .hotspot_compiler file contains 'print' commands, but PrintAssembly is also enabled");
+ } else if (FLAG_IS_DEFAULT(DebugNonSafepoints)) {
+ warning("printing of assembly code is enabled; turning on DebugNonSafepoints to gain additional output");
+ DebugNonSafepoints = true;
+ }
+ }
}
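The check_mode change above generalizes the wildcard handling: repeated leading '*' characters now collapse into a single suffix-match flag and repeated trailing '*' characters into a prefix-match flag. A small standalone sketch of that matching-mode logic (plain std::string rather than the in-place char[] manipulation the real code uses):

#include <cassert>
#include <string>

enum Mode { Exact = 0, Prefix = 1, Suffix = 2, Substring = Prefix | Suffix };

Mode check_mode(std::string name) {
  int mode = Exact;
  while (!name.empty() && name.front() == '*') {   // strip all leading stars
    mode |= Suffix;
    name.erase(name.begin());
  }
  while (!name.empty() && name.back() == '*') {    // strip all trailing stars
    mode |= Prefix;
    name.pop_back();
  }
  return static_cast<Mode>(mode);
}

int main() {
  assert(check_mode("foo")   == Exact);
  assert(check_mode("foo**") == Prefix);      // multiple stars now collapse
  assert(check_mode("**foo") == Suffix);
  assert(check_mode("*foo*") == Substring);
  return 0;
}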
diff --git a/hotspot/src/share/vm/compiler/disassembler.cpp b/hotspot/src/share/vm/compiler/disassembler.cpp
index 3e800e9b9e7..dc33af2ee5b 100644
--- a/hotspot/src/share/vm/compiler/disassembler.cpp
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp
@@ -151,8 +151,10 @@ class decode_env {
outputStream* st = output();
if (_print_bytes && pc > pc0)
print_insn_bytes(pc0, pc);
- if (_nm != NULL)
+ if (_nm != NULL) {
_nm->print_code_comment_on(st, COMMENT_COLUMN, pc0, pc);
+ // this calls reloc_string_for which calls oop::print_value_on
+ }
// Output pc bucket ticks if we have any
if (total_ticks() != 0) {
@@ -273,8 +275,15 @@ void decode_env::print_address(address adr) {
oop obj;
if (_nm != NULL
&& (obj = _nm->embeddedOop_at(cur_insn())) != NULL
- && (address) obj == adr) {
+ && (address) obj == adr
+ && Universe::heap()->is_in(obj)
+ && Universe::heap()->is_in(obj->klass())) {
+ julong c = st->count();
obj->print_value_on(st);
+ if (st->count() == c) {
+ // No output. (Can happen in product builds.)
+ st->print("(a %s)", Klass::cast(obj->klass())->external_name());
+ }
return;
}
}
@@ -286,17 +295,9 @@ void decode_env::print_address(address adr) {
void decode_env::print_insn_labels() {
address p = cur_insn();
outputStream* st = output();
- nmethod* nm = _nm;
- if (nm != NULL) {
- if (p == nm->entry_point()) st->print_cr("[Entry Point]");
- if (p == nm->verified_entry_point()) st->print_cr("[Verified Entry Point]");
- if (p == nm->exception_begin()) st->print_cr("[Exception Handler]");
- if (p == nm->stub_begin()) st->print_cr("[Stub Code]");
- if (p == nm->consts_begin()) st->print_cr("[Constants]");
- }
CodeBlob* cb = _code;
if (cb != NULL) {
- cb->print_block_comment(st, (intptr_t)(p - cb->instructions_begin()));
+ cb->print_block_comment(st, p);
}
if (_print_pc) {
st->print(" " INTPTR_FORMAT ": ", (intptr_t) p);
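The disassembler hunk above removes the nmethod-specific label printing and relies on the virtual print_block_comment hook, which the nmethod.hpp change overrides to print its labels before delegating to CodeBlob. A minimal sketch of that dispatch pattern, with hypothetical class names:

#include <cstdio>

struct FakeBlob {
  virtual ~FakeBlob() {}
  virtual void print_block_comment(const char* addr_name) {
    std::printf("  ;; generic comment at %s\n", addr_name);
  }
};

struct FakeNmethod : FakeBlob {
  void print_labels(const char* addr_name) {
    std::printf("[Entry Point] (%s)\n", addr_name);   // nmethod-specific labels
  }
  void print_block_comment(const char* addr_name) override {
    print_labels(addr_name);                   // nmethod-specific part first
    FakeBlob::print_block_comment(addr_name);  // then the shared CodeBlob part
  }
};

int main() {
  FakeNmethod nm;
  FakeBlob* blob = &nm;
  blob->print_block_comment("0x1000");  // the disassembler only sees the base type
  return 0;
}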
diff --git a/hotspot/src/share/vm/compiler/methodLiveness.cpp b/hotspot/src/share/vm/compiler/methodLiveness.cpp
index a9a90a07197..4c53bcc1829 100644
--- a/hotspot/src/share/vm/compiler/methodLiveness.cpp
+++ b/hotspot/src/share/vm/compiler/methodLiveness.cpp
@@ -782,6 +782,7 @@ void MethodLiveness::BasicBlock::compute_gen_kill_single(ciBytecodeStream *instr
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface:
+ case Bytecodes::_invokedynamic:
case Bytecodes::_newarray:
case Bytecodes::_anewarray:
case Bytecodes::_checkcast:
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp
index eb1a1118d72..ec4caa22fc6 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp
@@ -62,12 +62,13 @@ TreeList* TreeList::as_TreeList(TreeChunk* tc) {
tl->link_head(tc);
tl->link_tail(tc);
tl->set_count(1);
- tl->init_statistics();
+ tl->init_statistics(true /* split_birth */);
tl->setParent(NULL);
tl->setLeft(NULL);
tl->setRight(NULL);
return tl;
}
+
TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
TreeChunk* tc = (TreeChunk*) addr;
assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
@@ -267,6 +268,31 @@ TreeChunk* TreeList::first_available() {
return retTC;
}
+// Returns the block with the largest heap address amongst
+// those in the list for this size; potentially slow and expensive,
+// use with caution!
+TreeChunk* TreeList::largest_address() {
+ guarantee(head() != NULL, "The head of the list cannot be NULL");
+ FreeChunk* fc = head()->next();
+ TreeChunk* retTC;
+ if (fc == NULL) {
+ retTC = head_as_TreeChunk();
+ } else {
+ // walk down the list and return the one with the highest
+ // heap address among chunks of this size.
+ FreeChunk* last = fc;
+ while (fc->next() != NULL) {
+ if ((HeapWord*)last < (HeapWord*)fc) {
+ last = fc;
+ }
+ fc = fc->next();
+ }
+ retTC = TreeChunk::as_TreeChunk(last);
+ }
+ assert(retTC->list() == this, "Wrong type of chunk.");
+ return retTC;
+}
+
BinaryTreeDictionary::BinaryTreeDictionary(MemRegion mr, bool splay):
_splay(splay)
{
@@ -379,7 +405,7 @@ BinaryTreeDictionary::getChunkFromTree(size_t size, Dither dither, bool splay)
break;
}
// The evm code reset the hint of the candidate as
- // at an interrim point. Why? Seems like this leaves
+ // at an interim point. Why? Seems like this leaves
// the hint pointing to a list that didn't work.
// curTL->set_hint(hintTL->size());
}
@@ -436,7 +462,7 @@ FreeChunk* BinaryTreeDictionary::findLargestDict() const {
TreeList *curTL = root();
if (curTL != NULL) {
while(curTL->right() != NULL) curTL = curTL->right();
- return curTL->first_available();
+ return curTL->largest_address();
} else {
return NULL;
}
@@ -664,7 +690,7 @@ void BinaryTreeDictionary::insertChunkInTree(FreeChunk* fc) {
}
}
TreeChunk* tc = TreeChunk::as_TreeChunk(fc);
- // This chunk is being returned to the binary try. It's embedded
+ // This chunk is being returned to the binary tree. Its embedded
// TreeList should be unused at this point.
tc->initialize();
if (curTL != NULL) { // exact match
@@ -807,6 +833,8 @@ void BinaryTreeDictionary::dictCensusUpdate(size_t size, bool split, bool birth)
}
bool BinaryTreeDictionary::coalDictOverPopulated(size_t size) {
+ if (FLSAlwaysCoalesceLarge) return true;
+
TreeList* list_of_size = findList(size);
// None of requested size implies overpopulated.
return list_of_size == NULL || list_of_size->coalDesired() <= 0 ||
@@ -854,17 +882,20 @@ class BeginSweepClosure : public AscendTreeCensusClosure {
double _percentage;
float _inter_sweep_current;
float _inter_sweep_estimate;
+ float _intra_sweep_estimate;
public:
BeginSweepClosure(double p, float inter_sweep_current,
- float inter_sweep_estimate) :
+ float inter_sweep_estimate,
+ float intra_sweep_estimate) :
_percentage(p),
_inter_sweep_current(inter_sweep_current),
- _inter_sweep_estimate(inter_sweep_estimate) { }
+ _inter_sweep_estimate(inter_sweep_estimate),
+ _intra_sweep_estimate(intra_sweep_estimate) { }
void do_list(FreeList* fl) {
double coalSurplusPercent = _percentage;
- fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate);
+ fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
fl->set_coalDesired((ssize_t)((double)fl->desired() * coalSurplusPercent));
fl->set_beforeSweep(fl->count());
fl->set_bfrSurp(fl->surplus());
@@ -939,9 +970,10 @@ FreeChunk* BinaryTreeDictionary::find_chunk_ends_at(HeapWord* target) const {
}
void BinaryTreeDictionary::beginSweepDictCensus(double coalSurplusPercent,
- float inter_sweep_current, float inter_sweep_estimate) {
+ float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
- inter_sweep_estimate);
+ inter_sweep_estimate,
+ intra_sweep_estimate);
bsc.do_tree(root());
}
@@ -1077,13 +1109,13 @@ void BinaryTreeDictionary::reportStatistics() const {
// Print census information - counts, births, deaths, etc.
// for each list in the tree. Also print some summary
// information.
-class printTreeCensusClosure : public AscendTreeCensusClosure {
+class PrintTreeCensusClosure : public AscendTreeCensusClosure {
int _print_line;
size_t _totalFree;
FreeList _total;
public:
- printTreeCensusClosure() {
+ PrintTreeCensusClosure() {
_print_line = 0;
_totalFree = 0;
}
@@ -1113,7 +1145,7 @@ void BinaryTreeDictionary::printDictCensus(void) const {
gclog_or_tty->print("\nBinaryTree\n");
FreeList::print_labels_on(gclog_or_tty, "size");
- printTreeCensusClosure ptc;
+ PrintTreeCensusClosure ptc;
ptc.do_tree(root());
FreeList* total = ptc.total();
@@ -1130,6 +1162,38 @@ void BinaryTreeDictionary::printDictCensus(void) const {
/(total->desired() != 0 ? (double)total->desired() : 1.0));
}
+class PrintFreeListsClosure : public AscendTreeCensusClosure {
+ outputStream* _st;
+ int _print_line;
+
+ public:
+ PrintFreeListsClosure(outputStream* st) {
+ _st = st;
+ _print_line = 0;
+ }
+ void do_list(FreeList* fl) {
+ if (++_print_line >= 40) {
+ FreeList::print_labels_on(_st, "size");
+ _print_line = 0;
+ }
+ fl->print_on(gclog_or_tty);
+ size_t sz = fl->size();
+ for (FreeChunk* fc = fl->head(); fc != NULL;
+ fc = fc->next()) {
+ _st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
+ fc, (HeapWord*)fc + sz,
+ fc->cantCoalesce() ? "\t CC" : "");
+ }
+ }
+};
+
+void BinaryTreeDictionary::print_free_lists(outputStream* st) const {
+
+ FreeList::print_labels_on(st, "size");
+ PrintFreeListsClosure pflc(st);
+ pflc.do_tree(root());
+}
+
// Verify the following tree invariants:
// . _root has no parent
// . parent and child point to each other
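findLargestDict() now returns a chunk at the highest heap address within the largest size class, via the new largest_address() walk. Conceptually this is a max-by-address scan over a singly linked free list; a standalone sketch of that scan (hypothetical Chunk type, comparing every node for simplicity):

#include <cstdint>

struct Chunk {
  Chunk* next;
};

// Assumes head is non-NULL, as the guarantee() in the patch does.
Chunk* largest_address(Chunk* head) {
  Chunk* best = head;
  for (Chunk* c = head->next; c != nullptr; c = c->next) {
    if (reinterpret_cast<uintptr_t>(c) > reinterpret_cast<uintptr_t>(best)) {
      best = c;   // keep the chunk at the higher address
    }
  }
  return best;
}

int main() {
  Chunk a, b, c;                        // stack objects stand in for heap chunks of one size
  a.next = &b; b.next = &c; c.next = nullptr;
  // Whatever the layout, the result must be the node with the largest address.
  Chunk* expected = &a;
  if (reinterpret_cast<uintptr_t>(&b) > reinterpret_cast<uintptr_t>(expected)) expected = &b;
  if (reinterpret_cast<uintptr_t>(&c) > reinterpret_cast<uintptr_t>(expected)) expected = &c;
  return (largest_address(&a) == expected) ? 0 : 1;
}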
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp
index d45193be9fb..0a107da91ab 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp
@@ -42,9 +42,6 @@ class TreeList: public FreeList {
friend class AscendTreeCensusClosure;
friend class DescendTreeCensusClosure;
friend class DescendTreeSearchClosure;
- TreeList* _parent;
- TreeList* _left;
- TreeList* _right;
protected:
TreeList* parent() const { return _parent; }
@@ -82,6 +79,11 @@ class TreeList: public FreeList {
// to a TreeChunk.
TreeChunk* first_available();
+ // Returns the block with the largest heap address amongst
+ // those in the list for this size; potentially slow and expensive,
+ // use with caution!
+ TreeChunk* largest_address();
+
// removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
// If "tc" is the first chunk in the list, it is also the
// TreeList that is the node in the tree. removeChunkReplaceIfNeeded()
@@ -254,8 +256,9 @@ class BinaryTreeDictionary: public FreeBlockDictionary {
// Methods called at the beginning of a sweep to prepare the
// statistics for the sweep.
void beginSweepDictCensus(double coalSurplusPercent,
- float sweep_current,
- float sweep_estimate);
+ float inter_sweep_current,
+ float inter_sweep_estimate,
+ float intra_sweep_estimate);
// Methods called after the end of a sweep to modify the
// statistics for the sweep.
void endSweepDictCensus(double splitSurplusPercent);
@@ -269,6 +272,7 @@ class BinaryTreeDictionary: public FreeBlockDictionary {
// Print the statistcis for all the lists in the tree. Also may
// print out summaries.
void printDictCensus(void) const;
+ void print_free_lists(outputStream* st) const;
// For debugging. Returns the sum of the _returnedBytes for
// all lists in the tree.
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp
index 00ef43f6957..b0ee1e8869e 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp
@@ -32,7 +32,9 @@
// threads. The second argument is in support of an extra locking
// check for CFL spaces' free list locks.
#ifndef PRODUCT
-void CMSLockVerifier::assert_locked(const Mutex* lock, const Mutex* p_lock) {
+void CMSLockVerifier::assert_locked(const Mutex* lock,
+ const Mutex* p_lock1,
+ const Mutex* p_lock2) {
if (!Universe::is_fully_initialized()) {
return;
}
@@ -40,7 +42,7 @@ void CMSLockVerifier::assert_locked(const Mutex* lock, const Mutex* p_lock) {
Thread* myThread = Thread::current();
if (lock == NULL) { // a "lock-free" structure, e.g. MUT, protected by CMS token
- assert(p_lock == NULL, "Unexpected state");
+ assert(p_lock1 == NULL && p_lock2 == NULL, "Unexpected caller error");
if (myThread->is_ConcurrentGC_thread()) {
// This test might have to change in the future, if there can be
// multiple peer CMS threads. But for now, if we're testing the CMS
@@ -60,36 +62,39 @@ void CMSLockVerifier::assert_locked(const Mutex* lock, const Mutex* p_lock) {
return;
}
- if (ParallelGCThreads == 0) {
+ if (myThread->is_VM_thread()
+ || myThread->is_ConcurrentGC_thread()
+ || myThread->is_Java_thread()) {
+ // Make sure that we are holding the associated lock.
assert_lock_strong(lock);
- } else {
- if (myThread->is_VM_thread()
- || myThread->is_ConcurrentGC_thread()
- || myThread->is_Java_thread()) {
- // Make sure that we are holding the associated lock.
- assert_lock_strong(lock);
- // The checking of p_lock is a spl case for CFLS' free list
- // locks: we make sure that none of the parallel GC work gang
- // threads are holding "sub-locks" of freeListLock(). We check only
- // the parDictionaryAllocLock because the others are too numerous.
- // This spl case code is somewhat ugly and any improvements
- // are welcome XXX FIX ME!!
- if (p_lock != NULL) {
- assert(!p_lock->is_locked() || p_lock->owned_by_self(),
- "Possible race between this and parallel GC threads");
- }
- } else if (myThread->is_GC_task_thread()) {
- // Make sure that the VM or CMS thread holds lock on our behalf
- // XXX If there were a concept of a gang_master for a (set of)
- // gang_workers, we could have used the identity of that thread
- // for checking ownership here; for now we just disjunct.
- assert(lock->owner() == VMThread::vm_thread() ||
- lock->owner() == ConcurrentMarkSweepThread::cmst(),
- "Should be locked by VM thread or CMS thread on my behalf");
- } else {
- // Make sure we didn't miss some obscure corner case
- ShouldNotReachHere();
+ // The checking of p_lock is a spl case for CFLS' free list
+ // locks: we make sure that none of the parallel GC work gang
+ // threads are holding "sub-locks" of freeListLock(). We check only
+ // the parDictionaryAllocLock because the others are too numerous.
+ // This spl case code is somewhat ugly and any improvements
+ // are welcome.
+ assert(p_lock1 == NULL || !p_lock1->is_locked() || p_lock1->owned_by_self(),
+ "Possible race between this and parallel GC threads");
+ assert(p_lock2 == NULL || !p_lock2->is_locked() || p_lock2->owned_by_self(),
+ "Possible race between this and parallel GC threads");
+ } else if (myThread->is_GC_task_thread()) {
+ // Make sure that the VM or CMS thread holds lock on our behalf
+ // XXX If there were a concept of a gang_master for a (set of)
+ // gang_workers, we could have used the identity of that thread
+ // for checking ownership here; for now we just disjunct.
+ assert(lock->owner() == VMThread::vm_thread() ||
+ lock->owner() == ConcurrentMarkSweepThread::cmst(),
+ "Should be locked by VM thread or CMS thread on my behalf");
+ if (p_lock1 != NULL) {
+ assert_lock_strong(p_lock1);
}
+ if (p_lock2 != NULL) {
+ assert_lock_strong(p_lock2);
+ }
+ } else {
+ // Make sure we didn't miss some other thread type calling into here;
+ // perhaps as a result of future VM evolution.
+ ShouldNotReachHere();
}
}
#endif
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp
index f2fe4514061..943eba0374c 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp
@@ -29,8 +29,11 @@
// the parallel threads.
class CMSLockVerifier: AllStatic {
public:
- static void assert_locked(const Mutex* lock, const Mutex* p_lock)
+ static void assert_locked(const Mutex* lock, const Mutex* p_lock1, const Mutex* p_lock2)
PRODUCT_RETURN;
+ static void assert_locked(const Mutex* lock, const Mutex* p_lock) {
+ assert_locked(lock, p_lock, NULL);
+ }
static void assert_locked(const Mutex* lock) {
assert_locked(lock, NULL);
}
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index 6b4bd36d934..9e3b6cf81cc 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -62,18 +62,15 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
// implementation, namely, the simple binary tree (splaying
// temporarily disabled).
switch (dictionaryChoice) {
- case FreeBlockDictionary::dictionaryBinaryTree:
- _dictionary = new BinaryTreeDictionary(mr);
- break;
case FreeBlockDictionary::dictionarySplayTree:
case FreeBlockDictionary::dictionarySkipList:
default:
warning("dictionaryChoice: selected option not understood; using"
" default BinaryTreeDictionary implementation instead.");
+ case FreeBlockDictionary::dictionaryBinaryTree:
_dictionary = new BinaryTreeDictionary(mr);
break;
}
- splitBirth(mr.word_size());
assert(_dictionary != NULL, "CMS dictionary initialization");
// The indexed free lists are initially all empty and are lazily
// filled in on demand. Initialize the array elements to NULL.
@@ -388,6 +385,105 @@ size_t CompactibleFreeListSpace::max_alloc_in_words() const {
return res;
}
+void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
+const {
+ reportIndexedFreeListStatistics();
+ gclog_or_tty->print_cr("Layout of Indexed Freelists");
+ gclog_or_tty->print_cr("---------------------------");
+ FreeList::print_labels_on(st, "size");
+ for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+ _indexedFreeList[i].print_on(gclog_or_tty);
+ for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
+ fc = fc->next()) {
+ gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
+ fc, (HeapWord*)fc + i,
+ fc->cantCoalesce() ? "\t CC" : "");
+ }
+ }
+}
+
+void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
+const {
+ _promoInfo.print_on(st);
+}
+
+void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
+const {
+ _dictionary->reportStatistics();
+ st->print_cr("Layout of Freelists in Tree");
+ st->print_cr("---------------------------");
+ _dictionary->print_free_lists(st);
+}
+
+class BlkPrintingClosure: public BlkClosure {
+ const CMSCollector* _collector;
+ const CompactibleFreeListSpace* _sp;
+ const CMSBitMap* _live_bit_map;
+ const bool _post_remark;
+ outputStream* _st;
+public:
+ BlkPrintingClosure(const CMSCollector* collector,
+ const CompactibleFreeListSpace* sp,
+ const CMSBitMap* live_bit_map,
+ outputStream* st):
+ _collector(collector),
+ _sp(sp),
+ _live_bit_map(live_bit_map),
+ _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
+ _st(st) { }
+ size_t do_blk(HeapWord* addr);
+};
+
+size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
+ size_t sz = _sp->block_size_no_stall(addr, _collector);
+ assert(sz != 0, "Should always be able to compute a size");
+ if (_sp->block_is_obj(addr)) {
+ const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
+ _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
+ addr,
+ dead ? "dead" : "live",
+ sz,
+ (!dead && CMSPrintObjectsInDump) ? ":" : ".");
+ if (CMSPrintObjectsInDump && !dead) {
+ oop(addr)->print_on(_st);
+ _st->print_cr("--------------------------------------");
+ }
+ } else { // free block
+ _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
+ addr, sz, CMSPrintChunksInDump ? ":" : ".");
+ if (CMSPrintChunksInDump) {
+ ((FreeChunk*)addr)->print_on(_st);
+ _st->print_cr("--------------------------------------");
+ }
+ }
+ return sz;
+}
+
+void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
+ outputStream* st) {
+ st->print_cr("\n=========================");
+ st->print_cr("Block layout in CMS Heap:");
+ st->print_cr("=========================");
+ BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
+ blk_iterate(&bpcl);
+
+ st->print_cr("\n=======================================");
+ st->print_cr("Order & Layout of Promotion Info Blocks");
+ st->print_cr("=======================================");
+ print_promo_info_blocks(st);
+
+ st->print_cr("\n===========================");
+ st->print_cr("Order of Indexed Free Lists");
+ st->print_cr("=========================");
+ print_indexed_free_lists(st);
+
+ st->print_cr("\n=================================");
+ st->print_cr("Order of Free Lists in Dictionary");
+ st->print_cr("=================================");
+ print_dictionary_free_lists(st);
+}
+
+
void CompactibleFreeListSpace::reportFreeListStatistics() const {
assert_lock_strong(&_freelistLock);
assert(PrintFLSStatistics != 0, "Reporting error");
@@ -449,37 +545,37 @@ void CompactibleFreeListSpace::set_end(HeapWord* value) {
if (prevEnd != NULL) {
// Resize the underlying block offset table.
_bt.resize(pointer_delta(value, bottom()));
- if (value <= prevEnd) {
- assert(value >= unallocated_block(), "New end is below unallocated block");
- } else {
- // Now, take this new chunk and add it to the free blocks.
- // Note that the BOT has not yet been updated for this block.
- size_t newFcSize = pointer_delta(value, prevEnd);
- // XXX This is REALLY UGLY and should be fixed up. XXX
- if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
- // Mark the boundary of the new block in BOT
- _bt.mark_block(prevEnd, value);
- // put it all in the linAB
- if (ParallelGCThreads == 0) {
- _smallLinearAllocBlock._ptr = prevEnd;
- _smallLinearAllocBlock._word_size = newFcSize;
- repairLinearAllocBlock(&_smallLinearAllocBlock);
- } else { // ParallelGCThreads > 0
- MutexLockerEx x(parDictionaryAllocLock(),
- Mutex::_no_safepoint_check_flag);
- _smallLinearAllocBlock._ptr = prevEnd;
- _smallLinearAllocBlock._word_size = newFcSize;
- repairLinearAllocBlock(&_smallLinearAllocBlock);
- }
- // Births of chunks put into a LinAB are not recorded. Births
- // of chunks as they are allocated out of a LinAB are.
+ if (value <= prevEnd) {
+ assert(value >= unallocated_block(), "New end is below unallocated block");
} else {
- // Add the block to the free lists, if possible coalescing it
- // with the last free block, and update the BOT and census data.
- addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
+ // Now, take this new chunk and add it to the free blocks.
+ // Note that the BOT has not yet been updated for this block.
+ size_t newFcSize = pointer_delta(value, prevEnd);
+ // XXX This is REALLY UGLY and should be fixed up. XXX
+ if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
+ // Mark the boundary of the new block in BOT
+ _bt.mark_block(prevEnd, value);
+ // put it all in the linAB
+ if (ParallelGCThreads == 0) {
+ _smallLinearAllocBlock._ptr = prevEnd;
+ _smallLinearAllocBlock._word_size = newFcSize;
+ repairLinearAllocBlock(&_smallLinearAllocBlock);
+ } else { // ParallelGCThreads > 0
+ MutexLockerEx x(parDictionaryAllocLock(),
+ Mutex::_no_safepoint_check_flag);
+ _smallLinearAllocBlock._ptr = prevEnd;
+ _smallLinearAllocBlock._word_size = newFcSize;
+ repairLinearAllocBlock(&_smallLinearAllocBlock);
+ }
+ // Births of chunks put into a LinAB are not recorded. Births
+ // of chunks as they are allocated out of a LinAB are.
+ } else {
+ // Add the block to the free lists, if possible coalescing it
+ // with the last free block, and update the BOT and census data.
+ addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
+ }
}
}
- }
}
class FreeListSpace_DCTOC : public Filtering_DCTOC {
@@ -732,7 +828,7 @@ void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
UpwardsObjectClosure* cl) {
- assert_locked();
+ assert_locked(freelistLock());
NOT_PRODUCT(verify_objects_initialized());
Space::object_iterate_mem(mr, cl);
}
@@ -1212,12 +1308,15 @@ bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
void CompactibleFreeListSpace::assert_locked() const {
CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
}
+
+void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
+ CMSLockVerifier::assert_locked(lock);
+}
#endif
FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
// In the parallel case, the main thread holds the free list lock
// on behalf the parallel threads.
- assert_locked();
FreeChunk* fc;
{
// If GC is parallel, this might be called by several threads.
@@ -1298,17 +1397,18 @@ CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
res = blk->_ptr;
_bt.allocated(res, blk->_word_size);
} else if (size + MinChunkSize <= blk->_refillSize) {
+ size_t sz = blk->_word_size;
// Update _unallocated_block if the size is such that chunk would be
// returned to the indexed free list. All other chunks in the indexed
// free lists are allocated from the dictionary so that _unallocated_block
// has already been adjusted for them. Do it here so that the cost
// is incurred here for all chunks added back to the indexed free lists.
- if (blk->_word_size < SmallForDictionary) {
- _bt.allocated(blk->_ptr, blk->_word_size);
+ if (sz < SmallForDictionary) {
+ _bt.allocated(blk->_ptr, sz);
}
// Return the chunk that isn't big enough, and then refill below.
- addChunkToFreeLists(blk->_ptr, blk->_word_size);
- _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
+ addChunkToFreeLists(blk->_ptr, sz);
+ splitBirth(sz);
// Don't keep statistics on adding back chunk from a LinAB.
} else {
// A refilled block would not satisfy the request.
@@ -1376,11 +1476,13 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
res = getChunkFromIndexedFreeListHelper(size);
}
_bt.verify_not_unallocated((HeapWord*) res, size);
+ assert(res == NULL || res->size() == size, "Incorrect block size");
return res;
}
FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
+CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
+ bool replenish) {
assert_locked();
FreeChunk* fc = NULL;
if (size < SmallForDictionary) {
@@ -1398,54 +1500,66 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
// and replenishing indexed lists from the small linAB.
//
FreeChunk* newFc = NULL;
- size_t replenish_size = CMSIndexedFreeListReplenish * size;
+ const size_t replenish_size = CMSIndexedFreeListReplenish * size;
if (replenish_size < SmallForDictionary) {
// Do not replenish from an underpopulated size.
if (_indexedFreeList[replenish_size].surplus() > 0 &&
_indexedFreeList[replenish_size].head() != NULL) {
- newFc =
- _indexedFreeList[replenish_size].getChunkAtHead();
- } else {
+ newFc = _indexedFreeList[replenish_size].getChunkAtHead();
+ } else if (bestFitFirst()) {
newFc = bestFitSmall(replenish_size);
}
}
- if (newFc != NULL) {
- splitDeath(replenish_size);
- } else if (replenish_size > size) {
+ if (newFc == NULL && replenish_size > size) {
assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
- newFc =
- getChunkFromIndexedFreeListHelper(replenish_size);
+ newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
}
+ // Note: The stats update re split-death of block obtained above
+ // will be recorded below precisely when we know we are going to
+ // actually be splitting it into more than one piece.
if (newFc != NULL) {
- assert(newFc->size() == replenish_size, "Got wrong size");
- size_t i;
- FreeChunk *curFc, *nextFc;
- // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
- // The last chunk is not added to the lists but is returned as the
- // free chunk.
- for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
- i = 0;
- i < (CMSIndexedFreeListReplenish - 1);
- curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
- i++) {
+ if (replenish || CMSReplenishIntermediate) {
+ // Replenish this list and return one block to caller.
+ size_t i;
+ FreeChunk *curFc, *nextFc;
+ size_t num_blk = newFc->size() / size;
+ assert(num_blk >= 1, "Smaller than requested?");
+ assert(newFc->size() % size == 0, "Should be integral multiple of request");
+ if (num_blk > 1) {
+ // we are sure we will be splitting the block just obtained
+ // into multiple pieces; record the split-death of the original
+ splitDeath(replenish_size);
+ }
+ // carve up and link blocks 0, ..., num_blk - 2
+ // The last chunk is not added to the lists but is returned as the
+ // free chunk.
+ for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
+ i = 0;
+ i < (num_blk - 1);
+ curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
+ i++) {
+ curFc->setSize(size);
+ // Don't record this as a return in order to try and
+ // determine the "returns" from a GC.
+ _bt.verify_not_unallocated((HeapWord*) fc, size);
+ _indexedFreeList[size].returnChunkAtTail(curFc, false);
+ _bt.mark_block((HeapWord*)curFc, size);
+ splitBirth(size);
+ // Don't record the initial population of the indexed list
+ // as a split birth.
+ }
+
+ // check that the arithmetic was OK above
+ assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
+ "inconsistency in carving newFc");
curFc->setSize(size);
- // Don't record this as a return in order to try and
- // determine the "returns" from a GC.
- _bt.verify_not_unallocated((HeapWord*) fc, size);
- _indexedFreeList[size].returnChunkAtTail(curFc, false);
_bt.mark_block((HeapWord*)curFc, size);
splitBirth(size);
- // Don't record the initial population of the indexed list
- // as a split birth.
+ fc = curFc;
+ } else {
+ // Return entire block to caller
+ fc = newFc;
}
-
- // check that the arithmetic was OK above
- assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
- "inconsistency in carving newFc");
- curFc->setSize(size);
- _bt.mark_block((HeapWord*)curFc, size);
- splitBirth(size);
- return curFc;
}
}
} else {
@@ -1453,7 +1567,7 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
// replenish the indexed free list.
fc = getChunkFromDictionaryExact(size);
}
- assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
+ // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
return fc;
}
@@ -1512,6 +1626,11 @@ CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
// adjust _unallocated_block downward, as necessary
_bt.freed((HeapWord*)chunk, size);
_dictionary->returnChunk(chunk);
+#ifndef PRODUCT
+ if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
+ TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
+ }
+#endif // PRODUCT
}
void
@@ -1525,6 +1644,11 @@ CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
} else {
_indexedFreeList[size].returnChunkAtHead(fc);
}
+#ifndef PRODUCT
+ if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
+ _indexedFreeList[size].verify_stats();
+ }
+#endif // PRODUCT
}
// Add chunk to end of last block -- if it's the largest
@@ -1537,7 +1661,6 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
HeapWord* chunk, size_t size) {
// check that the chunk does lie in this space!
assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
- assert_locked();
// One of the parallel gc task threads may be here
// whilst others are allocating.
Mutex* lock = NULL;
@@ -1991,24 +2114,26 @@ double CompactibleFreeListSpace::flsFrag() const {
return frag;
}
-#define CoalSurplusPercent 1.05
-#define SplitSurplusPercent 1.10
-
void CompactibleFreeListSpace::beginSweepFLCensus(
float inter_sweep_current,
- float inter_sweep_estimate) {
+ float inter_sweep_estimate,
+ float intra_sweep_estimate) {
assert_locked();
size_t i;
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
FreeList* fl = &_indexedFreeList[i];
- fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
- fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
+ if (PrintFLSStatistics > 1) {
+ gclog_or_tty->print("size[%d] : ", i);
+ }
+ fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
+ fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
fl->set_beforeSweep(fl->count());
fl->set_bfrSurp(fl->surplus());
}
- _dictionary->beginSweepDictCensus(CoalSurplusPercent,
+ _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
inter_sweep_current,
- inter_sweep_estimate);
+ inter_sweep_estimate,
+ intra_sweep_estimate);
}
void CompactibleFreeListSpace::setFLSurplus() {
@@ -2017,7 +2142,7 @@ void CompactibleFreeListSpace::setFLSurplus() {
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
FreeList *fl = &_indexedFreeList[i];
fl->set_surplus(fl->count() -
- (ssize_t)((double)fl->desired() * SplitSurplusPercent));
+ (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
}
}
@@ -2048,6 +2173,11 @@ void CompactibleFreeListSpace::clearFLCensus() {
}
void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
+ if (PrintFLSStatistics > 0) {
+ HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
+ gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
+ largestAddr);
+ }
setFLSurplus();
setFLHints();
if (PrintGC && PrintFLSCensus > 0) {
@@ -2055,7 +2185,7 @@ void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
}
clearFLCensus();
assert_locked();
- _dictionary->endSweepDictCensus(SplitSurplusPercent);
+ _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
}
bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
@@ -2312,13 +2442,18 @@ void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
}
void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
- FreeChunk* fc = _indexedFreeList[size].head();
+ FreeChunk* fc = _indexedFreeList[size].head();
+ FreeChunk* tail = _indexedFreeList[size].tail();
+ size_t num = _indexedFreeList[size].count();
+ size_t n = 0;
guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
- for (; fc != NULL; fc = fc->next()) {
+ for (; fc != NULL; fc = fc->next(), n++) {
guarantee(fc->size() == size, "Size inconsistency");
guarantee(fc->isFree(), "!free?");
guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
+ guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
}
+ guarantee(n == num, "Incorrect count");
}
#ifndef PRODUCT
@@ -2516,11 +2651,41 @@ void PromotionInfo::startTrackingPromotions() {
_tracking = true;
}
-void PromotionInfo::stopTrackingPromotions() {
+#define CMSPrintPromoBlockInfo 1
+
+void PromotionInfo::stopTrackingPromotions(uint worker_id) {
assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
"spooling inconsistency?");
_firstIndex = _nextIndex = 1;
_tracking = false;
+ if (CMSPrintPromoBlockInfo > 1) {
+ print_statistics(worker_id);
+ }
+}
+
+void PromotionInfo::print_statistics(uint worker_id) const {
+ assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
+ "Else will undercount");
+ assert(CMSPrintPromoBlockInfo > 0, "Else unnecessary call");
+ // Count the number of blocks and slots in the free pool
+ size_t slots = 0;
+ size_t blocks = 0;
+ for (SpoolBlock* cur_spool = _spareSpool;
+ cur_spool != NULL;
+ cur_spool = cur_spool->nextSpoolBlock) {
+ // the first entry is just a self-pointer; indices 1 through
+ // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
+ guarantee((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
+ "first entry of displacedHdr should be self-referential");
+ slots += cur_spool->bufferSize - 1;
+ blocks++;
+ }
+ if (_spoolHead != NULL) {
+ slots += _spoolHead->bufferSize - 1;
+ blocks++;
+ }
+ gclog_or_tty->print_cr(" [worker %d] promo_blocks = %d, promo_slots = %d ",
+ worker_id, blocks, slots);
}
// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
@@ -2584,15 +2749,84 @@ void PromotionInfo::verify() const {
guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
}
+void PromotionInfo::print_on(outputStream* st) const {
+ SpoolBlock* curSpool = NULL;
+ size_t i = 0;
+ st->print_cr("start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
+ _firstIndex, _nextIndex);
+ for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
+ curSpool = curSpool->nextSpoolBlock) {
+ curSpool->print_on(st);
+ st->print_cr(" active ");
+ i++;
+ }
+ for (curSpool = _spoolTail; curSpool != NULL;
+ curSpool = curSpool->nextSpoolBlock) {
+ curSpool->print_on(st);
+ st->print_cr(" inactive ");
+ i++;
+ }
+ for (curSpool = _spareSpool; curSpool != NULL;
+ curSpool = curSpool->nextSpoolBlock) {
+ curSpool->print_on(st);
+ st->print_cr(" free ");
+ i++;
+ }
+ st->print_cr(SIZE_FORMAT " header spooling blocks", i);
+}
+
+void SpoolBlock::print_on(outputStream* st) const {
+ st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
+ this, (HeapWord*)displacedHdr + bufferSize,
+ bufferSize, nextSpoolBlock);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// CFLS_LAB
+///////////////////////////////////////////////////////////////////////////
+
+#define VECTOR_257(x) \
+ /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
+ { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x }
+
+// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
+// OldPLABSize, whose static default is different; if overridden at the
+// command-line, this will get reinitialized via a call to
+// modify_initialization() below.
+AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
+ VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
+size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
+int CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
_cfls(cfls)
{
- _blocks_to_claim = CMSParPromoteBlocksToClaim;
+ assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
for (size_t i = CompactibleFreeListSpace::IndexSetStart;
i < CompactibleFreeListSpace::IndexSetSize;
i += CompactibleFreeListSpace::IndexSetStride) {
_indexedFreeList[i].set_size(i);
+ _num_blocks[i] = 0;
+ }
+}
+
+static bool _CFLS_LAB_modified = false;
+
+void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
+ assert(!_CFLS_LAB_modified, "Call only once");
+ _CFLS_LAB_modified = true;
+ for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+ i < CompactibleFreeListSpace::IndexSetSize;
+ i += CompactibleFreeListSpace::IndexSetStride) {
+ _blocks_to_claim[i].modify(n, wt, true /* force */);
}
}
@@ -2607,11 +2841,9 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
if (res == NULL) return NULL;
} else {
FreeList* fl = &_indexedFreeList[word_sz];
- bool filled = false; //TRAP
if (fl->count() == 0) {
- bool filled = true; //TRAP
// Attempt to refill this local free list.
- _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
+ get_from_global_pool(word_sz, fl);
// If it didn't work, give up.
if (fl->count() == 0) return NULL;
}
@@ -2626,80 +2858,190 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
return (HeapWord*)res;
}
-void CFLS_LAB::retire() {
- for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+// Get a chunk of blocks of the right size and update related
+// book-keeping stats
+void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
+ // Get the #blocks we want to claim
+ size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
+ assert(n_blks > 0, "Error");
+ assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+ // In some cases, when the application has a phase change,
+ // there may be a sudden and sharp shift in the object survival
+ // profile, and updating the counts at the end of a scavenge
+ // may not be quick enough, giving rise to large scavenge pauses
+ // during these phase changes. It is beneficial to detect such
+ // changes on-the-fly during a scavenge and avoid such a phase-change
+ // pothole. The following code is a heuristic attempt to do that.
+ // It is protected by a product flag until we have gained
+ // enough experience with this heuristic and fine-tuned its behaviour.
+ // WARNING: This might increase fragmentation if we overreact to
+ // small spikes, so some kind of historical smoothing based on
+ // previous experience with the greater reactivity might be useful.
+ // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
+ // default.
+ if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
+ size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
+ n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
+ n_blks = MIN2(n_blks, CMSOldPLABMax);
+ }
+ assert(n_blks > 0, "Error");
+ _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
+ // Update stats table entry for this block size
+ _num_blocks[word_sz] += fl->count();
+}
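A rough worked example of the reactivity boost above, with illustrative values rather than verified flag defaults: suppose this worker has already obtained _num_blocks[word_sz] = 1200 blocks of this size, the smoothed claim is n_blks = 50, and CMSOldPLABToleranceFactor = 4, CMSOldPLABNumRefills = 4, CMSOldPLABReactivityFactor = 2. Then multiple = 1200 / (4 * 4 * 50) = 1 and n_blks becomes 50 + 2 * 1 * 50 = 150, before being clamped to CMSOldPLABMax.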
+
+void CFLS_LAB::compute_desired_plab_size() {
+ for (size_t i = CompactibleFreeListSpace::IndexSetStart;
i < CompactibleFreeListSpace::IndexSetSize;
i += CompactibleFreeListSpace::IndexSetStride) {
- if (_indexedFreeList[i].count() > 0) {
- MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
- Mutex::_no_safepoint_check_flag);
- _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
- // Reset this list.
- _indexedFreeList[i] = FreeList();
- _indexedFreeList[i].set_size(i);
+ assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
+ "Counter inconsistency");
+ if (_global_num_workers[i] > 0) {
+ // Need to smooth wrt historical average
+ if (ResizeOldPLAB) {
+ _blocks_to_claim[i].sample(
+ MAX2((size_t)CMSOldPLABMin,
+ MIN2((size_t)CMSOldPLABMax,
+ _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
+ }
+ // Reset counters for next round
+ _global_num_workers[i] = 0;
+ _global_num_blocks[i] = 0;
+ if (PrintOldPLAB) {
+ gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
+ }
}
}
}
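As a sketch of the sampling formula above (numbers purely illustrative): if the parallel workers together reported _global_num_blocks[i] = 3200 blocks of size i from _global_num_workers[i] = 4 workers, and CMSOldPLABNumRefills = 4, the new sample is 3200 / (4 * 4) = 200 blocks per refill, clamped to [CMSOldPLABMin, CMSOldPLABMax] before being folded into the _blocks_to_claim[i] average that get_from_global_pool() later reads.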
-void
-CompactibleFreeListSpace::
-par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
+void CFLS_LAB::retire(int tid) {
+ // We run this single threaded with the world stopped;
+ // so no need for locks and such.
+#define CFLS_LAB_PARALLEL_ACCESS 0
+ NOT_PRODUCT(Thread* t = Thread::current();)
+ assert(Thread::current()->is_VM_thread(), "Error");
+ assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
+ "Will access to uninitialized slot below");
+#if CFLS_LAB_PARALLEL_ACCESS
+ for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
+ i > 0;
+ i -= CompactibleFreeListSpace::IndexSetStride) {
+#else // CFLS_LAB_PARALLEL_ACCESS
+ for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+ i < CompactibleFreeListSpace::IndexSetSize;
+ i += CompactibleFreeListSpace::IndexSetStride) {
+#endif // !CFLS_LAB_PARALLEL_ACCESS
+ assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
+ "Can't retire more than what we obtained");
+ if (_num_blocks[i] > 0) {
+ size_t num_retire = _indexedFreeList[i].count();
+ assert(_num_blocks[i] > num_retire, "Should have used at least one");
+ {
+#if CFLS_LAB_PARALLEL_ACCESS
+ MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
+ Mutex::_no_safepoint_check_flag);
+#endif // CFLS_LAB_PARALLEL_ACCESS
+        // Update global stats for num_blocks used
+ _global_num_blocks[i] += (_num_blocks[i] - num_retire);
+ _global_num_workers[i]++;
+ assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
+ if (num_retire > 0) {
+ _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
+ // Reset this list.
+ _indexedFreeList[i] = FreeList();
+ _indexedFreeList[i].set_size(i);
+ }
+ }
+ if (PrintOldPLAB) {
+ gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
+ tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
+ }
+ // Reset stats for next round
+ _num_blocks[i] = 0;
+ }
+ }
+}
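As a small illustration of the bookkeeping above: a worker that obtained _num_blocks[i] = 60 blocks of size i during the scavenge and still holds num_retire = 10 of them on its local list adds 60 - 10 = 50 to _global_num_blocks[i], increments _global_num_workers[i], and prepends the 10 unused chunks back onto the shared indexed free list.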
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
assert(fl->count() == 0, "Precondition.");
assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
"Precondition");
- // We'll try all multiples of word_sz in the indexed set (starting with
- // word_sz itself), then try getting a big chunk and splitting it.
- int k = 1;
- size_t cur_sz = k * word_sz;
- bool found = false;
- while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
- FreeList* gfl = &_indexedFreeList[cur_sz];
- FreeList fl_for_cur_sz; // Empty.
- fl_for_cur_sz.set_size(cur_sz);
- {
- MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
- Mutex::_no_safepoint_check_flag);
- if (gfl->count() != 0) {
- size_t nn = MAX2(n/k, (size_t)1);
- gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
- found = true;
- }
- }
- // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
- if (found) {
- if (k == 1) {
- fl->prepend(&fl_for_cur_sz);
- } else {
- // Divide each block on fl_for_cur_sz up k ways.
- FreeChunk* fc;
- while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
- // Must do this in reverse order, so that anybody attempting to
- // access the main chunk sees it as a single free block until we
- // change it.
- size_t fc_size = fc->size();
- for (int i = k-1; i >= 0; i--) {
- FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
- ffc->setSize(word_sz);
- ffc->linkNext(NULL);
- ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
- // Above must occur before BOT is updated below.
- // splitting from the right, fc_size == (k - i + 1) * wordsize
- _bt.mark_block((HeapWord*)ffc, word_sz);
- fc_size -= word_sz;
- _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
- _bt.verify_single_block((HeapWord*)fc, fc_size);
- _bt.verify_single_block((HeapWord*)ffc, ffc->size());
- // Push this on "fl".
- fl->returnChunkAtHead(ffc);
+ // We'll try all multiples of word_sz in the indexed set, starting with
+ // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
+ // then try getting a big chunk and splitting it.
+ {
+ bool found;
+ int k;
+ size_t cur_sz;
+ for (k = 1, cur_sz = k * word_sz, found = false;
+ (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
+ (CMSSplitIndexedFreeListBlocks || k <= 1);
+ k++, cur_sz = k * word_sz) {
+ FreeList* gfl = &_indexedFreeList[cur_sz];
+ FreeList fl_for_cur_sz; // Empty.
+ fl_for_cur_sz.set_size(cur_sz);
+ {
+ MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
+ Mutex::_no_safepoint_check_flag);
+ if (gfl->count() != 0) {
+ // nn is the number of chunks of size cur_sz that
+ // we'd need to split k-ways each, in order to create
+ // "n" chunks of size word_sz each.
+ const size_t nn = MAX2(n/k, (size_t)1);
+ gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
+ found = true;
+ if (k > 1) {
+ // Update split death stats for the cur_sz-size blocks list:
+ // we increment the split death count by the number of blocks
+ // we just took from the cur_sz-size blocks list and which
+ // we will be splitting below.
+ ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
+ fl_for_cur_sz.count();
+ _indexedFreeList[cur_sz].set_splitDeaths(deaths);
}
- // TRAP
- assert(fl->tail()->next() == NULL, "List invariant.");
}
}
- return;
+ // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
+ if (found) {
+ if (k == 1) {
+ fl->prepend(&fl_for_cur_sz);
+ } else {
+ // Divide each block on fl_for_cur_sz up k ways.
+ FreeChunk* fc;
+ while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
+ // Must do this in reverse order, so that anybody attempting to
+ // access the main chunk sees it as a single free block until we
+ // change it.
+ size_t fc_size = fc->size();
+ for (int i = k-1; i >= 0; i--) {
+ FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+ ffc->setSize(word_sz);
+ ffc->linkNext(NULL);
+ ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+ // Above must occur before BOT is updated below.
+ // splitting from the right, fc_size == (k - i + 1) * wordsize
+ _bt.mark_block((HeapWord*)ffc, word_sz);
+ fc_size -= word_sz;
+ _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+ _bt.verify_single_block((HeapWord*)fc, fc_size);
+ _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+ // Push this on "fl".
+ fl->returnChunkAtHead(ffc);
+ }
+ // TRAP
+ assert(fl->tail()->next() == NULL, "List invariant.");
+ }
+ }
+ // Update birth stats for this block size.
+ size_t num = fl->count();
+ MutexLockerEx x(_indexedFreeListParLocks[word_sz],
+ Mutex::_no_safepoint_check_flag);
+ ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
+ _indexedFreeList[word_sz].set_splitBirths(births);
+ return;
+ }
}
- k++; cur_sz = k * word_sz;
}
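For a concrete illustration of the indexed-set search above (assuming CMSSplitIndexedFreeListBlocks is enabled, values illustrative): with word_sz = 8 and n = 16, k = 1 first probes the 8-word list; if that is empty, k = 2 probes the 16-word list, takes nn = MAX2(16/2, 1) = 8 chunks from it, and splits each into two 8-word blocks, yielding the requested 16.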
// Otherwise, we'll split a block from the dictionary.
FreeChunk* fc = NULL;
@@ -2723,17 +3065,31 @@ par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
}
}
if (fc == NULL) return;
+ assert((ssize_t)n >= 1, "Control point invariant");
// Otherwise, split up that block.
- size_t nn = fc->size() / word_sz;
+ const size_t nn = fc->size() / word_sz;
n = MIN2(nn, n);
+ assert((ssize_t)n >= 1, "Control point invariant");
rem = fc->size() - n * word_sz;
// If there is a remainder, and it's too small, allocate one fewer.
if (rem > 0 && rem < MinChunkSize) {
n--; rem += word_sz;
}
+ // Note that at this point we may have n == 0.
+ assert((ssize_t)n >= 0, "Control point invariant");
+
+ // If n is 0, the chunk fc that was found is not large
+ // enough to leave a viable remainder. We are unable to
+ // allocate even one block. Return fc to the
+ // dictionary and return, leaving "fl" empty.
+ if (n == 0) {
+ returnChunkToDictionary(fc);
+ return;
+ }
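A small worked example of the remainder adjustment above (values purely illustrative): with fc->size() = 963 words, word_sz = 96 and n = 12 requested, nn = 963 / 96 = 10, so n becomes 10 and rem = 963 - 960 = 3; if 3 < MinChunkSize, one block is given back, leaving n = 9 and rem = 99, which is then returned to the 99-word indexed free list rather than lost. Had fc been barely larger than word_sz itself, n could drop to 0, in which case the chunk simply goes back to the dictionary as handled above.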
+
// First return the remainder, if any.
// Note that we hold the lock until we decide if we're going to give
- // back the remainder to the dictionary, since a contending allocator
+ // back the remainder to the dictionary, since a concurrent allocation
// may otherwise see the heap as empty. (We're willing to take that
// hit if the block is a small block.)
if (rem > 0) {
@@ -2743,18 +3099,16 @@ par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
rem_fc->linkNext(NULL);
rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
// Above must occur before BOT is updated below.
+ assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
_bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
if (rem >= IndexSetSize) {
returnChunkToDictionary(rem_fc);
- dictionary()->dictCensusUpdate(fc->size(),
- true /*split*/,
- true /*birth*/);
+ dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
rem_fc = NULL;
}
// Otherwise, return it to the small list below.
}
}
- //
if (rem_fc != NULL) {
MutexLockerEx x(_indexedFreeListParLocks[rem],
Mutex::_no_safepoint_check_flag);
@@ -2762,7 +3116,7 @@ par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
_indexedFreeList[rem].returnChunkAtHead(rem_fc);
smallSplitBirth(rem);
}
-
+ assert((ssize_t)n > 0 && fc != NULL, "Consistency");
// Now do the splitting up.
// Must do this in reverse order, so that anybody attempting to
// access the main chunk sees it as a single free block until we
@@ -2792,13 +3146,15 @@ par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
_bt.verify_single_block((HeapWord*)fc, fc->size());
fl->returnChunkAtHead(fc);
+ assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
{
+ // Update the stats for this block size.
MutexLockerEx x(_indexedFreeListParLocks[word_sz],
Mutex::_no_safepoint_check_flag);
- ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
- _indexedFreeList[word_sz].set_splitBirths(new_births);
- ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
- _indexedFreeList[word_sz].set_surplus(new_surplus);
+ const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
+ _indexedFreeList[word_sz].set_splitBirths(births);
+ // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
+ // _indexedFreeList[word_sz].set_surplus(new_surplus);
}
// TRAP
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index 9f16f8d2eb0..d937de86156 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -25,8 +25,6 @@
// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.
-#define CFLS_LAB_REFILL_STATS 0
-
// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
@@ -89,6 +87,9 @@ class SpoolBlock: public FreeChunk {
displacedHdr = (markOop*)&displacedHdr;
nextSpoolBlock = NULL;
}
+
+ void print_on(outputStream* st) const;
+ void print() const { print_on(gclog_or_tty); }
};
class PromotionInfo VALUE_OBJ_CLASS_SPEC {
@@ -121,7 +122,7 @@ class PromotionInfo VALUE_OBJ_CLASS_SPEC {
return _promoHead == NULL;
}
void startTrackingPromotions();
- void stopTrackingPromotions();
+ void stopTrackingPromotions(uint worker_id = 0);
bool tracking() const { return _tracking; }
void track(PromotedObject* trackOop); // keep track of a promoted oop
// The following variant must be used when trackOop is not fully
@@ -161,6 +162,9 @@ class PromotionInfo VALUE_OBJ_CLASS_SPEC {
_nextIndex = 0;
}
+
+ void print_on(outputStream* st) const;
+ void print_statistics(uint worker_id) const;
};
class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
@@ -243,6 +247,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
mutable Mutex _freelistLock;
// locking verifier convenience function
void assert_locked() const PRODUCT_RETURN;
+ void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
// Linear allocation blocks
LinearAllocBlock _smallLinearAllocBlock;
@@ -281,13 +286,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Locks protecting the exact lists during par promotion allocation.
Mutex* _indexedFreeListParLocks[IndexSetSize];
-#if CFLS_LAB_REFILL_STATS
- // Some statistics.
- jint _par_get_chunk_from_small;
- jint _par_get_chunk_from_large;
-#endif
-
-
// Attempt to obtain up to "n" blocks of the size "word_sz" (which is
// required to be smaller than "IndexSetSize".) If successful,
// adds them to "fl", which is required to be an empty free list.
@@ -320,7 +318,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Helper function for getChunkFromIndexedFreeList.
// Replenish the indexed free list for this "size". Do not take from an
// underpopulated size.
- FreeChunk* getChunkFromIndexedFreeListHelper(size_t size);
+ FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);
// Get a chunk from the indexed free list. If the indexed free list
// does not have a free chunk, try to replenish the indexed free list
@@ -430,10 +428,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void initialize_sequential_subtasks_for_marking(int n_threads,
HeapWord* low = NULL);
-#if CFLS_LAB_REFILL_STATS
- void print_par_alloc_stats();
-#endif
-
// Space enquiries
size_t used() const;
size_t free() const;
@@ -617,6 +611,12 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // Do some basic checks on the free lists.
void checkFreeListConsistency() const PRODUCT_RETURN;
+ // Printing support
+ void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
+ void print_indexed_free_lists(outputStream* st) const;
+ void print_dictionary_free_lists(outputStream* st) const;
+ void print_promo_info_blocks(outputStream* st) const;
+
NOT_PRODUCT (
void initializeIndexedFreeListArrayReturnedBytes();
size_t sumIndexedFreeListArrayReturnedBytes();
@@ -638,8 +638,9 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Statistics functions
// Initialize census for lists before the sweep.
- void beginSweepFLCensus(float sweep_current,
- float sweep_estimate);
+ void beginSweepFLCensus(float inter_sweep_current,
+ float inter_sweep_estimate,
+ float intra_sweep_estimate);
// Set the surplus for each of the free lists.
void setFLSurplus();
// Set the hint for each of the free lists.
@@ -730,16 +731,17 @@ class CFLS_LAB : public CHeapObj {
FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
// Initialized from a command-line arg.
- size_t _blocks_to_claim;
-#if CFLS_LAB_REFILL_STATS
- // Some statistics.
- int _refills;
- int _blocksTaken;
- static int _tot_refills;
- static int _tot_blocksTaken;
- static int _next_threshold;
-#endif
+ // Allocation statistics in support of dynamic adjustment of
+ // #blocks to claim per get_from_global_pool() call below.
+ static AdaptiveWeightedAverage
+ _blocks_to_claim [CompactibleFreeListSpace::IndexSetSize];
+ static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
+ static int _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
+ size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
+
+ // Internal work method
+ void get_from_global_pool(size_t word_sz, FreeList* fl);
public:
CFLS_LAB(CompactibleFreeListSpace* cfls);
@@ -748,7 +750,12 @@ public:
HeapWord* alloc(size_t word_sz);
// Return any unused portions of the buffer to the global pool.
- void retire();
+ void retire(int tid);
+
+ // Dynamic OldPLABSize sizing
+ static void compute_desired_plab_size();
+ // When the settings are modified from default static initialization
+ static void modify_initialization(size_t n, unsigned wt);
};
size_t PromotionInfo::refillSize() const {
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index c3d30c348b2..1ec7696bdf7 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -253,7 +253,6 @@ void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr)
}
}
-
void ConcurrentMarkSweepGeneration::ref_processor_init() {
assert(collector() != NULL, "no collector");
collector()->ref_processor_init();
@@ -341,6 +340,14 @@ CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
_icms_duty_cycle = CMSIncrementalDutyCycle;
}
+double CMSStats::cms_free_adjustment_factor(size_t free) const {
+ // TBD: CR 6909490
+ return 1.0;
+}
+
+void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
+}
+
// If promotion failure handling is on use
// the padded average size of the promotion for each
// young generation collection.
@@ -361,7 +368,11 @@ double CMSStats::time_until_cms_gen_full() const {
// Adjust by the safety factor.
double cms_free_dbl = (double)cms_free;
- cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
+ double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
+ // Apply a further correction factor which tries to adjust
+  // for recent occurrence of concurrent mode failures.
+ cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
+ cms_free_dbl = cms_free_dbl * cms_adjustment;
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
@@ -395,6 +406,8 @@ double CMSStats::time_until_cms_start() const {
// late.
double work = cms_duration() + gc0_period();
double deadline = time_until_cms_gen_full();
+ // If a concurrent mode failure occurred recently, we want to be
+ // more conservative and halve our expected time_until_cms_gen_full()
if (work > deadline) {
if (Verbose && PrintGCDetails) {
gclog_or_tty->print(
@@ -556,7 +569,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_should_unload_classes(false),
_concurrent_cycles_since_last_unload(0),
_roots_scanning_options(0),
- _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
+ _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
+ _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
ExplicitGCInvokesConcurrent = true;
@@ -709,7 +723,8 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// Support for parallelizing survivor space rescan
if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
- size_t max_plab_samples = MaxNewSize/((SurvivorRatio+2)*MinTLABSize);
+ size_t max_plab_samples = cp->max_gen0_size()/
+ ((SurvivorRatio+2)*MinTLABSize);
_survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
_survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
_cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
@@ -772,7 +787,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
_gc_counters = new CollectorCounters("CMS", 1);
_completed_initialization = true;
- _sweep_timer.start(); // start of time
+ _inter_sweep_timer.start(); // start of time
}
const char* ConcurrentMarkSweepGeneration::name() const {
@@ -899,6 +914,14 @@ bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
return result;
}
+// At a promotion failure dump information on block layout in heap
+// (cms old generation).
+void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
+ if (CMSDumpAtPromotionFailure) {
+ cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
+ }
+}
+
CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
return _cmsSpace;
@@ -1367,12 +1390,7 @@ void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
- ps->lab.retire();
-#if CFLS_LAB_REFILL_STATS
- if (thread_num == 0) {
- _cmsSpace->print_par_alloc_stats();
- }
-#endif
+ ps->lab.retire(thread_num);
}
void
@@ -1973,11 +1991,14 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
// We must adjust the allocation statistics being maintained
// in the free list space. We do so by reading and clearing
// the sweep timer and updating the block flux rate estimates below.
- assert(_sweep_timer.is_active(), "We should never see the timer inactive");
- _sweep_timer.stop();
- // Note that we do not use this sample to update the _sweep_estimate.
- _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
- _sweep_estimate.padded_average());
+ assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
+ if (_inter_sweep_timer.is_active()) {
+ _inter_sweep_timer.stop();
+ // Note that we do not use this sample to update the _inter_sweep_estimate.
+ _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+ _inter_sweep_estimate.padded_average(),
+ _intra_sweep_estimate.padded_average());
+ }
GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
ref_processor(), clear_all_soft_refs);
@@ -2014,10 +2035,10 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
}
// Adjust the per-size allocation stats for the next epoch.
- _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
- // Restart the "sweep timer" for next epoch.
- _sweep_timer.reset();
- _sweep_timer.start();
+ _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
+ // Restart the "inter sweep timer" for the next epoch.
+ _inter_sweep_timer.reset();
+ _inter_sweep_timer.start();
// Sample collection pause time and reset for collection interval.
if (UseAdaptiveSizePolicy) {
@@ -2675,7 +2696,7 @@ void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
// Also reset promotion tracking in par gc thread states.
if (ParallelGCThreads > 0) {
for (uint i = 0; i < ParallelGCThreads; i++) {
- _par_gc_thread_states[i]->promo.stopTrackingPromotions();
+ _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
}
}
}
@@ -2770,7 +2791,7 @@ class VerifyMarkedClosure: public BitMapClosure {
bool do_bit(size_t offset) {
HeapWord* addr = _marks->offsetToHeapWord(offset);
if (!_marks->isMarked(addr)) {
- oop(addr)->print();
+ oop(addr)->print_on(gclog_or_tty);
gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
_failed = true;
}
@@ -2819,7 +2840,7 @@ bool CMSCollector::verify_after_remark() {
// Clear any marks from a previous round
verification_mark_bm()->clear_all();
assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
- assert(overflow_list_is_empty(), "overflow list should be empty");
+ verify_work_stacks_empty();
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->ensure_parsability(false); // fill TLABs, but no need to retire them
@@ -2892,8 +2913,8 @@ void CMSCollector::verify_after_remark_work_1() {
verification_mark_bm()->iterate(&vcl);
if (vcl.failed()) {
gclog_or_tty->print("Verification failed");
- Universe::heap()->print();
- fatal(" ... aborting");
+ Universe::heap()->print_on(gclog_or_tty);
+ fatal("CMS: failed marking verification after remark");
}
}
@@ -3313,7 +3334,7 @@ bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
Universe::heap()->barrier_set()->resize_covered_region(mr);
// Hmmmm... why doesn't CFLS::set_end verify locking?
// This is quite ugly; FIX ME XXX
- _cmsSpace->assert_locked();
+ _cmsSpace->assert_locked(freelistLock());
_cmsSpace->set_end((HeapWord*)_virtual_space.high());
// update the space and generation capacity counters
@@ -5867,9 +5888,9 @@ void CMSCollector::sweep(bool asynch) {
check_correct_thread_executing();
verify_work_stacks_empty();
verify_overflow_empty();
- incrementSweepCount();
- _sweep_timer.stop();
- _sweep_estimate.sample(_sweep_timer.seconds());
+ increment_sweep_count();
+ _inter_sweep_timer.stop();
+ _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
// PermGen verification support: If perm gen sweeping is disabled in
@@ -5892,6 +5913,9 @@ void CMSCollector::sweep(bool asynch) {
}
}
+ assert(!_intra_sweep_timer.is_active(), "Should not be active");
+ _intra_sweep_timer.reset();
+ _intra_sweep_timer.start();
if (asynch) {
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
@@ -5936,8 +5960,11 @@ void CMSCollector::sweep(bool asynch) {
verify_work_stacks_empty();
verify_overflow_empty();
- _sweep_timer.reset();
- _sweep_timer.start();
+ _intra_sweep_timer.stop();
+ _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
+
+ _inter_sweep_timer.reset();
+ _inter_sweep_timer.start();
update_time_of_last_gc(os::javaTimeMillis());
@@ -5980,11 +6007,11 @@ void CMSCollector::sweep(bool asynch) {
// FIX ME!!! Looks like this belongs in CFLSpace, with
// CMSGen merely delegating to it.
void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
- double nearLargestPercent = 0.999;
+ double nearLargestPercent = FLSLargestBlockCoalesceProximity;
HeapWord* minAddr = _cmsSpace->bottom();
HeapWord* largestAddr =
(HeapWord*) _cmsSpace->dictionary()->findLargestDict();
- if (largestAddr == 0) {
+ if (largestAddr == NULL) {
// The dictionary appears to be empty. In this case
// try to coalesce at the end of the heap.
largestAddr = _cmsSpace->end();
@@ -5992,6 +6019,13 @@ void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
size_t largestOffset = pointer_delta(largestAddr, minAddr);
size_t nearLargestOffset =
(size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
+ if (PrintFLSStatistics != 0) {
+ gclog_or_tty->print_cr(
+ "CMS: Large Block: " PTR_FORMAT ";"
+ " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
+ largestAddr,
+ _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
+ }
_cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
}
@@ -6071,9 +6105,11 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
assert_lock_strong(gen->freelistLock());
assert_lock_strong(bitMapLock());
- assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
- gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
- _sweep_estimate.padded_average());
+ assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
+ assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
+ gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+ _inter_sweep_estimate.padded_average(),
+ _intra_sweep_estimate.padded_average());
gen->setNearLargestChunk();
{
@@ -6086,7 +6122,7 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
// end-of-sweep-census below will be off by a little bit.
}
gen->cmsSpace()->sweep_completed();
- gen->cmsSpace()->endSweepFLCensus(sweepCount());
+ gen->cmsSpace()->endSweepFLCensus(sweep_count());
if (should_unload_classes()) { // unloaded classes this cycle,
_concurrent_cycles_since_last_unload = 0; // ... reset count
} else { // did not unload classes,
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
index a58217faf93..18164a58b4e 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
@@ -355,6 +355,11 @@ class CMSStats VALUE_OBJ_CLASS_SPEC {
unsigned int new_duty_cycle);
unsigned int icms_update_duty_cycle_impl();
+ // In support of adjusting of cms trigger ratios based on history
+ // of concurrent mode failure.
+ double cms_free_adjustment_factor(size_t free) const;
+ void adjust_cms_free_adjustment_factor(bool fail, size_t free);
+
public:
CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
unsigned int alpha = CMSExpAvgFactor);
@@ -570,8 +575,11 @@ class CMSCollector: public CHeapObj {
// appropriately.
void check_gc_time_limit();
// XXX Move these to CMSStats ??? FIX ME !!!
- elapsedTimer _sweep_timer;
- AdaptivePaddedAverage _sweep_estimate;
+ elapsedTimer _inter_sweep_timer; // time between sweeps
+ elapsedTimer _intra_sweep_timer; // time _in_ sweeps
+ // padded decaying average estimates of the above
+ AdaptivePaddedAverage _inter_sweep_estimate;
+ AdaptivePaddedAverage _intra_sweep_estimate;
protected:
ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
@@ -625,6 +633,7 @@ class CMSCollector: public CHeapObj {
// . _collectorState <= Idling == post-sweep && pre-mark
// . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
 // precleaning || abortablePreclean
+ public:
enum CollectorState {
Resizing = 0,
Resetting = 1,
@@ -636,6 +645,7 @@ class CMSCollector: public CHeapObj {
FinalMarking = 7,
Sweeping = 8
};
+ protected:
static CollectorState _collectorState;
// State related to prologue/epilogue invocation for my generations
@@ -655,7 +665,7 @@ class CMSCollector: public CHeapObj {
int _numYields;
size_t _numDirtyCards;
- uint _sweepCount;
+ size_t _sweep_count;
// number of full gc's since the last concurrent gc.
uint _full_gcs_since_conc_gc;
@@ -905,7 +915,7 @@ class CMSCollector: public CHeapObj {
// Check that the currently executing thread is the expected
// one (foreground collector or background collector).
- void check_correct_thread_executing() PRODUCT_RETURN;
+ static void check_correct_thread_executing() PRODUCT_RETURN;
// XXXPERM void print_statistics() PRODUCT_RETURN;
bool is_cms_reachable(HeapWord* addr);
@@ -930,8 +940,8 @@ class CMSCollector: public CHeapObj {
static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
- uint sweepCount() const { return _sweepCount; }
- void incrementSweepCount() { _sweepCount++; }
+ size_t sweep_count() const { return _sweep_count; }
+ void increment_sweep_count() { _sweep_count++; }
// Timers/stats for gc scheduling and incremental mode pacing.
CMSStats& stats() { return _stats; }
@@ -1165,6 +1175,11 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
bool younger_handles_promotion_failure) const;
+ // Inform this (non-young) generation that a promotion failure was
+ // encountered during a collection of a younger generation that
+ // promotes into this generation.
+ virtual void promotion_failure_occurred();
+
bool should_collect(bool full, size_t size, bool tlab);
virtual bool should_concurrent_collect() const;
virtual bool is_too_full() const;
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp
index 5f9c4f22632..1a454fe68f4 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp
@@ -55,7 +55,8 @@ class FreeBlockDictionary: public CHeapObj {
virtual void dictCensusUpdate(size_t size, bool split, bool birth) = 0;
virtual bool coalDictOverPopulated(size_t size) = 0;
virtual void beginSweepDictCensus(double coalSurplusPercent,
- float sweep_current, float sweep_ewstimate) = 0;
+ float inter_sweep_current, float inter_sweep_estimate,
+      float intra_sweep_current) = 0;
virtual void endSweepDictCensus(double splitSurplusPercent) = 0;
virtual FreeChunk* findLargestDict() const = 0;
// verify that the given chunk is in the dictionary.
@@ -79,6 +80,7 @@ class FreeBlockDictionary: public CHeapObj {
}
virtual void printDictCensus() const = 0;
+ virtual void print_free_lists(outputStream* st) const = 0;
virtual void verify() const = 0;
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp
index 494c090c6fb..e709c3af6ca 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp
@@ -67,3 +67,8 @@ void FreeChunk::verifyList() const {
}
}
#endif
+
+void FreeChunk::print_on(outputStream* st) {
+ st->print_cr("Next: " PTR_FORMAT " Prev: " PTR_FORMAT " %s",
+ next(), prev(), cantCoalesce() ? "[can't coalesce]" : "");
+}
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
index 768614d7e2b..9e731e7ca35 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp
@@ -129,6 +129,8 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
void verifyList() const PRODUCT_RETURN;
void mangleAllocated(size_t size) PRODUCT_RETURN;
void mangleFreed(size_t size) PRODUCT_RETURN;
+
+ void print_on(outputStream* st);
};
// Alignment helpers etc.
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp
index 79503deb81d..1ca1a4e5396 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp
@@ -81,8 +81,8 @@ void FreeList::reset(size_t hint) {
set_hint(hint);
}
-void FreeList::init_statistics() {
- _allocation_stats.initialize();
+void FreeList::init_statistics(bool split_birth) {
+ _allocation_stats.initialize(split_birth);
}
FreeChunk* FreeList::getChunkAtHead() {
@@ -292,14 +292,31 @@ bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
}
#ifndef PRODUCT
+void FreeList::verify_stats() const {
+ // The +1 of the LH comparand is to allow some "looseness" in
+ // checking: we usually call this interface when adding a block
+ // and we'll subsequently update the stats; we cannot update the
+ // stats beforehand because in the case of the large-block BT
+ // dictionary for example, this might be the first block and
+ // in that case there would be no place that we could record
+ // the stats (which are kept in the block itself).
+ assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1 // Total Stock + 1
+ >= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle");
+}
+
void FreeList::assert_proper_lock_protection_work() const {
-#ifdef ASSERT
- if (_protecting_lock != NULL &&
- SharedHeap::heap()->n_par_threads() > 0) {
- // Should become an assert.
- guarantee(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
+ assert(_protecting_lock != NULL, "Don't call this directly");
+ assert(ParallelGCThreads > 0, "Don't call this directly");
+ Thread* thr = Thread::current();
+ if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
+ // assert that we are holding the freelist lock
+ } else if (thr->is_GC_task_thread()) {
+ assert(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
+ } else if (thr->is_Java_thread()) {
+ assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
+ } else {
+ ShouldNotReachHere(); // unaccounted thread type?
}
-#endif
}
#endif
diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp
index 581317643c7..8dd1543ab64 100644
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp
@@ -35,18 +35,26 @@ class CompactibleFreeListSpace;
// for that implementation.
class Mutex;
+class TreeList;
class FreeList VALUE_OBJ_CLASS_SPEC {
friend class CompactibleFreeListSpace;
friend class VMStructs;
- friend class printTreeCensusClosure;
- FreeChunk* _head; // List of free chunks
+ friend class PrintTreeCensusClosure;
+
+ protected:
+ TreeList* _parent;
+ TreeList* _left;
+ TreeList* _right;
+
+ private:
+ FreeChunk* _head; // Head of list of free chunks
FreeChunk* _tail; // Tail of list of free chunks
- size_t _size; // Size in Heap words of each chunks
+ size_t _size; // Size in Heap words of each chunk
ssize_t _count; // Number of entries in list
size_t _hint; // next larger size list with a positive surplus
- AllocationStats _allocation_stats; // statistics for smart allocation
+ AllocationStats _allocation_stats; // allocation-related statistics
#ifdef ASSERT
Mutex* _protecting_lock;
@@ -63,9 +71,12 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// Initialize the allocation statistics.
protected:
- void init_statistics();
+ void init_statistics(bool split_birth = false);
void set_count(ssize_t v) { _count = v;}
- void increment_count() { _count++; }
+ void increment_count() {
+ _count++;
+ }
+
void decrement_count() {
_count--;
assert(_count >= 0, "Count should not be negative");
@@ -167,11 +178,13 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
_allocation_stats.set_desired(v);
}
void compute_desired(float inter_sweep_current,
- float inter_sweep_estimate) {
+ float inter_sweep_estimate,
+ float intra_sweep_estimate) {
assert_proper_lock_protection();
_allocation_stats.compute_desired(_count,
inter_sweep_current,
- inter_sweep_estimate);
+ inter_sweep_estimate,
+ intra_sweep_estimate);
}
ssize_t coalDesired() const {
return _allocation_stats.coalDesired();
@@ -306,6 +319,9 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// found. Return NULL if "fc" is not found.
bool verifyChunkInFreeLists(FreeChunk* fc) const;
+ // Stats verification
+ void verify_stats() const PRODUCT_RETURN;
+
// Printing support
static void print_labels_on(outputStream* st, const char* c);
void print_on(outputStream* st, const char* c = NULL) const;
diff --git a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
index 3000f010b17..96760517637 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
@@ -351,9 +351,16 @@ void
CollectionSetChooser::printSortedHeapRegions() {
gclog_or_tty->print_cr("Printing %d Heap Regions sorted by amount of known garbage",
_numMarkedRegions);
+
+ DEBUG_ONLY(int marked_count = 0;)
for (int i = 0; i < _markedRegions.length(); i++) {
- printHeapRegion(_markedRegions.at(i));
+ HeapRegion* r = _markedRegions.at(i);
+ if (r != NULL) {
+ printHeapRegion(r);
+ DEBUG_ONLY(marked_count++;)
+ }
}
+ assert(marked_count == _numMarkedRegions, "must be");
gclog_or_tty->print_cr("Done sorted heap region print");
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
index 34939de57c7..11c288073d3 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
@@ -42,28 +42,49 @@ ConcurrentG1Refine::ConcurrentG1Refine() :
_n_periods(0),
_threads(NULL), _n_threads(0)
{
- if (G1ConcRefine) {
- _n_threads = (int)thread_num();
- if (_n_threads > 0) {
- _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
- int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
- ConcurrentG1RefineThread *next = NULL;
- for (int i = _n_threads - 1; i >= 0; i--) {
- ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
- assert(t != NULL, "Conc refine should have been created");
- assert(t->cg1r() == this, "Conc refine thread should refer to this");
- _threads[i] = t;
- next = t;
- }
- }
+
+  // Ergonomically select initial concurrent refinement parameters
+ if (FLAG_IS_DEFAULT(G1ConcRefineGreenZone)) {
+ FLAG_SET_DEFAULT(G1ConcRefineGreenZone, MAX2(ParallelGCThreads, 1));
+ }
+ set_green_zone(G1ConcRefineGreenZone);
+
+ if (FLAG_IS_DEFAULT(G1ConcRefineYellowZone)) {
+ FLAG_SET_DEFAULT(G1ConcRefineYellowZone, green_zone() * 3);
+ }
+ set_yellow_zone(MAX2(G1ConcRefineYellowZone, green_zone()));
+
+ if (FLAG_IS_DEFAULT(G1ConcRefineRedZone)) {
+ FLAG_SET_DEFAULT(G1ConcRefineRedZone, yellow_zone() * 2);
+ }
+ set_red_zone(MAX2(G1ConcRefineRedZone, yellow_zone()));
+ _n_worker_threads = thread_num();
+ // We need one extra thread to do the young gen rset size sampling.
+ _n_threads = _n_worker_threads + 1;
+ reset_threshold_step();
+
+ _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
+ int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+ ConcurrentG1RefineThread *next = NULL;
+ for (int i = _n_threads - 1; i >= 0; i--) {
+ ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
+ assert(t != NULL, "Conc refine should have been created");
+ assert(t->cg1r() == this, "Conc refine thread should refer to this");
+ _threads[i] = t;
+ next = t;
}
}
-size_t ConcurrentG1Refine::thread_num() {
- if (G1ConcRefine) {
- return (G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads;
+void ConcurrentG1Refine::reset_threshold_step() {
+ if (FLAG_IS_DEFAULT(G1ConcRefineThresholdStep)) {
+ _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
+ } else {
+ _thread_threshold_step = G1ConcRefineThresholdStep;
}
- return 0;
+}
+
+int ConcurrentG1Refine::thread_num() {
+ return MAX2((G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads, 1);
}
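As a sketch of the resulting ergonomics (assuming ParallelGCThreads = 8, G1ParallelRSetThreads left at 0, and the zone and threshold-step flags left at their defaults): the constructor above ends up with green_zone = 8, yellow_zone = 24, red_zone = 48 and worker_thread_num() = 8, so reset_threshold_step() computes (24 - 8) / (8 + 1) = 1.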
void ConcurrentG1Refine::init() {
@@ -123,6 +144,15 @@ void ConcurrentG1Refine::stop() {
}
}
+void ConcurrentG1Refine::reinitialize_threads() {
+ reset_threshold_step();
+ if (_threads != NULL) {
+ for (int i = 0; i < _n_threads; i++) {
+ _threads[i]->initialize();
+ }
+ }
+}
+
ConcurrentG1Refine::~ConcurrentG1Refine() {
if (G1ConcRSLogCacheSize > 0) {
assert(_card_counts != NULL, "Logic");
@@ -384,4 +414,3 @@ void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
st->cr();
}
}
-
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp
index 5cef3058ca4..7bcbecfbe5e 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp
@@ -29,6 +29,31 @@ class G1RemSet;
class ConcurrentG1Refine: public CHeapObj {
ConcurrentG1RefineThread** _threads;
int _n_threads;
+ int _n_worker_threads;
+ /*
+ * The value of the update buffer queue length falls into one of 3 zones:
+ * green, yellow, red. If the value is in [0, green) nothing is
+ * done, the buffers are left unprocessed to enable the caching effect of the
+ * dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
+ * threads are gradually activated. In [yellow, red) all threads are
+ * running. If the length becomes red (max queue length) the mutators start
+ * processing the buffers.
+ *
+ * There are some interesting cases (with G1AdaptiveConcRefine turned off):
+ * 1) green = yellow = red = 0. In this case the mutator will process all
+   *    buffers, except for those created by the deferred updates
+ * machinery during a collection.
+ * 2) green = 0. Means no caching. Can be a good way to minimize the
+ * amount of time spent updating rsets during a collection.
+ */
+ int _green_zone;
+ int _yellow_zone;
+ int _red_zone;
+
+ int _thread_threshold_step;
+
+  // Reset the threshold step value based on the current zone boundaries.
+ void reset_threshold_step();
// The cache for card refinement.
bool _use_cache;
@@ -147,6 +172,8 @@ class ConcurrentG1Refine: public CHeapObj {
void init(); // Accomplish some initialization that has to wait.
void stop();
+ void reinitialize_threads();
+
// Iterate over the conc refine threads
void threads_do(ThreadClosure *tc);
@@ -178,7 +205,20 @@ class ConcurrentG1Refine: public CHeapObj {
void clear_and_record_card_counts();
- static size_t thread_num();
+ static int thread_num();
void print_worker_threads_on(outputStream* st) const;
+
+ void set_green_zone(int x) { _green_zone = x; }
+ void set_yellow_zone(int x) { _yellow_zone = x; }
+ void set_red_zone(int x) { _red_zone = x; }
+
+ int green_zone() const { return _green_zone; }
+ int yellow_zone() const { return _yellow_zone; }
+ int red_zone() const { return _red_zone; }
+
+ int total_thread_num() const { return _n_threads; }
+ int worker_thread_num() const { return _n_worker_threads; }
+
+ int thread_threshold_step() const { return _thread_threshold_step; }
};
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
index aaf2544fe9b..b23c287a6e5 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
@@ -25,10 +25,6 @@
#include "incls/_precompiled.incl"
#include "incls/_concurrentG1RefineThread.cpp.incl"
-// ======= Concurrent Mark Thread ========
-
-// The CM thread is created when the G1 garbage collector is used
-
ConcurrentG1RefineThread::
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
int worker_id_offset, int worker_id) :
@@ -37,19 +33,42 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
_worker_id(worker_id),
_active(false),
_next(next),
+ _monitor(NULL),
_cg1r(cg1r),
- _vtime_accum(0.0),
- _interval_ms(5.0)
+ _vtime_accum(0.0)
{
+
+ // Each thread has its own monitor. The i-th thread is responsible for signalling
+  // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
+ // thread. Monitors are also used to wake up the threads during termination.
+  // The 0th worker is notified by mutator threads and has a special monitor.
+ // The last worker is used for young gen rset size sampling.
+ if (worker_id > 0) {
+ _monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true);
+ } else {
+ _monitor = DirtyCardQ_CBL_mon;
+ }
+ initialize();
create_and_start();
}
+void ConcurrentG1RefineThread::initialize() {
+ if (_worker_id < cg1r()->worker_thread_num()) {
+ // Current thread activation threshold
+ _threshold = MIN2(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
+ cg1r()->yellow_zone());
+    // A thread deactivates once the number of buffers has dropped to its deactivation threshold
+ _deactivation_threshold = MAX2(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
+ } else {
+ set_active(true);
+ }
+}
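Continuing the illustrative numbers above (green = 8, yellow = 24, step = 1): worker 0 gets _threshold = 9 and _deactivation_threshold = 8, worker 1 gets 10 and 9, and so on, with _threshold capped at the yellow zone; the extra thread (worker_id == worker_thread_num()) is simply marked active, since it does the young gen rset size sampling instead.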
+
void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy();
if (g1p->adaptive_young_list_length()) {
int regions_visited = 0;
-
g1h->young_list_rs_length_sampling_init();
while (g1h->young_list_rs_length_sampling_more()) {
g1h->young_list_rs_length_sampling_next();
@@ -70,99 +89,121 @@ void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
}
}
+void ConcurrentG1RefineThread::run_young_rs_sampling() {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ _vtime_start = os::elapsedVTime();
+ while(!_should_terminate) {
+ _sts.join();
+ sample_young_list_rs_lengths();
+ _sts.leave();
+
+ if (os::supports_vtime()) {
+ _vtime_accum = (os::elapsedVTime() - _vtime_start);
+ } else {
+ _vtime_accum = 0.0;
+ }
+
+ MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+ if (_should_terminate) {
+ break;
+ }
+ _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefineServiceInterval);
+ }
+}
+
+void ConcurrentG1RefineThread::wait_for_completed_buffers() {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+ while (!_should_terminate && !is_active()) {
+ _monitor->wait(Mutex::_no_safepoint_check_flag);
+ }
+}
+
+bool ConcurrentG1RefineThread::is_active() {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ return _worker_id > 0 ? _active : dcqs.process_completed_buffers();
+}
+
+void ConcurrentG1RefineThread::activate() {
+ MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+ if (_worker_id > 0) {
+ if (G1TraceConcurrentRefinement) {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
+ _worker_id, _threshold, (int)dcqs.completed_buffers_num());
+ }
+ set_active(true);
+ } else {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ dcqs.set_process_completed(true);
+ }
+ _monitor->notify();
+}
+
+void ConcurrentG1RefineThread::deactivate() {
+ MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+ if (_worker_id > 0) {
+ if (G1TraceConcurrentRefinement) {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
+ _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
+ }
+ set_active(false);
+ } else {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ dcqs.set_process_completed(false);
+ }
+}
+
void ConcurrentG1RefineThread::run() {
initialize_in_thread();
- _vtime_start = os::elapsedVTime();
wait_for_universe_init();
+ if (_worker_id >= cg1r()->worker_thread_num()) {
+ run_young_rs_sampling();
+ terminate();
+ }
+
+ _vtime_start = os::elapsedVTime();
while (!_should_terminate) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
- // Wait for completed log buffers to exist.
- {
- MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
- while (((_worker_id == 0 && !dcqs.process_completed_buffers()) ||
- (_worker_id > 0 && !is_active())) &&
- !_should_terminate) {
- DirtyCardQ_CBL_mon->wait(Mutex::_no_safepoint_check_flag);
- }
- }
+
+ // Wait for work
+ wait_for_completed_buffers();
if (_should_terminate) {
- return;
+ break;
}
- // Now we take them off (this doesn't hold locks while it applies
- // closures.) (If we did a full collection, then we'll do a full
- // traversal.
_sts.join();
- int n_logs = 0;
- int lower_limit = 0;
- double start_vtime_sec; // only used when G1SmoothConcRefine is on
- int prev_buffer_num; // only used when G1SmoothConcRefine is on
- // This thread activation threshold
- int threshold = G1UpdateBufferQueueProcessingThreshold * _worker_id;
- // Next thread activation threshold
- int next_threshold = threshold + G1UpdateBufferQueueProcessingThreshold;
- int deactivation_threshold = MAX2(threshold - G1UpdateBufferQueueProcessingThreshold / 2, 0);
- if (G1SmoothConcRefine) {
- lower_limit = 0;
- start_vtime_sec = os::elapsedVTime();
- prev_buffer_num = (int) dcqs.completed_buffers_num();
- } else {
- lower_limit = G1UpdateBufferQueueProcessingThreshold / 4; // For now.
- }
- while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, lower_limit)) {
- double end_vtime_sec;
- double elapsed_vtime_sec;
- int elapsed_vtime_ms;
- int curr_buffer_num = (int) dcqs.completed_buffers_num();
-
- if (G1SmoothConcRefine) {
- end_vtime_sec = os::elapsedVTime();
- elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
- elapsed_vtime_ms = (int) (elapsed_vtime_sec * 1000.0);
-
- if (curr_buffer_num > prev_buffer_num ||
- curr_buffer_num > next_threshold) {
- decreaseInterval(elapsed_vtime_ms);
- } else if (curr_buffer_num < prev_buffer_num) {
- increaseInterval(elapsed_vtime_ms);
- }
+ do {
+ int curr_buffer_num = (int)dcqs.completed_buffers_num();
+      // If the number of buffers falls back into the yellow zone,
+ // that means that the transition period after the evacuation pause has ended.
+ if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= cg1r()->yellow_zone()) {
+ dcqs.set_completed_queue_padding(0);
}
- if (_worker_id == 0) {
- sample_young_list_rs_lengths();
- } else if (curr_buffer_num < deactivation_threshold) {
+
+ if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
// If the number of the buffer has fallen below our threshold
// we should deactivate. The predecessor will reactivate this
// thread should the number of the buffers cross the threshold again.
- MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
deactivate();
- if (G1TraceConcurrentRefinement) {
- gclog_or_tty->print_cr("G1-Refine-deactivated worker %d", _worker_id);
- }
break;
}
// Check if we need to activate the next thread.
- if (curr_buffer_num > next_threshold && _next != NULL && !_next->is_active()) {
- MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
+ if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
_next->activate();
- DirtyCardQ_CBL_mon->notify_all();
- if (G1TraceConcurrentRefinement) {
- gclog_or_tty->print_cr("G1-Refine-activated worker %d", _next->_worker_id);
- }
}
+ } while (dcqs.apply_closure_to_completed_buffer(_worker_id + _worker_id_offset, cg1r()->green_zone()));
- if (G1SmoothConcRefine) {
- prev_buffer_num = curr_buffer_num;
- _sts.leave();
- os::sleep(Thread::current(), (jlong) _interval_ms, false);
- _sts.join();
- start_vtime_sec = os::elapsedVTime();
- }
- n_logs++;
+ // We can exit the loop above while being active if there was a yield request.
+ if (is_active()) {
+ deactivate();
}
+
_sts.leave();
if (os::supports_vtime()) {
@@ -172,7 +213,6 @@ void ConcurrentG1RefineThread::run() {
}
}
assert(_should_terminate, "just checking");
-
terminate();
}
@@ -191,8 +231,8 @@ void ConcurrentG1RefineThread::stop() {
}
{
- MutexLockerEx x(DirtyCardQ_CBL_mon, Mutex::_no_safepoint_check_flag);
- DirtyCardQ_CBL_mon->notify_all();
+ MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
+ _monitor->notify();
}
{
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp
index 167fc176ef7..b2eb5d3276e 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp
@@ -40,42 +40,36 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
// when the number of the rset update buffer crosses a certain threshold. A successor
// would self-deactivate when the number of the buffers falls below the threshold.
bool _active;
- ConcurrentG1RefineThread * _next;
- public:
- virtual void run();
+ ConcurrentG1RefineThread* _next;
+ Monitor* _monitor;
+ ConcurrentG1Refine* _cg1r;
- bool is_active() { return _active; }
- void activate() { _active = true; }
- void deactivate() { _active = false; }
+ int _thread_threshold_step;
+ // This thread activation threshold
+ int _threshold;
+ // This thread deactivation threshold
+ int _deactivation_threshold;
- private:
- ConcurrentG1Refine* _cg1r;
+ void sample_young_list_rs_lengths();
+ void run_young_rs_sampling();
+ void wait_for_completed_buffers();
- double _interval_ms;
-
- void decreaseInterval(int processing_time_ms) {
- double min_interval_ms = (double) processing_time_ms;
- _interval_ms = 0.8 * _interval_ms;
- if (_interval_ms < min_interval_ms)
- _interval_ms = min_interval_ms;
- }
- void increaseInterval(int processing_time_ms) {
- double max_interval_ms = 9.0 * (double) processing_time_ms;
- _interval_ms = 1.1 * _interval_ms;
- if (max_interval_ms > 0 && _interval_ms > max_interval_ms)
- _interval_ms = max_interval_ms;
- }
-
- void sleepBeforeNextCycle();
+ void set_active(bool x) { _active = x; }
+ bool is_active();
+ void activate();
+ void deactivate();
// For use by G1CollectedHeap, which is a friend.
static SuspendibleThreadSet* sts() { return &_sts; }
- public:
+public:
+ virtual void run();
// Constructor
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
int worker_id_offset, int worker_id);
+ void initialize();
+
// Printing
void print() const;
void print_on(outputStream* st) const;
@@ -83,13 +77,10 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
// Total virtual time so far.
double vtime_accum() { return _vtime_accum; }
- ConcurrentG1Refine* cg1r() { return _cg1r; }
-
- void sample_young_list_rs_lengths();
+ ConcurrentG1Refine* cg1r() { return _cg1r; }
// Yield for GC
- void yield();
-
+ void yield();
// shutdown
void stop();
};
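
A note on the staged activation encoded by the _threshold and _deactivation_threshold fields above: each refinement worker wakes its successor only once the completed-buffer count climbs past the successor's own activation threshold, and a worker parks itself again when the count falls back to its deactivation threshold. The following standalone sketch illustrates that policy; the zone values, worker count and step formula are illustrative assumptions, not the VM's actual ergonomic choices.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical zone settings; in the VM these come from ConcurrentG1Refine
  // (G1ConcRefineGreenZone / G1ConcRefineYellowZone or their ergonomic values).
  const int green_zone  = 6;   // buffers deliberately left for the pause
  const int yellow_zone = 18;  // point at which all workers should be running
  const int num_workers = 4;

  // Spread the activation thresholds between the green and yellow zones,
  // one step per worker, in the spirit of _thread_threshold_step.
  const int step = std::max(1, (yellow_zone - green_zone) / (num_workers + 1));
  std::vector<int> activation(num_workers), deactivation(num_workers);
  for (int i = 0; i < num_workers; ++i) {
    activation[i]   = green_zone + (i + 1) * step;
    deactivation[i] = std::max(green_zone, activation[i] - step);  // hysteresis
    std::printf("worker %d: activate above %d, deactivate at %d buffers\n",
                i, activation[i], deactivation[i]);
  }

  // Walk a rising and falling completed-buffer count and report how many
  // refinement workers would be running at each point.
  const int samples[] = {4, 8, 12, 20, 10, 5};
  for (int queued : samples) {
    int active = 0;
    for (int i = 0; i < num_workers; ++i) {
      if (queued > activation[i]) active = i + 1;  // staged wake-up
    }
    std::printf("queued=%2d -> %d worker(s) active\n", queued, active);
  }
  return 0;
}
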
diff --git a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index af93ae7d231..c77af414161 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -760,7 +760,6 @@ void ConcurrentMark::checkpointRootsInitialPost() {
rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
- satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold);
satb_mq_set.set_active_all_threads(true);
// update_g1_committed() will be called at the end of an evac pause
diff --git a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp
index 7372a4a787c..81d8716b1b1 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp
@@ -61,8 +61,8 @@ bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
-DirtyCardQueueSet::DirtyCardQueueSet() :
- PtrQueueSet(true /*notify_when_complete*/),
+DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
+ PtrQueueSet(notify_when_complete),
_closure(NULL),
_shared_dirty_card_queue(this, true /*perm*/),
_free_ids(NULL),
@@ -77,12 +77,12 @@ size_t DirtyCardQueueSet::num_par_ids() {
}
void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
+ int process_completed_threshold,
int max_completed_queue,
Mutex* lock, PtrQueueSet* fl_owner) {
- PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue, fl_owner);
+ PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold,
+ max_completed_queue, fl_owner);
set_buffer_size(G1UpdateBufferSize);
- set_process_completed_threshold(G1UpdateBufferQueueProcessingThreshold);
-
_shared_dirty_card_queue.set_lock(lock);
_free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon);
}
@@ -154,9 +154,10 @@ bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
return b;
}
-DirtyCardQueueSet::CompletedBufferNode*
-DirtyCardQueueSet::get_completed_buffer_lock(int stop_at) {
- CompletedBufferNode* nd = NULL;
+
+BufferNode*
+DirtyCardQueueSet::get_completed_buffer(int stop_at) {
+ BufferNode* nd = NULL;
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
if ((int)_n_completed_buffers <= stop_at) {
@@ -166,53 +167,31 @@ DirtyCardQueueSet::get_completed_buffer_lock(int stop_at) {
if (_completed_buffers_head != NULL) {
nd = _completed_buffers_head;
- _completed_buffers_head = nd->next;
+ _completed_buffers_head = nd->next();
if (_completed_buffers_head == NULL)
_completed_buffers_tail = NULL;
_n_completed_buffers--;
+ assert(_n_completed_buffers >= 0, "Invariant");
}
debug_only(assert_completed_buffer_list_len_correct_locked());
return nd;
}
-// We only do this in contexts where there is no concurrent enqueueing.
-DirtyCardQueueSet::CompletedBufferNode*
-DirtyCardQueueSet::get_completed_buffer_CAS() {
- CompletedBufferNode* nd = _completed_buffers_head;
-
- while (nd != NULL) {
- CompletedBufferNode* next = nd->next;
- CompletedBufferNode* result =
- (CompletedBufferNode*)Atomic::cmpxchg_ptr(next,
- &_completed_buffers_head,
- nd);
- if (result == nd) {
- return result;
- } else {
- nd = _completed_buffers_head;
- }
- }
- assert(_completed_buffers_head == NULL, "Loop post");
- _completed_buffers_tail = NULL;
- return NULL;
-}
-
bool DirtyCardQueueSet::
apply_closure_to_completed_buffer_helper(int worker_i,
- CompletedBufferNode* nd) {
+ BufferNode* nd) {
if (nd != NULL) {
+ void **buf = BufferNode::make_buffer_from_node(nd);
+ size_t index = nd->index();
bool b =
- DirtyCardQueue::apply_closure_to_buffer(_closure, nd->buf,
- nd->index, _sz,
+ DirtyCardQueue::apply_closure_to_buffer(_closure, buf,
+ index, _sz,
true, worker_i);
- void** buf = nd->buf;
- size_t index = nd->index;
- delete nd;
if (b) {
deallocate_buffer(buf);
return true; // In normal case, go on to next buffer.
} else {
- enqueue_complete_buffer(buf, index, true);
+ enqueue_complete_buffer(buf, index);
return false;
}
} else {
@@ -222,40 +201,36 @@ apply_closure_to_completed_buffer_helper(int worker_i,
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
int stop_at,
- bool with_CAS)
+ bool during_pause)
{
- CompletedBufferNode* nd = NULL;
- if (with_CAS) {
- guarantee(stop_at == 0, "Precondition");
- nd = get_completed_buffer_CAS();
- } else {
- nd = get_completed_buffer_lock(stop_at);
- }
+ assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
+ BufferNode* nd = get_completed_buffer(stop_at);
bool res = apply_closure_to_completed_buffer_helper(worker_i, nd);
if (res) Atomic::inc(&_processed_buffers_rs_thread);
return res;
}
void DirtyCardQueueSet::apply_closure_to_all_completed_buffers() {
- CompletedBufferNode* nd = _completed_buffers_head;
+ BufferNode* nd = _completed_buffers_head;
while (nd != NULL) {
bool b =
- DirtyCardQueue::apply_closure_to_buffer(_closure, nd->buf, 0, _sz,
- false);
+ DirtyCardQueue::apply_closure_to_buffer(_closure,
+ BufferNode::make_buffer_from_node(nd),
+ 0, _sz, false);
guarantee(b, "Should not stop early.");
- nd = nd->next;
+ nd = nd->next();
}
}
void DirtyCardQueueSet::abandon_logs() {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
- CompletedBufferNode* buffers_to_delete = NULL;
+ BufferNode* buffers_to_delete = NULL;
{
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
while (_completed_buffers_head != NULL) {
- CompletedBufferNode* nd = _completed_buffers_head;
- _completed_buffers_head = nd->next;
- nd->next = buffers_to_delete;
+ BufferNode* nd = _completed_buffers_head;
+ _completed_buffers_head = nd->next();
+ nd->set_next(buffers_to_delete);
buffers_to_delete = nd;
}
_n_completed_buffers = 0;
@@ -263,10 +238,9 @@ void DirtyCardQueueSet::abandon_logs() {
debug_only(assert_completed_buffer_list_len_correct_locked());
}
while (buffers_to_delete != NULL) {
- CompletedBufferNode* nd = buffers_to_delete;
- buffers_to_delete = nd->next;
- deallocate_buffer(nd->buf);
- delete nd;
+ BufferNode* nd = buffers_to_delete;
+ buffers_to_delete = nd->next();
+ deallocate_buffer(BufferNode::make_buffer_from_node(nd));
}
// Since abandon is done only at safepoints, we can safely manipulate
// these queues.
diff --git a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp
index 7a6f3f27bbd..da2c8378451 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -84,11 +84,12 @@ class DirtyCardQueueSet: public PtrQueueSet {
jint _processed_buffers_rs_thread;
public:
- DirtyCardQueueSet();
+ DirtyCardQueueSet(bool notify_when_complete = true);
void initialize(Monitor* cbl_mon, Mutex* fl_lock,
- int max_completed_queue = 0,
- Mutex* lock = NULL, PtrQueueSet* fl_owner = NULL);
+ int process_completed_threshold,
+ int max_completed_queue,
+ Mutex* lock, PtrQueueSet* fl_owner = NULL);
// The number of parallel ids that can be claimed to allow collector or
// mutator threads to do card-processing work.
@@ -120,12 +121,13 @@ public:
// is returned to the completed buffer set, and this call returns false.
bool apply_closure_to_completed_buffer(int worker_i = 0,
int stop_at = 0,
- bool with_CAS = false);
- bool apply_closure_to_completed_buffer_helper(int worker_i,
- CompletedBufferNode* nd);
+ bool during_pause = false);
+
+ bool apply_closure_to_completed_buffer_helper(int worker_i,
+ BufferNode* nd);
+
+ BufferNode* get_completed_buffer(int stop_at);
- CompletedBufferNode* get_completed_buffer_CAS();
- CompletedBufferNode* get_completed_buffer_lock(int stop_at);
// Applies the current closure to all completed buffers,
// non-consumptively.
void apply_closure_to_all_completed_buffers();
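
The stop_at argument of get_completed_buffer() is what lets a refinement thread drain the completed-buffer list only down to a floor, leaving roughly a green zone's worth of buffers for the next pause to process. A toy model of that draining rule, with an assumed backlog and green-zone value:

#include <cstdio>

int main() {
  int n_completed_buffers = 25;   // assumed backlog of completed buffers
  const int green_zone    = 6;    // buffers deliberately left on the queue

  int processed = 0;
  // Mirror of the refinement loop: keep taking buffers only while the
  // count stays above the stop_at threshold (the green zone here).
  while (n_completed_buffers > green_zone) {
    --n_completed_buffers;        // "pop" one completed buffer
    ++processed;                  // apply the update closure to it
  }
  std::printf("processed %d buffers, %d left for the pause\n",
              processed, n_completed_buffers);
  return 0;
}
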
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 550b028ea60..c3319d13e79 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -928,6 +928,8 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
+ TraceMemoryManagerStats tms(true /* fullGC */);
+
double start = os::elapsedTime();
g1_policy()->record_full_collection_start();
@@ -1001,6 +1003,8 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+ MemoryService::track_memory_usage();
+
if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
@@ -1371,6 +1375,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
SharedHeap(policy_),
_g1_policy(policy_),
+ _dirty_card_queue_set(false),
_ref_processor(NULL),
_process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
_bot_shared(NULL),
@@ -1436,6 +1441,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
}
jint G1CollectedHeap::initialize() {
+ CollectedHeap::pre_initialize();
os::enable_vtime();
// Necessary to satisfy locking discipline assertions.
@@ -1456,8 +1462,6 @@ jint G1CollectedHeap::initialize() {
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
- // We allocate this in any case, but only do no work if the command line
- // param is off.
_cg1r = new ConcurrentG1Refine();
// Reserve the maximum.
@@ -1590,18 +1594,20 @@ jint G1CollectedHeap::initialize() {
JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
SATB_Q_FL_lock,
- 0,
+ G1SATBProcessCompletedThreshold,
Shared_SATB_Q_lock);
JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
DirtyCardQ_FL_lock,
- G1UpdateBufferQueueMaxLength,
+ concurrent_g1_refine()->yellow_zone(),
+ concurrent_g1_refine()->red_zone(),
Shared_DirtyCardQ_lock);
if (G1DeferredRSUpdate) {
dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
DirtyCardQ_FL_lock,
- 0,
+ -1, // never trigger processing
+ -1, // no limit on length
Shared_DirtyCardQ_lock,
&JavaThread::dirty_card_queue_set());
}
@@ -1732,13 +1738,6 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
return car->free();
}
-void G1CollectedHeap::collect(GCCause::Cause cause) {
- // The caller doesn't have the Heap_lock
- assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
- MutexLocker ml(Heap_lock);
- collect_locked(cause);
-}
-
void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
assert(Thread::current()->is_VM_thread(), "Precondition#1");
assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1755,17 +1754,31 @@ void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
}
}
+void G1CollectedHeap::collect(GCCause::Cause cause) {
+ // The caller doesn't have the Heap_lock
+ assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
- // Don't want to do a GC until cleanup is completed.
- wait_for_cleanup_complete();
-
- // Read the GC count while holding the Heap_lock
- int gc_count_before = SharedHeap::heap()->total_collections();
+ int gc_count_before;
{
- MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
- VM_G1CollectFull op(gc_count_before, cause);
- VMThread::execute(&op);
+ MutexLocker ml(Heap_lock);
+ // Read the GC count while holding the Heap_lock
+ gc_count_before = SharedHeap::heap()->total_collections();
+
+ // Don't want to do a GC until cleanup is completed.
+ wait_for_cleanup_complete();
+ } // We give up heap lock; VMThread::execute gets it back below
+ switch (cause) {
+ case GCCause::_scavenge_alot: {
+ // Do an incremental pause, which might sometimes be abandoned.
+ VM_G1IncCollectionPause op(gc_count_before, cause);
+ VMThread::execute(&op);
+ break;
+ }
+ default: {
+ // In all other cases, we currently do a full gc.
+ VM_G1CollectFull op(gc_count_before, cause);
+ VMThread::execute(&op);
+ }
}
}
@@ -2119,7 +2132,7 @@ size_t G1CollectedHeap::large_typearray_limit() {
}
size_t G1CollectedHeap::max_capacity() const {
- return _g1_committed.byte_size();
+ return g1_reserved_obj_bytes();
}
jlong G1CollectedHeap::millis_since_last_gc() {
@@ -2638,6 +2651,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
}
{
+ ResourceMark rm;
+
char verbose_str[128];
sprintf(verbose_str, "GC pause ");
if (g1_policy()->in_young_gc_mode()) {
@@ -2649,8 +2664,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
if (g1_policy()->should_initiate_conc_mark())
strcat(verbose_str, " (initial-mark)");
- GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
-
// if PrintGCDetails is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
// is messy if we do.
@@ -2658,7 +2671,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
- ResourceMark rm;
+ TraceMemoryManagerStats tms(false /* fullGC */);
+
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
guarantee(!is_gc_active(), "collection is not reentrant");
@@ -2802,6 +2816,22 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
_young_list->reset_auxilary_lists();
}
} else {
+ if (_in_cset_fast_test != NULL) {
+ assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
+ FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+ // this is more for peace of mind; we're nulling them here and
+ // we're expecting them to be null at the beginning of the next GC
+ _in_cset_fast_test = NULL;
+ _in_cset_fast_test_base = NULL;
+ }
+ // This looks confusing, because the DPT should really be empty
+ // at this point -- since we have not done any collection work,
+ // there should not be any derived pointers in the table to update;
+ // however, there is some additional state in the DPT which is
+ // reset at the end of the (null) "gc" here via the following call.
+ // A better approach might be to split off that state resetting work
+ // into a separate method that asserts that the DPT is empty and call
+ // that here. That is deferred for now.
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
@@ -2838,6 +2868,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
assert(regions_accounted_for(), "Region leakage.");
+ MemoryService::track_memory_usage();
+
if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
HandleMark hm; // Discard invalid handles created during verification
gclog_or_tty->print(" VerifyAfterGC:");
@@ -4209,10 +4241,11 @@ void G1CollectedHeap::evacuate_collection_set() {
RedirtyLoggedCardTableEntryFastClosure redirty;
dirty_card_queue_set().set_closure(&redirty);
dirty_card_queue_set().apply_closure_to_all_completed_buffers();
- JavaThread::dirty_card_queue_set().merge_bufferlists(&dirty_card_queue_set());
+
+ DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
+ dcq.merge_bufferlists(&dirty_card_queue_set());
assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
}
-
COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 5e07828267f..bb73eb28881 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -692,7 +692,7 @@ public:
// Reserved (g1 only; super method includes perm), capacity and the used
// portion in bytes.
- size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); }
+ size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
virtual size_t capacity() const;
virtual size_t used() const;
// This should be called when we're not holding the heap lock. The
@@ -1007,6 +1007,10 @@ public:
return true;
}
+ virtual bool card_mark_must_follow_store() const {
+ return true;
+ }
+
bool is_in_young(oop obj) {
HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young();
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index 20e8fba5dea..487fb6d954a 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -1516,8 +1516,30 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
(end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
update_recent_gc_times(end_time_sec, elapsed_ms);
_recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
- // using 1.01 to account for floating point inaccuracies
- assert(recent_avg_pause_time_ratio() < 1.01, "All GC?");
+ if (recent_avg_pause_time_ratio() < 0.0 ||
+ (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
+#ifndef PRODUCT
+ // Dump info to allow post-facto debugging
+ gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
+ gclog_or_tty->print_cr("-------------------------------------------");
+ gclog_or_tty->print_cr("Recent GC Times (ms):");
+ _recent_gc_times_ms->dump();
+ gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
+ _recent_prev_end_times_for_all_gcs_sec->dump();
+ gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
+ _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
+ // In debug mode, terminate the JVM if the user wants to debug at this point.
+ assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
+#endif // !PRODUCT
+ // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
+ // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
+ if (_recent_avg_pause_time_ratio < 0.0) {
+ _recent_avg_pause_time_ratio = 0.0;
+ } else {
+ assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
+ _recent_avg_pause_time_ratio = 1.0;
+ }
+ }
}
if (G1PolicyVerbose > 1) {
@@ -1892,6 +1914,10 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
calculate_young_list_min_length();
calculate_young_list_target_config();
+ // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
+ double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSUpdatePauseFractionPercent / 100.0;
+ adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
+
//
_target_pause_time_ms = -1.0;
@@ -1899,6 +1925,47 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
//
+void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
+ double update_rs_processed_buffers,
+ double goal_ms) {
+ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+ ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
+
+ if (G1AdaptiveConcRefine) {
+ const int k_gy = 3, k_gr = 6;
+ const double inc_k = 1.1, dec_k = 0.9;
+
+ int g = cg1r->green_zone();
+ if (update_rs_time > goal_ms) {
+      g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
+ } else {
+ if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
+ g = (int)MAX2(g * inc_k, g + 1.0);
+ }
+ }
+ // Change the refinement threads params
+ cg1r->set_green_zone(g);
+ cg1r->set_yellow_zone(g * k_gy);
+ cg1r->set_red_zone(g * k_gr);
+ cg1r->reinitialize_threads();
+
+ int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
+ int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
+ cg1r->yellow_zone());
+ // Change the barrier params
+ dcqs.set_process_completed_threshold(processing_threshold);
+ dcqs.set_max_completed_queue(cg1r->red_zone());
+ }
+
+ int curr_queue_size = dcqs.completed_buffers_num();
+ if (curr_queue_size >= cg1r->yellow_zone()) {
+ dcqs.set_completed_queue_padding(curr_queue_size);
+ } else {
+ dcqs.set_completed_queue_padding(0);
+ }
+ dcqs.notify_if_necessary();
+}
+
double
G1CollectorPolicy::
predict_young_collection_elapsed_time_ms(size_t adjustment) {
@@ -2825,8 +2892,15 @@ choose_collection_set() {
double non_young_start_time_sec;
start_recording_regions();
- guarantee(_target_pause_time_ms > -1.0,
+ guarantee(_target_pause_time_ms > -1.0
+ NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
"_target_pause_time_ms should have been set!");
+#ifndef PRODUCT
+ if (_target_pause_time_ms <= -1.0) {
+ assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
+ _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+ }
+#endif
assert(_collection_set == NULL, "Precondition");
double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
@@ -2972,7 +3046,3 @@ record_collection_pause_end(bool abandoned) {
G1CollectorPolicy::record_collection_pause_end(abandoned);
assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
}
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***
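
The adjust_concurrent_refinement() logic added above behaves as a small multiplicative controller: the green zone shrinks by about 10% when remembered-set update time overshoots its goal, grows by about 10% (and by at least one buffer) when it undershoots while a backlog was processed, and the yellow and red zones are then fixed multiples of green. A worked sketch of one adjustment step under assumed inputs (not taken from any real log):

#include <algorithm>
#include <cstdio>

int main() {
  // Assumed measurements for a single pause.
  const double update_rs_time_ms   = 12.0;  // time spent updating the RS
  const double update_rs_processed = 40.0;  // buffers processed in the pause
  const double goal_ms             = 20.0;  // e.g. 10% of a 200 ms MMU window
  int green                        = 6;     // current green zone

  const int    k_yellow = 3, k_red = 6;     // yellow/red as multiples of green
  const double inc_k = 1.1, dec_k = 0.9;

  if (update_rs_time_ms > goal_ms) {
    green = (int)(green * dec_k);                        // overshoot: shrink
  } else if (update_rs_processed > green) {
    green = (int)std::max(green * inc_k, green + 1.0);   // backlog: grow
  }
  std::printf("green=%d yellow=%d red=%d\n",
              green, green * k_yellow, green * k_red);   // 7 / 21 / 42 here
  return 0;
}
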
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
index 0dff91a0b50..2b9eb83f074 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
@@ -316,6 +316,10 @@ private:
bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT
+ void adjust_concurrent_refinement(double update_rs_time,
+ double update_rs_processed_buffers,
+ double goal_ms);
+
protected:
double _pause_time_target_ms;
double _recorded_young_cset_choice_time_ms;
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
index b0d0df45dd9..492adaef6fd 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp
@@ -86,12 +86,22 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
// increase the array size (:-)
// remove the oldest entry (this might allow more GC time for
// the time slice than what's allowed)
- // concolidate the two entries with the minimum gap between them
- // (this mighte allow less GC time than what's allowed)
- guarantee(0, "array full, currently we can't recover");
+ // consolidate the two entries with the minimum gap between them
+ // (this might allow less GC time than what's allowed)
+ guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
+ "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+ // In the case where ScavengeALot is true, such overflow is not
+ // uncommon; in such cases, we can, without much loss of precision
+ // or performance (we are GC'ing most of the time anyway!),
+ // simply overwrite the oldest entry in the tracker: this
+ // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+ _head_index = trim_index(_head_index + 1);
+ assert(_head_index == _tail_index, "Because we have a full circular buffer");
+ _tail_index = trim_index(_tail_index + 1);
+ } else {
+ _head_index = trim_index(_head_index + 1);
+ ++_no_entries;
}
- _head_index = trim_index(_head_index + 1);
- ++_no_entries;
_array[_head_index] = G1MMUTrackerQueueElem(start, end);
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp
index 0eff2c3867a..1030454a741 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp
@@ -99,7 +99,10 @@ private:
// The array is of fixed size and I don't think we'll need more than
// two or three entries with the current behaviour of G1 pauses.
// If the array is full, an easy fix is to look for the pauses with
- // the shortest gap between them and concolidate them.
+ // the shortest gap between them and consolidate them.
+ // For now, we have taken the expedient alternative of forgetting
+ // the oldest entry in the event that +G1ForgetfulMMUTracker, thus
+ // potentially violating MMU specs for some time thereafter.
G1MMUTrackerQueueElem _array[QueueLength];
int _head_index;
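
With +G1ForgetfulMMUTracker the full fixed-size queue advances both head and tail, overwriting the oldest pause record instead of failing the guarantee. A toy sketch of that wrap-around using an invented four-entry queue and a simplified record (start time only):

#include <cstdio>

const int QueueLength = 4;           // invented small capacity
double pause_start[QueueLength];     // stand-in for the start/end elements
int head = -1, tail = 0, entries = 0;

int trim(int i) { return (i + QueueLength) % QueueLength; }

void add_pause(double start) {
  if (entries == QueueLength) {
    // Full: forget the oldest entry (head and tail both advance).
    head = trim(head + 1);
    tail = trim(tail + 1);
  } else {
    head = trim(head + 1);
    ++entries;
  }
  pause_start[head] = start;
}

int main() {
  for (double t = 0.0; t < 6.0; t += 1.0) add_pause(t);
  // After six insertions only the last four pause records remain.
  for (int i = 0, idx = tail; i < entries; ++i, idx = trim(idx + 1)) {
    std::printf("slot %d: start=%.1f\n", idx, pause_start[idx]);
  }
  return 0;
}
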
diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
index c941c8755d6..85f3841c128 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -85,7 +85,7 @@
diagnostic(bool, G1SummarizeZFStats, false, \
"Summarize zero-filling info") \
\
- develop(bool, G1TraceConcurrentRefinement, false, \
+ diagnostic(bool, G1TraceConcurrentRefinement, false, \
"Trace G1 concurrent refinement") \
\
product(intx, G1MarkStackSize, 2 * 1024 * 1024, \
@@ -94,19 +94,6 @@
product(intx, G1MarkRegionStackSize, 1024 * 1024, \
"Size of the region stack for concurrent marking.") \
\
- develop(bool, G1ConcRefine, true, \
- "If true, run concurrent rem set refinement for G1") \
- \
- develop(intx, G1ConcRefineTargTraversals, 4, \
- "Number of concurrent refinement we try to achieve") \
- \
- develop(intx, G1ConcRefineInitialDelta, 4, \
- "Number of heap regions of alloc ahead of starting collection " \
- "pause to start concurrent refinement (initially)") \
- \
- develop(bool, G1SmoothConcRefine, true, \
- "Attempts to smooth out the overhead of concurrent refinement") \
- \
develop(bool, G1ConcZeroFill, true, \
"If true, run concurrent zero-filling thread") \
\
@@ -178,13 +165,38 @@
product(intx, G1UpdateBufferSize, 256, \
"Size of an update buffer") \
\
- product(intx, G1UpdateBufferQueueProcessingThreshold, 5, \
+ product(intx, G1ConcRefineYellowZone, 0, \
"Number of enqueued update buffers that will " \
- "trigger concurrent processing") \
+ "trigger concurrent processing. Will be selected ergonomically " \
+ "by default.") \
\
- product(intx, G1UpdateBufferQueueMaxLength, 30, \
+ product(intx, G1ConcRefineRedZone, 0, \
"Maximum number of enqueued update buffers before mutator " \
- "threads start processing new ones instead of enqueueing them") \
+ "threads start processing new ones instead of enqueueing them. " \
+ "Will be selected ergonomically by default. Zero will disable " \
+ "concurrent processing.") \
+ \
+ product(intx, G1ConcRefineGreenZone, 0, \
+ "The number of update buffers that are left in the queue by the " \
+ "concurrent processing threads. Will be selected ergonomically " \
+ "by default.") \
+ \
+ product(intx, G1ConcRefineServiceInterval, 300, \
+ "The last concurrent refinement thread wakes up every " \
+ "specified number of milliseconds to do miscellaneous work.") \
+ \
+ product(intx, G1ConcRefineThresholdStep, 0, \
+ "Each time the rset update queue increases by this amount " \
+ "activate the next refinement thread if available. " \
+ "Will be selected ergonomically by default.") \
+ \
+ product(intx, G1RSUpdatePauseFractionPercent, 10, \
+ "A target percentage of time that is allowed to be spend on " \
+ "process RS update buffers during the collection pause.") \
+ \
+ product(bool, G1AdaptiveConcRefine, true, \
+ "Select green, yellow and red zones adaptively to meet the " \
+ "the pause requirements.") \
\
develop(intx, G1ConcRSLogCacheSize, 10, \
"Log base 2 of the length of conc RS hot-card cache.") \
@@ -242,6 +254,10 @@
product(bool, G1UseSurvivorSpaces, true, \
"When true, use survivor space.") \
\
+ develop(bool, G1FailOnFPError, false, \
+ "When set, G1 will fail when it encounters an FP 'error', " \
+ "so as to allow debugging") \
+ \
develop(bool, G1FixedTenuringThreshold, false, \
"When set, G1 will not adjust the tenuring threshold") \
\
@@ -252,6 +268,9 @@
"If non-0 is the size of the G1 survivor space, " \
"otherwise SurvivorRatio is used to determine the size") \
\
+ product(bool, G1ForgetfulMMUTracker, false, \
+ "If the MMU tracker's memory is full, forget the oldest entry") \
+ \
product(uintx, G1HeapRegionSize, 0, \
"Size of the G1 regions.") \
\
diff --git a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp
index 060743dd38a..4a3fac2faaf 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.cpp
@@ -64,8 +64,8 @@ void PtrQueue::enqueue_known_active(void* ptr) {
while (_index == 0) {
handle_zero_index();
}
- assert(_index > 0, "postcondition");
+ assert(_index > 0, "postcondition");
_index -= oopSize;
_buf[byte_index_to_index((int)_index)] = ptr;
assert(0 <= _index && _index <= _sz, "Invariant.");
@@ -73,7 +73,12 @@ void PtrQueue::enqueue_known_active(void* ptr) {
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
assert(_lock->owned_by_self(), "Required.");
+
+ // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
+  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
+ // have the same rank and we may get the "possible deadlock" message
_lock->unlock();
+
qset()->enqueue_complete_buffer(buf);
// We must relock only because the caller will unlock, for the normal
// case.
@@ -99,94 +104,139 @@ void** PtrQueueSet::allocate_buffer() {
assert(_sz > 0, "Didn't set a buffer size.");
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
if (_fl_owner->_buf_free_list != NULL) {
- void** res = _fl_owner->_buf_free_list;
- _fl_owner->_buf_free_list = (void**)_fl_owner->_buf_free_list[0];
+ void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
+ _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
_fl_owner->_buf_free_list_sz--;
- // Just override the next pointer with NULL, just in case we scan this part
- // of the buffer.
- res[0] = NULL;
return res;
} else {
- return NEW_C_HEAP_ARRAY(void*, _sz);
+ // Allocate space for the BufferNode in front of the buffer.
+ char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
+ return BufferNode::make_buffer_from_block(b);
}
}
void PtrQueueSet::deallocate_buffer(void** buf) {
assert(_sz > 0, "Didn't set a buffer size.");
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
- buf[0] = (void*)_fl_owner->_buf_free_list;
- _fl_owner->_buf_free_list = buf;
+ BufferNode *node = BufferNode::make_node_from_buffer(buf);
+ node->set_next(_fl_owner->_buf_free_list);
+ _fl_owner->_buf_free_list = node;
_fl_owner->_buf_free_list_sz++;
}
void PtrQueueSet::reduce_free_list() {
+ assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
// For now we'll adopt the strategy of deleting half.
MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
size_t n = _buf_free_list_sz / 2;
while (n > 0) {
assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
- void** head = _buf_free_list;
- _buf_free_list = (void**)_buf_free_list[0];
- FREE_C_HEAP_ARRAY(void*,head);
+ void* b = BufferNode::make_block_from_node(_buf_free_list);
+ _buf_free_list = _buf_free_list->next();
+ FREE_C_HEAP_ARRAY(char, b);
+ _buf_free_list_sz --;
n--;
}
}
-void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index, bool ignore_max_completed) {
- // I use explicit locking here because there's a bailout in the middle.
- _cbl_mon->lock_without_safepoint_check();
+void PtrQueue::handle_zero_index() {
+ assert(0 == _index, "Precondition.");
+ // This thread records the full buffer and allocates a new one (while
+ // holding the lock if there is one).
+ if (_buf != NULL) {
+ if (_lock) {
+ assert(_lock->owned_by_self(), "Required.");
- Thread* thread = Thread::current();
- assert( ignore_max_completed ||
- thread->is_Java_thread() ||
- SafepointSynchronize::is_at_safepoint(),
- "invariant" );
- ignore_max_completed = ignore_max_completed || !thread->is_Java_thread();
+ // The current PtrQ may be the shared dirty card queue and
+ // may be being manipulated by more than one worker thread
+ // during a pause. Since the enqueuing of the completed
+      // buffer unlocks the Shared_DirtyCardQ_lock, more than one
+ // worker thread can 'race' on reading the shared queue attributes
+ // (_buf and _index) and multiple threads can call into this
+ // routine for the same buffer. This will cause the completed
+ // buffer to be added to the CBL multiple times.
- if (!ignore_max_completed && _max_completed_queue > 0 &&
- _n_completed_buffers >= (size_t) _max_completed_queue) {
- _cbl_mon->unlock();
- bool b = mut_process_buffer(buf);
- if (b) {
- deallocate_buffer(buf);
- return;
+ // We "claim" the current buffer by caching value of _buf in
+ // a local and clearing the field while holding _lock. When
+ // _lock is released (while enqueueing the completed buffer)
+ // the thread that acquires _lock will skip this code,
+ // preventing the subsequent the multiple enqueue, and
+ // install a newly allocated buffer below.
+
+ void** buf = _buf; // local pointer to completed buffer
+ _buf = NULL; // clear shared _buf field
+
+ locking_enqueue_completed_buffer(buf); // enqueue completed buffer
+
+ // While the current thread was enqueuing the buffer another thread
+      // may have allocated a new buffer and inserted it into this pointer
+      // queue. If that happens then we just return so that the current
+      // thread doesn't overwrite the buffer allocated by the other thread
+      // and potentially lose some dirtied cards.
+
+ if (_buf != NULL) return;
+ } else {
+ if (qset()->process_or_enqueue_complete_buffer(_buf)) {
+ // Recycle the buffer. No allocation.
+ _sz = qset()->buffer_size();
+ _index = _sz;
+ return;
+ }
}
-
- // Otherwise, go ahead and enqueue the buffer. Must reaquire the lock.
- _cbl_mon->lock_without_safepoint_check();
}
+ // Reallocate the buffer
+ _buf = qset()->allocate_buffer();
+ _sz = qset()->buffer_size();
+ _index = _sz;
+ assert(0 <= _index && _index <= _sz, "Invariant.");
+}
- // Here we still hold the _cbl_mon.
- CompletedBufferNode* cbn = new CompletedBufferNode;
- cbn->buf = buf;
- cbn->next = NULL;
- cbn->index = index;
+bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
+ if (Thread::current()->is_Java_thread()) {
+ // We don't lock. It is fine to be epsilon-precise here.
+ if (_max_completed_queue == 0 || _max_completed_queue > 0 &&
+ _n_completed_buffers >= _max_completed_queue + _completed_queue_padding) {
+ bool b = mut_process_buffer(buf);
+ if (b) {
+ // True here means that the buffer hasn't been deallocated and the caller may reuse it.
+ return true;
+ }
+ }
+ }
+ // The buffer will be enqueued. The caller will have to get a new one.
+ enqueue_complete_buffer(buf);
+ return false;
+}
+
+void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ BufferNode* cbn = BufferNode::new_from_buffer(buf);
+ cbn->set_index(index);
if (_completed_buffers_tail == NULL) {
assert(_completed_buffers_head == NULL, "Well-formedness");
_completed_buffers_head = cbn;
_completed_buffers_tail = cbn;
} else {
- _completed_buffers_tail->next = cbn;
+ _completed_buffers_tail->set_next(cbn);
_completed_buffers_tail = cbn;
}
_n_completed_buffers++;
- if (!_process_completed &&
+ if (!_process_completed && _process_completed_threshold >= 0 &&
_n_completed_buffers >= _process_completed_threshold) {
_process_completed = true;
if (_notify_when_complete)
- _cbl_mon->notify_all();
+ _cbl_mon->notify();
}
debug_only(assert_completed_buffer_list_len_correct_locked());
- _cbl_mon->unlock();
}
int PtrQueueSet::completed_buffers_list_length() {
int n = 0;
- CompletedBufferNode* cbn = _completed_buffers_head;
+ BufferNode* cbn = _completed_buffers_head;
while (cbn != NULL) {
n++;
- cbn = cbn->next;
+ cbn = cbn->next();
}
return n;
}
@@ -197,7 +247,7 @@ void PtrQueueSet::assert_completed_buffer_list_len_correct() {
}
void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
- guarantee((size_t)completed_buffers_list_length() == _n_completed_buffers,
+ guarantee(completed_buffers_list_length() == _n_completed_buffers,
"Completed buffer length is wrong.");
}
@@ -206,12 +256,8 @@ void PtrQueueSet::set_buffer_size(size_t sz) {
_sz = sz * oopSize;
}
-void PtrQueueSet::set_process_completed_threshold(size_t sz) {
- _process_completed_threshold = sz;
-}
-
-// Merge lists of buffers. Notify waiting threads if the length of the list
-// exceeds threshold. The source queue is emptied as a result. The queues
+// Merge lists of buffers. Notify the processing threads.
+// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
@@ -223,7 +269,7 @@ void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
} else {
assert(_completed_buffers_head != NULL, "Well formedness");
if (src->_completed_buffers_head != NULL) {
- _completed_buffers_tail->next = src->_completed_buffers_head;
+ _completed_buffers_tail->set_next(src->_completed_buffers_head);
_completed_buffers_tail = src->_completed_buffers_tail;
}
}
@@ -236,31 +282,13 @@ void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
_completed_buffers_head != NULL && _completed_buffers_tail != NULL,
"Sanity");
+}
- if (!_process_completed &&
- _n_completed_buffers >= _process_completed_threshold) {
+void PtrQueueSet::notify_if_necessary() {
+ MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+ if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
_process_completed = true;
if (_notify_when_complete)
- _cbl_mon->notify_all();
+ _cbl_mon->notify();
}
}
-
-// Merge free lists of the two queues. The free list of the source
-// queue is emptied as a result. The queues must share the same
-// mutex that guards free lists.
-void PtrQueueSet::merge_freelists(PtrQueueSet* src) {
- assert(_fl_lock == src->_fl_lock, "Should share the same lock");
- MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
- if (_buf_free_list != NULL) {
- void **p = _buf_free_list;
- while (*p != NULL) {
- p = (void**)*p;
- }
- *p = src->_buf_free_list;
- } else {
- _buf_free_list = src->_buf_free_list;
- }
- _buf_free_list_sz += src->_buf_free_list_sz;
- src->_buf_free_list = NULL;
- src->_buf_free_list_sz = 0;
-}
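
process_or_enqueue_complete_buffer() gives Java threads a simple back-pressure rule: once the completed queue has grown past the limit (the red zone) plus any padding installed during a pause, the mutator processes its own buffer instead of handing it to the refinement threads. A standalone sketch of that decision; the limit values are invented and the unconditional self-processing is a simplification (the real code still enqueues if mutator processing is not possible):

#include <cstdio>

// Hypothetical limits; in the VM these map to the red zone and the
// completed-queue padding set by the policy around a pause.
int max_completed_queue     = 30;
int completed_queue_padding = 0;
int n_completed_buffers     = 0;

// Returns true when the caller should process the buffer itself
// (mutator back-pressure), false when the buffer was enqueued.
bool process_or_enqueue(bool is_java_thread) {
  if (is_java_thread) {
    if (max_completed_queue == 0 ||
        (max_completed_queue > 0 &&
         n_completed_buffers >= max_completed_queue + completed_queue_padding)) {
      return true;   // queue too long: do the refinement work in the mutator
    }
  }
  ++n_completed_buffers;  // enqueue for the concurrent refinement threads
  return false;
}

int main() {
  for (int i = 0; i < 35; ++i) {
    if (process_or_enqueue(true)) {
      std::printf("buffer %d processed by the mutator (queue length %d)\n",
                  i, n_completed_buffers);
    }
  }
  return 0;
}
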
diff --git a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp
index 663dcba4fa6..ccf5b207c76 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp
@@ -27,8 +27,10 @@
// the addresses of modified old-generation objects. This type supports
// this operation.
-class PtrQueueSet;
+// The definition of placement operator new(size_t, void*) is in <new>.
+#include <new>
+class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
protected:
@@ -77,7 +79,7 @@ public:
else enqueue_known_active(ptr);
}
- inline void handle_zero_index();
+ void handle_zero_index();
void locking_enqueue_completed_buffer(void** buf);
void enqueue_known_active(void* ptr);
@@ -126,34 +128,65 @@ public:
};
+class BufferNode {
+ size_t _index;
+ BufferNode* _next;
+public:
+ BufferNode() : _index(0), _next(NULL) { }
+ BufferNode* next() const { return _next; }
+ void set_next(BufferNode* n) { _next = n; }
+ size_t index() const { return _index; }
+ void set_index(size_t i) { _index = i; }
+
+ // Align the size of the structure to the size of the pointer
+ static size_t aligned_size() {
+ static const size_t alignment = round_to(sizeof(BufferNode), sizeof(void*));
+ return alignment;
+ }
+
+ // BufferNode is allocated before the buffer.
+ // The chunk of memory that holds both of them is a block.
+
+ // Produce a new BufferNode given a buffer.
+ static BufferNode* new_from_buffer(void** buf) {
+ return new (make_block_from_buffer(buf)) BufferNode;
+ }
+
+ // The following are the required conversion routines:
+ static BufferNode* make_node_from_buffer(void** buf) {
+ return (BufferNode*)make_block_from_buffer(buf);
+ }
+ static void** make_buffer_from_node(BufferNode *node) {
+ return make_buffer_from_block(node);
+ }
+ static void* make_block_from_node(BufferNode *node) {
+ return (void*)node;
+ }
+ static void** make_buffer_from_block(void* p) {
+ return (void**)((char*)p + aligned_size());
+ }
+ static void* make_block_from_buffer(void** p) {
+ return (void*)((char*)p - aligned_size());
+ }
+};
+
// A PtrQueueSet represents resources common to a set of pointer queues.
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// All these variables are are protected by the TLOQ_CBL_mon. XXX ???
class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
-
protected:
-
- class CompletedBufferNode: public CHeapObj {
- public:
- void** buf;
- size_t index;
- CompletedBufferNode* next;
- CompletedBufferNode() : buf(NULL),
- index(0), next(NULL){ }
- };
-
Monitor* _cbl_mon; // Protects the fields below.
- CompletedBufferNode* _completed_buffers_head;
- CompletedBufferNode* _completed_buffers_tail;
- size_t _n_completed_buffers;
- size_t _process_completed_threshold;
+ BufferNode* _completed_buffers_head;
+ BufferNode* _completed_buffers_tail;
+ int _n_completed_buffers;
+ int _process_completed_threshold;
volatile bool _process_completed;
// This (and the interpretation of the first element as a "next"
// pointer) are protected by the TLOQ_FL_lock.
Mutex* _fl_lock;
- void** _buf_free_list;
+ BufferNode* _buf_free_list;
size_t _buf_free_list_sz;
// Queue set can share a freelist. The _fl_owner variable
// specifies the owner. It is set to "this" by default.
@@ -170,6 +203,7 @@ protected:
// Maximum number of elements allowed on completed queue: after that,
// enqueuer does the work itself. Zero indicates no maximum.
int _max_completed_queue;
+ int _completed_queue_padding;
int completed_buffers_list_length();
void assert_completed_buffer_list_len_correct_locked();
@@ -191,9 +225,12 @@ public:
// Because of init-order concerns, we can't pass these as constructor
// arguments.
void initialize(Monitor* cbl_mon, Mutex* fl_lock,
- int max_completed_queue = 0,
+ int process_completed_threshold,
+ int max_completed_queue,
PtrQueueSet *fl_owner = NULL) {
_max_completed_queue = max_completed_queue;
+ _process_completed_threshold = process_completed_threshold;
+ _completed_queue_padding = 0;
assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
_cbl_mon = cbl_mon;
_fl_lock = fl_lock;
@@ -208,14 +245,17 @@ public:
void deallocate_buffer(void** buf);
// Declares that "buf" is a complete buffer.
- void enqueue_complete_buffer(void** buf, size_t index = 0,
- bool ignore_max_completed = false);
+ void enqueue_complete_buffer(void** buf, size_t index = 0);
+
+ // To be invoked by the mutator.
+ bool process_or_enqueue_complete_buffer(void** buf);
bool completed_buffers_exist_dirty() {
return _n_completed_buffers > 0;
}
bool process_completed_buffers() { return _process_completed; }
+ void set_process_completed(bool x) { _process_completed = x; }
bool active() { return _all_active; }
@@ -226,15 +266,24 @@ public:
// Get the buffer size.
size_t buffer_size() { return _sz; }
- // Set the number of completed buffers that triggers log processing.
- void set_process_completed_threshold(size_t sz);
+ // Get/Set the number of completed buffers that triggers log processing.
+ void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
+ int process_completed_threshold() const { return _process_completed_threshold; }
// Must only be called at a safe point. Indicates that the buffer free
// list size may be reduced, if that is deemed desirable.
void reduce_free_list();
- size_t completed_buffers_num() { return _n_completed_buffers; }
+ int completed_buffers_num() { return _n_completed_buffers; }
void merge_bufferlists(PtrQueueSet* src);
- void merge_freelists(PtrQueueSet* src);
+
+ void set_max_completed_queue(int m) { _max_completed_queue = m; }
+ int max_completed_queue() { return _max_completed_queue; }
+
+ void set_completed_queue_padding(int padding) { _completed_queue_padding = padding; }
+ int completed_queue_padding() { return _completed_queue_padding; }
+
+ // Notify the consumer if the number of buffers crossed the threshold
+ void notify_if_necessary();
};
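
The BufferNode conversion routines above rely on a single layout invariant: the node header is allocated immediately in front of the buffer it describes, and the two views are interconverted by adding or subtracting the pointer-aligned header size. Keeping header and buffer in one allocation means enqueuing a completed buffer never allocates; the buffer is turned into a list node in place. A self-contained sketch of that block layout, with hypothetical names and malloc standing in for NEW_C_HEAP_ARRAY:

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <new>

// Minimal stand-in for the node header that precedes each buffer.
struct Node {
  size_t index;
  Node*  next;

  // Header size rounded up to pointer alignment, mirroring aligned_size().
  static size_t aligned_size() {
    return (sizeof(Node) + sizeof(void*) - 1) / sizeof(void*) * sizeof(void*);
  }
  // block -> buffer: the buffer starts right after the header.
  static void** buffer_from_block(void* block) {
    return reinterpret_cast<void**>(static_cast<char*>(block) + aligned_size());
  }
  // buffer -> block: step back over the header.
  static void* block_from_buffer(void** buf) {
    return static_cast<void*>(reinterpret_cast<char*>(buf) - aligned_size());
  }
};

int main() {
  const size_t buffer_bytes = 256 * sizeof(void*);
  // One allocation ("block") holds both the header and the buffer.
  void* block = std::malloc(Node::aligned_size() + buffer_bytes);
  Node* node  = new (block) Node();                 // placement-new the header
  void** buf  = Node::buffer_from_block(block);

  // Round-tripping between the two views must land on the same addresses.
  assert(Node::block_from_buffer(buf) == block);
  assert(Node::buffer_from_block(Node::block_from_buffer(buf)) == buf);
  (void)node;
  std::free(block);
  return 0;
}
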
diff --git a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp
index 3cf402ce7a8..8efdc3b9c23 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp
@@ -67,9 +67,9 @@ SATBMarkQueueSet::SATBMarkQueueSet() :
{}
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
- int max_completed_queue,
+ int process_completed_threshold,
Mutex* lock) {
- PtrQueueSet::initialize(cbl_mon, fl_lock, max_completed_queue);
+ PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
_shared_satb_queue.set_lock(lock);
if (ParallelGCThreads > 0) {
_par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads);
@@ -122,12 +122,12 @@ void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
int worker) {
- CompletedBufferNode* nd = NULL;
+ BufferNode* nd = NULL;
{
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
if (_completed_buffers_head != NULL) {
nd = _completed_buffers_head;
- _completed_buffers_head = nd->next;
+ _completed_buffers_head = nd->next();
if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
_n_completed_buffers--;
if (_n_completed_buffers == 0) _process_completed = false;
@@ -135,9 +135,9 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
}
ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
if (nd != NULL) {
- ObjPtrQueue::apply_closure_to_buffer(cl, nd->buf, 0, _sz);
- deallocate_buffer(nd->buf);
- delete nd;
+ void **buf = BufferNode::make_buffer_from_node(nd);
+ ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
+ deallocate_buffer(buf);
return true;
} else {
return false;
@@ -145,13 +145,13 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
}
void SATBMarkQueueSet::abandon_partial_marking() {
- CompletedBufferNode* buffers_to_delete = NULL;
+ BufferNode* buffers_to_delete = NULL;
{
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
while (_completed_buffers_head != NULL) {
- CompletedBufferNode* nd = _completed_buffers_head;
- _completed_buffers_head = nd->next;
- nd->next = buffers_to_delete;
+ BufferNode* nd = _completed_buffers_head;
+ _completed_buffers_head = nd->next();
+ nd->set_next(buffers_to_delete);
buffers_to_delete = nd;
}
_completed_buffers_tail = NULL;
@@ -159,10 +159,9 @@ void SATBMarkQueueSet::abandon_partial_marking() {
DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
while (buffers_to_delete != NULL) {
- CompletedBufferNode* nd = buffers_to_delete;
- buffers_to_delete = nd->next;
- deallocate_buffer(nd->buf);
- delete nd;
+ BufferNode* nd = buffers_to_delete;
+ buffers_to_delete = nd->next();
+ deallocate_buffer(BufferNode::make_buffer_from_node(nd));
}
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
// So we can safely manipulate these queues.
diff --git a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp
index ed1181dd79f..76218a6363b 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp
@@ -60,8 +60,8 @@ public:
SATBMarkQueueSet();
void initialize(Monitor* cbl_mon, Mutex* fl_lock,
- int max_completed_queue = 0,
- Mutex* lock = NULL);
+ int process_completed_threshold,
+ Mutex* lock);
static void handle_zero_index_for_thread(JavaThread* t);
diff --git a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
index e59dbe483d2..2137efa4e83 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
@@ -42,7 +42,7 @@ void VM_G1CollectFull::doit() {
void VM_G1IncCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
+ GCCauseSetter x(g1h, _gc_cause);
g1h->do_collection_pause_at_safepoint();
}
diff --git a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
index 6cf0605ec8c..95dda3844b7 100644
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
@@ -68,8 +68,9 @@ class VM_G1CollectForAllocation: public VM_GC_Operation {
class VM_G1IncCollectionPause: public VM_GC_Operation {
public:
- VM_G1IncCollectionPause(int gc_count_before) :
- VM_GC_Operation(gc_count_before) {}
+ VM_G1IncCollectionPause(int gc_count_before,
+ GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
+ VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
virtual void doit();
virtual const char* name() const {
diff --git a/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep b/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
index 7ae314990c8..c5a7a386b60 100644
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
@@ -221,6 +221,7 @@ freeList.cpp freeList.hpp
freeList.cpp globals.hpp
freeList.cpp mutex.hpp
freeList.cpp sharedHeap.hpp
+freeList.cpp vmThread.hpp
freeList.hpp allocationStats.hpp
diff --git a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1 b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1
index 63bfbce76f9..60531af9032 100644
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1
@@ -109,7 +109,6 @@ dirtyCardQueue.cpp atomic.hpp
dirtyCardQueue.cpp dirtyCardQueue.hpp
dirtyCardQueue.cpp heapRegionRemSet.hpp
dirtyCardQueue.cpp mutexLocker.hpp
-dirtyCardQueue.cpp ptrQueue.inline.hpp
dirtyCardQueue.cpp safepoint.hpp
dirtyCardQueue.cpp thread.hpp
dirtyCardQueue.cpp thread_<os_family>.inline.hpp
@@ -222,6 +221,15 @@ g1MarkSweep.hpp oop.hpp
g1MarkSweep.hpp timer.hpp
g1MarkSweep.hpp universe.hpp
+g1MemoryPool.cpp heapRegion.hpp
+g1MemoryPool.cpp g1CollectedHeap.inline.hpp
+g1MemoryPool.cpp g1CollectedHeap.hpp
+g1MemoryPool.cpp g1CollectorPolicy.hpp
+g1MemoryPool.cpp g1MemoryPool.hpp
+
+g1MemoryPool.hpp memoryUsage.hpp
+g1MemoryPool.hpp memoryPool.hpp
+
g1OopClosures.inline.hpp concurrentMark.hpp
g1OopClosures.inline.hpp g1OopClosures.hpp
g1OopClosures.inline.hpp g1CollectedHeap.hpp
@@ -303,12 +311,13 @@ heapRegionSeq.inline.hpp heapRegionSeq.hpp
klass.hpp g1OopClosures.hpp
+memoryService.cpp g1MemoryPool.hpp
+
ptrQueue.cpp allocation.hpp
ptrQueue.cpp allocation.inline.hpp
ptrQueue.cpp mutex.hpp
ptrQueue.cpp mutexLocker.hpp
ptrQueue.cpp ptrQueue.hpp
-ptrQueue.cpp ptrQueue.inline.hpp
ptrQueue.cpp thread_<os_family>.inline.hpp
ptrQueue.hpp allocation.hpp
@@ -318,7 +327,6 @@ ptrQueue.inline.hpp ptrQueue.hpp
satbQueue.cpp allocation.inline.hpp
satbQueue.cpp mutexLocker.hpp
-satbQueue.cpp ptrQueue.inline.hpp
satbQueue.cpp satbQueue.hpp
satbQueue.cpp sharedHeap.hpp
satbQueue.cpp thread.hpp
diff --git a/hotspot/src/share/vm/gc_implementation/includeDB_gc_serial b/hotspot/src/share/vm/gc_implementation/includeDB_gc_serial
index 6fb42f95b6f..60e41874d43 100644
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_serial
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_serial
@@ -71,6 +71,7 @@ gcUtil.cpp gcUtil.hpp
gcUtil.hpp allocation.hpp
gcUtil.hpp debug.hpp
gcUtil.hpp globalDefinitions.hpp
+gcUtil.hpp ostream.hpp
gcUtil.hpp timer.hpp
generationCounters.cpp generationCounters.hpp
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index 5acb923a056..07f759c9457 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -50,6 +50,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
work_queue_set_, &term_),
_is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure),
+ _promotion_failure_size(0),
_pushes(0), _pops(0), _steals(0), _steal_attempts(0), _term_attempts(0),
_strong_roots_time(0.0), _term_time(0.0)
{
@@ -249,6 +250,16 @@ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
}
}
+void ParScanThreadState::print_and_clear_promotion_failure_size() {
+ if (_promotion_failure_size != 0) {
+ if (PrintPromotionFailure) {
+ gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
+ _thread_num, _promotion_failure_size);
+ }
+ _promotion_failure_size = 0;
+ }
+}
+
class ParScanThreadStateSet: private ResourceArray {
public:
// Initializes states for the specified number of threads;
@@ -260,11 +271,11 @@ public:
GrowableArray** overflow_stacks_,
size_t desired_plab_sz,
ParallelTaskTerminator& term);
- inline ParScanThreadState& thread_sate(int i);
+ inline ParScanThreadState& thread_state(int i);
int pushes() { return _pushes; }
int pops() { return _pops; }
int steals() { return _steals; }
- void reset();
+ void reset(bool promotion_failed);
void flush();
private:
ParallelTaskTerminator& _term;
@@ -295,22 +306,31 @@ ParScanThreadStateSet::ParScanThreadStateSet(
}
}
-inline ParScanThreadState& ParScanThreadStateSet::thread_sate(int i)
+inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
assert(i >= 0 && i < length(), "sanity check!");
return ((ParScanThreadState*)_data)[i];
}
-void ParScanThreadStateSet::reset()
+void ParScanThreadStateSet::reset(bool promotion_failed)
{
_term.reset_for_reuse();
+ if (promotion_failed) {
+ for (int i = 0; i < length(); ++i) {
+ thread_state(i).print_and_clear_promotion_failure_size();
+ }
+ }
}
void ParScanThreadStateSet::flush()
{
+ // Work in this loop should be kept as lightweight as
+ // possible since this might otherwise become a bottleneck
+ // to scaling. Should we add heavy-weight work into this
+ // loop, consider parallelizing the loop into the worker threads.
for (int i = 0; i < length(); ++i) {
- ParScanThreadState& par_scan_state = thread_sate(i);
+ ParScanThreadState& par_scan_state = thread_state(i);
// Flush stats related to To-space PLAB activity and
// retire the last buffer.
@@ -362,6 +382,14 @@ void ParScanThreadStateSet::flush()
}
}
}
+ if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
+ // We need to call this even when ResizeOldPLAB is disabled
+ // so as to avoid breaking some asserts. While we may be able
+    // to avoid this by reorganizing the code a bit, I am loath
+ // to do that unless we find cases where ergo leads to bad
+ // performance.
+ CFLS_LAB::compute_desired_plab_size();
+ }
}
ParScanClosure::ParScanClosure(ParNewGeneration* g,
@@ -475,7 +503,7 @@ void ParNewGenTask::work(int i) {
Generation* old_gen = gch->next_gen(_gen);
- ParScanThreadState& par_scan_state = _state_set->thread_sate(i);
+ ParScanThreadState& par_scan_state = _state_set->thread_state(i);
par_scan_state.set_young_old_boundary(_young_old_boundary);
par_scan_state.start_strong_roots();
@@ -659,7 +687,7 @@ void ParNewRefProcTaskProxy::work(int i)
{
ResourceMark rm;
HandleMark hm;
- ParScanThreadState& par_scan_state = _state_set.thread_sate(i);
+ ParScanThreadState& par_scan_state = _state_set.thread_state(i);
par_scan_state.set_young_old_boundary(_young_old_boundary);
_task.work(i, par_scan_state.is_alive_closure(),
par_scan_state.keep_alive_closure(),
@@ -693,7 +721,7 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
_generation.reserved().end(), _state_set);
workers->run_task(&rp_task);
- _state_set.reset();
+ _state_set.reset(_generation.promotion_failed());
}
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
@@ -813,7 +841,7 @@ void ParNewGeneration::collect(bool full,
GenCollectedHeap::StrongRootsScope srs(gch);
tsk.work(0);
}
- thread_state_set.reset();
+ thread_state_set.reset(promotion_failed());
if (PAR_STATS_ENABLED && ParallelGCVerbose) {
gclog_or_tty->print("Thread totals:\n"
@@ -882,6 +910,8 @@ void ParNewGeneration::collect(bool full,
swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
from()->set_next_compaction_space(to());
gch->set_incremental_collection_will_fail();
+ // Inform the next generation that a promotion failure occurred.
+ _next_gen->promotion_failure_occurred();
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
@@ -1029,6 +1059,8 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
new_obj = old;
preserve_mark_if_necessary(old, m);
+ // Log the size of the maiden promotion failure
+ par_scan_state->log_promotion_failure(sz);
}
old->forward_to(new_obj);
@@ -1150,6 +1182,8 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
failed_to_promote = true;
preserve_mark_if_necessary(old, m);
+ // Log the size of the maiden promotion failure
+ par_scan_state->log_promotion_failure(sz);
}
} else {
// Is in to-space; do copying ourselves.
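
The promotion-failure bookkeeping added above follows a simple pattern: each worker records the size of only its first (maiden) failed promotion, and the state set prints and clears the per-thread values after a scavenge in which promotion failed. A minimal standalone sketch of that pattern, with illustrative names rather than HotSpot's ParScanThreadState API:

#include <cstddef>
#include <cstdio>

// Standalone sketch of the per-thread promotion-failure bookkeeping added
// above; the names are illustrative, not HotSpot's ParScanThreadState API.
struct ScanThreadState {
  int         thread_num;
  std::size_t promotion_failure_size = 0;   // size of the first failure

  void log_promotion_failure(std::size_t sz) {
    if (promotion_failure_size == 0) {   // keep only the maiden failure
      promotion_failure_size = sz;
    }
  }

  void print_and_clear_promotion_failure_size() {
    if (promotion_failure_size != 0) {
      std::printf(" (%d: promotion failure size = %zu) ",
                  thread_num, promotion_failure_size);
      promotion_failure_size = 0;         // reset for the next collection
    }
  }
};

int main() {
  ScanThreadState workers[2] = {{0}, {1}};
  workers[1].log_promotion_failure(128);   // first failure is recorded
  workers[1].log_promotion_failure(512);   // later failures are ignored
  for (ScanThreadState& w : workers) {
    w.print_and_clear_promotion_failure_size();
  }
  std::printf("\n");
  return 0;
}
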
diff --git a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
index 3e2ab80af2e..a8dee0bbca9 100644
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
@@ -97,6 +97,9 @@ class ParScanThreadState {
int _pushes, _pops, _steals, _steal_attempts, _term_attempts;
int _overflow_pushes, _overflow_refills, _overflow_refill_objs;
+ // Stats for promotion failure
+ size_t _promotion_failure_size;
+
// Timing numbers.
double _start;
double _start_strong_roots;
@@ -169,6 +172,15 @@ class ParScanThreadState {
// Undo the most recent allocation ("obj", of "word_sz").
void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
+ // Promotion failure stats
+ size_t promotion_failure_size() { return _promotion_failure_size; }
+ void log_promotion_failure(size_t sz) {
+ if (_promotion_failure_size == 0) {
+ _promotion_failure_size = sz;
+ }
+ }
+ void print_and_clear_promotion_failure_size();
+
int pushes() { return _pushes; }
int pops() { return _pops; }
int steals() { return _steals; }
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index 8396e7960b1..9fe57121f14 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -51,6 +51,8 @@ static void trace_gen_sizes(const char* const str,
}
jint ParallelScavengeHeap::initialize() {
+ CollectedHeap::pre_initialize();
+
// Cannot be initialized until after the flags are parsed
GenerationSizer flag_parser;
@@ -717,10 +719,6 @@ HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
return young_gen()->allocate(size, true);
}
-void ParallelScavengeHeap::fill_all_tlabs(bool retire) {
- CollectedHeap::fill_all_tlabs(retire);
-}
-
void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
CollectedHeap::accumulate_statistics_all_tlabs();
}
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
index 3bf7671b29a..46fdcc53348 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
@@ -54,7 +54,6 @@ class ParallelScavengeHeap : public CollectedHeap {
protected:
static inline size_t total_invocations();
HeapWord* allocate_new_tlab(size_t size);
- void fill_all_tlabs(bool retire);
public:
ParallelScavengeHeap() : CollectedHeap() {
@@ -191,6 +190,10 @@ class ParallelScavengeHeap : public CollectedHeap {
return true;
}
+ virtual bool card_mark_must_follow_store() const {
+ return false;
+ }
+
// Return true if we don't need a store barrier for
// initializing stores to an object at this address.
virtual bool can_elide_initializing_store_barrier(oop new_obj);
diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp
index 02c6450a7a0..26345dbc924 100644
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.cpp
@@ -51,7 +51,7 @@ PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
cname = PerfDataManager::counter_name(name_space(), "oldCapacity");
_old_capacity = PerfDataManager::create_variable(SUN_GC, cname,
- PerfData::U_Bytes, (jlong) Arguments::initial_heap_size(), CHECK);
+ PerfData::U_Bytes, (jlong) InitialHeapSize, CHECK);
cname = PerfDataManager::counter_name(name_space(), "boundaryMoved");
_boundary_moved = PerfDataManager::create_variable(SUN_GC, cname,
diff --git a/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp b/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp
index 4772f7c45bc..9358688a4c9 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp
@@ -31,7 +31,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// beginning of this sweep:
// Count(end_last_sweep) - Count(start_this_sweep)
// + splitBirths(between) - splitDeaths(between)
- // The above number divided by the time since the start [END???] of the
+ // The above number divided by the time since the end of the
// previous sweep gives us a time rate of demand for blocks
// of this size. We compute a padded average of this rate as
// our current estimate for the time rate of demand for blocks
@@ -41,7 +41,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// estimates.
AdaptivePaddedAverage _demand_rate_estimate;
- ssize_t _desired; // Estimate computed as described above
+ ssize_t _desired; // Demand estimate computed as described above
ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing
ssize_t _surplus; // count - (desired +/- small-percent),
@@ -53,9 +53,9 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
ssize_t _coalDeaths; // loss from coalescing
ssize_t _splitBirths; // additional chunks from splitting
ssize_t _splitDeaths; // loss from splitting
- size_t _returnedBytes; // number of bytes returned to list.
+ size_t _returnedBytes; // number of bytes returned to list.
public:
- void initialize() {
+ void initialize(bool split_birth = false) {
AdaptivePaddedAverage* dummy =
new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
CMS_FLSPadding);
@@ -67,7 +67,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
_beforeSweep = 0;
_coalBirths = 0;
_coalDeaths = 0;
- _splitBirths = 0;
+ _splitBirths = split_birth? 1 : 0;
_splitDeaths = 0;
_returnedBytes = 0;
}
@@ -75,10 +75,12 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
AllocationStats() {
initialize();
}
+
// The rate estimate is in blocks per second.
void compute_desired(size_t count,
float inter_sweep_current,
- float inter_sweep_estimate) {
+ float inter_sweep_estimate,
+ float intra_sweep_estimate) {
// If the latest inter-sweep time is below our granularity
// of measurement, we may call in here with
// inter_sweep_current == 0. However, even for suitably small
@@ -88,12 +90,31 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// vulnerable to noisy glitches. In such cases, we
// ignore the current sample and use currently available
// historical estimates.
+ // XXX NEEDS TO BE FIXED
+ // assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle");
+ // ^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ // "Total Stock" "Not used at this block size"
if (inter_sweep_current > _threshold) {
- ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths();
+ ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths();
+ // XXX NEEDS TO BE FIXED
+ // assert(demand >= 0, "Demand should be non-negative");
+ // Defensive: adjust for imprecision in event counting
+ if (demand < 0) {
+ demand = 0;
+ }
+ float old_rate = _demand_rate_estimate.padded_average();
float rate = ((float)demand)/inter_sweep_current;
_demand_rate_estimate.sample(rate);
- _desired = (ssize_t)(_demand_rate_estimate.padded_average()
- *inter_sweep_estimate);
+ float new_rate = _demand_rate_estimate.padded_average();
+ ssize_t old_desired = _desired;
+ _desired = (ssize_t)(new_rate * (inter_sweep_estimate
+ + (CMSExtrapolateSweep
+ ? intra_sweep_estimate
+ : 0.0)));
+ if (PrintFLSStatistics > 1) {
+ gclog_or_tty->print_cr("demand: %d, old_rate: %f, current_rate: %f, new_rate: %f, old_desired: %d, new_desired: %d",
+ demand, old_rate, rate, new_rate, old_desired, _desired);
+ }
}
}
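
The compute_desired() logic above estimates demand for blocks of a given size as prevSweep - count + splitBirths - splitDeaths (clamped at zero), converts it to a rate over the last inter-sweep interval, smooths that rate, and projects it over the expected horizon, optionally extrapolated through the sweep itself. A rough standalone sketch, assuming a plain weighted average in place of the padded average and a made-up CMS_EXTRAPOLATE constant standing in for the CMSExtrapolateSweep flag:

#include <cstdio>

// Rough sketch of the desired-block-count estimate described in the comments
// above. The padded average is replaced by a plain weighted average, and
// CMS_EXTRAPOLATE stands in for the CMSExtrapolateSweep flag; both are
// assumptions for illustration only.
static const bool  CMS_EXTRAPOLATE = true;
static const float HISTORY_WEIGHT  = 0.75f;   // fraction of old rate retained

struct DemandEstimate {
  float rate = 0.0f;   // smoothed demand, in blocks per second

  long compute_desired(long prev_sweep, long count,
                       long split_births, long split_deaths,
                       float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    // Blocks consumed since the end of the previous sweep, clamped at zero
    // to absorb imprecision in the event counts.
    long demand = prev_sweep - count + split_births - split_deaths;
    if (demand < 0) demand = 0;
    float sample = (float)demand / inter_sweep_current;
    rate = HISTORY_WEIGHT * rate + (1.0f - HISTORY_WEIGHT) * sample;
    // Project the rate over the expected inter-sweep interval, optionally
    // extrapolated through the sweep itself.
    float horizon = inter_sweep_estimate
                  + (CMS_EXTRAPOLATE ? intra_sweep_estimate : 0.0f);
    return (long)(rate * horizon);
  }
};

int main() {
  DemandEstimate e;
  long desired = e.compute_desired(/*prev_sweep*/ 100, /*count*/ 40,
                                   /*split_births*/ 10, /*split_deaths*/ 5,
                                   /*inter_sweep_current*/ 2.0f,
                                   /*inter_sweep_estimate*/ 2.5f,
                                   /*intra_sweep_estimate*/ 0.5f);
  std::printf("desired = %ld\n", desired);
  return 0;
}
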
diff --git a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp
index 9ae5e4a0d29..e18782aac55 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp
@@ -52,11 +52,35 @@ void AdaptiveWeightedAverage::sample(float new_sample) {
_last_sample = new_sample;
}
+void AdaptiveWeightedAverage::print() const {
+ print_on(tty);
+}
+
+void AdaptiveWeightedAverage::print_on(outputStream* st) const {
+ guarantee(false, "NYI");
+}
+
+void AdaptivePaddedAverage::print() const {
+ print_on(tty);
+}
+
+void AdaptivePaddedAverage::print_on(outputStream* st) const {
+ guarantee(false, "NYI");
+}
+
+void AdaptivePaddedNoZeroDevAverage::print() const {
+ print_on(tty);
+}
+
+void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
+ guarantee(false, "NYI");
+}
+
void AdaptivePaddedAverage::sample(float new_sample) {
- // Compute our parent classes sample information
+ // Compute new adaptive weighted average based on new sample.
AdaptiveWeightedAverage::sample(new_sample);
- // Now compute the deviation and the new padded sample
+ // Now update the deviation and the padded average.
float new_avg = average();
float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
deviation());
diff --git a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp
index affc3e44597..1bb4fc9f852 100644
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp
@@ -54,8 +54,8 @@ class AdaptiveWeightedAverage : public CHeapObj {
public:
// Input weight must be between 0 and 100
- AdaptiveWeightedAverage(unsigned weight) :
- _average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) {
+ AdaptiveWeightedAverage(unsigned weight, float avg = 0.0) :
+ _average(avg), _sample_count(0), _weight(weight), _last_sample(0.0) {
}
void clear() {
@@ -64,6 +64,13 @@ class AdaptiveWeightedAverage : public CHeapObj {
_last_sample = 0;
}
+ // Useful for modifying static structures after startup.
+ void modify(size_t avg, unsigned wt, bool force = false) {
+ assert(force, "Are you sure you want to call this?");
+ _average = (float)avg;
+ _weight = wt;
+ }
+
// Accessors
float average() const { return _average; }
unsigned weight() const { return _weight; }
@@ -83,6 +90,10 @@ class AdaptiveWeightedAverage : public CHeapObj {
// Convert to float and back to avoid integer overflow.
return (size_t)exp_avg((float)avg, (float)sample, weight);
}
+
+ // Printing
+ void print_on(outputStream* st) const;
+ void print() const;
};
@@ -129,6 +140,10 @@ class AdaptivePaddedAverage : public AdaptiveWeightedAverage {
// Override
void sample(float new_sample);
+
+ // Printing
+ void print_on(outputStream* st) const;
+ void print() const;
};
// A weighted average that includes a deviation from the average,
@@ -146,7 +161,12 @@ public:
AdaptivePaddedAverage(weight, padding) {}
// Override
void sample(float new_sample);
+
+ // Printing
+ void print_on(outputStream* st) const;
+ void print() const;
};
+
// Use a least squares fit to a set of data to generate a linear
// equation.
// y = intercept + slope * x
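
AdaptiveWeightedAverage, extended above with a starting value and a modify() hook, is essentially an exponentially weighted moving average whose weight is expressed as a percentage. A small illustrative sketch under that assumption; the exact weighting convention used by gcUtil is not restated in this hunk, so treat the formula below as a stand-in rather than HotSpot's:

#include <cstdio>

// Illustrative stand-in for the weighted average kept by gcUtil; here the
// weight is assumed to be the percentage of the old average retained on
// each new sample (an assumption, not a restatement of HotSpot's formula).
struct WeightedAverage {
  float    average;
  unsigned weight;   // 0..100, how much history to keep

  WeightedAverage(unsigned w, float starting_avg = 0.0f)
      : average(starting_avg), weight(w) {}

  void sample(float new_sample) {
    average = (weight * average + (100.0f - weight) * new_sample) / 100.0f;
  }

  // Counterpart of the modify() hook added above: reseed the average and
  // weight after startup.
  void modify(float avg, unsigned wt) { average = avg; weight = wt; }
};

int main() {
  WeightedAverage avg(/*weight*/ 75, /*starting average*/ 10.0f);
  const float samples[] = {12.0f, 8.0f, 11.0f};
  for (float s : samples) avg.sample(s);
  std::printf("average = %f\n", avg.average);
  return 0;
}
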
diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
index c775e6021b1..031afd57572 100644
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp
@@ -59,8 +59,18 @@ CollectedHeap::CollectedHeap()
PerfDataManager::create_string_variable(SUN_GC, "lastCause",
80, GCCause::to_string(_gc_lastcause), CHECK);
}
+ _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
}
+void CollectedHeap::pre_initialize() {
+ // Used for ReduceInitialCardMarks (when COMPILER2 is used);
+ // otherwise remains unused.
+#ifdef COMPILER2
+ _defer_initial_card_mark = ReduceInitialCardMarks && (DeferInitialCardMark || card_mark_must_follow_store());
+#else
+ assert(_defer_initial_card_mark == false, "Who would set it?");
+#endif
+}
#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
@@ -140,12 +150,13 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
MemRegion deferred = thread->deferred_card_mark();
if (!deferred.is_empty()) {
+ assert(_defer_initial_card_mark, "Otherwise should be empty");
{
// Verify that the storage points to a parsable object in heap
DEBUG_ONLY(oop old_obj = oop(deferred.start());)
assert(is_in(old_obj), "Not in allocated heap");
assert(!can_elide_initializing_store_barrier(old_obj),
- "Else should have been filtered in defer_store_barrier()");
+ "Else should have been filtered in new_store_pre_barrier()");
assert(!is_in_permanent(old_obj), "Sanity: not expected");
assert(old_obj->is_oop(true), "Not an oop");
assert(old_obj->is_parsable(), "Will not be concurrently parsable");
@@ -174,9 +185,7 @@ void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
// so long as the card-mark is completed before the next
// scavenge. For all these cases, we can do a card mark
// at the point at which we do a slow path allocation
-// in the old gen. For uniformity, however, we end
-// up using the same scheme (see below) for all three
-// cases (deferring the card-mark appropriately).
+// in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
// in addition that the card-mark for an old gen allocated
// object strictly follow any associated initializing stores.
@@ -199,12 +208,13 @@ void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
// but, like in CMS, because of the presence of concurrent refinement
// (much like CMS' precleaning), must strictly follow the oop-store.
// Thus, using the same protocol for maintaining the intended
-// invariants turns out, serendepitously, to be the same for all
-// three collectors/heap types above.
+// invariants turns out, serendipitously, to be the same for both
+// G1 and CMS.
//
-// For each future collector, this should be reexamined with
-// that specific collector in mind.
-oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) {
+// For any future collector, this code should be reexamined with
+// that specific collector in mind, and the documentation above suitably
+// extended and updated.
+oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
// If a previous card-mark was deferred, flush it now.
flush_deferred_store_barrier(thread);
if (can_elide_initializing_store_barrier(new_obj)) {
@@ -212,10 +222,17 @@ oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) {
// following the flush above.
assert(thread->deferred_card_mark().is_empty(), "Error");
} else {
- // Remember info for the newly deferred store barrier
- MemRegion deferred = MemRegion((HeapWord*)new_obj, new_obj->size());
- assert(!deferred.is_empty(), "Error");
- thread->set_deferred_card_mark(deferred);
+ MemRegion mr((HeapWord*)new_obj, new_obj->size());
+ assert(!mr.is_empty(), "Error");
+ if (_defer_initial_card_mark) {
+ // Defer the card mark
+ thread->set_deferred_card_mark(mr);
+ } else {
+ // Do the card mark
+ BarrierSet* bs = barrier_set();
+ assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
+ bs->write_region(mr);
+ }
}
return new_obj;
}
@@ -241,9 +258,9 @@ void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}
-void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
+void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
- if (ZapFillerObjects) {
+ if (ZapFillerObjects && zap) {
Copy::fill_to_words(start + filler_array_hdr_size(),
words - filler_array_hdr_size(), 0XDEAFBABE);
}
@@ -251,7 +268,7 @@ void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
#endif // ASSERT
void
-CollectedHeap::fill_with_array(HeapWord* start, size_t words)
+CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
assert(words >= filler_array_min_size(), "too small for an array");
assert(words <= filler_array_max_size(), "too big for a single object");
@@ -262,31 +279,31 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words)
// Set the length first for concurrent GC.
((arrayOop)start)->set_length((int)len);
post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
- DEBUG_ONLY(zap_filler_array(start, words);)
+ DEBUG_ONLY(zap_filler_array(start, words, zap);)
}
void
-CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
+CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
assert(words <= filler_array_max_size(), "too big for a single object");
if (words >= filler_array_min_size()) {
- fill_with_array(start, words);
+ fill_with_array(start, words, zap);
} else if (words > 0) {
assert(words == min_fill_size(), "unaligned size");
- post_allocation_setup_common(SystemDictionary::object_klass(), start,
+ post_allocation_setup_common(SystemDictionary::Object_klass(), start,
words);
}
}
-void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
+void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
DEBUG_ONLY(fill_args_check(start, words);)
HandleMark hm; // Free handles before leaving.
- fill_with_object_impl(start, words);
+ fill_with_object_impl(start, words, zap);
}
-void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
+void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
DEBUG_ONLY(fill_args_check(start, words);)
HandleMark hm; // Free handles before leaving.
@@ -299,13 +316,13 @@ void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
const size_t max = filler_array_max_size();
while (words > max) {
const size_t cur = words - max >= min ? max : max - min;
- fill_with_array(start, cur);
+ fill_with_array(start, cur, zap);
start += cur;
words -= cur;
}
#endif
- fill_with_object_impl(start, words);
+ fill_with_object_impl(start, words, zap);
}
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
@@ -313,22 +330,6 @@ HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
return NULL;
}
-void CollectedHeap::fill_all_tlabs(bool retire) {
- assert(UseTLAB, "should not reach here");
- // See note in ensure_parsability() below.
- assert(SafepointSynchronize::is_at_safepoint() ||
- !is_init_completed(),
- "should only fill tlabs at safepoint");
- // The main thread starts allocating via a TLAB even before it
- // has added itself to the threads list at vm boot-up.
- assert(Threads::first() != NULL,
- "Attempt to fill tlabs before main thread has been added"
- " to threads list is doomed to failure!");
- for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
- thread->tlab().make_parsable(retire);
- }
-}
-
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
// The second disjunct in the assertion below makes a concession
// for the start-up verification done while the VM is being
@@ -343,8 +344,24 @@ void CollectedHeap::ensure_parsability(bool retire_tlabs) {
"Should only be called at a safepoint or at start-up"
" otherwise concurrent mutator activity may make heap "
" unparsable again");
- if (UseTLAB) {
- fill_all_tlabs(retire_tlabs);
+ const bool use_tlab = UseTLAB;
+ const bool deferred = _defer_initial_card_mark;
+ // The main thread starts allocating via a TLAB even before it
+ // has added itself to the threads list at vm boot-up.
+ assert(!use_tlab || Threads::first() != NULL,
+ "Attempt to fill tlabs before main thread has been added"
+ " to threads list is doomed to failure!");
+ for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+ if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
+#ifdef COMPILER2
+ // The deferred store barriers must all have been flushed to the
+ // card-table (or other remembered set structure) before GC starts
+ // processing the card-table (or other remembered set).
+ if (deferred) flush_deferred_store_barrier(thread);
+#else
+ assert(!deferred, "Should be false");
+ assert(thread->deferred_card_mark().is_empty(), "Should be empty");
+#endif
}
}
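
The change above replaces unconditional deferral of the initial card mark with a policy bit: on a slow-path allocation the heap either records a per-thread deferred region (when the collector requires the card mark to strictly follow the initializing stores) or dirties the card table immediately, and ensure_parsability() flushes any deferred region before GC scans the remembered set. A toy sketch of that flow, with invented types and without the can_elide_initializing_store_barrier() fast path:

#include <cstddef>
#include <cstdio>

// Toy model (invented types, not HotSpot's) of the card-mark policy above:
// either defer the initial card mark in a per-thread slot or dirty the card
// table immediately, and flush any deferred region before a GC scans it.
struct CardTable {
  void dirty(const void* start, std::size_t words) {
    std::printf("dirty cards for [%p, +%zu words)\n", start, words);
  }
};

struct ThreadState {
  const void* deferred_start = nullptr;   // deferred card-mark region
  std::size_t deferred_words = 0;
};

struct Heap {
  CardTable cards;
  bool defer_initial_card_mark;   // decided once, from the collector policy

  void flush_deferred(ThreadState& t) {
    if (t.deferred_start != nullptr) {
      cards.dirty(t.deferred_start, t.deferred_words);
      t.deferred_start = nullptr;
      t.deferred_words = 0;
    }
  }

  // Called on a slow-path allocation whose initializing stores had their
  // barriers elided by the compiler.
  void new_store_pre_barrier(ThreadState& t, const void* obj, std::size_t words) {
    flush_deferred(t);                 // at most one region is ever deferred
    if (defer_initial_card_mark) {
      t.deferred_start = obj;          // card mark must follow the stores
      t.deferred_words = words;
    } else {
      cards.dirty(obj, words);         // safe to mark right away
    }
  }
};

int main() {
  Heap heap{CardTable{}, /*defer_initial_card_mark*/ true};
  ThreadState t;
  int obj[4] = {0};
  heap.new_store_pre_barrier(t, obj, 4);   // recorded, not yet marked
  heap.flush_deferred(t);                  // flushed before the next GC
  return 0;
}
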
diff --git a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
index 18148c8a30f..2bc210a4717 100644
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
@@ -51,6 +51,9 @@ class CollectedHeap : public CHeapObj {
// Used for filler objects (static, but initialized in ctor).
static size_t _filler_array_max_size;
+ // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
+ bool _defer_initial_card_mark;
+
protected:
MemRegion _reserved;
BarrierSet* _barrier_set;
@@ -70,13 +73,16 @@ class CollectedHeap : public CHeapObj {
// Constructor
CollectedHeap();
+ // Do common initializations that must follow instance construction,
+ // for example, those needing virtual calls.
+ // This code could perhaps be moved into initialize() but would
+ // be slightly more awkward because we want the latter to be a
+ // pure virtual.
+ void pre_initialize();
+
// Create a new tlab
virtual HeapWord* allocate_new_tlab(size_t size);
- // Fix up tlabs to make the heap well-formed again,
- // optionally retiring the tlabs.
- virtual void fill_all_tlabs(bool retire);
-
// Accumulate statistics on all tlabs.
virtual void accumulate_statistics_all_tlabs();
@@ -127,14 +133,14 @@ class CollectedHeap : public CHeapObj {
static inline size_t filler_array_max_size();
DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
- DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)
+ DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
// Fill with a single array; caller must ensure filler_array_min_size() <=
// words <= filler_array_max_size().
- static inline void fill_with_array(HeapWord* start, size_t words);
+ static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
// Fill with a single object (either an int array or a java.lang.Object).
- static inline void fill_with_object_impl(HeapWord* start, size_t words);
+ static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
// Verification functions
virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
@@ -338,14 +344,14 @@ class CollectedHeap : public CHeapObj {
return size_t(align_object_size(oopDesc::header_size()));
}
- static void fill_with_objects(HeapWord* start, size_t words);
+ static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
- static void fill_with_object(HeapWord* start, size_t words);
- static void fill_with_object(MemRegion region) {
- fill_with_object(region.start(), region.word_size());
+ static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
+ static void fill_with_object(MemRegion region, bool zap = true) {
+ fill_with_object(region.start(), region.word_size(), zap);
}
- static void fill_with_object(HeapWord* start, HeapWord* end) {
- fill_with_object(start, pointer_delta(end, start));
+ static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
+ fill_with_object(start, pointer_delta(end, start), zap);
}
// Some heaps may offer a contiguous region for shared non-blocking
@@ -431,14 +437,25 @@ class CollectedHeap : public CHeapObj {
// promises to call this function on such a slow-path-allocated
// object before performing initializations that have elided
// store barriers. Returns new_obj, or maybe a safer copy thereof.
- virtual oop defer_store_barrier(JavaThread* thread, oop new_obj);
+ virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
// Answers whether an initializing store to a new object currently
- // allocated at the given address doesn't need a (deferred) store
+ // allocated at the given address doesn't need a store
// barrier. Returns "true" if it doesn't need an initializing
// store barrier; answers "false" if it does.
virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
+ // If a compiler is eliding store barriers for TLAB-allocated objects,
+ // we will be informed of a slow-path allocation by a call
+ // to new_store_pre_barrier() above. Such a call precedes the
+ // initialization of the object itself, and no post-store-barriers will
+ // be issued. Some heap types require that the barrier strictly follows
+ // the initializing stores. (This is currently implemented by deferring the
+ // barrier until the next slow-path allocation or gc-related safepoint.)
+ // This interface answers whether a particular heap type needs the card
+ // mark to be thus strictly sequenced after the stores.
+ virtual bool card_mark_must_follow_store() const = 0;
+
// If the CollectedHeap was asked to defer a store barrier above,
// this informs it to flush such a deferred store barrier to the
// remembered set.
diff --git a/hotspot/src/share/vm/includeDB_compiler2 b/hotspot/src/share/vm/includeDB_compiler2
index 6ba7bfaf867..34c84d72200 100644
--- a/hotspot/src/share/vm/includeDB_compiler2
+++ b/hotspot/src/share/vm/includeDB_compiler2
@@ -149,11 +149,15 @@ c2compiler.cpp runtime.hpp
c2compiler.hpp abstractCompiler.hpp
callGenerator.cpp addnode.hpp
+callGenerator.cpp bcEscapeAnalyzer.hpp
callGenerator.cpp callGenerator.hpp
callGenerator.cpp callnode.hpp
callGenerator.cpp cfgnode.hpp
callGenerator.cpp compileLog.hpp
callGenerator.cpp connode.hpp
+callGenerator.cpp ciCPCache.hpp
+callGenerator.cpp ciMethodHandle.hpp
+callGenerator.cpp javaClasses.hpp
callGenerator.cpp parse.hpp
callGenerator.cpp rootnode.hpp
callGenerator.cpp runtime.hpp
@@ -321,6 +325,7 @@ compile.cpp phaseX.hpp
compile.cpp rootnode.hpp
compile.cpp runtime.hpp
compile.cpp signature.hpp
+compile.cpp stringopts.hpp
compile.cpp stubRoutines.hpp
compile.cpp systemDictionary.hpp
compile.cpp timer.hpp
@@ -389,6 +394,9 @@ divnode.hpp type.hpp
doCall.cpp addnode.hpp
doCall.cpp callGenerator.hpp
+doCall.cpp ciCallSite.hpp
+doCall.cpp ciCPCache.hpp
+doCall.cpp ciMethodHandle.hpp
doCall.cpp cfgnode.hpp
doCall.cpp compileLog.hpp
doCall.cpp linkResolver.hpp
@@ -476,12 +484,16 @@ graphKit.cpp rootnode.hpp
graphKit.cpp runtime.hpp
graphKit.cpp sharedRuntime.hpp
+graphKit.hpp addnode.hpp
graphKit.hpp callnode.hpp
graphKit.hpp cfgnode.hpp
graphKit.hpp ciEnv.hpp
+graphKit.hpp divnode.hpp
graphKit.hpp compile.hpp
graphKit.hpp deoptimization.hpp
graphKit.hpp phaseX.hpp
+graphKit.hpp mulnode.hpp
+graphKit.hpp subnode.hpp
graphKit.hpp type.hpp
idealKit.cpp addnode.hpp
@@ -490,7 +502,10 @@ idealKit.cpp cfgnode.hpp
idealKit.cpp idealKit.hpp
idealKit.cpp runtime.hpp
+idealKit.hpp addnode.hpp
+idealKit.hpp cfgnode.hpp
idealKit.hpp connode.hpp
+idealKit.hpp divnode.hpp
idealKit.hpp mulnode.hpp
idealKit.hpp phaseX.hpp
idealKit.hpp subnode.hpp
@@ -586,6 +601,7 @@ locknode.hpp subnode.hpp
loopTransform.cpp addnode.hpp
loopTransform.cpp allocation.inline.hpp
+loopTransform.cpp callnode.hpp
loopTransform.cpp connode.hpp
loopTransform.cpp compileLog.hpp
loopTransform.cpp divnode.hpp
@@ -641,6 +657,7 @@ macro.cpp addnode.hpp
macro.cpp callnode.hpp
macro.cpp cfgnode.hpp
macro.cpp compile.hpp
+macro.cpp compileLog.hpp
macro.cpp connode.hpp
macro.cpp locknode.hpp
macro.cpp loopnode.hpp
@@ -993,6 +1010,21 @@ split_if.cpp callnode.hpp
split_if.cpp connode.hpp
split_if.cpp loopnode.hpp
+stringopts.hpp phaseX.hpp
+stringopts.hpp node.hpp
+
+stringopts.cpp addnode.hpp
+stringopts.cpp callnode.hpp
+stringopts.cpp callGenerator.hpp
+stringopts.cpp compileLog.hpp
+stringopts.cpp divnode.hpp
+stringopts.cpp idealKit.hpp
+stringopts.cpp graphKit.hpp
+stringopts.cpp rootnode.hpp
+stringopts.cpp runtime.hpp
+stringopts.cpp subnode.hpp
+stringopts.cpp stringopts.hpp
+
stubGenerator_.cpp runtime.hpp
stubRoutines.cpp runtime.hpp
diff --git a/hotspot/src/share/vm/includeDB_core b/hotspot/src/share/vm/includeDB_core
index e96a5e9be1a..ec068b6aeeb 100644
--- a/hotspot/src/share/vm/includeDB_core
+++ b/hotspot/src/share/vm/includeDB_core
@@ -1,5 +1,5 @@
//
-// Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+// Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -289,7 +289,7 @@ attachListener.hpp allocation.hpp
attachListener.hpp debug.hpp
attachListener.hpp ostream.hpp
-barrierSet.cpp barrierSet.hpp
+barrierSet.cpp barrierSet.inline.hpp
barrierSet.cpp collectedHeap.hpp
barrierSet.cpp universe.hpp
@@ -516,6 +516,11 @@ ciArrayKlassKlass.hpp ciKlassKlass.hpp
ciCallProfile.hpp ciClassList.hpp
+ciCallSite.cpp ciCallSite.hpp
+ciCallSite.cpp ciUtilities.hpp
+
+ciCallSite.hpp ciInstance.hpp
+
ciConstant.cpp allocation.hpp
ciConstant.cpp allocation.inline.hpp
ciConstant.cpp ciConstant.hpp
@@ -532,6 +537,12 @@ ciConstantPoolCache.cpp ciUtilities.hpp
ciConstantPoolCache.hpp growableArray.hpp
ciConstantPoolCache.hpp resourceArea.hpp
+ciCPCache.cpp cpCacheOop.hpp
+ciCPCache.cpp ciCPCache.hpp
+
+ciCPCache.hpp ciClassList.hpp
+ciCPCache.hpp ciObject.hpp
+
ciEnv.cpp allocation.inline.hpp
ciEnv.cpp ciConstant.hpp
ciEnv.cpp ciEnv.hpp
@@ -570,6 +581,7 @@ ciEnv.hpp debugInfoRec.hpp
ciEnv.hpp dependencies.hpp
ciEnv.hpp exceptionHandlerTable.hpp
ciEnv.hpp oopMap.hpp
+ciEnv.hpp systemDictionary.hpp
ciEnv.hpp thread.hpp
ciExceptionHandler.cpp ciExceptionHandler.hpp
@@ -592,6 +604,7 @@ ciField.cpp universe.inline.hpp
ciField.hpp ciClassList.hpp
ciField.hpp ciConstant.hpp
ciField.hpp ciFlags.hpp
+ciField.hpp ciInstance.hpp
ciFlags.cpp ciFlags.hpp
@@ -678,6 +691,7 @@ ciMethod.hpp ciFlags.hpp
ciMethod.hpp ciInstanceKlass.hpp
ciMethod.hpp ciObject.hpp
ciMethod.hpp ciSignature.hpp
+ciMethod.hpp methodHandles.hpp
ciMethod.hpp methodLiveness.hpp
ciMethodBlocks.cpp bytecode.hpp
@@ -709,6 +723,15 @@ ciMethodKlass.cpp ciUtilities.hpp
ciMethodKlass.hpp ciKlass.hpp
ciMethodKlass.hpp ciSymbol.hpp
+ciMethodHandle.cpp ciClassList.hpp
+ciMethodHandle.cpp ciInstance.hpp
+ciMethodHandle.cpp ciMethodHandle.hpp
+ciMethodHandle.cpp ciUtilities.hpp
+ciMethodHandle.cpp methodHandles.hpp
+ciMethodHandle.cpp methodHandleWalk.hpp
+
+ciMethodHandle.hpp methodHandles.hpp
+
ciNullObject.cpp ciNullObject.hpp
ciNullObject.hpp ciClassList.hpp
@@ -754,11 +777,14 @@ ciObject.hpp handles.hpp
ciObject.hpp jniHandles.hpp
ciObjectFactory.cpp allocation.inline.hpp
+ciObjectFactory.cpp ciCallSite.hpp
+ciObjectFactory.cpp ciCPCache.hpp
ciObjectFactory.cpp ciInstance.hpp
ciObjectFactory.cpp ciInstanceKlass.hpp
ciObjectFactory.cpp ciInstanceKlassKlass.hpp
ciObjectFactory.cpp ciMethod.hpp
ciObjectFactory.cpp ciMethodData.hpp
+ciObjectFactory.cpp ciMethodHandle.hpp
ciObjectFactory.cpp ciMethodKlass.hpp
ciObjectFactory.cpp ciNullObject.hpp
ciObjectFactory.cpp ciObjArray.hpp
@@ -792,6 +818,7 @@ ciSignature.hpp ciSymbol.hpp
ciSignature.hpp globalDefinitions.hpp
ciSignature.hpp growableArray.hpp
+ciStreams.cpp ciCallSite.hpp
ciStreams.cpp ciConstant.hpp
ciStreams.cpp ciField.hpp
ciStreams.cpp ciStreams.hpp
@@ -1291,6 +1318,7 @@ cpCacheOop.cpp jvmtiRedefineClassesTrace.hpp
cpCacheOop.cpp markSweep.inline.hpp
cpCacheOop.cpp objArrayOop.hpp
cpCacheOop.cpp oop.inline.hpp
+cpCacheOop.cpp rewriter.hpp
cpCacheOop.cpp universe.inline.hpp
cpCacheOop.hpp allocation.hpp
@@ -1497,6 +1525,7 @@ disassembler.cpp disassembler.hpp
disassembler.cpp fprofiler.hpp
disassembler.cpp handles.inline.hpp
disassembler.cpp hpi.hpp
+disassembler.cpp javaClasses.hpp
disassembler.cpp stubCodeGenerator.hpp
disassembler.cpp stubRoutines.hpp
@@ -2812,6 +2841,12 @@ methodDataOop.hpp oop.hpp
methodDataOop.hpp orderAccess.hpp
methodDataOop.hpp universe.hpp
+methodHandleWalk.hpp methodHandles.hpp
+
+methodHandleWalk.cpp methodHandleWalk.hpp
+methodHandleWalk.cpp oopFactory.hpp
+methodHandleWalk.cpp rewriter.hpp
+
methodHandles.hpp frame.inline.hpp
methodHandles.hpp globals.hpp
methodHandles.hpp interfaceSupport.hpp
@@ -3469,6 +3504,7 @@ reflection.cpp javaCalls.hpp
reflection.cpp javaClasses.hpp
reflection.cpp jvm.h
reflection.cpp linkResolver.hpp
+reflection.cpp methodHandleWalk.hpp
reflection.cpp objArrayKlass.hpp
reflection.cpp objArrayOop.hpp
reflection.cpp oopFactory.hpp
diff --git a/hotspot/src/share/vm/includeDB_gc_parallel b/hotspot/src/share/vm/includeDB_gc_parallel
index 5f089b7d7f1..2d1c45a0c9b 100644
--- a/hotspot/src/share/vm/includeDB_gc_parallel
+++ b/hotspot/src/share/vm/includeDB_gc_parallel
@@ -21,6 +21,8 @@
// have any questions.
//
+arguments.cpp compactibleFreeListSpace.hpp
+
assembler_.cpp g1SATBCardTableModRefBS.hpp
assembler_.cpp g1CollectedHeap.inline.hpp
assembler_.cpp heapRegion.hpp
diff --git a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
index 78323ee2aee..8ab9e40d32b 100644
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -109,6 +109,8 @@ class AbstractInterpreter: AllStatic {
static void print_method_kind(MethodKind kind) PRODUCT_RETURN;
+ static bool can_be_compiled(methodHandle m);
+
// Runtime support
// length = invoke bytecode length (to advance to next bytecode)
diff --git a/hotspot/src/share/vm/interpreter/bytecode.cpp b/hotspot/src/share/vm/interpreter/bytecode.cpp
index 0cc8a728950..4e8d9053615 100644
--- a/hotspot/src/share/vm/interpreter/bytecode.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecode.cpp
@@ -102,7 +102,9 @@ methodHandle Bytecode_invoke::static_target(TRAPS) {
KlassHandle resolved_klass;
constantPoolHandle constants(THREAD, _method->constants());
- if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
+ if (adjusted_invoke_code() == Bytecodes::_invokedynamic) {
+ LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
+ } else if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
} else {
LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
diff --git a/hotspot/src/share/vm/interpreter/bytecode.hpp b/hotspot/src/share/vm/interpreter/bytecode.hpp
index 49e70e8c1f3..7069081028f 100644
--- a/hotspot/src/share/vm/interpreter/bytecode.hpp
+++ b/hotspot/src/share/vm/interpreter/bytecode.hpp
@@ -205,12 +205,14 @@ class Bytecode_invoke: public ResourceObj {
bool is_invokespecial() const { return adjusted_invoke_code() == Bytecodes::_invokespecial; }
bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
+ bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); }
bool has_giant_index() const { return is_invokedynamic(); }
bool is_valid() const { return is_invokeinterface() ||
is_invokevirtual() ||
is_invokestatic() ||
- is_invokespecial(); }
+ is_invokespecial() ||
+ is_invokedynamic(); }
// Creation
inline friend Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci);
diff --git a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
index ac46f4ab5e1..1f0adb487ed 100644
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
@@ -281,7 +281,7 @@
#define DO_BACKEDGE_CHECKS(skip, branch_pc) \
if ((skip) <= 0) { \
- if (UseCompiler && UseLoopCounter) { \
+ if (UseLoopCounter) { \
bool do_OSR = UseOnStackReplacement; \
BACKEDGE_COUNT->increment(); \
if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit(); \
@@ -289,16 +289,12 @@
nmethod* osr_nmethod; \
OSR_REQUEST(osr_nmethod, branch_pc); \
if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
- intptr_t* buf; \
- CALL_VM(buf=SharedRuntime::OSR_migration_begin(THREAD), handle_exception); \
+ intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD); \
istate->set_msg(do_osr); \
istate->set_osr_buf((address)buf); \
istate->set_osr_entry(osr_nmethod->osr_entry()); \
return; \
} \
- } else { \
- INCR_INVOCATION_COUNT; \
- SAFEPOINT; \
} \
} /* UseCompiler ... */ \
INCR_INVOCATION_COUNT; \
@@ -1281,12 +1277,7 @@ run:
jfloat f;
jdouble r;
f = STACK_FLOAT(-1);
-#ifdef IA64
- // IA64 gcc bug
- r = ( f == 0.0f ) ? (jdouble) f : (jdouble) f + ia64_double_zero;
-#else
r = (jdouble) f;
-#endif
MORE_STACK(-1); // POP
SET_STACK_DOUBLE(r, 1);
UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
diff --git a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp
index 79cee762daa..c7a874ac4df 100644
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp
@@ -270,6 +270,8 @@ void BytecodePrinter::print_constant(int i, outputStream* st) {
st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name());
} else if (tag.is_unresolved_klass()) {
st->print_cr(" ", i);
+ } else if (tag.is_object()) {
+ st->print_cr(" " PTR_FORMAT, constants->object_at(i));
} else {
st->print_cr(" bad tag=%d at %d", tag.value(), i);
}
@@ -282,18 +284,21 @@ void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i);
+ int nt_index = -1;
+
switch (tag.value()) {
case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_Fieldref:
+ case JVM_CONSTANT_NameAndType:
break;
default:
st->print_cr(" bad tag=%d at %d", tag.value(), i);
return;
}
- symbolOop name = constants->name_ref_at(orig_i);
- symbolOop signature = constants->signature_ref_at(orig_i);
+ symbolOop name = constants->uncached_name_ref_at(i);
+ symbolOop signature = constants->uncached_signature_ref_at(i);
st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
}
diff --git a/hotspot/src/share/vm/interpreter/bytecodes.cpp b/hotspot/src/share/vm/interpreter/bytecodes.cpp
index e55c9ff3851..cb2a7ecb234 100644
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp
+++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp
@@ -357,7 +357,7 @@ void Bytecodes::initialize() {
def(_invokespecial , "invokespecial" , "bjj" , NULL , T_ILLEGAL, -1, true);
def(_invokestatic , "invokestatic" , "bjj" , NULL , T_ILLEGAL, 0, true);
def(_invokeinterface , "invokeinterface" , "bjj__", NULL , T_ILLEGAL, -1, true);
- def(_invokedynamic , "invokedynamic" , "bjjjj", NULL , T_ILLEGAL, -1, true );
+ def(_invokedynamic , "invokedynamic" , "bjjjj", NULL , T_ILLEGAL, 0, true );
def(_new , "new" , "bii" , NULL , T_OBJECT , 1, true );
def(_newarray , "newarray" , "bc" , NULL , T_OBJECT , 0, true );
def(_anewarray , "anewarray" , "bii" , NULL , T_OBJECT , 0, true );
diff --git a/hotspot/src/share/vm/interpreter/interpreter.cpp b/hotspot/src/share/vm/interpreter/interpreter.cpp
index b4f1007bc6c..484a47f7d42 100644
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp
@@ -314,6 +314,20 @@ address AbstractInterpreter::deopt_continue_after_entry(methodOop method, addres
break;
}
+ case Bytecodes::_invokedynamic: {
+ Thread *thread = Thread::current();
+ ResourceMark rm(thread);
+ methodHandle mh(thread, method);
+ type = Bytecode_invoke_at(mh, bci)->result_type(thread);
+ // since the cache entry might not be initialized:
+ // (NOT needed for the old calling convention)
+ if (!is_top_frame) {
+ int index = Bytes::get_native_u4(bcp+1);
+ method->constants()->cache()->secondary_entry_at(index)->set_parameter_size(callee_parameters);
+ }
+ break;
+ }
+
case Bytecodes::_ldc :
type = constant_pool_type( method, *(bcp+1) );
break;
diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
index c8e19944de7..bd9fc8d7c98 100644
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
@@ -353,7 +353,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
assert(h_exception->is_oop(), "just checking");
// Check that exception is a subclass of Throwable, otherwise we have a VerifyError
- if (!(h_exception->is_a(SystemDictionary::throwable_klass()))) {
+ if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
if (ExitVMOnVerifyError) vm_exit(-1);
ShouldNotReachHere();
}
@@ -585,7 +585,7 @@ IRT_ENTRY(void, InterpreterRuntime::new_illegal_monitor_state_exception(JavaThre
Handle exception(thread, thread->vm_result());
assert(exception() != NULL, "vm result should be set");
thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
- if (!exception->is_a(SystemDictionary::threaddeath_klass())) {
+ if (!exception->is_a(SystemDictionary::ThreadDeath_klass())) {
exception = get_preinitialized_exception(
SystemDictionary::IllegalMonitorStateException_klass(),
CATCH);
@@ -660,7 +660,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
}
if (info.resolved_method()->method_holder() ==
- SystemDictionary::object_klass()) {
+ SystemDictionary::Object_klass()) {
// NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
// (see also cpCacheOop.cpp for details)
methodHandle rm = info.resolved_method();
@@ -681,7 +681,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
IRT_END
-// First time execution: Resolve symbols, create a permanent CallSiteImpl object.
+// First time execution: Resolve symbols, create a permanent CallSite object.
IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
ResourceMark rm(thread);
@@ -708,21 +708,16 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
constantPoolHandle pool(thread, caller_method->constants());
pool->set_invokedynamic(); // mark header to flag active call sites
- int raw_index = four_byte_index(thread);
- assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "invokedynamic indexes marked specially");
-
- // there are two CPC entries that are of interest:
- int site_index = constantPoolCacheOopDesc::decode_secondary_index(raw_index);
- int main_index = pool->cache()->entry_at(site_index)->main_entry_index();
- // and there is one CP entry, a NameAndType:
- int nt_index = pool->map_instruction_operand_to_index(raw_index);
+ int site_index = four_byte_index(thread);
+ // there is a second CPC entry that is of interest; it caches signature info:
+ int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
// first resolve the signature to a MH.invoke methodOop
if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) {
JvmtiHideSingleStepping jhss(thread);
CallInfo info;
LinkResolver::resolve_invoke(info, Handle(), pool,
- raw_index, bytecode, CHECK);
+ site_index, bytecode, CHECK);
// The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves
// as a common reference point for all invokedynamic call sites with
// that exact call descriptor. We will link it in the CP cache exactly
@@ -741,7 +736,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(),
"correct result from LinkResolver::resolve_invokedynamic");
- symbolHandle call_site_name(THREAD, pool->nt_name_ref_at(nt_index));
+ symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
Handle call_site
= SystemDictionary::make_dynamic_call_site(caller_method->method_holder(),
caller_method->method_idnum(),
@@ -753,61 +748,11 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
// In the secondary entry, the f1 field is the call site, and the f2 (index)
// field is some data about the invoke site.
int extra_data = 0;
- pool->cache()->entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
+ pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
}
IRT_END
-// Called on first time execution, and also whenever the CallSite.target is null.
-// FIXME: Do more of this in Java code.
-IRT_ENTRY(void, InterpreterRuntime::bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site)) {
- methodHandle mh_invdyn(thread, (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site));
- Handle mh_type(thread, mh_invdyn->method_handle_type());
- objArrayHandle mh_ptypes(thread, java_dyn_MethodType::ptypes(mh_type()));
-
- // squish the arguments down to a single array
- int nargs = mh_ptypes->length();
- objArrayHandle arg_array;
- {
- objArrayOop aaoop = oopFactory::new_objArray(SystemDictionary::object_klass(), nargs, CHECK);
- arg_array = objArrayHandle(thread, aaoop);
- }
- frame fr = thread->last_frame();
- assert(fr.interpreter_frame_bcp() != NULL, "sanity");
- int tos_offset = 0;
- for (int i = nargs; --i >= 0; ) {
- intptr_t* slot_addr = fr.interpreter_frame_tos_at(tos_offset++);
- oop ptype = mh_ptypes->obj_at(i);
- oop arg = NULL;
- if (!java_lang_Class::is_primitive(ptype)) {
- arg = *(oop*) slot_addr;
- } else {
- BasicType bt = java_lang_Class::primitive_type(ptype);
- assert(frame::interpreter_frame_expression_stack_direction() < 0, "else reconsider this code");
- jvalue value;
- Interpreter::get_jvalue_in_slot(slot_addr, bt, &value);
- tos_offset += type2size[bt]-1;
- arg = java_lang_boxing_object::create(bt, &value, CHECK);
- // FIXME: These boxing objects are not canonicalized under
- // the Java autoboxing rules. They should be...
- // The best approach would be to push the arglist creation into Java.
- // The JVM should use a lower-level interface to communicate argument lists.
- }
- arg_array->obj_at_put(i, arg);
- }
-
- // now find the bootstrap method
- oop bootstrap_mh_oop = instanceKlass::cast(fr.interpreter_frame_method()->method_holder())->bootstrap_method();
- assert(bootstrap_mh_oop != NULL, "resolve_invokedynamic ensures a BSM");
-
- // return the bootstrap method and argument array via vm_result/_2
- thread->set_vm_result(bootstrap_mh_oop);
- thread->set_vm_result_2(arg_array());
-}
-IRT_END
-
-
-
//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous
@@ -1305,7 +1250,7 @@ IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* threa
methodHandle mh(thread, fr.interpreter_frame_method());
Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
ArgumentSizeComputer asc(invoke->signature());
- int size_of_arguments = (asc.size() + (invoke->is_invokestatic() ? 0 : 1)); // receiver
+ int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
Copy::conjoint_bytes(src_address, dest_address,
size_of_arguments * Interpreter::stackElementSize());
IRT_END
diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
index 76b9e9c2191..b0a616308e4 100644
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp
@@ -91,7 +91,6 @@ class InterpreterRuntime: AllStatic {
// Calls
static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode);
static void resolve_invokedynamic(JavaThread* thread);
- static void bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site);
// Breakpoints
static void _breakpoint(JavaThread* thread, methodOopDesc* method, address bcp);
diff --git a/hotspot/src/share/vm/interpreter/linkResolver.cpp b/hotspot/src/share/vm/interpreter/linkResolver.cpp
index c9a2c13c5b1..4c5fd690393 100644
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp
@@ -55,7 +55,7 @@ void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_kl
// we should pick the vtable index from the resolved method.
// Other than that case, there is no valid vtable index to specify.
int vtable_index = methodOopDesc::invalid_vtable_index;
- if (resolved_method->method_holder() == SystemDictionary::object_klass()) {
+ if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
vtable_index = resolved_method->vtable_index();
}
@@ -75,6 +75,8 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass
_selected_method = selected_method;
_vtable_index = vtable_index;
if (CompilationPolicy::mustBeCompiled(selected_method)) {
+ // This path is unusual, mostly used by the '-Xcomp' stress test mode.
+
// Note: with several active threads, the mustBeCompiled may be true
// while canBeCompiled is false; remove assert
// assert(CompilationPolicy::canBeCompiled(selected_method), "cannot compile");
@@ -82,6 +84,16 @@ void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass
// don't force compilation, resolve was on behalf of compiler
return;
}
+ if (instanceKlass::cast(selected_method->method_holder())->is_not_initialized()) {
+ // 'is_not_initialized' means not only '!is_initialized', but also that
+ // initialization has not been started yet ('!being_initialized')
+ // Do not force compilation of methods in uninitialized classes.
+ // Note that doing this would throw an assert later,
+ // in CompileBroker::compile_method.
+ // We sometimes use the link resolver to do reflective lookups
+ // even before classes are initialized.
+ return;
+ }
CompileBroker::compile_method(selected_method, InvocationEntryBci,
methodHandle(), 0, "mustBeCompiled", CHECK);
}
@@ -181,7 +193,7 @@ void LinkResolver::check_method_accessability(KlassHandle ref_klass,
// We'll check for the method name first, as that's most likely
// to be false (so we'll short-circuit out of these tests).
if (sel_method->name() == vmSymbols::clone_name() &&
- sel_klass() == SystemDictionary::object_klass() &&
+ sel_klass() == SystemDictionary::Object_klass() &&
resolved_klass->oop_is_array()) {
// We need to change "protected" to "public".
assert(flags.is_protected(), "clone not protected?");
@@ -223,6 +235,18 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle& re
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
}
+void LinkResolver::resolve_dynamic_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) {
+ // The class is java.dyn.MethodHandle
+ resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
+
+ symbolHandle method_name = vmSymbolHandles::invoke_name();
+
+ symbolHandle method_signature(THREAD, pool->signature_ref_at(index));
+ KlassHandle current_klass (THREAD, pool->pool_holder());
+
+ resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+}
+
void LinkResolver::resolve_interface_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) {
// resolve klass
@@ -1015,11 +1039,8 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle po
// This guy is reached from InterpreterRuntime::resolve_invokedynamic.
- assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "must be secondary index");
- int nt_index = pool->map_instruction_operand_to_index(raw_index);
-
// At this point, we only need the signature, and can ignore the name.
- symbolHandle method_signature(THREAD, pool->nt_signature_ref_at(nt_index));
+ symbolHandle method_signature(THREAD, pool->signature_ref_at(raw_index)); // raw_index works directly
symbolHandle method_name = vmSymbolHandles::invoke_name();
KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
diff --git a/hotspot/src/share/vm/interpreter/linkResolver.hpp b/hotspot/src/share/vm/interpreter/linkResolver.hpp
index a95873b4c90..efc1b53f5f3 100644
--- a/hotspot/src/share/vm/interpreter/linkResolver.hpp
+++ b/hotspot/src/share/vm/interpreter/linkResolver.hpp
@@ -133,6 +133,7 @@ class LinkResolver: AllStatic {
// static resolving for all calls except interface calls
static void resolve_method (methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS);
+ static void resolve_dynamic_method (methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS);
static void resolve_interface_method(methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS);
// runtime/static resolving for fields
diff --git a/hotspot/src/share/vm/interpreter/rewriter.cpp b/hotspot/src/share/vm/interpreter/rewriter.cpp
index c70c0c0356f..8ff99109349 100644
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp
@@ -48,16 +48,6 @@ void Rewriter::compute_index_maps() {
}
-int Rewriter::add_extra_cp_cache_entry(int main_entry) {
- // Hack: We put it on the map as an encoded value.
- // The only place that consumes this is ConstantPoolCacheEntry::set_initial_state
- int encoded = constantPoolCacheOopDesc::encode_secondary_index(main_entry);
- int plain_secondary_index = _cp_cache_map.append(encoded);
- return constantPoolCacheOopDesc::encode_secondary_index(plain_secondary_index);
-}
-
-
-
// Creates a constant pool cache given a CPC map
// This creates the constant pool cache initially in a state
// that is unsafe for concurrent GC processing but sets it to
@@ -127,7 +117,7 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
assert(p[-1] == Bytecodes::_invokedynamic, "");
int cp_index = Bytes::get_Java_u2(p);
int cpc = maybe_add_cp_cache_entry(cp_index); // add lazily
- int cpc2 = add_extra_cp_cache_entry(cpc);
+ int cpc2 = add_secondary_cp_cache_entry(cpc);
// Replace the trailing four bytes with a CPC index for the dynamic
// call site. Unlike other CPC entries, there is one per bytecode,
@@ -137,7 +127,7 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
// all these entries. That is the main reason invokedynamic
// must have a five-byte instruction format. (Of course, other JVM
// implementations can use the bytes for other purposes.)
- Bytes::put_native_u4(p, cpc2);
+ Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
// Note: We use native_u4 format exclusively for 4-byte indexes.
}
@@ -257,15 +247,22 @@ methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
ResourceMark rm(THREAD);
- Rewriter rw(klass, CHECK);
+ Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
// (That's all, folks.)
}
-Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
+
+void Rewriter::rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS) {
+ ResourceMark rm(THREAD);
+ Rewriter rw(klass, cpool, methods, CHECK);
+ // (That's all, folks.)
+}
+
+
+Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS)
: _klass(klass),
- // gather starting points
- _pool( THREAD, klass->constants()),
- _methods(THREAD, klass->methods())
+ _pool(cpool),
+ _methods(methods)
{
assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
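Note on the rewriter change above: rewrite_invokedynamic now patches the four
operand bytes of the five-byte invokedynamic instruction with an *encoded*
secondary cache index, while the cache map itself (see rewriter.hpp below) only
carries the _secondary_entry_tag bit. A minimal, self-contained sketch of that
byte-patching step follows; encode_secondary_index here is a toy stand-in for
the VM's constantPoolCacheOopDesc routine, and the buffer layout is assumed from
the five-byte format described in the comment above.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Toy stand-in for constantPoolCacheOopDesc::encode_secondary_index; the real
    // encoding is VM-internal, this one merely makes the value distinguishable
    // from a plain constant pool cache index.
    static uint32_t encode_secondary_index(uint32_t index) { return ~index; }

    int main() {
      // A fake invokedynamic instruction: opcode byte + u2 cp index + 2 zero bytes.
      uint8_t bc[5] = { 0xba /* invokedynamic */, 0x00, 0x07, 0x00, 0x00 };

      uint32_t cpc2 = 3;                            // secondary CP cache entry chosen by the rewriter
      uint32_t enc  = encode_secondary_index(cpc2);

      // Replace the trailing four bytes with the encoded index in native byte
      // order (Bytes::put_native_u4 in the VM).
      std::memcpy(&bc[1], &enc, sizeof(enc));

      uint32_t back;
      std::memcpy(&back, &bc[1], sizeof(back));
      printf("stored operand 0x%08x, decodes back to entry %u\n",
             (unsigned)back, (unsigned)~back);
      return 0;
    }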
diff --git a/hotspot/src/share/vm/interpreter/rewriter.hpp b/hotspot/src/share/vm/interpreter/rewriter.hpp
index 2546b57ef37..39325e743aa 100644
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp
@@ -43,16 +43,21 @@ class Rewriter: public StackObj {
bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
int maybe_add_cp_cache_entry(int i) { return has_cp_cache(i) ? _cp_map[i] : add_cp_cache_entry(i); }
int add_cp_cache_entry(int cp_index) {
+ assert((cp_index & _secondary_entry_tag) == 0, "bad tag");
assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
int cache_index = _cp_cache_map.append(cp_index);
_cp_map.at_put(cp_index, cache_index);
assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
return cache_index;
}
- int add_extra_cp_cache_entry(int main_entry);
+ int add_secondary_cp_cache_entry(int main_cpc_entry) {
+ assert(main_cpc_entry < _cp_cache_map.length(), "must be earlier CP cache entry");
+ int cache_index = _cp_cache_map.append(main_cpc_entry | _secondary_entry_tag);
+ return cache_index;
+ }
// All the work goes in here:
- Rewriter(instanceKlassHandle klass, TRAPS);
+ Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS);
void compute_index_maps();
void make_constant_pool_cache(TRAPS);
@@ -65,4 +70,9 @@ class Rewriter: public StackObj {
public:
// Driver routine:
static void rewrite(instanceKlassHandle klass, TRAPS);
+ static void rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS);
+
+ enum {
+ _secondary_entry_tag = nth_bit(30)
+ };
};
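Note on _secondary_entry_tag: tagging bit 30 lets a single growable int array
carry both plain constant pool indices (main entries) and back-references to an
earlier main entry (secondary entries for invokedynamic). A hedged, standalone
sketch of that tag arithmetic follows, with a std::vector standing in for the
VM's intArray; all names are illustrative.

    #include <cassert>
    #include <cstdio>
    #include <vector>

    static const int secondary_entry_tag = 1 << 30;   // mirrors nth_bit(30)

    static std::vector<int> cp_cache_map;             // cache index -> cp index, or tagged main entry

    static int add_cp_cache_entry(int cp_index) {
      assert((cp_index & secondary_entry_tag) == 0 && "bad tag");
      cp_cache_map.push_back(cp_index);
      return (int)cp_cache_map.size() - 1;
    }

    static int add_secondary_cp_cache_entry(int main_cpc_entry) {
      assert(main_cpc_entry < (int)cp_cache_map.size() && "must be earlier CP cache entry");
      cp_cache_map.push_back(main_cpc_entry | secondary_entry_tag);
      return (int)cp_cache_map.size() - 1;
    }

    int main() {
      int cpc  = add_cp_cache_entry(7);               // main entry for constant pool index 7
      int cpc2 = add_secondary_cp_cache_entry(cpc);   // secondary entry pointing back at cpc
      int raw  = cp_cache_map[cpc2];
      if (raw & secondary_entry_tag) {
        printf("entry %d is secondary, main entry = %d\n", cpc2, raw & ~secondary_entry_tag);
      }
      return 0;
    }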
diff --git a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp
index 9f12f44e3a5..e617623506b 100644
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp
@@ -178,14 +178,12 @@ EntryPoint TemplateInterpreter::_trace_code;
#endif // !PRODUCT
EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries];
EntryPoint TemplateInterpreter::_earlyret_entry;
-EntryPoint TemplateInterpreter::_return_unbox_entry;
EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ];
EntryPoint TemplateInterpreter::_continuation_entry;
EntryPoint TemplateInterpreter::_safept_entry;
address TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
address TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
-address TemplateInterpreter::_return_5_unbox_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
DispatchTable TemplateInterpreter::_active_table;
DispatchTable TemplateInterpreter::_normal_table;
@@ -253,22 +251,6 @@ void TemplateInterpreterGenerator::generate_all() {
}
}
- if (EnableInvokeDynamic) {
- CodeletMark cm(_masm, "unboxing return entry points");
- Interpreter::_return_unbox_entry =
- EntryPoint(
- generate_return_unbox_entry_for(btos, 5),
- generate_return_unbox_entry_for(ctos, 5),
- generate_return_unbox_entry_for(stos, 5),
- generate_return_unbox_entry_for(atos, 5), // cast conversion
- generate_return_unbox_entry_for(itos, 5),
- generate_return_unbox_entry_for(ltos, 5),
- generate_return_unbox_entry_for(ftos, 5),
- generate_return_unbox_entry_for(dtos, 5),
- Interpreter::_return_entry[5].entry(vtos) // no unboxing for void
- );
- }
-
{ CodeletMark cm(_masm, "earlyret entry points");
Interpreter::_earlyret_entry =
EntryPoint(
@@ -319,8 +301,6 @@ void TemplateInterpreterGenerator::generate_all() {
int index = Interpreter::TosState_as_index(states[j]);
Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
- if (EnableInvokeDynamic)
- Interpreter::_return_5_unbox_addrs_by_index[index] = Interpreter::return_unbox_entry(states[j], 5);
}
{ CodeletMark cm(_masm, "continuation entry points");
@@ -485,9 +465,11 @@ void TemplateInterpreterGenerator::set_wide_entry_point(Template* t, address& we
void TemplateInterpreterGenerator::set_short_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
assert(t->is_valid(), "template must exist");
switch (t->tos_in()) {
- case btos: vep = __ pc(); __ pop(btos); bep = __ pc(); generate_and_dispatch(t); break;
- case ctos: vep = __ pc(); __ pop(ctos); sep = __ pc(); generate_and_dispatch(t); break;
- case stos: vep = __ pc(); __ pop(stos); sep = __ pc(); generate_and_dispatch(t); break;
+ case btos:
+ case ctos:
+ case stos:
+ ShouldNotReachHere(); // btos/ctos/stos should use itos.
+ break;
case atos: vep = __ pc(); __ pop(atos); aep = __ pc(); generate_and_dispatch(t); break;
case itos: vep = __ pc(); __ pop(itos); iep = __ pc(); generate_and_dispatch(t); break;
case ltos: vep = __ pc(); __ pop(ltos); lep = __ pc(); generate_and_dispatch(t); break;
@@ -547,18 +529,6 @@ address TemplateInterpreter::return_entry(TosState state, int length) {
}
-address TemplateInterpreter::return_unbox_entry(TosState state, int length) {
- assert(EnableInvokeDynamic, "");
- if (state == vtos) {
- // no unboxing to do, actually
- return return_entry(state, length);
- } else {
- assert(length == 5, "unboxing entries generated for invokedynamic only");
- return _return_unbox_entry.entry(state);
- }
-}
-
-
address TemplateInterpreter::deopt_entry(TosState state, int length) {
guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
return _deopt_entry[length].entry(state);
diff --git a/hotspot/src/share/vm/interpreter/templateInterpreter.hpp b/hotspot/src/share/vm/interpreter/templateInterpreter.hpp
index 7de665b882f..b3eed1c3a4e 100644
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.hpp
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.hpp
@@ -110,14 +110,12 @@ class TemplateInterpreter: public AbstractInterpreter {
#endif // !PRODUCT
static EntryPoint _return_entry[number_of_return_entries]; // entry points to return to from a call
static EntryPoint _earlyret_entry; // entry point to return early from a call
- static EntryPoint _return_unbox_entry; // entry point to unbox a return value from a call
static EntryPoint _deopt_entry[number_of_deopt_entries]; // entry points to return to from a deoptimization
static EntryPoint _continuation_entry;
static EntryPoint _safept_entry;
static address _return_3_addrs_by_index[number_of_return_addrs]; // for invokevirtual return entries
static address _return_5_addrs_by_index[number_of_return_addrs]; // for invokeinterface return entries
- static address _return_5_unbox_addrs_by_index[number_of_return_addrs]; // for invokedynamic bootstrap methods
static DispatchTable _active_table; // the active dispatch table (used by the interpreter for dispatch)
static DispatchTable _normal_table; // the normal dispatch table (used to set the active table in normal mode)
@@ -159,12 +157,10 @@ class TemplateInterpreter: public AbstractInterpreter {
// Support for invokes
static address* return_3_addrs_by_index_table() { return _return_3_addrs_by_index; }
static address* return_5_addrs_by_index_table() { return _return_5_addrs_by_index; }
- static address* return_5_unbox_addrs_by_index_table() { return _return_5_unbox_addrs_by_index; }
static int TosState_as_index(TosState state); // computes index into return_3_entry_by_index table
static address return_entry (TosState state, int length);
static address deopt_entry (TosState state, int length);
- static address return_unbox_entry(TosState state, int length);
// Safepoint support
static void notice_safepoints(); // stops the thread when reaching a safepoint
diff --git a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp
index 9d5b694049e..676e762725d 100644
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp
@@ -51,10 +51,7 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
address generate_WrongMethodType_handler();
address generate_ArrayIndexOutOfBounds_handler(const char* name);
address generate_continuation_for(TosState state);
- address generate_return_entry_for(TosState state, int step, bool unbox = false);
- address generate_return_unbox_entry_for(TosState state, int step) {
- return generate_return_entry_for(state, step, true);
- }
+ address generate_return_entry_for(TosState state, int step);
address generate_earlyret_entry_for(TosState state);
address generate_deopt_entry_for(TosState state, int step);
address generate_safept_entry_for(TosState state, address runtime_entry);
diff --git a/hotspot/src/share/vm/memory/barrierSet.cpp b/hotspot/src/share/vm/memory/barrierSet.cpp
index 07ff7a0fc21..8cd80b7c437 100644
--- a/hotspot/src/share/vm/memory/barrierSet.cpp
+++ b/hotspot/src/share/vm/memory/barrierSet.cpp
@@ -41,11 +41,6 @@ void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
// count is number of array elements being written
void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
- assert(count <= (size_t)max_intx, "count too large");
- HeapWord* end = start + objArrayOopDesc::array_size((int)count);
-#if 0
- warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
- start, count, start, end);
-#endif
- Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, end));
+ // simply delegate to instance method
+ Universe::heap()->barrier_set()->write_ref_array(start, count);
}
diff --git a/hotspot/src/share/vm/memory/barrierSet.hpp b/hotspot/src/share/vm/memory/barrierSet.hpp
index 0fc6a006140..c624f2506e2 100644
--- a/hotspot/src/share/vm/memory/barrierSet.hpp
+++ b/hotspot/src/share/vm/memory/barrierSet.hpp
@@ -121,17 +121,20 @@ public:
virtual void read_ref_array(MemRegion mr) = 0;
virtual void read_prim_array(MemRegion mr) = 0;
+ // Below length is the # array elements being written
virtual void write_ref_array_pre( oop* dst, int length) {}
virtual void write_ref_array_pre(narrowOop* dst, int length) {}
+ // Below MemRegion mr is expected to be HeapWord-aligned
inline void write_ref_array(MemRegion mr);
+ // Below count is the # array elements being written, starting
+ // at the address "start", which may not necessarily be HeapWord-aligned
+ inline void write_ref_array(HeapWord* start, size_t count);
- // Static versions, suitable for calling from generated code.
+ // Static versions, suitable for calling from generated code;
+ // count is # array elements being written, starting with "start",
+ // which may not necessarily be HeapWord-aligned.
static void static_write_ref_array_pre(HeapWord* start, size_t count);
static void static_write_ref_array_post(HeapWord* start, size_t count);
- // Narrow oop versions of the above; count is # of array elements being written,
- // starting with "start", which is HeapWord-aligned.
- static void static_write_ref_array_pre_narrow(HeapWord* start, size_t count);
- static void static_write_ref_array_post_narrow(HeapWord* start, size_t count);
protected:
virtual void write_ref_array_work(MemRegion mr) = 0;
diff --git a/hotspot/src/share/vm/memory/barrierSet.inline.hpp b/hotspot/src/share/vm/memory/barrierSet.inline.hpp
index edcb551bdcd..ddc398348ca 100644
--- a/hotspot/src/share/vm/memory/barrierSet.inline.hpp
+++ b/hotspot/src/share/vm/memory/barrierSet.inline.hpp
@@ -43,6 +43,8 @@ void BarrierSet::write_ref_field(void* field, oop new_val) {
}
void BarrierSet::write_ref_array(MemRegion mr) {
+ assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start");
+ assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_array(mr);
} else {
@@ -50,6 +52,34 @@ void BarrierSet::write_ref_array(MemRegion mr) {
}
}
+// count is number of array elements being written
+void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
+ assert(count <= (size_t)max_intx, "count too large");
+ HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
+ // In the case of compressed oops, start and end may potentially be misaligned;
+ // so we need to conservatively align the first downward (this is not
+ // strictly necessary for current uses, but a case of good hygiene and,
+ // if you will, aesthetics) and the second upward (this is essential for
+ // current uses) to a HeapWord boundary, so we mark all cards overlapping
+ // this write. In the event that this evolves in the future to calling a
+ // logging barrier of narrow oop granularity, like the pre-barrier for G1
+ // (mentioned here merely by way of example), we will need to change this
+ // interface, much like the pre-barrier one above, so it is "exactly precise"
+ // (if I may be allowed the adverbial redundancy for emphasis) and does not
+ // include narrow oop slots not included in the original write interval.
+ HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
+ HeapWord* aligned_end = (HeapWord*)align_size_up ((uintptr_t)end, HeapWordSize);
+ // If compressed oops were not being used, these should already be aligned
+ assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+ "Expected heap word alignment of start and end");
+#if 0
+ warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
+ start, count, aligned_start, aligned_end);
+#endif
+ write_ref_array_work(MemRegion(aligned_start, aligned_end));
+}
+
+
void BarrierSet::write_region(MemRegion mr) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_region(mr);
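Note on the new write_ref_array(HeapWord*, size_t): with compressed oops an
element boundary need not be a HeapWord boundary, so the written interval has
to be widened to whole HeapWords before it is handed to write_ref_array_work.
A small standalone illustration of that widening, with the 64-bit sizes spelled
out as assumptions (HeapWordSize = 8 bytes, heapOopSize = 4 bytes):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static const size_t HeapWordSize = 8;   // bytes per HeapWord (assumed LP64)
    static const size_t heapOopSize  = 4;   // bytes per narrow oop slot

    static uintptr_t align_down(uintptr_t p, size_t a) { return p & ~(uintptr_t)(a - 1); }
    static uintptr_t align_up  (uintptr_t p, size_t a) { return (p + a - 1) & ~(uintptr_t)(a - 1); }

    int main() {
      // Two narrow oop slots written starting in the middle of a HeapWord.
      uintptr_t start = 0x1004;                       // not HeapWord-aligned
      size_t    count = 2;                            // # of oop elements written
      uintptr_t end   = start + count * heapOopSize;  // 0x100c, also unaligned

      uintptr_t aligned_start = align_down(start, HeapWordSize);  // 0x1000
      uintptr_t aligned_end   = align_up  (end,   HeapWordSize);  // 0x1010

      // Card marks must cover every HeapWord overlapping the written slots.
      printf("raw     [0x%lx, 0x%lx)\n", (unsigned long)start, (unsigned long)end);
      printf("aligned [0x%lx, 0x%lx)\n", (unsigned long)aligned_start, (unsigned long)aligned_end);
      assert(aligned_start <= start && end <= aligned_end);
      return 0;
    }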
diff --git a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp
index c036a355514..2deb7da83f0 100644
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp
@@ -511,6 +511,8 @@ void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
}
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
+ assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+ assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
jbyte* cur = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
while (cur < last) {
@@ -520,6 +522,8 @@ void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
}
void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
+ assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+ assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) dirty_MemRegion(mri);
diff --git a/hotspot/src/share/vm/memory/classify.cpp b/hotspot/src/share/vm/memory/classify.cpp
index 0cb6a572cb3..bc4fecd74ff 100644
--- a/hotspot/src/share/vm/memory/classify.cpp
+++ b/hotspot/src/share/vm/memory/classify.cpp
@@ -49,7 +49,7 @@ object_type ClassifyObjectClosure::classify_object(oop obj, bool count) {
Klass* k = obj->blueprint();
- if (k->as_klassOop() == SystemDictionary::object_klass()) {
+ if (k->as_klassOop() == SystemDictionary::Object_klass()) {
tty->print_cr("Found the class!");
}
diff --git a/hotspot/src/share/vm/memory/collectorPolicy.cpp b/hotspot/src/share/vm/memory/collectorPolicy.cpp
index 3f885d9ecba..ebb886fbad8 100644
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp
@@ -55,7 +55,7 @@ void CollectorPolicy::initialize_flags() {
void CollectorPolicy::initialize_size_info() {
  // User inputs from -mx and -ms are aligned
- set_initial_heap_byte_size(Arguments::initial_heap_size());
+ set_initial_heap_byte_size(InitialHeapSize);
if (initial_heap_byte_size() == 0) {
set_initial_heap_byte_size(NewSize + OldSize);
}
diff --git a/hotspot/src/share/vm/memory/defNewGeneration.cpp b/hotspot/src/share/vm/memory/defNewGeneration.cpp
index 7db8b9dea97..875cf00817b 100644
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp
@@ -609,7 +609,7 @@ void DefNewGeneration::collect(bool full,
remove_forwarding_pointers();
if (PrintGCDetails) {
- gclog_or_tty->print(" (promotion failed)");
+ gclog_or_tty->print(" (promotion failed) ");
}
// Add to-space to the list of space to compact
// when a promotion failure has occurred. In that
@@ -620,6 +620,9 @@ void DefNewGeneration::collect(bool full,
from()->set_next_compaction_space(to());
gch->set_incremental_collection_will_fail();
+ // Inform the next generation that a promotion failure occurred.
+ _next_gen->promotion_failure_occurred();
+
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
@@ -679,6 +682,11 @@ void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
void DefNewGeneration::handle_promotion_failure(oop old) {
preserve_mark_if_necessary(old, old->mark());
+ if (!_promotion_failed && PrintPromotionFailure) {
+ gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
+ old->size());
+ }
+
// forward to self
old->forward_to(old);
_promotion_failed = true;
diff --git a/hotspot/src/share/vm/memory/dump.cpp b/hotspot/src/share/vm/memory/dump.cpp
index 658e8347665..09ab997de10 100644
--- a/hotspot/src/share/vm/memory/dump.cpp
+++ b/hotspot/src/share/vm/memory/dump.cpp
@@ -63,7 +63,7 @@ public:
void do_oop(oop* p) {
if (p != NULL) {
oop obj = *p;
- if (obj->klass() == SystemDictionary::string_klass()) {
+ if (obj->klass() == SystemDictionary::String_klass()) {
int hash;
typeArrayOop value = java_lang_String::value(obj);
@@ -625,11 +625,11 @@ public:
if (obj->is_klass() || obj->is_instance()) {
if (obj->is_klass() ||
- obj->is_a(SystemDictionary::class_klass()) ||
- obj->is_a(SystemDictionary::throwable_klass())) {
+ obj->is_a(SystemDictionary::Class_klass()) ||
+ obj->is_a(SystemDictionary::Throwable_klass())) {
// Do nothing
}
- else if (obj->is_a(SystemDictionary::string_klass())) {
+ else if (obj->is_a(SystemDictionary::String_klass())) {
// immutable objects.
} else {
// someone added an object we hadn't accounted for.
diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.cpp b/hotspot/src/share/vm/memory/genCollectedHeap.cpp
index 5bff1597062..f85fe142156 100644
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp
@@ -51,6 +51,8 @@ GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
}
jint GenCollectedHeap::initialize() {
+ CollectedHeap::pre_initialize();
+
int i;
_n_gens = gen_policy()->number_of_generations();
@@ -129,6 +131,7 @@ jint GenCollectedHeap::initialize() {
_rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
set_barrier_set(rem_set()->bs());
+
_gch = this;
for (i = 0; i < _n_gens; i++) {
@@ -925,6 +928,8 @@ bool GenCollectedHeap::is_in(const void* p) const {
guarantee(VerifyBeforeGC ||
VerifyDuringGC ||
VerifyBeforeExit ||
+ PrintAssembly ||
+ tty->count() != 0 || // already printing
VerifyAfterGC, "too expensive");
#endif
// This might be sped up with a cache of the last generation that
diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.hpp b/hotspot/src/share/vm/memory/genCollectedHeap.hpp
index 9004e0d842c..8295d078bfa 100644
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp
@@ -260,6 +260,10 @@ public:
return true;
}
+ virtual bool card_mark_must_follow_store() const {
+ return UseConcMarkSweepGC;
+ }
+
// We don't need barriers for stores to objects in the
// young gen and, a fortiori, for initializing stores to
// objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
diff --git a/hotspot/src/share/vm/memory/generation.hpp b/hotspot/src/share/vm/memory/generation.hpp
index 985e9db1028..e39be059506 100644
--- a/hotspot/src/share/vm/memory/generation.hpp
+++ b/hotspot/src/share/vm/memory/generation.hpp
@@ -181,6 +181,12 @@ class Generation: public CHeapObj {
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
bool younger_handles_promotion_failure) const;
+ // For a non-young generation, this interface can be used to inform a
+ // generation that a promotion attempt into that generation failed.
+ // Typically used to enable diagnostic output for post-mortem analysis,
+ // but other uses of the interface are not ruled out.
+ virtual void promotion_failure_occurred() { /* does nothing */ }
+
// Return an estimate of the maximum allocation that could be performed
// in the generation without triggering any collection or expansion
// activity. It is "unsafe" because no locks are taken; the result
diff --git a/hotspot/src/share/vm/memory/heap.cpp b/hotspot/src/share/vm/memory/heap.cpp
index 4f638fda468..2d355ca6800 100644
--- a/hotspot/src/share/vm/memory/heap.cpp
+++ b/hotspot/src/share/vm/memory/heap.cpp
@@ -464,7 +464,7 @@ void CodeHeap::verify() {
}
// Verify that freelist contains the right amount of free space
- guarantee(len == _free_segments, "wrong freelist");
+ // guarantee(len == _free_segments, "wrong freelist");
// Verify that the number of free blocks is not out of hand.
static int free_block_threshold = 10000;
@@ -479,5 +479,5 @@ void CodeHeap::verify() {
for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
if (h->free()) count--;
}
- guarantee(count == 0, "missing free blocks");
+ // guarantee(count == 0, "missing free blocks");
}
diff --git a/hotspot/src/share/vm/memory/referenceProcessor.cpp b/hotspot/src/share/vm/memory/referenceProcessor.cpp
index 134181c45db..ac1b53c30b6 100644
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp
@@ -71,7 +71,7 @@ void ReferenceProcessor::init_statics() {
assert(_sentinelRef == NULL, "should be initialized precisely once");
EXCEPTION_MARK;
_sentinelRef = instanceKlass::cast(
- SystemDictionary::reference_klass())->
+ SystemDictionary::Reference_klass())->
allocate_permanent_instance(THREAD);
// Initialize the master soft ref clock.
@@ -299,8 +299,8 @@ void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
template <class T>
-static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
- AbstractRefProcTaskExecutor* task_executor) {
+bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
+ AbstractRefProcTaskExecutor* task_executor) {
// Remember old value of pending references list
T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
diff --git a/hotspot/src/share/vm/memory/sharedHeap.hpp b/hotspot/src/share/vm/memory/sharedHeap.hpp
index 0bf863fb133..6094387241b 100644
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp
@@ -224,10 +224,6 @@ public:
CodeBlobClosure* code_roots,
OopClosure* non_root_closure);
-
- // Like CollectedHeap::collect, but assume that the caller holds the Heap_lock.
- virtual void collect_locked(GCCause::Cause cause) = 0;
-
// The functions below are helper functions that a subclass of
// "SharedHeap" can use in the implementation of its virtual
// functions.
diff --git a/hotspot/src/share/vm/memory/space.cpp b/hotspot/src/share/vm/memory/space.cpp
index 53c3c2600e1..38e58ba6886 100644
--- a/hotspot/src/share/vm/memory/space.cpp
+++ b/hotspot/src/share/vm/memory/space.cpp
@@ -876,7 +876,7 @@ void ContiguousSpace::allocate_temporary_filler(int factor) {
instanceOop obj = (instanceOop) allocate(size);
obj->set_mark(markOopDesc::prototype());
obj->set_klass_gap(0);
- obj->set_klass(SystemDictionary::object_klass());
+ obj->set_klass(SystemDictionary::Object_klass());
}
}
diff --git a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp
index be7538604ff..5aa36245cde 100644
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp
+++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp
@@ -100,7 +100,7 @@ void ThreadLocalAllocBuffer::accumulate_statistics() {
void ThreadLocalAllocBuffer::make_parsable(bool retire) {
if (end() != NULL) {
invariants();
- CollectedHeap::fill_with_object(top(), hard_end());
+ CollectedHeap::fill_with_object(top(), hard_end(), retire);
if (retire || ZeroTLAB) { // "Reset" the TLAB
set_start(NULL);
diff --git a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
index 68b2d92a1e3..361ae3aecb1 100644
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
+++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,13 @@ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) {
HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) {
// successful thread-local allocation
-
- DEBUG_ONLY(Copy::fill_to_words(obj, size, badHeapWordVal));
+#ifdef ASSERT
+ // Skip mangling the space corresponding to the object header to
+ // ensure that the returned space is not considered parsable by
+ // any concurrent GC thread.
+ size_t hdr_size = CollectedHeap::min_fill_size();
+ Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal);
+#endif // ASSERT
// This addition is safe because we know that top is
// at least size below end, so the add can't wrap.
set_top(obj + size);
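Note on the TLAB mangling change: in debug builds the freshly allocated space is
still filled with badHeapWordVal, but the first min_fill_size() words are now
left untouched so that a concurrently scanning GC thread never sees a mangled
header and treats the space as parsable. A toy standalone rendering of the same
fill pattern; the two-word header size is an assumption for illustration only.

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t badHeapWordVal = 0xBAADBABEUL;

    // Debug-only mangling helper, modelled on Copy::fill_to_words.
    static void fill_to_words(uintptr_t* p, size_t count, uintptr_t value) {
      for (size_t i = 0; i < count; i++) p[i] = value;
    }

    int main() {
      const size_t obj_size = 6;   // HeapWords handed out by the TLAB
      const size_t hdr_size = 2;   // assumed min_fill_size(): words left unmangled
      uintptr_t obj[obj_size] = {0};

      // Skip the header words, mangle only the body (as in allocate() above).
      fill_to_words(obj + hdr_size, obj_size - hdr_size, badHeapWordVal);

      for (size_t i = 0; i < obj_size; i++)
        printf("word %zu = 0x%lx\n", i, (unsigned long)obj[i]);
      return 0;
    }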
diff --git a/hotspot/src/share/vm/memory/universe.cpp b/hotspot/src/share/vm/memory/universe.cpp
index 5043f9e2599..5803a4d356e 100644
--- a/hotspot/src/share/vm/memory/universe.cpp
+++ b/hotspot/src/share/vm/memory/universe.cpp
@@ -67,6 +67,8 @@ typeArrayOop Universe::_the_empty_int_array = NULL;
objArrayOop Universe::_the_empty_system_obj_array = NULL;
objArrayOop Universe::_the_empty_class_klass_array = NULL;
objArrayOop Universe::_the_array_interfaces_array = NULL;
+oop Universe::_the_null_string = NULL;
+oop Universe::_the_min_jint_string = NULL;
LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
LatestMethodOopCache* Universe::_loader_addClass_cache = NULL;
ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL;
@@ -187,6 +189,8 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
f->do_oop((oop*)&_the_empty_system_obj_array);
f->do_oop((oop*)&_the_empty_class_klass_array);
f->do_oop((oop*)&_the_array_interfaces_array);
+ f->do_oop((oop*)&_the_null_string);
+ f->do_oop((oop*)&_the_min_jint_string);
_finalizer_register_cache->oops_do(f);
_loader_addClass_cache->oops_do(f);
_reflect_invoke_cache->oops_do(f);
@@ -287,14 +291,17 @@ void Universe::genesis(TRAPS) {
SystemDictionary::initialize(CHECK);
- klassOop ok = SystemDictionary::object_klass();
+ klassOop ok = SystemDictionary::Object_klass();
+
+ _the_null_string = StringTable::intern("null", CHECK);
+ _the_min_jint_string = StringTable::intern("-2147483648", CHECK);
if (UseSharedSpaces) {
// Verify shared interfaces array.
assert(_the_array_interfaces_array->obj_at(0) ==
- SystemDictionary::cloneable_klass(), "u3");
+ SystemDictionary::Cloneable_klass(), "u3");
assert(_the_array_interfaces_array->obj_at(1) ==
- SystemDictionary::serializable_klass(), "u3");
+ SystemDictionary::Serializable_klass(), "u3");
// Verify element klass for system obj array klass
assert(objArrayKlass::cast(_systemObjArrayKlassObj)->element_klass() == ok, "u1");
@@ -313,8 +320,8 @@ void Universe::genesis(TRAPS) {
assert(Klass::cast(systemObjArrayKlassObj())->super() == ok, "u3");
} else {
// Set up shared interfaces array. (Do this before supers are set up.)
- _the_array_interfaces_array->obj_at_put(0, SystemDictionary::cloneable_klass());
- _the_array_interfaces_array->obj_at_put(1, SystemDictionary::serializable_klass());
+ _the_array_interfaces_array->obj_at_put(0, SystemDictionary::Cloneable_klass());
+ _the_array_interfaces_array->obj_at_put(1, SystemDictionary::Serializable_klass());
// Set element klass for system obj array klass
objArrayKlass::cast(_systemObjArrayKlassObj)->set_element_klass(ok);
@@ -358,7 +365,7 @@ void Universe::genesis(TRAPS) {
  // Initialize _objectArrayKlass after core bootstrapping to make
// sure the super class is set up properly for _objectArrayKlass.
_objectArrayKlassObj = instanceKlass::
- cast(SystemDictionary::object_klass())->array_klass(1, CHECK);
+ cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
// Add the class to the class hierarchy manually to make sure that
// its vtable is initialized after core bootstrapping is completed.
Klass::cast(_objectArrayKlassObj)->append_to_sibling_list();
@@ -419,11 +426,11 @@ void Universe::genesis(TRAPS) {
while (i < size) {
if (!UseConcMarkSweepGC) {
// Allocate dummy in old generation
- oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_instance(CHECK);
+ oop dummy = instanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
dummy_array->obj_at_put(i++, dummy);
}
// Allocate dummy in permanent generation
- oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_permanent_instance(CHECK);
+ oop dummy = instanceKlass::cast(SystemDictionary::Object_klass())->allocate_permanent_instance(CHECK);
dummy_array->obj_at_put(i++, dummy);
}
{
@@ -533,7 +540,7 @@ void Universe::fixup_mirrors(TRAPS) {
// but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
// walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
// that the number of objects allocated at this point is very small.
- assert(SystemDictionary::class_klass_loaded(), "java.lang.Class should be loaded");
+ assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
FixupMirrorClosure blk;
Universe::heap()->permanent_object_iterate(&blk);
}
@@ -549,7 +556,7 @@ void Universe::run_finalizers_on_exit() {
if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
{
PRESERVE_EXCEPTION_MARK;
- KlassHandle finalizer_klass(THREAD, SystemDictionary::finalizer_klass());
+ KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
JavaValue result(T_VOID);
JavaCalls::call_static(
&result,
@@ -744,22 +751,22 @@ static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+ size_t base = 0;
#ifdef _LP64
if (UseCompressedOops) {
assert(mode == UnscaledNarrowOop ||
mode == ZeroBasedNarrowOop ||
mode == HeapBasedNarrowOop, "mode is invalid");
+ const size_t total_size = heap_size + HeapBaseMinAddress;
// Return specified base for the first request.
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
- return (char*)HeapBaseMinAddress;
- }
- const size_t total_size = heap_size + HeapBaseMinAddress;
- if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
+ base = HeapBaseMinAddress;
+ } else if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
(Universe::narrow_oop_shift() == 0)) {
// Use 32-bits oops without encoding and
// place heap's top on the 4Gb boundary
- return (char*)(NarrowOopHeapMax - heap_size);
+ base = (NarrowOopHeapMax - heap_size);
} else {
// Can't reserve with NarrowOopShift == 0
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@@ -768,16 +775,38 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
// Use zero based compressed oops with encoding and
// place heap's top on the 32Gb boundary in case
// total_size > 4Gb or failed to reserve below 4Gb.
- return (char*)(OopEncodingHeapMax - heap_size);
+ base = (OopEncodingHeapMax - heap_size);
}
}
} else {
// Can't reserve below 32Gb.
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
}
+ // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
+ // used in ReservedHeapSpace() constructors.
+ // The final values will be set in initialize_heap() below.
+ if (base != 0 && (base + heap_size) <= OopEncodingHeapMax) {
+ // Use zero based compressed oops
+ Universe::set_narrow_oop_base(NULL);
+ // Don't need guard page for implicit checks in indexed
+ // addressing mode with zero based Compressed Oops.
+ Universe::set_narrow_oop_use_implicit_null_checks(true);
+ } else {
+ // Set to a non-NULL value so the ReservedSpace ctor computes
+ // the correct no-access prefix.
+ // The final value will be set in initialize_heap() below.
+ Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
+#ifdef _WIN64
+ if (UseLargePages) {
+ // Cannot allocate guard pages for implicit checks in indexed
+ // addressing mode when large pages are specified on windows.
+ Universe::set_narrow_oop_use_implicit_null_checks(false);
+ }
+#endif // _WIN64
+ }
}
#endif
- return NULL; // also return NULL (don't care) for 32-bit VM
+ return (char*)base; // also return NULL (don't care) for 32-bit VM
}
jint Universe::initialize_heap() {
@@ -921,7 +950,7 @@ bool universe_post_init() {
{ ResourceMark rm;
Interpreter::initialize(); // needed for interpreter entry points
if (!UseSharedSpaces) {
- KlassHandle ok_h(THREAD, SystemDictionary::object_klass());
+ KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
Universe::reinitialize_vtable_of(ok_h, CHECK_false);
Universe::reinitialize_itables(CHECK_false);
}
@@ -931,7 +960,7 @@ bool universe_post_init() {
instanceKlassHandle k_h;
if (!UseSharedSpaces) {
// Setup preallocated empty java.lang.Class array
- Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_false);
+ Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
// Setup preallocated OutOfMemoryError errors
k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(), true, CHECK_false);
k_h = instanceKlassHandle(THREAD, k);
@@ -998,8 +1027,8 @@ bool universe_post_init() {
// Setup static method for registering finalizers
// The finalizer klass must be linked before looking up the method, in
// case it needs to get rewritten.
- instanceKlass::cast(SystemDictionary::finalizer_klass())->link_class(CHECK_false);
- methodOop m = instanceKlass::cast(SystemDictionary::finalizer_klass())->find_method(
+ instanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
+ methodOop m = instanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
vmSymbols::register_method_name(),
vmSymbols::register_method_signature());
if (m == NULL || !m->is_static()) {
@@ -1007,7 +1036,7 @@ bool universe_post_init() {
"java.lang.ref.Finalizer.register", false);
}
Universe::_finalizer_register_cache->init(
- SystemDictionary::finalizer_klass(), m, CHECK_false);
+ SystemDictionary::Finalizer_klass(), m, CHECK_false);
// Resolve on first use and initialize class.
// Note: No race-condition here, since a resolve will always return the same result
@@ -1024,14 +1053,14 @@ bool universe_post_init() {
Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
// Setup method for registering loaded classes in class loader vector
- instanceKlass::cast(SystemDictionary::classloader_klass())->link_class(CHECK_false);
- m = instanceKlass::cast(SystemDictionary::classloader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
+ instanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
+ m = instanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
if (m == NULL || m->is_static()) {
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
"java.lang.ClassLoader.addClass", false);
}
Universe::_loader_addClass_cache->init(
- SystemDictionary::classloader_klass(), m, CHECK_false);
+ SystemDictionary::ClassLoader_klass(), m, CHECK_false);
  // The following initializes converter functions for serialization in
// JVM.cpp. If we clean up the StrictMath code above we may want to find
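Note on preferred_heap_base: the rewrite routes every case through one base
variable and, before returning, also pre-seeds narrow_oop_base and the
implicit-null-check flag that the ReservedHeapSpace constructors consult; the
final values are still fixed up in initialize_heap(). The mode selection itself
reduces to two thresholds. A simplified standalone model follows; the 4 GB and
32 GB limits and the HeapBaseMinAddress-like minimum address are passed in
explicitly and are illustrative, not the VM's actual flag handling.

    #include <cstdint>
    #include <cstdio>

    static const uint64_t NarrowOopHeapMax   = (uint64_t)1 << 32;  // 4 GB, usable with shift == 0
    static const uint64_t OopEncodingHeapMax = (uint64_t)1 << 35;  // 32 GB, usable with shift == 3

    static const char* pick_mode(uint64_t heap_size, uint64_t min_address) {
      const uint64_t total = heap_size + min_address;
      if (total <= NarrowOopHeapMax)   return "unscaled   (base == 0, shift == 0)";
      if (total <= OopEncodingHeapMax) return "zero based (base == 0, shift == 3)";
      return "heap based (non-zero base, shift == 3)";
    }

    int main() {
      const uint64_t min_addr = 2ULL << 30;                 // 2 GB, HeapBaseMinAddress-like
      printf("%s\n", pick_mode( 2ULL << 30, min_addr));     //  4 GB total
      printf("%s\n", pick_mode(20ULL << 30, min_addr));     // 22 GB total
      printf("%s\n", pick_mode(40ULL << 30, min_addr));     // 42 GB total
      return 0;
    }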
diff --git a/hotspot/src/share/vm/memory/universe.hpp b/hotspot/src/share/vm/memory/universe.hpp
index b22a1eba6c9..97044e161d4 100644
--- a/hotspot/src/share/vm/memory/universe.hpp
+++ b/hotspot/src/share/vm/memory/universe.hpp
@@ -169,6 +169,8 @@ class Universe: AllStatic {
static objArrayOop _the_empty_system_obj_array; // Canonicalized system obj array
static objArrayOop _the_empty_class_klass_array; // Canonicalized obj array of type java.lang.Class
static objArrayOop _the_array_interfaces_array; // Canonicalized 2-array of cloneable & serializable klasses
+ static oop _the_null_string; // A cache of "null" as a Java string
+ static oop _the_min_jint_string; // A cache of "-2147483648" as a Java string
static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects
static LatestMethodOopCache* _loader_addClass_cache; // method for registering loaded classes in class loader vector
static ActiveMethodOopsCache* _reflect_invoke_cache; // method for security checks
@@ -310,6 +312,8 @@ class Universe: AllStatic {
static objArrayOop the_empty_system_obj_array () { return _the_empty_system_obj_array; }
static objArrayOop the_empty_class_klass_array () { return _the_empty_class_klass_array; }
static objArrayOop the_array_interfaces_array() { return _the_array_interfaces_array; }
+ static oop the_null_string() { return _the_null_string; }
+ static oop the_min_jint_string() { return _the_min_jint_string; }
static methodOop finalizer_register_method() { return _finalizer_register_cache->get_methodOop(); }
static methodOop loader_addClass_method() { return _loader_addClass_cache->get_methodOop(); }
static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }
diff --git a/hotspot/src/share/vm/oops/arrayKlass.cpp b/hotspot/src/share/vm/oops/arrayKlass.cpp
index 7ff6d2f2f57..ac50c258663 100644
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp
@@ -43,7 +43,7 @@ klassOop arrayKlass::java_super() const {
if (super() == NULL) return NULL; // bootstrap case
// Array klasses have primary supertypes which are not reported to Java.
// Example super chain: String[][] -> Object[][] -> Object[] -> Object
- return SystemDictionary::object_klass();
+ return SystemDictionary::Object_klass();
}
@@ -82,7 +82,7 @@ const Klass_vtbl& cplusplus_vtbl, int header_size, KlassHandle klass, TRAPS) {
k = arrayKlassHandle(THREAD, base_klass());
assert(!k()->is_parsable(), "not expecting parsability yet.");
- k->set_super(Universe::is_bootstrapping() ? (klassOop)NULL : SystemDictionary::object_klass());
+ k->set_super(Universe::is_bootstrapping() ? (klassOop)NULL : SystemDictionary::Object_klass());
k->set_layout_helper(Klass::_lh_neutral_value);
k->set_dimension(1);
k->set_higher_dimension(NULL);
@@ -117,9 +117,9 @@ objArrayOop arrayKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
bool arrayKlass::compute_is_subtype_of(klassOop k) {
  // An array is a subtype of Serializable, Cloneable, and Object
- return k == SystemDictionary::object_klass()
- || k == SystemDictionary::cloneable_klass()
- || k == SystemDictionary::serializable_klass();
+ return k == SystemDictionary::Object_klass()
+ || k == SystemDictionary::Cloneable_klass()
+ || k == SystemDictionary::Serializable_klass();
}
diff --git a/hotspot/src/share/vm/oops/arrayKlass.hpp b/hotspot/src/share/vm/oops/arrayKlass.hpp
index b2bc0862c73..40280501b8d 100644
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp
@@ -67,7 +67,7 @@ class arrayKlass: public Klass {
// Compiler/Interpreter offset
static ByteSize component_mirror_offset() { return byte_offset_of(arrayKlass, _component_mirror); }
- virtual klassOop java_super() const;//{ return SystemDictionary::object_klass(); }
+ virtual klassOop java_super() const;//{ return SystemDictionary::Object_klass(); }
// Allocation
// Sizes points to the first dimension of the array, subsequent dimensions
diff --git a/hotspot/src/share/vm/oops/arrayKlassKlass.cpp b/hotspot/src/share/vm/oops/arrayKlassKlass.cpp
index 1757aae8a1f..918a7dd9bee 100644
--- a/hotspot/src/share/vm/oops/arrayKlassKlass.cpp
+++ b/hotspot/src/share/vm/oops/arrayKlassKlass.cpp
@@ -159,7 +159,7 @@ void arrayKlassKlass::oop_print_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
klassKlass::oop_print_on(obj, st);
}
-
+#endif //PRODUCT
void arrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
@@ -168,7 +168,6 @@ void arrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
st->print("[]");
}
}
-#endif
const char* arrayKlassKlass::internal_name() const {
diff --git a/hotspot/src/share/vm/oops/arrayKlassKlass.hpp b/hotspot/src/share/vm/oops/arrayKlassKlass.hpp
index 1d98d21745a..75a2becd1c9 100644
--- a/hotspot/src/share/vm/oops/arrayKlassKlass.hpp
+++ b/hotspot/src/share/vm/oops/arrayKlassKlass.hpp
@@ -55,14 +55,13 @@ class arrayKlassKlass : public klassKlass {
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on(oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#ifndef PRODUCT
+ void oop_print_on(oop obj, outputStream* st);
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp b/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp
index 434205d3ed3..cbca7fbe202 100644
--- a/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp
+++ b/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp
@@ -166,12 +166,12 @@ void compiledICHolderKlass::oop_print_on(oop obj, outputStream* st) {
st->print(" - klass: "); c->holder_klass()->print_value_on(st); st->cr();
}
+#endif //PRODUCT
void compiledICHolderKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_compiledICHolder(), "must be compiledICHolder");
Klass::oop_print_value_on(obj, st);
}
-#endif
const char* compiledICHolderKlass::internal_name() const {
return "{compiledICHolder}";
diff --git a/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp b/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp
index 3d704773945..93f187faee6 100644
--- a/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp
+++ b/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp
@@ -68,14 +68,13 @@ class compiledICHolderKlass : public Klass {
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on (oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#ifndef PRODUCT
+ void oop_print_on (oop obj, outputStream* st);
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/constMethodKlass.cpp b/hotspot/src/share/vm/oops/constMethodKlass.cpp
index c1ad90c1d57..ca48fe3eb2b 100644
--- a/hotspot/src/share/vm/oops/constMethodKlass.cpp
+++ b/hotspot/src/share/vm/oops/constMethodKlass.cpp
@@ -216,6 +216,7 @@ void constMethodKlass::oop_print_on(oop obj, outputStream* st) {
}
}
+#endif //PRODUCT
// Short version of printing constMethodOop - just print the name of the
// method it belongs to.
@@ -226,8 +227,6 @@ void constMethodKlass::oop_print_value_on(oop obj, outputStream* st) {
m->method()->print_value_on(st);
}
-#endif // PRODUCT
-
const char* constMethodKlass::internal_name() const {
return "{constMethod}";
}
diff --git a/hotspot/src/share/vm/oops/constMethodKlass.hpp b/hotspot/src/share/vm/oops/constMethodKlass.hpp
index 2387d0210a6..69eebb74444 100644
--- a/hotspot/src/share/vm/oops/constMethodKlass.hpp
+++ b/hotspot/src/share/vm/oops/constMethodKlass.hpp
@@ -77,15 +77,13 @@ public:
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on (oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
+ void oop_print_on (oop obj, outputStream* st);
+#endif //PRODUCT
-#endif
-
- public:
// Verify operations
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/constMethodOop.hpp b/hotspot/src/share/vm/oops/constMethodOop.hpp
index c6d373946e1..91d01167246 100644
--- a/hotspot/src/share/vm/oops/constMethodOop.hpp
+++ b/hotspot/src/share/vm/oops/constMethodOop.hpp
@@ -258,6 +258,11 @@ public:
LocalVariableTableElement* localvariable_table_start() const;
// byte codes
+ void set_code(address code) {
+ if (code_size() > 0) {
+ memcpy(code_base(), code, code_size());
+ }
+ }
address code_base() const { return (address) (this+1); }
address code_end() const { return code_base() + code_size(); }
bool contains(address bcp) const { return code_base() <= bcp
diff --git a/hotspot/src/share/vm/oops/constantPoolKlass.cpp b/hotspot/src/share/vm/oops/constantPoolKlass.cpp
index 44b16435f31..f46963fd305 100644
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp
@@ -387,9 +387,19 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
cp->set_cache(cache());
}
-
#endif
+void constantPoolKlass::oop_print_value_on(oop obj, outputStream* st) {
+ assert(obj->is_constantPool(), "must be constantPool");
+ constantPoolOop cp = constantPoolOop(obj);
+ st->print("constant pool [%d]", cp->length());
+ if (cp->has_pseudo_string()) st->print("/pseudo_string");
+ if (cp->has_invokedynamic()) st->print("/invokedynamic");
+ cp->print_address_on(st);
+ st->print(" for ");
+ cp->pool_holder()->print_value_on(st);
+}
+
const char* constantPoolKlass::internal_name() const {
return "{constant pool}";
}
diff --git a/hotspot/src/share/vm/oops/constantPoolKlass.hpp b/hotspot/src/share/vm/oops/constantPoolKlass.hpp
index a01edbab42c..47a80d2b20e 100644
--- a/hotspot/src/share/vm/oops/constantPoolKlass.hpp
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.hpp
@@ -65,9 +65,10 @@ class constantPoolKlass : public Klass {
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
#endif
diff --git a/hotspot/src/share/vm/oops/constantPoolOop.cpp b/hotspot/src/share/vm/oops/constantPoolOop.cpp
index af72333b0b0..a324b7ae6aa 100644
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp
@@ -110,7 +110,7 @@ klassOop constantPoolOopDesc::klass_at_impl(constantPoolHandle this_oop, int whi
}
if (!PENDING_EXCEPTION->
- is_a(SystemDictionary::linkageError_klass())) {
+ is_a(SystemDictionary::LinkageError_klass())) {
// Just throw the exception and don't prevent these classes from
// being loaded due to virtual machine errors like StackOverflow
// and OutOfMemoryError, etc, or if the thread was hit by stop()
@@ -262,25 +262,48 @@ symbolOop constantPoolOopDesc::impl_signature_ref_at(int which, bool uncached) {
int constantPoolOopDesc::impl_name_and_type_ref_index_at(int which, bool uncached) {
- jint ref_index = field_or_method_at(which, uncached);
+ int i = which;
+ if (!uncached && cache() != NULL) {
+ if (constantPoolCacheOopDesc::is_secondary_index(which))
+ // Invokedynamic indexes are always processed in native order
+ // so there is no question of reading a native u2 in Java order here.
+ return cache()->main_entry_at(which)->constant_pool_index();
+ // change byte-ordering and go via cache
+ i = remap_instruction_operand_from_cache(which);
+ } else {
+ if (tag_at(which).is_name_and_type())
+ // invokedynamic index is a simple name-and-type
+ return which;
+ }
+ assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
+ jint ref_index = *int_at_addr(i);
return extract_high_short_from_int(ref_index);
}
int constantPoolOopDesc::impl_klass_ref_index_at(int which, bool uncached) {
- jint ref_index = field_or_method_at(which, uncached);
+ guarantee(!constantPoolCacheOopDesc::is_secondary_index(which),
+ "an invokedynamic instruction does not have a klass");
+ int i = which;
+ if (!uncached && cache() != NULL) {
+ // change byte-ordering and go via cache
+ i = remap_instruction_operand_from_cache(which);
+ }
+ assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
+ jint ref_index = *int_at_addr(i);
return extract_low_short_from_int(ref_index);
}
-int constantPoolOopDesc::map_instruction_operand_to_index(int operand) {
- if (constantPoolCacheOopDesc::is_secondary_index(operand)) {
- return cache()->main_entry_at(operand)->constant_pool_index();
- }
+int constantPoolOopDesc::remap_instruction_operand_from_cache(int operand) {
+ // Operand was fetched by a stream using get_Java_u2, yet was stored
+ // by Rewriter::rewrite_member_reference in native order.
+ // So now we have to fix the damage by swapping back to native order.
assert((int)(u2)operand == operand, "clean u2");
- int index = Bytes::swap_u2(operand);
- return cache()->entry_at(index)->constant_pool_index();
+ int cpc_index = Bytes::swap_u2(operand);
+ int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
+ return member_index;
}
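Note on remap_instruction_operand_from_cache: the rewriter stores CP cache
indices into the bytecode in native byte order, but a bytecode stream reads the
operand back with get_Java_u2 (big-endian), so the operand arrives byte-swapped
and must be swapped again before the cache entry can be followed to its constant
pool index. A self-contained sketch of that round trip, with a plain array
standing in for the constant pool cache:

    #include <cstdint>
    #include <cstdio>

    static uint16_t swap_u2(uint16_t x) { return (uint16_t)((x << 8) | (x >> 8)); }

    int main() {
      // The rewriter wrote cache index 5 in native (assume little-endian) order;
      // reading it back in Java order produces the byte-swapped value.
      uint16_t stored_native   = 5;
      uint16_t operand_as_read = swap_u2(stored_native);   // what the stream hands us

      // remap_instruction_operand_from_cache undoes the damage ...
      uint16_t cpc_index = swap_u2(operand_as_read);

      // ... and the cache entry then yields the real constant pool index.
      int constant_pool_index_of[8] = { 0, 0, 0, 0, 0, 42, 0, 0 };  // toy cache
      printf("cpc index %d -> cp index %d\n",
             (int)cpc_index, constant_pool_index_of[cpc_index]);
      return 0;
    }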
diff --git a/hotspot/src/share/vm/oops/constantPoolOop.hpp b/hotspot/src/share/vm/oops/constantPoolOop.hpp
index 72bec650014..fb50c6a2949 100644
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp
@@ -191,6 +191,16 @@ class constantPoolOopDesc : public oopDesc {
}
}
+ void object_at_put(int which, oop str) {
+ oop_store((volatile oop*) obj_at_addr(which), str);
+ release_tag_at_put(which, JVM_CONSTANT_Object);
+ if (UseConcMarkSweepGC) {
+ // In case the earlier card-mark was consumed by a concurrent
+ // marking thread before the tag was updated, redirty the card.
+ oop_store_without_check((volatile oop*) obj_at_addr(which), str);
+ }
+ }
+
// For temporary use while constructing constant pool
void string_index_at_put(int which, int string_index) {
tag_at_put(which, JVM_CONSTANT_StringIndex);
@@ -228,7 +238,8 @@ class constantPoolOopDesc : public oopDesc {
tag.is_unresolved_klass() ||
tag.is_symbol() ||
tag.is_unresolved_string() ||
- tag.is_string();
+ tag.is_string() ||
+ tag.is_object();
}
// Fetching constants
@@ -291,6 +302,11 @@ class constantPoolOopDesc : public oopDesc {
return string_at_impl(h_this, which, CHECK_NULL);
}
+ oop object_at(int which) {
+ assert(tag_at(which).is_object(), "Corrupted constant pool");
+ return *obj_at_addr(which);
+ }
+
// A "pseudo-string" is an non-string oop that has found is way into
// a String entry.
// Under AnonymousClasses this can happen if the user patches a live
@@ -342,12 +358,14 @@ class constantPoolOopDesc : public oopDesc {
}
// The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
- // name_and_type_ref_index_at) all expect constant pool indices
- // from the bytecodes to be passed in, which are actually potentially byte-swapped
- // or rewritten constant pool cache indices. They all call map_instruction_operand_to_index.
- int map_instruction_operand_to_index(int operand);
+ // name_and_type_ref_index_at) all expect to be passed indices obtained
+ // directly from the bytecode, and extracted according to java byte order.
+ // If the indices are meant to refer to fields or methods, they are
+ // actually potentially byte-swapped, rewritten constant pool cache indices.
+ // The routine remap_instruction_operand_from_cache manages the adjustment
+ // of these values back to constant pool indices.
- // There are also "uncached" versions which do not map the operand index; see below.
+ // There are also "uncached" versions which do not adjust the operand index; see below.
// Lookup for entries consisting of (klass_index, name_and_type index)
klassOop klass_ref_at(int which, TRAPS);
@@ -361,8 +379,6 @@ class constantPoolOopDesc : public oopDesc {
// Lookup for entries consisting of (name_index, signature_index)
int name_ref_index_at(int which_nt); // == low-order jshort of name_and_type_at(which_nt)
int signature_ref_index_at(int which_nt); // == high-order jshort of name_and_type_at(which_nt)
- symbolOop nt_name_ref_at(int which_nt) { return symbol_at(name_ref_index_at(which_nt)); }
- symbolOop nt_signature_ref_at(int which_nt) { return symbol_at(signature_ref_index_at(which_nt)); }
BasicType basic_type_for_signature_at(int which);
@@ -425,18 +441,7 @@ class constantPoolOopDesc : public oopDesc {
int impl_klass_ref_index_at(int which, bool uncached);
int impl_name_and_type_ref_index_at(int which, bool uncached);
- // Takes either a constant pool cache index in possibly byte-swapped
- // byte order (which comes from the bytecodes after rewriting) or,
- // if "uncached" is true, a vanilla constant pool index
- jint field_or_method_at(int which, bool uncached) {
- int i = which;
- if (!uncached && cache() != NULL) {
- // change byte-ordering and go via cache
- i = map_instruction_operand_to_index(which);
- }
- assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
- return *int_at_addr(i);
- }
+ int remap_instruction_operand_from_cache(int operand);
// Used while constructing constant pool (only by ClassFileParser)
jint klass_index_at(int which) {
diff --git a/hotspot/src/share/vm/oops/cpCacheKlass.cpp b/hotspot/src/share/vm/oops/cpCacheKlass.cpp
index 5a85e88d46e..b922dc8f2ea 100644
--- a/hotspot/src/share/vm/oops/cpCacheKlass.cpp
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.cpp
@@ -261,6 +261,15 @@ void constantPoolCacheKlass::oop_print_on(oop obj, outputStream* st) {
#endif
+void constantPoolCacheKlass::oop_print_value_on(oop obj, outputStream* st) {
+ assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
+ constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+ st->print("cache [%d]", cache->length());
+ cache->print_address_on(st);
+ st->print(" for ");
+ cache->constant_pool()->print_value_on(st);
+}
+
void constantPoolCacheKlass::oop_verify_on(oop obj, outputStream* st) {
guarantee(obj->is_constantPoolCache(), "obj must be constant pool cache");
constantPoolCacheOop cache = (constantPoolCacheOop)obj;
diff --git a/hotspot/src/share/vm/oops/cpCacheKlass.hpp b/hotspot/src/share/vm/oops/cpCacheKlass.hpp
index 859f64a46f5..e49b52d75a1 100644
--- a/hotspot/src/share/vm/oops/cpCacheKlass.hpp
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.hpp
@@ -61,9 +61,10 @@ class constantPoolCacheKlass: public Klass {
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
#endif
diff --git a/hotspot/src/share/vm/oops/cpCacheOop.cpp b/hotspot/src/share/vm/oops/cpCacheOop.cpp
index 6f549afbe38..36380c88903 100644
--- a/hotspot/src/share/vm/oops/cpCacheOop.cpp
+++ b/hotspot/src/share/vm/oops/cpCacheOop.cpp
@@ -28,21 +28,17 @@
// Implementation of ConstantPoolCacheEntry
-void ConstantPoolCacheEntry::set_initial_state(int index) {
- if (constantPoolCacheOopDesc::is_secondary_index(index)) {
- // Hack: The rewriter is trying to say that this entry itself
- // will be a secondary entry.
- int main_index = constantPoolCacheOopDesc::decode_secondary_index(index);
- assert(0 <= main_index && main_index < 0x10000, "sanity check");
- _indices = (main_index << 16);
- assert(main_entry_index() == main_index, "");
- return;
- }
+void ConstantPoolCacheEntry::initialize_entry(int index) {
assert(0 < index && index < 0x10000, "sanity check");
_indices = index;
assert(constant_pool_index() == index, "");
}
+void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
+ assert(0 <= main_index && main_index < 0x10000, "sanity check");
+ _indices = (main_index << 16);
+ assert(main_entry_index() == main_index, "");
+}
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
bool is_vfinal, bool is_volatile,
@@ -223,10 +219,10 @@ void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index)
void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
- methodOop method = (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site());
+ methodOop method = (methodOop) java_dyn_CallSite::vmmethod(call_site());
assert(method->is_method(), "must be initialized properly");
int param_size = method->size_of_parameters();
- assert(param_size > 1, "method argument size must include MH.this & initial dynamic receiver");
+ assert(param_size >= 1, "method argument size must include MH.this");
param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic
if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
// racing threads might be trying to install their own favorites
@@ -439,7 +435,18 @@ void ConstantPoolCacheEntry::verify(outputStream* st) const {
void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
- for (int i = 0; i < length(); i++) entry_at(i)->set_initial_state(inverse_index_map[i]);
+ for (int i = 0; i < length(); i++) {
+ ConstantPoolCacheEntry* e = entry_at(i);
+ int original_index = inverse_index_map[i];
+ if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
+ int main_index = (original_index - Rewriter::_secondary_entry_tag);
+ assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
+ e->initialize_secondary_entry(main_index);
+ } else {
+ e->initialize_entry(original_index);
+ }
+ assert(entry_at(i) == e, "sanity");
+ }
}
// RedefineClasses() API support:
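The new initialize() loop above distinguishes primary entries, which record their original constant pool index, from secondary (invokedynamic) entries, which record the index of their main entry plus a tag supplied by the Rewriter. A minimal standalone sketch of that dispatch follows; the tag value and the helper types are assumptions, since the real constant is Rewriter::_secondary_entry_tag:

#include <cassert>
#include <vector>

const int kSecondaryEntryTag = 1 << 30;  // assumed stand-in for Rewriter::_secondary_entry_tag

struct CacheEntry {
  int indices;
  CacheEntry() : indices(0) {}
  void initialize_entry(int cp_index)          { assert(cp_index > 0); indices = cp_index; }
  void initialize_secondary_entry(int main_ix) { assert(main_ix >= 0); indices = main_ix << 16; }
  bool is_secondary() const                    { return (indices & 0xffff) == 0; }
};

// Mirrors the shape of the initialize() loop: tagged map values describe
// secondary entries, everything else is a plain constant pool index.
void initialize_cache(std::vector<CacheEntry>& cache, const std::vector<int>& inverse_map) {
  assert(cache.size() == inverse_map.size());
  for (size_t i = 0; i < cache.size(); i++) {
    int original = inverse_map[i];
    if ((original & kSecondaryEntryTag) != 0) {
      cache[i].initialize_secondary_entry(original - kSecondaryEntryTag);
    } else {
      cache[i].initialize_entry(original);
    }
  }
}

int main() {
  std::vector<int> inverse;
  inverse.push_back(5);                        // plain constant pool index
  inverse.push_back(kSecondaryEntryTag + 0);   // secondary entry, main entry 0
  inverse.push_back(9);
  std::vector<CacheEntry> cache(inverse.size());
  initialize_cache(cache, inverse);
  return (cache[1].is_secondary() && !cache[0].is_secondary()) ? 0 : 1;
}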
diff --git a/hotspot/src/share/vm/oops/cpCacheOop.hpp b/hotspot/src/share/vm/oops/cpCacheOop.hpp
index ec25b87352b..6fb7a635e56 100644
--- a/hotspot/src/share/vm/oops/cpCacheOop.hpp
+++ b/hotspot/src/share/vm/oops/cpCacheOop.hpp
@@ -154,7 +154,8 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
};
// Initialization
- void set_initial_state(int index); // sets entry to initial state
+ void initialize_entry(int original_index); // initialize primary entry
+ void initialize_secondary_entry(int main_index); // initialize secondary entry
void set_field( // sets entry to resolved field state
Bytecodes::Code get_code, // the bytecode used for reading the field
@@ -251,6 +252,7 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
// Code generation support
static WordSize size() { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); }
+ static ByteSize size_in_bytes() { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
static ByteSize indices_offset() { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
static ByteSize f1_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
static ByteSize f2_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
@@ -321,6 +323,7 @@ class constantPoolCacheOopDesc: public oopDesc {
ConstantPoolCacheEntry* base() const { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }
friend class constantPoolCacheKlass;
+ friend class ConstantPoolCacheEntry;
public:
// Initialization
@@ -329,7 +332,8 @@ class constantPoolCacheOopDesc: public oopDesc {
// Secondary indexes.
// They must look completely different from normal indexes.
// The main reason is that byte swapping is sometimes done on normal indexes.
- // Also, it is helpful for debugging to tell the two apart.
+ // Also, some of the CP accessors do different things for secondary indexes.
+ // Finally, it is helpful for debugging to tell the two apart.
static bool is_secondary_index(int i) { return (i < 0); }
static int decode_secondary_index(int i) { assert(is_secondary_index(i), ""); return ~i; }
static int encode_secondary_index(int i) { assert(!is_secondary_index(i), ""); return ~i; }
@@ -337,18 +341,35 @@ class constantPoolCacheOopDesc: public oopDesc {
// Accessors
void set_constant_pool(constantPoolOop pool) { oop_store_without_check((oop*)&_constant_pool, (oop)pool); }
constantPoolOop constant_pool() const { return _constant_pool; }
- ConstantPoolCacheEntry* entry_at(int i) const { assert(0 <= i && i < length(), "index out of bounds"); return base() + i; }
+ // Fetches the entry at the given index.
+ // The entry may be either primary or secondary.
+ // In either case the index must not be encoded or byte-swapped in any way.
+ ConstantPoolCacheEntry* entry_at(int i) const {
+ assert(0 <= i && i < length(), "index out of bounds");
+ return base() + i;
+ }
+ // Fetches the secondary entry referred to by index.
+ // The index may be a secondary index, and must not be byte-swapped.
+ ConstantPoolCacheEntry* secondary_entry_at(int i) const {
+ int raw_index = i;
+ if (is_secondary_index(i)) { // correct these on the fly
+ raw_index = decode_secondary_index(i);
+ }
+ assert(entry_at(raw_index)->is_secondary_entry(), "not a secondary entry");
+ return entry_at(raw_index);
+ }
+ // Given a primary or secondary index, fetch the corresponding primary entry.
+ // Indirect through the secondary entry, if the index is encoded as a secondary index.
+ // The index must not be byte-swapped.
ConstantPoolCacheEntry* main_entry_at(int i) const {
- ConstantPoolCacheEntry* e;
+ int primary_index = i;
if (is_secondary_index(i)) {
// run through an extra level of indirection:
- i = decode_secondary_index(i);
- e = entry_at(i);
- i = e->main_entry_index();
+ int raw_index = decode_secondary_index(i);
+ primary_index = entry_at(raw_index)->main_entry_index();
}
- e = entry_at(i);
- assert(!e->is_secondary_entry(), "only one level of indirection");
- return e;
+ assert(!entry_at(primary_index)->is_secondary_entry(), "only one level of indirection");
+ return entry_at(primary_index);
}
// GC support
@@ -359,6 +380,12 @@ class constantPoolCacheOopDesc: public oopDesc {
// Code generation
static ByteSize base_offset() { return in_ByteSize(sizeof(constantPoolCacheOopDesc)); }
+ static ByteSize entry_offset(int raw_index) {
+ int index = raw_index;
+ if (is_secondary_index(raw_index))
+ index = decode_secondary_index(raw_index);
+ return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
+ }
// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
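Secondary indexes are made to look "completely different" from normal ones by bitwise complement: encode_secondary_index(i) == ~i is negative for every non-negative i, so is_secondary_index() can simply test the sign, and decoding applies the complement again. The self-contained sketch below checks that round trip and mirrors the entry_offset computation; the 32-byte entry size is an assumption for the example, and the header's base_offset() term is omitted:

#include <cassert>
#include <cstddef>

inline bool is_secondary_index(int i)     { return i < 0; }
inline int  encode_secondary_index(int i) { assert(!is_secondary_index(i)); return ~i; }
inline int  decode_secondary_index(int i) { assert(is_secondary_index(i));  return ~i; }

// Byte offset of an entry within the cache body, accepting either a plain
// index or an encoded secondary index (the header offset is left out here).
inline size_t entry_offset(int raw_index, size_t entry_size_in_bytes) {
  int index = is_secondary_index(raw_index) ? decode_secondary_index(raw_index)
                                            : raw_index;
  return static_cast<size_t>(index) * entry_size_in_bytes;
}

int main() {
  for (int i = 0; i < 1000; i++) {
    int enc = encode_secondary_index(i);
    assert(is_secondary_index(enc) && decode_secondary_index(enc) == i);
  }
  // With an assumed 32-byte entry, entry 3 starts 96 bytes into the body,
  // whether it is addressed directly or through its encoded secondary form.
  bool ok = (entry_offset(encode_secondary_index(3), 32) == entry_offset(3, 32)) &&
            (entry_offset(3, 32) == 96);
  return ok ? 0 : 1;
}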
diff --git a/hotspot/src/share/vm/oops/generateOopMap.cpp b/hotspot/src/share/vm/oops/generateOopMap.cpp
index eb533a81e6f..845f1ec22b6 100644
--- a/hotspot/src/share/vm/oops/generateOopMap.cpp
+++ b/hotspot/src/share/vm/oops/generateOopMap.cpp
@@ -1559,7 +1559,7 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_invokedynamic: do_method(false, true, itr->get_index_int(), itr->bci()); break;
+ case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_int(), itr->bci()); break;
case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_newarray:
case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break;
@@ -1830,12 +1830,8 @@ void GenerateOopMap::do_jsr(int targ_bci) {
void GenerateOopMap::do_ldc(int idx, int bci) {
- constantPoolOop cp = method()->constants();
- constantTag tag = cp->tag_at(idx);
-
- CellTypeState cts = (tag.is_string() || tag.is_unresolved_string() ||
- tag.is_klass() || tag.is_unresolved_klass())
- ? CellTypeState::make_line_ref(bci) : valCTS;
+ constantPoolOop cp = method()->constants();
+ CellTypeState cts = cp->is_pointer_entry(idx) ? CellTypeState::make_line_ref(bci) : valCTS;
ppush1(cts);
}
@@ -1900,11 +1896,9 @@ void GenerateOopMap::do_field(int is_get, int is_static, int idx, int bci) {
}
void GenerateOopMap::do_method(int is_static, int is_interface, int idx, int bci) {
- // Dig up signature for field in constant pool
- constantPoolOop cp = _method->constants();
- int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx);
- int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx); // @@@@@
- symbolOop signature = cp->symbol_at(signatureIdx);
+ // Dig up signature for method in constant pool
+ constantPoolOop cp = _method->constants();
+ symbolOop signature = cp->signature_ref_at(idx);
// Parse method signature
CellTypeState out[4];
diff --git a/hotspot/src/share/vm/oops/instanceKlass.cpp b/hotspot/src/share/vm/oops/instanceKlass.cpp
index aaa2bfe876e..ae57e59143b 100644
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp
@@ -383,7 +383,7 @@ void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
}
- if (e->is_a(SystemDictionary::error_klass())) {
+ if (e->is_a(SystemDictionary::Error_klass())) {
THROW_OOP(e());
} else {
JavaCallArguments args(e);
@@ -568,7 +568,7 @@ void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
: vmSymbols::java_lang_InstantiationException(), external_name());
}
- if (as_klassOop() == SystemDictionary::class_klass()) {
+ if (as_klassOop() == SystemDictionary::Class_klass()) {
ResourceMark rm(THREAD);
THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
: vmSymbols::java_lang_IllegalAccessException(), external_name());
@@ -2045,8 +2045,9 @@ bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
// As we walk along, look for equalities between outer1 and class2.
// Eventually, the walks will terminate as outer1 stops
// at the top-level class around the original class.
- symbolOop ignore_name;
- klassOop next = outer1->compute_enclosing_class(ignore_name, CHECK_false);
+ bool ignore_inner_is_member;
+ klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
+ CHECK_false);
if (next == NULL) break;
if (next == class2()) return true;
outer1 = instanceKlassHandle(THREAD, next);
@@ -2055,8 +2056,9 @@ bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
// Now do the same for class2.
instanceKlassHandle outer2 = class2;
for (;;) {
- symbolOop ignore_name;
- klassOop next = outer2->compute_enclosing_class(ignore_name, CHECK_false);
+ bool ignore_inner_is_member;
+ klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
+ CHECK_false);
if (next == NULL) break;
// Might as well check the new outer against all available values.
if (next == class1()) return true;
@@ -2223,7 +2225,7 @@ void FieldPrinter::do_field(fieldDescriptor* fd) {
void instanceKlass::oop_print_on(oop obj, outputStream* st) {
Klass::oop_print_on(obj, st);
- if (as_klassOop() == SystemDictionary::string_klass()) {
+ if (as_klassOop() == SystemDictionary::String_klass()) {
typeArrayOop value = java_lang_String::value(obj);
juint offset = java_lang_String::offset(obj);
juint length = java_lang_String::length(obj);
@@ -2243,7 +2245,7 @@ void instanceKlass::oop_print_on(oop obj, outputStream* st) {
FieldPrinter print_nonstatic_field(st, obj);
do_nonstatic_fields(&print_nonstatic_field);
- if (as_klassOop() == SystemDictionary::class_klass()) {
+ if (as_klassOop() == SystemDictionary::Class_klass()) {
st->print(BULLET"signature: ");
java_lang_Class::print_signature(obj, st);
st->cr();
@@ -2266,11 +2268,13 @@ void instanceKlass::oop_print_on(oop obj, outputStream* st) {
}
}
+#endif //PRODUCT
+
void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
st->print("a ");
name()->print_value_on(st);
obj->print_address_on(st);
- if (as_klassOop() == SystemDictionary::string_klass()
+ if (as_klassOop() == SystemDictionary::String_klass()
&& java_lang_String::value(obj) != NULL) {
ResourceMark rm;
int len = java_lang_String::length(obj);
@@ -2279,7 +2283,7 @@ void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
st->print(" = \"%s\"", str);
if (len > plen)
st->print("...[%d]", len);
- } else if (as_klassOop() == SystemDictionary::class_klass()) {
+ } else if (as_klassOop() == SystemDictionary::Class_klass()) {
klassOop k = java_lang_Class::as_klassOop(obj);
st->print(" = ");
if (k != NULL) {
@@ -2297,8 +2301,6 @@ void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
}
}
-#endif // ndef PRODUCT
-
const char* instanceKlass::internal_name() const {
return external_name();
}
@@ -2346,7 +2348,7 @@ void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
// Check that we have the right class
static bool first_time = true;
- guarantee(k == SystemDictionary::class_klass() && first_time, "Invalid verify of maps");
+ guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
first_time = false;
const int extra = java_lang_Class::number_of_fake_oop_fields;
guarantee(ik->nonstatic_field_size() == extra, "just checking");
diff --git a/hotspot/src/share/vm/oops/instanceKlass.hpp b/hotspot/src/share/vm/oops/instanceKlass.hpp
index 9147892b1ea..798a3808931 100644
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp
@@ -337,12 +337,12 @@ class instanceKlass: public Klass {
static bool is_same_class_package(oop class_loader1, symbolOop class_name1, oop class_loader2, symbolOop class_name2);
// find an enclosing class (defined where original code was, in jvm.cpp!)
- klassOop compute_enclosing_class(symbolOop& simple_name_result, TRAPS) {
+ klassOop compute_enclosing_class(bool* inner_is_member, TRAPS) {
instanceKlassHandle self(THREAD, this->as_klassOop());
- return compute_enclosing_class_impl(self, simple_name_result, THREAD);
+ return compute_enclosing_class_impl(self, inner_is_member, THREAD);
}
static klassOop compute_enclosing_class_impl(instanceKlassHandle self,
- symbolOop& simple_name_result, TRAPS);
+ bool* inner_is_member, TRAPS);
// tell if two classes have the same enclosing class (at package level)
bool is_same_package_member(klassOop class2, TRAPS) {
@@ -839,17 +839,16 @@ public:
// JVMTI support
jint jvmti_class_status() const;
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on (oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
+ void oop_print_on (oop obj, outputStream* st);
void print_dependent_nmethods(bool verbose = false);
bool is_dependent_nmethod(nmethod* nm);
#endif
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp
index 18a6d7addf8..05748104d3b 100644
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp
@@ -317,6 +317,11 @@ void instanceKlassKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
pm->claim_or_forward_breadth(sg_addr);
}
+ oop* bsm_addr = ik->adr_bootstrap_method();
+ if (PSScavenge::should_scavenge(bsm_addr)) {
+ pm->claim_or_forward_breadth(bsm_addr);
+ }
+
klassKlass::oop_copy_contents(pm, obj);
}
@@ -345,6 +350,11 @@ void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
pm->claim_or_forward_depth(sg_addr);
}
+ oop* bsm_addr = ik->adr_bootstrap_method();
+ if (PSScavenge::should_scavenge(bsm_addr)) {
+ pm->claim_or_forward_depth(bsm_addr);
+ }
+
klassKlass::oop_copy_contents(pm, obj);
}
@@ -628,6 +638,7 @@ void instanceKlassKlass::oop_print_on(oop obj, outputStream* st) {
st->cr();
}
+#endif //PRODUCT
void instanceKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
@@ -635,8 +646,6 @@ void instanceKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
ik->name()->print_value_on(st);
}
-#endif // PRODUCT
-
const char* instanceKlassKlass::internal_name() const {
return "{instance class}";
}
diff --git a/hotspot/src/share/vm/oops/instanceKlassKlass.hpp b/hotspot/src/share/vm/oops/instanceKlassKlass.hpp
index d736ca5eb23..79a93b63910 100644
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.hpp
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.hpp
@@ -69,14 +69,13 @@ private:
// Apply closure to the InstanceKlass oops that are outside the java heap.
inline void iterate_c_heap_oops(instanceKlass* ik, OopClosure* closure);
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on(oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
+ void oop_print_on(oop obj, outputStream* st);
#endif
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/instanceRefKlass.cpp b/hotspot/src/share/vm/oops/instanceRefKlass.cpp
index a8c7baf5214..a8f1ebeeca3 100644
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp
@@ -78,9 +78,9 @@ void instanceRefKlass::oop_follow_contents(oop obj) {
#ifndef SERIALGC
template <class T>
-static void specialized_oop_follow_contents(instanceRefKlass* ref,
- ParCompactionManager* cm,
- oop obj) {
+void specialized_oop_follow_contents(instanceRefKlass* ref,
+ ParCompactionManager* cm,
+ oop obj) {
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
T heap_oop = oopDesc::load_heap_oop(referent_addr);
debug_only(
@@ -397,7 +397,7 @@ void instanceRefKlass::update_nonstatic_oop_maps(klassOop k) {
// Check that we have the right class
debug_only(static bool first_time = true);
- assert(k == SystemDictionary::reference_klass() && first_time,
+ assert(k == SystemDictionary::Reference_klass() && first_time,
"Invalid update of maps");
debug_only(first_time = false);
assert(ik->nonstatic_oop_map_count() == 1, "just checking");
diff --git a/hotspot/src/share/vm/oops/klass.cpp b/hotspot/src/share/vm/oops/klass.cpp
index a842709649e..8260ee274b5 100644
--- a/hotspot/src/share/vm/oops/klass.cpp
+++ b/hotspot/src/share/vm/oops/klass.cpp
@@ -217,8 +217,8 @@ void Klass::initialize_supers(klassOop k, TRAPS) {
set_super(NULL);
oop_store_without_check((oop*) &_primary_supers[0], (oop) this->as_klassOop());
assert(super_depth() == 0, "Object must already be initialized properly");
- } else if (k != super() || k == SystemDictionary::object_klass()) {
- assert(super() == NULL || super() == SystemDictionary::object_klass(),
+ } else if (k != super() || k == SystemDictionary::Object_klass()) {
+ assert(super() == NULL || super() == SystemDictionary::Object_klass(),
"initialize this only once to a non-trivial value");
set_super(k);
Klass* sup = k->klass_part();
@@ -370,7 +370,7 @@ void Klass::append_to_sibling_list() {
void Klass::remove_from_sibling_list() {
// remove receiver from sibling list
instanceKlass* super = superklass();
- assert(super != NULL || as_klassOop() == SystemDictionary::object_klass(), "should have super");
+ assert(super != NULL || as_klassOop() == SystemDictionary::Object_klass(), "should have super");
if (super == NULL) return; // special case: class Object
if (super->subklass() == this) {
// first subklass
@@ -541,6 +541,7 @@ void Klass::oop_print_on(oop obj, outputStream* st) {
st->cr();
}
+#endif //PRODUCT
void Klass::oop_print_value_on(oop obj, outputStream* st) {
// print title
@@ -549,8 +550,6 @@ void Klass::oop_print_value_on(oop obj, outputStream* st) {
obj->print_address_on(st);
}
-#endif
-
// Verification
void Klass::oop_verify_on(oop obj, outputStream* st) {
diff --git a/hotspot/src/share/vm/oops/klass.hpp b/hotspot/src/share/vm/oops/klass.hpp
index c4436d6554f..427d24c7755 100644
--- a/hotspot/src/share/vm/oops/klass.hpp
+++ b/hotspot/src/share/vm/oops/klass.hpp
@@ -776,14 +776,13 @@ class Klass : public Klass_vtbl {
// JVMTI support
virtual jint jvmti_class_status() const;
-#ifndef PRODUCT
public:
// Printing
- virtual void oop_print_on (oop obj, outputStream* st);
virtual void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#ifndef PRODUCT
+ virtual void oop_print_on (oop obj, outputStream* st);
+#endif //PRODUCT
- public:
// Verification
virtual const char* internal_name() const = 0;
virtual void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/klassKlass.cpp b/hotspot/src/share/vm/oops/klassKlass.cpp
index b38d55cf4e8..78b5c797099 100644
--- a/hotspot/src/share/vm/oops/klassKlass.cpp
+++ b/hotspot/src/share/vm/oops/klassKlass.cpp
@@ -202,13 +202,12 @@ void klassKlass::oop_print_on(oop obj, outputStream* st) {
Klass::oop_print_on(obj, st);
}
+#endif //PRODUCT
void klassKlass::oop_print_value_on(oop obj, outputStream* st) {
Klass::oop_print_value_on(obj, st);
}
-#endif
-
const char* klassKlass::internal_name() const {
return "{other class}";
}
diff --git a/hotspot/src/share/vm/oops/klassKlass.hpp b/hotspot/src/share/vm/oops/klassKlass.hpp
index c8b5a9a6510..04b4ed958f3 100644
--- a/hotspot/src/share/vm/oops/klassKlass.hpp
+++ b/hotspot/src/share/vm/oops/klassKlass.hpp
@@ -67,14 +67,13 @@ class klassKlass: public Klass {
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on (oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#ifndef PRODUCT
+ void oop_print_on (oop obj, outputStream* st);
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/methodDataKlass.cpp b/hotspot/src/share/vm/oops/methodDataKlass.cpp
index 56592b0ee24..04823a30831 100644
--- a/hotspot/src/share/vm/oops/methodDataKlass.cpp
+++ b/hotspot/src/share/vm/oops/methodDataKlass.cpp
@@ -214,6 +214,8 @@ void methodDataKlass::oop_print_on(oop obj, outputStream* st) {
m->print_data_on(st);
}
+#endif //PRODUCT
+
void methodDataKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_methodData(), "should be method data");
methodDataOop m = methodDataOop(obj);
@@ -221,8 +223,6 @@ void methodDataKlass::oop_print_value_on(oop obj, outputStream* st) {
m->method()->print_value_on(st);
}
-#endif // !PRODUCT
-
const char* methodDataKlass::internal_name() const {
return "{method data}";
}
diff --git a/hotspot/src/share/vm/oops/methodDataKlass.hpp b/hotspot/src/share/vm/oops/methodDataKlass.hpp
index 0b78000d46f..14eaf35b229 100644
--- a/hotspot/src/share/vm/oops/methodDataKlass.hpp
+++ b/hotspot/src/share/vm/oops/methodDataKlass.hpp
@@ -71,14 +71,13 @@ class methodDataKlass : public Klass {
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on (oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
-#endif // !PRODUCT
+#ifndef PRODUCT
+ void oop_print_on (oop obj, outputStream* st);
+#endif //PRODUCT
- public:
// Verify operations
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/methodKlass.cpp b/hotspot/src/share/vm/oops/methodKlass.cpp
index 2879529bb9f..1b788c63b13 100644
--- a/hotspot/src/share/vm/oops/methodKlass.cpp
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp
@@ -308,6 +308,7 @@ void methodKlass::oop_print_on(oop obj, outputStream* st) {
}
}
+#endif //PRODUCT
void methodKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_method(), "must be method");
@@ -323,8 +324,6 @@ void methodKlass::oop_print_value_on(oop obj, outputStream* st) {
if (WizardMode && m->code() != NULL) st->print(" ((nmethod*)%p)", m->code());
}
-#endif // PRODUCT
-
const char* methodKlass::internal_name() const {
return "{method}";
}
diff --git a/hotspot/src/share/vm/oops/methodKlass.hpp b/hotspot/src/share/vm/oops/methodKlass.hpp
index 7c26114f744..abd1cbf4741 100644
--- a/hotspot/src/share/vm/oops/methodKlass.hpp
+++ b/hotspot/src/share/vm/oops/methodKlass.hpp
@@ -68,14 +68,13 @@ class methodKlass : public Klass {
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on (oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#ifndef PRODUCT
+ void oop_print_on (oop obj, outputStream* st);
+#endif //PRODUCT
- public:
// Verify operations
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/methodOop.cpp b/hotspot/src/share/vm/oops/methodOop.cpp
index cd575e5c488..e6f361ad7a1 100644
--- a/hotspot/src/share/vm/oops/methodOop.cpp
+++ b/hotspot/src/share/vm/oops/methodOop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -456,12 +456,12 @@ objArrayHandle methodOopDesc::resolved_checked_exceptions_impl(methodOop this_oo
return objArrayHandle(THREAD, Universe::the_empty_class_klass_array());
} else {
methodHandle h_this(THREAD, this_oop);
- objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::class_klass(), length, CHECK_(objArrayHandle()));
+ objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::Class_klass(), length, CHECK_(objArrayHandle()));
objArrayHandle mirrors (THREAD, m_oop);
for (int i = 0; i < length; i++) {
CheckedExceptionElement* table = h_this->checked_exceptions_start(); // recompute on each iteration, not gc safe
klassOop k = h_this->constants()->klass_at(table[i].class_cp_index, CHECK_(objArrayHandle()));
- assert(Klass::cast(k)->is_subclass_of(SystemDictionary::throwable_klass()), "invalid exception class");
+ assert(Klass::cast(k)->is_subclass_of(SystemDictionary::Throwable_klass()), "invalid exception class");
mirrors->obj_at_put(i, Klass::cast(k)->java_mirror());
}
return mirrors;
@@ -821,6 +821,18 @@ jint* methodOopDesc::method_type_offsets_chain() {
return pchase;
}
+//------------------------------------------------------------------------------
+// methodOopDesc::is_method_handle_adapter
+//
+// Tests if this method is an internal adapter frame from the
+// MethodHandleCompiler.
+bool methodOopDesc::is_method_handle_adapter() const {
+ return ((name() == vmSymbols::invoke_name() &&
+ method_holder() == SystemDictionary::MethodHandle_klass())
+ ||
+ method_holder() == SystemDictionary::InvokeDynamic_klass());
+}
+
methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
symbolHandle signature,
Handle method_type, TRAPS) {
@@ -1032,8 +1044,8 @@ bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {
// We are loading classes eagerly. If a ClassNotFoundException or
// a LinkageError was generated, be sure to ignore it.
if (HAS_PENDING_EXCEPTION) {
- if (PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass()) ||
- PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
+ if (PENDING_EXCEPTION->is_a(SystemDictionary::ClassNotFoundException_klass()) ||
+ PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
CLEAR_PENDING_EXCEPTION;
} else {
return false;
diff --git a/hotspot/src/share/vm/oops/methodOop.hpp b/hotspot/src/share/vm/oops/methodOop.hpp
index fc3d3451ca4..4c9a6f05172 100644
--- a/hotspot/src/share/vm/oops/methodOop.hpp
+++ b/hotspot/src/share/vm/oops/methodOop.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -365,6 +365,7 @@ class methodOopDesc : public oopDesc {
#endif
// byte codes
+ void set_code(address code) { return constMethod()->set_code(code); }
address code_base() const { return constMethod()->code_base(); }
bool contains(address bcp) const { return constMethod()->contains(bcp); }
@@ -524,6 +525,9 @@ class methodOopDesc : public oopDesc {
// JSR 292 support
bool is_method_handle_invoke() const { return access_flags().is_method_handle_invoke(); }
+ // Tests if this method is an internal adapter frame from the
+ // MethodHandleCompiler.
+ bool is_method_handle_adapter() const;
static methodHandle make_invoke_method(KlassHandle holder,
symbolHandle signature,
Handle method_type,
@@ -537,6 +541,7 @@ class methodOopDesc : public oopDesc {
// all without checking for a stack overflow
static int extra_stack_entries() { return (EnableMethodHandles ? (int)MethodHandlePushLimit : 0) + (EnableInvokeDynamic ? 3 : 0); }
static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize()
+
// RedefineClasses() support:
bool is_old() const { return access_flags().is_old(); }
void set_is_old() { _access_flags.set_is_old(); }
diff --git a/hotspot/src/share/vm/oops/objArrayKlass.cpp b/hotspot/src/share/vm/oops/objArrayKlass.cpp
index 212126490cd..cb6884a9c01 100644
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp
@@ -127,16 +127,14 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
// pointer delta is scaled to number of elements (length field in
// objArrayOop) which we assume is 32 bit.
assert(pd == (size_t)(int)pd, "length field overflow");
- const size_t done_word_len = objArrayOopDesc::array_size((int)pd);
- bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
+ bs->write_ref_array((HeapWord*)dst, pd);
THROW(vmSymbols::java_lang_ArrayStoreException());
return;
}
}
}
}
- const size_t word_len = objArrayOopDesc::array_size(length);
- bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
+ bs->write_ref_array((HeapWord*)dst, length);
}
void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
@@ -248,8 +246,8 @@ objArrayOop objArrayKlass::compute_secondary_supers(int num_extra_slots, TRAPS)
} else {
objArrayOop sec_oop = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
objArrayHandle secondaries(THREAD, sec_oop);
- secondaries->obj_at_put(num_extra_slots+0, SystemDictionary::cloneable_klass());
- secondaries->obj_at_put(num_extra_slots+1, SystemDictionary::serializable_klass());
+ secondaries->obj_at_put(num_extra_slots+0, SystemDictionary::Cloneable_klass());
+ secondaries->obj_at_put(num_extra_slots+1, SystemDictionary::Serializable_klass());
for (int i = 0; i < num_elem_supers; i++) {
klassOop elem_super = (klassOop) elem_supers->obj_at(i);
klassOop array_super = elem_super->klass_part()->array_klass_or_null();
@@ -501,6 +499,8 @@ void objArrayKlass::oop_print_on(oop obj, outputStream* st) {
}
}
+#endif //PRODUCT
+
static int max_objArray_print_length = 4;
void objArrayKlass::oop_print_value_on(oop obj, outputStream* st) {
@@ -510,7 +510,7 @@ void objArrayKlass::oop_print_value_on(oop obj, outputStream* st) {
int len = objArrayOop(obj)->length();
st->print("[%d] ", len);
obj->print_address_on(st);
- if (PrintOopAddress || PrintMiscellaneous && (WizardMode || Verbose)) {
+ if (NOT_PRODUCT(PrintOopAddress ||) PrintMiscellaneous && (WizardMode || Verbose)) {
st->print("{");
for (int i = 0; i < len; i++) {
if (i > max_objArray_print_length) {
@@ -522,8 +522,6 @@ void objArrayKlass::oop_print_value_on(oop obj, outputStream* st) {
}
}
-#endif // PRODUCT
-
const char* objArrayKlass::internal_name() const {
return external_name();
}
diff --git a/hotspot/src/share/vm/oops/objArrayKlass.hpp b/hotspot/src/share/vm/oops/objArrayKlass.hpp
index fcc62aad5b9..fba1069b3d0 100644
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp
@@ -119,14 +119,13 @@ class objArrayKlass : public arrayKlass {
private:
static klassOop array_klass_impl (objArrayKlassHandle this_oop, bool or_null, int n, TRAPS);
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on (oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#ifndef PRODUCT
+ void oop_print_on (oop obj, outputStream* st);
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp b/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp
index 06c7f39f5eb..67626975c3c 100644
--- a/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp
+++ b/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp
@@ -99,7 +99,7 @@ klassOop objArrayKlassKlass::allocate_objArray_klass_impl(objArrayKlassKlassHand
}
} else {
// The element type is already Object. Object[] has direct super of Object.
- super_klass = KlassHandle(THREAD, SystemDictionary::object_klass());
+ super_klass = KlassHandle(THREAD, SystemDictionary::Object_klass());
}
}
@@ -278,6 +278,7 @@ void objArrayKlassKlass::oop_print_on(oop obj, outputStream* st) {
st->cr();
}
+#endif //PRODUCT
void objArrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
@@ -287,8 +288,6 @@ void objArrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
st->print("[]");
}
-#endif
-
const char* objArrayKlassKlass::internal_name() const {
return "{object array class}";
}
diff --git a/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp b/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp
index ff3e94a6016..b428cf3f54c 100644
--- a/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp
+++ b/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp
@@ -64,14 +64,13 @@ class objArrayKlassKlass : public arrayKlassKlass {
// helpers
static klassOop allocate_objArray_klass_impl(objArrayKlassKlassHandle this_oop, int n, KlassHandle element_klass, TRAPS);
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on(oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#ifndef PRODUCT
+ void oop_print_on(oop obj, outputStream* st);
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
diff --git a/hotspot/src/share/vm/oops/objArrayOop.hpp b/hotspot/src/share/vm/oops/objArrayOop.hpp
index 626f398a6be..1c1764a8751 100644
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp
+++ b/hotspot/src/share/vm/oops/objArrayOop.hpp
@@ -37,6 +37,32 @@ class objArrayOopDesc : public arrayOopDesc {
return &((T*)base())[index];
}
+private:
+ // Give size of objArrayOop in HeapWords minus the header
+ static int array_size(int length) {
+ const int OopsPerHeapWord = HeapWordSize/heapOopSize;
+ assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
+ "Else the following (new) computation would be in error");
+#ifdef ASSERT
+ // The old code is left in for sanity-checking; it'll
+ // go away pretty soon. XXX
+ // Without UseCompressedOops, this is simply:
+ // oop->length() * HeapWordsPerOop;
+ // With narrowOops, HeapWordsPerOop is 1/2, which is 0 in integer arithmetic.
+ // The oop elements are aligned up to wordSize
+ const int HeapWordsPerOop = heapOopSize/HeapWordSize;
+ int old_res;
+ if (HeapWordsPerOop > 0) {
+ old_res = length * HeapWordsPerOop;
+ } else {
+ old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
+ }
+#endif // ASSERT
+ int res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
+ assert(res == old_res, "Inconsistency between old and new.");
+ return res;
+ }
+
public:
// Returns the offset of the first element.
static int base_offset_in_bytes() {
@@ -67,27 +93,14 @@ class objArrayOopDesc : public arrayOopDesc {
// Sizing
static int header_size() { return arrayOopDesc::header_size(T_OBJECT); }
int object_size() { return object_size(length()); }
- int array_size() { return array_size(length()); }
static int object_size(int length) {
// This returns the object size in HeapWords.
- return align_object_size(header_size() + array_size(length));
- }
-
- // Give size of objArrayOop in HeapWords minus the header
- static int array_size(int length) {
- // Without UseCompressedOops, this is simply:
- // oop->length() * HeapWordsPerOop;
- // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
- // The oop elements are aligned up to wordSize
- const int HeapWordsPerOop = heapOopSize/HeapWordSize;
- if (HeapWordsPerOop > 0) {
- return length * HeapWordsPerOop;
- } else {
- const int OopsPerHeapWord = HeapWordSize/heapOopSize;
- int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
- return word_len;
- }
+ uint asz = array_size(length);
+ uint osz = align_object_size(header_size() + asz);
+ assert(osz >= asz, "no overflow");
+ assert((int)osz > 0, "no overflow");
+ return (int)osz;
}
// special iterators for index ranges, returns size of object
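The new array_size() above is a ceiling division: length oop elements are converted to HeapWords, where each word holds OopsPerHeapWord = HeapWordSize/heapOopSize elements (one for full-width oops, two for compressed oops on LP64). A small standalone sketch with assumed word and oop sizes:

#include <cassert>

// Ceiling division of 'length' oop elements into heap words, as in the patch:
//   res = ((uint)length + OopsPerHeapWord - 1) / OopsPerHeapWord
int array_size_in_words(int length, int heap_word_size, int heap_oop_size) {
  const int oops_per_word = heap_word_size / heap_oop_size;
  assert(oops_per_word >= 1 && heap_word_size % heap_oop_size == 0);
  return (static_cast<unsigned>(length) + oops_per_word - 1) / oops_per_word;
}

int main() {
  // LP64 without compressed oops: 8-byte words, 8-byte oops -> one oop per word.
  assert(array_size_in_words(5, 8, 8) == 5);
  // LP64 with compressed oops: 8-byte words, 4-byte narrow oops -> two per word,
  // so an odd length rounds up to the next whole word.
  assert(array_size_in_words(5, 8, 4) == 3);
  assert(array_size_in_words(4, 8, 4) == 2);
  return 0;
}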
diff --git a/hotspot/src/share/vm/oops/oop.cpp b/hotspot/src/share/vm/oops/oop.cpp
index da787bed038..96b04d051cd 100644
--- a/hotspot/src/share/vm/oops/oop.cpp
+++ b/hotspot/src/share/vm/oops/oop.cpp
@@ -31,14 +31,13 @@ BarrierSet* oopDesc::_bs = NULL;
#ifdef PRODUCT
void oopDesc::print_on(outputStream* st) const {}
-void oopDesc::print_value_on(outputStream* st) const {}
void oopDesc::print_address_on(outputStream* st) const {}
-char* oopDesc::print_value_string() { return NULL; }
char* oopDesc::print_string() { return NULL; }
void oopDesc::print() {}
-void oopDesc::print_value() {}
void oopDesc::print_address() {}
-#else
+
+#else //PRODUCT
+
void oopDesc::print_on(outputStream* st) const {
if (this == NULL) {
st->print_cr("NULL");
@@ -47,22 +46,6 @@ void oopDesc::print_on(outputStream* st) const {
}
}
-void oopDesc::print_value_on(outputStream* st) const {
- oop obj = oop(this);
- if (this == NULL) {
- st->print("NULL");
- } else if (java_lang_String::is_instance(obj)) {
- java_lang_String::print(obj, st);
- if (PrintOopAddress) print_address_on(st);
-#ifdef ASSERT
- } else if (!Universe::heap()->is_in(obj) || !Universe::heap()->is_in(klass())) {
- st->print("### BAD OOP %p ###", (address)obj);
-#endif
- } else {
- blueprint()->oop_print_value_on(obj, st);
- }
-}
-
void oopDesc::print_address_on(outputStream* st) const {
if (PrintOopAddress) {
st->print("{"INTPTR_FORMAT"}", this);
@@ -71,24 +54,48 @@ void oopDesc::print_address_on(outputStream* st) const {
void oopDesc::print() { print_on(tty); }
-void oopDesc::print_value() { print_value_on(tty); }
-
void oopDesc::print_address() { print_address_on(tty); }
char* oopDesc::print_string() {
- stringStream* st = new stringStream();
- print_on(st);
- return st->as_string();
-}
-
-char* oopDesc::print_value_string() {
- stringStream* st = new stringStream();
- print_value_on(st);
- return st->as_string();
+ stringStream st;
+ print_on(&st);
+ return st.as_string();
}
#endif // PRODUCT
+// The print_value functions are present in all builds, to support the disassembler.
+
+void oopDesc::print_value() {
+ print_value_on(tty);
+}
+
+char* oopDesc::print_value_string() {
+ char buf[100];
+ stringStream st(buf, sizeof(buf));
+ print_value_on(&st);
+ return st.as_string();
+}
+
+void oopDesc::print_value_on(outputStream* st) const {
+ oop obj = oop(this);
+ if (this == NULL) {
+ st->print("NULL");
+ } else if (java_lang_String::is_instance(obj)) {
+ java_lang_String::print(obj, st);
+#ifndef PRODUCT
+ if (PrintOopAddress) print_address_on(st);
+#endif //PRODUCT
+#ifdef ASSERT
+ } else if (!Universe::heap()->is_in(obj) || !Universe::heap()->is_in(klass())) {
+ st->print("### BAD OOP %p ###", (address)obj);
+#endif //ASSERT
+ } else {
+ blueprint()->oop_print_value_on(obj, st);
+ }
+}
+
+
void oopDesc::verify_on(outputStream* st) {
if (this != NULL) {
blueprint()->oop_verify_on(this, st);
diff --git a/hotspot/src/share/vm/oops/oop.hpp b/hotspot/src/share/vm/oops/oop.hpp
index 9b7e9baf5c0..c67220a26e6 100644
--- a/hotspot/src/share/vm/oops/oop.hpp
+++ b/hotspot/src/share/vm/oops/oop.hpp
@@ -30,13 +30,12 @@
// no virtual functions allowed
// store into oop with store check
-template <class T> void oop_store(T* p, oop v);
-template <class T> void oop_store(volatile T* p, oop v);
+template <class T> inline void oop_store(T* p, oop v);
+template <class T> inline void oop_store(volatile T* p, oop v);
// store into oop without store check
-template <class T> void oop_store_without_check(T* p, oop v);
-template <class T> void oop_store_without_check(volatile T* p, oop v);
-
+template <class T> inline void oop_store_without_check(T* p, oop v);
+template <class T> inline void oop_store_without_check(volatile T* p, oop v);
extern bool always_do_update_barrier;
diff --git a/hotspot/src/share/vm/oops/symbolKlass.cpp b/hotspot/src/share/vm/oops/symbolKlass.cpp
index d0b6e2f33b5..4307beb8897 100644
--- a/hotspot/src/share/vm/oops/symbolKlass.cpp
+++ b/hotspot/src/share/vm/oops/symbolKlass.cpp
@@ -213,6 +213,8 @@ void symbolKlass::oop_print_on(oop obj, outputStream* st) {
st->print("'");
}
+#endif //PRODUCT
+
void symbolKlass::oop_print_value_on(oop obj, outputStream* st) {
symbolOop sym = symbolOop(obj);
st->print("'");
@@ -222,8 +224,6 @@ void symbolKlass::oop_print_value_on(oop obj, outputStream* st) {
st->print("'");
}
-#endif //PRODUCT
-
const char* symbolKlass::internal_name() const {
return "{symbol}";
}
diff --git a/hotspot/src/share/vm/oops/symbolKlass.hpp b/hotspot/src/share/vm/oops/symbolKlass.hpp
index aca3e27175b..c03f2080fcc 100644
--- a/hotspot/src/share/vm/oops/symbolKlass.hpp
+++ b/hotspot/src/share/vm/oops/symbolKlass.hpp
@@ -65,10 +65,10 @@ class symbolKlass : public Klass {
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
// Printing
void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
const char* internal_name() const;
};
diff --git a/hotspot/src/share/vm/oops/symbolOop.cpp b/hotspot/src/share/vm/oops/symbolOop.cpp
index 6308735223d..b99a303f490 100644
--- a/hotspot/src/share/vm/oops/symbolOop.cpp
+++ b/hotspot/src/share/vm/oops/symbolOop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,11 @@
# include "incls/_precompiled.incl"
# include "incls/_symbolOop.cpp.incl"
+
+// ------------------------------------------------------------------
+// symbolOopDesc::equals
+//
+// Compares the symbol with a string of the given length.
bool symbolOopDesc::equals(const char* str, int len) const {
int l = utf8_length();
if (l != len) return false;
@@ -36,6 +41,48 @@ bool symbolOopDesc::equals(const char* str, int len) const {
return true;
}
+
+// ------------------------------------------------------------------
+// symbolOopDesc::starts_with
+//
+// Tests if the symbol starts with the specified prefix of the given
+// length.
+bool symbolOopDesc::starts_with(const char* prefix, int len) const {
+ if (len > utf8_length()) return false;
+ while (len-- > 0) {
+ if (prefix[len] != (char) byte_at(len))
+ return false;
+ }
+ assert(len == -1, "we should be at the beginning");
+ return true;
+}
+
+
+// ------------------------------------------------------------------
+// symbolOopDesc::index_of_at
+//
+// Searches this symbol's utf8 bytes, starting at index i, for the given string.
+// Returns -1 if it does not occur; otherwise returns the first index at which str occurs.
+int symbolOopDesc::index_of_at(int i, const char* str, int len) const {
+ assert(i >= 0 && i <= utf8_length(), "oob");
+ if (len <= 0) return 0;
+ char first_char = str[0];
+ address bytes = (address) ((symbolOopDesc*)this)->base();
+ address limit = bytes + utf8_length() - len; // inclusive limit
+ address scan = bytes + i;
+ if (scan > limit)
+ return -1;
+ for (;;) {
+ scan = (address) memchr(scan, first_char, (limit + 1 - scan));
+ if (scan == NULL)
+ return -1; // not found
+ assert(scan >= bytes+i && scan <= limit, "scan oob");
+ if (memcmp(scan, str, len) == 0)
+ return (int)(scan - bytes);
+ }
+}
+
+
char* symbolOopDesc::as_C_string(char* buf, int size) const {
if (size > 0) {
int len = MIN2(size - 1, utf8_length());
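starts_with() and index_of_at() added above search a symbol's utf8 bytes; index_of_at(i, str, len) returns the first byte index at or after i where str occurs, or -1. A standalone analogue over a plain byte string (std::string stands in for the symbol's utf8 bytes here):

#include <cassert>
#include <cstring>
#include <string>

// Returns the first index at or after 'i' where 'str' occurs in 'sym', or -1.
int index_of_at(const std::string& sym, int i, const char* str) {
  const int len = static_cast<int>(std::strlen(str));
  if (len == 0) return 0;                                   // mirrors the len <= 0 early return
  if (i < 0 || i > static_cast<int>(sym.size())) return -1;
  std::string::size_type pos = sym.find(str, static_cast<std::string::size_type>(i));
  return pos == std::string::npos ? -1 : static_cast<int>(pos);
}

int main() {
  std::string sym = "java/dyn/MethodHandle";                // a symbol-like utf8 string
  assert(index_of_at(sym, 0, "dyn") == 5);
  assert(index_of_at(sym, 6, "dyn") == -1);                 // search starts past the only match
  assert(index_of_at(sym, 0, "Handle") == 15);
  return 0;
}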
diff --git a/hotspot/src/share/vm/oops/symbolOop.hpp b/hotspot/src/share/vm/oops/symbolOop.hpp
index 49f95ec510d..15a4b0a8504 100644
--- a/hotspot/src/share/vm/oops/symbolOop.hpp
+++ b/hotspot/src/share/vm/oops/symbolOop.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,21 @@ class symbolOopDesc : public oopDesc {
void set_utf8_length(int len) { _length = len; }
- // Compares the symbol with a string
+ // Compares the symbol with a string.
bool equals(const char* str, int len) const;
+ bool equals(const char* str) const { return equals(str, (int) strlen(str)); }
+
+ // Tests if the symbol starts with the given prefix.
+ bool starts_with(const char* prefix, int len) const;
+ bool starts_with(const char* prefix) const {
+ return starts_with(prefix, (int) strlen(prefix));
+ }
+
+ // Finds the first index at or after position i where str occurs in this symbol; -1 if not found.
+ int index_of_at(int i, const char* str, int len) const;
+ int index_of_at(int i, const char* str) const {
+ return index_of_at(i, str, (int) strlen(str));
+ }
// Three-way compare for sorting; returns -1/0/1 if receiver is ==/> than arg
// note that the ordering is not alphabetical
diff --git a/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp b/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp
index d987f0d14be..da19938fe83 100644
--- a/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp
+++ b/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp
@@ -45,6 +45,7 @@ void typeArrayKlassKlass::oop_print_on(oop obj, outputStream* st) {
Klass:: oop_print_on(obj, st);
}
+#endif //PRODUCT
void typeArrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
@@ -63,8 +64,6 @@ void typeArrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
st->print("}");
}
-#endif
-
const char* typeArrayKlassKlass::internal_name() const {
return "{type array class}";
}
diff --git a/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp b/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp
index fabda84467a..01b0f9def74 100644
--- a/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp
+++ b/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp
@@ -47,12 +47,12 @@ class typeArrayKlassKlass : public arrayKlassKlass {
static int header_size() { return oopDesc::header_size() + sizeof(typeArrayKlassKlass)/HeapWordSize; }
int object_size() const { return align_object_size(header_size()); }
-#ifndef PRODUCT
public:
// Printing
- void oop_print_on(oop obj, outputStream* st);
void oop_print_value_on(oop obj, outputStream* st);
-#endif
- public:
+#ifndef PRODUCT
+ void oop_print_on(oop obj, outputStream* st);
+#endif //PRODUCT
+
const char* internal_name() const;
};
diff --git a/hotspot/src/share/vm/opto/bytecodeInfo.cpp b/hotspot/src/share/vm/opto/bytecodeInfo.cpp
index eeb1658913a..b199d7c7047 100644
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp
@@ -27,11 +27,16 @@
//=============================================================================
//------------------------------InlineTree-------------------------------------
-InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* callee, JVMState* caller_jvms, int caller_bci, float site_invoke_ratio )
+InlineTree::InlineTree( Compile* c,
+ const InlineTree *caller_tree, ciMethod* callee,
+ JVMState* caller_jvms, int caller_bci,
+ float site_invoke_ratio, int site_depth_adjust)
: C(c), _caller_jvms(caller_jvms),
_caller_tree((InlineTree*)caller_tree),
_method(callee), _site_invoke_ratio(site_invoke_ratio),
- _count_inline_bcs(method()->code_size()) {
+ _site_depth_adjust(site_depth_adjust),
+ _count_inline_bcs(method()->code_size())
+{
NOT_PRODUCT(_count_inlines = 0;)
if (_caller_jvms != NULL) {
// Keep a private copy of the caller_jvms:
@@ -40,7 +45,7 @@ InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* cal
assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining");
}
assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
- assert((caller_tree == NULL ? 0 : caller_tree->inline_depth() + 1) == inline_depth(), "correct (redundant) depth parameter");
+ assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
if (UseOldInlining) {
// Update hierarchical counts, count_inline_bcs() and count_inlines()
@@ -52,10 +57,13 @@ InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* cal
}
}
-InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio)
+InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
+ float site_invoke_ratio, int site_depth_adjust)
: C(c), _caller_jvms(caller_jvms), _caller_tree(NULL),
_method(callee_method), _site_invoke_ratio(site_invoke_ratio),
- _count_inline_bcs(method()->code_size()) {
+ _site_depth_adjust(site_depth_adjust),
+ _count_inline_bcs(method()->code_size())
+{
NOT_PRODUCT(_count_inlines = 0;)
assert(!UseOldInlining, "do not use for old stuff");
}
@@ -180,6 +188,10 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* calle
return NULL;
}
+ // Always inline MethodHandle methods.
+ if (callee_method->is_method_handle_invoke())
+ return NULL;
+
// First check all inlining restrictions which are required for correctness
if (callee_method->is_abstract()) return "abstract method";
// note: we allow ik->is_abstract()
@@ -265,10 +277,13 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
return msg;
}
- bool is_accessor = InlineAccessors && callee_method->is_accessor();
+ if (InlineAccessors && callee_method->is_accessor()) {
+ // accessor methods are not subject to any of the following limits.
+ return NULL;
+ }
// suppress a few checks for accessors and trivial methods
- if (!is_accessor && callee_method->code_size() > MaxTrivialSize) {
+ if (callee_method->code_size() > MaxTrivialSize) {
// don't inline into giant methods
if (C->unique() > (uint)NodeCountInliningCutoff) {
@@ -287,7 +302,7 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
}
}
- if (!C->do_inlining() && InlineAccessors && !is_accessor) {
+ if (!C->do_inlining() && InlineAccessors) {
return "not an accessor";
}
if( inline_depth() > MaxInlineLevel ) {
@@ -322,14 +337,17 @@ bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* call
// stricter than callee_holder->is_initialized()
ciBytecodeStream iter(caller_method);
iter.force_bci(caller_bci);
- int index = iter.get_index_int();
- if( !caller_method->is_klass_loaded(index, true) ) {
- return false;
- }
- // Try to do constant pool resolution if running Xcomp
Bytecodes::Code call_bc = iter.cur_bc();
- if( !caller_method->check_call(index, call_bc == Bytecodes::_invokestatic) ) {
- return false;
+ // An invokedynamic instruction does not have a klass.
+ if (call_bc != Bytecodes::_invokedynamic) {
+ int index = iter.get_index_int();
+ if (!caller_method->is_klass_loaded(index, true)) {
+ return false;
+ }
+ // Try to do constant pool resolution if running Xcomp
+ if( !caller_method->check_call(index, call_bc == Bytecodes::_invokestatic) ) {
+ return false;
+ }
}
}
// We will attempt to see if a class/field/etc got properly loaded. If it
@@ -457,7 +475,30 @@ InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, J
if (old_ilt != NULL) {
return old_ilt;
}
- InlineTree *ilt = new InlineTree( C, this, callee_method, caller_jvms, caller_bci, recur_frequency );
+ int new_depth_adjust = 0;
+ if (caller_jvms->method() != NULL) {
+ if ((caller_jvms->method()->name() == ciSymbol::invoke_name() &&
+ caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_MethodHandle())
+ || caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_InvokeDynamic())
+ /* @@@ FIXME:
+ if (caller_jvms->method()->is_method_handle_adapter())
+ */
+ new_depth_adjust -= 1; // don't count actions in MH or indy adapter frames
+ else if (callee_method->is_method_handle_invoke()) {
+ new_depth_adjust -= 1; // don't count method handle calls from the java.dyn implementation
+ }
+ if (new_depth_adjust != 0 && PrintInlining) {
+ stringStream nm1; caller_jvms->method()->print_name(&nm1);
+ stringStream nm2; callee_method->print_name(&nm2);
+ tty->print_cr("discounting inlining depth from %s to %s", nm1.base(), nm2.base());
+ }
+ if (new_depth_adjust != 0 && C->log()) {
+ int id1 = C->log()->identify(caller_jvms->method());
+ int id2 = C->log()->identify(callee_method);
+ C->log()->elem("inline_depth_discount caller='%d' callee='%d'", id1, id2);
+ }
+ }
+ InlineTree *ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _site_depth_adjust + new_depth_adjust);
_subtrees.append( ilt );
NOT_PRODUCT( _count_inlines += 1; )
@@ -483,7 +524,7 @@ InlineTree *InlineTree::build_inline_tree_root() {
Compile* C = Compile::current();
// Root of inline tree
- InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F);
+ InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, 0);
return ilt;
}
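The new site_depth_adjust parameter lets the inliner keep the real stack depth while discounting frames that belong to method handle or invokedynamic adapters, so MaxInlineLevel constrains the logical inlining depth instead of penalizing the java.dyn plumbing. A small standalone model of that accounting; the -1 per adapter site follows the change above, while the limit value and the helper types are assumptions for the example:

#include <cassert>
#include <vector>

struct Site {
  bool is_mh_or_indy_adapter;  // would MH/invokedynamic plumbing own this frame?
};

// Raw stack depth plus the accumulated per-site adjustment (-1 for every
// adapter frame), i.e. the depth the inlining limit is compared against.
int effective_inline_depth(const std::vector<Site>& chain) {
  int stack_depth = 0, depth_adjust = 0;
  for (size_t i = 0; i < chain.size(); i++) {
    stack_depth += 1;
    if (chain[i].is_mh_or_indy_adapter) depth_adjust -= 1;
  }
  return stack_depth + depth_adjust;
}

int main() {
  const int kMaxInlineLevel = 9;  // assumed limit, for the example only
  // Three real frames with two MH adapter frames in between: only the real
  // frames count toward the limit.
  const Site frames[] = { {false}, {true}, {true}, {false}, {false} };
  std::vector<Site> chain(frames, frames + 5);
  int depth = effective_inline_depth(chain);
  assert(depth == 3);
  return depth <= kMaxInlineLevel ? 0 : 1;
}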
diff --git a/hotspot/src/share/vm/opto/c2_globals.cpp b/hotspot/src/share/vm/opto/c2_globals.cpp
index 5715b24ba57..40594bf940e 100644
--- a/hotspot/src/share/vm/opto/c2_globals.cpp
+++ b/hotspot/src/share/vm/opto/c2_globals.cpp
@@ -25,4 +25,4 @@
# include "incls/_precompiled.incl"
# include "incls/_c2_globals.cpp.incl"
-C2_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C2_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
diff --git a/hotspot/src/share/vm/opto/c2_globals.hpp b/hotspot/src/share/vm/opto/c2_globals.hpp
index 091ad4a9bb4..fd3256ade33 100644
--- a/hotspot/src/share/vm/opto/c2_globals.hpp
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp
@@ -26,7 +26,7 @@
// Defines all globals flags used by the server compiler.
//
-#define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+#define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct) \
\
notproduct(intx, CompileZapFirst, 0, \
"If +ZapDeadCompiledLocals, " \
@@ -154,6 +154,12 @@
notproduct(bool, TraceProfileTripCount, false, \
"Trace profile loop trip count information") \
\
+ product(bool, UseLoopPredicate, true, \
+ "Generate a predicate to select fast/slow loop versions") \
+ \
+ develop(bool, TraceLoopPredicate, false, \
+ "Trace generation of loop predicates") \
+ \
develop(bool, OptoCoalesce, true, \
"Use Conservative Copy Coalescing in the Register Allocator") \
\
@@ -394,6 +400,12 @@
product(bool, UseOptoBiasInlining, true, \
"Generate biased locking code in C2 ideal graph") \
\
+ product(bool, OptimizeStringConcat, false, \
+ "Optimize the construction of Strings by StringBuilder") \
+ \
+ notproduct(bool, PrintOptimizeStringConcat, false, \
+ "Print information about transformations performed on Strings") \
+ \
product(intx, ValueSearchLimit, 1000, \
"Recursion limit in PhaseMacroExpand::value_from_mem_phi") \
\
@@ -413,4 +425,4 @@
product(bool, BlockLayoutRotateLoops, true, \
"Allow back branches to be fall throughs in the block layour") \
-C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
diff --git a/hotspot/src/share/vm/opto/callGenerator.cpp b/hotspot/src/share/vm/opto/callGenerator.cpp
index 8ce7c0ce57d..37272feb19e 100644
--- a/hotspot/src/share/vm/opto/callGenerator.cpp
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -98,12 +98,21 @@ JVMState* ParseGenerator::generate(JVMState* jvms) {
//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
-public:
- DirectCallGenerator(ciMethod* method)
- : CallGenerator(method)
+ private:
+ CallStaticJavaNode* _call_node;
+ // Force separate memory and I/O projections for the exceptional
+ // paths to facilitate late inlining.
+ bool _separate_io_proj;
+
+ public:
+ DirectCallGenerator(ciMethod* method, bool separate_io_proj)
+ : CallGenerator(method),
+ _separate_io_proj(separate_io_proj)
{
}
virtual JVMState* generate(JVMState* jvms);
+
+ CallStaticJavaNode* call_node() const { return _call_node; }
};
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
@@ -127,14 +136,82 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) {
}
// Mark the call node as virtual, sort of:
call->set_optimized_virtual(true);
+ if (method()->is_method_handle_invoke())
+ call->set_method_handle_invoke(true);
}
kit.set_arguments_for_java_call(call);
+ kit.set_edges_for_java_call(call, false, _separate_io_proj);
+ Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
+ kit.push_node(method()->return_type()->basic_type(), ret);
+ _call_node = call; // Save the call node in case we need it later
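+ // (LateInlineCallGenerator retrieves it via call_node() when it later replaces the call with the inlined body.)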
+ return kit.transfer_exceptions_into_jvms();
+}
+
+//---------------------------DynamicCallGenerator-----------------------------
+// Internal class which handles all out-of-line invokedynamic calls.
+class DynamicCallGenerator : public CallGenerator {
+public:
+ DynamicCallGenerator(ciMethod* method)
+ : CallGenerator(method)
+ {
+ }
+ virtual JVMState* generate(JVMState* jvms);
+};
+
+JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
+ GraphKit kit(jvms);
+
+ if (kit.C->log() != NULL) {
+ kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci());
+ }
+
+ // Get the constant pool cache from the caller class.
+ ciMethod* caller_method = jvms->method();
+ ciBytecodeStream str(caller_method);
+ str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
+ assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
+ ciCPCache* cpcache = str.get_cpcache();
+
+ // Get the offset of the CallSite from the constant pool cache
+ // pointer.
+ int index = str.get_method_index();
+ size_t call_site_offset = cpcache->get_f1_offset(index);
+
+ // Load the CallSite object from the constant pool cache.
+ const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
+ Node* cpcache_adr = kit.makecon(cpcache_ptr);
+ Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
+ Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
+
+ // Load the target MethodHandle from the CallSite object.
+ Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
+ Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);
+
+ address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();
+
+ CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
+ // invokedynamic is treated as an optimized invokevirtual.
+ call->set_optimized_virtual(true);
+ // Take extra care (in the presence of argument motion) not to trash the SP:
+ call->set_method_handle_invoke(true);
+
+ // Pass the target MethodHandle as first argument and shift the
+ // other arguments.
+ call->init_req(0 + TypeFunc::Parms, target_mh);
+ uint nargs = call->method()->arg_size();
+ for (uint i = 1; i < nargs; i++) {
+ Node* arg = kit.argument(i - 1);
+ call->init_req(i + TypeFunc::Parms, arg);
+ }
+
kit.set_edges_for_java_call(call);
Node* ret = kit.set_results_for_java_call(call);
kit.push_node(method()->return_type()->basic_type(), ret);
return kit.transfer_exceptions_into_jvms();
}
+//--------------------------VirtualCallGenerator------------------------------
+// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
int _vtable_index;
@@ -149,8 +226,6 @@ public:
virtual JVMState* generate(JVMState* jvms);
};
-//--------------------------VirtualCallGenerator------------------------------
-// Internal class which handles all out-of-line calls checking receiver type.
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
GraphKit kit(jvms);
Node* receiver = kit.argument(0);
@@ -238,16 +313,124 @@ CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
return new ParseGenerator(m, expected_uses, true);
}
-CallGenerator* CallGenerator::for_direct_call(ciMethod* m) {
+CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
assert(!m->is_abstract(), "for_direct_call mismatch");
- return new DirectCallGenerator(m);
+ return new DirectCallGenerator(m, separate_io_proj);
+}
+
+CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
+ assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch");
+ return new DynamicCallGenerator(m);
}
CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
assert(!m->is_static(), "for_virtual_call mismatch");
+ assert(!m->is_method_handle_invoke(), "should be a direct call");
return new VirtualCallGenerator(m, vtable_index);
}
+// Allow inlining decisions to be delayed
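+// The call is first emitted as an ordinary out-of-line CallStaticJava with separate I/O projections;
+// once the main parse has finished, do_late_inline() replays the site through the wrapped
+// CallGenerator and splices the inlined body in place of the call.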
+class LateInlineCallGenerator : public DirectCallGenerator {
+ CallGenerator* _inline_cg;
+
+ public:
+ LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
+ DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
+
+ virtual bool is_late_inline() const { return true; }
+
+ // Convert the CallStaticJava into an inline
+ virtual void do_late_inline();
+
+ JVMState* generate(JVMState* jvms) {
+ // Record that this call site should be revisited once the main
+ // parse is finished.
+ Compile::current()->add_late_inline(this);
+
+ // Emit the CallStaticJava and request separate projections so
+ // that the late inlining logic can distinguish between fall
+ // through and exceptional uses of the memory and io projections
+ // as is done for allocations and macro expansion.
+ return DirectCallGenerator::generate(jvms);
+ }
+
+};
+
+
+void LateInlineCallGenerator::do_late_inline() {
+ // Can't inline it
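+ // (the call node is gone, has no remaining uses, or sits on a dead control path)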
+ if (call_node() == NULL || call_node()->outcnt() == 0 ||
+ call_node()->in(0) == NULL || call_node()->in(0)->is_top())
+ return;
+
+ CallStaticJavaNode* call = call_node();
+
+ // Make a clone of the JVMState that is appropriate to use for driving a parse
+ Compile* C = Compile::current();
+ JVMState* jvms = call->jvms()->clone_shallow(C);
+ uint size = call->req();
+ SafePointNode* map = new (C, size) SafePointNode(size, jvms);
+ for (uint i1 = 0; i1 < size; i1++) {
+ map->init_req(i1, call->in(i1));
+ }
+
+ // Make sure the state is a MergeMem for parsing.
+ if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
+ map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+ }
+
+ // Make enough space for the expression stack and transfer the incoming arguments
+ int nargs = method()->arg_size();
+ jvms->set_map(map);
+ map->ensure_stack(jvms, jvms->method()->max_stack());
+ if (nargs > 0) {
+ for (int i1 = 0; i1 < nargs; i1++) {
+ map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
+ }
+ }
+
+ CompileLog* log = C->log();
+ if (log != NULL) {
+ log->head("late_inline method='%d'", log->identify(method()));
+ JVMState* p = jvms;
+ while (p != NULL) {
+ log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+ p = p->caller();
+ }
+ log->tail("late_inline");
+ }
+
+ // Setup default node notes to be picked up by the inlining
+ Node_Notes* old_nn = C->default_node_notes();
+ if (old_nn != NULL) {
+ Node_Notes* entry_nn = old_nn->clone(C);
+ entry_nn->set_jvms(jvms);
+ C->set_default_node_notes(entry_nn);
+ }
+
+ // Now perform the inlining using the synthesized JVMState
+ JVMState* new_jvms = _inline_cg->generate(jvms);
+ if (new_jvms == NULL) return; // no change
+ if (C->failing()) return;
+
+ // Capture any exceptional control flow
+ GraphKit kit(new_jvms);
+
+ // Find the result object
+ Node* result = C->top();
+ int result_size = method()->return_type()->size();
+ if (result_size != 0 && !kit.stopped()) {
+ result = (result_size == 1) ? kit.pop() : kit.pop_pair();
+ }
+
+ kit.replace_call(call, result);
+}
+
+
+CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
+ return new LateInlineCallGenerator(method, inline_cg);
+}
+
//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
@@ -315,70 +498,7 @@ JVMState* WarmCallGenerator::generate(JVMState* jvms) {
}
void WarmCallInfo::make_hot() {
- Compile* C = Compile::current();
- // Replace the callnode with something better.
- CallJavaNode* call = this->call()->as_CallJava();
- ciMethod* method = call->method();
- int nargs = method->arg_size();
- JVMState* jvms = call->jvms()->clone_shallow(C);
- uint size = TypeFunc::Parms + MAX2(2, nargs);
- SafePointNode* map = new (C, size) SafePointNode(size, jvms);
- for (uint i1 = 0; i1 < (uint)(TypeFunc::Parms + nargs); i1++) {
- map->init_req(i1, call->in(i1));
- }
- jvms->set_map(map);
- jvms->set_offsets(map->req());
- jvms->set_locoff(TypeFunc::Parms);
- jvms->set_stkoff(TypeFunc::Parms);
- GraphKit kit(jvms);
-
- JVMState* new_jvms = _hot_cg->generate(kit.jvms());
- if (new_jvms == NULL) return; // no change
- if (C->failing()) return;
-
- kit.set_jvms(new_jvms);
- Node* res = C->top();
- int res_size = method->return_type()->size();
- if (res_size != 0) {
- kit.inc_sp(-res_size);
- res = kit.argument(0);
- }
- GraphKit ekit(kit.combine_and_pop_all_exception_states()->jvms());
-
- // Replace the call:
- for (DUIterator i = call->outs(); call->has_out(i); i++) {
- Node* n = call->out(i);
- Node* nn = NULL; // replacement
- if (n->is_Proj()) {
- ProjNode* nproj = n->as_Proj();
- assert(nproj->_con < (uint)(TypeFunc::Parms + (res_size ? 1 : 0)), "sane proj");
- if (nproj->_con == TypeFunc::Parms) {
- nn = res;
- } else {
- nn = kit.map()->in(nproj->_con);
- }
- if (nproj->_con == TypeFunc::I_O) {
- for (DUIterator j = nproj->outs(); nproj->has_out(j); j++) {
- Node* e = nproj->out(j);
- if (e->Opcode() == Op_CreateEx) {
- e->replace_by(ekit.argument(0));
- } else if (e->Opcode() == Op_Catch) {
- for (DUIterator k = e->outs(); e->has_out(k); k++) {
- CatchProjNode* p = e->out(j)->as_CatchProj();
- if (p->is_handler_proj()) {
- p->replace_by(ekit.control());
- } else {
- p->replace_by(kit.control());
- }
- }
- }
- }
- }
- }
- NOT_PRODUCT(if (!nn) n->dump(2));
- assert(nn != NULL, "don't know what to do with this user");
- n->replace_by(nn);
- }
+ Unimplemented();
}
void WarmCallInfo::make_cold() {
@@ -527,6 +647,155 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
}
+//------------------------PredictedDynamicCallGenerator-----------------------
+// Internal class which handles out-of-line invokedynamic calls guarded by a check of the predicted MethodHandle target.
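+// If the CallSite's current target still equals the predicted MethodHandle, control takes the
+// inlined fast path (_if_hit); otherwise it falls back to the out-of-line dynamic call (_if_missed).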
+class PredictedDynamicCallGenerator : public CallGenerator {
+ ciMethodHandle* _predicted_method_handle;
+ CallGenerator* _if_missed;
+ CallGenerator* _if_hit;
+ float _hit_prob;
+
+public:
+ PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle,
+ CallGenerator* if_missed,
+ CallGenerator* if_hit,
+ float hit_prob)
+ : CallGenerator(if_missed->method()),
+ _predicted_method_handle(predicted_method_handle),
+ _if_missed(if_missed),
+ _if_hit(if_hit),
+ _hit_prob(hit_prob)
+ {}
+
+ virtual bool is_inline() const { return _if_hit->is_inline(); }
+ virtual bool is_deferred() const { return _if_hit->is_deferred(); }
+
+ virtual JVMState* generate(JVMState* jvms);
+};
+
+
+CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
+ CallGenerator* if_missed,
+ CallGenerator* if_hit,
+ float hit_prob) {
+ return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
+}
+
+
+JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
+ GraphKit kit(jvms);
+ PhaseGVN& gvn = kit.gvn();
+
+ CompileLog* log = kit.C->log();
+ if (log != NULL) {
+ log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
+ }
+
+ // Get the constant pool cache from the caller class.
+ ciMethod* caller_method = jvms->method();
+ ciBytecodeStream str(caller_method);
+ str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
+ ciCPCache* cpcache = str.get_cpcache();
+
+ // Get the offset of the CallSite from the constant pool cache
+ // pointer.
+ int index = str.get_method_index();
+ size_t call_site_offset = cpcache->get_f1_offset(index);
+
+ // Load the CallSite object from the constant pool cache.
+ const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
+ Node* cpcache_adr = kit.makecon(cpcache_ptr);
+ Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
+ Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
+
+ // Load the target MethodHandle from the CallSite object.
+ Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
+ Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
+
+ // Check if the MethodHandle is still the same.
+ const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
+ Node* predicted_mh = kit.makecon(predicted_mh_ptr);
+
+ Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
+ Node* bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+ IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
+ kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
+ Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));
+
+ SafePointNode* slow_map = NULL;
+ JVMState* slow_jvms;
+ { PreserveJVMState pjvms(&kit);
+ kit.set_control(slow_ctl);
+ if (!kit.stopped()) {
+ slow_jvms = _if_missed->generate(kit.sync_jvms());
+ assert(slow_jvms != NULL, "miss path must not fail to generate");
+ kit.add_exception_states_from(slow_jvms);
+ kit.set_map(slow_jvms->map());
+ if (!kit.stopped())
+ slow_map = kit.stop();
+ }
+ }
+
+ if (kit.stopped()) {
+ // The target can never match the predicted MethodHandle, so only the slow path remains.
+ kit.set_jvms(slow_jvms);
+ return kit.transfer_exceptions_into_jvms();
+ }
+
+ // Make the hot call:
+ JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
+ if (new_jvms == NULL) {
+ // Inline failed, so make a direct call.
+ assert(_if_hit->is_inline(), "must have been a failed inline");
+ CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
+ new_jvms = cg->generate(kit.sync_jvms());
+ }
+ kit.add_exception_states_from(new_jvms);
+ kit.set_jvms(new_jvms);
+
+ // Need to merge slow and fast?
+ if (slow_map == NULL) {
+ // The fast path is the only path remaining.
+ return kit.transfer_exceptions_into_jvms();
+ }
+
+ if (kit.stopped()) {
+ // Inlined method threw an exception, so it's just the slow path after all.
+ kit.set_jvms(slow_jvms);
+ return kit.transfer_exceptions_into_jvms();
+ }
+
+ // Finish the diamond.
+ kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
+ RegionNode* region = new (kit.C, 3) RegionNode(3);
+ region->init_req(1, kit.control());
+ region->init_req(2, slow_map->control());
+ kit.set_control(gvn.transform(region));
+ Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
+ iophi->set_req(2, slow_map->i_o());
+ kit.set_i_o(gvn.transform(iophi));
+ kit.merge_memory(slow_map->merged_memory(), region, 2);
+ uint tos = kit.jvms()->stkoff() + kit.sp();
+ uint limit = slow_map->req();
+ for (uint i = TypeFunc::Parms; i < limit; i++) {
+ // Skip unused stack slots; fast forward to monoff();
+ if (i == tos) {
+ i = kit.jvms()->monoff();
+ if( i >= limit ) break;
+ }
+ Node* m = kit.map()->in(i);
+ Node* n = slow_map->in(i);
+ if (m != n) {
+ const Type* t = gvn.type(m)->meet(gvn.type(n));
+ Node* phi = PhiNode::make(region, m, t);
+ phi->set_req(2, n);
+ kit.map()->set_req(i, gvn.transform(phi));
+ }
+ }
+ return kit.transfer_exceptions_into_jvms();
+}
+
+
//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class UncommonTrapCallGenerator : public CallGenerator {
diff --git a/hotspot/src/share/vm/opto/callGenerator.hpp b/hotspot/src/share/vm/opto/callGenerator.hpp
index bbd47ca4aab..ecc7a4ac2e4 100644
--- a/hotspot/src/share/vm/opto/callGenerator.hpp
+++ b/hotspot/src/share/vm/opto/callGenerator.hpp
@@ -57,6 +57,13 @@ class CallGenerator : public ResourceObj {
// is_trap: Does not return to the caller. (E.g., uncommon trap.)
virtual bool is_trap() const { return false; }
+ // is_late_inline: supports conversion of call into an inline
+ virtual bool is_late_inline() const { return false; }
+ // Replace the call with an inline version of the code
+ virtual void do_late_inline() { ShouldNotReachHere(); }
+
+ virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }
+
// Note: It is possible for a CG to be both inline and virtual.
// (The hashCode intrinsic does a vtable check and an inlined fast path.)
@@ -92,9 +99,13 @@ class CallGenerator : public ResourceObj {
static CallGenerator* for_osr(ciMethod* m, int osr_bci);
// How to generate vanilla out-of-line call sites:
- static CallGenerator* for_direct_call(ciMethod* m); // static, special
+ static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false); // static, special
+ static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic
static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface
+ // How to replace a direct call with an inline version
+ static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
+
// How to make a call but defer the decision whether to inline or not.
static CallGenerator* for_warm_call(WarmCallInfo* ci,
CallGenerator* if_cold,
@@ -106,6 +117,12 @@ class CallGenerator : public ResourceObj {
CallGenerator* if_hit,
float hit_prob);
+ // How to make a call that optimistically assumes a MethodHandle target:
+ static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
+ CallGenerator* if_missed,
+ CallGenerator* if_hit,
+ float hit_prob);
+
// How to make a call that gives up and goes back to the interpreter:
static CallGenerator* for_uncommon_trap(ciMethod* m,
Deoptimization::DeoptReason reason,
diff --git a/hotspot/src/share/vm/opto/callnode.cpp b/hotspot/src/share/vm/opto/callnode.cpp
index 5c69109b8fe..a5902713111 100644
--- a/hotspot/src/share/vm/opto/callnode.cpp
+++ b/hotspot/src/share/vm/opto/callnode.cpp
@@ -693,6 +693,84 @@ Node *CallNode::result_cast() {
}
+void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
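+ // Clear every slot first; the loop below fills in only those projections that actually exist.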
+ projs->fallthrough_proj = NULL;
+ projs->fallthrough_catchproj = NULL;
+ projs->fallthrough_ioproj = NULL;
+ projs->catchall_ioproj = NULL;
+ projs->catchall_catchproj = NULL;
+ projs->fallthrough_memproj = NULL;
+ projs->catchall_memproj = NULL;
+ projs->resproj = NULL;
+ projs->exobj = NULL;
+
+ for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+ ProjNode *pn = fast_out(i)->as_Proj();
+ if (pn->outcnt() == 0) continue;
+ switch (pn->_con) {
+ case TypeFunc::Control:
+ {
+ // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
+ projs->fallthrough_proj = pn;
+ DUIterator_Fast jmax, j = pn->fast_outs(jmax);
+ const Node *cn = pn->fast_out(j);
+ if (cn->is_Catch()) {
+ ProjNode *cpn = NULL;
+ for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
+ cpn = cn->fast_out(k)->as_Proj();
+ assert(cpn->is_CatchProj(), "must be a CatchProjNode");
+ if (cpn->_con == CatchProjNode::fall_through_index)
+ projs->fallthrough_catchproj = cpn;
+ else {
+ assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
+ projs->catchall_catchproj = cpn;
+ }
+ }
+ }
+ break;
+ }
+ case TypeFunc::I_O:
+ if (pn->_is_io_use)
+ projs->catchall_ioproj = pn;
+ else
+ projs->fallthrough_ioproj = pn;
+ for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
+ Node* e = pn->out(j);
+ if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) {
+ assert(projs->exobj == NULL, "only one");
+ projs->exobj = e;
+ }
+ }
+ break;
+ case TypeFunc::Memory:
+ if (pn->_is_io_use)
+ projs->catchall_memproj = pn;
+ else
+ projs->fallthrough_memproj = pn;
+ break;
+ case TypeFunc::Parms:
+ projs->resproj = pn;
+ break;
+ default:
+ assert(false, "unexpected projection from allocation node.");
+ }
+ }
+
+ // The resproj may not exist because the result could be ignored
+ // and the exception object may not exist if an exception handler
+ // swallows the exception, but all the others must exist and be found.
+ assert(projs->fallthrough_proj != NULL, "must be found");
+ assert(projs->fallthrough_catchproj != NULL, "must be found");
+ assert(projs->fallthrough_memproj != NULL, "must be found");
+ assert(projs->fallthrough_ioproj != NULL, "must be found");
+ assert(projs->catchall_catchproj != NULL, "must be found");
+ if (separate_io_proj) {
+ assert(projs->catchall_memproj != NULL, "must be found");
+ assert(projs->catchall_ioproj != NULL, "must be found");
+ }
+}
+
+
//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
diff --git a/hotspot/src/share/vm/opto/callnode.hpp b/hotspot/src/share/vm/opto/callnode.hpp
index ac886f3ba99..e3bd8906d95 100644
--- a/hotspot/src/share/vm/opto/callnode.hpp
+++ b/hotspot/src/share/vm/opto/callnode.hpp
@@ -470,6 +470,23 @@ public:
#endif
};
+
+// Simple container for the outgoing projections of a call. Useful
+// for serious surgery on calls.
+class CallProjections : public StackObj {
+public:
+ Node* fallthrough_proj;
+ Node* fallthrough_catchproj;
+ Node* fallthrough_memproj;
+ Node* fallthrough_ioproj;
+ Node* catchall_catchproj;
+ Node* catchall_memproj;
+ Node* catchall_ioproj;
+ Node* resproj;
+ Node* exobj;
+};
+
+
//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
@@ -521,6 +538,11 @@ public:
// or returns NULL if there is no one.
Node *result_cast();
+ // Collect all the interesting edges from a call for use in
+ // replacing the call by something else. Used by macro expansion
+ // and the late inlining support.
+ void extract_projections(CallProjections* projs, bool separate_io_proj);
+
virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
@@ -529,6 +551,7 @@ public:
#endif
};
+
//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
@@ -539,12 +562,15 @@ protected:
virtual uint size_of() const; // Size is bigger
bool _optimized_virtual;
+ bool _method_handle_invoke;
ciMethod* _method; // Method being direct called
public:
const int _bci; // Byte Code Index of call byte code
CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
: CallNode(tf, addr, TypePtr::BOTTOM),
- _method(method), _bci(bci), _optimized_virtual(false)
+ _method(method), _bci(bci),
+ _optimized_virtual(false),
+ _method_handle_invoke(false)
{
init_class_id(Class_CallJava);
}
@@ -554,6 +580,8 @@ public:
void set_method(ciMethod *m) { _method = m; }
void set_optimized_virtual(bool f) { _optimized_virtual = f; }
bool is_optimized_virtual() const { return _optimized_virtual; }
+ void set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
+ bool is_method_handle_invoke() const { return _method_handle_invoke; }
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
diff --git a/hotspot/src/share/vm/opto/compile.cpp b/hotspot/src/share/vm/opto/compile.cpp
index ecffd2f4e6a..84102e1553c 100644
--- a/hotspot/src/share/vm/opto/compile.cpp
+++ b/hotspot/src/share/vm/opto/compile.cpp
@@ -224,6 +224,32 @@ bool Compile::valid_bundle_info(const Node *n) {
}
+void Compile::gvn_replace_by(Node* n, Node* nn) {
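+ // Redirect every use of n to nn, removing and re-inserting each user in the GVN hash table
+ // so value numbering stays consistent, and record the user for IGVN.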
+ for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
+ Node* use = n->last_out(i);
+ bool is_in_table = initial_gvn()->hash_delete(use);
+ uint uses_found = 0;
+ for (uint j = 0; j < use->len(); j++) {
+ if (use->in(j) == n) {
+ if (j < use->req())
+ use->set_req(j, nn);
+ else
+ use->set_prec(j, nn);
+ uses_found++;
+ }
+ }
+ if (is_in_table) {
+ // reinsert into table
+ initial_gvn()->hash_find_insert(use);
+ }
+ record_for_igvn(use);
+ i -= uses_found; // we deleted 1 or more copies of this edge
+ }
+}
+
+
+
+
// Identify all nodes that are reachable from below, useful.
// Use breadth-first pass that records state in a Unique_Node_List,
// recursive traversal is slower.
@@ -554,6 +580,28 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
rethrow_exceptions(kit.transfer_exceptions_into_jvms());
}
+ if (!failing() && has_stringbuilder()) {
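+ // String concatenation patterns are easier to recognize while the StringBuilder/StringBuffer
+ // calls are still out of line, so run PhaseStringOpts first and expand the delayed inlines afterwards.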
+ {
+ // remove useless nodes to make the usage analysis simpler
+ ResourceMark rm;
+ PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
+ }
+
+ {
+ ResourceMark rm;
+ print_method("Before StringOpts", 3);
+ PhaseStringOpts pso(initial_gvn(), &for_igvn);
+ print_method("After StringOpts", 3);
+ }
+
+ // now inline anything that we skipped the first time around
+ while (_late_inlines.length() > 0) {
+ CallGenerator* cg = _late_inlines.pop();
+ cg->do_late_inline();
+ }
+ }
+ assert(_late_inlines.length() == 0, "should have been processed");
+
print_method("Before RemoveUseless", 3);
// Remove clutter produced by parsing.
@@ -820,6 +868,7 @@ void Compile::Init(int aliaslevel) {
_fixed_slots = 0;
set_has_split_ifs(false);
set_has_loops(has_method() && method()->has_loops()); // first approximation
+ set_has_stringbuilder(false);
_deopt_happens = true; // start out assuming the worst
_trap_can_recompile = false; // no traps emitted yet
_major_progress = true; // start out assuming good things will happen
@@ -883,6 +932,7 @@ void Compile::Init(int aliaslevel) {
_intrinsics = NULL;
_macro_nodes = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
+ _predicate_opaqs = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
register_library_intrinsics();
}
@@ -1504,6 +1554,19 @@ void Compile::Finish_Warm() {
}
}
+//---------------------cleanup_loop_predicates-----------------------
+// Remove the opaque nodes that protect the predicates so that all unused
+// checks and uncommon_traps will be eliminated from the ideal graph
+void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
+ if (predicate_count()==0) return;
+ for (int i = predicate_count(); i > 0; i--) {
+ Node * n = predicate_opaque1_node(i-1);
+ assert(n->Opcode() == Op_Opaque1, "must be");
+ igvn.replace_node(n, n->in(1));
+ }
+ assert(predicate_count()==0, "should be clean!");
+ igvn.optimize();
+}
//------------------------------Optimize---------------------------------------
// Given a graph, optimize it.
@@ -1545,7 +1608,7 @@ void Compile::Optimize() {
if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
{
TracePhase t2("idealLoop", &_t_idealLoop, true);
- PhaseIdealLoop ideal_loop( igvn, true );
+ PhaseIdealLoop ideal_loop( igvn, true, UseLoopPredicate);
loop_opts_cnt--;
if (major_progress()) print_method("PhaseIdealLoop 1", 2);
if (failing()) return;
@@ -1553,7 +1616,7 @@ void Compile::Optimize() {
// Loop opts pass if partial peeling occurred in previous pass
if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
TracePhase t3("idealLoop", &_t_idealLoop, true);
- PhaseIdealLoop ideal_loop( igvn, false );
+ PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate);
loop_opts_cnt--;
if (major_progress()) print_method("PhaseIdealLoop 2", 2);
if (failing()) return;
@@ -1561,7 +1624,7 @@ void Compile::Optimize() {
// Loop opts pass for loop-unrolling before CCP
if(major_progress() && (loop_opts_cnt > 0)) {
TracePhase t4("idealLoop", &_t_idealLoop, true);
- PhaseIdealLoop ideal_loop( igvn, false );
+ PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate);
loop_opts_cnt--;
if (major_progress()) print_method("PhaseIdealLoop 3", 2);
}
@@ -1599,13 +1662,21 @@ void Compile::Optimize() {
// peeling, unrolling, etc.
if(loop_opts_cnt > 0) {
debug_only( int cnt = 0; );
+ bool loop_predication = UseLoopPredicate;
while(major_progress() && (loop_opts_cnt > 0)) {
TracePhase t2("idealLoop", &_t_idealLoop, true);
assert( cnt++ < 40, "infinite cycle in loop optimization" );
- PhaseIdealLoop ideal_loop( igvn, true );
+ PhaseIdealLoop ideal_loop( igvn, true, loop_predication);
loop_opts_cnt--;
if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
if (failing()) return;
+ // Perform loop predication optimization during first iteration after CCP.
+ // After that switch it off and cleanup unused loop predicates.
+ if (loop_predication) {
+ loop_predication = false;
+ cleanup_loop_predicates(igvn);
+ if (failing()) return;
+ }
}
}
@@ -1803,6 +1874,7 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
!n->is_Phi() && // a few noisely useless nodes
!n->is_Proj() &&
!n->is_MachTemp() &&
+ !n->is_SafePointScalarObject() &&
!n->is_Catch() && // Would be nice to print exception table targets
!n->is_MergeMem() && // Not very interesting
!n->is_top() && // Debug info table constants
@@ -2240,6 +2312,30 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
break;
}
+ case Op_Proj: {
+ if (OptimizeStringConcat) {
+ ProjNode* p = n->as_Proj();
+ if (p->_is_io_use) {
+ // Separate projections were used for the exception path which
+ // are normally removed by a late inline. If it wasn't inlined
+ // then they will hang around and should just be replaced with
+ // the original one.
+ Node* proj = NULL;
+ // Replace with just one
+ for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
+ Node *use = i.get();
+ if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
+ proj = use;
+ break;
+ }
+ }
+ assert(proj != NULL, "must be found");
+ p->subsume_by(proj);
+ }
+ }
+ break;
+ }
+
case Op_Phi:
if (n->as_Phi()->bottom_type()->isa_narrowoop()) {
// The EncodeP optimization may create Phi with the same edges
diff --git a/hotspot/src/share/vm/opto/compile.hpp b/hotspot/src/share/vm/opto/compile.hpp
index 4bd1900fc70..c683110355d 100644
--- a/hotspot/src/share/vm/opto/compile.hpp
+++ b/hotspot/src/share/vm/opto/compile.hpp
@@ -38,6 +38,7 @@ class Node_Notes;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
+class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
@@ -149,6 +150,7 @@ class Compile : public Phase {
bool _has_loops; // True if the method _may_ have some loops
bool _has_split_ifs; // True if the method _may_ have some split-if
bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores.
+ bool _has_stringbuilder; // True if StringBuffers or StringBuilders are allocated
uint _trap_hist[trapHistLength]; // Cumulative traps
bool _trap_can_recompile; // Have we emitted a recompiling trap?
uint _decompile_count; // Cumulative decompilation counts.
@@ -171,6 +173,7 @@ class Compile : public Phase {
const char* _failure_reason; // for record_failure/failing pattern
GrowableArray<CallGenerator*>* _intrinsics; // List of intrinsics.
GrowableArray<Node*>* _macro_nodes; // List of nodes which need to be expanded before matching.
+ GrowableArray<Node*>* _predicate_opaqs; // List of Opaque1 nodes for the loop predicates.
ConnectionGraph* _congraph;
#ifndef PRODUCT
IdealGraphPrinter* _printer;
@@ -219,6 +222,9 @@ class Compile : public Phase {
Unique_Node_List* _for_igvn; // Initial work-list for next round of Iterative GVN
WarmCallInfo* _warm_calls; // Sorted work-list for heat-based inlining.
+ GrowableArray<CallGenerator*> _late_inlines; // List of CallGenerators to be revisited after
+ // main parsing has finished.
+
// Matching, CFG layout, allocation, code generation
PhaseCFG* _cfg; // Results of CFG finding
bool _select_24_bit_instr; // We selected an instruction with a 24-bit result
@@ -298,6 +304,8 @@ class Compile : public Phase {
void set_has_split_ifs(bool z) { _has_split_ifs = z; }
bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
+ bool has_stringbuilder() const { return _has_stringbuilder; }
+ void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
bool trap_can_recompile() const { return _trap_can_recompile; }
@@ -345,7 +353,9 @@ class Compile : public Phase {
}
int macro_count() { return _macro_nodes->length(); }
+ int predicate_count() { return _predicate_opaqs->length();}
Node* macro_node(int idx) { return _macro_nodes->at(idx); }
+ Node* predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
ConnectionGraph* congraph() { return _congraph;}
void add_macro_node(Node * n) {
//assert(n->is_macro(), "must be a macro node");
@@ -357,7 +367,19 @@ class Compile : public Phase {
// that the node is in the array before attempting to remove it
if (_macro_nodes->contains(n))
_macro_nodes->remove(n);
+ // remove from _predicate_opaqs list also if it is there
+ if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
+ _predicate_opaqs->remove(n);
+ }
}
+ void add_predicate_opaq(Node * n) {
+ assert(!_predicate_opaqs->contains(n), " duplicate entry in predicate opaque1");
+ assert(_macro_nodes->contains(n), "should have already been in macro list");
+ _predicate_opaqs->append(n);
+ }
+ // remove the opaque nodes that protect the predicates so that the unused checks and
+ // uncommon traps will be eliminated from the graph.
+ void cleanup_loop_predicates(PhaseIterGVN &igvn);
// Compilation environment.
Arena* comp_arena() { return &_comp_arena; }
@@ -475,6 +497,7 @@ class Compile : public Phase {
// Decide how to build a call.
// The profile factor is a discount to apply to this site's interp. profile.
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor);
+ bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);
// Report if there were too many traps at a current method and bci.
// Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
@@ -495,6 +518,11 @@ class Compile : public Phase {
void set_initial_gvn(PhaseGVN *gvn) { _initial_gvn = gvn; }
void set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }
+ // Replace n by nn using initial_gvn, calling hash_delete and
+ // record_for_igvn as needed.
+ void gvn_replace_by(Node* n, Node* nn);
+
+
void identify_useful_nodes(Unique_Node_List &useful);
void remove_useless_nodes (Unique_Node_List &useful);
@@ -502,6 +530,9 @@ class Compile : public Phase {
void set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
WarmCallInfo* pop_warm_call();
+ // Record this CallGenerator for inlining at the end of parsing.
+ void add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }
+
// Matching, CFG layout, allocation, code generation
PhaseCFG* cfg() { return _cfg; }
bool select_24_bit_instr() const { return _select_24_bit_instr; }
diff --git a/hotspot/src/share/vm/opto/divnode.cpp b/hotspot/src/share/vm/opto/divnode.cpp
index a81e3b1942f..bc3c5ac9f7e 100644
--- a/hotspot/src/share/vm/opto/divnode.cpp
+++ b/hotspot/src/share/vm/opto/divnode.cpp
@@ -114,7 +114,8 @@ static Node *transform_int_divide( PhaseGVN *phase, Node *dividend, jint divisor
if( andconi_t && andconi_t->is_con() ) {
jint andconi = andconi_t->get_con();
if( andconi < 0 && is_power_of_2(-andconi) && (-andconi) >= d ) {
- dividend = dividend->in(1);
+ if( (-andconi) == d ) // Remove AND if it clears bits which will be shifted
+ dividend = dividend->in(1);
needs_rounding = false;
}
}
@@ -356,7 +357,8 @@ static Node *transform_long_divide( PhaseGVN *phase, Node *dividend, jlong divis
if( andconl_t && andconl_t->is_con() ) {
jlong andconl = andconl_t->get_con();
if( andconl < 0 && is_power_of_2_long(-andconl) && (-andconl) >= d ) {
- dividend = dividend->in(1);
+ if( (-andconl) == d ) // Remove AND if it clears bits which will be shifted
+ dividend = dividend->in(1);
needs_rounding = false;
}
}
diff --git a/hotspot/src/share/vm/opto/doCall.cpp b/hotspot/src/share/vm/opto/doCall.cpp
index 5104e648aa6..c000a7e80fb 100644
--- a/hotspot/src/share/vm/opto/doCall.cpp
+++ b/hotspot/src/share/vm/opto/doCall.cpp
@@ -43,7 +43,9 @@ void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_met
}
#endif
-CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) {
+CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
+ JVMState* jvms, bool allow_inline,
+ float prof_factor) {
CallGenerator* cg;
// Dtrace currently doesn't work unless all calls are vanilla
@@ -116,7 +118,7 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
// TO DO: When UseOldInlining is removed, copy the ILT code elsewhere.
float site_invoke_ratio = prof_factor;
// Note: ilt is for the root of this parse, not the present call site.
- ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio);
+ ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0);
}
WarmCallInfo scratch_ci;
if (!UseOldInlining)
@@ -128,6 +130,12 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
if (allow_inline) {
CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
+ if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
+ // Delay the inlining of this method to give us the
+ // opportunity to perform some high level optimizations
+ // first.
+ return CallGenerator::for_late_inline(call_method, cg);
+ }
if (cg == NULL) {
// Fall through.
} else if (require_inline || !InlineWarmCalls) {
@@ -218,6 +226,57 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
}
}
+ // Do MethodHandle calls.
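+ // Two cases: a direct MethodHandle.invoke call, or an invokedynamic call site whose target
+ // MethodHandle is loaded from its CallSite.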
+ if (call_method->is_method_handle_invoke()) {
+ if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) {
+ GraphKit kit(jvms);
+ Node* n = kit.argument(0);
+
+ if (n->Opcode() == Op_ConP) {
+ const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
+ ciObject* const_oop = oop_ptr->const_oop();
+ ciMethodHandle* method_handle = const_oop->as_method_handle();
+
+ // Set the actually called method to have access to the class
+ // and signature in the MethodHandleCompiler.
+ method_handle->set_callee(call_method);
+
+ // Get an adapter for the MethodHandle.
+ ciMethod* target_method = method_handle->get_method_handle_adapter();
+
+ CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
+ if (hit_cg != NULL && hit_cg->is_inline())
+ return hit_cg;
+ }
+
+ return CallGenerator::for_direct_call(call_method);
+ }
+ else {
+ // Get the MethodHandle from the CallSite.
+ ciMethod* caller_method = jvms->method();
+ ciBytecodeStream str(caller_method);
+ str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
+ ciCallSite* call_site = str.get_call_site();
+ ciMethodHandle* method_handle = call_site->get_target();
+
+ // Set the actually called method to have access to the class
+ // and signature in the MethodHandleCompiler.
+ method_handle->set_callee(call_method);
+
+ // Get an adapter for the MethodHandle.
+ ciMethod* target_method = method_handle->get_invokedynamic_adapter();
+
+ CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
+ if (hit_cg != NULL && hit_cg->is_inline()) {
+ CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
+ return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
+ }
+
+ // If something failed, generate a normal dynamic call.
+ return CallGenerator::for_dynamic_call(call_method);
+ }
+ }
+
// There was no special inlining tactic, or it bailed out.
// Use a more generic tactic, like a simple call.
if (call_is_virtual) {
@@ -225,10 +284,63 @@ CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index,
} else {
// Class Hierarchy Analysis or Type Profile reveals a unique target,
// or it is a static or special call.
- return CallGenerator::for_direct_call(call_method);
+ return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
}
}
+// Return true for methods that shouldn't be inlined early so that
+// they are easier to analyze and optimize as intrinsics.
+bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
+ if (has_stringbuilder()) {
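+ // Delaying is only worthwhile when this compilation actually allocates a StringBuilder or StringBuffer.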
+
+ if ((call_method->holder() == C->env()->StringBuilder_klass() ||
+ call_method->holder() == C->env()->StringBuffer_klass()) &&
+ (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
+ jvms->method()->holder() == C->env()->StringBuffer_klass())) {
+ // Delay SB calls only when called from non-SB code
+ return false;
+ }
+
+ switch (call_method->intrinsic_id()) {
+ case vmIntrinsics::_StringBuilder_void:
+ case vmIntrinsics::_StringBuilder_int:
+ case vmIntrinsics::_StringBuilder_String:
+ case vmIntrinsics::_StringBuilder_append_char:
+ case vmIntrinsics::_StringBuilder_append_int:
+ case vmIntrinsics::_StringBuilder_append_String:
+ case vmIntrinsics::_StringBuilder_toString:
+ case vmIntrinsics::_StringBuffer_void:
+ case vmIntrinsics::_StringBuffer_int:
+ case vmIntrinsics::_StringBuffer_String:
+ case vmIntrinsics::_StringBuffer_append_char:
+ case vmIntrinsics::_StringBuffer_append_int:
+ case vmIntrinsics::_StringBuffer_append_String:
+ case vmIntrinsics::_StringBuffer_toString:
+ case vmIntrinsics::_Integer_toString:
+ return true;
+
+ case vmIntrinsics::_String_String:
+ {
+ Node* receiver = jvms->map()->in(jvms->argoff() + 1);
+ if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
+ CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
+ ciMethod* m = csj->method();
+ if (m != NULL &&
+ (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
+ m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
+ // Delay String.<init>(new SB())
+ return true;
+ }
+ return false;
+ }
+
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
@@ -240,7 +352,7 @@ bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* kl
// Interface classes can be loaded & linked and never get around to
// being initialized. Uncommon-trap for not-initialized static or
// v-calls. Let interface calls happen.
- ciInstanceKlass* holder_klass = dest_method->holder();
+ ciInstanceKlass* holder_klass = dest_method->holder();
if (!holder_klass->is_initialized() &&
!holder_klass->is_interface()) {
uncommon_trap(Deoptimization::Reason_uninitialized,
@@ -248,14 +360,6 @@ bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* kl
holder_klass);
return true;
}
- if (dest_method->is_method_handle_invoke()
- && holder_klass->name() == ciSymbol::java_dyn_Dynamic()) {
- // FIXME: NYI
- uncommon_trap(Deoptimization::Reason_unhandled,
- Deoptimization::Action_none,
- holder_klass);
- return true;
- }
assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
return false;
@@ -274,6 +378,7 @@ void Parse::do_call() {
bool is_virtual = bc() == Bytecodes::_invokevirtual;
bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
+ bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;
// Find target being called
bool will_link;
@@ -282,7 +387,8 @@ void Parse::do_call() {
ciKlass* holder = iter().get_declared_method_holder();
ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
- int nargs = dest_method->arg_size();
+ int nargs = dest_method->arg_size();
+ if (is_invokedynamic) nargs -= 1;
// uncommon-trap when callee is unloaded, uninitialized or will not link
// bailout when too many arguments for register representation
@@ -296,7 +402,7 @@ void Parse::do_call() {
return;
}
assert(holder_klass->is_loaded(), "");
- assert(dest_method->is_static() == !has_receiver, "must match bc");
+ assert((dest_method->is_static() || is_invokedynamic) == !has_receiver , "must match bc");
// Note: this takes into account invokeinterface of methods declared in java/lang/Object,
// which should be invokevirtuals but according to the VM spec may be invokeinterfaces
assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
diff --git a/hotspot/src/share/vm/opto/escape.cpp b/hotspot/src/share/vm/opto/escape.cpp
index b22f4814a57..111443cd1ae 100644
--- a/hotspot/src/share/vm/opto/escape.cpp
+++ b/hotspot/src/share/vm/opto/escape.cpp
@@ -537,11 +537,13 @@ bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
}
const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
- // Do NOT remove the next call: ensure an new alias index is allocated
- // for the instance type
+ // Do NOT remove the next line: ensure a new alias index is allocated
+ // for the instance type. Note: C++ will not remove it since the call
+ // has side effect.
int alias_idx = _compile->get_alias_index(tinst);
igvn->set_type(addp, tinst);
// record the allocation in the node map
+ assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
set_map(addp->_idx, get_map(base->_idx));
// Set addp's Base and Address to 'base'.
@@ -617,9 +619,14 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro
const TypePtr *atype = C->get_adr_type(alias_idx);
result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
C->copy_node_notes_to(result, orig_phi);
- set_map_phi(orig_phi->_idx, result);
igvn->set_type(result, result->bottom_type());
record_for_optimizer(result);
+
+ debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
+ assert(pn == NULL || pn == orig_phi, "wrong node");
+ set_map(orig_phi->_idx, result);
+ ptnode_adr(orig_phi->_idx)->_node = orig_phi;
+
new_created = true;
return result;
}
@@ -709,6 +716,81 @@ static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const Type
return mem;
}
+//
+// Move memory users to their memory slices.
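+// (MergeMem and MemBar users that should reference the general memory slice, rather than this
+// instance slice, are redirected to it.)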
+//
+void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *igvn) {
+ Compile* C = _compile;
+
+ const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
+ assert(tp != NULL, "ptr type");
+ int alias_idx = C->get_alias_index(tp);
+ int general_idx = C->get_general_index(alias_idx);
+
+ // Move users first
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ Node* use = n->fast_out(i);
+ if (use->is_MergeMem()) {
+ MergeMemNode* mmem = use->as_MergeMem();
+ assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
+ if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
+ continue; // Nothing to do
+ }
+ // Replace previous general reference to mem node.
+ uint orig_uniq = C->unique();
+ Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
+ assert(orig_uniq == C->unique(), "no new nodes");
+ mmem->set_memory_at(general_idx, m);
+ --imax;
+ --i;
+ } else if (use->is_MemBar()) {
+ assert(!use->is_Initialize(), "initializing stores should not be moved");
+ if (use->req() > MemBarNode::Precedent &&
+ use->in(MemBarNode::Precedent) == n) {
+ // Don't move related membars.
+ record_for_optimizer(use);
+ continue;
+ }
+ tp = use->as_MemBar()->adr_type()->isa_ptr();
+ if (tp != NULL && C->get_alias_index(tp) == alias_idx ||
+ alias_idx == general_idx) {
+ continue; // Nothing to do
+ }
+ // Move to general memory slice.
+ uint orig_uniq = C->unique();
+ Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
+ assert(orig_uniq == C->unique(), "no new nodes");
+ igvn->hash_delete(use);
+ imax -= use->replace_edge(n, m);
+ igvn->hash_insert(use);
+ record_for_optimizer(use);
+ --i;
+#ifdef ASSERT
+ } else if (use->is_Mem()) {
+ if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
+ // Don't move related cardmark.
+ continue;
+ }
+ // Memory nodes should have new memory input.
+ tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
+ assert(tp != NULL, "ptr type");
+ int idx = C->get_alias_index(tp);
+ assert(get_map(use->_idx) != NULL || idx == alias_idx,
+ "Following memory nodes should have new memory input or be on the same memory slice");
+ } else if (use->is_Phi()) {
+ // Phi nodes should be split and moved already.
+ tp = use->as_Phi()->adr_type()->isa_ptr();
+ assert(tp != NULL, "ptr type");
+ int idx = C->get_alias_index(tp);
+ assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
+ } else {
+ use->dump();
+ assert(false, "should not be here");
+#endif
+ }
+ }
+}
+
//
// Search memory chain of "mem" to find a MemNode whose address
// is the specified alias index.
@@ -774,10 +856,18 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
Node *un = result->as_Phi()->unique_input(phase);
if (un != NULL) {
+ orig_phis.append_if_missing(result->as_Phi());
result = un;
} else {
break;
}
+ } else if (result->is_ClearArray()) {
+ if (!ClearArrayNode::step_through(&result, (uint)tinst->instance_id(), phase)) {
+ // Can not bypass initialization of the instance
+ // we are looking for.
+ break;
+ }
+ // Otherwise skip it (the call updated 'result' value).
} else if (result->Opcode() == Op_SCMemProj) {
assert(result->in(0)->is_LoadStore(), "sanity");
const Type *at = phase->type(result->in(0)->in(MemNode::Address));
@@ -807,7 +897,6 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
return result;
}
-
//
// Convert the types of unescaped object to instance types where possible,
// propagate the new type information through the graph, and update memory
@@ -899,12 +988,13 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
//
void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) {
GrowableArray<Node *> memnode_worklist;
- GrowableArray<Node *> mergemem_worklist;
GrowableArray<PhiNode *> orig_phis;
+
PhaseGVN *igvn = _compile->initial_gvn();
uint new_index_start = (uint) _compile->num_alias_types();
- VectorSet visited(Thread::current()->resource_area());
- VectorSet ptset(Thread::current()->resource_area());
+ Arena* arena = Thread::current()->resource_area();
+ VectorSet visited(arena);
+ VectorSet ptset(arena);
// Phase 1: Process possible allocations from alloc_worklist.
@@ -980,6 +1070,8 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
// - non-escaping
// - eligible to be a unique type
// - not determined to be ineligible by escape analysis
+ assert(ptnode_adr(alloc->_idx)->_node != NULL &&
+ ptnode_adr(n->_idx)->_node != NULL, "should be registered");
set_map(alloc->_idx, n);
set_map(n->_idx, alloc);
const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
@@ -1024,7 +1116,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
alloc_worklist.append_if_missing(addp2);
}
alloc_worklist.append_if_missing(use);
- } else if (use->is_Initialize()) {
+ } else if (use->is_MemBar()) {
memnode_worklist.append_if_missing(use);
}
}
@@ -1034,10 +1126,12 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
PointsTo(ptset, get_addp_base(n), igvn);
assert(ptset.Size() == 1, "AddP address is unique");
uint elem = ptset.getelem(); // Allocation node's index
- if (elem == _phantom_object)
+ if (elem == _phantom_object) {
+ assert(false, "escaped allocation");
continue; // Assume the value was set outside this method.
+ }
Node *base = get_map(elem); // CheckCastPP node
- if (!split_AddP(n, base, igvn)) continue; // wrong type
+ if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
tinst = igvn->type(base)->isa_oopptr();
} else if (n->is_Phi() ||
n->is_CheckCastPP() ||
@@ -1052,8 +1146,10 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
PointsTo(ptset, n, igvn);
if (ptset.Size() == 1) {
uint elem = ptset.getelem(); // Allocation node's index
- if (elem == _phantom_object)
+ if (elem == _phantom_object) {
+ assert(false, "escaped allocation");
continue; // Assume the value was set outside this method.
+ }
Node *val = get_map(elem); // CheckCastPP node
TypeNode *tn = n->as_Type();
tinst = igvn->type(val)->isa_oopptr();
@@ -1068,8 +1164,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
tn_t = tn_type->isa_oopptr();
}
- if (tn_t != NULL &&
- tinst->cast_to_instance_id(TypeOopPtr::InstanceBot)->higher_equal(tn_t)) {
+ if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
if (tn_type->isa_narrowoop()) {
tn_type = tinst->make_narrowoop();
} else {
@@ -1081,33 +1176,25 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
igvn->hash_insert(tn);
record_for_optimizer(n);
} else {
- continue; // wrong type
+ assert(tn_type == TypePtr::NULL_PTR ||
+ tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
+ "unexpected type");
+ continue; // Skip dead path with different type
}
}
} else {
+ debug_only(n->dump();)
+ assert(false, "EA: unexpected node");
continue;
}
- // push users on appropriate worklist
+ // push allocation's users on appropriate worklist
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *use = n->fast_out(i);
if(use->is_Mem() && use->in(MemNode::Address) == n) {
+ // Load/store to instance's field
memnode_worklist.append_if_missing(use);
- } else if (use->is_Initialize()) {
+ } else if (use->is_MemBar()) {
memnode_worklist.append_if_missing(use);
- } else if (use->is_MergeMem()) {
- mergemem_worklist.append_if_missing(use);
- } else if (use->is_SafePoint() && tinst != NULL) {
- // Look for MergeMem nodes for calls which reference unique allocation
- // (through CheckCastPP nodes) even for debug info.
- Node* m = use->in(TypeFunc::Memory);
- uint iid = tinst->instance_id();
- while (m->is_Proj() && m->in(0)->is_SafePoint() &&
- m->in(0) != use && !m->in(0)->_idx != iid) {
- m = m->in(0)->in(TypeFunc::Memory);
- }
- if (m->is_MergeMem()) {
- mergemem_worklist.append_if_missing(m);
- }
} else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
Node* addp2 = find_second_addp(use, n);
if (addp2 != NULL) {
@@ -1120,6 +1207,29 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
use->is_DecodeN() ||
(use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
alloc_worklist.append_if_missing(use);
+#ifdef ASSERT
+ } else if (use->is_Mem()) {
+ assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
+ } else if (use->is_MergeMem()) {
+ assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+ } else if (use->is_SafePoint()) {
+ // Look for MergeMem nodes for calls which reference unique allocation
+ // (through CheckCastPP nodes) even for debug info.
+ Node* m = use->in(TypeFunc::Memory);
+ if (m->is_MergeMem()) {
+ assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+ }
+ } else {
+ uint op = use->Opcode();
+ if (!(op == Op_CmpP || op == Op_Conv2B ||
+ op == Op_CastP2X || op == Op_StoreCM ||
+ op == Op_FastLock || op == Op_AryEq || op == Op_StrComp ||
+ op == Op_StrEquals || op == Op_StrIndexOf)) {
+ n->dump();
+ use->dump();
+ assert(false, "EA: missing allocation reference path");
+ }
+#endif
}
}
@@ -1137,19 +1247,16 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
Node *n = memnode_worklist.pop();
if (visited.test_set(n->_idx))
continue;
- if (n->is_Phi()) {
- assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
- // we don't need to do anything, but the users must be pushed if we haven't processed
- // this Phi before
- } else if (n->is_Initialize()) {
- // we don't need to do anything, but the users of the memory projection must be pushed
- n = n->as_Initialize()->proj_out(TypeFunc::Memory);
+ if (n->is_Phi() || n->is_ClearArray()) {
+ // we don't need to do anything, but the users must be pushed
+ } else if (n->is_MemBar()) { // Initialize, MemBar nodes
+ // we don't need to do anything, but the users must be pushed
+ n = n->as_MemBar()->proj_out(TypeFunc::Memory);
if (n == NULL)
continue;
} else {
assert(n->is_Mem(), "memory node required.");
Node *addr = n->in(MemNode::Address);
- assert(addr->is_AddP(), "AddP required");
const Type *addr_t = igvn->type(addr);
if (addr_t == Type::TOP)
continue;
@@ -1161,6 +1268,10 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
return;
}
if (mem != n->in(MemNode::Memory)) {
+ // We delay the memory edge update since we need the old one in
+ // MergeMem code below when instance memory slices are separated.
+ debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
+ assert(pn == NULL || pn == n, "wrong node");
set_map(n->_idx, mem);
ptnode_adr(n->_idx)->_node = n;
}
@@ -1181,36 +1292,55 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
// push user on appropriate worklist
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node *use = n->fast_out(i);
- if (use->is_Phi()) {
+ if (use->is_Phi() || use->is_ClearArray()) {
memnode_worklist.append_if_missing(use);
} else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
+ if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
+ continue;
memnode_worklist.append_if_missing(use);
- } else if (use->is_Initialize()) {
+ } else if (use->is_MemBar()) {
memnode_worklist.append_if_missing(use);
+#ifdef ASSERT
+ } else if(use->is_Mem()) {
+ assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
} else if (use->is_MergeMem()) {
- mergemem_worklist.append_if_missing(use);
+ assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
+ } else {
+ uint op = use->Opcode();
+ if (!(op == Op_StoreCM ||
+ (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
+ strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
+ op == Op_AryEq || op == Op_StrComp ||
+ op == Op_StrEquals || op == Op_StrIndexOf)) {
+ n->dump();
+ use->dump();
+ assert(false, "EA: missing memory path");
+ }
+#endif
}
}
}
// Phase 3: Process MergeMem nodes from mergemem_worklist.
- // Walk each memory moving the first node encountered of each
+ // Walk each memory slice moving the first node encountered of each
// instance type to the input corresponding to its alias index.
- while (mergemem_worklist.length() != 0) {
- Node *n = mergemem_worklist.pop();
- assert(n->is_MergeMem(), "MergeMem node required.");
- if (visited.test_set(n->_idx))
- continue;
- MergeMemNode *nmm = n->as_MergeMem();
+ uint length = _mergemem_worklist.length();
+ for( uint next = 0; next < length; ++next ) {
+ MergeMemNode* nmm = _mergemem_worklist.at(next);
+ assert(!visited.test_set(nmm->_idx), "should not be visited before");
// Note: we don't want to use MergeMemStream here because we only want to
- // scan inputs which exist at the start, not ones we add during processing.
- uint nslices = nmm->req();
+ // scan inputs which exist at the start, not ones we add during processing.
+ // Note 2: MergeMem may already contain instance memory slices added
+ // during find_inst_mem() call when memory nodes were processed above.
igvn->hash_delete(nmm);
+ uint nslices = nmm->req();
for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
Node* mem = nmm->in(i);
Node* cur = NULL;
if (mem == NULL || mem->is_top())
continue;
+ // First, update mergemem by moving memory nodes to corresponding slices
+ // if their type became more precise since this mergemem was created.
while (mem->is_Mem()) {
const Type *at = igvn->type(mem->in(MemNode::Address));
if (at != Type::TOP) {
@@ -1229,7 +1359,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
}
nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
// Find any instance of the current type if we haven't encountered
- // a value of the instance along the chain.
+ // already a memory slice of the instance along the memory chain.
for (uint ni = new_index_start; ni < new_index_end; ni++) {
if((uint)_compile->get_general_index(ni) == i) {
Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
@@ -1245,11 +1375,11 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
}
// Find the rest of instances values
for (uint ni = new_index_start; ni < new_index_end; ni++) {
- const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr();
+ const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
Node* result = step_through_mergemem(nmm, ni, tinst);
if (result == nmm->base_memory()) {
// Didn't find instance memory, search through general slice recursively.
- result = nmm->memory_at(igvn->C->get_general_index(ni));
+ result = nmm->memory_at(_compile->get_general_index(ni));
result = find_inst_mem(result, ni, orig_phis, igvn);
if (_compile->failing()) {
return;
@@ -1259,41 +1389,6 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
}
igvn->hash_insert(nmm);
record_for_optimizer(nmm);
-
- // Propagate new memory slices to following MergeMem nodes.
- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
- Node *use = n->fast_out(i);
- if (use->is_Call()) {
- CallNode* in = use->as_Call();
- if (in->proj_out(TypeFunc::Memory) != NULL) {
- Node* m = in->proj_out(TypeFunc::Memory);
- for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
- Node* mm = m->fast_out(j);
- if (mm->is_MergeMem()) {
- mergemem_worklist.append_if_missing(mm);
- }
- }
- }
- if (use->is_Allocate()) {
- use = use->as_Allocate()->initialization();
- if (use == NULL) {
- continue;
- }
- }
- }
- if (use->is_Initialize()) {
- InitializeNode* in = use->as_Initialize();
- if (in->proj_out(TypeFunc::Memory) != NULL) {
- Node* m = in->proj_out(TypeFunc::Memory);
- for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
- Node* mm = m->fast_out(j);
- if (mm->is_MergeMem()) {
- mergemem_worklist.append_if_missing(mm);
- }
- }
- }
- }
- }
}
// Phase 4: Update the inputs of non-instance memory Phis and
@@ -1322,19 +1417,48 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist)
}
// Update the memory inputs of MemNodes with the value we computed
- // in Phase 2.
+ // in Phase 2 and move stores memory users to corresponding memory slices.
+#ifdef ASSERT
+ visited.Clear();
+ Node_Stack old_mems(arena, _compile->unique() >> 2);
+#endif
for (uint i = 0; i < nodes_size(); i++) {
Node *nmem = get_map(i);
if (nmem != NULL) {
Node *n = ptnode_adr(i)->_node;
- if (n != NULL && n->is_Mem()) {
+ assert(n != NULL, "sanity");
+ if (n->is_Mem()) {
+#ifdef ASSERT
+ Node* old_mem = n->in(MemNode::Memory);
+ if (!visited.test_set(old_mem->_idx)) {
+ old_mems.push(old_mem, old_mem->outcnt());
+ }
+#endif
+ assert(n->in(MemNode::Memory) != nmem, "sanity");
+ if (!n->is_Load()) {
+ // Move memory users of a store first.
+ move_inst_mem(n, orig_phis, igvn);
+ }
+ // Now update memory input
igvn->hash_delete(n);
n->set_req(MemNode::Memory, nmem);
igvn->hash_insert(n);
record_for_optimizer(n);
+ } else {
+ assert(n->is_Allocate() || n->is_CheckCastPP() ||
+ n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
}
}
}
+#ifdef ASSERT
+ // Verify that memory was split correctly
+ while (old_mems.is_nonempty()) {
+ Node* old_mem = old_mems.node();
+ uint old_cnt = old_mems.index();
+ old_mems.pop();
+ assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
+ }
+#endif
}
bool ConnectionGraph::has_candidates(Compile *C) {
@@ -1381,8 +1505,20 @@ bool ConnectionGraph::compute_escape() {
ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
has_allocations = true;
}
- if(n->is_AddP())
- cg_worklist.append(n->_idx);
+ if(n->is_AddP()) {
+ // Collect address nodes which directly reference an allocation.
+ // Use them during stage 3 below to build initial connection graph
+ // field edges. Other field edges could be added after StoreP/LoadP
+ // nodes are processed during stage 4 below.
+ Node* base = get_addp_base(n);
+ if(base->is_Proj() && base->in(0)->is_Allocate()) {
+ cg_worklist.append(n->_idx);
+ }
+ } else if (n->is_MergeMem()) {
+ // Collect all MergeMem nodes to add memory slices for
+ // scalar replaceable objects in split_unique_types().
+ _mergemem_worklist.append(n->as_MergeMem());
+ }
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* m = n->fast_out(i); // Get user
worklist_init.push(m);
@@ -1423,12 +1559,13 @@ bool ConnectionGraph::compute_escape() {
}
}
- VectorSet ptset(Thread::current()->resource_area());
+ Arena* arena = Thread::current()->resource_area();
+ VectorSet ptset(arena);
GrowableArray<uint> deferred_edges;
- VectorSet visited(Thread::current()->resource_area());
+ VectorSet visited(arena);
- // 5. Remove deferred edges from the graph and collect
- // information needed for type splitting.
+ // 5. Remove deferred edges from the graph and adjust
+ // escape state of nonescaping objects.
cg_length = cg_worklist.length();
for( uint next = 0; next < cg_length; ++next ) {
int ni = cg_worklist.at(next);
@@ -1438,98 +1575,9 @@ bool ConnectionGraph::compute_escape() {
remove_deferred(ni, &deferred_edges, &visited);
Node *n = ptn->_node;
if (n->is_AddP()) {
- // Search for objects which are not scalar replaceable.
- // Mark their escape state as ArgEscape to propagate the state
- // to referenced objects.
- // Note: currently there are no difference in compiler optimizations
- // for ArgEscape objects and NoEscape objects which are not
- // scalar replaceable.
-
- int offset = ptn->offset();
- Node *base = get_addp_base(n);
- ptset.Clear();
- PointsTo(ptset, base, igvn);
- int ptset_size = ptset.Size();
-
- // Check if a field's initializing value is recorded and add
- // a corresponding NULL field's value if it is not recorded.
- // Connection Graph does not record a default initialization by NULL
- // captured by Initialize node.
- //
- // Note: it will disable scalar replacement in some cases:
- //
- // Point p[] = new Point[1];
- // p[0] = new Point(); // Will be not scalar replaced
- //
- // but it will save us from incorrect optimizations in next cases:
- //
- // Point p[] = new Point[1];
- // if ( x ) p[0] = new Point(); // Will be not scalar replaced
- //
- // Without a control flow analysis we can't distinguish above cases.
- //
- if (offset != Type::OffsetBot && ptset_size == 1) {
- uint elem = ptset.getelem(); // Allocation node's index
- // It does not matter if it is not Allocation node since
- // only non-escaping allocations are scalar replaced.
- if (ptnode_adr(elem)->_node->is_Allocate() &&
- ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
- AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
- InitializeNode* ini = alloc->initialization();
- Node* value = NULL;
- if (ini != NULL) {
- BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
- Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn);
- if (store != NULL && store->is_Store())
- value = store->in(MemNode::ValueIn);
- }
- if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
- // A field's initializing value was not recorded. Add NULL.
- uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
- add_pointsto_edge(ni, null_idx);
- }
- }
- }
-
- // An object is not scalar replaceable if the field which may point
- // to it has unknown offset (unknown element of an array of objects).
- //
- if (offset == Type::OffsetBot) {
- uint e_cnt = ptn->edge_count();
- for (uint ei = 0; ei < e_cnt; ei++) {
- uint npi = ptn->edge_target(ei);
- set_escape_state(npi, PointsToNode::ArgEscape);
- ptnode_adr(npi)->_scalar_replaceable = false;
- }
- }
-
- // Currently an object is not scalar replaceable if a LoadStore node
- // access its field since the field value is unknown after it.
- //
- bool has_LoadStore = false;
- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
- Node *use = n->fast_out(i);
- if (use->is_LoadStore()) {
- has_LoadStore = true;
- break;
- }
- }
- // An object is not scalar replaceable if the address points
- // to unknown field (unknown element for arrays, offset is OffsetBot).
- //
- // Or the address may point to more then one object. This may produce
- // the false positive result (set scalar_replaceable to false)
- // since the flow-insensitive escape analysis can't separate
- // the case when stores overwrite the field's value from the case
- // when stores happened on different control branches.
- //
- if (ptset_size > 1 || ptset_size != 0 &&
- (has_LoadStore || offset == Type::OffsetBot)) {
- for( VectorSetI j(&ptset); j.test(); ++j ) {
- set_escape_state(j.elem, PointsToNode::ArgEscape);
- ptnode_adr(j.elem)->_scalar_replaceable = false;
- }
- }
+ // Search for objects which are not scalar replaceable
+ // and adjust their escape state.
+ verify_escape_state(ni, ptset, igvn);
}
}
}
@@ -1646,6 +1694,150 @@ bool ConnectionGraph::compute_escape() {
return has_non_escaping_obj;
}
+// Search for objects which are not scalar replaceable.
+void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
+ PointsToNode* ptn = ptnode_adr(nidx);
+ Node* n = ptn->_node;
+ assert(n->is_AddP(), "Should be called for AddP nodes only");
+ // Search for objects which are not scalar replaceable.
+ // Mark their escape state as ArgEscape to propagate the state
+ // to referenced objects.
+ // Note: currently there is no difference in compiler optimizations
+ // for ArgEscape objects and NoEscape objects which are not
+ // scalar replaceable.
+
+ Compile* C = _compile;
+
+ int offset = ptn->offset();
+ Node* base = get_addp_base(n);
+ ptset.Clear();
+ PointsTo(ptset, base, phase);
+ int ptset_size = ptset.Size();
+
+ // Check if an oop field's initializing value is recorded and add
+ // a corresponding NULL field's value if it is not recorded.
+ // Connection Graph does not record a default initialization by NULL
+ // captured by Initialize node.
+ //
+ // Note: it will disable scalar replacement in some cases:
+ //
+ // Point p[] = new Point[1];
+ // p[0] = new Point(); // Will not be scalar replaced
+ //
+ // but it will save us from incorrect optimizations in next cases:
+ //
+ // Point p[] = new Point[1];
+ // if ( x ) p[0] = new Point(); // Will not be scalar replaced
+ //
+ // Do a simple control flow analysis to distinguish above cases.
+ //
+ if (offset != Type::OffsetBot && ptset_size == 1) {
+ uint elem = ptset.getelem(); // Allocation node's index
+ // It does not matter if it is not an Allocation node since
+ // only non-escaping allocations are scalar replaced.
+ if (ptnode_adr(elem)->_node->is_Allocate() &&
+ ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
+ AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
+ InitializeNode* ini = alloc->initialization();
+
+ // Check only oop fields.
+ const Type* adr_type = n->as_AddP()->bottom_type();
+ BasicType basic_field_type = T_INT;
+ if (adr_type->isa_instptr()) {
+ ciField* field = C->alias_type(adr_type->isa_instptr())->field();
+ if (field != NULL) {
+ basic_field_type = field->layout_type();
+ } else {
+ // Ignore a non-field load (for example, a klass load)
+ }
+ } else if (adr_type->isa_aryptr()) {
+ const Type* elemtype = adr_type->isa_aryptr()->elem();
+ basic_field_type = elemtype->array_element_basic_type();
+ } else {
+ // Raw pointers are used for initializing stores so skip it.
+ assert(adr_type->isa_rawptr() && base->is_Proj() &&
+ (base->in(0) == alloc),"unexpected pointer type");
+ }
+ if (basic_field_type == T_OBJECT ||
+ basic_field_type == T_NARROWOOP ||
+ basic_field_type == T_ARRAY) {
+ Node* value = NULL;
+ if (ini != NULL) {
+ BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
+ Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
+ if (store != NULL && store->is_Store()) {
+ value = store->in(MemNode::ValueIn);
+ } else if (ptn->edge_count() > 0) { // Are there oop stores?
+ // Check for a store which follows allocation without branches.
+ // For example, a volatile field store is not collected
+ // by Initialize node. TODO: it would be nice to use idom() here.
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ store = n->fast_out(i);
+ if (store->is_Store() && store->in(0) != NULL) {
+ Node* ctrl = store->in(0);
+ while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
+ ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
+ ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
+ ctrl = ctrl->in(0);
+ }
+ if (ctrl == ini || ctrl == alloc) {
+ value = store->in(MemNode::ValueIn);
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
+ // A field's initializing value was not recorded. Add NULL.
+ uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
+ add_pointsto_edge(nidx, null_idx);
+ }
+ }
+ }
+ }
+
+ // An object is not scalar replaceable if the field which may point
+ // to it has unknown offset (unknown element of an array of objects).
+ //
+ if (offset == Type::OffsetBot) {
+ uint e_cnt = ptn->edge_count();
+ for (uint ei = 0; ei < e_cnt; ei++) {
+ uint npi = ptn->edge_target(ei);
+ set_escape_state(npi, PointsToNode::ArgEscape);
+ ptnode_adr(npi)->_scalar_replaceable = false;
+ }
+ }
+
+ // Currently an object is not scalar replaceable if a LoadStore node
+ // accesses its field since the field value is unknown after it.
+ //
+ bool has_LoadStore = false;
+ for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+ Node *use = n->fast_out(i);
+ if (use->is_LoadStore()) {
+ has_LoadStore = true;
+ break;
+ }
+ }
+ // An object is not scalar replaceable if the address points
+ // to unknown field (unknown element for arrays, offset is OffsetBot).
+ //
+ // Or the address may point to more than one object. This may produce
+ // a false positive result (set scalar_replaceable to false)
+ // since the flow-insensitive escape analysis can't separate
+ // the case when stores overwrite the field's value from the case
+ // when stores happened on different control branches.
+ //
+ if (ptset_size > 1 || ptset_size != 0 &&
+ (has_LoadStore || offset == Type::OffsetBot)) {
+ for( VectorSetI j(&ptset); j.test(); ++j ) {
+ set_escape_state(j.elem, PointsToNode::ArgEscape);
+ ptnode_adr(j.elem)->_scalar_replaceable = false;
+ }
+ }
+}
+
void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
switch (call->Opcode()) {
@@ -1657,6 +1849,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
assert(false, "should be done already");
break;
#endif
+ case Op_CallLeaf:
case Op_CallLeafNoFP:
{
// Stub calls, objects do not escape but they are not scalar replaceable.
@@ -1667,9 +1860,23 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
const Type* at = d->field_at(i);
Node *arg = call->in(i)->uncast();
const Type *aat = phase->type(arg);
- if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) {
+ if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
+ ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
+
assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
aat->isa_ptr() != NULL, "expecting an Ptr");
+#ifdef ASSERT
+ if (!(call->Opcode() == Op_CallLeafNoFP &&
+ call->as_CallLeaf()->_name != NULL &&
+ (strstr(call->as_CallLeaf()->_name, "arraycopy") != 0) ||
+ call->as_CallLeaf()->_name != NULL &&
+ (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 ||
+ strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
+ ) {
+ call->dump();
+ assert(false, "EA: unexpected CallLeaf");
+ }
+#endif
set_escape_state(arg->_idx, PointsToNode::ArgEscape);
if (arg->is_AddP()) {
//
@@ -1706,9 +1913,10 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
const Type* at = d->field_at(i);
int k = i - TypeFunc::Parms;
+ Node *arg = call->in(i)->uncast();
- if (at->isa_oopptr() != NULL) {
- Node *arg = call->in(i)->uncast();
+ if (at->isa_oopptr() != NULL &&
+ ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
bool global_escapes = false;
bool fields_escapes = false;
@@ -1942,20 +2150,23 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
record_for_optimizer(n);
_processed.set(n->_idx);
} else {
- // Have to process call's arguments first.
+ // Don't mark as processed since call's arguments have to be processed.
PointsToNode::NodeType nt = PointsToNode::UnknownType;
+ PointsToNode::EscapeState es = PointsToNode::UnknownEscape;
// Check if a call returns an object.
const TypeTuple *r = n->as_Call()->tf()->range();
- if (n->is_CallStaticJava() && r->cnt() > TypeFunc::Parms &&
+ if (r->cnt() > TypeFunc::Parms &&
+ r->field_at(TypeFunc::Parms)->isa_ptr() &&
n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
- // Note: use isa_ptr() instead of isa_oopptr() here because
- // the _multianewarray functions return a TypeRawPtr.
- if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
- nt = PointsToNode::JavaObject;
+ nt = PointsToNode::JavaObject;
+ if (!n->is_CallStaticJava()) {
+ // Since the called method is statically unknown, assume
+ // the worst case that the returned value globally escapes.
+ es = PointsToNode::GlobalEscape;
}
}
- add_node(n, nt, PointsToNode::UnknownEscape, false);
+ add_node(n, nt, es, false);
}
return;
}
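// As a rough standalone sketch of the escape-state ordering the checks above
// rely on (illustrative names only; the real states live in PointsToNode::EscapeState):
//
//   enum EscState { kUnknownEscape = 0, kNoEscape, kArgEscape, kGlobalEscape };
//   inline EscState meet(EscState a, EscState b) {
//     return a > b ? a : b;   // merging always moves toward the more pessimistic state
//   }
//
// A call whose target is statically unknown and which returns an oop is merged
// with kGlobalEscape, matching the worst-case assumption made here.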
@@ -2088,18 +2299,27 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
}
case Op_Proj:
{
- // we are only interested in the result projection from a call
+ // we are only interested in the oop result projection from a call
if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
- add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
- process_call_result(n->as_Proj(), phase);
- if (!_processed.test(n->_idx)) {
- // The call's result may need to be processed later if the call
- // returns it's argument and the argument is not processed yet.
- _delayed_worklist.push(n);
+ const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
+ assert(r->cnt() > TypeFunc::Parms, "sanity");
+ if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
+ add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
+ int ti = n->in(0)->_idx;
+ // The call may not be registered yet (since not all its inputs are registered)
+ // if this is the projection from backbranch edge of Phi.
+ if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
+ process_call_result(n->as_Proj(), phase);
+ }
+ if (!_processed.test(n->_idx)) {
+ // The call's result may need to be processed later if the call
+ // returns its argument and the argument is not processed yet.
+ _delayed_worklist.push(n);
+ }
+ break;
}
- } else {
- _processed.set(n->_idx);
}
+ _processed.set(n->_idx);
break;
}
case Op_Return:
@@ -2160,6 +2380,15 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
}
break;
}
+ case Op_AryEq:
+ case Op_StrComp:
+ case Op_StrEquals:
+ case Op_StrIndexOf:
+ {
+ // char[] arrays passed to string intrinsics are not scalar replaceable.
+ add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
+ break;
+ }
case Op_ThreadLocal:
{
add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
@@ -2174,6 +2403,7 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
uint n_idx = n->_idx;
+ assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");
// Don't set processed bit for AddP, LoadP, StoreP since
// they may need more than one pass to process.
@@ -2211,6 +2441,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
case Op_DecodeN:
{
int ti = n->in(1)->_idx;
+ assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
add_pointsto_edge(n_idx, ti);
} else {
@@ -2250,7 +2481,6 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
#endif
Node* adr = n->in(MemNode::Address)->uncast();
- const Type *adr_type = phase->type(adr);
Node* adr_base;
if (adr->is_AddP()) {
adr_base = get_addp_base(adr);
@@ -2302,13 +2532,19 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
}
case Op_Proj:
{
- // we are only interested in the result projection from a call
+ // we are only interested in the oop result projection from a call
if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
- process_call_result(n->as_Proj(), phase);
- assert(_processed.test(n_idx), "all call results should be processed");
- } else {
- assert(false, "Op_Proj");
+ assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
+ "all nodes should be registered");
+ const TypeTuple *r = n->in(0)->as_Call()->tf()->range();
+ assert(r->cnt() > TypeFunc::Parms, "sanity");
+ if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
+ process_call_result(n->as_Proj(), phase);
+ assert(_processed.test(n_idx), "all call results should be processed");
+ break;
+ }
}
+ assert(false, "Op_Proj");
break;
}
case Op_Return:
@@ -2320,6 +2556,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
}
#endif
int ti = n->in(TypeFunc::Parms)->_idx;
+ assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
add_pointsto_edge(n_idx, ti);
} else {
@@ -2354,14 +2591,38 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
}
break;
}
+ case Op_AryEq:
+ case Op_StrComp:
+ case Op_StrEquals:
+ case Op_StrIndexOf:
+ {
+ // char[] arrays passed to string intrinsics do not escape but
+ // they are not scalar replaceable. Adjust escape state for them.
+ // Start from in(2) edge since in(1) is memory edge.
+ for (uint i = 2; i < n->req(); i++) {
+ Node* adr = n->in(i)->uncast();
+ const Type *at = phase->type(adr);
+ if (!adr->is_top() && at->isa_ptr()) {
+ assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
+ at->isa_ptr() != NULL, "expecting an Ptr");
+ if (adr->is_AddP()) {
+ adr = get_addp_base(adr);
+ }
+ // Mark as ArgEscape everything "adr" could point to.
+ set_escape_state(adr->_idx, PointsToNode::ArgEscape);
+ }
+ }
+ _processed.set(n_idx);
+ break;
+ }
case Op_ThreadLocal:
{
assert(false, "Op_ThreadLocal");
break;
}
default:
- ;
- // nothing to do
+ // This method should be called only for EA-specific nodes.
+ ShouldNotReachHere();
}
}
diff --git a/hotspot/src/share/vm/opto/escape.hpp b/hotspot/src/share/vm/opto/escape.hpp
index 1ce0cc9cf29..576043beb45 100644
--- a/hotspot/src/share/vm/opto/escape.hpp
+++ b/hotspot/src/share/vm/opto/escape.hpp
@@ -210,6 +210,8 @@ private:
Unique_Node_List _delayed_worklist; // Nodes to be processed before
// the call build_connection_graph().
+ GrowableArray<MergeMemNode*> _mergemem_worklist; // List of all MergeMem nodes
+
VectorSet _processed; // Records which nodes have been
// processed.
@@ -289,7 +291,7 @@ private:
bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created);
PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
- Node *find_mem(Node *mem, int alias_idx, PhaseGVN *igvn);
+ void move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *igvn);
Node *find_inst_mem(Node *mem, int alias_idx,GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
// Propagate unique types created for unescaped allocated objects
@@ -298,7 +300,6 @@ private:
// manage entries in _node_map
void set_map(int idx, Node *n) { _node_map.map(idx, n); }
- void set_map_phi(int idx, PhiNode *p) { _node_map.map(idx, (Node *) p); }
Node *get_map(int idx) { return _node_map[idx]; }
PhiNode *get_map_phi(int idx) {
Node *phi = _node_map[idx];
@@ -315,6 +316,9 @@ private:
// Set the escape state of a node
void set_escape_state(uint ni, PointsToNode::EscapeState es);
+ // Search for objects which are not scalar replaceable.
+ void verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase);
+
public:
ConnectionGraph(Compile *C);
diff --git a/hotspot/src/share/vm/opto/graphKit.cpp b/hotspot/src/share/vm/opto/graphKit.cpp
index b63aae489ff..57fea648024 100644
--- a/hotspot/src/share/vm/opto/graphKit.cpp
+++ b/hotspot/src/share/vm/opto/graphKit.cpp
@@ -981,14 +981,19 @@ bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
{
- bool is_static = (depth == 0);
bool ignore;
ciBytecodeStream iter(method());
iter.reset_to_bci(bci());
iter.next();
ciMethod* method = iter.get_method(ignore);
inputs = method->arg_size_no_receiver();
- if (!is_static) inputs += 1;
+ // Add a receiver argument, maybe:
+ if (code != Bytecodes::_invokestatic &&
+ code != Bytecodes::_invokedynamic)
+ inputs += 1;
+ // (Do not use ciMethod::arg_size(), because
+ // it might be an unloaded method, which doesn't
+ // know whether it is static or not.)
int size = method->return_type()->size();
depth = size - inputs;
}
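// The arithmetic above, as a tiny standalone sketch over plain ints (names are
// invented for illustration; the real code derives them from ciMethod and the
// current bytecode, since an unloaded method does not know whether it is static):
//
//   void invoke_stack_effects(int arg_size_no_receiver, int return_size,
//                             bool pushes_receiver,  // false for invokestatic/invokedynamic
//                             int& inputs, int& depth) {
//     inputs = arg_size_no_receiver + (pushes_receiver ? 1 : 0);
//     depth  = return_size - inputs;   // net change of the expression stack
//   }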
@@ -1351,8 +1356,8 @@ void GraphKit::set_all_memory(Node* newmem) {
}
//------------------------------set_all_memory_call----------------------------
-void GraphKit::set_all_memory_call(Node* call) {
- Node* newmem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
+void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
+ Node* newmem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
set_all_memory(newmem);
}
@@ -1573,7 +1578,7 @@ void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
// A return value node (if any) is returned from set_edges_for_java_call.
-void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw) {
+void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
// Add the predefined inputs:
call->init_req( TypeFunc::Control, control() );
@@ -1595,13 +1600,13 @@ void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw) {
// Re-use the current map to produce the result.
set_control(_gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Control)));
- set_i_o( _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O )));
- set_all_memory_call(xcall);
+ set_i_o( _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O , separate_io_proj)));
+ set_all_memory_call(xcall, separate_io_proj);
//return xcall; // no need, caller already has it
}
-Node* GraphKit::set_results_for_java_call(CallJavaNode* call) {
+Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
if (stopped()) return top(); // maybe the call folded up?
// Capture the return value, if any.
@@ -1614,8 +1619,15 @@ Node* GraphKit::set_results_for_java_call(CallJavaNode* call) {
// Note: Since any out-of-line call can produce an exception,
// we always insert an I_O projection from the call into the result.
- make_slow_call_ex(call, env()->Throwable_klass(), false);
+ make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
+ if (separate_io_proj) {
+ // The caller requested separate projections be used by the fall
+ // through and exceptional paths, so replace the projections for
+ // the fall through path.
+ set_i_o(_gvn.transform( new (C, 1) ProjNode(call, TypeFunc::I_O) ));
+ set_all_memory(_gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) ));
+ }
return ret;
}
@@ -1678,6 +1690,64 @@ void GraphKit::set_predefined_output_for_runtime_call(Node* call,
}
}
+
+// Replace the call with the current state of the kit.
+void GraphKit::replace_call(CallNode* call, Node* result) {
+ JVMState* ejvms = NULL;
+ if (has_exceptions()) {
+ ejvms = transfer_exceptions_into_jvms();
+ }
+
+ SafePointNode* final_state = stop();
+
+ // Find all the needed outputs of this call
+ CallProjections callprojs;
+ call->extract_projections(&callprojs, true);
+
+ // Replace all the old call edges with the edges from the inlining result
+ C->gvn_replace_by(callprojs.fallthrough_catchproj, final_state->in(TypeFunc::Control));
+ C->gvn_replace_by(callprojs.fallthrough_memproj, final_state->in(TypeFunc::Memory));
+ C->gvn_replace_by(callprojs.fallthrough_ioproj, final_state->in(TypeFunc::I_O));
+
+ // Replace the result with the new result if it exists and is used
+ if (callprojs.resproj != NULL && result != NULL) {
+ C->gvn_replace_by(callprojs.resproj, result);
+ }
+
+ if (ejvms == NULL) {
+ // No exception edges, so simply kill off those paths
+ C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
+ C->gvn_replace_by(callprojs.catchall_memproj, C->top());
+ C->gvn_replace_by(callprojs.catchall_ioproj, C->top());
+
+ // Replace the old exception object with top
+ if (callprojs.exobj != NULL) {
+ C->gvn_replace_by(callprojs.exobj, C->top());
+ }
+ } else {
+ GraphKit ekit(ejvms);
+
+ // Load my combined exception state into the kit, with all phis transformed:
+ SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
+
+ Node* ex_oop = ekit.use_exception_state(ex_map);
+
+ C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
+ C->gvn_replace_by(callprojs.catchall_memproj, ekit.reset_memory());
+ C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());
+
+ // Replace the old exception object with the newly created one
+ if (callprojs.exobj != NULL) {
+ C->gvn_replace_by(callprojs.exobj, ex_oop);
+ }
+ }
+
+ // Disconnect the call from the graph
+ call->disconnect_inputs(NULL);
+ C->gvn_replace_by(call, C->top());
+}
+
+
//------------------------------increment_counter------------------------------
// for statistics: increment a VM counter by 1
@@ -3189,9 +3259,10 @@ void GraphKit::write_barrier_post(Node* oop_store,
if (use_ReduceInitialCardMarks()
&& obj == just_allocated_object(control())) {
// We can skip marks on a freshly-allocated object in Eden.
- // Keep this code in sync with maybe_defer_card_mark() in runtime.cpp.
- // That routine informs GC to take appropriate compensating steps
- // so as to make this card-mark elision safe.
+ // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
+ // That routine informs GC to take appropriate compensating steps,
+ // upon a slow-path allocation, so as to make this card-mark
+ // elision safe.
return;
}
@@ -3459,4 +3530,3 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
sync_kit(ideal);
}
#undef __
-
diff --git a/hotspot/src/share/vm/opto/graphKit.hpp b/hotspot/src/share/vm/opto/graphKit.hpp
index b127789b5f3..8135aca2d39 100644
--- a/hotspot/src/share/vm/opto/graphKit.hpp
+++ b/hotspot/src/share/vm/opto/graphKit.hpp
@@ -279,6 +279,34 @@ class GraphKit : public Phase {
}
Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
+
+ // Some convenient shortcuts for common nodes
+ Node* IfTrue(IfNode* iff) { return _gvn.transform(new (C,1) IfTrueNode(iff)); }
+ Node* IfFalse(IfNode* iff) { return _gvn.transform(new (C,1) IfFalseNode(iff)); }
+
+ Node* AddI(Node* l, Node* r) { return _gvn.transform(new (C,3) AddINode(l, r)); }
+ Node* SubI(Node* l, Node* r) { return _gvn.transform(new (C,3) SubINode(l, r)); }
+ Node* MulI(Node* l, Node* r) { return _gvn.transform(new (C,3) MulINode(l, r)); }
+ Node* DivI(Node* ctl, Node* l, Node* r) { return _gvn.transform(new (C,3) DivINode(ctl, l, r)); }
+
+ Node* AndI(Node* l, Node* r) { return _gvn.transform(new (C,3) AndINode(l, r)); }
+ Node* OrI(Node* l, Node* r) { return _gvn.transform(new (C,3) OrINode(l, r)); }
+ Node* XorI(Node* l, Node* r) { return _gvn.transform(new (C,3) XorINode(l, r)); }
+
+ Node* MaxI(Node* l, Node* r) { return _gvn.transform(new (C,3) MaxINode(l, r)); }
+ Node* MinI(Node* l, Node* r) { return _gvn.transform(new (C,3) MinINode(l, r)); }
+
+ Node* LShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) LShiftINode(l, r)); }
+ Node* RShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) RShiftINode(l, r)); }
+ Node* URShiftI(Node* l, Node* r) { return _gvn.transform(new (C,3) URShiftINode(l, r)); }
+
+ Node* CmpI(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpINode(l, r)); }
+ Node* CmpL(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpLNode(l, r)); }
+ Node* CmpP(Node* l, Node* r) { return _gvn.transform(new (C,3) CmpPNode(l, r)); }
+ Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C,2) BoolNode(cmp, relop)); }
+
+ Node* AddP(Node* b, Node* a, Node* o) { return _gvn.transform(new (C,4) AddPNode(b, a, o)); }
+
// Convert between int and long, and size_t.
// (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.)
Node* ConvI2L(Node* offset);
@@ -400,7 +428,7 @@ class GraphKit : public Phase {
void set_all_memory(Node* newmem);
// Create a memory projection from the call, then set_all_memory.
- void set_all_memory_call(Node* call);
+ void set_all_memory_call(Node* call, bool separate_io_proj = false);
// Create a LoadNode, reading from the parser's memory state.
// (Note: require_atomic_access is useful only with T_LONG.)
@@ -543,12 +571,12 @@ class GraphKit : public Phase {
// Transform the call, and update the basics: control, i_o, memory.
// (The next step is usually to call set_results_for_java_call.)
void set_edges_for_java_call(CallJavaNode* call,
- bool must_throw = false);
+ bool must_throw = false, bool separate_io_proj = false);
// Finish up a java call that was started by set_edges_for_java_call.
// Call add_exception on any throw arising from the call.
// Return the call result (transformed).
- Node* set_results_for_java_call(CallJavaNode* call);
+ Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);
// Similar to set_edges_for_java_call, but simplified for runtime calls.
void set_predefined_output_for_runtime_call(Node* call) {
@@ -559,6 +587,11 @@ class GraphKit : public Phase {
const TypePtr* hook_mem);
Node* set_predefined_input_for_runtime_call(SafePointNode* call);
+ // Replace the call with the current state of the kit. Requires
+ // that the call was generated with separate io_projs so that
+ // exceptional control flow can be handled properly.
+ void replace_call(CallNode* call, Node* result);
+
// helper functions for statistics
void increment_counter(address counter_addr); // increment a debug counter
void increment_counter(Node* counter_addr); // increment a debug counter
diff --git a/hotspot/src/share/vm/opto/ifnode.cpp b/hotspot/src/share/vm/opto/ifnode.cpp
index 51ca8fe28ad..a026fceef47 100644
--- a/hotspot/src/share/vm/opto/ifnode.cpp
+++ b/hotspot/src/share/vm/opto/ifnode.cpp
@@ -531,6 +531,9 @@ Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
if (linear_only)
return NULL;
+ if( dom->is_Root() )
+ return NULL;
+
// Else hit a Region. Check for a loop header
if( dom->is_Loop() )
return dom->in(1); // Skip up thru loops
diff --git a/hotspot/src/share/vm/opto/lcm.cpp b/hotspot/src/share/vm/opto/lcm.cpp
index 31de55a5435..4f1d6b670e2 100644
--- a/hotspot/src/share/vm/opto/lcm.cpp
+++ b/hotspot/src/share/vm/opto/lcm.cpp
@@ -120,6 +120,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
case Op_LoadRange:
case Op_LoadD_unaligned:
case Op_LoadL_unaligned:
+ assert(mach->in(2) == val, "should be address");
break;
case Op_StoreB:
case Op_StoreC:
@@ -146,6 +147,21 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
default: // Also check for embedded loads
if( !mach->needs_anti_dependence_check() )
continue; // Not a memory op; skip it
+ {
+ // Check that value is used in memory address.
+ Node* base;
+ Node* index;
+ const MachOper* oper = mach->memory_inputs(base, index);
+ if (oper == NULL || oper == (MachOper*)-1) {
+ continue; // Not a memory op; skip it
+ }
+ if (val == base ||
+ val == index && val->bottom_type()->isa_narrowoop()) {
+ break; // Found it
+ } else {
+ continue; // Skip it
+ }
+ }
break;
}
// check if the offset is not too high for implicit exception
@@ -542,6 +558,16 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
// pointers as far as the kill mask goes.
bool exclude_soe = op == Op_CallRuntime;
+ // If the call is a MethodHandle invoke, we need to exclude the
+ // register which is used to save the SP value over MH invokes from
+ // the mask. Otherwise this register could be used for
+ // deoptimization information.
+ if (op == Op_CallStaticJava) {
+ MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
+ if (mcallstaticjava->_method_handle_invoke)
+ proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
+ }
+
// Fill in the kill mask for the call
for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
if( !regs.Member(r) ) { // Not already defined by the call
@@ -616,8 +642,9 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
}
}
- if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
- n->req() > TypeFunc::Parms ) {
+ if( n->is_Mach() && n->req() > TypeFunc::Parms &&
+ (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
+ n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
// MemBarAcquire could be created without Precedent edge.
// del_req() replaces the specified edge with the last input edge
// and then removes the last edge. If the specified edge > number of
diff --git a/hotspot/src/share/vm/opto/library_call.cpp b/hotspot/src/share/vm/opto/library_call.cpp
index 1be04de6acf..8f69208911e 100644
--- a/hotspot/src/share/vm/opto/library_call.cpp
+++ b/hotspot/src/share/vm/opto/library_call.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3697,12 +3697,14 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
// Helper routine for above
bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
+ ciMethod* method = jvms->method();
+
// Is this the Method.invoke method itself?
- if (jvms->method()->intrinsic_id() == vmIntrinsics::_invoke)
+ if (method->intrinsic_id() == vmIntrinsics::_invoke)
return true;
// Is this a helper, defined somewhere underneath MethodAccessorImpl.
- ciKlass* k = jvms->method()->holder();
+ ciKlass* k = method->holder();
if (k->is_instance_klass()) {
ciInstanceKlass* ik = k->as_instance_klass();
for (; ik != NULL; ik = ik->super()) {
@@ -3712,6 +3714,10 @@ bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
}
}
}
+ else if (method->is_method_handle_adapter()) {
+ // This is an internal adapter frame from the MethodHandleCompiler -- skip it
+ return true;
+ }
return false;
}
diff --git a/hotspot/src/share/vm/opto/loopTransform.cpp b/hotspot/src/share/vm/opto/loopTransform.cpp
index 0d4bf7869ea..809f47472c6 100644
--- a/hotspot/src/share/vm/opto/loopTransform.cpp
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp
@@ -549,6 +549,10 @@ bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
// Comparing trip+off vs limit
Node *bol = iff->in(1);
if( bol->req() != 2 ) continue; // dead constant test
+ if (!bol->is_Bool()) {
+ assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
+ continue;
+ }
Node *cmp = bol->in(1);
Node *rc_exp = cmp->in(1);
@@ -875,7 +879,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
- Node *n_c = _phase->get_ctrl(n);
+ Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
if (n_c->is_top()) return false;
return !is_member(_phase->get_loop(n_c));
}
@@ -1594,7 +1598,7 @@ bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
// Check and remove empty loops (spam micro-benchmarks)
if( policy_do_remove_empty_loop(phase) )
- return true; // Here we removed an empty loop
+ return true; // Here we removed an empty loop
bool should_peel = policy_peeling(phase); // Should we peel?
@@ -1688,8 +1692,8 @@ bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_
// an even number of trips). If we are peeling, we might enable some RCE
// and we'd rather unroll the post-RCE'd loop SO... do not unroll if
// peeling.
- if( should_unroll && !should_peel )
- phase->do_unroll(this,old_new, true);
+ if( should_unroll && !should_peel )
+ phase->do_unroll(this,old_new, true);
// Adjust the pre-loop limits to align the main body
// iterations.
@@ -1731,9 +1735,9 @@ bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new )
_allow_optimizations &&
!tail()->is_top() ) { // Also ignore the occasional dead backedge
if (!_has_call) {
- if (!iteration_split_impl( phase, old_new )) {
- return false;
- }
+ if (!iteration_split_impl( phase, old_new )) {
+ return false;
+ }
} else if (policy_unswitching(phase)) {
phase->do_unswitching(this, old_new);
}
@@ -1746,3 +1750,576 @@ bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new )
return false;
return true;
}
+
+//-------------------------------is_uncommon_trap_proj----------------------------
+// Return true if proj is of the form "proj->[region->..]call_uct"
+bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate) {
+ int path_limit = 10;
+ assert(proj, "invalid argument");
+ Node* out = proj;
+ for (int ct = 0; ct < path_limit; ct++) {
+ out = out->unique_ctrl_out();
+ if (out == NULL || out->is_Root() || out->is_Start())
+ return false;
+ if (out->is_CallStaticJava()) {
+ int req = out->as_CallStaticJava()->uncommon_trap_request();
+ if (req != 0) {
+ Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
+ if (!must_reason_predicate || reason == Deoptimization::Reason_predicate){
+ return true;
+ }
+ }
+ return false; // don't do further after call
+ }
+ }
+ return false;
+}
+
+//-------------------------------is_uncommon_trap_if_pattern-------------------------
+// Return true for "if(test)-> proj -> ...
+// |
+// V
+// other_proj->[region->..]call_uct"
+//
+// "must_reason_predicate" means the uct reason must be Reason_predicate
+bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, bool must_reason_predicate) {
+ Node *in0 = proj->in(0);
+ if (!in0->is_If()) return false;
+ IfNode* iff = in0->as_If();
+
+ // we need "If(Conv2B(Opaque1(...)))" pattern for must_reason_predicate
+ if (must_reason_predicate) {
+ if (iff->in(1)->Opcode() != Op_Conv2B ||
+ iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
+ return false;
+ }
+ }
+
+ ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
+ return is_uncommon_trap_proj(other_proj, must_reason_predicate);
+}
+
+//------------------------------create_new_if_for_predicate------------------------
+// create a new if above the uct_if_pattern for the predicate to be promoted.
+//
+// before after
+// ---------- ----------
+// ctrl ctrl
+// | |
+// | |
+// v v
+// iff new_iff
+// / \ / \
+// / \ / \
+// v v v v
+// uncommon_proj cont_proj if_uct if_cont
+// \ | | | |
+// \ | | | |
+// v v v | v
+// rgn loop | iff
+// | | / \
+// | | / \
+// v | v v
+// uncommon_trap | uncommon_proj cont_proj
+// \ \ | |
+// \ \ | |
+// v v v v
+// rgn loop
+// |
+// |
+// v
+// uncommon_trap
+//
+//
+// We will create a region to guard the uct call if there is not already one there.
+// The true projection (if_cont) of the new_iff is returned.
+ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj) {
+ assert(is_uncommon_trap_if_pattern(cont_proj, true), "must be a uct if pattern!");
+ IfNode* iff = cont_proj->in(0)->as_If();
+
+ ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
+ Node *rgn = uncommon_proj->unique_ctrl_out();
+ assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
+
+ if (!rgn->is_Region()) { // create a region to guard the call
+ assert(rgn->is_Call(), "must be call uct");
+ CallNode* call = rgn->as_Call();
+ rgn = new (C, 1) RegionNode(1);
+ _igvn.set_type(rgn, rgn->bottom_type());
+ rgn->add_req(uncommon_proj);
+ set_idom(rgn, idom(uncommon_proj), dom_depth(uncommon_proj)+1);
+ _igvn.hash_delete(call);
+ call->set_req(0, rgn);
+ }
+
+ // Create new_iff
+ uint iffdd = dom_depth(iff);
+ IdealLoopTree* lp = get_loop(iff);
+ IfNode *new_iff = new (C, 2) IfNode(iff->in(0), NULL, iff->_prob, iff->_fcnt);
+ register_node(new_iff, lp, idom(iff), iffdd);
+ Node *if_cont = new (C, 1) IfTrueNode(new_iff);
+ Node *if_uct = new (C, 1) IfFalseNode(new_iff);
+ if (cont_proj->is_IfFalse()) {
+ // Swap
+ Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
+ }
+ register_node(if_cont, lp, new_iff, iffdd);
+ register_node(if_uct, get_loop(rgn), new_iff, iffdd);
+
+ // if_cont to iff
+ _igvn.hash_delete(iff);
+ iff->set_req(0, if_cont);
+ set_idom(iff, if_cont, dom_depth(iff));
+
+ // if_uct to rgn
+ _igvn.hash_delete(rgn);
+ rgn->add_req(if_uct);
+ Node* ridom = idom(rgn);
+ Node* nrdom = dom_lca(ridom, new_iff);
+ set_idom(rgn, nrdom, dom_depth(rgn));
+
+ // rgn must have no phis
+ assert(!rgn->as_Region()->has_phi(), "region must have no phis");
+
+ return if_cont->as_Proj();
+}
+
+//------------------------------find_predicate_insertion_point--------------------------
+// Find a good location to insert a predicate
+ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c) {
+ if (start_c == C->root() || !start_c->is_Proj())
+ return NULL;
+ if (is_uncommon_trap_if_pattern(start_c->as_Proj(), true/*Reason_Predicate*/)) {
+ return start_c->as_Proj();
+ }
+ return NULL;
+}
+
+//------------------------------Invariance-----------------------------------
+// Helper class for loop_predication_impl to compute invariance on the fly and
+// clone invariants.
+class Invariance : public StackObj {
+ VectorSet _visited, _invariant;
+ Node_Stack _stack;
+ VectorSet _clone_visited;
+ Node_List _old_new; // map of old to new (clone)
+ IdealLoopTree* _lpt;
+ PhaseIdealLoop* _phase;
+
+ // Helper function to set up the invariance for invariance computation
+ // If n is a known invariant, set up directly. Otherwise, look up
+ // the possibility to push n onto the stack for further processing.
+ void visit(Node* use, Node* n) {
+ if (_lpt->is_invariant(n)) { // known invariant
+ _invariant.set(n->_idx);
+ } else if (!n->is_CFG()) {
+ Node *n_ctrl = _phase->ctrl_or_self(n);
+ Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
+ if (_phase->is_dominator(n_ctrl, u_ctrl)) {
+ _stack.push(n, n->in(0) == NULL ? 1 : 0);
+ }
+ }
+ }
+
+ // Compute invariance for "the_node" and (possibly) all its inputs recursively
+ // on the fly
+ void compute_invariance(Node* n) {
+ assert(_visited.test(n->_idx), "must be");
+ visit(n, n);
+ while (_stack.is_nonempty()) {
+ Node* n = _stack.node();
+ uint idx = _stack.index();
+ if (idx == n->req()) { // all inputs are processed
+ _stack.pop();
+ // n is invariant if its inputs are all invariant
+ bool all_inputs_invariant = true;
+ for (uint i = 0; i < n->req(); i++) {
+ Node* in = n->in(i);
+ if (in == NULL) continue;
+ assert(_visited.test(in->_idx), "must have visited input");
+ if (!_invariant.test(in->_idx)) { // bad guy
+ all_inputs_invariant = false;
+ break;
+ }
+ }
+ if (all_inputs_invariant) {
+ _invariant.set(n->_idx); // I am an invariant too
+ }
+ } else { // process next input
+ _stack.set_index(idx + 1);
+ Node* m = n->in(idx);
+ if (m != NULL && !_visited.test_set(m->_idx)) {
+ visit(n, m);
+ }
+ }
+ }
+ }
+
+ // Helper function to set up _old_new map for clone_nodes.
+ // If n is a known invariant, set up directly ("clone" of n == n).
+ // Otherwise, push n onto the stack for real cloning.
+ void clone_visit(Node* n) {
+ assert(_invariant.test(n->_idx), "must be invariant");
+ if (_lpt->is_invariant(n)) { // known invariant
+ _old_new.map(n->_idx, n);
+ } else { // to be cloned
+ assert (!n->is_CFG(), "should not see CFG here");
+ _stack.push(n, n->in(0) == NULL ? 1 : 0);
+ }
+ }
+
+ // Clone "n" and (possibly) all its inputs recursively
+ void clone_nodes(Node* n, Node* ctrl) {
+ clone_visit(n);
+ while (_stack.is_nonempty()) {
+ Node* n = _stack.node();
+ uint idx = _stack.index();
+ if (idx == n->req()) { // all inputs processed, clone n!
+ _stack.pop();
+ // clone invariant node
+ Node* n_cl = n->clone();
+ _old_new.map(n->_idx, n_cl);
+ _phase->register_new_node(n_cl, ctrl);
+ for (uint i = 0; i < n->req(); i++) {
+ Node* in = n_cl->in(i);
+ if (in == NULL) continue;
+ n_cl->set_req(i, _old_new[in->_idx]);
+ }
+ } else { // process next input
+ _stack.set_index(idx + 1);
+ Node* m = n->in(idx);
+ if (m != NULL && !_clone_visited.test_set(m->_idx)) {
+ clone_visit(m); // visit the input
+ }
+ }
+ }
+ }
+
+ public:
+ Invariance(Arena* area, IdealLoopTree* lpt) :
+ _lpt(lpt), _phase(lpt->_phase),
+ _visited(area), _invariant(area), _stack(area, 10 /* guess */),
+ _clone_visited(area), _old_new(area)
+ {}
+
+ // Map old to n for invariance computation and clone
+ void map_ctrl(Node* old, Node* n) {
+ assert(old->is_CFG() && n->is_CFG(), "must be");
+ _old_new.map(old->_idx, n); // "clone" of old is n
+ _invariant.set(old->_idx); // old is invariant
+ _clone_visited.set(old->_idx);
+ }
+
+ // Driver function to compute invariance
+ bool is_invariant(Node* n) {
+ if (!_visited.test_set(n->_idx))
+ compute_invariance(n);
+ return (_invariant.test(n->_idx) != 0);
+ }
+
+ // Driver function to clone invariant
+ Node* clone(Node* n, Node* ctrl) {
+ assert(ctrl->is_CFG(), "must be");
+ assert(_invariant.test(n->_idx), "must be an invariant");
+ if (!_clone_visited.test(n->_idx))
+ clone_nodes(n, ctrl);
+ return _old_new[n->_idx];
+ }
+};
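+// In sketch form, the rule the class implements is: a node is invariant iff it
+// is a known loop invariant or all of its inputs are invariant. A recursive toy
+// version over an acyclic graph (the real code uses an explicit Node_Stack and
+// VectorSets instead of recursion; ToyNode and the memo map are invented):
+//
+//   #include <map>
+//   #include <vector>
+//   struct ToyNode { std::vector<ToyNode*> in; bool known_invariant; };
+//   static bool invariant(ToyNode* n, std::map<ToyNode*, bool>& memo) {
+//     std::map<ToyNode*, bool>::iterator it = memo.find(n);
+//     if (it != memo.end()) return it->second;
+//     bool inv = n->known_invariant;
+//     if (!inv && !n->in.empty()) {
+//       inv = true;
+//       for (size_t i = 0; i < n->in.size() && inv; i++)
+//         inv = invariant(n->in[i], memo);
+//     }
+//     return memo[n] = inv;
+//   }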
+
+//------------------------------is_range_check_if -----------------------------------
+// Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format
+// Note: this function is particularly designed for loop predication. We require load_range
+// and offset to be loop invariant computed on the fly by "invar"
+bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const {
+ if (!is_loop_exit(iff)) {
+ return false;
+ }
+ if (!iff->in(1)->is_Bool()) {
+ return false;
+ }
+ const BoolNode *bol = iff->in(1)->as_Bool();
+ if (bol->_test._test != BoolTest::lt) {
+ return false;
+ }
+ if (!bol->in(1)->is_Cmp()) {
+ return false;
+ }
+ const CmpNode *cmp = bol->in(1)->as_Cmp();
+ if (cmp->Opcode() != Op_CmpU ) {
+ return false;
+ }
+ if (cmp->in(2)->Opcode() != Op_LoadRange) {
+ return false;
+ }
+ LoadRangeNode* lr = (LoadRangeNode*)cmp->in(2);
+ if (!invar.is_invariant(lr)) { // loadRange must be invariant
+ return false;
+ }
+ Node *iv = _head->as_CountedLoop()->phi();
+ int scale = 0;
+ Node *offset = NULL;
+ if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) {
+ return false;
+ }
+ if (offset && !invar.is_invariant(offset)) { // offset must be invariant
+ return false;
+ }
+ return true;
+}
+
+//------------------------------rc_predicate-----------------------------------
+// Create a range check predicate
+//
+// for (i = init; i < limit; i += stride) {
+// a[scale*i+offset]
+// }
+//
+// Compute max(scale*i + offset) for init <= i < limit and build the predicate
+// as "max(scale*i + offset) u< a.length".
+//
+// There are two cases for max(scale*i + offset):
+// (1) stride*scale > 0
+// max(scale*i + offset) = scale*(limit-stride) + offset
+// (2) stride*scale < 0
+// max(scale*i + offset) = scale*init + offset
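+//
+// A worked example (illustrative only): with scale = 1, offset = 0, init = 0,
+// limit = n and stride = 1, case (1) applies, so max(scale*i + offset) is
+// n - 1 and the generated predicate is "n - 1 u< a.length", a single check
+// guarding every access a[i] in the loop.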
+BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
+ int scale, Node* offset,
+ Node* init, Node* limit, Node* stride,
+ Node* range) {
+ Node* max_idx_expr = init;
+ int stride_con = stride->get_int();
+ if ((stride_con > 0) == (scale > 0)) {
+ max_idx_expr = new (C, 3) SubINode(limit, stride);
+ register_new_node(max_idx_expr, ctrl);
+ }
+
+ if (scale != 1) {
+ ConNode* con_scale = _igvn.intcon(scale);
+ max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
+ register_new_node(max_idx_expr, ctrl);
+ }
+
+ if (offset && (!offset->is_Con() || offset->get_int() != 0)){
+ max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
+ register_new_node(max_idx_expr, ctrl);
+ }
+
+ CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
+ register_new_node(cmp, ctrl);
+ BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
+ register_new_node(bol, ctrl);
+ return bol;
+}
+
+//------------------------------ loop_predication_impl--------------------------
+// Insert loop predicates for null checks and range checks
+bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
+ if (!UseLoopPredicate) return false;
+
+ // Too many traps seen?
+ bool tmt = C->too_many_traps(C->method(), 0, Deoptimization::Reason_predicate);
+ int tc = C->trap_count(Deoptimization::Reason_predicate);
+ if (tmt || tc > 0) {
+ if (TraceLoopPredicate) {
+ tty->print_cr("too many predicate traps: %d", tc);
+ C->method()->print(); // which method has too many predicate traps
+ tty->print_cr("");
+ }
+ return false;
+ }
+
+ CountedLoopNode *cl = NULL;
+ if (loop->_head->is_CountedLoop()) {
+ cl = loop->_head->as_CountedLoop();
+ // do nothing for iteration-split loops
+ if (!cl->is_normal_loop()) return false;
+ }
+
+ LoopNode *lpn = loop->_head->as_Loop();
+ Node* entry = lpn->in(LoopNode::EntryControl);
+
+ ProjNode *predicate_proj = find_predicate_insertion_point(entry);
+ if (!predicate_proj) {
+#ifndef PRODUCT
+ if (TraceLoopPredicate) {
+ tty->print("missing predicate:");
+ loop->dump_head();
+ }
+#endif
+ return false;
+ }
+
+ ConNode* zero = _igvn.intcon(0);
+ set_ctrl(zero, C->root());
+ Node *cond_false = new (C, 2) Conv2BNode(zero);
+ register_new_node(cond_false, C->root());
+ ConNode* one = _igvn.intcon(1);
+ set_ctrl(one, C->root());
+ Node *cond_true = new (C, 2) Conv2BNode(one);
+ register_new_node(cond_true, C->root());
+
+ ResourceArea *area = Thread::current()->resource_area();
+ Invariance invar(area, loop);
+
+ // Create list of if-projs such that a newer proj dominates all older
+ // projs in the list, and they all dominate loop->tail()
+ Node_List if_proj_list(area);
+ LoopNode *head = loop->_head->as_Loop();
+ Node *current_proj = loop->tail(); // start from tail
+ while ( current_proj != head ) {
+ if (loop == get_loop(current_proj) && // still in the loop?
+ current_proj->is_Proj() && // is a projection?
+ current_proj->in(0)->Opcode() == Op_If) { // is an If projection?
+ if_proj_list.push(current_proj);
+ }
+ current_proj = idom(current_proj);
+ }
+
+ bool hoisted = false; // true if at least one proj is promoted
+ while (if_proj_list.size() > 0) {
+ // The following are changed to non-null when a predicate can be hoisted
+ ProjNode* new_predicate_proj = NULL;
+ BoolNode* new_predicate_bol = NULL;
+
+ ProjNode* proj = if_proj_list.pop()->as_Proj();
+ IfNode* iff = proj->in(0)->as_If();
+
+ if (!is_uncommon_trap_if_pattern(proj)) {
+ if (loop->is_loop_exit(iff)) {
+ // stop processing the remaining projs in the list because their execution
+ // depends on the condition of "iff" (iff->in(1)).
+ break;
+ } else {
+ // Both arms are inside the loop. There are two cases:
+ // (1) there is one backward branch. In this case, any remaining proj
+ // in the if_proj list post-dominates "iff". So, the condition of "iff"
+ // does not directly determine the execution of the remaining projs, and we
+ // can safely continue.
+ // (2) both arms are forward branches, i.e. a diamond shape. In this case, "proj"
+ // does not dominate loop->tail(), so it cannot be in the if_proj list.
+ continue;
+ }
+ }
+
+ Node* test = iff->in(1);
+ if (!test->is_Bool()) { // Conv2B, ...
+ continue;
+ }
+ BoolNode* bol = test->as_Bool();
+ if (invar.is_invariant(bol)) {
+ // Invariant test
+ new_predicate_proj = create_new_if_for_predicate(predicate_proj);
+ Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
+ new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
+ if (TraceLoopPredicate) tty->print("invariant");
+ } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
+ // Range check (only for counted loops)
+ new_predicate_proj = create_new_if_for_predicate(predicate_proj);
+ Node *ctrl = new_predicate_proj->in(0)->as_If()->in(0);
+ const Node* cmp = bol->in(1)->as_Cmp();
+ Node* idx = cmp->in(1);
+ assert(!invar.is_invariant(idx), "index is variant");
+ assert(cmp->in(2)->Opcode() == Op_LoadRange, "must be");
+ LoadRangeNode* ld_rng = (LoadRangeNode*)cmp->in(2); // LoadRangeNode
+ assert(invar.is_invariant(ld_rng), "load range must be invariant");
+ ld_rng = (LoadRangeNode*)invar.clone(ld_rng, ctrl);
+ int scale = 1;
+ Node* offset = zero;
+ bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
+ assert(ok, "must be index expression");
+ if (offset && offset != zero) {
+ assert(invar.is_invariant(offset), "offset must be loop invariant");
+ offset = invar.clone(offset, ctrl);
+ }
+ Node* init = cl->init_trip();
+ Node* limit = cl->limit();
+ Node* stride = cl->stride();
+ new_predicate_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, ld_rng);
+ if (TraceLoopPredicate) tty->print("range check");
+ }
+
+ if (new_predicate_proj == NULL) {
+ // The other proj of the "iff" is an uncommon trap projection, and we can assume
+ // it will not be executed ("executed" means the uct is raised).
+ continue;
+ } else {
+ // Success - attach condition (new_predicate_bol) to predicate if
+ invar.map_ctrl(proj, new_predicate_proj); // so that invariance tests and clones use the new control
+ IfNode* new_iff = new_predicate_proj->in(0)->as_If();
+
+ // Negate test if necessary
+ if (proj->_con != predicate_proj->_con) {
+ new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
+ register_new_node(new_predicate_bol, new_iff->in(0));
+ if (TraceLoopPredicate) tty->print_cr(" if negated: %d", iff->_idx);
+ } else {
+ if (TraceLoopPredicate) tty->print_cr(" if: %d", iff->_idx);
+ }
+
+ _igvn.hash_delete(new_iff);
+ new_iff->set_req(1, new_predicate_bol);
+
+ _igvn.hash_delete(iff);
+ iff->set_req(1, proj->is_IfFalse() ? cond_false : cond_true);
+
+ Node* ctrl = new_predicate_proj; // new control
+ ProjNode* dp = proj; // old control
+ assert(get_loop(dp) == loop, "guaranteed at the time of collecting proj");
+ // Find nodes (which depend only on the test) off the surviving projection;
+ // move them outside the loop under the new control
+ for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
+ Node* cd = dp->fast_out(i); // Control-dependent node
+ if (cd->depends_only_on_test()) {
+ assert(cd->in(0) == dp, "");
+ _igvn.hash_delete(cd);
+ cd->set_req(0, ctrl); // ctrl, not NULL
+ set_early_ctrl(cd);
+ _igvn._worklist.push(cd);
+ IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
+ if (new_loop != loop) {
+ if (!loop->_child) loop->_body.yank(cd);
+ if (!new_loop->_child ) new_loop->_body.push(cd);
+ }
+ --i;
+ --imax;
+ }
+ }
+
+ hoisted = true;
+ C->set_major_progress();
+ }
+ } // end while
+
+#ifndef PRODUCT
+ // Report that loop predication was actually performed
+ // for this loop
+ if (TraceLoopPredicate && hoisted) {
+ tty->print("Loop Predication Performed:");
+ loop->dump_head();
+ }
+#endif
+
+ return hoisted;
+}
+
+//------------------------------loop_predication--------------------------------
+// Driver routine for the loop predication optimization
+bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) {
+ bool hoisted = false;
+ // Recursively promote predicates
+ if ( _child ) {
+ hoisted = _child->loop_predication( phase);
+ }
+
+ // self
+ if (!_irreducible && !tail()->is_top()) {
+ hoisted |= phase->loop_predication_impl(this);
+ }
+
+ if ( _next ) { //sibling
+ hoisted |= _next->loop_predication( phase);
+ }
+
+ return hoisted;
+}
diff --git a/hotspot/src/share/vm/opto/loopnode.cpp b/hotspot/src/share/vm/opto/loopnode.cpp
index a1d87225203..b662aa29328 100644
--- a/hotspot/src/share/vm/opto/loopnode.cpp
+++ b/hotspot/src/share/vm/opto/loopnode.cpp
@@ -1279,7 +1279,8 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) {
// Visit all children, looking for Phis
for (DUIterator i = cl->outs(); cl->has_out(i); i++) {
Node *out = cl->out(i);
- if (!out->is_Phi() || out == phi) continue; // Looking for other phis
+ // Look for other phis (secondary IVs). Skip dead ones
+ if (!out->is_Phi() || out == phi || !phase->has_node(out)) continue;
PhiNode* phi2 = out->as_Phi();
Node *incr2 = phi2->in( LoopNode::LoopBackControl );
// Look for induction variables of the form: X += constant
@@ -1419,11 +1420,57 @@ static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog*
}
}
+//---------------------collect_potentially_useful_predicates-----------------------
+// Helper function to collect potentially useful predicates to prevent them from
+// being eliminated by PhaseIdealLoop::eliminate_useless_predicates
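+// Parser-inserted predicates have the shape If(Conv2B(Opaque1(1))) with the false
+// projection leading to an uncommon trap (see Parse::add_predicate()), so the
+// Opaque1 node reached via entry->in(0)->in(1)->in(1) identifies a predicate;
+// pushing it on the list keeps it alive.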
+void PhaseIdealLoop::collect_potentially_useful_predicates(
+ IdealLoopTree * loop, Unique_Node_List &useful_predicates) {
+ if (loop->_child) { // child
+ collect_potentially_useful_predicates(loop->_child, useful_predicates);
+ }
+
+ // self (only loops to which loop predication can be applied may use their predicates)
+ if (loop->_head->is_Loop() &&
+ !loop->_irreducible &&
+ !loop->tail()->is_top()) {
+ LoopNode *lpn = loop->_head->as_Loop();
+ Node* entry = lpn->in(LoopNode::EntryControl);
+ ProjNode *predicate_proj = find_predicate_insertion_point(entry);
+ if (predicate_proj != NULL ) { // right pattern that can be used by loop predication
+ assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
+ useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
+ }
+ }
+
+ if ( loop->_next ) { // sibling
+ collect_potentially_useful_predicates(loop->_next, useful_predicates);
+ }
+}
+
+//------------------------eliminate_useless_predicates-----------------------------
+// Eliminate all inserted predicates if they could not be used by loop predication.
+void PhaseIdealLoop::eliminate_useless_predicates() {
+ if (C->predicate_count() == 0) return; // no predicate left
+
+ Unique_Node_List useful_predicates; // to store useful predicates
+ if (C->has_loops()) {
+ collect_potentially_useful_predicates(_ltree_root->_child, useful_predicates);
+ }
+
+ for (int i = C->predicate_count(); i > 0; i--) {
+ Node * n = C->predicate_opaque1_node(i-1);
+ assert(n->Opcode() == Op_Opaque1, "must be");
+ if (!useful_predicates.member(n)) { // not in the useful list
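+ // Bypassing the Opaque1 exposes the constant condition, so igvn can fold
+ // the predicate If and remove its uncommon trap path.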
+ _igvn.replace_node(n, n->in(1));
+ }
+ }
+}
+
//=============================================================================
//----------------------------build_and_optimize-------------------------------
// Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to
// its corresponding LoopNode. If 'optimize' is true, do some loop cleanups.
-void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
+void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool do_loop_pred) {
int old_progress = C->major_progress();
// Reset major-progress flag for the driver's heuristics
@@ -1576,6 +1623,12 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
return;
}
+ // Some parser-inserted loop predicates could never be used by loop
+ // predication. Eliminate them before loop optimizations.
+ if (UseLoopPredicate) {
+ eliminate_useless_predicates();
+ }
+
// clear out the dead code
while(_deadlist.size()) {
_igvn.remove_globally_dead_node(_deadlist.pop());
@@ -1602,7 +1655,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
// Because RCE opportunities can be masked by split_thru_phi,
// look for RCE candidates and inhibit split_thru_phi
// on just their loop-phi's for this pass of loop opts
- if( SplitIfBlocks && do_split_ifs ) {
+ if (SplitIfBlocks && do_split_ifs) {
if (lpt->policy_range_check(this)) {
lpt->_rce_candidate = 1; // = true
}
@@ -1618,12 +1671,17 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
}
+ // Perform loop predication before iteration splitting
+ if (do_loop_pred && C->has_loops() && !C->major_progress()) {
+ _ltree_root->_child->loop_predication(this);
+ }
+
// Perform iteration-splitting on inner loops. Split iterations to avoid
// range checks or one-shot null checks.
// If split-if's didn't hack the graph too bad (no CFG changes)
// then do loop opts.
- if( C->has_loops() && !C->major_progress() ) {
+ if (C->has_loops() && !C->major_progress()) {
memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) );
_ltree_root->_child->iteration_split( this, worklist );
// No verify after peeling! GCM has hoisted code out of the loop.
@@ -1635,7 +1693,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
// Do verify graph edges in any case
NOT_PRODUCT( C->verify_graph_edges(); );
- if( !do_split_ifs ) {
+ if (!do_split_ifs) {
// We saw major progress in Split-If to get here. We forced a
// pass with unrolling and not split-if, however more split-if's
// might make progress. If the unrolling didn't make progress
@@ -2762,6 +2820,22 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
Node *legal = LCA; // Walk 'legal' up the IDOM chain
Node *least = legal; // Best legal position so far
while( early != legal ) { // While not at earliest legal
+#ifdef ASSERT
+ if (legal->is_Start() && !early->is_Root()) {
+ // Bad graph. Print idom path and fail.
+ tty->print_cr( "Bad graph detected in build_loop_late");
+ tty->print("n: ");n->dump(); tty->cr();
+ tty->print("early: ");early->dump(); tty->cr();
+ int ct = 0;
+ Node *dbg_legal = LCA;
+ while(!dbg_legal->is_Start() && ct < 100) {
+ tty->print("idom[%d] ",ct); dbg_legal->dump(); tty->cr();
+ ct++;
+ dbg_legal = idom(dbg_legal);
+ }
+ assert(false, "Bad graph detected in build_loop_late");
+ }
+#endif
// Find least loop nesting depth
legal = idom(legal); // Bump up the IDOM tree
// Check for lower nesting depth
diff --git a/hotspot/src/share/vm/opto/loopnode.hpp b/hotspot/src/share/vm/opto/loopnode.hpp
index 892095595ed..e34cfcb18a3 100644
--- a/hotspot/src/share/vm/opto/loopnode.hpp
+++ b/hotspot/src/share/vm/opto/loopnode.hpp
@@ -30,6 +30,7 @@ class LoopNode;
class Node;
class PhaseIdealLoop;
class VectorSet;
+class Invariance;
struct small_cache;
//
@@ -325,6 +326,10 @@ public:
// Returns TRUE if loop tree is structurally changed.
bool beautify_loops( PhaseIdealLoop *phase );
+ // Perform optimization to use the loop predicates for null checks and range checks.
+ // Applies to any loop level (not just the innermost one)
+ bool loop_predication( PhaseIdealLoop *phase);
+
// Perform iteration-splitting on inner loops. Split iterations to
// avoid range checks or one-shot null checks. Returns false if the
// current round of loop opts should stop.
@@ -395,6 +400,9 @@ public:
// into longer memory ops, we may want to increase alignment.
bool policy_align( PhaseIdealLoop *phase ) const;
+ // Return TRUE if "iff" is a range check.
+ bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;
+
// Compute loop trip count from profile data
void compute_profile_trip_cnt( PhaseIdealLoop *phase );
@@ -521,9 +529,6 @@ class PhaseIdealLoop : public PhaseTransform {
}
Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );
- // true if CFG node d dominates CFG node n
- bool is_dominator(Node *d, Node *n);
-
// Helper function for directing control inputs away from CFG split
// points.
Node *find_non_split_ctrl( Node *ctrl ) const {
@@ -572,6 +577,17 @@ public:
assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
return n;
}
+ // true if CFG node d dominates CFG node n
+ bool is_dominator(Node *d, Node *n);
+ // return get_ctrl for a data node and self(n) for a CFG node
+ Node* ctrl_or_self(Node* n) {
+ if (has_ctrl(n))
+ return get_ctrl(n);
+ else {
+ assert (n->is_CFG(), "must be a CFG node");
+ return n;
+ }
+ }
private:
Node *get_ctrl_no_update( Node *i ) const {
@@ -600,7 +616,7 @@ private:
// Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms. Replace
// the 'old_node' with 'new_node'. Kill old-node. Add a reference
// from old_node to new_node to support the lazy update. Reference
- // replaces loop reference, since that is not neede for dead node.
+ // replaces loop reference, since that is not needed for dead node.
public:
void lazy_update( Node *old_node, Node *new_node ) {
assert( old_node != new_node, "no cycles please" );
@@ -679,11 +695,11 @@ private:
_dom_lca_tags(C->comp_arena()),
_verify_me(NULL),
_verify_only(true) {
- build_and_optimize(false);
+ build_and_optimize(false, false);
}
// build the loop tree and perform any requested optimizations
- void build_and_optimize(bool do_split_if);
+ void build_and_optimize(bool do_split_if, bool do_loop_pred);
public:
// Dominators for the sea of nodes
@@ -694,13 +710,13 @@ public:
Node *dom_lca_internal( Node *n1, Node *n2 ) const;
// Compute the Ideal Node to Loop mapping
- PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs) :
+ PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool do_loop_pred) :
PhaseTransform(Ideal_Loop),
_igvn(igvn),
_dom_lca_tags(C->comp_arena()),
_verify_me(NULL),
_verify_only(false) {
- build_and_optimize(do_split_ifs);
+ build_and_optimize(do_split_ifs, do_loop_pred);
}
// Verify that verify_me made the same decisions as a fresh run.
@@ -710,7 +726,7 @@ public:
_dom_lca_tags(C->comp_arena()),
_verify_me(verify_me),
_verify_only(false) {
- build_and_optimize(false);
+ build_and_optimize(false, false);
}
// Build and verify the loop tree without modifying the graph. This
@@ -790,6 +806,30 @@ public:
// Return true if exp is a scaled induction var plus (or minus) constant
bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);
+ // Return true if proj is for "proj->[region->..]call_uct"
+ bool is_uncommon_trap_proj(ProjNode* proj, bool must_reason_predicate = false);
+ // Return true for "if(test)-> proj -> ...
+ // |
+ // V
+ // other_proj->[region->..]call_uct"
+ bool is_uncommon_trap_if_pattern(ProjNode* proj, bool must_reason_predicate = false);
+ // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
+ ProjNode* create_new_if_for_predicate(ProjNode* cont_proj);
+ // Find a good location to insert a predicate
+ ProjNode* find_predicate_insertion_point(Node* start_c);
+ // Construct a range check for a predicate if
+ BoolNode* rc_predicate(Node* ctrl,
+ int scale, Node* offset,
+ Node* init, Node* limit, Node* stride,
+ Node* range);
+
+ // Implementation of the loop predication to promote checks outside the loop
+ bool loop_predication_impl(IdealLoopTree *loop);
+
+ // Helper function to collect predicates for eliminating the useless ones
+ void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
+ void eliminate_useless_predicates();
+
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
void do_range_check( IdealLoopTree *loop, Node_List &old_new );
@@ -906,7 +946,6 @@ private:
const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);
// Helper functions
- void register_new_node( Node *n, Node *blk );
Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache );
Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true );
void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true );
@@ -918,6 +957,7 @@ private:
public:
void set_created_loop_node() { _created_loop_node = true; }
bool created_loop_node() { return _created_loop_node; }
+ void register_new_node( Node *n, Node *blk );
#ifndef PRODUCT
void dump( ) const;
diff --git a/hotspot/src/share/vm/opto/machnode.cpp b/hotspot/src/share/vm/opto/machnode.cpp
index 76121704b5f..0e9d4c6e819 100644
--- a/hotspot/src/share/vm/opto/machnode.cpp
+++ b/hotspot/src/share/vm/opto/machnode.cpp
@@ -636,7 +636,9 @@ uint MachCallJavaNode::cmp( const Node &n ) const {
}
#ifndef PRODUCT
void MachCallJavaNode::dump_spec(outputStream *st) const {
- if( _method ) {
+ if (_method_handle_invoke)
+ st->print("MethodHandle ");
+ if (_method) {
_method->print_short_name(st);
st->print(" ");
}
@@ -644,6 +646,20 @@ void MachCallJavaNode::dump_spec(outputStream *st) const {
}
#endif
+//------------------------------Registers--------------------------------------
+const RegMask &MachCallJavaNode::in_RegMask(uint idx) const {
+ // Values in the domain use the users calling convention, embodied in the
+ // _in_rms array of RegMasks.
+ if (idx < tf()->domain()->cnt()) return _in_rms[idx];
+ // Values outside the domain represent debug info
+ Matcher* m = Compile::current()->matcher();
+ // If this call is a MethodHandle invoke we have to use a different
+ // debugmask which does not include the register we use to save the
+ // SP over MH invokes.
+ RegMask** debugmask = _method_handle_invoke ? m->idealreg2mhdebugmask : m->idealreg2debugmask;
+ return *debugmask[in(idx)->ideal_reg()];
+}
+
//=============================================================================
uint MachCallStaticJavaNode::size_of() const { return sizeof(*this); }
uint MachCallStaticJavaNode::cmp( const Node &n ) const {
diff --git a/hotspot/src/share/vm/opto/machnode.hpp b/hotspot/src/share/vm/opto/machnode.hpp
index 3c24a3e5c65..67d6965b628 100644
--- a/hotspot/src/share/vm/opto/machnode.hpp
+++ b/hotspot/src/share/vm/opto/machnode.hpp
@@ -662,9 +662,13 @@ public:
ciMethod* _method; // Method being direct called
int _bci; // Byte Code index of call byte code
bool _optimized_virtual; // Tells if node is a static call or an optimized virtual
+ bool _method_handle_invoke; // Tells if the call has to preserve SP
MachCallJavaNode() : MachCallNode() {
init_class_id(Class_MachCallJava);
}
+
+ virtual const RegMask &in_RegMask(uint) const;
+
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
diff --git a/hotspot/src/share/vm/opto/macro.cpp b/hotspot/src/share/vm/opto/macro.cpp
index e2421a7f3d3..2fdc335b918 100644
--- a/hotspot/src/share/vm/opto/macro.cpp
+++ b/hotspot/src/share/vm/opto/macro.cpp
@@ -316,6 +316,21 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
}
mem = mem->in(MemNode::Memory);
+ } else if (mem->is_ClearArray()) {
+ if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
+ // Can not bypass initialization of the instance
+ // we are looking for.
+ debug_only(intptr_t offset;)
+ assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
+ InitializeNode* init = alloc->as_Allocate()->initialization();
+ // We are looking for stored value, return Initialize node
+ // or memory edge from Allocate node.
+ if (init != NULL)
+ return init;
+ else
+ return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
+ }
+ // Otherwise skip it (the call updated 'mem' value).
} else if (mem->Opcode() == Op_SCMemProj) {
assert(mem->in(0)->is_LoadStore(), "sanity");
const TypePtr* atype = mem->in(0)->in(MemNode::Address)->bottom_type()->is_ptr();
@@ -823,6 +838,18 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
Node *n = use->last_out(k);
uint oc2 = use->outcnt();
if (n->is_Store()) {
+#ifdef ASSERT
+ // Verify that there is no dependent MemBarVolatile nodes,
+ // they should be removed during IGVN, see MemBarNode::Ideal().
+ for (DUIterator_Fast pmax, p = n->fast_outs(pmax);
+ p < pmax; p++) {
+ Node* mb = n->fast_out(p);
+ assert(mb->is_Initialize() || !mb->is_MemBar() ||
+ mb->req() <= MemBarNode::Precedent ||
+ mb->in(MemBarNode::Precedent) != n,
+ "MemBarVolatile should be eliminated for non-escaping object");
+ }
+#endif
_igvn.replace_node(n, n->in(MemNode::Memory));
} else {
eliminate_card_mark(n);
@@ -912,15 +939,29 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
return false;
}
+ CompileLog* log = C->log();
+ if (log != NULL) {
+ Node* klass = alloc->in(AllocateNode::KlassNode);
+ const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
+ log->head("eliminate_allocation type='%d'",
+ log->identify(tklass->klass()));
+ JVMState* p = alloc->jvms();
+ while (p != NULL) {
+ log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+ p = p->caller();
+ }
+ log->tail("eliminate_allocation");
+ }
+
process_users_of_allocation(alloc);
#ifndef PRODUCT
-if (PrintEliminateAllocations) {
- if (alloc->is_AllocateArray())
- tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
- else
- tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
-}
+ if (PrintEliminateAllocations) {
+ if (alloc->is_AllocateArray())
+ tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
+ else
+ tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
+ }
#endif
return true;
@@ -1639,6 +1680,18 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
} // if (!oldbox->is_eliminated())
} // if (alock->is_Lock() && !lock->is_coarsened())
+ CompileLog* log = C->log();
+ if (log != NULL) {
+ log->head("eliminate_lock lock='%d'",
+ alock->is_Lock());
+ JVMState* p = alock->jvms();
+ while (p != NULL) {
+ log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+ p = p->caller();
+ }
+ log->tail("eliminate_lock");
+ }
+
#ifndef PRODUCT
if (PrintEliminateLocks) {
if (alock->is_Lock()) {
diff --git a/hotspot/src/share/vm/opto/matcher.cpp b/hotspot/src/share/vm/opto/matcher.cpp
index 57d2c5e72ae..d535df05c76 100644
--- a/hotspot/src/share/vm/opto/matcher.cpp
+++ b/hotspot/src/share/vm/opto/matcher.cpp
@@ -70,19 +70,27 @@ Matcher::Matcher( Node_List &proj_list ) :
_dontcare(&_states_arena) {
C->set_matcher(this);
- idealreg2spillmask[Op_RegI] = NULL;
- idealreg2spillmask[Op_RegN] = NULL;
- idealreg2spillmask[Op_RegL] = NULL;
- idealreg2spillmask[Op_RegF] = NULL;
- idealreg2spillmask[Op_RegD] = NULL;
- idealreg2spillmask[Op_RegP] = NULL;
+ idealreg2spillmask [Op_RegI] = NULL;
+ idealreg2spillmask [Op_RegN] = NULL;
+ idealreg2spillmask [Op_RegL] = NULL;
+ idealreg2spillmask [Op_RegF] = NULL;
+ idealreg2spillmask [Op_RegD] = NULL;
+ idealreg2spillmask [Op_RegP] = NULL;
+
+ idealreg2debugmask [Op_RegI] = NULL;
+ idealreg2debugmask [Op_RegN] = NULL;
+ idealreg2debugmask [Op_RegL] = NULL;
+ idealreg2debugmask [Op_RegF] = NULL;
+ idealreg2debugmask [Op_RegD] = NULL;
+ idealreg2debugmask [Op_RegP] = NULL;
+
+ idealreg2mhdebugmask[Op_RegI] = NULL;
+ idealreg2mhdebugmask[Op_RegN] = NULL;
+ idealreg2mhdebugmask[Op_RegL] = NULL;
+ idealreg2mhdebugmask[Op_RegF] = NULL;
+ idealreg2mhdebugmask[Op_RegD] = NULL;
+ idealreg2mhdebugmask[Op_RegP] = NULL;
- idealreg2debugmask[Op_RegI] = NULL;
- idealreg2debugmask[Op_RegN] = NULL;
- idealreg2debugmask[Op_RegL] = NULL;
- idealreg2debugmask[Op_RegF] = NULL;
- idealreg2debugmask[Op_RegD] = NULL;
- idealreg2debugmask[Op_RegP] = NULL;
debug_only(_mem_node = NULL;) // Ideal memory node consumed by mach node
}
@@ -389,19 +397,28 @@ static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
void Matcher::init_first_stack_mask() {
// Allocate storage for spill masks as masks for the appropriate load type.
- RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*12);
- idealreg2spillmask[Op_RegN] = &rms[0];
- idealreg2spillmask[Op_RegI] = &rms[1];
- idealreg2spillmask[Op_RegL] = &rms[2];
- idealreg2spillmask[Op_RegF] = &rms[3];
- idealreg2spillmask[Op_RegD] = &rms[4];
- idealreg2spillmask[Op_RegP] = &rms[5];
- idealreg2debugmask[Op_RegN] = &rms[6];
- idealreg2debugmask[Op_RegI] = &rms[7];
- idealreg2debugmask[Op_RegL] = &rms[8];
- idealreg2debugmask[Op_RegF] = &rms[9];
- idealreg2debugmask[Op_RegD] = &rms[10];
- idealreg2debugmask[Op_RegP] = &rms[11];
+ RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * 3*6);
+
+ idealreg2spillmask [Op_RegN] = &rms[0];
+ idealreg2spillmask [Op_RegI] = &rms[1];
+ idealreg2spillmask [Op_RegL] = &rms[2];
+ idealreg2spillmask [Op_RegF] = &rms[3];
+ idealreg2spillmask [Op_RegD] = &rms[4];
+ idealreg2spillmask [Op_RegP] = &rms[5];
+
+ idealreg2debugmask [Op_RegN] = &rms[6];
+ idealreg2debugmask [Op_RegI] = &rms[7];
+ idealreg2debugmask [Op_RegL] = &rms[8];
+ idealreg2debugmask [Op_RegF] = &rms[9];
+ idealreg2debugmask [Op_RegD] = &rms[10];
+ idealreg2debugmask [Op_RegP] = &rms[11];
+
+ idealreg2mhdebugmask[Op_RegN] = &rms[12];
+ idealreg2mhdebugmask[Op_RegI] = &rms[13];
+ idealreg2mhdebugmask[Op_RegL] = &rms[14];
+ idealreg2mhdebugmask[Op_RegF] = &rms[15];
+ idealreg2mhdebugmask[Op_RegD] = &rms[16];
+ idealreg2mhdebugmask[Op_RegP] = &rms[17];
OptoReg::Name i;
@@ -442,12 +459,19 @@ void Matcher::init_first_stack_mask() {
// Make up debug masks. Any spill slot plus callee-save registers.
// Caller-save registers are assumed to be trashable by the various
// inline-cache fixup routines.
- *idealreg2debugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
- *idealreg2debugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
- *idealreg2debugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
- *idealreg2debugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
- *idealreg2debugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
- *idealreg2debugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
+ *idealreg2debugmask [Op_RegN]= *idealreg2spillmask[Op_RegN];
+ *idealreg2debugmask [Op_RegI]= *idealreg2spillmask[Op_RegI];
+ *idealreg2debugmask [Op_RegL]= *idealreg2spillmask[Op_RegL];
+ *idealreg2debugmask [Op_RegF]= *idealreg2spillmask[Op_RegF];
+ *idealreg2debugmask [Op_RegD]= *idealreg2spillmask[Op_RegD];
+ *idealreg2debugmask [Op_RegP]= *idealreg2spillmask[Op_RegP];
+
+ *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
+ *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
+ *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
+ *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
+ *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
+ *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
// Prevent stub compilations from attempting to reference
// callee-saved registers from debug info
@@ -458,14 +482,31 @@ void Matcher::init_first_stack_mask() {
if( _register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A' ||
(_register_save_policy[i] == 'E' && exclude_soe) ) {
- idealreg2debugmask[Op_RegN]->Remove(i);
- idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call
- idealreg2debugmask[Op_RegL]->Remove(i); // registers from debug
- idealreg2debugmask[Op_RegF]->Remove(i); // masks
- idealreg2debugmask[Op_RegD]->Remove(i);
- idealreg2debugmask[Op_RegP]->Remove(i);
+ idealreg2debugmask [Op_RegN]->Remove(i);
+ idealreg2debugmask [Op_RegI]->Remove(i); // Exclude save-on-call
+ idealreg2debugmask [Op_RegL]->Remove(i); // registers from debug
+ idealreg2debugmask [Op_RegF]->Remove(i); // masks
+ idealreg2debugmask [Op_RegD]->Remove(i);
+ idealreg2debugmask [Op_RegP]->Remove(i);
+
+ idealreg2mhdebugmask[Op_RegN]->Remove(i);
+ idealreg2mhdebugmask[Op_RegI]->Remove(i);
+ idealreg2mhdebugmask[Op_RegL]->Remove(i);
+ idealreg2mhdebugmask[Op_RegF]->Remove(i);
+ idealreg2mhdebugmask[Op_RegD]->Remove(i);
+ idealreg2mhdebugmask[Op_RegP]->Remove(i);
}
}
+
+ // Subtract the register we use to save the SP for MethodHandle
+ // invokes from the debug mask.
+ const RegMask save_mask = method_handle_invoke_SP_save_mask();
+ idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}
//---------------------------is_save_on_entry----------------------------------
@@ -989,6 +1030,7 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
CallNode *call;
const TypeTuple *domain;
ciMethod* method = NULL;
+ bool is_method_handle_invoke = false; // for special kill effects
if( sfpt->is_Call() ) {
call = sfpt->as_Call();
domain = call->tf()->domain();
@@ -1013,6 +1055,8 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
mcall_java->_method = method;
mcall_java->_bci = call_java->_bci;
mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
+ is_method_handle_invoke = call_java->is_method_handle_invoke();
+ mcall_java->_method_handle_invoke = is_method_handle_invoke;
if( mcall_java->is_MachCallStaticJava() )
mcall_java->as_MachCallStaticJava()->_name =
call_java->as_CallStaticJava()->_name;
@@ -1126,6 +1170,15 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
}
+ if (is_method_handle_invoke) {
+ // Kill some extra stack space in case method handles want to do
+ // a little in-place argument insertion.
+ int regs_per_word = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const!
+ out_arg_limit_per_call += MethodHandlePushLimit * regs_per_word;
+ // Do not update mcall->_argsize because (a) the extra space is not
+ // pushed as arguments and (b) _argsize is dead (not used anywhere).
+ }
+
// Compute the max stack slot killed by any call. These will not be
// available for debug info, and will be used to adjust FIRST_STACK_mask
// after all call sites have been visited.
@@ -1832,67 +1885,23 @@ void Matcher::find_shared( Node *n ) {
case Op_Binary: // These are introduced in the Post_Visit state.
ShouldNotReachHere();
break;
- case Op_StoreB: // Do match these, despite no ideal reg
- case Op_StoreC:
- case Op_StoreCM:
- case Op_StoreD:
- case Op_StoreF:
- case Op_StoreI:
- case Op_StoreL:
- case Op_StoreP:
- case Op_StoreN:
- case Op_Store16B:
- case Op_Store8B:
- case Op_Store4B:
- case Op_Store8C:
- case Op_Store4C:
- case Op_Store2C:
- case Op_Store4I:
- case Op_Store2I:
- case Op_Store2L:
- case Op_Store4F:
- case Op_Store2F:
- case Op_Store2D:
case Op_ClearArray:
case Op_SafePoint:
mem_op = true;
break;
- case Op_LoadB:
- case Op_LoadUS:
- case Op_LoadD:
- case Op_LoadF:
- case Op_LoadI:
- case Op_LoadKlass:
- case Op_LoadNKlass:
- case Op_LoadL:
- case Op_LoadS:
- case Op_LoadP:
- case Op_LoadN:
- case Op_LoadRange:
- case Op_LoadD_unaligned:
- case Op_LoadL_unaligned:
- case Op_Load16B:
- case Op_Load8B:
- case Op_Load4B:
- case Op_Load4C:
- case Op_Load2C:
- case Op_Load8C:
- case Op_Load8S:
- case Op_Load4S:
- case Op_Load2S:
- case Op_Load4I:
- case Op_Load2I:
- case Op_Load2L:
- case Op_Load4F:
- case Op_Load2F:
- case Op_Load2D:
- mem_op = true;
- // Must be root of match tree due to prior load conflict
- if( C->subsume_loads() == false ) {
- set_shared(n);
+ default:
+ if( n->is_Store() ) {
+ // Do match stores, despite no ideal reg
+ mem_op = true;
+ break;
+ }
+ if( n->is_Mem() ) { // Loads and LoadStores
+ mem_op = true;
+ // Loads must be root of match tree due to prior load conflict
+ if( C->subsume_loads() == false )
+ set_shared(n);
}
// Fall into default case
- default:
if( !n->ideal_reg() )
set_dontcare(n); // Unmatchable Nodes
} // end_switch
@@ -1913,15 +1922,15 @@ void Matcher::find_shared( Node *n ) {
continue; // for(int i = ...)
}
- // Clone addressing expressions as they are "free" in most instructions
- if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
- if (m->in(AddPNode::Base)->Opcode() == Op_DecodeN) {
- // Bases used in addresses must be shared but since
- // they are shared through a DecodeN they may appear
- // to have a single use so force sharing here.
- set_shared(m->in(AddPNode::Base)->in(1));
- }
+ if( mop == Op_AddP && m->in(AddPNode::Base)->Opcode() == Op_DecodeN ) {
+ // Bases used in addresses must be shared but since
+ // they are shared through a DecodeN they may appear
+ // to have a single use so force sharing here.
+ set_shared(m->in(AddPNode::Base)->in(1));
+ }
+ // Clone addressing expressions as they are "free" in memory access instructions
+ if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
// Some inputs for address expression are not put on stack
// to avoid marking them as shared and forcing them into register
// if they are used only in address expressions.
diff --git a/hotspot/src/share/vm/opto/matcher.hpp b/hotspot/src/share/vm/opto/matcher.hpp
index fe657c53421..b48303baa87 100644
--- a/hotspot/src/share/vm/opto/matcher.hpp
+++ b/hotspot/src/share/vm/opto/matcher.hpp
@@ -117,8 +117,9 @@ public:
static const int base2reg[]; // Map Types to machine register types
// Convert ideal machine register to a register mask for spill-loads
static const RegMask *idealreg2regmask[];
- RegMask *idealreg2spillmask[_last_machine_leaf];
- RegMask *idealreg2debugmask[_last_machine_leaf];
+ RegMask *idealreg2spillmask [_last_machine_leaf];
+ RegMask *idealreg2debugmask [_last_machine_leaf];
+ RegMask *idealreg2mhdebugmask[_last_machine_leaf];
void init_spill_mask( Node *ret );
// Convert machine register number to register mask
static uint mreg2regmask_max;
@@ -297,6 +298,8 @@ public:
// Register for MODL projection of divmodL
static RegMask modL_proj_mask();
+ static const RegMask method_handle_invoke_SP_save_mask();
+
// Java-Interpreter calling convention
// (what you use when calling between compiled-Java and Interpreted-Java
diff --git a/hotspot/src/share/vm/opto/memnode.cpp b/hotspot/src/share/vm/opto/memnode.cpp
index b80f18c2e7d..4698add456b 100644
--- a/hotspot/src/share/vm/opto/memnode.cpp
+++ b/hotspot/src/share/vm/opto/memnode.cpp
@@ -123,6 +123,13 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
} else {
assert(false, "unexpected projection");
}
+ } else if (result->is_ClearArray()) {
+ if (!ClearArrayNode::step_through(&result, instance_id, phase)) {
+ // Can not bypass initialization of the instance
+ // we are looking for.
+ break;
+ }
+ // Otherwise skip it (the call updated 'result' value).
} else if (result->is_MergeMem()) {
result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
}
@@ -255,6 +262,13 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
return NodeSentinel; // caller will return NULL
}
+ // Do NOT remove or optimize the next lines: ensure a new alias index
+ // is allocated for an oop pointer type before Escape Analysis.
+ // Note: C++ will not remove it since the call has a side effect.
+ if ( t_adr->isa_oopptr() ) {
+ int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
+ }
+
#ifdef ASSERT
Node* base = NULL;
if (address->is_AddP())
@@ -530,6 +544,15 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) {
} else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
mem = mem->in(0)->in(TypeFunc::Memory);
continue; // (a) advance through independent MemBar memory
+ } else if (mem->is_ClearArray()) {
+ if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
+ // (the call updated 'mem' value)
+ continue; // (a) advance through independent allocation memory
+ } else {
+ // Can not bypass initialization of the instance
+ // we are looking for.
+ return mem;
+ }
} else if (mem->is_MergeMem()) {
int alias_idx = phase->C->get_alias_index(adr_type());
mem = mem->as_MergeMem()->memory_at(alias_idx);
@@ -1496,6 +1519,8 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
}
}
} else if (tp->base() == Type::InstPtr) {
+ const TypeInstPtr* tinst = tp->is_instptr();
+ ciKlass* klass = tinst->klass();
assert( off != Type::OffsetBot ||
// arrays can be cast to Objects
tp->is_oopptr()->klass()->is_java_lang_Object() ||
@@ -1503,6 +1528,25 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
phase->C->has_unsafe_access(),
"Field accesses must be precise" );
// For oop loads, we expect the _type to be precise
+ if (OptimizeStringConcat && klass == phase->C->env()->String_klass() &&
+ adr->is_AddP() && off != Type::OffsetBot) {
+ // For constant Strings treat the fields as compile time constants.
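+ // An int-valued field folds to a TypeInt constant; the backing array field
+ // folds to an (optionally narrow) oop constant; other field types are left alone.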
+ Node* base = adr->in(AddPNode::Base);
+ if (base->Opcode() == Op_ConP) {
+ const TypeOopPtr* t = phase->type(base)->isa_oopptr();
+ ciObject* string = t->const_oop();
+ ciConstant constant = string->as_instance()->field_value_by_offset(off);
+ if (constant.basic_type() == T_INT) {
+ return TypeInt::make(constant.as_int());
+ } else if (constant.basic_type() == T_ARRAY) {
+ if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+ return TypeNarrowOop::make_from_constant(constant.as_object());
+ } else {
+ return TypeOopPtr::make_from_constant(constant.as_object());
+ }
+ }
+ }
+ }
} else if (tp->base() == Type::KlassPtr) {
assert( off != Type::OffsetBot ||
// arrays can be cast to Objects
@@ -2426,6 +2470,31 @@ Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
return mem;
}
+//----------------------------step_through----------------------------------
+// Advance *np to the allocation's input memory edge if the ClearArray
+// initializes a different instance; return false (leaving *np unchanged) if it
+// initializes the instance we are looking for.
+bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
+ Node* n = *np;
+ assert(n->is_ClearArray(), "sanity");
+ intptr_t offset;
+ AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
+ // This method is called only before Allocate nodes are expanded during
+ // macro nodes expansion. Before that ClearArray nodes are only generated
+ // in LibraryCallKit::generate_arraycopy() which follows allocations.
+ assert(alloc != NULL, "should have allocation");
+ if (alloc->_idx == instance_id) {
+ // Can not bypass initialization of the instance we are looking for.
+ return false;
+ }
+ // Otherwise skip it.
+ InitializeNode* init = alloc->initialization();
+ if (init != NULL)
+ *np = init->in(TypeFunc::Memory);
+ else
+ *np = alloc->in(TypeFunc::Memory);
+ return true;
+}
+
//----------------------------clear_memory-------------------------------------
// Generate code to initialize object storage to zero.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
@@ -2599,7 +2668,30 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
- return remove_dead_region(phase, can_reshape) ? this : NULL;
+ if (remove_dead_region(phase, can_reshape)) return this;
+
+ // Eliminate volatile MemBars for scalar replaced objects.
+ if (can_reshape && req() == (Precedent+1) &&
+ (Opcode() == Op_MemBarAcquire || Opcode() == Op_MemBarVolatile)) {
+ // Volatile field loads and stores.
+ Node* my_mem = in(MemBarNode::Precedent);
+ if (my_mem != NULL && my_mem->is_Mem()) {
+ const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
+ // Check for scalar replaced object reference.
+ if( t_oop != NULL && t_oop->is_known_instance_field() &&
+ t_oop->offset() != Type::OffsetBot &&
+ t_oop->offset() != Type::OffsetTop) {
+ // Replace the MemBar projections by the corresponding MemBar inputs.
+ PhaseIterGVN* igvn = phase->is_IterGVN();
+ igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
+ igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
+ // Must return either the original node (now dead) or a new node
+ // (Do not return a top here, since that would break the uniqueness of top.)
+ return new (phase->C, 1) ConINode(TypeInt::ZERO);
+ }
+ }
+ }
+ return NULL;
}
//------------------------------Value------------------------------------------
diff --git a/hotspot/src/share/vm/opto/memnode.hpp b/hotspot/src/share/vm/opto/memnode.hpp
index a71df7d406a..cd0e60d971e 100644
--- a/hotspot/src/share/vm/opto/memnode.hpp
+++ b/hotspot/src/share/vm/opto/memnode.hpp
@@ -717,7 +717,10 @@ public:
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
- ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
+ ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
+ : Node(ctrl,arymem,word_cnt,base) {
+ init_class_id(Class_ClearArray);
+ }
virtual int Opcode() const;
virtual const Type *bottom_type() const { return Type::MEMORY; }
// ClearArray modifies array elements, and so affects only the
@@ -743,6 +746,9 @@ public:
Node* start_offset,
Node* end_offset,
PhaseGVN* phase);
+ // Advance *np to the allocation's input memory edge if the ClearArray
+ // initializes a different instance; return false (leaving *np unchanged) if it
+ // initializes the instance we are looking for.
+ static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
//------------------------------StrComp-------------------------------------
diff --git a/hotspot/src/share/vm/opto/node.hpp b/hotspot/src/share/vm/opto/node.hpp
index bad1607058e..92da96b40a2 100644
--- a/hotspot/src/share/vm/opto/node.hpp
+++ b/hotspot/src/share/vm/opto/node.hpp
@@ -47,6 +47,7 @@ class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
+class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
@@ -599,8 +600,9 @@ public:
DEFINE_CLASS_ID(BoxLock, Node, 10)
DEFINE_CLASS_ID(Add, Node, 11)
DEFINE_CLASS_ID(Mul, Node, 12)
+ DEFINE_CLASS_ID(ClearArray, Node, 13)
- _max_classes = ClassMask_Mul
+ _max_classes = ClassMask_ClearArray
};
#undef DEFINE_CLASS_ID
@@ -661,18 +663,25 @@ public:
return (_flags & Flag_is_Call) != 0;
}
+ CallNode* isa_Call() const {
+ return is_Call() ? as_Call() : NULL;
+ }
+
CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
return (CallNode*)this;
}
- #define DEFINE_CLASS_QUERY(type) \
- bool is_##type() const { \
+ #define DEFINE_CLASS_QUERY(type) \
+ bool is_##type() const { \
return ((_class_id & ClassMask_##type) == Class_##type); \
- } \
- type##Node *as_##type() const { \
- assert(is_##type(), "invalid node class"); \
- return (type##Node*)this; \
+ } \
+ type##Node *as_##type() const { \
+ assert(is_##type(), "invalid node class"); \
+ return (type##Node*)this; \
+ } \
+ type##Node* isa_##type() const { \
+ return (is_##type()) ? as_##type() : NULL; \
}
DEFINE_CLASS_QUERY(AbstractLock)
@@ -691,6 +700,7 @@ public:
DEFINE_CLASS_QUERY(CatchProj)
DEFINE_CLASS_QUERY(CheckCastPP)
DEFINE_CLASS_QUERY(ConstraintCast)
+ DEFINE_CLASS_QUERY(ClearArray)
DEFINE_CLASS_QUERY(CMove)
DEFINE_CLASS_QUERY(Cmp)
DEFINE_CLASS_QUERY(CountedLoop)
@@ -1249,6 +1259,24 @@ Node* Node::last_out(DUIterator_Last& i) const {
#undef I_VDUI_ONLY
#undef VDUI_ONLY
+// An Iterator that truly follows the iterator pattern. Doesn't
+// support deletion but could be made to.
+//
+// for (SimpleDUIterator i(n); i.has_next(); i.next()) {
+// Node* m = i.get();
+//
+class SimpleDUIterator : public StackObj {
+ private:
+ Node* node;
+ DUIterator_Fast i;
+ DUIterator_Fast imax;
+ public:
+ SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
+ bool has_next() { return i < imax; }
+ void next() { i++; }
+ Node* get() { return node->fast_out(i); }
+};
+
//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes. Uses classic doubling-array trick.
@@ -1290,6 +1318,12 @@ class Node_List : public Node_Array {
public:
Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
+ bool contains(Node* n) {
+ for (uint e = 0; e < size(); e++) {
+ if (at(e) == n) return true;
+ }
+ return false;
+ }
void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
void remove( uint i ) { Node_Array::remove(i); _cnt--; }
void push( Node *b ) { map(_cnt++,b); }
diff --git a/hotspot/src/share/vm/opto/output.cpp b/hotspot/src/share/vm/opto/output.cpp
index 3274fd4d9df..c762808b63c 100644
--- a/hotspot/src/share/vm/opto/output.cpp
+++ b/hotspot/src/share/vm/opto/output.cpp
@@ -794,6 +794,7 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
#endif
int safepoint_pc_offset = current_offset;
+ bool is_method_handle_invoke = false;
// Add the safepoint in the DebugInfoRecorder
if( !mach->is_MachCall() ) {
@@ -801,6 +802,11 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
} else {
mcall = mach->as_MachCall();
+
+ // Is the call a MethodHandle call?
+ if (mcall->is_MachCallJava())
+ is_method_handle_invoke = mcall->as_MachCallJava()->_method_handle_invoke;
+
safepoint_pc_offset += mcall->ret_addr_offset();
debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
}
@@ -911,9 +917,9 @@ void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
ciMethod* scope_method = method ? method : _method;
// Describe the scope here
assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
- assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
+ assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
// Now we can describe the scope.
- debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),jvms->should_reexecute(),locvals,expvals,monvals);
+ debug_info()->describe_scope(safepoint_pc_offset, scope_method, jvms->bci(), jvms->should_reexecute(), is_method_handle_invoke, locvals, expvals, monvals);
} // End jvms loop
// Mark the end of the scope set.
diff --git a/hotspot/src/share/vm/opto/parse.hpp b/hotspot/src/share/vm/opto/parse.hpp
index 37f7b629fbe..d48b21971b7 100644
--- a/hotspot/src/share/vm/opto/parse.hpp
+++ b/hotspot/src/share/vm/opto/parse.hpp
@@ -39,6 +39,7 @@ class InlineTree : public ResourceObj {
// Always between 0.0 and 1.0. Represents the percentage of the method's
// total execution time used at this call site.
const float _site_invoke_ratio;
+ const int _site_depth_adjust;
float compute_callee_frequency( int caller_bci ) const;
GrowableArray _subtrees;
@@ -50,7 +51,8 @@ protected:
ciMethod* callee_method,
JVMState* caller_jvms,
int caller_bci,
- float site_invoke_ratio);
+ float site_invoke_ratio,
+ int site_depth_adjust);
InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
JVMState* caller_jvms,
int caller_bci);
@@ -61,14 +63,15 @@ protected:
InlineTree *caller_tree() const { return _caller_tree; }
InlineTree* callee_at(int bci, ciMethod* m) const;
- int inline_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; }
+ int inline_depth() const { return stack_depth() + _site_depth_adjust; }
+ int stack_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; }
public:
static InlineTree* build_inline_tree_root();
static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
// For temporary (stack-allocated, stateless) ilts:
- InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio);
+ InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);
// InlineTree enum
enum InlineStyle {
@@ -427,6 +430,11 @@ class Parse : public GraphKit {
}
}
+ // Return true if the parser should add a loop predicate
+ bool should_add_predicate(int target_bci);
+ // Insert a loop predicate into the graph
+ void add_predicate();
+
// Note: Intrinsic generation routines may be found in library_call.cpp.
// Helper function to setup Ideal Call nodes
@@ -488,7 +496,7 @@ class Parse : public GraphKit {
void do_ifnull(BoolTest::mask btest, Node* c);
void do_if(BoolTest::mask btest, Node* c);
- void repush_if_args();
+ int repush_if_args();
void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
Block* path, Block* other_path);
IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
diff --git a/hotspot/src/share/vm/opto/parse1.cpp b/hotspot/src/share/vm/opto/parse1.cpp
index 9633c5e270b..169cdc9754b 100644
--- a/hotspot/src/share/vm/opto/parse1.cpp
+++ b/hotspot/src/share/vm/opto/parse1.cpp
@@ -231,12 +231,13 @@ void Parse::load_interpreter_state(Node* osr_buf) {
// Use the raw liveness computation to make sure that unexpected
// values don't propagate into the OSR frame.
- MethodLivenessResult live_locals = method()->raw_liveness_at_bci(osr_bci());
+ MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
if (!live_locals.is_valid()) {
// Degenerate or breakpointed method.
C->record_method_not_compilable("OSR in empty or breakpointed method");
return;
}
+ MethodLivenessResult raw_live_locals = method()->raw_liveness_at_bci(osr_bci());
// Extract the needed locals from the interpreter frame.
Node *locals_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals-1)*wordSize);
@@ -316,6 +317,10 @@ void Parse::load_interpreter_state(Node* osr_buf) {
continue;
}
}
+ if (type->basic_type() == T_ADDRESS && !raw_live_locals.at(index)) {
+ // Skip type check for dead address locals
+ continue;
+ }
set_local(index, check_interpreter_type(l, type, bad_type_exit));
}
@@ -1378,6 +1383,10 @@ void Parse::do_one_block() {
set_parse_bci(iter().cur_bci());
if (bci() == block()->limit()) {
+ // Insert a predicate if we fall through to a loop head block
+ if (should_add_predicate(bci())) {
+ add_predicate();
+ }
// Do not walk into the next block until directed by do_all_blocks.
merge(bci());
break;
@@ -2078,6 +2087,37 @@ void Parse::add_safepoint() {
}
}
+//------------------------------should_add_predicate--------------------------
+bool Parse::should_add_predicate(int target_bci) {
+ if (!UseLoopPredicate) return false;
+ Block* target = successor_for_bci(target_bci);
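+ // Only predicate forward branches into a loop head (i.e. edges that enter the
+ // loop); the rpo comparison filters out the backedge, which also targets the
+ // loop head.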
+ if (target != NULL &&
+ target->is_loop_head() &&
+ block()->rpo() < target->rpo()) {
+ return true;
+ }
+ return false;
+}
+
+//------------------------------add_predicate---------------------------------
+void Parse::add_predicate() {
+ assert(UseLoopPredicate, "use only for loop predicate");
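+ // Build an If whose condition is Conv2B(Opaque1(1)): the true projection falls
+ // through toward the loop, the false projection goes to an uncommon trap with
+ // Reason_predicate. The Opaque1 node is recorded via add_predicate_opaq() so the
+ // loop optimizations can find it later (and remove it if it goes unused).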
+ Node *cont = _gvn.intcon(1);
+ Node* opq = _gvn.transform(new (C, 2) Opaque1Node(C, cont));
+ Node *bol = _gvn.transform(new (C, 2) Conv2BNode(opq));
+ IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN);
+ Node* iffalse = _gvn.transform(new (C, 1) IfFalseNode(iff));
+ C->add_predicate_opaq(opq);
+ {
+ PreserveJVMState pjvms(this);
+ set_control(iffalse);
+ uncommon_trap(Deoptimization::Reason_predicate,
+ Deoptimization::Action_maybe_recompile);
+ }
+ Node* iftrue = _gvn.transform(new (C, 1) IfTrueNode(iff));
+ set_control(iftrue);
+}
+
#ifndef PRODUCT
//------------------------show_parse_info--------------------------------------
void Parse::show_parse_info() {
diff --git a/hotspot/src/share/vm/opto/parse2.cpp b/hotspot/src/share/vm/opto/parse2.cpp
index 5457d966b6f..869266c1769 100644
--- a/hotspot/src/share/vm/opto/parse2.cpp
+++ b/hotspot/src/share/vm/opto/parse2.cpp
@@ -278,6 +278,11 @@ void Parse::do_tableswitch() {
if (len < 1) {
// If this is a backward branch, add safepoint
maybe_add_safepoint(default_dest);
+ if (should_add_predicate(default_dest)) {
+ _sp += 1; // set original stack for use by uncommon_trap
+ add_predicate();
+ _sp -= 1;
+ }
merge(default_dest);
return;
}
@@ -324,6 +329,11 @@ void Parse::do_lookupswitch() {
if (len < 1) { // If this is a backward branch, add safepoint
maybe_add_safepoint(default_dest);
+ if (should_add_predicate(default_dest)){
+ _sp += 1; // set original stack for use by uncommon_trap
+ add_predicate();
+ _sp -= 1;
+ }
merge(default_dest);
return;
}
@@ -731,6 +741,9 @@ void Parse::do_jsr() {
push(_gvn.makecon(ret_addr));
// Flow to the jsr.
+ if (should_add_predicate(jsr_bci)){
+ add_predicate();
+ }
merge(jsr_bci);
}
@@ -881,7 +894,7 @@ bool Parse::seems_never_taken(float prob) {
//-------------------------------repush_if_args--------------------------------
// Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
-inline void Parse::repush_if_args() {
+inline int Parse::repush_if_args() {
#ifndef PRODUCT
if (PrintOpto && WizardMode) {
tty->print("defending against excessive implicit null exceptions on %s @%d in ",
@@ -895,6 +908,7 @@ inline void Parse::repush_if_args() {
assert(argument(0) != NULL, "must exist");
assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
_sp += bc_depth;
+ return bc_depth;
}
//----------------------------------do_ifnull----------------------------------
@@ -954,8 +968,14 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
// Update method data
profile_taken_branch(target_bci);
adjust_map_after_if(btest, c, prob, branch_block, next_block);
- if (!stopped())
+ if (!stopped()) {
+ if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop
+ int nargs = repush_if_args(); // set original stack for uncommon_trap
+ add_predicate();
+ _sp -= nargs;
+ }
merge(target_bci);
+ }
}
}
@@ -1076,8 +1096,14 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
// Update method data
profile_taken_branch(target_bci);
adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
- if (!stopped())
+ if (!stopped()) {
+ if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop
+ int nargs = repush_if_args(); // set original stack for the uncommon_trap
+ add_predicate();
+ _sp -= nargs;
+ }
merge(target_bci);
+ }
}
}
@@ -2080,6 +2106,10 @@ void Parse::do_one_bytecode() {
// Update method data
profile_taken_branch(target_bci);
+ // Add loop predicate if it goes to a loop
+ if (should_add_predicate(target_bci)){
+ add_predicate();
+ }
// Merge the current control into the target basic block
merge(target_bci);
diff --git a/hotspot/src/share/vm/opto/parse3.cpp b/hotspot/src/share/vm/opto/parse3.cpp
index 7125cb5d619..40f9940bd71 100644
--- a/hotspot/src/share/vm/opto/parse3.cpp
+++ b/hotspot/src/share/vm/opto/parse3.cpp
@@ -125,7 +125,25 @@ void Parse::do_field_access(bool is_get, bool is_field) {
void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
// Does this field have a constant value? If so, just push the value.
- if (field->is_constant() && push_constant(field->constant_value())) return;
+ if (field->is_constant()) {
+ if (field->is_static()) {
+ // final static field
+ if (push_constant(field->constant_value()))
+ return;
+ }
+ else {
+ // final non-static field of a trusted class ({java,sun}.dyn
+ // classes).
+ if (obj->is_Con()) {
+ const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
+ ciObject* constant_oop = oop_ptr->const_oop();
+ ciConstant constant = field->constant_value_of(constant_oop);
+
+ if (push_constant(constant, true))
+ return;
+ }
+ }
+ }
ciType* field_klass = field->type();
bool is_vol = field->is_volatile();
@@ -145,7 +163,7 @@ void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool
if (!field->type()->is_loaded()) {
type = TypeInstPtr::BOTTOM;
must_assert_null = true;
- } else if (field->is_constant()) {
+ } else if (field->is_constant() && field->is_static()) {
// This can happen if the constant oop is non-perm.
ciObject* con = field->constant_value().as_object();
// Do not "join" in the previous type; it doesn't add value,
@@ -240,19 +258,19 @@ void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool
// membar is dependent on the store, keeping any other membars generated
// below from floating up past the store.
int adr_idx = C->get_alias_index(adr_type);
- insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx);
+ insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);
// Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
// volatile alias indices. Skip this if the membar is redundant.
if (adr_idx != Compile::AliasIdxBot) {
- insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot);
+ insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
}
// Finally, place alias-index-specific membars for each volatile index
// that isn't the adr_idx membar. Typically there's only 1 or 2.
for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
if (i != adr_idx && C->alias_type(i)->is_volatile()) {
- insert_mem_bar_volatile(Op_MemBarVolatile, i);
+ insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
}
}
}
diff --git a/hotspot/src/share/vm/opto/parseHelper.cpp b/hotspot/src/share/vm/opto/parseHelper.cpp
index 6b3f432eae7..ab7883fd8ff 100644
--- a/hotspot/src/share/vm/opto/parseHelper.cpp
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp
@@ -221,6 +221,14 @@ void Parse::do_new() {
// Push resultant oop onto stack
push(obj);
+
+ // Keep track of whether opportunities exist for StringBuilder
+ // optimizations.
+ if (OptimizeStringConcat &&
+ (klass == C->env()->StringBuilder_klass() ||
+ klass == C->env()->StringBuffer_klass())) {
+ C->set_has_stringbuilder(true);
+ }
}
#ifndef PRODUCT
diff --git a/hotspot/src/share/vm/opto/phase.hpp b/hotspot/src/share/vm/opto/phase.hpp
index d0d54e1d900..9df5500fd5e 100644
--- a/hotspot/src/share/vm/opto/phase.hpp
+++ b/hotspot/src/share/vm/opto/phase.hpp
@@ -44,6 +44,7 @@ public:
BlockLayout, // Linear ordering of blocks
Register_Allocation, // Register allocation, duh
LIVE, // Dragon-book LIVE range problem
+ StringOpts, // StringBuilder related optimizations
Interference_Graph, // Building the IFG
Coalesce, // Coalescing copies
Ideal_Loop, // Find idealized trip-counted loops
diff --git a/hotspot/src/share/vm/opto/phaseX.hpp b/hotspot/src/share/vm/opto/phaseX.hpp
index b9391ba1d97..33ff56f0ee4 100644
--- a/hotspot/src/share/vm/opto/phaseX.hpp
+++ b/hotspot/src/share/vm/opto/phaseX.hpp
@@ -345,7 +345,11 @@ public:
Node *hash_find(const Node *n) { return _table.hash_find(n); }
// Used after parsing to eliminate values that are no longer in program
- void remove_useless_nodes(VectorSet &useful) { _table.remove_useless_nodes(useful); }
+ void remove_useless_nodes(VectorSet &useful) {
+ _table.remove_useless_nodes(useful);
+ // this may invalidate cached cons so reset the cache
+ init_con_caches();
+ }
virtual ConNode* uncached_makecon(const Type* t); // override from PhaseTransform
diff --git a/hotspot/src/share/vm/opto/runtime.cpp b/hotspot/src/share/vm/opto/runtime.cpp
index 2b3851961ee..f0d0c217088 100644
--- a/hotspot/src/share/vm/opto/runtime.cpp
+++ b/hotspot/src/share/vm/opto/runtime.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -143,7 +143,7 @@ const char* OptoRuntime::stub_name(address entry) {
// We failed the fast-path allocation. Now we need to do a scavenge or GC
// and try allocation again.
-void OptoRuntime::maybe_defer_card_mark(JavaThread* thread) {
+void OptoRuntime::new_store_pre_barrier(JavaThread* thread) {
// After any safepoint, just before going back to compiled code,
// we inform the GC that we will be doing initializing writes to
// this object in the future without emitting card-marks, so
@@ -156,7 +156,7 @@ void OptoRuntime::maybe_defer_card_mark(JavaThread* thread) {
assert(Universe::heap()->can_elide_tlab_store_barriers(),
"compiler must check this first");
// GC may decide to give back a safer copy of new_obj.
- new_obj = Universe::heap()->defer_store_barrier(thread, new_obj);
+ new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj);
thread->set_vm_result(new_obj);
}
@@ -200,7 +200,7 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(klassOopDesc* klass, JavaThrea
if (GraphKit::use_ReduceInitialCardMarks()) {
// inform GC that we won't do card marks for initializing writes.
- maybe_defer_card_mark(thread);
+ new_store_pre_barrier(thread);
}
JRT_END
@@ -239,7 +239,7 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(klassOopDesc* array_type, int len
if (GraphKit::use_ReduceInitialCardMarks()) {
// inform GC that we won't do card marks for initializing writes.
- maybe_defer_card_mark(thread);
+ new_store_pre_barrier(thread);
}
JRT_END
@@ -790,7 +790,7 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* t
NOT_PRODUCT(Exceptions::debug_check_abort(exception));
#ifdef ASSERT
- if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+ if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
// should throw an exception here
ShouldNotReachHere();
}
@@ -858,6 +858,9 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* t
thread->set_exception_pc(pc);
thread->set_exception_handler_pc(handler_address);
thread->set_exception_stack_size(0);
+
+ // Check if the exception PC is a MethodHandle call.
+ thread->set_is_method_handle_exception(nm->is_method_handle_return(pc));
}
// Restore correct return pc. Was saved above.
@@ -936,7 +939,7 @@ address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address r
#endif
assert (exception != NULL, "should have thrown a NULLPointerException");
#ifdef ASSERT
- if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+ if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
// should throw an exception here
ShouldNotReachHere();
}
diff --git a/hotspot/src/share/vm/opto/runtime.hpp b/hotspot/src/share/vm/opto/runtime.hpp
index c3d8238ae1e..2c0c49880e4 100644
--- a/hotspot/src/share/vm/opto/runtime.hpp
+++ b/hotspot/src/share/vm/opto/runtime.hpp
@@ -133,8 +133,9 @@ class OptoRuntime : public AllStatic {
// Allocate storage for a objArray or typeArray
static void new_array_C(klassOopDesc* array_klass, int len, JavaThread *thread);
- // Post-slow-path-allocation step for implementing ReduceInitialCardMarks:
- static void maybe_defer_card_mark(JavaThread* thread);
+ // Post-slow-path-allocation, pre-initializing-stores step for
+ // implementing ReduceInitialCardMarks
+ static void new_store_pre_barrier(JavaThread* thread);
// Allocate storage for a multi-dimensional arrays
// Note: needs to be fixed for arbitrary number of dimensions
diff --git a/hotspot/src/share/vm/opto/split_if.cpp b/hotspot/src/share/vm/opto/split_if.cpp
index a7a6baaa925..75ba440a6bd 100644
--- a/hotspot/src/share/vm/opto/split_if.cpp
+++ b/hotspot/src/share/vm/opto/split_if.cpp
@@ -219,6 +219,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
//------------------------------register_new_node------------------------------
void PhaseIdealLoop::register_new_node( Node *n, Node *blk ) {
+ assert(!n->is_CFG(), "must be data node");
_igvn.register_new_node_with_optimizer(n);
set_ctrl(n, blk);
IdealLoopTree *loop = get_loop(blk);
diff --git a/hotspot/src/share/vm/opto/stringopts.cpp b/hotspot/src/share/vm/opto/stringopts.cpp
new file mode 100644
index 00000000000..192d31f8257
--- /dev/null
+++ b/hotspot/src/share/vm/opto/stringopts.cpp
@@ -0,0 +1,1395 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_stringopts.cpp.incl"
+
+#define __ kit.
+
+class StringConcat : public ResourceObj {
+ private:
+ PhaseStringOpts* _stringopts;
+ Node* _string_alloc;
+ AllocateNode* _begin; // The allocation that begins the pattern
+ CallStaticJavaNode* _end; // The final call of the pattern. Will either be
+ // SB.toString or String.<init>(SB.toString)
+ bool _multiple; // indicates this is a fusion of two or more
+ // separate StringBuilders
+
+ Node* _arguments; // The list of arguments to be concatenated
+ GrowableArray<int> _mode; // into a String along with a mode flag
+ // indicating how to treat the value.
+
+ Node_List _control; // List of control nodes that will be deleted
+ Node_List _uncommon_traps; // Uncommon traps that need to be rewritten
+ // to restart at the initial JVMState.
+ public:
+ // Mode for converting arguments to Strings
+ enum {
+ StringMode,
+ IntMode,
+ CharMode
+ };
+
+ StringConcat(PhaseStringOpts* stringopts, CallStaticJavaNode* end):
+ _end(end),
+ _begin(NULL),
+ _multiple(false),
+ _string_alloc(NULL),
+ _stringopts(stringopts) {
+ _arguments = new (_stringopts->C, 1) Node(1);
+ _arguments->del_req(0);
+ }
+
+ bool validate_control_flow();
+
+ void merge_add() {
+#if 0
+ // XXX This is placeholder code for reusing an existing String
+ // allocation but the logic for checking the state safety is
+ // probably inadequate at the moment.
+ CallProjections endprojs;
+ sc->end()->extract_projections(&endprojs, false);
+ if (endprojs.resproj != NULL) {
+ for (SimpleDUIterator i(endprojs.resproj); i.has_next(); i.next()) {
+ CallStaticJavaNode *use = i.get()->isa_CallStaticJava();
+ if (use != NULL && use->method() != NULL &&
+ use->method()->holder() == C->env()->String_klass() &&
+ use->method()->name() == ciSymbol::object_initializer_name() &&
+ use->in(TypeFunc::Parms + 1) == endprojs.resproj) {
+ // Found useless new String(sb.toString()) so reuse the newly allocated String
+ // when creating the result instead of allocating a new one.
+ sc->set_string_alloc(use->in(TypeFunc::Parms));
+ sc->set_end(use);
+ }
+ }
+ }
+#endif
+ }
+
+ StringConcat* merge(StringConcat* other, Node* arg);
+
+ void set_allocation(AllocateNode* alloc) {
+ _begin = alloc;
+ }
+
+ void append(Node* value, int mode) {
+ _arguments->add_req(value);
+ _mode.append(mode);
+ }
+ void push(Node* value, int mode) {
+ _arguments->ins_req(0, value);
+ _mode.insert_before(0, mode);
+ }
+ void push_string(Node* value) {
+ push(value, StringMode);
+ }
+ void push_int(Node* value) {
+ push(value, IntMode);
+ }
+ void push_char(Node* value) {
+ push(value, CharMode);
+ }
+
+ Node* argument(int i) {
+ return _arguments->in(i);
+ }
+ void set_argument(int i, Node* value) {
+ _arguments->set_req(i, value);
+ }
+ int num_arguments() {
+ return _mode.length();
+ }
+ int mode(int i) {
+ return _mode.at(i);
+ }
+ void add_control(Node* ctrl) {
+ assert(!_control.contains(ctrl), "only push once");
+ _control.push(ctrl);
+ }
+ CallStaticJavaNode* end() { return _end; }
+ AllocateNode* begin() { return _begin; }
+ Node* string_alloc() { return _string_alloc; }
+
+ void eliminate_unneeded_control();
+ void eliminate_initialize(InitializeNode* init);
+ void eliminate_call(CallNode* call);
+
+ void maybe_log_transform() {
+ CompileLog* log = _stringopts->C->log();
+ if (log != NULL) {
+ log->head("replace_string_concat arguments='%d' string_alloc='%d' multiple='%d'",
+ num_arguments(),
+ _string_alloc != NULL,
+ _multiple);
+ JVMState* p = _begin->jvms();
+ while (p != NULL) {
+ log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+ p = p->caller();
+ }
+ log->tail("replace_string_concat");
+ }
+ }
+
+ void convert_uncommon_traps(GraphKit& kit, const JVMState* jvms) {
+ for (uint u = 0; u < _uncommon_traps.size(); u++) {
+ Node* uct = _uncommon_traps.at(u);
+
+ // Build a new call using the jvms state of the allocate
+ address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin();
+ const TypeFunc* call_type = OptoRuntime::uncommon_trap_Type();
+ int size = call_type->domain()->cnt();
+ const TypePtr* no_memory_effects = NULL;
+ Compile* C = _stringopts->C;
+ CallStaticJavaNode* call = new (C, size) CallStaticJavaNode(call_type, call_addr, "uncommon_trap",
+ jvms->bci(), no_memory_effects);
+ for (int e = 0; e < TypeFunc::Parms; e++) {
+ call->init_req(e, uct->in(e));
+ }
+ // Set the trap request to record intrinsic failure if this trap
+ // is taken too many times. Ideally we would handle the traps by
+ // doing the original bookkeeping in the MDO so that if it caused
+ // the code to be thrown out we could still recompile and use the
+ // optimization. Failing the uncommon traps doesn't really mean
+ // that the optimization is a bad idea but there's no other way to
+ // do the MDO updates currently.
+ int trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_intrinsic,
+ Deoptimization::Action_make_not_entrant);
+ call->init_req(TypeFunc::Parms, __ intcon(trap_request));
+ kit.add_safepoint_edges(call);
+
+ _stringopts->gvn()->transform(call);
+ C->gvn_replace_by(uct, call);
+ uct->disconnect_inputs(NULL);
+ }
+ }
+
+ void cleanup() {
+ // disconnect the hook node
+ _arguments->disconnect_inputs(NULL);
+ }
+};
+
+
+void StringConcat::eliminate_unneeded_control() {
+ eliminate_initialize(begin()->initialization());
+ for (uint i = 0; i < _control.size(); i++) {
+ Node* n = _control.at(i);
+ if (n->is_Call()) {
+ if (n != _end) {
+ eliminate_call(n->as_Call());
+ }
+ } else if (n->is_IfTrue()) {
+ Compile* C = _stringopts->C;
+ C->gvn_replace_by(n, n->in(0)->in(0));
+ C->gvn_replace_by(n->in(0), C->top());
+ }
+ }
+}
+
+
+StringConcat* StringConcat::merge(StringConcat* other, Node* arg) {
+ StringConcat* result = new StringConcat(_stringopts, _end);
+ for (uint x = 0; x < _control.size(); x++) {
+ Node* n = _control.at(x);
+ if (n->is_Call()) {
+ result->_control.push(n);
+ }
+ }
+ for (uint x = 0; x < other->_control.size(); x++) {
+ Node* n = other->_control.at(x);
+ if (n->is_Call()) {
+ result->_control.push(n);
+ }
+ }
+ assert(result->_control.contains(other->_end), "what?");
+ assert(result->_control.contains(_begin), "what?");
+ for (int x = 0; x < num_arguments(); x++) {
+ if (argument(x) == arg) {
+ // replace the toString result with all the arguments that
+ // made up the other StringConcat
+ for (int y = 0; y < other->num_arguments(); y++) {
+ result->append(other->argument(y), other->mode(y));
+ }
+ } else {
+ result->append(argument(x), mode(x));
+ }
+ }
+ result->set_allocation(other->_begin);
+ result->_multiple = true;
+ return result;
+}
+
+
+void StringConcat::eliminate_call(CallNode* call) {
+ Compile* C = _stringopts->C;
+ CallProjections projs;
+ call->extract_projections(&projs, false);
+ if (projs.fallthrough_catchproj != NULL) {
+ C->gvn_replace_by(projs.fallthrough_catchproj, call->in(TypeFunc::Control));
+ }
+ if (projs.fallthrough_memproj != NULL) {
+ C->gvn_replace_by(projs.fallthrough_memproj, call->in(TypeFunc::Memory));
+ }
+ if (projs.catchall_memproj != NULL) {
+ C->gvn_replace_by(projs.catchall_memproj, C->top());
+ }
+ if (projs.fallthrough_ioproj != NULL) {
+ C->gvn_replace_by(projs.fallthrough_ioproj, call->in(TypeFunc::I_O));
+ }
+ if (projs.catchall_ioproj != NULL) {
+ C->gvn_replace_by(projs.catchall_ioproj, C->top());
+ }
+ if (projs.catchall_catchproj != NULL) {
+ // EA can't cope with the partially collapsed graph this
+ // creates so put it on the worklist to be collapsed later.
+ for (SimpleDUIterator i(projs.catchall_catchproj); i.has_next(); i.next()) {
+ Node *use = i.get();
+ int opc = use->Opcode();
+ if (opc == Op_CreateEx || opc == Op_Region) {
+ _stringopts->record_dead_node(use);
+ }
+ }
+ C->gvn_replace_by(projs.catchall_catchproj, C->top());
+ }
+ if (projs.resproj != NULL) {
+ C->gvn_replace_by(projs.resproj, C->top());
+ }
+ C->gvn_replace_by(call, C->top());
+}
+
+void StringConcat::eliminate_initialize(InitializeNode* init) {
+ Compile* C = _stringopts->C;
+
+ // Eliminate Initialize node.
+ assert(init->outcnt() <= 2, "only a control and memory projection expected");
+ assert(init->req() <= InitializeNode::RawStores, "no pending inits");
+ Node *ctrl_proj = init->proj_out(TypeFunc::Control);
+ if (ctrl_proj != NULL) {
+ C->gvn_replace_by(ctrl_proj, init->in(TypeFunc::Control));
+ }
+ Node *mem_proj = init->proj_out(TypeFunc::Memory);
+ if (mem_proj != NULL) {
+ Node *mem = init->in(TypeFunc::Memory);
+ C->gvn_replace_by(mem_proj, mem);
+ }
+ C->gvn_replace_by(init, C->top());
+ init->disconnect_inputs(NULL);
+}
+
+Node_List PhaseStringOpts::collect_toString_calls() {
+ Node_List string_calls;
+ Node_List worklist;
+
+ _visited.Clear();
+
+ // Prime the worklist
+ for (uint i = 1; i < C->root()->len(); i++) {
+ Node* n = C->root()->in(i);
+ if (n != NULL && !_visited.test_set(n->_idx)) {
+ worklist.push(n);
+ }
+ }
+
+ while (worklist.size() > 0) {
+ Node* ctrl = worklist.pop();
+ if (ctrl->is_CallStaticJava()) {
+ CallStaticJavaNode* csj = ctrl->as_CallStaticJava();
+ ciMethod* m = csj->method();
+ if (m != NULL &&
+ (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
+ m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString)) {
+ string_calls.push(csj);
+ }
+ }
+ if (ctrl->in(0) != NULL && !_visited.test_set(ctrl->in(0)->_idx)) {
+ worklist.push(ctrl->in(0));
+ }
+ if (ctrl->is_Region()) {
+ for (uint i = 1; i < ctrl->len(); i++) {
+ if (ctrl->in(i) != NULL && !_visited.test_set(ctrl->in(i)->_idx)) {
+ worklist.push(ctrl->in(i));
+ }
+ }
+ }
+ }
+ return string_calls;
+}
+
+
+StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
+ ciMethod* m = call->method();
+ ciSymbol* string_sig;
+ ciSymbol* int_sig;
+ ciSymbol* char_sig;
+ if (m->holder() == C->env()->StringBuilder_klass()) {
+ string_sig = ciSymbol::String_StringBuilder_signature();
+ int_sig = ciSymbol::int_StringBuilder_signature();
+ char_sig = ciSymbol::char_StringBuilder_signature();
+ } else if (m->holder() == C->env()->StringBuffer_klass()) {
+ string_sig = ciSymbol::String_StringBuffer_signature();
+ int_sig = ciSymbol::int_StringBuffer_signature();
+ char_sig = ciSymbol::char_StringBuffer_signature();
+ } else {
+ return NULL;
+ }
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print("considering toString call in ");
+ call->jvms()->dump_spec(tty); tty->cr();
+ }
+#endif
+
+ StringConcat* sc = new StringConcat(this, call);
+
+ AllocateNode* alloc = NULL;
+ InitializeNode* init = NULL;
+
+ // possible opportunity for StringBuilder fusion
+ CallStaticJavaNode* cnode = call;
+ while (cnode) {
+ Node* recv = cnode->in(TypeFunc::Parms)->uncast();
+ if (recv->is_Proj()) {
+ recv = recv->in(0);
+ }
+ cnode = recv->isa_CallStaticJava();
+ if (cnode == NULL) {
+ alloc = recv->isa_Allocate();
+ if (alloc == NULL) {
+ break;
+ }
+ // Find the constructor call
+ Node* result = alloc->result_cast();
+ if (result == NULL || !result->is_CheckCastPP()) {
+ // strange looking allocation
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print("giving up because allocation looks strange ");
+ alloc->jvms()->dump_spec(tty); tty->cr();
+ }
+#endif
+ break;
+ }
+ Node* constructor = NULL;
+ for (SimpleDUIterator i(result); i.has_next(); i.next()) {
+ CallStaticJavaNode *use = i.get()->isa_CallStaticJava();
+ if (use != NULL && use->method() != NULL &&
+ use->method()->name() == ciSymbol::object_initializer_name() &&
+ use->method()->holder() == m->holder()) {
+ // Matched the constructor.
+ ciSymbol* sig = use->method()->signature()->as_symbol();
+ if (sig == ciSymbol::void_method_signature() ||
+ sig == ciSymbol::int_void_signature() ||
+ sig == ciSymbol::string_void_signature()) {
+ if (sig == ciSymbol::string_void_signature()) {
+ // StringBuilder(String) so pick this up as the first argument
+ assert(use->in(TypeFunc::Parms + 1) != NULL, "what?");
+ sc->push_string(use->in(TypeFunc::Parms + 1));
+ }
+ // The int variant takes an initial size for the backing
+ // array so just treat it like the void version.
+ constructor = use;
+ } else {
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print("unexpected constructor signature: %s", sig->as_utf8());
+ }
+#endif
+ }
+ break;
+ }
+ }
+ if (constructor == NULL) {
+ // couldn't find constructor
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print("giving up because couldn't find constructor ");
+ alloc->jvms()->dump_spec(tty);
+ }
+#endif
+ break;
+ }
+
+ // Walked all the way back and found the constructor call so see
+ // if this call can be converted into a direct string concatenation.
+ sc->add_control(call);
+ sc->add_control(constructor);
+ sc->add_control(alloc);
+ sc->set_allocation(alloc);
+ if (sc->validate_control_flow()) {
+ return sc;
+ } else {
+ return NULL;
+ }
+ } else if (cnode->method() == NULL) {
+ break;
+ } else if (cnode->method()->holder() == m->holder() &&
+ cnode->method()->name() == ciSymbol::append_name() &&
+ (cnode->method()->signature()->as_symbol() == string_sig ||
+ cnode->method()->signature()->as_symbol() == char_sig ||
+ cnode->method()->signature()->as_symbol() == int_sig)) {
+ sc->add_control(cnode);
+ Node* arg = cnode->in(TypeFunc::Parms + 1);
+ if (cnode->method()->signature()->as_symbol() == int_sig) {
+ sc->push_int(arg);
+ } else if (cnode->method()->signature()->as_symbol() == char_sig) {
+ sc->push_char(arg);
+ } else {
+ if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) {
+ CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava();
+ if (csj->method() != NULL &&
+ csj->method()->holder() == C->env()->Integer_klass() &&
+ csj->method()->name() == ciSymbol::toString_name()) {
+ sc->add_control(csj);
+ sc->push_int(csj->in(TypeFunc::Parms));
+ continue;
+ }
+ }
+ sc->push_string(arg);
+ }
+ continue;
+ } else {
+ // some unhandled signature
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print("giving up because encountered unexpected signature ");
+ cnode->tf()->dump(); tty->cr();
+ cnode->in(TypeFunc::Parms + 1)->dump();
+ }
+#endif
+ break;
+ }
+ }
+ return NULL;
+}
+
+
+PhaseStringOpts::PhaseStringOpts(PhaseGVN* gvn, Unique_Node_List*):
+ Phase(StringOpts),
+ _gvn(gvn),
+ _visited(Thread::current()->resource_area()) {
+
+ assert(OptimizeStringConcat, "shouldn't be here");
+
+ size_table_field = C->env()->Integer_klass()->get_field_by_name(ciSymbol::make("sizeTable"),
+ ciSymbol::make("[I"), true);
+ if (size_table_field == NULL) {
+ // Something wrong so give up.
+ assert(false, "why can't we find Integer.sizeTable?");
+ return;
+ }
+
+ // Collect the types needed to talk about the various slices of memory
+ const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
+ false, NULL, 0);
+
+ const TypePtr* value_field_type = string_type->add_offset(java_lang_String::value_offset_in_bytes());
+ const TypePtr* offset_field_type = string_type->add_offset(java_lang_String::offset_offset_in_bytes());
+ const TypePtr* count_field_type = string_type->add_offset(java_lang_String::count_offset_in_bytes());
+
+ value_field_idx = C->get_alias_index(value_field_type);
+ count_field_idx = C->get_alias_index(count_field_type);
+ offset_field_idx = C->get_alias_index(offset_field_type);
+ char_adr_idx = C->get_alias_index(TypeAryPtr::CHARS);
+
+ // For each locally allocated StringBuffer see if the usages can be
+ // collapsed into a single String construction.
+
+ // Run through the list of allocations looking for SB.toString to see
+ // if it's possible to fuse the usage of the SB into a single String
+ // construction.
+ GrowableArray<StringConcat*> concats;
+ Node_List toStrings = collect_toString_calls();
+ while (toStrings.size() > 0) {
+ StringConcat* sc = build_candidate(toStrings.pop()->as_CallStaticJava());
+ if (sc != NULL) {
+ concats.push(sc);
+ }
+ }
+
+ // try to coalesce separate concats
+ restart:
+ for (int c = 0; c < concats.length(); c++) {
+ StringConcat* sc = concats.at(c);
+ for (int i = 0; i < sc->num_arguments(); i++) {
+ Node* arg = sc->argument(i);
+ if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) {
+ CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava();
+ if (csj->method() != NULL &&
+ (csj->method()->holder() == C->env()->StringBuffer_klass() ||
+ csj->method()->holder() == C->env()->StringBuilder_klass()) &&
+ csj->method()->name() == ciSymbol::toString_name()) {
+ for (int o = 0; o < concats.length(); o++) {
+ if (c == o) continue;
+ StringConcat* other = concats.at(o);
+ if (other->end() == csj) {
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print_cr("considering stacked concats");
+ }
+#endif
+
+ StringConcat* merged = sc->merge(other, arg);
+ if (merged->validate_control_flow()) {
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print_cr("stacking would succeed");
+ }
+#endif
+ if (c < o) {
+ concats.remove_at(o);
+ concats.at_put(c, merged);
+ } else {
+ concats.remove_at(c);
+ concats.at_put(o, merged);
+ }
+ goto restart;
+ } else {
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print_cr("stacking would fail");
+ }
+#endif
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ for (int c = 0; c < concats.length(); c++) {
+ StringConcat* sc = concats.at(c);
+ replace_string_concat(sc);
+ }
+
+ remove_dead_nodes();
+}
+
+void PhaseStringOpts::record_dead_node(Node* dead) {
+ dead_worklist.push(dead);
+}
+
+void PhaseStringOpts::remove_dead_nodes() {
+ // Delete any dead nodes to make things clean enough that escape
+ // analysis doesn't get unhappy.
+ while (dead_worklist.size() > 0) {
+ Node* use = dead_worklist.pop();
+ int opc = use->Opcode();
+ switch (opc) {
+ case Op_Region: {
+ uint i = 1;
+ for (i = 1; i < use->req(); i++) {
+ if (use->in(i) != C->top()) {
+ break;
+ }
+ }
+ if (i >= use->req()) {
+ for (SimpleDUIterator i(use); i.has_next(); i.next()) {
+ Node* m = i.get();
+ if (m->is_Phi()) {
+ dead_worklist.push(m);
+ }
+ }
+ C->gvn_replace_by(use, C->top());
+ }
+ break;
+ }
+ case Op_AddP:
+ case Op_CreateEx: {
+ // Recursively clean up references to CreateEx so EA doesn't
+ // get unhappy about the partially collapsed graph.
+ for (SimpleDUIterator i(use); i.has_next(); i.next()) {
+ Node* m = i.get();
+ if (m->is_AddP()) {
+ dead_worklist.push(m);
+ }
+ }
+ C->gvn_replace_by(use, C->top());
+ break;
+ }
+ case Op_Phi:
+ if (use->in(0) == C->top()) {
+ C->gvn_replace_by(use, C->top());
+ }
+ break;
+ }
+ }
+}
+
+
+bool StringConcat::validate_control_flow() {
+ // We found all the calls and arguments, now let's see if it's
+ // safe to transform the graph as we would expect.
+
+ // Check to see if this resulted in too many uncommon traps previously
+ if (Compile::current()->too_many_traps(_begin->jvms()->method(), _begin->jvms()->bci(),
+ Deoptimization::Reason_intrinsic)) {
+ return false;
+ }
+
+ // Walk backwards over the control flow from toString to the
+ // allocation and make sure all the control flow is ok. This
+ // means it's either going to be eliminated once the calls are
+ // removed or it can safely be transformed into an uncommon
+ // trap.
+
+ int null_check_count = 0;
+ Unique_Node_List ctrl_path;
+
+ assert(_control.contains(_begin), "missing");
+ assert(_control.contains(_end), "missing");
+
+ // Collect the nodes that we know about and will eliminate into ctrl_path
+ for (uint i = 0; i < _control.size(); i++) {
+ // Push the call and its control projection
+ Node* n = _control.at(i);
+ if (n->is_Allocate()) {
+ AllocateNode* an = n->as_Allocate();
+ InitializeNode* init = an->initialization();
+ ctrl_path.push(init);
+ ctrl_path.push(init->as_Multi()->proj_out(0));
+ }
+ if (n->is_Call()) {
+ CallNode* cn = n->as_Call();
+ ctrl_path.push(cn);
+ ctrl_path.push(cn->proj_out(0));
+ ctrl_path.push(cn->proj_out(0)->unique_out());
+ ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
+ } else {
+ ShouldNotReachHere();
+ }
+ }
+
+ // Skip backwards through the control checking for unexpected control flow
+ Node* ptr = _end;
+ bool fail = false;
+ while (ptr != _begin) {
+ if (ptr->is_Call() && ctrl_path.member(ptr)) {
+ ptr = ptr->in(0);
+ } else if (ptr->is_CatchProj() && ctrl_path.member(ptr)) {
+ ptr = ptr->in(0)->in(0)->in(0);
+ assert(ctrl_path.member(ptr), "should be a known piece of control");
+ } else if (ptr->is_IfTrue()) {
+ IfNode* iff = ptr->in(0)->as_If();
+ BoolNode* b = iff->in(1)->isa_Bool();
+ Node* cmp = b->in(1);
+ Node* v1 = cmp->in(1);
+ Node* v2 = cmp->in(2);
+ Node* otherproj = iff->proj_out(1 - ptr->as_Proj()->_con);
+
+ // Null check of the return of append which can simply be eliminated
+ if (b->_test._test == BoolTest::ne &&
+ v2->bottom_type() == TypePtr::NULL_PTR &&
+ v1->is_Proj() && ctrl_path.member(v1->in(0))) {
+ // NULL check of the return value of the append
+ null_check_count++;
+ if (otherproj->outcnt() == 1) {
+ CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
+ if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) {
+ ctrl_path.push(call);
+ }
+ }
+ _control.push(ptr);
+ ptr = ptr->in(0)->in(0);
+ continue;
+ }
+
+ // A test which leads to an uncommon trap which should be safe.
+ // Later this trap will be converted into a trap that restarts
+ // at the beginning.
+ if (otherproj->outcnt() == 1) {
+ CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
+ if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) {
+ // control flow leads to uct so should be ok
+ _uncommon_traps.push(call);
+ ctrl_path.push(call);
+ ptr = ptr->in(0)->in(0);
+ continue;
+ }
+ }
+
+#ifndef PRODUCT
+ // Some unexpected control flow we don't know how to handle.
+ if (PrintOptimizeStringConcat) {
+ tty->print_cr("failing with unknown test");
+ b->dump();
+ cmp->dump();
+ v1->dump();
+ v2->dump();
+ tty->cr();
+ }
+#endif
+ break;
+ } else if (ptr->is_Proj() && ptr->in(0)->is_Initialize()) {
+ ptr = ptr->in(0)->in(0);
+ } else if (ptr->is_Region()) {
+ Node* copy = ptr->as_Region()->is_copy();
+ if (copy != NULL) {
+ ptr = copy;
+ continue;
+ }
+ if (ptr->req() == 3 &&
+ ptr->in(1) != NULL && ptr->in(1)->is_Proj() &&
+ ptr->in(2) != NULL && ptr->in(2)->is_Proj() &&
+ ptr->in(1)->in(0) == ptr->in(2)->in(0) &&
+ ptr->in(1)->in(0) != NULL && ptr->in(1)->in(0)->is_If()) {
+ // Simple diamond.
+ // XXX should check for possibly merging stores. simple data merges are ok.
+ ptr = ptr->in(1)->in(0)->in(0);
+ continue;
+ }
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print_cr("fusion would fail for region");
+ _begin->dump();
+ ptr->dump(2);
+ }
+#endif
+ fail = true;
+ break;
+ } else {
+ // other unknown control
+ if (!fail) {
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ tty->print_cr("fusion would fail for");
+ _begin->dump();
+ }
+#endif
+ fail = true;
+ }
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ ptr->dump();
+ }
+#endif
+ ptr = ptr->in(0);
+ }
+ }
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat && fail) {
+ tty->cr();
+ }
+#endif
+ if (fail) return !fail;
+
+ // Validate that all these results produced are contained within
+ // this cluster of objects. First collect all the results produced
+ // by calls in the region.
+ _stringopts->_visited.Clear();
+ Node_List worklist;
+ Node* final_result = _end->proj_out(TypeFunc::Parms);
+ for (uint i = 0; i < _control.size(); i++) {
+ CallNode* cnode = _control.at(i)->isa_Call();
+ if (cnode != NULL) {
+ _stringopts->_visited.test_set(cnode->_idx);
+ }
+ Node* result = cnode != NULL ? cnode->proj_out(TypeFunc::Parms) : NULL;
+ if (result != NULL && result != final_result) {
+ worklist.push(result);
+ }
+ }
+
+ Node* last_result = NULL;
+ while (worklist.size() > 0) {
+ Node* result = worklist.pop();
+ if (_stringopts->_visited.test_set(result->_idx))
+ continue;
+ for (SimpleDUIterator i(result); i.has_next(); i.next()) {
+ Node *use = i.get();
+ if (ctrl_path.member(use)) {
+ // already checked this
+ continue;
+ }
+ int opc = use->Opcode();
+ if (opc == Op_CmpP || opc == Op_Node) {
+ ctrl_path.push(use);
+ continue;
+ }
+ if (opc == Op_CastPP || opc == Op_CheckCastPP) {
+ for (SimpleDUIterator j(use); j.has_next(); j.next()) {
+ worklist.push(j.get());
+ }
+ worklist.push(use->in(1));
+ ctrl_path.push(use);
+ continue;
+ }
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat) {
+ if (result != last_result) {
+ last_result = result;
+ tty->print_cr("extra uses for result:");
+ last_result->dump();
+ }
+ use->dump();
+ }
+#endif
+ fail = true;
+ break;
+ }
+ }
+
+#ifndef PRODUCT
+ if (PrintOptimizeStringConcat && !fail) {
+ ttyLocker ttyl;
+ tty->cr();
+ tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size());
+ _begin->jvms()->dump_spec(tty); tty->cr();
+ for (int i = 0; i < num_arguments(); i++) {
+ argument(i)->dump();
+ }
+ _control.dump();
+ tty->cr();
+ }
+#endif
+
+ return !fail;
+}
+
+Node* PhaseStringOpts::fetch_static_field(GraphKit& kit, ciField* field) {
+ const TypeKlassPtr* klass_type = TypeKlassPtr::make(field->holder());
+ Node* klass_node = __ makecon(klass_type);
+ BasicType bt = field->layout_type();
+ ciType* field_klass = field->type();
+
+ const Type *type;
+ if( bt == T_OBJECT ) {
+ if (!field->type()->is_loaded()) {
+ type = TypeInstPtr::BOTTOM;
+ } else if (field->is_constant()) {
+ // This can happen if the constant oop is non-perm.
+ ciObject* con = field->constant_value().as_object();
+ // Do not "join" in the previous type; it doesn't add value,
+ // and may yield a vacuous result if the field is of interface type.
+ type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
+ assert(type != NULL, "field singleton type must be consistent");
+ } else {
+ type = TypeOopPtr::make_from_klass(field_klass->as_klass());
+ }
+ } else {
+ type = Type::get_const_basic_type(bt);
+ }
+
+ return kit.make_load(NULL, kit.basic_plus_adr(klass_node, field->offset_in_bytes()),
+ type, T_OBJECT,
+ C->get_alias_index(klass_type->add_offset(field->offset_in_bytes())));
+}
+
+Node* PhaseStringOpts::int_stringSize(GraphKit& kit, Node* arg) {
+ RegionNode *final_merge = new (C, 3) RegionNode(3);
+ kit.gvn().set_type(final_merge, Type::CONTROL);
+ Node* final_size = new (C, 3) PhiNode(final_merge, TypeInt::INT);
+ kit.gvn().set_type(final_size, TypeInt::INT);
+
+ IfNode* iff = kit.create_and_map_if(kit.control(),
+ __ Bool(__ CmpI(arg, __ intcon(0x80000000)), BoolTest::ne),
+ PROB_FAIR, COUNT_UNKNOWN);
+ Node* is_min = __ IfFalse(iff);
+ final_merge->init_req(1, is_min);
+ final_size->init_req(1, __ intcon(11));
+
+ kit.set_control(__ IfTrue(iff));
+ if (kit.stopped()) {
+ final_merge->init_req(2, C->top());
+ final_size->init_req(2, C->top());
+ } else {
+
+ // int size = (i < 0) ? stringSize(-i) + 1 : stringSize(i);
+ RegionNode *r = new (C, 3) RegionNode(3);
+ kit.gvn().set_type(r, Type::CONTROL);
+ Node *phi = new (C, 3) PhiNode(r, TypeInt::INT);
+ kit.gvn().set_type(phi, TypeInt::INT);
+ Node *size = new (C, 3) PhiNode(r, TypeInt::INT);
+ kit.gvn().set_type(size, TypeInt::INT);
+ Node* chk = __ CmpI(arg, __ intcon(0));
+ Node* p = __ Bool(chk, BoolTest::lt);
+ IfNode* iff = kit.create_and_map_if(kit.control(), p, PROB_FAIR, COUNT_UNKNOWN);
+ Node* lessthan = __ IfTrue(iff);
+ Node* greaterequal = __ IfFalse(iff);
+ r->init_req(1, lessthan);
+ phi->init_req(1, __ SubI(__ intcon(0), arg));
+ size->init_req(1, __ intcon(1));
+ r->init_req(2, greaterequal);
+ phi->init_req(2, arg);
+ size->init_req(2, __ intcon(0));
+ kit.set_control(r);
+ C->record_for_igvn(r);
+ C->record_for_igvn(phi);
+ C->record_for_igvn(size);
+
+ // for (int i=0; ; i++)
+ // if (x <= sizeTable[i])
+ // return i+1;
+ RegionNode *loop = new (C, 3) RegionNode(3);
+ loop->init_req(1, kit.control());
+ kit.gvn().set_type(loop, Type::CONTROL);
+
+ Node *index = new (C, 3) PhiNode(loop, TypeInt::INT);
+ index->init_req(1, __ intcon(0));
+ kit.gvn().set_type(index, TypeInt::INT);
+ kit.set_control(loop);
+ Node* sizeTable = fetch_static_field(kit, size_table_field);
+
+ Node* value = kit.load_array_element(NULL, sizeTable, index, TypeAryPtr::INTS);
+ C->record_for_igvn(value);
+ Node* limit = __ CmpI(phi, value);
+ Node* limitb = __ Bool(limit, BoolTest::le);
+ IfNode* iff2 = kit.create_and_map_if(kit.control(), limitb, PROB_MIN, COUNT_UNKNOWN);
+ Node* lessEqual = __ IfTrue(iff2);
+ Node* greater = __ IfFalse(iff2);
+
+ loop->init_req(2, greater);
+ index->init_req(2, __ AddI(index, __ intcon(1)));
+
+ kit.set_control(lessEqual);
+ C->record_for_igvn(loop);
+ C->record_for_igvn(index);
+
+ final_merge->init_req(2, kit.control());
+ final_size->init_req(2, __ AddI(__ AddI(index, size), __ intcon(1)));
+ }
+
+ kit.set_control(final_merge);
+ C->record_for_igvn(final_merge);
+ C->record_for_igvn(final_size);
+
+ return final_size;
+}
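+
+// Worked example for int_stringSize above: for arg = -2345 the sign branch
+// leaves phi = 2345 and size = 1, the sizeTable loop exits at index 3
+// (2345 <= sizeTable[3] = 9999), and final_size = 3 + 1 + 1 = 5, the length
+// of "-2345". The 0x80000000 check returns 11 for Integer.MIN_VALUE, whose
+// decimal form "-2147483648" cannot be negated into the loop.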
+
+void PhaseStringOpts::int_getChars(GraphKit& kit, Node* arg, Node* char_array, Node* start, Node* end) {
+ RegionNode *final_merge = new (C, 4) RegionNode(4);
+ kit.gvn().set_type(final_merge, Type::CONTROL);
+ Node *final_mem = PhiNode::make(final_merge, kit.memory(char_adr_idx), Type::MEMORY, TypeAryPtr::CHARS);
+ kit.gvn().set_type(final_mem, Type::MEMORY);
+
+ // need to handle Integer.MIN_VALUE specially because negating doesn't make it positive
+ {
+ // i == MIN_VALUE
+ IfNode* iff = kit.create_and_map_if(kit.control(),
+ __ Bool(__ CmpI(arg, __ intcon(0x80000000)), BoolTest::ne),
+ PROB_FAIR, COUNT_UNKNOWN);
+
+ Node* old_mem = kit.memory(char_adr_idx);
+
+ kit.set_control(__ IfFalse(iff));
+ if (kit.stopped()) {
+ // Statically not equal to MIN_VALUE so this path is dead
+ final_merge->init_req(3, kit.control());
+ } else {
+ copy_string(kit, __ makecon(TypeInstPtr::make(C->env()->the_min_jint_string())),
+ char_array, start);
+ final_merge->init_req(3, kit.control());
+ final_mem->init_req(3, kit.memory(char_adr_idx));
+ }
+
+ kit.set_control(__ IfTrue(iff));
+ kit.set_memory(old_mem, char_adr_idx);
+ }
+
+
+ // Simplified version of Integer.getChars
+
+ // int q, r;
+ // int charPos = index;
+ Node* charPos = end;
+
+ // char sign = 0;
+
+ Node* i = arg;
+ Node* sign = __ intcon(0);
+
+ // if (i < 0) {
+ // sign = '-';
+ // i = -i;
+ // }
+ {
+ IfNode* iff = kit.create_and_map_if(kit.control(),
+ __ Bool(__ CmpI(arg, __ intcon(0)), BoolTest::lt),
+ PROB_FAIR, COUNT_UNKNOWN);
+
+ RegionNode *merge = new (C, 3) RegionNode(3);
+ kit.gvn().set_type(merge, Type::CONTROL);
+ i = new (C, 3) PhiNode(merge, TypeInt::INT);
+ kit.gvn().set_type(i, TypeInt::INT);
+ sign = new (C, 3) PhiNode(merge, TypeInt::INT);
+ kit.gvn().set_type(sign, TypeInt::INT);
+
+ merge->init_req(1, __ IfTrue(iff));
+ i->init_req(1, __ SubI(__ intcon(0), arg));
+ sign->init_req(1, __ intcon('-'));
+ merge->init_req(2, __ IfFalse(iff));
+ i->init_req(2, arg);
+ sign->init_req(2, __ intcon(0));
+
+ kit.set_control(merge);
+
+ C->record_for_igvn(merge);
+ C->record_for_igvn(i);
+ C->record_for_igvn(sign);
+ }
+
+ // for (;;) {
+ // q = i / 10;
+ // r = i - ((q << 3) + (q << 1)); // r = i-(q*10) ...
+ // buf [--charPos] = digits [r];
+ // i = q;
+ // if (i == 0) break;
+ // }
+
+ {
+ RegionNode *head = new (C, 3) RegionNode(3);
+ head->init_req(1, kit.control());
+ kit.gvn().set_type(head, Type::CONTROL);
+ Node *i_phi = new (C, 3) PhiNode(head, TypeInt::INT);
+ i_phi->init_req(1, i);
+ kit.gvn().set_type(i_phi, TypeInt::INT);
+ charPos = PhiNode::make(head, charPos);
+ kit.gvn().set_type(charPos, TypeInt::INT);
+ Node *mem = PhiNode::make(head, kit.memory(char_adr_idx), Type::MEMORY, TypeAryPtr::CHARS);
+ kit.gvn().set_type(mem, Type::MEMORY);
+ kit.set_control(head);
+ kit.set_memory(mem, char_adr_idx);
+
+ Node* q = __ DivI(kit.null(), i_phi, __ intcon(10));
+ Node* r = __ SubI(i_phi, __ AddI(__ LShiftI(q, __ intcon(3)),
+ __ LShiftI(q, __ intcon(1))));
+ Node* m1 = __ SubI(charPos, __ intcon(1));
+ Node* ch = __ AddI(r, __ intcon('0'));
+
+ Node* st = __ store_to_memory(kit.control(), kit.array_element_address(char_array, m1, T_CHAR),
+ ch, T_CHAR, char_adr_idx);
+
+
+ IfNode* iff = kit.create_and_map_if(head, __ Bool(__ CmpI(q, __ intcon(0)), BoolTest::ne),
+ PROB_FAIR, COUNT_UNKNOWN);
+ Node* ne = __ IfTrue(iff);
+ Node* eq = __ IfFalse(iff);
+
+ head->init_req(2, ne);
+ mem->init_req(2, st);
+ i_phi->init_req(2, q);
+ charPos->init_req(2, m1);
+
+ charPos = m1;
+
+ kit.set_control(eq);
+ kit.set_memory(st, char_adr_idx);
+
+ C->record_for_igvn(head);
+ C->record_for_igvn(mem);
+ C->record_for_igvn(i_phi);
+ C->record_for_igvn(charPos);
+ }
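+
+ // e.g. for i = 2345: q = 234, r = 2345 - (234*8 + 234*2) = 5, so '5' is
+ // stored at charPos - 1; subsequent iterations store '4', '3' and '2',
+ // filling the buffer backwards from 'end', which is why the caller passes
+ // the ending index as well as the start.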
+
+ {
+ // if (sign != 0) {
+ // buf [--charPos] = sign;
+ // }
+ IfNode* iff = kit.create_and_map_if(kit.control(),
+ __ Bool(__ CmpI(sign, __ intcon(0)), BoolTest::ne),
+ PROB_FAIR, COUNT_UNKNOWN);
+
+ final_merge->init_req(2, __ IfFalse(iff));
+ final_mem->init_req(2, kit.memory(char_adr_idx));
+
+ kit.set_control(__ IfTrue(iff));
+ if (kit.stopped()) {
+ final_merge->init_req(1, C->top());
+ final_mem->init_req(1, C->top());
+ } else {
+ Node* m1 = __ SubI(charPos, __ intcon(1));
+ Node* st = __ store_to_memory(kit.control(), kit.array_element_address(char_array, m1, T_CHAR),
+ sign, T_CHAR, char_adr_idx);
+
+ final_merge->init_req(1, kit.control());
+ final_mem->init_req(1, st);
+ }
+
+ kit.set_control(final_merge);
+ kit.set_memory(final_mem, char_adr_idx);
+
+ C->record_for_igvn(final_merge);
+ C->record_for_igvn(final_mem);
+ }
+}
+
+
+Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) {
+ Node* string = str;
+ Node* offset = kit.make_load(NULL,
+ kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()),
+ TypeInt::INT, T_INT, offset_field_idx);
+ Node* count = kit.make_load(NULL,
+ kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
+ TypeInt::INT, T_INT, count_field_idx);
+ const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
+ TypeAry::make(TypeInt::CHAR,TypeInt::POS),
+ ciTypeArrayKlass::make(T_CHAR), true, 0);
+ Node* value = kit.make_load(NULL,
+ kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()),
+ value_type, T_OBJECT, value_field_idx);
+
+ // copy the contents
+ if (offset->is_Con() && count->is_Con() && value->is_Con() && count->get_int() < unroll_string_copy_length) {
+ // For small constant strings just emit individual stores.
+ // A length of 6 seems like a good space/speed tradeoff.
+ int c = count->get_int();
+ int o = offset->get_int();
+ const TypeOopPtr* t = kit.gvn().type(value)->isa_oopptr();
+ ciTypeArray* value_array = t->const_oop()->as_type_array();
+ for (int e = 0; e < c; e++) {
+ __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
+ __ intcon(value_array->char_at(o + e)), T_CHAR, char_adr_idx);
+ start = __ AddI(start, __ intcon(1));
+ }
+ } else {
+ Node* src_ptr = kit.array_element_address(value, offset, T_CHAR);
+ Node* dst_ptr = kit.array_element_address(char_array, start, T_CHAR);
+ Node* c = count;
+ Node* extra = NULL;
+#ifdef _LP64
+ c = __ ConvI2L(c);
+ extra = C->top();
+#endif
+ Node* call = kit.make_runtime_call(GraphKit::RC_LEAF|GraphKit::RC_NO_FP,
+ OptoRuntime::fast_arraycopy_Type(),
+ CAST_FROM_FN_PTR(address, StubRoutines::jshort_disjoint_arraycopy()),
+ "jshort_disjoint_arraycopy", TypeAryPtr::CHARS,
+ src_ptr, dst_ptr, c, extra);
+ start = __ AddI(start, count);
+ }
+ return start;
+}
+
+
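+// Sketch (an assumed Java-level view, not the actual emitted code) of what
+// replace_string_concat below produces for a matched pattern like
+//
+//   String s = new StringBuilder().append(a).append(i).append('!').toString();
+//
+// roughly:
+//
+//   int len = a.count + int_stringSize(i) + 1;   // with overflow trap
+//   char[] buf = new char[len];                  // zeroing elided
+//   int pos = copy_string(a, buf, 0);
+//   pos = pos + int_stringSize(i);               // int_getChars fills [pos, end)
+//   buf[pos++] = '!';
+//   String s = <new String with offset=0, count=len, value=buf>;
+//
+// Any failed assumption deoptimizes and reexecutes the original bytecodes.
+//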
+void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
+ // Log a little info about the transformation
+ sc->maybe_log_transform();
+
+ // pull the JVMState of the allocation into a SafePointNode to serve
+ // as a shim for the insertion of the new code.
+ JVMState* jvms = sc->begin()->jvms()->clone_shallow(C);
+ uint size = sc->begin()->req();
+ SafePointNode* map = new (C, size) SafePointNode(size, jvms);
+
+ // copy the control and memory state from the final call into our
+ // new starting state. This allows any preceding tests to feed
+ // into the new section of code.
+ for (uint i1 = 0; i1 < TypeFunc::Parms; i1++) {
+ map->init_req(i1, sc->end()->in(i1));
+ }
+ // blow away old allocation arguments
+ for (uint i1 = TypeFunc::Parms; i1 < jvms->debug_start(); i1++) {
+ map->init_req(i1, C->top());
+ }
+ // Copy the rest of the inputs for the JVMState
+ for (uint i1 = jvms->debug_start(); i1 < sc->begin()->req(); i1++) {
+ map->init_req(i1, sc->begin()->in(i1));
+ }
+ // Make sure the memory state is a MergeMem for parsing.
+ if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
+ map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+ }
+
+ jvms->set_map(map);
+ map->ensure_stack(jvms, jvms->method()->max_stack());
+
+
+ // disconnect all the old StringBuilder calls from the graph
+ sc->eliminate_unneeded_control();
+
+ // At this point all the old work has been completely removed from
+ // the graph and the saved JVMState exists at the point where the
+ // final toString call used to be.
+ GraphKit kit(jvms);
+
+ // There may be uncommon traps which are still using the
+ // intermediate states and these need to be rewritten to point at
+ // the JVMState at the beginning of the transformation.
+ sc->convert_uncommon_traps(kit, jvms);
+
+ // Now insert the logic to compute the size of the string followed
+ // by all the logic to construct array and resulting string.
+
+ Node* null_string = __ makecon(TypeInstPtr::make(C->env()->the_null_string()));
+
+ // Create a region for the overflow checks to merge into.
+ int args = MAX2(sc->num_arguments(), 1);
+ RegionNode* overflow = new (C, args) RegionNode(args);
+ kit.gvn().set_type(overflow, Type::CONTROL);
+
+ // Create a hook node to hold onto the individual sizes since they
+ // are needed for the copying phase.
+ Node* string_sizes = new (C, args) Node(args);
+
+ Node* length = __ intcon(0);
+ for (int argi = 0; argi < sc->num_arguments(); argi++) {
+ Node* arg = sc->argument(argi);
+ switch (sc->mode(argi)) {
+ case StringConcat::IntMode: {
+ Node* string_size = int_stringSize(kit, arg);
+
+ // accumulate total
+ length = __ AddI(length, string_size);
+
+ // Cache this value for use by int_getChars below
+ string_sizes->init_req(argi, string_size);
+ break;
+ }
+ case StringConcat::StringMode: {
+ const Type* type = kit.gvn().type(arg);
+ if (type == TypePtr::NULL_PTR) {
+ // replace the argument with the null checked version
+ arg = null_string;
+ sc->set_argument(argi, arg);
+ } else if (!type->higher_equal(TypeInstPtr::NOTNULL)) {
+ // s = s != null ? s : "null";
+ // length = length + (s.count - s.offset);
+ RegionNode *r = new (C, 3) RegionNode(3);
+ kit.gvn().set_type(r, Type::CONTROL);
+ Node *phi = new (C, 3) PhiNode(r, type->join(TypeInstPtr::NOTNULL));
+ kit.gvn().set_type(phi, phi->bottom_type());
+ Node* p = __ Bool(__ CmpP(arg, kit.null()), BoolTest::ne);
+ IfNode* iff = kit.create_and_map_if(kit.control(), p, PROB_MIN, COUNT_UNKNOWN);
+ Node* notnull = __ IfTrue(iff);
+ Node* isnull = __ IfFalse(iff);
+ r->init_req(1, notnull);
+ phi->init_req(1, arg);
+ r->init_req(2, isnull);
+ phi->init_req(2, null_string);
+ kit.set_control(r);
+ C->record_for_igvn(r);
+ C->record_for_igvn(phi);
+ // replace the argument with the null checked version
+ arg = phi;
+ sc->set_argument(argi, arg);
+ }
+ // Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset),
+ // TypeInt::INT, T_INT, offset_field_idx);
+ Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
+ TypeInt::INT, T_INT, count_field_idx);
+ length = __ AddI(length, count);
+ string_sizes->init_req(argi, NULL);
+ break;
+ }
+ case StringConcat::CharMode: {
+ // one character only
+ length = __ AddI(length, __ intcon(1));
+ break;
+ }
+ default:
+ ShouldNotReachHere();
+ }
+ if (argi > 0) {
+ // Check that the sum hasn't overflowed
+ IfNode* iff = kit.create_and_map_if(kit.control(),
+ __ Bool(__ CmpI(length, __ intcon(0)), BoolTest::lt),
+ PROB_MIN, COUNT_UNKNOWN);
+ kit.set_control(__ IfFalse(iff));
+ overflow->set_req(argi, __ IfTrue(iff));
+ }
+ }
+
+ {
+ // Hook
+ PreserveJVMState pjvms(&kit);
+ kit.set_control(overflow);
+ kit.uncommon_trap(Deoptimization::Reason_intrinsic,
+ Deoptimization::Action_make_not_entrant);
+ }
+
+ // length now contains the number of characters needed for the
+ // char[] so create a new AllocateArray for the char[]
+ Node* char_array = NULL;
+ {
+ PreserveReexecuteState preexecs(&kit);
+ // The original jvms is for an allocation of either a String or
+ // StringBuffer so no stack adjustment is necessary for proper
+ // reexecution. If we deoptimize in the slow path the bytecode
+ // will be reexecuted and the char[] allocation will be thrown away.
+ kit.jvms()->set_should_reexecute(true);
+ char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
+ length, 1);
+ }
+
+ // Mark the allocation so that zeroing is skipped since the code
+ // below will overwrite the entire array
+ AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
+ char_alloc->maybe_set_complete(_gvn);
+
+ // Now copy the string representations into the final char[]
+ Node* start = __ intcon(0);
+ for (int argi = 0; argi < sc->num_arguments(); argi++) {
+ Node* arg = sc->argument(argi);
+ switch (sc->mode(argi)) {
+ case StringConcat::IntMode: {
+ Node* end = __ AddI(start, string_sizes->in(argi));
+ // getChars works backwards so pass the ending point as well as the start
+ int_getChars(kit, arg, char_array, start, end);
+ start = end;
+ break;
+ }
+ case StringConcat::StringMode: {
+ start = copy_string(kit, arg, char_array, start);
+ break;
+ }
+ case StringConcat::CharMode: {
+ __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
+ arg, T_CHAR, char_adr_idx);
+ start = __ AddI(start, __ intcon(1));
+ break;
+ }
+ default:
+ ShouldNotReachHere();
+ }
+ }
+
+ // If we're not reusing an existing String allocation then allocate one here.
+ Node* result = sc->string_alloc();
+ if (result == NULL) {
+ PreserveReexecuteState preexecs(&kit);
+ // The original jvms is for an allocation of either a String or
+ // StringBuffer so no stack adjustment is necessary for proper
+ // reexecution.
+ kit.jvms()->set_should_reexecute(true);
+ result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
+ }
+
+ // Initialize the string
+ kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::offset_offset_in_bytes()),
+ __ intcon(0), T_INT, offset_field_idx);
+ kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::count_offset_in_bytes()),
+ length, T_INT, count_field_idx);
+ kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::value_offset_in_bytes()),
+ char_array, T_OBJECT, value_field_idx);
+
+ // hook up the outgoing control and result
+ kit.replace_call(sc->end(), result);
+
+ // Unhook any hook nodes
+ string_sizes->disconnect_inputs(NULL);
+ sc->cleanup();
+}
diff --git a/hotspot/src/share/vm/opto/stringopts.hpp b/hotspot/src/share/vm/opto/stringopts.hpp
new file mode 100644
index 00000000000..417e08a9185
--- /dev/null
+++ b/hotspot/src/share/vm/opto/stringopts.hpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class StringConcat;
+
+class PhaseStringOpts : public Phase {
+ friend class StringConcat;
+
+ private:
+ PhaseGVN* _gvn;
+
+ // List of dead nodes to clean up aggressively at the end
+ Unique_Node_List dead_worklist;
+
+ // Memory slices needed for code gen
+ int char_adr_idx;
+ int value_field_idx;
+ int count_field_idx;
+ int offset_field_idx;
+
+ // Integer.sizeTable - used for int to String conversion
+ ciField* size_table_field;
+
+ // A set for use by various stages
+ VectorSet _visited;
+
+ // Collect a list of all SB.toString calls
+ Node_List collect_toString_calls();
+
+ // Examine the use of the SB alloc to see if it can be replaced with
+ // a single string construction.
+ StringConcat* build_candidate(CallStaticJavaNode* call);
+
+ // Replace all the SB calls in concat with an optimized String allocation
+ void replace_string_concat(StringConcat* concat);
+
+ // Load the value of a static field, performing any constant folding.
+ Node* fetch_static_field(GraphKit& kit, ciField* field);
+
+ // Compute the number of characters required to represent the int value
+ Node* int_stringSize(GraphKit& kit, Node* value);
+
+ // Copy the characters representing value into char_array starting at start
+ void int_getChars(GraphKit& kit, Node* value, Node* char_array, Node* start, Node* end);
+
+ // Copy the contents of the String str into char_array starting at index start.
+ Node* copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start);
+
+ // Clean up any leftover nodes
+ void record_dead_node(Node* node);
+ void remove_dead_nodes();
+
+ PhaseGVN* gvn() { return _gvn; }
+
+ enum {
+ // max length of constant string copy unrolling in copy_string
+ unroll_string_copy_length = 6
+ };
+
+ public:
+ PhaseStringOpts(PhaseGVN* gvn, Unique_Node_List* worklist);
+};
diff --git a/hotspot/src/share/vm/opto/subnode.cpp b/hotspot/src/share/vm/opto/subnode.cpp
index 81e033f2769..1a8c2f60e66 100644
--- a/hotspot/src/share/vm/opto/subnode.cpp
+++ b/hotspot/src/share/vm/opto/subnode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1244,8 +1244,7 @@ const Type *CosDNode::Value( PhaseTransform *phase ) const {
if( t1 == Type::TOP ) return Type::TOP;
if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
double d = t1->getd();
- if( d < 0.0 ) return Type::DOUBLE;
- return TypeD::make( SharedRuntime::dcos( d ) );
+ return TypeD::make( StubRoutines::intrinsic_cos( d ) );
}
//=============================================================================
@@ -1256,8 +1255,7 @@ const Type *SinDNode::Value( PhaseTransform *phase ) const {
if( t1 == Type::TOP ) return Type::TOP;
if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
double d = t1->getd();
- if( d < 0.0 ) return Type::DOUBLE;
- return TypeD::make( SharedRuntime::dsin( d ) );
+ return TypeD::make( StubRoutines::intrinsic_sin( d ) );
}
//=============================================================================
@@ -1268,8 +1266,7 @@ const Type *TanDNode::Value( PhaseTransform *phase ) const {
if( t1 == Type::TOP ) return Type::TOP;
if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
double d = t1->getd();
- if( d < 0.0 ) return Type::DOUBLE;
- return TypeD::make( SharedRuntime::dtan( d ) );
+ return TypeD::make( StubRoutines::intrinsic_tan( d ) );
}
//=============================================================================
@@ -1280,8 +1277,7 @@ const Type *LogDNode::Value( PhaseTransform *phase ) const {
if( t1 == Type::TOP ) return Type::TOP;
if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
double d = t1->getd();
- if( d < 0.0 ) return Type::DOUBLE;
- return TypeD::make( SharedRuntime::dlog( d ) );
+ return TypeD::make( StubRoutines::intrinsic_log( d ) );
}
//=============================================================================
@@ -1292,8 +1288,7 @@ const Type *Log10DNode::Value( PhaseTransform *phase ) const {
if( t1 == Type::TOP ) return Type::TOP;
if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
double d = t1->getd();
- if( d < 0.0 ) return Type::DOUBLE;
- return TypeD::make( SharedRuntime::dlog10( d ) );
+ return TypeD::make( StubRoutines::intrinsic_log10( d ) );
}
//=============================================================================
@@ -1304,8 +1299,7 @@ const Type *ExpDNode::Value( PhaseTransform *phase ) const {
if( t1 == Type::TOP ) return Type::TOP;
if( t1->base() != Type::DoubleCon ) return Type::DOUBLE;
double d = t1->getd();
- if( d < 0.0 ) return Type::DOUBLE;
- return TypeD::make( SharedRuntime::dexp( d ) );
+ return TypeD::make( StubRoutines::intrinsic_exp( d ) );
}
@@ -1323,5 +1317,5 @@ const Type *PowDNode::Value( PhaseTransform *phase ) const {
double d2 = t2->getd();
if( d1 < 0.0 ) return Type::DOUBLE;
if( d2 < 0.0 ) return Type::DOUBLE;
- return TypeD::make( SharedRuntime::dpow( d1, d2 ) );
+ return TypeD::make( StubRoutines::intrinsic_pow( d1, d2 ) );
}
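The guard removed above existed because constant folding must never disagree with what the compiled code computes at run time; by folding through the same StubRoutines::intrinsic_* entry points the generated code calls, the result is identical for every input, including negative ones. A hedged Java-level illustration of that invariant (not HotSpot code):

```java
public class FoldConsistency {
    // A constant argument makes this call a folding candidate at compile time.
    static final double FOLDED = Math.sin(2.0);

    public static void main(String[] args) {
        // A value only known at run time cannot be folded.
        double x = args.length > 0 ? Double.parseDouble(args[0]) : 2.0;
        double runtime = Math.sin(x);
        // If folding and the runtime code used different algorithms, the two
        // values could differ in the last bit for the same input.
        System.out.println(x == 2.0 && FOLDED == runtime);   // expected: true
    }
}
```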
diff --git a/hotspot/src/share/vm/opto/superword.cpp b/hotspot/src/share/vm/opto/superword.cpp
index bc62f30c3d3..83d359d31f1 100644
--- a/hotspot/src/share/vm/opto/superword.cpp
+++ b/hotspot/src/share/vm/opto/superword.cpp
@@ -1921,6 +1921,11 @@ SWPointer::SWPointer(MemNode* mem, SuperWord* slp) :
}
// Match AddP(base, AddP(ptr, k*iv [+ invariant]), constant)
Node* base = adr->in(AddPNode::Base);
+ // unsafe references cannot be aligned appropriately without runtime checking
+ if (base == NULL || base->bottom_type() == Type::TOP) {
+ assert(!valid(), "unsafe access");
+ return;
+ }
for (int i = 0; i < 3; i++) {
if (!scaled_iv_plus_offset(adr->in(AddPNode::Offset))) {
assert(!valid(), "too complex");
diff --git a/hotspot/src/share/vm/opto/type.cpp b/hotspot/src/share/vm/opto/type.cpp
index c22d1e8f6b0..289835af4c8 100644
--- a/hotspot/src/share/vm/opto/type.cpp
+++ b/hotspot/src/share/vm/opto/type.cpp
@@ -2431,7 +2431,7 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass *klass, bool klass_
//------------------------------make_from_constant-----------------------------
// Make a java pointer from an oop constant
const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) {
- if (o->is_method_data() || o->is_method()) {
+ if (o->is_method_data() || o->is_method() || o->is_cpcache()) {
// Treat much like a typeArray of bytes, like below, but fake the type...
const Type* etype = (Type*)get_const_basic_type(T_BYTE);
const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
@@ -3966,7 +3966,7 @@ const TypeFunc *TypeFunc::make(ciMethod* method) {
const TypeFunc* tf = C->last_tf(method); // check cache
if (tf != NULL) return tf; // The hit rate here is almost 50%.
const TypeTuple *domain;
- if (method->flags().is_static()) {
+ if (method->is_static()) {
domain = TypeTuple::make_domain(NULL, method->signature());
} else {
domain = TypeTuple::make_domain(method->holder(), method->signature());
diff --git a/hotspot/src/share/vm/opto/type.hpp b/hotspot/src/share/vm/opto/type.hpp
index 9b11f9ca595..03f81532c41 100644
--- a/hotspot/src/share/vm/opto/type.hpp
+++ b/hotspot/src/share/vm/opto/type.hpp
@@ -847,9 +847,6 @@ public:
// Constant pointer to array
static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
- // Convenience
- static const TypeAryPtr *make(ciObject* o);
-
// Return a 'ptr' version of this type
virtual const Type *cast_to_ptr_type(PTR ptr) const;
diff --git a/hotspot/src/share/vm/prims/jni.cpp b/hotspot/src/share/vm/prims/jni.cpp
index 20484da07a9..fbf66099a79 100644
--- a/hotspot/src/share/vm/prims/jni.cpp
+++ b/hotspot/src/share/vm/prims/jni.cpp
@@ -396,11 +396,11 @@ JNI_ENTRY(jmethodID, jni_FromReflectedMethod(JNIEnv *env, jobject method))
oop mirror = NULL;
int slot = 0;
- if (reflected->klass() == SystemDictionary::reflect_constructor_klass()) {
+ if (reflected->klass() == SystemDictionary::reflect_Constructor_klass()) {
mirror = java_lang_reflect_Constructor::clazz(reflected);
slot = java_lang_reflect_Constructor::slot(reflected);
} else {
- assert(reflected->klass() == SystemDictionary::reflect_method_klass(), "wrong type");
+ assert(reflected->klass() == SystemDictionary::reflect_Method_klass(), "wrong type");
mirror = java_lang_reflect_Method::clazz(reflected);
slot = java_lang_reflect_Method::slot(reflected);
}
@@ -496,7 +496,7 @@ JNI_ENTRY(jclass, jni_GetSuperclass(JNIEnv *env, jclass sub))
klassOop super = Klass::cast(k)->java_super();
// super2 is the value computed by the compiler's getSuperClass intrinsic:
debug_only(klassOop super2 = ( Klass::cast(k)->oop_is_javaArray()
- ? SystemDictionary::object_klass()
+ ? SystemDictionary::Object_klass()
: Klass::cast(k)->super() ) );
assert(super == super2,
"java_super computation depends on interface, array, other super");
@@ -584,7 +584,7 @@ JNI_ENTRY_NO_PRESERVE(void, jni_ExceptionDescribe(JNIEnv *env))
if (thread->has_pending_exception()) {
Handle ex(thread, thread->pending_exception());
thread->clear_pending_exception();
- if (ex->is_a(SystemDictionary::threaddeath_klass())) {
+ if (ex->is_a(SystemDictionary::ThreadDeath_klass())) {
// Don't print anything if we are being killed.
} else {
jio_fprintf(defaultStream::error_stream(), "Exception ");
@@ -593,12 +593,12 @@ JNI_ENTRY_NO_PRESERVE(void, jni_ExceptionDescribe(JNIEnv *env))
jio_fprintf(defaultStream::error_stream(),
"in thread \"%s\" ", thread->get_thread_name());
}
- if (ex->is_a(SystemDictionary::throwable_klass())) {
+ if (ex->is_a(SystemDictionary::Throwable_klass())) {
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
ex,
KlassHandle(THREAD,
- SystemDictionary::throwable_klass()),
+ SystemDictionary::Throwable_klass()),
vmSymbolHandles::printStackTrace_name(),
vmSymbolHandles::void_method_signature(),
THREAD);
@@ -3231,6 +3231,21 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, v
jint result = JNI_ERR;
DT_RETURN_MARK(CreateJavaVM, jint, (const jint&)result);
+ // We're about to use Atomic::xchg for synchronization. Some Zero
+ // platforms use the GCC builtin __sync_lock_test_and_set for this,
+ // but __sync_lock_test_and_set is not guaranteed to do what we want
+ // on all architectures. So we check it works before relying on it.
+#if defined(ZERO) && defined(ASSERT)
+ {
+ jint a = 0xcafebabe;
+ jint b = Atomic::xchg(0xdeadbeef, &a);
+ void *c = &a;
+ void *d = Atomic::xchg_ptr(&b, &c);
+ assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
+ assert(c == &b && d == &a, "Atomic::xchg_ptr() works");
+ }
+#endif // ZERO && ASSERT
+
// At the moment it's only possible to have one Java VM,
// since some of the runtime state is in global variables.
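The assertions added above pin down the two properties an exchange primitive must have: it returns the previous value and installs the new one. The same contract, sketched with java.util.concurrent.atomic rather than the VM-internal Atomic class (an analogy, not the code under test):

```java
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class XchgSemantics {
    public static void main(String[] args) {
        AtomicInteger a = new AtomicInteger(0xcafebabe);
        int b = a.getAndSet(0xdeadbeef);                  // like Atomic::xchg(0xdeadbeef, &a)
        System.out.println(a.get() == 0xdeadbeef && b == 0xcafebabe);

        AtomicReference<String> c = new AtomicReference<String>("old");
        String d = c.getAndSet("new");                    // like Atomic::xchg_ptr(&b, &c)
        System.out.println("new".equals(c.get()) && "old".equals(d));
    }
}
```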
diff --git a/hotspot/src/share/vm/prims/jniCheck.cpp b/hotspot/src/share/vm/prims/jniCheck.cpp
index 0e3baac47ed..43c17cc392b 100644
--- a/hotspot/src/share/vm/prims/jniCheck.cpp
+++ b/hotspot/src/share/vm/prims/jniCheck.cpp
@@ -341,7 +341,7 @@ klassOop jniCheck::validate_class(JavaThread* thr, jclass clazz, bool allow_prim
ReportJNIFatalError(thr, fatal_received_null_class);
}
- if (mirror->klass() != SystemDictionary::class_klass()) {
+ if (mirror->klass() != SystemDictionary::Class_klass()) {
ReportJNIFatalError(thr, fatal_class_not_a_class);
}
@@ -358,7 +358,7 @@ void jniCheck::validate_throwable_klass(JavaThread* thr, klassOop klass) {
assert(klass != NULL, "klass argument must have a value");
if (!Klass::cast(klass)->oop_is_instance() ||
- !instanceKlass::cast(klass)->is_subclass_of(SystemDictionary::throwable_klass())) {
+ !instanceKlass::cast(klass)->is_subclass_of(SystemDictionary::Throwable_klass())) {
ReportJNIFatalError(thr, fatal_class_not_a_throwable_class);
}
}
diff --git a/hotspot/src/share/vm/prims/jvm.cpp b/hotspot/src/share/vm/prims/jvm.cpp
index 36c6507ee72..7ddb08d9967 100644
--- a/hotspot/src/share/vm/prims/jvm.cpp
+++ b/hotspot/src/share/vm/prims/jvm.cpp
@@ -80,7 +80,7 @@ static void trace_class_resolution_impl(klassOop to_class, TRAPS) {
while (!vfst.at_end()) {
methodOop m = vfst.method();
- if (!vfst.method()->method_holder()->klass_part()->is_subclass_of(SystemDictionary::classloader_klass())&&
+ if (!vfst.method()->method_holder()->klass_part()->is_subclass_of(SystemDictionary::ClassLoader_klass())&&
!vfst.method()->method_holder()->klass_part()->is_subclass_of(access_controller_klass) &&
!vfst.method()->method_holder()->klass_part()->is_subclass_of(privileged_action_klass)) {
break;
@@ -257,7 +257,7 @@ static void set_property(Handle props, const char* key, const char* value, TRAPS
Handle value_str = java_lang_String::create_from_platform_dependent_str((value != NULL ? value : ""), CHECK);
JavaCalls::call_virtual(&r,
props,
- KlassHandle(THREAD, SystemDictionary::properties_klass()),
+ KlassHandle(THREAD, SystemDictionary::Properties_klass()),
vmSymbolHandles::put_name(),
vmSymbolHandles::object_object_object_signature(),
key_str,
@@ -495,7 +495,7 @@ JVM_ENTRY(jobject, JVM_Clone(JNIEnv* env, jobject handle))
guarantee(klass->is_cloneable(), "all arrays are cloneable");
} else {
guarantee(obj->is_instance(), "should be instanceOop");
- bool cloneable = klass->is_subtype_of(SystemDictionary::cloneable_klass());
+ bool cloneable = klass->is_subtype_of(SystemDictionary::Cloneable_klass());
guarantee(cloneable == klass->is_cloneable(), "incorrect cloneable flag");
}
#endif
@@ -908,7 +908,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassInterfaces(JNIEnv *env, jclass cls))
// Special handling for primitive objects
if (java_lang_Class::is_primitive(mirror)) {
// Primitive objects does not have any interfaces
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, r);
}
@@ -923,7 +923,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassInterfaces(JNIEnv *env, jclass cls))
}
// Allocate result array
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), size, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), size, CHECK_NULL);
objArrayHandle result (THREAD, r);
// Fill in result
if (klass->oop_is_instance()) {
@@ -934,8 +934,8 @@ JVM_ENTRY(jobjectArray, JVM_GetClassInterfaces(JNIEnv *env, jclass cls))
}
} else {
// All arrays implement java.lang.Cloneable and java.io.Serializable
- result->obj_at_put(0, Klass::cast(SystemDictionary::cloneable_klass())->java_mirror());
- result->obj_at_put(1, Klass::cast(SystemDictionary::serializable_klass())->java_mirror());
+ result->obj_at_put(0, Klass::cast(SystemDictionary::Cloneable_klass())->java_mirror());
+ result->obj_at_put(1, Klass::cast(SystemDictionary::Serializable_klass())->java_mirror());
}
return (jobjectArray) JNIHandles::make_local(env, result());
JVM_END
@@ -1098,8 +1098,8 @@ JVM_ENTRY(jobject, JVM_DoPrivileged(JNIEnv *env, jclass cls, jobject action, job
pending_exception = Handle(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
- if ( pending_exception->is_a(SystemDictionary::exception_klass()) &&
- !pending_exception->is_a(SystemDictionary::runtime_exception_klass())) {
+ if ( pending_exception->is_a(SystemDictionary::Exception_klass()) &&
+ !pending_exception->is_a(SystemDictionary::RuntimeException_klass())) {
// Throw a java.security.PrivilegedActionException(Exception e) exception
JavaCallArguments args(pending_exception);
THROW_ARG_0(vmSymbolHandles::java_security_PrivilegedActionException(),
@@ -1190,7 +1190,7 @@ JVM_ENTRY(jobject, JVM_GetStackAccessControlContext(JNIEnv *env, jclass cls))
// the resource area must be registered in case of a gc
RegisterArrayForGC ragc(thread, local_array);
- objArrayOop context = oopFactory::new_objArray(SystemDictionary::protectionDomain_klass(),
+ objArrayOop context = oopFactory::new_objArray(SystemDictionary::ProtectionDomain_klass(),
local_array->length(), CHECK_NULL);
objArrayHandle h_context(thread, context);
for (int index = 0; index < local_array->length(); index++) {
@@ -1251,7 +1251,7 @@ JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
! Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_instance()) {
- oop result = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+ oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
return (jobjectArray)JNIHandles::make_local(env, result);
}
@@ -1259,7 +1259,7 @@ JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
if (k->inner_classes()->length() == 0) {
// Neither an inner nor outer class
- oop result = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+ oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
return (jobjectArray)JNIHandles::make_local(env, result);
}
@@ -1269,7 +1269,7 @@ JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
int length = icls->length();
// Allocate temp. result array
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), length/4, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), length/4, CHECK_NULL);
objArrayHandle result (THREAD, r);
int members = 0;
@@ -1299,7 +1299,7 @@ JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
if (members != length) {
// Return array of right length
- objArrayOop res = oopFactory::new_objArray(SystemDictionary::class_klass(), members, CHECK_NULL);
+ objArrayOop res = oopFactory::new_objArray(SystemDictionary::Class_klass(), members, CHECK_NULL);
for(int i = 0; i < members; i++) {
res->obj_at_put(i, result->obj_at(i));
}
@@ -1318,19 +1318,20 @@ JVM_ENTRY(jclass, JVM_GetDeclaringClass(JNIEnv *env, jclass ofClass))
return NULL;
}
- symbolOop simple_name = NULL;
+ bool inner_is_member = false;
klassOop outer_klass
= instanceKlass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass))
- )->compute_enclosing_class(simple_name, CHECK_NULL);
+ )->compute_enclosing_class(&inner_is_member, CHECK_NULL);
if (outer_klass == NULL) return NULL; // already a top-level class
- if (simple_name == NULL) return NULL; // an anonymous class (inside a method)
+ if (!inner_is_member) return NULL; // an anonymous class (inside a method)
return (jclass) JNIHandles::make_local(env, Klass::cast(outer_klass)->java_mirror());
}
JVM_END
// should be in instanceKlass.cpp, but is here for historical reasons
klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
- symbolOop& simple_name_result, TRAPS) {
+ bool* inner_is_member,
+ TRAPS) {
Thread* thread = THREAD;
const int inner_class_info_index = inner_class_inner_class_info_offset;
const int outer_class_info_index = inner_class_outer_class_info_offset;
@@ -1347,8 +1348,7 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
bool found = false;
klassOop ok;
instanceKlassHandle outer_klass;
- bool inner_is_member = false;
- int simple_name_index = 0;
+ *inner_is_member = false;
// Find inner_klass attribute
for (int i = 0; i < i_length && !found; i += inner_class_next_offset) {
@@ -1364,8 +1364,7 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
if (found && ooff != 0) {
ok = i_cp->klass_at(ooff, CHECK_NULL);
outer_klass = instanceKlassHandle(thread, ok);
- simple_name_index = noff;
- inner_is_member = true;
+ *inner_is_member = true;
}
}
}
@@ -1377,7 +1376,7 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
if (encl_method_class_idx != 0) {
ok = i_cp->klass_at(encl_method_class_idx, CHECK_NULL);
outer_klass = instanceKlassHandle(thread, ok);
- inner_is_member = false;
+ *inner_is_member = false;
}
}
@@ -1387,9 +1386,7 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
// Throws an exception if outer klass has not declared k as an inner klass
// We need evidence that each klass knows about the other, or else
// the system could allow a spoof of an inner class to gain access rights.
- Reflection::check_for_inner_class(outer_klass, k, inner_is_member, CHECK_NULL);
-
- simple_name_result = (inner_is_member ? i_cp->symbol_at(simple_name_index) : symbolOop(NULL));
+ Reflection::check_for_inner_class(outer_klass, k, *inner_is_member, CHECK_NULL);
return outer_klass();
}
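The switch from a simple-name out-parameter to an inner_is_member flag matches what the caller actually needs: JVM_GetDeclaringClass reports the outer class only for member classes, while anonymous and local classes (which carry an EnclosingMethod attribute rather than a member InnerClasses entry) report null. The same distinction as seen from Java code:

```java
public class Enclosing {
    static class Member {}                                     // member class

    public static void main(String[] args) {
        Runnable anon = new Runnable() { public void run() {} };
        class Local {}

        System.out.println(Member.class.getDeclaringClass());     // class Enclosing
        System.out.println(anon.getClass().getDeclaringClass());  // null (anonymous)
        System.out.println(Local.class.getDeclaringClass());      // null (local)
        System.out.println(anon.getClass().getEnclosingClass());  // class Enclosing
    }
}
```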
@@ -1473,11 +1470,11 @@ static methodOop jvm_get_method_common(jobject method, TRAPS) {
oop mirror = NULL;
int slot = 0;
- if (reflected->klass() == SystemDictionary::reflect_constructor_klass()) {
+ if (reflected->klass() == SystemDictionary::reflect_Constructor_klass()) {
mirror = java_lang_reflect_Constructor::clazz(reflected);
slot = java_lang_reflect_Constructor::slot(reflected);
} else {
- assert(reflected->klass() == SystemDictionary::reflect_method_klass(),
+ assert(reflected->klass() == SystemDictionary::reflect_Method_klass(),
"wrong type");
mirror = java_lang_reflect_Method::clazz(reflected);
slot = java_lang_reflect_Method::slot(reflected);
@@ -1533,7 +1530,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredFields(JNIEnv *env, jclass ofClass,
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
// Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_field_klass(), 0, CHECK_NULL);
+ oop res = oopFactory::new_objArray(SystemDictionary::reflect_Field_klass(), 0, CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, res);
}
@@ -1561,13 +1558,13 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredFields(JNIEnv *env, jclass ofClass,
} else {
num_fields = fields_len / instanceKlass::next_offset;
- if (k() == SystemDictionary::throwable_klass()) {
+ if (k() == SystemDictionary::Throwable_klass()) {
num_fields--;
skip_backtrace = true;
}
}
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_field_klass(), num_fields, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Field_klass(), num_fields, CHECK_NULL);
objArrayHandle result (THREAD, r);
int out_idx = 0;
@@ -1601,7 +1598,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass,
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
|| Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
// Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_method_klass(), 0, CHECK_NULL);
+ oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, res);
}
@@ -1625,7 +1622,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass,
}
// Allocate result
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_method_klass(), num_methods, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL);
objArrayHandle result (THREAD, r);
int out_idx = 0;
@@ -1653,7 +1650,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredConstructors(JNIEnv *env, jclass ofC
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
|| Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
// Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), 0 , CHECK_NULL);
+ oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, res);
}
@@ -1677,7 +1674,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredConstructors(JNIEnv *env, jclass ofC
}
// Allocate result
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), num_constructors, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL);
objArrayHandle result(THREAD, r);
int out_idx = 0;
@@ -1890,7 +1887,7 @@ JVM_ENTRY(jobjectArray, JVM_ConstantPoolGetMemberRefInfoAt(JNIEnv *env, jobject
symbolHandle klass_name (THREAD, cp->klass_name_at(klass_ref));
symbolHandle member_name(THREAD, cp->uncached_name_ref_at(index));
symbolHandle member_sig (THREAD, cp->uncached_signature_ref_at(index));
- objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::string_klass(), 3, CHECK_NULL);
+ objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::String_klass(), 3, CHECK_NULL);
objArrayHandle dest(THREAD, dest_o);
Handle str = java_lang_String::create_from_symbol(klass_name, CHECK_NULL);
dest->obj_at_put(0, str());
@@ -2257,10 +2254,8 @@ JVM_ENTRY(const char*, JVM_GetCPMethodNameUTF(JNIEnv *env, jclass cls, jint cp_i
switch (cp->tag_at(cp_index).value()) {
case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_Methodref:
+ case JVM_CONSTANT_NameAndType: // for invokedynamic
return cp->uncached_name_ref_at(cp_index)->as_utf8();
- case JVM_CONSTANT_NameAndType:
- // for invokedynamic
- return cp->nt_name_ref_at(cp_index)->as_utf8();
default:
fatal("JVM_GetCPMethodNameUTF: illegal constant");
}
@@ -2277,10 +2272,8 @@ JVM_ENTRY(const char*, JVM_GetCPMethodSignatureUTF(JNIEnv *env, jclass cls, jint
switch (cp->tag_at(cp_index).value()) {
case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_Methodref:
+ case JVM_CONSTANT_NameAndType: // for invokedynamic
return cp->uncached_signature_ref_at(cp_index)->as_utf8();
- case JVM_CONSTANT_NameAndType:
- // for invokedynamic
- return cp->nt_signature_ref_at(cp_index)->as_utf8();
default:
fatal("JVM_GetCPMethodSignatureUTF: illegal constant");
}
@@ -2582,7 +2575,7 @@ static void thread_entry(JavaThread* thread, TRAPS) {
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
obj,
- KlassHandle(THREAD, SystemDictionary::thread_klass()),
+ KlassHandle(THREAD, SystemDictionary::Thread_klass()),
vmSymbolHandles::run_method_name(),
vmSymbolHandles::void_method_signature(),
THREAD);
@@ -2680,7 +2673,7 @@ JVM_ENTRY(void, JVM_StopThread(JNIEnv* env, jobject jthread, jobject throwable))
// Fix for 4314342, 4145910, perhaps others: it now doesn't have
// any effect on the "liveness" of a thread; see
// JVM_IsThreadAlive, below.
- if (java_throwable->is_a(SystemDictionary::threaddeath_klass())) {
+ if (java_throwable->is_a(SystemDictionary::ThreadDeath_klass())) {
java_lang_Thread::set_stillborn(java_thread);
}
THROW_OOP(java_throwable);
@@ -3035,7 +3028,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
}
// Create result array of type [Ljava/lang/Class;
- objArrayOop result = oopFactory::new_objArray(SystemDictionary::class_klass(), depth, CHECK_NULL);
+ objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL);
// Fill in mirrors corresponding to method holders
int index = 0;
while (first != NULL) {
@@ -4331,7 +4324,7 @@ JVM_ENTRY(jobjectArray, JVM_GetAllThreads(JNIEnv *env, jclass dummy))
JvmtiVMObjectAllocEventCollector oam;
int num_threads = tle.num_threads();
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::thread_klass(), num_threads, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Thread_klass(), num_threads, CHECK_NULL);
objArrayHandle threads_ah(THREAD, r);
for (int i = 0; i < num_threads; i++) {
@@ -4365,7 +4358,7 @@ JVM_ENTRY(jobjectArray, JVM_DumpThreads(JNIEnv *env, jclass threadClass, jobject
// check if threads is not an array of objects of Thread class
klassOop k = objArrayKlass::cast(ah->klass())->element_klass();
- if (k != SystemDictionary::thread_klass()) {
+ if (k != SystemDictionary::Thread_klass()) {
THROW_(vmSymbols::java_lang_IllegalArgumentException(), 0);
}
@@ -4425,7 +4418,7 @@ JVM_ENTRY(jobjectArray, JVM_GetEnclosingMethodInfo(JNIEnv *env, jclass ofClass))
if (encl_method_class_idx == 0) {
return NULL;
}
- objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::object_klass(), 3, CHECK_NULL);
+ objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::Object_klass(), 3, CHECK_NULL);
objArrayHandle dest(THREAD, dest_o);
klassOop enc_k = ik_h->constants()->klass_at(encl_method_class_idx, CHECK_NULL);
dest->obj_at_put(0, Klass::cast(enc_k)->java_mirror());
@@ -4539,7 +4532,7 @@ JVM_ENTRY(jobjectArray, JVM_GetThreadStateNames(JNIEnv* env,
values_h->int_at(0) == java_lang_Thread::NEW,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
1, /* only 1 substate */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4552,7 +4545,7 @@ JVM_ENTRY(jobjectArray, JVM_GetThreadStateNames(JNIEnv* env,
values_h->int_at(0) == java_lang_Thread::RUNNABLE,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
1, /* only 1 substate */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4565,7 +4558,7 @@ JVM_ENTRY(jobjectArray, JVM_GetThreadStateNames(JNIEnv* env,
values_h->int_at(0) == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
1, /* only 1 substate */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4578,7 +4571,7 @@ JVM_ENTRY(jobjectArray, JVM_GetThreadStateNames(JNIEnv* env,
values_h->int_at(0) == java_lang_Thread::IN_OBJECT_WAIT &&
values_h->int_at(1) == java_lang_Thread::PARKED,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
2, /* number of substates */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4596,7 +4589,7 @@ JVM_ENTRY(jobjectArray, JVM_GetThreadStateNames(JNIEnv* env,
values_h->int_at(1) == java_lang_Thread::IN_OBJECT_WAIT_TIMED &&
values_h->int_at(2) == java_lang_Thread::PARKED_TIMED,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
3, /* number of substates */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4615,7 +4608,7 @@ JVM_ENTRY(jobjectArray, JVM_GetThreadStateNames(JNIEnv* env,
assert(values_h->length() == 1 &&
values_h->int_at(0) == java_lang_Thread::TERMINATED,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
1, /* only 1 substate */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4650,4 +4643,3 @@ JVM_ENTRY(void, JVM_GetVersionInfo(JNIEnv* env, jvm_version_info* info, size_t i
#endif // KERNEL
}
JVM_END
-
diff --git a/hotspot/src/share/vm/prims/jvmtiEnter.xsl b/hotspot/src/share/vm/prims/jvmtiEnter.xsl
index 6380ca3ee0a..28044aafd75 100644
--- a/hotspot/src/share/vm/prims/jvmtiEnter.xsl
+++ b/hotspot/src/share/vm/prims/jvmtiEnter.xsl
@@ -773,7 +773,7 @@ static jvmtiError JNICALL
}
- if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
JVMTI_ERROR_INVALID_THREAD
@@ -857,7 +857,7 @@ static jvmtiError JNICALL
}
- if (!k_mirror->is_a(SystemDictionary::class_klass())) {
+ if (!k_mirror->is_a(SystemDictionary::Class_klass())) {
JVMTI_ERROR_INVALID_CLASS
diff --git a/hotspot/src/share/vm/prims/jvmtiEnv.cpp b/hotspot/src/share/vm/prims/jvmtiEnv.cpp
index 95977f0092e..4ad9996baae 100644
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,15 +32,15 @@
// FIXLATER: hook into JvmtiTrace
#define TraceJVMTICalls false
-JvmtiEnv::JvmtiEnv() : JvmtiEnvBase() {
+JvmtiEnv::JvmtiEnv(jint version) : JvmtiEnvBase(version) {
}
JvmtiEnv::~JvmtiEnv() {
}
JvmtiEnv*
-JvmtiEnv::create_a_jvmti() {
- return new JvmtiEnv();
+JvmtiEnv::create_a_jvmti(jint version) {
+ return new JvmtiEnv(version);
}
// VM operation class to copy jni function table at safepoint.
@@ -133,7 +133,7 @@ JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) {
if (thread_oop == NULL) {
return JVMTI_ERROR_INVALID_THREAD;
}
- if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
return JVMTI_ERROR_INVALID_THREAD;
}
JavaThread* java_thread = java_lang_Thread::thread(thread_oop);
@@ -199,7 +199,7 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) {
if (k_mirror == NULL) {
return JVMTI_ERROR_INVALID_CLASS;
}
- if (!k_mirror->is_a(SystemDictionary::class_klass())) {
+ if (!k_mirror->is_a(SystemDictionary::Class_klass())) {
return JVMTI_ERROR_INVALID_CLASS;
}
@@ -266,7 +266,7 @@ JvmtiEnv::GetObjectSize(jobject object, jlong* size_ptr) {
oop mirror = JNIHandles::resolve_external_guard(object);
NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
- if (mirror->klass() == SystemDictionary::class_klass()) {
+ if (mirror->klass() == SystemDictionary::Class_klass()) {
if (!java_lang_Class::is_primitive(mirror)) {
mirror = java_lang_Class::as_klassOop(mirror);
assert(mirror != NULL, "class for non-primitive mirror must exist");
@@ -327,7 +327,7 @@ JvmtiEnv::SetEventNotificationMode(jvmtiEventMode mode, jvmtiEvent event_type, j
if (thread_oop == NULL) {
return JVMTI_ERROR_INVALID_THREAD;
}
- if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
return JVMTI_ERROR_INVALID_THREAD;
}
java_thread = java_lang_Thread::thread(thread_oop);
@@ -411,8 +411,15 @@ JvmtiEnv::AddToBootstrapClassLoaderSearch(const char* segment) {
if (phase == JVMTI_PHASE_ONLOAD) {
Arguments::append_sysclasspath(segment);
return JVMTI_ERROR_NONE;
- } else {
- assert(phase == JVMTI_PHASE_LIVE, "sanity check");
+ } else if (use_version_1_0_semantics()) {
+ // This JvmtiEnv requested version 1.0 semantics and this function
+ // is only allowed in the ONLOAD phase in version 1.0 so we need to
+ // return an error here.
+ return JVMTI_ERROR_WRONG_PHASE;
+ } else if (phase == JVMTI_PHASE_LIVE) {
+ // The phase is checked by the wrapper that called this function,
+ // but this thread could be racing with the thread that is
+ // terminating the VM so we check one more time.
// create the zip entry
ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
@@ -433,6 +440,8 @@ JvmtiEnv::AddToBootstrapClassLoaderSearch(const char* segment) {
}
ClassLoader::add_to_list(zip_entry);
return JVMTI_ERROR_NONE;
+ } else {
+ return JVMTI_ERROR_WRONG_PHASE;
}
} /* end AddToBootstrapClassLoaderSearch */
@@ -451,11 +460,12 @@ JvmtiEnv::AddToSystemClassLoaderSearch(const char* segment) {
}
}
return JVMTI_ERROR_NONE;
- } else {
+ } else if (phase == JVMTI_PHASE_LIVE) {
+ // The phase is checked by the wrapper that called this function,
+ // but this thread could be racing with the thread that is
+ // terminating the VM so we check one more time.
HandleMark hm;
- assert(phase == JVMTI_PHASE_LIVE, "sanity check");
-
// create the zip entry (which will open the zip file and hence
// check that the segment is indeed a zip file).
ClassPathZipEntry* zip_entry = ClassLoader::create_class_path_zip_entry(segment);
@@ -504,6 +514,8 @@ JvmtiEnv::AddToSystemClassLoaderSearch(const char* segment) {
}
return JVMTI_ERROR_NONE;
+ } else {
+ return JVMTI_ERROR_WRONG_PHASE;
}
} /* end AddToSystemClassLoaderSearch */
@@ -580,7 +592,6 @@ JvmtiEnv::SetVerboseFlag(jvmtiVerboseFlag flag, jboolean value) {
break;
case JVMTI_VERBOSE_GC:
PrintGC = value != 0;
- TraceClassUnloading = value != 0;
break;
case JVMTI_VERBOSE_JNI:
PrintJNIResolving = value != 0;
@@ -620,7 +631,7 @@ JvmtiEnv::GetThreadState(jthread thread, jint* thread_state_ptr) {
thread_oop = JNIHandles::resolve_external_guard(thread);
}
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
return JVMTI_ERROR_INVALID_THREAD;
}
@@ -858,7 +869,7 @@ JvmtiEnv::StopThread(JavaThread* java_thread, jobject exception) {
jvmtiError
JvmtiEnv::InterruptThread(jthread thread) {
oop thread_oop = JNIHandles::resolve_external_guard(thread);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass()))
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass()))
return JVMTI_ERROR_INVALID_THREAD;
JavaThread* current_thread = JavaThread::current();
@@ -895,7 +906,7 @@ JvmtiEnv::GetThreadInfo(jthread thread, jvmtiThreadInfo* info_ptr) {
} else {
thread_oop = JNIHandles::resolve_external_guard(thread);
}
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass()))
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass()))
return JVMTI_ERROR_INVALID_THREAD;
Handle thread_obj(current_thread, thread_oop);
@@ -1061,7 +1072,7 @@ JvmtiEnv::GetCurrentContendedMonitor(JavaThread* java_thread, jobject* monitor_p
jvmtiError
JvmtiEnv::RunAgentThread(jthread thread, jvmtiStartFunction proc, const void* arg, jint priority) {
oop thread_oop = JNIHandles::resolve_external_guard(thread);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
return JVMTI_ERROR_INVALID_THREAD;
}
if (priority < JVMTI_THREAD_MIN_PRIORITY || priority > JVMTI_THREAD_MAX_PRIORITY) {
@@ -2863,6 +2874,14 @@ JvmtiEnv::IsMethodSynthetic(methodOop method_oop, jboolean* is_synthetic_ptr) {
// is_obsolete_ptr - pre-checked for NULL
jvmtiError
JvmtiEnv::IsMethodObsolete(methodOop method_oop, jboolean* is_obsolete_ptr) {
+ if (use_version_1_0_semantics() &&
+ get_capabilities()->can_redefine_classes == 0) {
+ // This JvmtiEnv requested version 1.0 semantics and this function
+ // requires the can_redefine_classes capability in version 1.0 so
+ // we need to return an error here.
+ return JVMTI_ERROR_MUST_POSSESS_CAPABILITY;
+ }
+
if (method_oop == NULL || method_oop->is_obsolete()) {
*is_obsolete_ptr = true;
} else {
diff --git a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp
index c6526995912..24311492bee 100644
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp
@@ -123,7 +123,26 @@ JvmtiEnvBase::is_valid() {
}
-JvmtiEnvBase::JvmtiEnvBase() : _env_event_enable() {
+bool
+JvmtiEnvBase::use_version_1_0_semantics() {
+ int major, minor, micro;
+
+ JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
+ return major == 1 && minor == 0; // micro version doesn't matter here
+}
+
+
+bool
+JvmtiEnvBase::use_version_1_1_semantics() {
+ int major, minor, micro;
+
+ JvmtiExport::decode_version_values(_version, &major, &minor, &micro);
+ return major == 1 && minor == 1; // micro version doesn't matter here
+}
+
+
+JvmtiEnvBase::JvmtiEnvBase(jint version) : _env_event_enable() {
+ _version = version;
_env_local_storage = NULL;
_tag_map = NULL;
_native_method_prefix_count = 0;
@@ -508,7 +527,7 @@ JvmtiEnvBase::new_jthreadGroupArray(int length, Handle *handles) {
JavaThread *
JvmtiEnvBase::get_JavaThread(jthread jni_thread) {
oop t = JNIHandles::resolve_external_guard(jni_thread);
- if (t == NULL || !t->is_a(SystemDictionary::thread_klass())) {
+ if (t == NULL || !t->is_a(SystemDictionary::Thread_klass())) {
return NULL;
}
// The following returns NULL if the thread has not yet run or is in
@@ -1250,7 +1269,7 @@ VM_GetThreadListStackTraces::doit() {
for (int i = 0; i < _thread_count; ++i) {
jthread jt = _thread_list[i];
oop thread_oop = JNIHandles::resolve_external_guard(jt);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
set_result(JVMTI_ERROR_INVALID_THREAD);
return;
}
diff --git a/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp b/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp
index e6dd31e5870..8ad31f9ebf8 100644
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -76,6 +76,7 @@ class JvmtiEnvBase : public CHeapObj {
jvmtiEnv _jvmti_external;
jint _magic;
+ jint _version; // version value passed to JNI GetEnv()
JvmtiEnvBase* _next;
bool _is_retransformable;
const void *_env_local_storage; // per env agent allocated data.
@@ -91,7 +92,7 @@ class JvmtiEnvBase : public CHeapObj {
int _native_method_prefix_count;
protected:
- JvmtiEnvBase();
+ JvmtiEnvBase(jint version);
~JvmtiEnvBase();
void dispose();
void env_dispose();
@@ -122,6 +123,9 @@ class JvmtiEnvBase : public CHeapObj {
bool is_valid();
+ bool use_version_1_0_semantics(); // agent asked for version 1.0
+ bool use_version_1_1_semantics(); // agent asked for version 1.1
+
bool is_retransformable() { return _is_retransformable; }
static ByteSize jvmti_external_offset() {
diff --git a/hotspot/src/share/vm/prims/jvmtiExport.cpp b/hotspot/src/share/vm/prims/jvmtiExport.cpp
index 7a5068b43b3..12af0d9f4f2 100644
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp
@@ -319,7 +319,27 @@ address JvmtiExport::get_field_modification_count_addr() {
jint
JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
- /* To Do: add version checks */
+ // The JVMTI_VERSION_INTERFACE_JVMTI part of the version number
+ // has already been validated in JNI GetEnv().
+ int major, minor, micro;
+
+ // micro version doesn't matter here (yet?)
+ decode_version_values(version, &major, &minor, &micro);
+ switch (major) {
+ case 1:
+ switch (minor) {
+ case 0: // version 1.0 is recognized
+ case 1: // version 1.1 is recognized
+ break;
+
+ default:
+ return JNI_EVERSION; // unsupported minor version number
+ }
+ break;
+
+ default:
+ return JNI_EVERSION; // unsupported major version number
+ }
if (JvmtiEnv::get_phase() == JVMTI_PHASE_LIVE) {
JavaThread* current_thread = (JavaThread*) ThreadLocalStorage::thread();
@@ -328,13 +348,13 @@ JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
__ENTRY(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread)
debug_only(VMNativeEntryWrapper __vew;)
- JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
+ JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti(version);
*penv = jvmti_env->jvmti_external(); // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
return JNI_OK;
} else if (JvmtiEnv::get_phase() == JVMTI_PHASE_ONLOAD) {
// not live, no thread to transition
- JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti();
+ JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti(version);
*penv = jvmti_env->jvmti_external(); // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
return JNI_OK;
@@ -345,6 +365,15 @@ JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
}
}
+
+void
+JvmtiExport::decode_version_values(jint version, int * major, int * minor,
+ int * micro) {
+ *major = (version & JVMTI_VERSION_MASK_MAJOR) >> JVMTI_VERSION_SHIFT_MAJOR;
+ *minor = (version & JVMTI_VERSION_MASK_MINOR) >> JVMTI_VERSION_SHIFT_MINOR;
+ *micro = (version & JVMTI_VERSION_MASK_MICRO) >> JVMTI_VERSION_SHIFT_MICRO;
+}
+
void JvmtiExport::enter_primordial_phase() {
JvmtiEnvBase::set_phase(JVMTI_PHASE_PRIMORDIAL);
}
@@ -627,7 +656,7 @@ static inline klassOop oop_to_klassOop(oop obj) {
klassOop k = obj->klass();
// if the object is a java.lang.Class then return the java mirror
- if (k == SystemDictionary::class_klass()) {
+ if (k == SystemDictionary::Class_klass()) {
if (!java_lang_Class::is_primitive(obj)) {
k = java_lang_Class::as_klassOop(obj);
assert(k != NULL, "class for non-primitive mirror must exist");
@@ -1896,7 +1925,7 @@ void JvmtiExport::record_vm_internal_object_allocation(oop obj) {
if (collector != NULL && collector->is_enabled()) {
// Don't record classes as these will be notified via the ClassLoad
// event.
- if (obj->klass() != SystemDictionary::class_klass()) {
+ if (obj->klass() != SystemDictionary::Class_klass()) {
collector->record_allocation(obj);
}
}
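decode_version_values splits the jint that the agent passed to JNI GetEnv into major, minor and micro fields using the JVMTI_VERSION_MASK_* and JVMTI_VERSION_SHIFT_* constants from jvmti.h. A hedged Java sketch of the same arithmetic; the concrete mask/shift values and the sample encoded request below are illustrative assumptions, not copied from the header:

```java
public class JvmtiVersionDecode {
    static final int MASK_MAJOR = 0x0FFF0000, SHIFT_MAJOR = 16;   // assumed layout
    static final int MASK_MINOR = 0x0000FF00, SHIFT_MINOR = 8;
    static final int MASK_MICRO = 0x000000FF, SHIFT_MICRO = 0;

    static int[] decode(int version) {
        return new int[] {
            (version & MASK_MAJOR) >> SHIFT_MAJOR,
            (version & MASK_MINOR) >> SHIFT_MINOR,
            (version & MASK_MICRO) >> SHIFT_MICRO
        };
    }

    public static void main(String[] args) {
        int[] v = decode(0x30010203);                     // hypothetical encoded request
        // get_jvmti_interface accepts major 1 with minor 0 (1.0 semantics) or
        // minor 1 (1.1 semantics) and rejects everything else with JNI_EVERSION.
        System.out.printf("%d.%d.%d%n", v[0], v[1], v[2]);   // prints: 1.2.3
    }
}
```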
diff --git a/hotspot/src/share/vm/prims/jvmtiExport.hpp b/hotspot/src/share/vm/prims/jvmtiExport.hpp
index 54a9416f425..20214aecf9c 100644
--- a/hotspot/src/share/vm/prims/jvmtiExport.hpp
+++ b/hotspot/src/share/vm/prims/jvmtiExport.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -236,6 +236,8 @@ class JvmtiExport : public AllStatic {
static bool is_jvmti_version(jint version) { return (version & JVMTI_VERSION_MASK) == JVMTI_VERSION_VALUE; }
static bool is_jvmdi_version(jint version) { return (version & JVMTI_VERSION_MASK) == JVMDI_VERSION_VALUE; }
static jint get_jvmti_interface(JavaVM *jvm, void **penv, jint version);
+ static void decode_version_values(jint version, int * major, int * minor,
+ int * micro);
// single stepping management methods
static void at_single_stepping_point(JavaThread *thread, methodOop method, address location) KERNEL_RETURN;
diff --git a/hotspot/src/share/vm/prims/jvmtiHpp.xsl b/hotspot/src/share/vm/prims/jvmtiHpp.xsl
index 3b3b23e90f6..e5dd49ffc6c 100644
--- a/hotspot/src/share/vm/prims/jvmtiHpp.xsl
+++ b/hotspot/src/share/vm/prims/jvmtiHpp.xsl
@@ -1,6 +1,6 @@
+
+
+
diff --git a/jdk/make/modules/tools/nbproject/project.properties b/jdk/make/modules/tools/nbproject/project.properties
new file mode 100644
index 00000000000..84df879f82b
--- /dev/null
+++ b/jdk/make/modules/tools/nbproject/project.properties
@@ -0,0 +1,86 @@
+#
+# Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Sun designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Sun in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+# CA 95054 USA or visit www.sun.com if you need additional information or
+# have any questions.
+#
+
+application.title=classanalyzer
+application.vendor=mchung
+build.classes.dir=${build.dir}/classes
+build.classes.excludes=**/*.java,**/*.form
+
+# This directory is removed when the project is cleaned:
+build.dir=build
+build.generated.dir=${build.dir}/generated
+build.generated.sources.dir=${build.dir}/generated-sources
+
+# Only compile against the classpath explicitly listed here:
+build.sysclasspath=ignore
+build.test.classes.dir=${build.dir}/test/classes
+build.test.results.dir=${build.dir}/test/results
+
+cp.extra=${tools.jar}
+
+debug.classpath=\
+ ${run.classpath}
+debug.test.classpath=\
+ ${run.test.classpath}
+
+# This directory is removed when the project is cleaned:
+dist.dir=dist
+dist.jar=${dist.dir}/classanalyzer.jar
+dist.javadoc.dir=${dist.dir}/javadoc
+
+excludes=
+
+file.reference.tools.jar=${jdk.home}/lib/tools.jar
+file.reference.tools-src=src
+includes=**
+jar.compress=false
+javac.classpath=\
+ ${file.reference.tools.jar}
+javac.deprecation=false
+javac.source=1.5
+javac.target=1.5
+javac.test.classpath=
+javadoc.author=false
+javadoc.noindex=false
+javadoc.nonavbar=false
+javadoc.notree=false
+javadoc.private=false
+javadoc.splitindex=false
+javadoc.use=false
+javadoc.version=false
+main.class=com.sun.classanalyzer.ClassAnalyzer
+manifest.file=manifest.mf
+meta.inf.dir=${src.dir}/META-INF
+platform.active=JDK_1.6
+run.classpath=\
+ ${javac.classpath}:\
+ ${build.classes.dir}
+# Space-separated list of JVM arguments used when running the project
+# (you may also define separate properties like run-sys-prop.name=value instead of -Dname=value
+# or test-sys-prop.name=value to set system properties for unit tests):
+run.jvmargs=-Xmx256m
+run.test.classpath=
+source.encoding=UTF-8
+src.dir=${file.reference.tools-src}
diff --git a/jdk/make/modules/tools/nbproject/project.xml b/jdk/make/modules/tools/nbproject/project.xml
new file mode 100644
index 00000000000..bd83ccaeb9b
--- /dev/null
+++ b/jdk/make/modules/tools/nbproject/project.xml
@@ -0,0 +1,39 @@
+
+
+
+
+ org.netbeans.modules.java.j2seproject
+
+
+ classanalyzer
+
+
+
+
+
+
+
+
diff --git a/jdk/make/modules/tools/src/com/sun/classanalyzer/AnnotatedDependency.java b/jdk/make/modules/tools/src/com/sun/classanalyzer/AnnotatedDependency.java
new file mode 100644
index 00000000000..72b21d9949e
--- /dev/null
+++ b/jdk/make/modules/tools/src/com/sun/classanalyzer/AnnotatedDependency.java
@@ -0,0 +1,627 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+package com.sun.classanalyzer;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.Map;
+
+import com.sun.classanalyzer.Module.Reference;
+import java.util.LinkedList;
+import java.util.TreeMap;
+
+/**
+ *
+ * @author Mandy Chung
+ */
+public abstract class AnnotatedDependency implements Comparable<AnnotatedDependency> {
+
+ final Klass from;
+ final List<String> classes;
+ protected boolean optional;
+ String description;
+ Klass.Method method;
+ private List<Filter> filters = null;
+
+ public AnnotatedDependency(Klass klass) {
+ this(klass, false);
+ }
+
+ public AnnotatedDependency(Klass klass, boolean optional) {
+ this.from = klass;
+ this.classes = new ArrayList<String>();
+ this.optional = optional;
+ }
+
+ abstract String getTag();
+
+ abstract boolean isDynamic();
+
+ void setMethod(Klass.Method m) {
+ this.method = m;
+ }
+
+ void addElement(String element, List<String> value) {
+ if (element.equals("value")) {
+ addValue(value);
+ } else if (element.equals("description")) {
+ description = value.get(0);
+ } else if (element.equals("optional")) {
+ optional = value.get(0).equals("1") || Boolean.parseBoolean(value.get(0));
+ }
+ }
+
+ void addValue(List<String> value) {
+ for (String s : value) {
+ if ((s = s.trim()).length() > 0) {
+ classes.add(s);
+ }
+ }
+ }
+
+ List<String> getValue() {
+ return classes;
+ }
+
+ boolean isOptional() {
+ return optional;
+ }
+
+ boolean isEmpty() {
+ return classes.isEmpty();
+ }
+
+ boolean matches(String classname) {
+ synchronized (this) {
+ // initialize filters
+ if (filters == null) {
+ filters = new ArrayList<Filter>();
+ for (String pattern : classes) {
+ filters.add(new Filter(pattern));
+ }
+
+ }
+ }
+
+ for (Filter f : filters) {
+ if (f.matches(classname)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for (String v : getValue()) {
+ if (sb.length() == 0) {
+ sb.append(getTag());
+ sb.append("\n");
+ } else {
+ sb.append("\n");
+ }
+ sb.append(" ");
+ sb.append(from.getClassName()).append(" -> ");
+ sb.append(v);
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public int compareTo(AnnotatedDependency o) {
+ if (from == o.from) {
+ if (this.getClass().getName().equals(o.getClass().getName())) {
+ String s1 = classes.isEmpty() ? "" : classes.get(0);
+ String s2 = o.classes.isEmpty() ? "" : o.classes.get(0);
+ return s1.compareTo(s2);
+ } else {
+ return this.getClass().getName().compareTo(o.getClass().getName());
+ }
+
+ } else {
+ return from.compareTo(o.from);
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ int hashcode = 7 + 73 * from.hashCode();
+ for (String s : classes) {
+ hashcode ^= s.hashCode();
+ }
+ return hashcode;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof AnnotatedDependency)) {
+ return false;
+ }
+ AnnotatedDependency other = (AnnotatedDependency) obj;
+ boolean ret = this.from.equals(other.from) && this.classes.size() == other.classes.size();
+ if (ret == true) {
+ for (int i = 0; i < this.classes.size(); i++) {
+ ret = ret && this.classes.get(i).equals(other.classes.get(i));
+ }
+ }
+ return ret;
+ }
+
+ static class ClassForName extends AnnotatedDependency {
+
+ public ClassForName(Klass klass, boolean optional) {
+ super(klass, optional);
+ }
+
+ @Override
+ String getTag() {
+ if (this.optional) {
+ return TAG + "(optional)";
+ } else {
+ return TAG;
+ }
+ }
+
+ @Override
+ boolean isDynamic() {
+ return true;
+ }
+ static final String TYPE = "sun.annotation.ClassForName";
+ static final String TAG = "@ClassForName";
+ }
+
+ static class NativeFindClass extends AnnotatedDependency {
+
+ public NativeFindClass(Klass klass, boolean optional) {
+ super(klass, optional);
+ }
+
+ @Override
+ String getTag() {
+ if (this.optional) {
+ return TAG + "(optional)";
+ } else {
+ return TAG;
+ }
+ }
+
+ @Override
+ boolean isDynamic() {
+ return true;
+ }
+ static final String TYPE = "sun.annotation.NativeFindClass";
+ static final String TAG = "@NativeFindClass";
+ }
+
+ static class Provider extends AnnotatedDependency {
+
+ private List<String> services = new ArrayList<String>();
+
+ Provider(Klass klass) {
+ super(klass, true);
+ }
+
+ @Override
+ boolean isDynamic() {
+ return true;
+ }
+
+ public List<String> services() {
+ return services;
+ }
+
+ @Override
+ void addElement(String element, List<String> value) {
+ if (element.equals("service")) {
+ List<String> configFiles = new ArrayList<String>();
+ for (String s : value) {
+ if ((s = s.trim()).length() > 0) {
+ configFiles.add(metaInfPath + s);
+ }
+ }
+ addValue(configFiles);
+ }
+ }
+
+ @Override
+ void addValue(List<String> value) {
+ for (String s : value) {
+ if ((s = s.trim()).length() > 0) {
+ if (s.startsWith("META-INF")) {
+ services.add(s);
+ readServiceConfiguration(s, classes);
+ } else {
+ throw new RuntimeException("invalid value " + s);
+ }
+ }
+ }
+ }
+
+ boolean isEmpty() {
+ return services.isEmpty();
+ }
+ static final String metaInfPath =
+ "META-INF" + File.separator + "services" + File.separator;
+
+ static void readServiceConfiguration(String config, List<String> names) {
+ BufferedReader br = null;
+ try {
+ InputStream is = ClassPath.open(config);
+ if (is != null) {
+ // Properties doesn't preserve the order of the input file
+ br = new BufferedReader(new InputStreamReader(is, "utf-8"));
+ int lc = 1;
+ while ((lc = parseLine(br, lc, names)) >= 0);
+ }
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ } finally {
+ if (br != null) {
+ try {
+ br.close();
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ }
+ }
+
+ // Parse a single line from the given configuration file, adding the name
+ // on the line to the names list.
+ //
+ private static int parseLine(BufferedReader r, int lc, List<String> names) throws IOException {
+ String ln = r.readLine();
+ if (ln == null) {
+ return -1;
+ }
+ int ci = ln.indexOf('#');
+ if (ci >= 0) {
+ ln = ln.substring(0, ci);
+ }
+ ln = ln.trim();
+ int n = ln.length();
+ if (n != 0) {
+ if ((ln.indexOf(' ') >= 0) || (ln.indexOf('\t') >= 0)) {
+ throw new RuntimeException("Illegal configuration-file syntax");
+ }
+ int cp = ln.codePointAt(0);
+ if (!Character.isJavaIdentifierStart(cp)) {
+ throw new RuntimeException("Illegal provider-class name: " + ln);
+ }
+ for (int i = Character.charCount(cp); i < n; i += Character.charCount(cp)) {
+ cp = ln.codePointAt(i);
+ if (!Character.isJavaIdentifierPart(cp) && (cp != '.')) {
+ throw new RuntimeException("Illegal provider-class name: " + ln);
+ }
+ }
+ if (!names.contains(ln)) {
+ names.add(ln);
+ }
+ }
+ return lc + 1;
+ }
+
+ @Override
+ String getTag() {
+ return TAG;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof Provider)) {
+ return false;
+ }
+ Provider other = (Provider) obj;
+ boolean ret = this.from.equals(other.from) &&
+ this.services.size() == other.services.size();
+ if (ret == true) {
+ for (int i = 0; i < this.services.size(); i++) {
+ ret = ret && this.services.get(i).equals(other.services.get(i));
+ }
+ }
+ return ret;
+ }
+
+ @Override
+ public int hashCode() {
+ int hashcode = 7 + 73 * from.hashCode();
+ for (String s : services) {
+ hashcode ^= s.hashCode();
+ }
+ return hashcode;
+ }
+
+ @Override
+ public List<String> getValue() {
+ List<String> result = new ArrayList<String>();
+ result.addAll(services);
+ return result;
+ }
+ static final String TYPE = "sun.annotation.Provider";
+ static final String TAG = "@Provider";
+ }
+
+ static class OptionalDependency extends AnnotatedDependency {
+
+ static boolean isOptional(Klass from, Klass to) {
+ synchronized (OptionalDependency.class) {
+ if (optionalDepsMap == null) {
+ // Build a map of classes to its optional dependencies
+ initDependencies();
+ }
+ }
+ for (Reference ref : optionalDepsMap.keySet()) {
+ if (ref.referrer() == from && ref.referree() == to) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ OptionalDependency(Klass klass) {
+ super(klass, true);
+ }
+
+ @Override
+ boolean isDynamic() {
+ return false;
+ }
+
+ @Override
+ String getTag() {
+ return TAG;
+ }
+ static final String TYPE = "sun.annotation.Optional";
+ static final String TAG = "@Optional";
+ }
+
+ static class CompilerInline extends AnnotatedDependency {
+
+ public CompilerInline(Klass klass) {
+ super(klass);
+ }
+
+ @Override
+ String getTag() {
+ return TAG;
+ }
+
+ @Override
+ boolean isDynamic() {
+ return false;
+ }
+ static final String TYPE = "sun.annotation.Inline";
+ static final String TAG = "@Inline";
+ }
+
+ static class Filter {
+
+ final String pattern;
+ final String regex;
+
+ Filter(String pattern) {
+ this.pattern = pattern;
+
+ boolean isRegex = false;
+ for (int i = 0; i < pattern.length(); i++) {
+ char p = pattern.charAt(i);
+ if (p == '*' || p == '[' || p == ']') {
+ isRegex = true;
+ break;
+ }
+ }
+
+ if (isRegex) {
+ this.regex = convertToRegex(pattern);
+ } else {
+ this.regex = null;
+ }
+ }
+
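+ // Converts a wildcard pattern to a java.util.regex pattern. Illustrative
+ // conversions (assuming the '**' handling below consumes both stars):
+ //   "sun.nio.*"  becomes  "(sun.nio.)[^\.]*"  - classes directly in the package
+ //   "sun.nio.**" becomes  "(sun.nio.).*"      - classes in the package and any subpackage
+ //   "Name[s]"    becomes  "(Name)[s]"         - character classes are copied through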
+ private String convertToRegex(String pattern) {
+ StringBuilder sb = new StringBuilder();
+ int i = 0;
+ int index = 0;
+ int plen = pattern.length();
+ while (i < plen) {
+ char p = pattern.charAt(i);
+ if (p == '*') {
+ sb.append("(").append(pattern.substring(index, i)).append(")");
+ if (i + 1 < plen && pattern.charAt(i + 1) == '*') {
+ sb.append(".*");
+ index = i + 2;
+ // consume the second '*' as well; otherwise the next iteration
+ // would see it with index > i and substring() would fail
+ i++;
+ } else {
+ sb.append("[^\\.]*");
+ index = i + 1;
+ }
+ } else if (p == '[') {
+ int j = i + 1;
+ while (j < plen) {
+ if (pattern.charAt(j) == ']') {
+ break;
+ }
+ j++;
+ }
+ if (j >= plen || pattern.charAt(j) != ']') {
+ throw new RuntimeException("Malformed pattern " + pattern);
+ }
+ sb.append("(").append(pattern.substring(index, i)).append(")");
+ sb.append(pattern.substring(i, j + 1));
+ index = j + 1;
+ i = j;
+ }
+ i++;
+ }
+ if (index < plen) {
+ sb.append("(").append(pattern.substring(index, plen)).append(")");
+ }
+ return sb.toString();
+ }
+
+ boolean matches(String name) {
+ if (regex == null) {
+ // the pattern is not a regex
+ return name.equals(pattern);
+ } else {
+ return name.matches(regex);
+ }
+ }
+ }
+
+ static boolean isValidType(String type) {
+ if (type.endsWith("(optional)")) {
+ int len = type.length() - "(optional)".length();
+ type = type.substring(0, len);
+ }
+ return type.equals(ClassForName.TYPE) || type.equals(ClassForName.TAG) ||
+ type.equals(NativeFindClass.TYPE) || type.equals(NativeFindClass.TAG) ||
+ type.equals(Provider.TYPE) || type.equals(Provider.TAG) ||
+ type.equals(CompilerInline.TYPE) || type.equals(CompilerInline.TAG) ||
+ type.equals(OptionalDependency.TYPE) || type.equals(OptionalDependency.TAG);
+ }
+
+ static AnnotatedDependency newAnnotatedDependency(String tag, String value, Klass klass) {
+ AnnotatedDependency dep = newAnnotatedDependency(tag, klass);
+ if (dep != null) {
+ dep.addValue(Collections.singletonList(value));
+ }
+ return dep;
+ }
+ static List<AnnotatedDependency> annotatedDependencies = new LinkedList<AnnotatedDependency>();
+ static List<AnnotatedDependency> optionalDependencies = new LinkedList<AnnotatedDependency>();
+
+ static AnnotatedDependency newAnnotatedDependency(String type, Klass klass) {
+ boolean optional = false;
+ if (type.endsWith("(optional)")) {
+ optional = true;
+ int len = type.length() - "(optional)".length();
+ type = type.substring(0, len);
+ }
+
+ if (type.equals(OptionalDependency.TYPE) || type.equals(OptionalDependency.TAG)) {
+ return newOptionalDependency(klass);
+ }
+
+ AnnotatedDependency dep;
+ if (type.equals(ClassForName.TYPE) || type.equals(ClassForName.TAG)) {
+ dep = new ClassForName(klass, optional);
+ } else if (type.equals(NativeFindClass.TYPE) || type.equals(NativeFindClass.TAG)) {
+ dep = new NativeFindClass(klass, optional);
+ } else if (type.equals(Provider.TYPE) || type.equals(Provider.TAG)) {
+ dep = new Provider(klass);
+ } else if (type.equals(CompilerInline.TYPE) || type.equals(CompilerInline.TAG)) {
+ dep = new CompilerInline(klass);
+ } else {
+ return null;
+ }
+ klass.addAnnotatedDep(dep);
+ annotatedDependencies.add(dep);
+ return dep;
+ }
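+ // For illustration: a tag of "@ClassForName(optional)" yields a ClassForName
+ // dependency with optional == true, "@Provider" yields a Provider, and
+ // "@Optional" is redirected to newOptionalDependency(); unrecognized tags
+ // yield null.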
+
+ static OptionalDependency newOptionalDependency(Klass klass) {
+ OptionalDependency dep = new OptionalDependency(klass);
+ optionalDependencies.add(dep);
+ return dep;
+ }
+ static Map<Reference, Set<AnnotatedDependency>> annotatedDepsMap = null;
+ static Map<Reference, Set<AnnotatedDependency>> optionalDepsMap = null;
+
+ static Map<Reference, Set<AnnotatedDependency>> getReferences(Module m) {
+ // ensure it's initialized
+ initDependencies();
+
+ Map<Reference, Set<AnnotatedDependency>> result = new TreeMap<Reference, Set<AnnotatedDependency>>();
+ for (Reference ref : annotatedDepsMap.keySet()) {
+ if (m.contains(ref.referrer()) && m.isModuleDependence(ref.referree())) {
+ result.put(ref, annotatedDepsMap.get(ref));
+ }
+ }
+ return result;
+ }
+
+ static Set<Module.Dependency> getDependencies(Module m) {
+ // ensure it's initialized
+ initDependencies();
+
+ Set<Module.Dependency> deps = new TreeSet<Module.Dependency>();
+ for (Reference ref : annotatedDepsMap.keySet()) {
+ if (m.contains(ref.referrer())) {
+ Module other = m.getModuleDependence(ref.referree());
+ if (other != null) {
+ for (AnnotatedDependency ad : annotatedDepsMap.get(ref)) {
+ Module.Dependency d = new Module.Dependency(other, ad.isOptional(), ad.isDynamic());
+ deps.add(d);
+ }
+ }
+ }
+ }
+ return deps;
+ }
+
+ synchronized static void initDependencies() {
+ if (annotatedDepsMap != null) {
+ return;
+ }
+
+ // Build a map of references to its dependencies
+ annotatedDepsMap = new TreeMap<Reference, Set<AnnotatedDependency>>();
+ optionalDepsMap = new TreeMap<Reference, Set<AnnotatedDependency>>();
+
+ for (Klass k : Klass.getAllClasses()) {
+ for (AnnotatedDependency ad : annotatedDependencies) {
+ if (ad.matches(k.getClassName())) {
+ Reference ref = new Reference(ad.from, k);
+ Set<AnnotatedDependency> set = annotatedDepsMap.get(ref);
+ if (set == null) {
+ set = new TreeSet<AnnotatedDependency>();
+ annotatedDepsMap.put(ref, set);
+ }
+ set.add(ad);
+ }
+ }
+
+ for (AnnotatedDependency ad : optionalDependencies) {
+ if (ad.matches(k.getClassName())) {
+ Reference ref = new Reference(ad.from, k);
+ Set<AnnotatedDependency> set = optionalDepsMap.get(ref);
+ if (set == null) {
+ set = new TreeSet<AnnotatedDependency>();
+ optionalDepsMap.put(ref, set);
+ }
+ set.add(ad);
+ }
+ }
+ }
+ }
+}
diff --git a/jdk/make/modules/tools/src/com/sun/classanalyzer/AnnotationParser.java b/jdk/make/modules/tools/src/com/sun/classanalyzer/AnnotationParser.java
new file mode 100644
index 00000000000..7d984aa2e74
--- /dev/null
+++ b/jdk/make/modules/tools/src/com/sun/classanalyzer/AnnotationParser.java
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+package com.sun.classanalyzer;
+
+import com.sun.tools.classfile.*;
+import com.sun.tools.classfile.Annotation;
+import com.sun.tools.classfile.ExtendedAnnotation;
+import com.sun.tools.classfile.Annotation.Annotation_element_value;
+import com.sun.tools.classfile.Annotation.Array_element_value;
+import com.sun.tools.classfile.Annotation.Class_element_value;
+import com.sun.tools.classfile.Annotation.Enum_element_value;
+import com.sun.tools.classfile.Annotation.Primitive_element_value;
+import com.sun.tools.classfile.ConstantPoolException;
+import com.sun.tools.classfile.Descriptor;
+import com.sun.tools.classfile.Descriptor.InvalidDescriptor;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.sun.classanalyzer.AnnotatedDependency.*;
+import java.io.File;
+import java.io.PrintWriter;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ * @author Mandy Chung
+ */
+public class AnnotationParser {
+
+ static boolean parseAnnotation = false;
+ static void setParseAnnotation(boolean newValue) {
+ parseAnnotation = newValue;
+ }
+
+ private final ClassFileParser cfparser;
+ public AnnotationParser(ClassFileParser cfparser) {
+ this.cfparser = cfparser;
+ }
+
+ private AnnotatedDependency addAnnotation(Annotation annot, Klass.Method method) {
+ String type = getType(annot.type_index);
+ AnnotatedDependency dep = AnnotatedDependency.newAnnotatedDependency(type, cfparser.this_klass);
+ if (dep != null) {
+ for (int i = 0; i < annot.num_element_value_pairs; i++) {
+ Element element = getElement(annot.element_value_pairs[i]);
+ dep.addElement(element.name, element.value);
+ }
+ dep.setMethod(method);
+ }
+ return dep;
+ }
+
+ private AnnotatedDependency addAnnotation(ExtendedAnnotation annot, Klass.Method method) {
+ return addAnnotation(annot.annotation, method);
+ }
+
+ class Element {
+
+ String name;
+ List<String> value;
+
+ Element(String name) {
+ this.name = name;
+ this.value = new ArrayList<String>();
+ }
+
+ void add(String v) {
+ value.add(v);
+ }
+ }
+
+ Element getElement(Annotation.element_value_pair pair) {
+ Element element = new Element(getName(pair.element_name_index));
+ evp.parse(pair.value, element);
+ return element;
+ }
+
+ private String getType(int index) {
+ try {
+ Descriptor d = new Descriptor(index);
+ return d.getFieldType(cfparser.classfile.constant_pool);
+ } catch (ConstantPoolException ignore) {
+ } catch (InvalidDescriptor ignore) {
+ }
+ return "Unknown";
+ }
+
+ private String getName(int index) {
+ return cfparser.constantPoolParser.stringValue(index);
+ }
+ element_value_Parser evp = new element_value_Parser();
+
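+ // Illustrative sketch of how element values are flattened into an Element
+ // (the annotation shown is only an example of the expected shape): for
+ // @Provider(service={"java.sql.Driver"}) the element named "service" ends up
+ // with the single string value "java.sql.Driver"; enum constants are recorded
+ // as type-name '.' constant-name, and class literals with a trailing ".class".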
+ class element_value_Parser implements Annotation.element_value.Visitor<Void, Element> {
+
+ public Void parse(Annotation.element_value value, Element element) {
+ value.accept(this, element);
+ return null;
+ }
+
+ public Void visitPrimitive(Primitive_element_value ev, Element element) {
+ String value = getName(ev.const_value_index);
+ element.add(value);
+ return null;
+ }
+
+ public Void visitEnum(Enum_element_value ev, Element element) {
+ String value = getName(ev.type_name_index) + "." + getName(ev.const_name_index);
+ element.add(value);
+ return null;
+ }
+
+ public Void visitClass(Class_element_value ev, Element element) {
+ String value = getName(ev.class_info_index) + ".class";
+ element.add(value);
+ return null;
+ }
+
+ public Void visitAnnotation(Annotation_element_value ev, Element element) {
+ // AnnotationParser.this.addAnnotation(ev.annotation_value);
+ throw new UnsupportedOperationException("Not supported: " + ev);
+ }
+
+ public Void visitArray(Array_element_value ev, Element element) {
+ for (int i = 0; i < ev.num_values; i++) {
+ parse(ev.values[i], element);
+ }
+ return null;
+ }
+ }
+
+ void parseAttributes(Attributes attributes, Klass.Method method) {
+ if (!parseAnnotation) {
+ return;
+ }
+
+ visitRuntimeAnnotations((RuntimeVisibleAnnotations_attribute) attributes.get(Attribute.RuntimeVisibleAnnotations), method);
+ visitRuntimeAnnotations((RuntimeInvisibleAnnotations_attribute) attributes.get(Attribute.RuntimeInvisibleAnnotations), method);
+ visitRuntimeTypeAnnotations((RuntimeVisibleTypeAnnotations_attribute) attributes.get(Attribute.RuntimeVisibleTypeAnnotations), method);
+ visitRuntimeTypeAnnotations((RuntimeInvisibleTypeAnnotations_attribute) attributes.get(Attribute.RuntimeInvisibleTypeAnnotations), method);
+ visitRuntimeParameterAnnotations((RuntimeVisibleParameterAnnotations_attribute) attributes.get(Attribute.RuntimeVisibleParameterAnnotations), method);
+ visitRuntimeParameterAnnotations((RuntimeInvisibleParameterAnnotations_attribute) attributes.get(Attribute.RuntimeInvisibleParameterAnnotations), method);
+ }
+
+ public void visitRuntimeAnnotations(RuntimeAnnotations_attribute attr, Klass.Method method) {
+ if (attr == null) {
+ return;
+ }
+
+ for (int i = 0; i < attr.annotations.length; i++) {
+ addAnnotation(attr.annotations[i], method);
+ }
+ }
+
+ public void visitRuntimeTypeAnnotations(RuntimeTypeAnnotations_attribute attr, Klass.Method method) {
+ if (attr == null) {
+ return;
+ }
+
+ for (int i = 0; i < attr.annotations.length; i++) {
+ addAnnotation(attr.annotations[i], method);
+ }
+ }
+
+ public void visitRuntimeParameterAnnotations(RuntimeParameterAnnotations_attribute attr, Klass.Method method) {
+ if (attr == null) {
+ return;
+ }
+
+ for (int param = 0; param < attr.parameter_annotations.length; param++) {
+ for (int i = 0; i < attr.parameter_annotations[param].length; i++) {
+ addAnnotation(attr.parameter_annotations[param][i], method);
+ }
+ }
+ }
+
+ void parseAttributes(Attributes attributes) {
+ parseAttributes(attributes, null);
+ }
+
+ public static void main(String[] args) throws Exception {
+ String jdkhome = null;
+ String output = ".";
+
+ // process arguments
+ int i = 0;
+ while (i < args.length) {
+ String arg = args[i++];
+ if (arg.equals("-jdkhome")) {
+ if (i < args.length) {
+ jdkhome = args[i++];
+ } else {
+ usage();
+ }
+ } else if (arg.equals("-output")) {
+ output = args[i++];
+ } else {
+ usage();
+ }
+ }
+ if (jdkhome == null) {
+ usage();
+ }
+
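+ // Typical invocation (all paths here are illustrative only):
+ //   java com.sun.classanalyzer.AnnotationParser -jdkhome /path/to/jdk7 -output build
+ // The run writes jdk7.depconfig, optional.depconfig and runtime.references
+ // into the output directory.
+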
+ // parse annotation and code attribute to find all references
+ // to Class.forName etc
+ CodeAttributeParser.setParseCodeAttribute(true);
+ AnnotationParser.setParseAnnotation(true);
+
+ ClassPath.setJDKHome(jdkhome);
+ ClassPath.parseAllClassFiles();
+
+ PrintWriter writer = new PrintWriter(new File(output, "jdk7.depconfig"));
+
+ try {
+ for (Klass k : Klass.getAllClasses()) {
+ for (AnnotatedDependency dep : k.getAnnotatedDeps()) {
+ if (dep.isEmpty()) {
+ continue;
+ }
+ writer.format("# %s \n", dep.method == null ? dep.from : dep.method);
+ writer.format("%s\n\n", dep);
+ }
+ }
+ } finally {
+ writer.close();
+ }
+
+ writer = new PrintWriter(new File(output, "optional.depconfig"));
+ try {
+ AnnotatedDependency prev = null;
+ for (AnnotatedDependency dep : AnnotatedDependency.optionalDependencies) {
+ if (prev != null && !dep.equals(prev)) {
+ writer.format("%s\n\n", prev);
+ }
+ writer.format("# %s \n", dep.method == null ? dep.from : dep.method);
+ prev = dep;
+ }
+ if (prev != null) {
+ writer.format("%s\n\n", prev);
+ }
+ } finally {
+ writer.close();
+ }
+
+ writer = new PrintWriter(new File(output, "runtime.references"));
+ try {
+ for (Map.Entry<String, Set<Klass.Method>> entry : CodeAttributeParser.runtimeReferences.entrySet()) {
+ writer.format("References to %s\n", entry.getKey());
+ Klass prev = null;
+ for (Klass.Method m : entry.getValue()) {
+ if (prev == null || prev != m.getKlass()) {
+ writer.format(" %-50s # %s\n", m.getKlass(), m);
+ } else if (prev == m.getKlass()) {
+ writer.format(" %-50s # %s\n", "", m);
+ }
+ prev = m.getKlass();
+ }
+ }
+ } finally {
+ writer.close();
+ }
+ }
+
+ private static void usage() {
+ System.out.println("Usage: AnnotationParser ");
+ System.out.println("Options: ");
+ System.out.println("\t-jdkhome where all jars will be parsed");
+ System.out.println("\t-depconfig