Kevin Walls 2010-07-31 15:10:59 +01:00
commit 75c23332be
1385 changed files with 42730 additions and 18488 deletions

View File

@ -70,3 +70,9 @@ ff9031a745d9cc52318f2148e43ca3b07ee08098 jdk7-b92
b5dab6a313fdff4c043250e4d9c8f66fd624d27e jdk7-b93 b5dab6a313fdff4c043250e4d9c8f66fd624d27e jdk7-b93
8bb281f0f91582104d65d032be22522bfd2d8110 jdk7-b94 8bb281f0f91582104d65d032be22522bfd2d8110 jdk7-b94
654298d26561b76dfe3cfcffbbd7078080837300 jdk7-b95 654298d26561b76dfe3cfcffbbd7078080837300 jdk7-b95
d260f892491e040ae385a8e6df59557a7d721abf jdk7-b96
7e406ebed9a5968b584f3c3e6b60893b5d6d9741 jdk7-b97
db6e660120446c407e2d908d52ec046592b21726 jdk7-b98
c4c8a5bc54f66abc68cd185d9294042121922154 jdk7-b99
2d6ba7a221915bdf0311acc5641c7f3875cb793e jdk7-b100
2548ac036b8fca3326d058d758e6df8355a42469 jdk7-b101

View File

@ -70,3 +70,9 @@ cf26288a114be67c39f2758959ce50b60f5ae330 jdk7-b85
5fc102ff48f0e787ce9cc77249841d5ff0941b75 jdk7-b93 5fc102ff48f0e787ce9cc77249841d5ff0941b75 jdk7-b93
d7f35c61afa092b6357c2c4bce3f298f16620f71 jdk7-b94 d7f35c61afa092b6357c2c4bce3f298f16620f71 jdk7-b94
fd3663286e77b9f13c39eee124db2beb079b3ca6 jdk7-b95 fd3663286e77b9f13c39eee124db2beb079b3ca6 jdk7-b95
cf71cb5151166f35433afebaf67dbf34a704a170 jdk7-b96
5e197c942c6ebd8b92f324a31049c5f1d26d40ef jdk7-b97
6cea9984d73d74de0cd01f30d07ac0a1ed196117 jdk7-b98
e7f18db469a3e947b7096bfd12e87380e5a042cd jdk7-b99
b218a53ec7d3d42be61d31d6917a6c5c037b6f56 jdk7-b100
4193eaf5f1b82794c6a0fb1a8d11af43d1b1d611 jdk7-b101

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 1995, 2009, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -29,10 +29,6 @@ ifndef TOPDIR
TOPDIR:=. TOPDIR:=.
endif endif
ifndef CONTROL_TOPDIR
CONTROL_TOPDIR=$(TOPDIR)
endif
# Openjdk sources (only used if SKIP_OPENJDK_BUILD!=true) # Openjdk sources (only used if SKIP_OPENJDK_BUILD!=true)
OPENJDK_SOURCETREE=$(TOPDIR)/openjdk OPENJDK_SOURCETREE=$(TOPDIR)/openjdk
OPENJDK_BUILDDIR:=$(shell \ OPENJDK_BUILDDIR:=$(shell \
@ -120,7 +116,7 @@ endif
all_product_build:: all_product_build::
@$(FINISH_ECHO) @$(FINISH_ECHO)
# Generis build of basic repo series # Generic build of basic repo series
generic_build_repo_series:: generic_build_repo_series::
$(MKDIR) -p $(OUTPUTDIR) $(MKDIR) -p $(OUTPUTDIR)
$(MKDIR) -p $(OUTPUTDIR)/j2sdk-image $(MKDIR) -p $(OUTPUTDIR)/j2sdk-image
@ -179,11 +175,15 @@ endif
# The install process needs to know what the DEBUG_NAME is, so # The install process needs to know what the DEBUG_NAME is, so
# look for INSTALL_DEBUG_NAME in the install rules. # look for INSTALL_DEBUG_NAME in the install rules.
# #
# NOTE: On windows, do not use $(ABS_BOOTDIR_OUTPUTDIR)-$(DEBUG_NAME).
# Due to the use of short paths in $(ABS_OUTPUTDIR), this may
# not be the same location.
#
# Location of fresh bootdir output # Location of fresh bootdir output
ABS_BOOTDIR_OUTPUTDIR=$(ABS_OUTPUTDIR)/bootjdk ABS_BOOTDIR_OUTPUTDIR=$(ABS_OUTPUTDIR)/bootjdk
FRESH_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/j2sdk-image FRESH_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/j2sdk-image
FRESH_DEBUG_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)-$(DEBUG_NAME)/j2sdk-image FRESH_DEBUG_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-$(DEBUG_NAME)/j2sdk-image
create_fresh_product_bootdir: FRC create_fresh_product_bootdir: FRC
@$(START_ECHO) @$(START_ECHO)
@ -248,10 +248,14 @@ build_product_image:
generic_build_repo_series generic_build_repo_series
@$(FINISH_ECHO) @$(FINISH_ECHO)
# NOTE: On windows, do not use $(ABS_OUTPUTDIR)-$(DEBUG_NAME).
# Due to the use of short paths in $(ABS_OUTPUTDIR), this may
# not be the same location.
generic_debug_build: generic_debug_build:
@$(START_ECHO) @$(START_ECHO)
$(MAKE) \ $(MAKE) \
ALT_OUTPUTDIR=$(ABS_OUTPUTDIR)-$(DEBUG_NAME) \ ALT_OUTPUTDIR=$(ABS_OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-$(DEBUG_NAME) \
DEBUG_NAME=$(DEBUG_NAME) \ DEBUG_NAME=$(DEBUG_NAME) \
GENERATE_DOCS=false \ GENERATE_DOCS=false \
$(BOOT_CYCLE_DEBUG_SETTINGS) \ $(BOOT_CYCLE_DEBUG_SETTINGS) \
@ -348,8 +352,8 @@ endif
clobber:: clobber::
$(RM) -r $(OUTPUTDIR)/* $(RM) -r $(OUTPUTDIR)/*
$(RM) -r $(OUTPUTDIR)-debug/* $(RM) -r $(OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-debug/*
$(RM) -r $(OUTPUTDIR)-fastdebug/* $(RM) -r $(OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-fastdebug/*
-($(RMDIR) -p $(OUTPUTDIR) > $(DEV_NULL) 2>&1; $(TRUE)) -($(RMDIR) -p $(OUTPUTDIR) > $(DEV_NULL) 2>&1; $(TRUE))
clean: clobber clean: clobber
@ -550,6 +554,56 @@ ifeq ($(BUNDLE_RULES_AVAILABLE), true)
include $(BUNDLE_RULES) include $(BUNDLE_RULES)
endif endif
################################################################
# rule to test
################################################################
.NOTPARALLEL: test
test: test_clean test_start test_summary
test_start:
@$(ECHO) "Tests started at `$(DATE)`"
test_clean:
$(RM) $(OUTPUTDIR)/test_failures.txt $(OUTPUTDIR)/test_log.txt
test_summary: $(OUTPUTDIR)/test_failures.txt
@$(ECHO) "#################################################"
@$(ECHO) "Tests completed at `$(DATE)`"
@( $(EGREP) '^TEST STATS:' $(OUTPUTDIR)/test_log.txt \
|| $(ECHO) "No TEST STATS seen in log" )
@$(ECHO) "For complete details see: $(OUTPUTDIR)/test_log.txt"
@$(ECHO) "#################################################"
@if [ -s $< ] ; then \
$(ECHO) "ERROR: Test failure count: `$(CAT) $< | $(WC) -l`"; \
$(CAT) $<; \
exit 1; \
else \
$(ECHO) "Success! No failures detected"; \
fi
# Get failure list from log
$(OUTPUTDIR)/test_failures.txt: $(OUTPUTDIR)/test_log.txt
@$(RM) $@
@( $(EGREP) '^FAILED:' $< || $(ECHO) "" ) > $@
# Get log file of all tests run
JDK_TO_TEST := $(shell \
if [ -d "$(ABS_OUTPUTDIR)/j2sdk-image" ] ; then \
$(ECHO) "$(ABS_OUTPUTDIR)/j2sdk-image"; \
elif [ -d "$(ABS_OUTPUTDIR)/bin" ] ; then \
$(ECHO) "$(ABS_OUTPUTDIR)"; \
elif [ "$(PRODUCT_HOME)" != "" -a -d "$(PRODUCT_HOME)/bin" ] ; then \
$(ECHO) "$(PRODUCT_HOME)"; \
fi \
)
$(OUTPUTDIR)/test_log.txt:
$(RM) $@
( $(CD) test && \
$(MAKE) NO_STOPPING=- PRODUCT_HOME=$(JDK_TO_TEST) \
) | tee $@
################################################################ ################################################################
# JPRT rule to build # JPRT rule to build
################################################################ ################################################################
@ -560,7 +614,7 @@ include ./make/jprt.gmk
# PHONY # PHONY
################################################################ ################################################################
.PHONY: all \ .PHONY: all test test_start test_summary test_clean \
generic_build_repo_series \ generic_build_repo_series \
what clobber insane \ what clobber insane \
dev dev-build dev-sanity dev-clobber \ dev dev-build dev-sanity dev-clobber \

View File

@ -65,8 +65,9 @@
<li><a href="#cacerts">Certificate Authority File (cacert)</a> </li> <li><a href="#cacerts">Certificate Authority File (cacert)</a> </li>
<li><a href="#compilers">Compilers</a> <li><a href="#compilers">Compilers</a>
<ul> <ul>
<li><a href="#msvc">Microsoft Visual Studio</a> </li> <li><a href="#msvc32">Microsoft Visual Studio Professional/Express for 32 bit</a> </li>
<li><a href="#mssdk">Microsoft Platform SDK</a> </li> <li><a href="#msvc64">Microsoft Visual Studio Professional for 64 bit</a> </li>
<li><a href="#mssdk64">Microsoft Windows SDK for 64 bit</a> </li>
<li><a href="#gcc">Linux gcc/binutils</a> </li> <li><a href="#gcc">Linux gcc/binutils</a> </li>
<li><a href="#studio">Sun Studio</a> </li> <li><a href="#studio">Sun Studio</a> </li>
</ul> </ul>
@ -789,11 +790,11 @@
</li> </li>
<li> <li>
Install the Install the
<a href="#msvc">Microsoft Visual Studio Compilers</a>). <a href="#msvc32">Microsoft Visual Studio Compilers</a>).
</li> </li>
<li> <li>
Setup all environment variables for compilers Setup all environment variables for compilers
(see <a href="#msvc">compilers</a>). (see <a href="#msvc32">compilers</a>).
</li> </li>
<li> <li>
Install Install
@ -958,7 +959,7 @@
are also an option, although these compilers have not are also an option, although these compilers have not
been extensively used yet. been extensively used yet.
</blockquote> </blockquote>
<strong><a name="msvc">Windows i586: Microsoft Visual Studio Compilers</a></strong> <strong><a name="msvc32">Windows i586: Microsoft Visual Studio 2010 Compilers</a></strong>
<blockquote> <blockquote>
<p> <p>
<b>BEGIN WARNING</b>: At this time (Spring/Summer 2010) JDK 7 is starting a transition to <b>BEGIN WARNING</b>: At this time (Spring/Summer 2010) JDK 7 is starting a transition to
@ -971,14 +972,13 @@ So for now you should be able to build with either VS2003 or VS2010.
We do not guarantee that VS2008 will work, although there is sufficient We do not guarantee that VS2008 will work, although there is sufficient
makefile support to make at least basic JDK builds plausible. makefile support to make at least basic JDK builds plausible.
Visual Studio 2010 Express compilers are now able to build all the Visual Studio 2010 Express compilers are now able to build all the
open source repositories, but this is 32 bit only, since open source repositories, but this is 32 bit only. To build 64 bit
we have not yet seen the 7.1 Windows SDK with the 64 bit Windows binaries use the 7.1 Windows SDK. <b>END WARNING.</b>
compilers. <b>END WARNING.</b>
<p> <p>
The 32-bit OpenJDK Windows build The 32-bit OpenJDK Windows build
requires requires
Microsoft Visual Studio C++ 2010 (VS2010) Professional Microsoft Visual Studio C++ 2010 (VS2010) Professional
Edition compiler. Edition or Express compiler.
The compiler and other tools are expected to reside The compiler and other tools are expected to reside
in the location defined by the variable in the location defined by the variable
<tt>VS100COMNTOOLS</tt> which <tt>VS100COMNTOOLS</tt> which
@ -1001,14 +1001,33 @@ compilers. <b>END WARNING.</b>
The path <tt>/usr/bin</tt> must be after the path to the The path <tt>/usr/bin</tt> must be after the path to the
Visual Studio product. Visual Studio product.
</blockquote> </blockquote>
<strong><a name="mssdk">Windows x64: Microsoft Visual Studio Compilers</a></strong> <strong><a name="msvc64">Windows x64: Microsoft Visual Studio 2010 Professional Compiler</a></strong>
<blockquote> <blockquote>
On <b>X64</b>, the set up is much the same in VS2010 For <b>X64</b> builds, when using the VS2010 Professional
compiler, the 64 bit build setup is much the same as 32 bit
except that you run <tt>amd64\VCVARS64.BAT</tt> except that you run <tt>amd64\VCVARS64.BAT</tt>
to set the compiler environment variables. to set the compiler environment variables.
Previously 64 builds had used the 64 bit compiler in Previously 64 bit builds had used the 64 bit compiler in
an unbundled Windows SDK but this is no longer necessary. an unbundled Windows SDK but this is no longer necessary if
you have VS2010 Professional.
</blockquote> </blockquote>
<strong><a name="mssdk64">Windows x64: Microsoft Windows 7.1 SDK 64 bit compilers.</a></strong>
For a free alternative for 64 bit builds, use the 7.1 SDK.
Microsoft say that to set up your paths for this run
<pre>
c:\Program Files\Microsoft SDKs\Windows\v7.1\bin\setenv.cmd /x64
</pre>
What was actually tested was setting LIB, INCLUDE, and
PATH directly, based on the installation directories and using the
DOS short names appropriate for the system (you will
need to set these for your own installation, not just copy them verbatim), for example:
<pre>
set VSINSTALLDIR=c:\PROGRA~2\MICROS~1.0
set WindowsSdkDir=c:\PROGRA~1\MICROS~1\Windows\v7.1
set PATH=%VSINSTALLDIR%\vc\bin\amd64;%VSINSTALLDIR%\Common7\IDE;%WindowsSdkDir%\bin;%PATH%
set INCLUDE=%VSINSTALLDIR%\vc\include;%WindowsSdkDir%\include
set LIB=%VSINSTALLDIR%\vc\lib\amd64;%WindowsSdkDir%\lib\x64
</pre>
</blockquote> </blockquote>
<!-- ------------------------------------------------------ --> <!-- ------------------------------------------------------ -->
<h4><a name="zip">Zip and Unzip</a></h4> <h4><a name="zip">Zip and Unzip</a></h4>

View File

@ -70,3 +70,9 @@ bcd2fc089227559ac5be927923609fac29f067fa jdk7-b91
9718d624864c29dca44373d541e93cdd309a994f jdk7-b93 9718d624864c29dca44373d541e93cdd309a994f jdk7-b93
533c11186b44e3a02d6c5fe69a73260505fcfe5e jdk7-b94 533c11186b44e3a02d6c5fe69a73260505fcfe5e jdk7-b94
06dbf406818c789bb586c1de4c002024cd26ecd2 jdk7-b95 06dbf406818c789bb586c1de4c002024cd26ecd2 jdk7-b95
edc2a2659c77dabc55cb55bb617bad89e3a05bb3 jdk7-b96
4ec9d59374caa1e5d72fa802291b4d66955a4936 jdk7-b97
3b99409057e4c255da946f9f540d051a5ef4ab23 jdk7-b98
95db968660e7d87c345d5cf3dc2e3db037fb7220 jdk7-b99
a56d734a1e970e1a21a8f4feb13053e9a33674c7 jdk7-b100
86a239832646a74811695428984b6947c0bd6dc8 jdk7-b101

View File

@ -176,7 +176,16 @@ ifeq ($(FASTDEBUG), true)
CXXFLAGS_DBG += $(CC_LOWER_OPT) CXXFLAGS_DBG += $(CC_LOWER_OPT)
endif endif
CPPFLAGS_COMMON = -D$(ARCH) -DARCH='"$(ARCH)"' -DLINUX $(VERSION_DEFINES) \ CPP_ARCH_FLAGS = -DARCH='"$(ARCH)"'
# Alpha arch does not like "alpha" defined (potential general arch cleanup issue here)
ifneq ($(ARCH),alpha)
CPP_ARCH_FLAGS += -D$(ARCH)
else
CPP_ARCH_FLAGS += -D_$(ARCH)_
endif
CPPFLAGS_COMMON = $(CPP_ARCH_FLAGS) -DLINUX $(VERSION_DEFINES) \
-D_LARGEFILE64_SOURCE -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -D_GNU_SOURCE -D_REENTRANT
ifeq ($(ARCH_DATA_MODEL), 64) ifeq ($(ARCH_DATA_MODEL), 64)

View File

@ -30,7 +30,7 @@
BUILDDIR = ../.. BUILDDIR = ../..
include $(BUILDDIR)/common/Defs.gmk include $(BUILDDIR)/common/Defs.gmk
SUBDIRS = org SUBDIRS = org core
all build clean clobber:: all build clean clobber::
$(SUBDIRS-loop) $(SUBDIRS-loop)

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 1997, 2005, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -24,16 +24,49 @@
# #
# #
# Makefile for building Java input methods # Makefile for building RMI/IIOP
# #
BUILDDIR = ../../.. BUILDDIR = ../../..
PRODUCT = sun PRODUCT = sun
include $(BUILDDIR)/common/Defs.gmk include $(BUILDDIR)/common/Defs.gmk
SUBDIRS = indicim thaiim #
include $(BUILDDIR)/common/Subdirs.gmk # Files to compile.
#
CORBA_JMK_DIRECTORY=$(TOPDIR)/make/com/sun/corba/minclude/
include $(CORBA_JMK_DIRECTORY)javax_rmi.jmk
include $(CORBA_JMK_DIRECTORY)javax_rmi_CORBA.jmk
include $(CORBA_JMK_DIRECTORY)javax_transaction.jmk
include $(CORBA_JMK_DIRECTORY)javax_activity.jmk
include $(CORBA_JMK_DIRECTORY)sun_corba.jmk
all build clean clobber:: FILES_java = \
$(SUBDIRS-loop) $(javax_rmi_java) \
$(javax_rmi_CORBA_java) \
$(javax_transaction_java) \
$(javax_activity_java) \
$(sun_corba_java)
#
# Resources
#
LOCALE_SET_DEFINITION = jre
RESOURCE_BUNDLES_PROPERTIES = \
com/sun/corba/se/impl/orbutil/resources/sunorb.properties
#
# Rules
#
include $(BUILDDIR)/common/Classes.gmk
#
# Extra clean rules because we build more than one package.
#
clean:: classheaders.clean objects.clean
$(RM) -r $(CLASSBINDIR)/javax/rmi
$(RM) -r $(CLASSBINDIR)/javax/transaction
$(RM) -r $(CLASSBINDIR)/javax/activity
$(RM) -r $(CLASSBINDIR)/com/sun/corba/se/impl

View File

@ -70,7 +70,4 @@ REQUIRED_Group = \
com/sun/corba/se/internal/util/IdentityHashtable.java \ com/sun/corba/se/internal/util/IdentityHashtable.java \
com/sun/corba/se/internal/util/Utility.java \ com/sun/corba/se/internal/util/Utility.java \
com/sun/corba/se/internal/util/JDKBridge.java \ com/sun/corba/se/internal/util/JDKBridge.java \
com/sun/corba/se/internal/io/LibraryManager.java \
com/sun/corba/se/internal/io/ObjectStreamClass.java \
com/sun/corba/se/internal/io/TypeMismatchException.java \
com/sun/corba/se/internal/util/RepositoryId.java com/sun/corba/se/internal/util/RepositoryId.java

View File

@ -75,7 +75,7 @@ public class CorbaResourceUtil {
args[1] = (arg1 != null ? arg1.toString() : "null"); args[1] = (arg1 != null ? arg1.toString() : "null");
args[2] = (arg2 != null ? arg2.toString() : "null"); args[2] = (arg2 != null ? arg2.toString() : "null");
return java.text.MessageFormat.format(format, args); return java.text.MessageFormat.format(format, (Object[]) args);
} }
private static boolean resourcesInitialized = false; private static boolean resourcesInitialized = false;
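The change above adds an (Object[]) cast before handing a String[] to MessageFormat.format. A minimal standalone sketch of why that cast matters; the class name and message text are illustrative only, not from this repository:

import java.text.MessageFormat;

// Since JDK 5, MessageFormat.format(String, Object...) is a varargs method. Passing a
// String[] without a cast draws a javac warning about an ambiguous (non-varargs) call,
// because the array could be meant either as the whole argument list or as a single
// argument. The cast states the intent explicitly.
public class VarargsCastDemo {
    public static void main(String[] args) {
        String[] parts = { "deposit", "42" };
        String msg = MessageFormat.format("Operation {0} returned {1}", (Object[]) parts);
        System.out.println(msg);   // Operation deposit returned 42
    }
}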

View File

@ -350,7 +350,7 @@ public final class ObjectUtility {
if (useToString) { if (useToString) {
try { try {
cls.getDeclaredMethod( "toString", null ) ; cls.getDeclaredMethod( "toString", (Class[])null ) ;
return true ; return true ;
} catch (Exception exc) { } catch (Exception exc) {
return false ; return false ;

View File

@ -108,8 +108,8 @@ public class ExceptionHandlerImpl implements ExceptionHandler
try { try {
helperClass = Class.forName( helperName, true, loader ) ; helperClass = Class.forName( helperName, true, loader ) ;
Method idMethod = helperClass.getDeclaredMethod( "id", null ) ; Method idMethod = helperClass.getDeclaredMethod( "id", (Class[])null ) ;
setId( (String)idMethod.invoke( null, null ) ) ; setId( (String)idMethod.invoke( null, (Object[])null ) ) ;
} catch (Exception ex) { } catch (Exception ex) {
throw wrapper.badHelperIdMethod( ex, helperName ) ; throw wrapper.badHelperIdMethod( ex, helperName ) ;
} }
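The casts to (Class[])null and (Object[])null above follow the same pattern used throughout this commit. A self-contained sketch of that pattern; the class and its id() method are invented for illustration:

import java.lang.reflect.Method;

// getDeclaredMethod(String, Class...) and Method.invoke(Object, Object...) are varargs;
// a bare null argument is ambiguous (a null array vs. a single null element), so the
// casts say explicitly that "no parameters / no arguments" is meant and avoid the warning.
// Both methods document that a null array is treated as an empty one.
public class NullVarargsDemo {
    public static String id() { return "IDL:Demo:1.0"; }   // stand-in for a helper class's id()

    public static void main(String[] args) throws Exception {
        Method idMethod = NullVarargsDemo.class.getDeclaredMethod("id", (Class[]) null);
        String result = (String) idMethod.invoke(null, (Object[]) null);   // static, so receiver is null
        System.out.println(result);
    }
}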

View File

@ -589,7 +589,7 @@ abstract public class ORB {
this.getClass().getMethod("create_operation_list", argc); this.getClass().getMethod("create_operation_list", argc);
// OK, the method exists, so invoke it and be happy. // OK, the method exists, so invoke it and be happy.
Object[] argx = { oper }; java.lang.Object[] argx = { oper };
return (org.omg.CORBA.NVList)meth.invoke(this, argx); return (org.omg.CORBA.NVList)meth.invoke(this, argx);
} }
catch( java.lang.reflect.InvocationTargetException exs ) { catch( java.lang.reflect.InvocationTargetException exs ) {

View File

@ -187,7 +187,7 @@ public final class Bridge
try { try {
// Invoke the ObjectInputStream.latestUserDefinedLoader method // Invoke the ObjectInputStream.latestUserDefinedLoader method
return (ClassLoader)latestUserDefinedLoaderMethod.invoke(null, return (ClassLoader)latestUserDefinedLoaderMethod.invoke(null,
NO_ARGS); (Object[])NO_ARGS);
} catch (InvocationTargetException ite) { } catch (InvocationTargetException ite) {
Error err = new Error( Error err = new Error(
"sun.corba.Bridge.latestUserDefinedLoader: " + ite ) ; "sun.corba.Bridge.latestUserDefinedLoader: " + ite ) ;

View File

@ -98,3 +98,11 @@ e0a1a502e402dbe7bf2d9102b4084a7e79a99a9b jdk7-b91
d38f45079fe98792a7381dbb4b64f5b589ec8c58 jdk7-b94 d38f45079fe98792a7381dbb4b64f5b589ec8c58 jdk7-b94
8bfe9058ca4661779ac1d0572329f3943e68362e hs19-b01 8bfe9058ca4661779ac1d0572329f3943e68362e hs19-b01
91d861ba858daca645993a1ab6ba2fa06a8f4a5b jdk7-b95 91d861ba858daca645993a1ab6ba2fa06a8f4a5b jdk7-b95
573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 jdk7-b96
573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 hs19-b02
5f42499e57adc16380780f40541e1a66cd601891 jdk7-b97
8a045b3f5c13eaad92ff4baf15ca671845fcad1a jdk7-b98
6a236384a379642b5a2398e2819db9ab4e711e9b jdk7-b99
ad1977f08c4d69162a0775fe3f9576b9fd521d10 jdk7-b100
6c3a919105b68c15b7db923ec9a00006e9560910 jdk7-b101
ad1977f08c4d69162a0775fe3f9576b9fd521d10 hs19-b03

View File

@ -35,7 +35,6 @@ import sun.jvm.hotspot.utilities.*;
public class NMethod extends CodeBlob { public class NMethod extends CodeBlob {
private static long pcDescSize; private static long pcDescSize;
private static CIntegerField zombieInstructionSizeField;
private static sun.jvm.hotspot.types.OopField methodField; private static sun.jvm.hotspot.types.OopField methodField;
/** != InvocationEntryBci if this nmethod is an on-stack replacement method */ /** != InvocationEntryBci if this nmethod is an on-stack replacement method */
private static CIntegerField entryBCIField; private static CIntegerField entryBCIField;
@ -88,7 +87,6 @@ public class NMethod extends CodeBlob {
private static void initialize(TypeDataBase db) { private static void initialize(TypeDataBase db) {
Type type = db.lookupType("nmethod"); Type type = db.lookupType("nmethod");
zombieInstructionSizeField = type.getCIntegerField("_zombie_instruction_size");
methodField = type.getOopField("_method"); methodField = type.getOopField("_method");
entryBCIField = type.getCIntegerField("_entry_bci"); entryBCIField = type.getCIntegerField("_entry_bci");
osrLinkField = type.getAddressField("_osr_link"); osrLinkField = type.getAddressField("_osr_link");

View File

@ -72,6 +72,7 @@ public class BytecodeDisassembler {
addBytecodeClass(Bytecodes._invokestatic, BytecodeInvoke.class); addBytecodeClass(Bytecodes._invokestatic, BytecodeInvoke.class);
addBytecodeClass(Bytecodes._invokespecial, BytecodeInvoke.class); addBytecodeClass(Bytecodes._invokespecial, BytecodeInvoke.class);
addBytecodeClass(Bytecodes._invokeinterface, BytecodeInvoke.class); addBytecodeClass(Bytecodes._invokeinterface, BytecodeInvoke.class);
addBytecodeClass(Bytecodes._invokedynamic, BytecodeInvoke.class);
addBytecodeClass(Bytecodes._jsr, BytecodeJsr.class); addBytecodeClass(Bytecodes._jsr, BytecodeJsr.class);
addBytecodeClass(Bytecodes._jsr_w, BytecodeJsrW.class); addBytecodeClass(Bytecodes._jsr_w, BytecodeJsrW.class);
addBytecodeClass(Bytecodes._iload, BytecodeLoad.class); addBytecodeClass(Bytecodes._iload, BytecodeLoad.class);

View File

@ -54,15 +54,31 @@ public class BytecodeInvoke extends BytecodeWithCPIndex {
// returns the name of the invoked method // returns the name of the invoked method
public Symbol name() { public Symbol name() {
ConstantPool cp = method().getConstants(); ConstantPool cp = method().getConstants();
if (isInvokedynamic()) {
int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod());
return cp.getSymbolAt(nt[0]);
}
return cp.getNameRefAt(index()); return cp.getNameRefAt(index());
} }
// returns the signature of the invoked method // returns the signature of the invoked method
public Symbol signature() { public Symbol signature() {
ConstantPool cp = method().getConstants(); ConstantPool cp = method().getConstants();
if (isInvokedynamic()) {
int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod());
return cp.getSymbolAt(nt[1]);
}
return cp.getSignatureRefAt(index()); return cp.getSignatureRefAt(index());
} }
public int getSecondaryIndex() {
if (isInvokedynamic()) {
// change byte-ordering of 4-byte integer
return VM.getVM().getBytes().swapInt(javaSignedWordAt(1));
}
return super.getSecondaryIndex(); // throw an error
}
public Method getInvokedMethod() { public Method getInvokedMethod() {
return method().getConstants().getMethodRefAt(index()); return method().getConstants().getMethodRefAt(index());
} }
@ -87,6 +103,7 @@ public class BytecodeInvoke extends BytecodeWithCPIndex {
public boolean isInvokevirtual() { return adjustedInvokeCode() == Bytecodes._invokevirtual; } public boolean isInvokevirtual() { return adjustedInvokeCode() == Bytecodes._invokevirtual; }
public boolean isInvokestatic() { return adjustedInvokeCode() == Bytecodes._invokestatic; } public boolean isInvokestatic() { return adjustedInvokeCode() == Bytecodes._invokestatic; }
public boolean isInvokespecial() { return adjustedInvokeCode() == Bytecodes._invokespecial; } public boolean isInvokespecial() { return adjustedInvokeCode() == Bytecodes._invokespecial; }
public boolean isInvokedynamic() { return adjustedInvokeCode() == Bytecodes._invokedynamic; }
public boolean isValid() { return isInvokeinterface() || public boolean isValid() { return isInvokeinterface() ||
isInvokevirtual() || isInvokevirtual() ||
@ -104,6 +121,11 @@ public class BytecodeInvoke extends BytecodeWithCPIndex {
buf.append(spaces); buf.append(spaces);
buf.append('#'); buf.append('#');
buf.append(Integer.toString(indexForFieldOrMethod())); buf.append(Integer.toString(indexForFieldOrMethod()));
if (isInvokedynamic()) {
buf.append('(');
buf.append(Integer.toString(getSecondaryIndex()));
buf.append(')');
}
buf.append(" [Method "); buf.append(" [Method ");
StringBuffer sigBuf = new StringBuffer(); StringBuffer sigBuf = new StringBuffer();
new SignatureConverter(signature(), sigBuf).iterateReturntype(); new SignatureConverter(signature(), sigBuf).iterateReturntype();
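getSecondaryIndex() above reads the 4-byte invokedynamic operand and byte-swaps it with the SA's VM.getBytes().swapInt helper. A tiny standalone sketch of that swap using only JDK classes; Integer.reverseBytes stands in for swapInt and the operand value is made up:

// Conceptual only: reorder the four bytes of an int, the same operation getSecondaryIndex()
// applies via swapInt because the stored operand uses a different byte order than the
// javaSignedWordAt read assumes.
public class SwapIntDemo {
    public static void main(String[] args) {
        int raw = 0x04000000;                      // operand bytes as read from the code stream
        int swapped = Integer.reverseBytes(raw);   // 0x00000004 after reordering
        System.out.printf("raw=0x%08x swapped=0x%08x%n", raw, swapped);
    }
}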

View File

@ -25,6 +25,7 @@
package sun.jvm.hotspot.interpreter; package sun.jvm.hotspot.interpreter;
import sun.jvm.hotspot.oops.*; import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.utilities.*; import sun.jvm.hotspot.utilities.*;
public class BytecodeLoadConstant extends BytecodeWithCPIndex { public class BytecodeLoadConstant extends BytecodeWithCPIndex {
@ -32,10 +33,47 @@ public class BytecodeLoadConstant extends BytecodeWithCPIndex {
super(method, bci); super(method, bci);
} }
public boolean hasCacheIndex() {
// normal ldc uses CP index, but fast_aldc uses swapped CP cache index
return javaCode() != code();
}
public int index() { public int index() {
return javaCode() == Bytecodes._ldc ? int i = javaCode() == Bytecodes._ldc ?
(int) (0xFF & javaByteAt(1)) (int) (0xFF & javaByteAt(1))
: (int) (0xFFFF & javaShortAt(1)); : (int) (0xFFFF & javaShortAt(1));
if (hasCacheIndex()) {
return (0xFFFF & VM.getVM().getBytes().swapShort((short) i));
} else {
return i;
}
}
public int poolIndex() {
int i = index();
if (hasCacheIndex()) {
ConstantPoolCache cpCache = method().getConstants().getCache();
return cpCache.getEntryAt(i).getConstantPoolIndex();
} else {
return i;
}
}
public int cacheIndex() {
if (hasCacheIndex()) {
return index();
} else {
return -1; // no cache index
}
}
private Oop getCachedConstant() {
int i = cacheIndex();
if (i >= 0) {
ConstantPoolCache cpCache = method().getConstants().getCache();
return cpCache.getEntryAt(i).getF1();
}
return null;
} }
public void verify() { public void verify() {
@ -58,6 +96,7 @@ public class BytecodeLoadConstant extends BytecodeWithCPIndex {
// has to be int or float or String or Klass // has to be int or float or String or Klass
return (ctag.isUnresolvedString() || ctag.isString() return (ctag.isUnresolvedString() || ctag.isString()
|| ctag.isUnresolvedKlass() || ctag.isKlass() || ctag.isUnresolvedKlass() || ctag.isKlass()
|| ctag.isMethodHandle() || ctag.isMethodType()
|| ctag.isInt() || ctag.isFloat())? true: false; || ctag.isInt() || ctag.isFloat())? true: false;
} }
} }
@ -112,7 +151,7 @@ public class BytecodeLoadConstant extends BytecodeWithCPIndex {
public String getConstantValue() { public String getConstantValue() {
ConstantPool cpool = method().getConstants(); ConstantPool cpool = method().getConstants();
int cpIndex = index(); int cpIndex = poolIndex();
ConstantTag ctag = cpool.getTagAt(cpIndex); ConstantTag ctag = cpool.getTagAt(cpIndex);
if (ctag.isInt()) { if (ctag.isInt()) {
return "<int " + Integer.toString(cpool.getIntAt(cpIndex)) +">"; return "<int " + Integer.toString(cpool.getIntAt(cpIndex)) +">";
@ -149,6 +188,18 @@ public class BytecodeLoadConstant extends BytecodeWithCPIndex {
} else { } else {
throw new RuntimeException("should not reach here"); throw new RuntimeException("should not reach here");
} }
} else if (ctag.isMethodHandle()) {
Oop x = getCachedConstant();
int refidx = cpool.getMethodHandleIndexAt(cpIndex);
int refkind = cpool.getMethodHandleRefKindAt(cpIndex);
return "<MethodHandle kind=" + Integer.toString(refkind) +
" ref=" + Integer.toString(refidx)
+ (x == null ? "" : " @" + x.getHandle()) + ">";
} else if (ctag.isMethodType()) {
Oop x = getCachedConstant();
int refidx = cpool.getMethodTypeIndexAt(cpIndex);
return "<MethodType " + cpool.getSymbolAt(refidx).asString()
+ (x == null ? "" : " @" + x.getHandle()) + ">";
} else { } else {
if (Assert.ASSERTS_ENABLED) { if (Assert.ASSERTS_ENABLED) {
Assert.that(false, "invalid load constant type"); Assert.that(false, "invalid load constant type");
@ -162,7 +213,12 @@ public class BytecodeLoadConstant extends BytecodeWithCPIndex {
buf.append(getJavaBytecodeName()); buf.append(getJavaBytecodeName());
buf.append(spaces); buf.append(spaces);
buf.append('#'); buf.append('#');
buf.append(Integer.toString(index())); buf.append(Integer.toString(poolIndex()));
if (hasCacheIndex()) {
buf.append('(');
buf.append(Integer.toString(cacheIndex()));
buf.append(')');
}
buf.append(spaces); buf.append(spaces);
buf.append(getConstantValue()); buf.append(getConstantValue());
if (code() != javaCode()) { if (code() != javaCode()) {
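The new poolIndex()/cacheIndex() pair distinguishes a rewritten fast_aldc operand (a byte-swapped constant pool cache index) from a plain ldc operand (a constant pool index). A toy sketch of that two-step lookup; the int[] mapping here merely stands in for ConstantPoolCache.getEntryAt(i).getConstantPoolIndex():

// Illustrative only: resolve a fast_aldc_w style operand back to a constant pool index.
public class LdcIndexDemo {
    public static void main(String[] args) {
        int[] cacheToPool = { 7, 12, 3 };   // toy cache: cache index -> constant pool index

        int operand = 0x0100;               // two operand bytes read the "Java way" (big-endian)
        int cacheIndex = 0xFFFF & Short.reverseBytes((short) operand);   // swap, as index() now does
        int poolIndex = cacheToPool[cacheIndex];

        System.out.println("cacheIndex=" + cacheIndex + " poolIndex=" + poolIndex);   // 1 and 12
    }
}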

View File

@ -37,12 +37,19 @@ public abstract class BytecodeWithCPIndex extends Bytecode {
// the constant pool index for this bytecode // the constant pool index for this bytecode
public int index() { return 0xFFFF & javaShortAt(1); } public int index() { return 0xFFFF & javaShortAt(1); }
public int getSecondaryIndex() {
throw new IllegalArgumentException("must be invokedynamic");
}
protected int indexForFieldOrMethod() { protected int indexForFieldOrMethod() {
ConstantPoolCache cpCache = method().getConstants().getCache(); ConstantPoolCache cpCache = method().getConstants().getCache();
// get ConstantPool index from ConstantPoolCacheIndex at given bci // get ConstantPool index from ConstantPoolCacheIndex at given bci
int cpCacheIndex = index(); int cpCacheIndex = index();
if (cpCache == null) { if (cpCache == null) {
return cpCacheIndex; return cpCacheIndex;
} else if (code() == Bytecodes._invokedynamic) {
int secondaryIndex = getSecondaryIndex();
return cpCache.getMainEntryAt(secondaryIndex).getConstantPoolIndex();
} else { } else {
// change byte-ordering and go via cache // change byte-ordering and go via cache
return cpCache.getEntryAt((int) (0xFFFF & VM.getVM().getBytes().swapShort((short) cpCacheIndex))).getConstantPoolIndex(); return cpCache.getEntryAt((int) (0xFFFF & VM.getVM().getBytes().swapShort((short) cpCacheIndex))).getConstantPoolIndex();

View File

@ -222,7 +222,7 @@ public class Bytecodes {
public static final int _invokespecial = 183; // 0xb7 public static final int _invokespecial = 183; // 0xb7
public static final int _invokestatic = 184; // 0xb8 public static final int _invokestatic = 184; // 0xb8
public static final int _invokeinterface = 185; // 0xb9 public static final int _invokeinterface = 185; // 0xb9
public static final int _xxxunusedxxx = 186; // 0xba public static final int _invokedynamic = 186; // 0xba
public static final int _new = 187; // 0xbb public static final int _new = 187; // 0xbb
public static final int _newarray = 188; // 0xbc public static final int _newarray = 188; // 0xbc
public static final int _anewarray = 189; // 0xbd public static final int _anewarray = 189; // 0xbd
@ -269,9 +269,12 @@ public class Bytecodes {
public static final int _fast_invokevfinal = 226; public static final int _fast_invokevfinal = 226;
public static final int _fast_linearswitch = 227; public static final int _fast_linearswitch = 227;
public static final int _fast_binaryswitch = 228; public static final int _fast_binaryswitch = 228;
public static final int _shouldnotreachhere = 229; // For debugging public static final int _fast_aldc = 229;
public static final int _fast_aldc_w = 230;
public static final int _return_register_finalizer = 231;
public static final int _shouldnotreachhere = 232; // For debugging
public static final int number_of_codes = 230; public static final int number_of_codes = 233;
public static int specialLengthAt(Method method, int bci) { public static int specialLengthAt(Method method, int bci) {
int code = codeAt(method, bci); int code = codeAt(method, bci);
@ -458,9 +461,9 @@ public class Bytecodes {
def(_dconst_1 , "dconst_1" , "b" , null , BasicType.getTDouble() , 2, false); def(_dconst_1 , "dconst_1" , "b" , null , BasicType.getTDouble() , 2, false);
def(_bipush , "bipush" , "bc" , null , BasicType.getTInt() , 1, false); def(_bipush , "bipush" , "bc" , null , BasicType.getTInt() , 1, false);
def(_sipush , "sipush" , "bcc" , null , BasicType.getTInt() , 1, false); def(_sipush , "sipush" , "bcc" , null , BasicType.getTInt() , 1, false);
def(_ldc , "ldc" , "bi" , null , BasicType.getTIllegal(), 1, true ); def(_ldc , "ldc" , "bk" , null , BasicType.getTIllegal(), 1, true );
def(_ldc_w , "ldc_w" , "bii" , null , BasicType.getTIllegal(), 1, true ); def(_ldc_w , "ldc_w" , "bkk" , null , BasicType.getTIllegal(), 1, true );
def(_ldc2_w , "ldc2_w" , "bii" , null , BasicType.getTIllegal(), 2, true ); def(_ldc2_w , "ldc2_w" , "bkk" , null , BasicType.getTIllegal(), 2, true );
def(_iload , "iload" , "bi" , "wbii" , BasicType.getTInt() , 1, false); def(_iload , "iload" , "bi" , "wbii" , BasicType.getTInt() , 1, false);
def(_lload , "lload" , "bi" , "wbii" , BasicType.getTLong() , 2, false); def(_lload , "lload" , "bi" , "wbii" , BasicType.getTLong() , 2, false);
def(_fload , "fload" , "bi" , "wbii" , BasicType.getTFloat() , 1, false); def(_fload , "fload" , "bi" , "wbii" , BasicType.getTFloat() , 1, false);
@ -618,26 +621,26 @@ public class Bytecodes {
def(_dreturn , "dreturn" , "b" , null , BasicType.getTDouble() , -2, true ); def(_dreturn , "dreturn" , "b" , null , BasicType.getTDouble() , -2, true );
def(_areturn , "areturn" , "b" , null , BasicType.getTObject() , -1, true ); def(_areturn , "areturn" , "b" , null , BasicType.getTObject() , -1, true );
def(_return , "return" , "b" , null , BasicType.getTVoid() , 0, true ); def(_return , "return" , "b" , null , BasicType.getTVoid() , 0, true );
def(_getstatic , "getstatic" , "bjj" , null , BasicType.getTIllegal(), 1, true ); def(_getstatic , "getstatic" , "bJJ" , null , BasicType.getTIllegal(), 1, true );
def(_putstatic , "putstatic" , "bjj" , null , BasicType.getTIllegal(), -1, true ); def(_putstatic , "putstatic" , "bJJ" , null , BasicType.getTIllegal(), -1, true );
def(_getfield , "getfield" , "bjj" , null , BasicType.getTIllegal(), 0, true ); def(_getfield , "getfield" , "bJJ" , null , BasicType.getTIllegal(), 0, true );
def(_putfield , "putfield" , "bjj" , null , BasicType.getTIllegal(), -2, true ); def(_putfield , "putfield" , "bJJ" , null , BasicType.getTIllegal(), -2, true );
def(_invokevirtual , "invokevirtual" , "bjj" , null , BasicType.getTIllegal(), -1, true ); def(_invokevirtual , "invokevirtual" , "bJJ" , null , BasicType.getTIllegal(), -1, true );
def(_invokespecial , "invokespecial" , "bjj" , null , BasicType.getTIllegal(), -1, true ); def(_invokespecial , "invokespecial" , "bJJ" , null , BasicType.getTIllegal(), -1, true );
def(_invokestatic , "invokestatic" , "bjj" , null , BasicType.getTIllegal(), 0, true ); def(_invokestatic , "invokestatic" , "bJJ" , null , BasicType.getTIllegal(), 0, true );
def(_invokeinterface , "invokeinterface" , "bjj__", null , BasicType.getTIllegal(), -1, true ); def(_invokeinterface , "invokeinterface" , "bJJ__", null , BasicType.getTIllegal(), -1, true );
def(_xxxunusedxxx , "xxxunusedxxx" , null , null , BasicType.getTVoid() , 0, false); def(_invokedynamic , "invokedynamic" , "bJJJJ", null , BasicType.getTIllegal(), -1, true );
def(_new , "new" , "bii" , null , BasicType.getTObject() , 1, true ); def(_new , "new" , "bkk" , null , BasicType.getTObject() , 1, true );
def(_newarray , "newarray" , "bc" , null , BasicType.getTObject() , 0, true ); def(_newarray , "newarray" , "bc" , null , BasicType.getTObject() , 0, true );
def(_anewarray , "anewarray" , "bii" , null , BasicType.getTObject() , 0, true ); def(_anewarray , "anewarray" , "bkk" , null , BasicType.getTObject() , 0, true );
def(_arraylength , "arraylength" , "b" , null , BasicType.getTVoid() , 0, true ); def(_arraylength , "arraylength" , "b" , null , BasicType.getTVoid() , 0, true );
def(_athrow , "athrow" , "b" , null , BasicType.getTVoid() , -1, true ); def(_athrow , "athrow" , "b" , null , BasicType.getTVoid() , -1, true );
def(_checkcast , "checkcast" , "bii" , null , BasicType.getTObject() , 0, true ); def(_checkcast , "checkcast" , "bkk" , null , BasicType.getTObject() , 0, true );
def(_instanceof , "instanceof" , "bii" , null , BasicType.getTInt() , 0, true ); def(_instanceof , "instanceof" , "bkk" , null , BasicType.getTInt() , 0, true );
def(_monitorenter , "monitorenter" , "b" , null , BasicType.getTVoid() , -1, true ); def(_monitorenter , "monitorenter" , "b" , null , BasicType.getTVoid() , -1, true );
def(_monitorexit , "monitorexit" , "b" , null , BasicType.getTVoid() , -1, true ); def(_monitorexit , "monitorexit" , "b" , null , BasicType.getTVoid() , -1, true );
def(_wide , "wide" , "" , null , BasicType.getTVoid() , 0, false); def(_wide , "wide" , "" , null , BasicType.getTVoid() , 0, false);
def(_multianewarray , "multianewarray" , "biic" , null , BasicType.getTObject() , 1, true ); def(_multianewarray , "multianewarray" , "bkkc" , null , BasicType.getTObject() , 1, true );
def(_ifnull , "ifnull" , "boo" , null , BasicType.getTVoid() , -1, false); def(_ifnull , "ifnull" , "boo" , null , BasicType.getTVoid() , -1, false);
def(_ifnonnull , "ifnonnull" , "boo" , null , BasicType.getTVoid() , -1, false); def(_ifnonnull , "ifnonnull" , "boo" , null , BasicType.getTVoid() , -1, false);
def(_goto_w , "goto_w" , "boooo", null , BasicType.getTVoid() , 0, false); def(_goto_w , "goto_w" , "boooo", null , BasicType.getTVoid() , 0, false);
@ -646,38 +649,44 @@ public class Bytecodes {
// JVM bytecodes // JVM bytecodes
// bytecode bytecode name format wide f. result tp stk traps std code // bytecode bytecode name format wide f. result tp stk traps std code
def(_fast_agetfield , "fast_agetfield" , "bjj" , null , BasicType.getTObject() , 0, true , _getfield ); def(_fast_agetfield , "fast_agetfield" , "bJJ" , null , BasicType.getTObject() , 0, true , _getfield );
def(_fast_bgetfield , "fast_bgetfield" , "bjj" , null , BasicType.getTInt() , 0, true , _getfield ); def(_fast_bgetfield , "fast_bgetfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _getfield );
def(_fast_cgetfield , "fast_cgetfield" , "bjj" , null , BasicType.getTChar() , 0, true , _getfield ); def(_fast_cgetfield , "fast_cgetfield" , "bJJ" , null , BasicType.getTChar() , 0, true , _getfield );
def(_fast_dgetfield , "fast_dgetfield" , "bjj" , null , BasicType.getTDouble() , 0, true , _getfield ); def(_fast_dgetfield , "fast_dgetfield" , "bJJ" , null , BasicType.getTDouble() , 0, true , _getfield );
def(_fast_fgetfield , "fast_fgetfield" , "bjj" , null , BasicType.getTFloat() , 0, true , _getfield ); def(_fast_fgetfield , "fast_fgetfield" , "bJJ" , null , BasicType.getTFloat() , 0, true , _getfield );
def(_fast_igetfield , "fast_igetfield" , "bjj" , null , BasicType.getTInt() , 0, true , _getfield ); def(_fast_igetfield , "fast_igetfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _getfield );
def(_fast_lgetfield , "fast_lgetfield" , "bjj" , null , BasicType.getTLong() , 0, true , _getfield ); def(_fast_lgetfield , "fast_lgetfield" , "bJJ" , null , BasicType.getTLong() , 0, true , _getfield );
def(_fast_sgetfield , "fast_sgetfield" , "bjj" , null , BasicType.getTShort() , 0, true , _getfield ); def(_fast_sgetfield , "fast_sgetfield" , "bJJ" , null , BasicType.getTShort() , 0, true , _getfield );
def(_fast_aputfield , "fast_aputfield" , "bjj" , null , BasicType.getTObject() , 0, true , _putfield ); def(_fast_aputfield , "fast_aputfield" , "bJJ" , null , BasicType.getTObject() , 0, true , _putfield );
def(_fast_bputfield , "fast_bputfield" , "bjj" , null , BasicType.getTInt() , 0, true , _putfield ); def(_fast_bputfield , "fast_bputfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _putfield );
def(_fast_cputfield , "fast_cputfield" , "bjj" , null , BasicType.getTChar() , 0, true , _putfield ); def(_fast_cputfield , "fast_cputfield" , "bJJ" , null , BasicType.getTChar() , 0, true , _putfield );
def(_fast_dputfield , "fast_dputfield" , "bjj" , null , BasicType.getTDouble() , 0, true , _putfield ); def(_fast_dputfield , "fast_dputfield" , "bJJ" , null , BasicType.getTDouble() , 0, true , _putfield );
def(_fast_fputfield , "fast_fputfield" , "bjj" , null , BasicType.getTFloat() , 0, true , _putfield ); def(_fast_fputfield , "fast_fputfield" , "bJJ" , null , BasicType.getTFloat() , 0, true , _putfield );
def(_fast_iputfield , "fast_iputfield" , "bjj" , null , BasicType.getTInt() , 0, true , _putfield ); def(_fast_iputfield , "fast_iputfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _putfield );
def(_fast_lputfield , "fast_lputfield" , "bjj" , null , BasicType.getTLong() , 0, true , _putfield ); def(_fast_lputfield , "fast_lputfield" , "bJJ" , null , BasicType.getTLong() , 0, true , _putfield );
def(_fast_sputfield , "fast_sputfield" , "bjj" , null , BasicType.getTShort() , 0, true , _putfield ); def(_fast_sputfield , "fast_sputfield" , "bJJ" , null , BasicType.getTShort() , 0, true , _putfield );
def(_fast_aload_0 , "fast_aload_0" , "b" , null , BasicType.getTObject() , 1, true , _aload_0 ); def(_fast_aload_0 , "fast_aload_0" , "b" , null , BasicType.getTObject() , 1, true , _aload_0 );
def(_fast_iaccess_0 , "fast_iaccess_0" , "b_jj" , null , BasicType.getTInt() , 1, true , _aload_0 ); def(_fast_iaccess_0 , "fast_iaccess_0" , "b_JJ" , null , BasicType.getTInt() , 1, true , _aload_0 );
def(_fast_aaccess_0 , "fast_aaccess_0" , "b_jj" , null , BasicType.getTObject() , 1, true , _aload_0 ); def(_fast_aaccess_0 , "fast_aaccess_0" , "b_JJ" , null , BasicType.getTObject() , 1, true , _aload_0 );
def(_fast_faccess_0 , "fast_faccess_0" , "b_jj" , null , BasicType.getTObject() , 1, true , _aload_0 ); def(_fast_faccess_0 , "fast_faccess_0" , "b_JJ" , null , BasicType.getTObject() , 1, true , _aload_0 );
def(_fast_iload , "fast_iload" , "bi" , null , BasicType.getTInt() , 1, false, _iload); def(_fast_iload , "fast_iload" , "bi" , null , BasicType.getTInt() , 1, false, _iload);
def(_fast_iload2 , "fast_iload2" , "bi_i" , null , BasicType.getTInt() , 2, false, _iload); def(_fast_iload2 , "fast_iload2" , "bi_i" , null , BasicType.getTInt() , 2, false, _iload);
def(_fast_icaload , "fast_icaload" , "bi_" , null , BasicType.getTInt() , 0, false, _iload); def(_fast_icaload , "fast_icaload" , "bi_" , null , BasicType.getTInt() , 0, false, _iload);
// Faster method invocation. // Faster method invocation.
def(_fast_invokevfinal , "fast_invokevfinal" , "bjj" , null , BasicType.getTIllegal(), -1, true, _invokevirtual); def(_fast_invokevfinal , "fast_invokevfinal" , "bJJ" , null , BasicType.getTIllegal(), -1, true, _invokevirtual);
def(_fast_linearswitch , "fast_linearswitch" , "" , null , BasicType.getTVoid() , -1, false, _lookupswitch ); def(_fast_linearswitch , "fast_linearswitch" , "" , null , BasicType.getTVoid() , -1, false, _lookupswitch );
def(_fast_binaryswitch , "fast_binaryswitch" , "" , null , BasicType.getTVoid() , -1, false, _lookupswitch ); def(_fast_binaryswitch , "fast_binaryswitch" , "" , null , BasicType.getTVoid() , -1, false, _lookupswitch );
def(_return_register_finalizer, "return_register_finalizer", "b" , null , BasicType.getTVoid() , 0, true, _return );
def(_fast_aldc , "fast_aldc" , "bj" , null , BasicType.getTObject(), 1, true, _ldc );
def(_fast_aldc_w , "fast_aldc_w" , "bJJ" , null , BasicType.getTObject(), 1, true, _ldc_w );
def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , null , BasicType.getTVoid() , 0, false); def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , null , BasicType.getTVoid() , 0, false);
if (Assert.ASSERTS_ENABLED) { if (Assert.ASSERTS_ENABLED) {
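A minimal sketch restating the renumbering above: _invokedynamic takes the previously unused slot 186, three new fast codes are appended, and number_of_codes moves from 230 to 233 so it stays one past _shouldnotreachhere.

// Illustrative only: the constants as defined in the change above, with the invariant
// that number_of_codes is one past the last (debugging) code.
public class OpcodeNumberingCheck {
    static final int _invokedynamic = 186;              // reuses the former _xxxunusedxxx slot
    static final int _fast_aldc = 229;
    static final int _fast_aldc_w = 230;
    static final int _return_register_finalizer = 231;
    static final int _shouldnotreachhere = 232;
    static final int number_of_codes = 233;

    public static void main(String[] args) {
        System.out.println(number_of_codes == _shouldnotreachhere + 1);   // true
    }
}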

View File

@ -152,7 +152,7 @@ public class ConstantPool extends Oop implements ClassConstants {
return res; return res;
} }
public int getNameAndTypeAt(int which) { public int[] getNameAndTypeAt(int which) {
if (Assert.ASSERTS_ENABLED) { if (Assert.ASSERTS_ENABLED) {
Assert.that(getTagAt(which).isNameAndType(), "Corrupted constant pool"); Assert.that(getTagAt(which).isNameAndType(), "Corrupted constant pool");
} }
@ -160,18 +160,16 @@ public class ConstantPool extends Oop implements ClassConstants {
if (DEBUG) { if (DEBUG) {
System.err.println("ConstantPool.getNameAndTypeAt(" + which + "): result = " + i); System.err.println("ConstantPool.getNameAndTypeAt(" + which + "): result = " + i);
} }
return i; return new int[] { extractLowShortFromInt(i), extractHighShortFromInt(i) };
} }
public Symbol getNameRefAt(int which) { public Symbol getNameRefAt(int which) {
int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which)); int nameIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[0];
int nameIndex = extractLowShortFromInt(refIndex);
return getSymbolAt(nameIndex); return getSymbolAt(nameIndex);
} }
public Symbol getSignatureRefAt(int which) { public Symbol getSignatureRefAt(int which) {
int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which)); int sigIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[1];
int sigIndex = extractHighShortFromInt(refIndex);
return getSymbolAt(sigIndex); return getSymbolAt(sigIndex);
} }
@ -220,11 +218,11 @@ public class ConstantPool extends Oop implements ClassConstants {
/** Lookup for entries consisting of (name_index, signature_index) */ /** Lookup for entries consisting of (name_index, signature_index) */
public int getNameRefIndexAt(int index) { public int getNameRefIndexAt(int index) {
int refIndex = getNameAndTypeAt(index); int[] refIndex = getNameAndTypeAt(index);
if (DEBUG) { if (DEBUG) {
System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex); System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]);
} }
int i = extractLowShortFromInt(refIndex); int i = refIndex[0];
if (DEBUG) { if (DEBUG) {
System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): result = " + i); System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): result = " + i);
} }
@ -233,17 +231,53 @@ public class ConstantPool extends Oop implements ClassConstants {
/** Lookup for entries consisting of (name_index, signature_index) */ /** Lookup for entries consisting of (name_index, signature_index) */
public int getSignatureRefIndexAt(int index) { public int getSignatureRefIndexAt(int index) {
int refIndex = getNameAndTypeAt(index); int[] refIndex = getNameAndTypeAt(index);
if (DEBUG) { if (DEBUG) {
System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex); System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]);
} }
int i = extractHighShortFromInt(refIndex); int i = refIndex[1];
if (DEBUG) { if (DEBUG) {
System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): result = " + i); System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): result = " + i);
} }
return i; return i;
} }
/** Lookup for MethodHandle entries. */
public int getMethodHandleIndexAt(int i) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool");
}
int res = extractHighShortFromInt(getIntAt(i));
if (DEBUG) {
System.err.println("ConstantPool.getMethodHandleIndexAt(" + i + "): result = " + res);
}
return res;
}
/** Lookup for MethodHandle entries. */
public int getMethodHandleRefKindAt(int i) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool");
}
int res = extractLowShortFromInt(getIntAt(i));
if (DEBUG) {
System.err.println("ConstantPool.getMethodHandleRefKindAt(" + i + "): result = " + res);
}
return res;
}
/** Lookup for MethodType entries. */
public int getMethodTypeIndexAt(int i) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(getTagAt(i).isMethodType(), "Corrupted constant pool");
}
int res = getIntAt(i);
if (DEBUG) {
System.err.println("ConstantPool.getMethodHandleTypeAt(" + i + "): result = " + res);
}
return res;
}
final private static String[] nameForTag = new String[] { final private static String[] nameForTag = new String[] {
}; };
@ -261,6 +295,9 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_Methodref: return "JVM_CONSTANT_Methodref"; case JVM_CONSTANT_Methodref: return "JVM_CONSTANT_Methodref";
case JVM_CONSTANT_InterfaceMethodref: return "JVM_CONSTANT_InterfaceMethodref"; case JVM_CONSTANT_InterfaceMethodref: return "JVM_CONSTANT_InterfaceMethodref";
case JVM_CONSTANT_NameAndType: return "JVM_CONSTANT_NameAndType"; case JVM_CONSTANT_NameAndType: return "JVM_CONSTANT_NameAndType";
case JVM_CONSTANT_MethodHandle: return "JVM_CONSTANT_MethodHandle";
case JVM_CONSTANT_MethodType: return "JVM_CONSTANT_MethodType";
case JVM_CONSTANT_InvokeDynamic: return "JVM_CONSTANT_InvokeDynamic";
case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid"; case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid";
case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass"; case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass";
case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError"; case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError";
@ -317,6 +354,9 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_Methodref: case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_InterfaceMethodref: case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_NameAndType: case JVM_CONSTANT_NameAndType:
case JVM_CONSTANT_MethodHandle:
case JVM_CONSTANT_MethodType:
case JVM_CONSTANT_InvokeDynamic:
visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true); visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true);
break; break;
} }
@ -467,6 +507,30 @@ public class ConstantPool extends Oop implements ClassConstants {
+ ", type = " + signatureIndex); + ", type = " + signatureIndex);
break; break;
} }
case JVM_CONSTANT_MethodHandle: {
dos.writeByte(cpConstType);
int value = getIntAt(ci);
// CONSTANT_MethodHandle_info is a u1 reference_kind followed by a u2 reference_index;
// the kind sits in the low short and the index in the high short (see the getters above)
byte refKind = (byte) extractLowShortFromInt(value);
short refIndex = (short) extractHighShortFromInt(value);
dos.writeByte(refKind);
dos.writeShort(refIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = MH kind = " + refKind
+ ", index = " + refIndex);
break;
}
case JVM_CONSTANT_InvokeDynamic: {
dos.writeByte(cpConstType);
int value = getIntAt(ci);
short bootstrapMethodIndex = (short) extractLowShortFromInt(value);
short nameAndTypeIndex = (short) extractHighShortFromInt(value);
dos.writeShort(bootstrapMethodIndex);
dos.writeShort(nameAndTypeIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bootstrapMethodIndex
+ ", N&T = " + nameAndTypeIndex);
break;
}
default: default:
throw new InternalError("unknown tag: " + cpConstType); throw new InternalError("unknown tag: " + cpConstType);
} // switch } // switch
@ -488,10 +552,12 @@ public class ConstantPool extends Oop implements ClassConstants {
// //
private static int extractHighShortFromInt(int val) { private static int extractHighShortFromInt(int val) {
// must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
return (val >> 16) & 0xFFFF; return (val >> 16) & 0xFFFF;
} }
private static int extractLowShortFromInt(int val) { private static int extractLowShortFromInt(int val) {
// must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
return val & 0xFFFF; return val & 0xFFFF;
} }
} }
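The extractLowShortFromInt/extractHighShortFromInt helpers (and the new int[] shape of getNameAndTypeAt) rely on a NameAndType entry being packed into one int: name index in the low 16 bits, signature index in the high 16 bits. A standalone sketch of that packing:

// Illustrative only: mirror the 16-bit packing the accessors above decode.
public class NameAndTypePackingDemo {
    static int pack(int nameIndex, int signatureIndex) {
        return (signatureIndex << 16) | (nameIndex & 0xFFFF);
    }
    static int extractLowShortFromInt(int val)  { return val & 0xFFFF; }          // name index
    static int extractHighShortFromInt(int val) { return (val >> 16) & 0xFFFF; }  // signature index

    public static void main(String[] args) {
        int packed = pack(17, 42);
        int[] nt = { extractLowShortFromInt(packed), extractHighShortFromInt(packed) };
        System.out.println("name index = " + nt[0] + ", signature index = " + nt[1]);   // 17, 42
    }
}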

View File

@ -78,6 +78,31 @@ public class ConstantPoolCache extends Oop {
return new ConstantPoolCacheEntry(this, i); return new ConstantPoolCacheEntry(this, i);
} }
public static boolean isSecondaryIndex(int i) { return (i < 0); }
public static int decodeSecondaryIndex(int i) { return isSecondaryIndex(i) ? ~i : i; }
public static int encodeSecondaryIndex(int i) { return !isSecondaryIndex(i) ? ~i : i; }
// secondary entries hold invokedynamic call site bindings
public ConstantPoolCacheEntry getSecondaryEntryAt(int i) {
ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, decodeSecondaryIndex(i));
if (Assert.ASSERTS_ENABLED) {
Assert.that(e.isSecondaryEntry(), "must be a secondary entry");
}
return e;
}
public ConstantPoolCacheEntry getMainEntryAt(int i) {
if (isSecondaryIndex(i)) {
// run through an extra level of indirection:
i = getSecondaryEntryAt(i).getMainEntryIndex();
}
ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, i);
if (Assert.ASSERTS_ENABLED) {
Assert.that(!e.isSecondaryEntry(), "must not be a secondary entry");
}
return e;
}
public int getIntAt(int entry, int fld) { public int getIntAt(int entry, int fld) {
//alignObjectSize ? //alignObjectSize ?
long offset = baseOffset + /*alignObjectSize*/entry * elementSize + fld* getHeap().getIntSize(); long offset = baseOffset + /*alignObjectSize*/entry * elementSize + fld* getHeap().getIntSize();
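The secondary-index helpers above encode an invokedynamic cache index as its bitwise complement, so it is always negative and cannot be mistaken for a main index. A standalone sketch of that round trip:

// Illustrative only: the same encode/decode logic as the static helpers added above.
public class SecondaryIndexDemo {
    static boolean isSecondaryIndex(int i) { return i < 0; }
    static int decodeSecondaryIndex(int i) { return isSecondaryIndex(i) ? ~i : i; }
    static int encodeSecondaryIndex(int i) { return !isSecondaryIndex(i) ? ~i : i; }

    public static void main(String[] args) {
        int raw = 5;
        int encoded = encodeSecondaryIndex(raw);   // ~5 == -6
        System.out.println(encoded + " " + isSecondaryIndex(encoded) + " " + decodeSecondaryIndex(encoded));
    }
}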

View File

@ -28,6 +28,7 @@ import java.util.*;
import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*; import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public class ConstantPoolCacheEntry { public class ConstantPoolCacheEntry {
private static long size; private static long size;
@ -67,9 +68,23 @@ public class ConstantPoolCacheEntry {
} }
public int getConstantPoolIndex() { public int getConstantPoolIndex() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(!isSecondaryEntry(), "must not be a secondary CP entry");
}
return (int) (getIndices() & 0xFFFF); return (int) (getIndices() & 0xFFFF);
} }
public boolean isSecondaryEntry() {
return (getIndices() & 0xFFFF) == 0;
}
public int getMainEntryIndex() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(isSecondaryEntry(), "must be a secondary CP entry");
}
return (int) (getIndices() >>> 16);
}
private long getIndices() { private long getIndices() {
return cp.getHandle().getCIntegerAt(indices.getOffset() + offset, indices.getSize(), indices.isUnsigned()); return cp.getHandle().getCIntegerAt(indices.getOffset() + offset, indices.getSize(), indices.isUnsigned());
} }

View File

@ -566,6 +566,7 @@ public class GenerateOopMap {
case Bytecodes._invokespecial: case Bytecodes._invokespecial:
case Bytecodes._invokestatic: case Bytecodes._invokestatic:
case Bytecodes._invokeinterface: case Bytecodes._invokeinterface:
case Bytecodes._invokedynamic:
// FIXME: print signature of referenced method (need more // FIXME: print signature of referenced method (need more
// accessors in ConstantPool and ConstantPoolCache) // accessors in ConstantPool and ConstantPoolCache)
int idx = currentBC.getIndexBig(); int idx = currentBC.getIndexBig();
@ -605,6 +606,7 @@ public class GenerateOopMap {
case Bytecodes._invokespecial: case Bytecodes._invokespecial:
case Bytecodes._invokestatic: case Bytecodes._invokestatic:
case Bytecodes._invokeinterface: case Bytecodes._invokeinterface:
case Bytecodes._invokedynamic:
// FIXME: print signature of referenced method (need more // FIXME: print signature of referenced method (need more
// accessors in ConstantPool and ConstantPoolCache) // accessors in ConstantPool and ConstantPoolCache)
int idx = currentBC.getIndexBig(); int idx = currentBC.getIndexBig();
@ -1134,6 +1136,7 @@ public class GenerateOopMap {
case Bytecodes._invokespecial: case Bytecodes._invokespecial:
case Bytecodes._invokestatic: case Bytecodes._invokestatic:
case Bytecodes._invokeinterface: case Bytecodes._invokeinterface:
case Bytecodes._invokedynamic:
_itr_send = itr; _itr_send = itr;
_report_result_for_send = true; _report_result_for_send = true;
break; break;
@ -1379,6 +1382,7 @@ public class GenerateOopMap {
case Bytecodes._invokevirtual: case Bytecodes._invokevirtual:
case Bytecodes._invokespecial: doMethod(false, false, itr.getIndexBig(), itr.bci()); break; case Bytecodes._invokespecial: doMethod(false, false, itr.getIndexBig(), itr.bci()); break;
case Bytecodes._invokestatic: doMethod(true, false, itr.getIndexBig(), itr.bci()); break; case Bytecodes._invokestatic: doMethod(true, false, itr.getIndexBig(), itr.bci()); break;
case Bytecodes._invokedynamic: doMethod(false, true, itr.getIndexBig(), itr.bci()); break;
case Bytecodes._invokeinterface: doMethod(false, true, itr.getIndexBig(), itr.bci()); break; case Bytecodes._invokeinterface: doMethod(false, true, itr.getIndexBig(), itr.bci()); break;
case Bytecodes._newarray: case Bytecodes._newarray:
case Bytecodes._anewarray: ppNewRef(vCTS, itr.bci()); break; case Bytecodes._anewarray: ppNewRef(vCTS, itr.bci()); break;
@ -1725,7 +1729,7 @@ public class GenerateOopMap {
void doMethod (boolean is_static, boolean is_interface, int idx, int bci) { void doMethod (boolean is_static, boolean is_interface, int idx, int bci) {
// Dig up signature for field in constant pool // Dig up signature for field in constant pool
ConstantPool cp = _method.getConstants(); ConstantPool cp = _method.getConstants();
int nameAndTypeIdx = cp.getNameAndTypeRefIndexAt(idx); int nameAndTypeIdx = cp.getTagAt(idx).isNameAndType() ? idx : cp.getNameAndTypeRefIndexAt(idx);
int signatureIdx = cp.getSignatureRefIndexAt(nameAndTypeIdx); int signatureIdx = cp.getSignatureRefIndexAt(nameAndTypeIdx);
Symbol signature = cp.getSymbolAt(signatureIdx); Symbol signature = cp.getSymbolAt(signatureIdx);

View File

@ -40,6 +40,20 @@ public interface ClassConstants
public static final int JVM_CONSTANT_Methodref = 10; public static final int JVM_CONSTANT_Methodref = 10;
public static final int JVM_CONSTANT_InterfaceMethodref = 11; public static final int JVM_CONSTANT_InterfaceMethodref = 11;
public static final int JVM_CONSTANT_NameAndType = 12; public static final int JVM_CONSTANT_NameAndType = 12;
public static final int JVM_CONSTANT_MethodHandle = 15;
public static final int JVM_CONSTANT_MethodType = 16;
public static final int JVM_CONSTANT_InvokeDynamic = 17;
// JVM_CONSTANT_MethodHandle subtypes
public static final int JVM_REF_getField = 1;
public static final int JVM_REF_getStatic = 2;
public static final int JVM_REF_putField = 3;
public static final int JVM_REF_putStatic = 4;
public static final int JVM_REF_invokeVirtual = 5;
public static final int JVM_REF_invokeStatic = 6;
public static final int JVM_REF_invokeSpecial = 7;
public static final int JVM_REF_newInvokeSpecial = 8;
public static final int JVM_REF_invokeInterface = 9;
// HotSpot specific constant pool constant types. // HotSpot specific constant pool constant types.

View File

@ -54,14 +54,34 @@ public class ByteCodeRewriter
} }
protected short getConstantPoolIndex(int bci) { protected short getConstantPoolIndex(int rawcode, int bci) {
// get ConstantPool index from ConstantPoolCacheIndex at given bci // get ConstantPool index from ConstantPoolCacheIndex at given bci
short cpCacheIndex = method.getBytecodeShortArg(bci); String fmt = Bytecodes.format(rawcode);
int cpCacheIndex;
switch (fmt.length()) {
case 2: cpCacheIndex = method.getBytecodeByteArg(bci); break;
case 3: cpCacheIndex = method.getBytecodeShortArg(bci); break;
case 5:
if (fmt.indexOf("__") >= 0)
cpCacheIndex = method.getBytecodeShortArg(bci);
else
cpCacheIndex = method.getBytecodeIntArg(bci);
break;
default: throw new IllegalArgumentException();
}
if (cpCache == null) { if (cpCache == null) {
return cpCacheIndex; return (short) cpCacheIndex;
} else { } else if (fmt.indexOf("JJJJ") >= 0) {
// change byte-ordering and go via secondary cache entry
return (short) cpCache.getMainEntryAt(bytes.swapInt(cpCacheIndex)).getConstantPoolIndex();
} else if (fmt.indexOf("JJ") >= 0) {
// change byte-ordering and go via cache // change byte-ordering and go via cache
return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort(cpCacheIndex))).getConstantPoolIndex(); return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex();
} else if (fmt.indexOf("j") >= 0) {
// go via cache
return (short) cpCache.getEntryAt((int) (0xFF & cpCacheIndex)).getConstantPoolIndex();
} else {
return (short) cpCacheIndex;
} }
} }
@ -100,10 +120,31 @@ public class ByteCodeRewriter
case Bytecodes._invokespecial: case Bytecodes._invokespecial:
case Bytecodes._invokestatic: case Bytecodes._invokestatic:
case Bytecodes._invokeinterface: { case Bytecodes._invokeinterface: {
cpoolIndex = getConstantPoolIndex(bci + 1); cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
writeShort(code, bci + 1, cpoolIndex); writeShort(code, bci + 1, cpoolIndex);
break; break;
} }
case Bytecodes._invokedynamic:
cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
writeShort(code, bci + 1, cpoolIndex);
writeShort(code, bci + 3, (short)0); // clear out trailing bytes
break;
case Bytecodes._ldc_w:
if (hotspotcode != bytecode) {
// fast_aldc_w puts constant in CP cache
cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
writeShort(code, bci + 1, cpoolIndex);
}
break;
case Bytecodes._ldc:
if (hotspotcode != bytecode) {
// fast_aldc puts constant in CP cache
cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
code[bci + 1] = (byte)(cpoolIndex);
}
break;
} }
len = Bytecodes.lengthFor(bytecode); len = Bytecodes.lengthFor(bytecode);
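A hedged sketch of the operand-width dispatch above: the rewritten constant reference is one, two, or four bytes wide depending on the length of the bytecode's format string, with "JJJJ" (invokedynamic) resolved through a secondary cache entry and "j" (fast_aldc) through a one-byte cache index. The format strings in the demo are assumptions for illustration, not quoted from Bytecodes.format():

class OperandWidthSketch {
    static int operandBytes(String fmt) {
        switch (fmt.length()) {
            case 2:  return 1;                           // one-byte CP cache index
            case 3:  return 2;                           // two-byte index
            case 5:  return fmt.contains("__") ? 2 : 4;  // four-byte index unless padded
            default: throw new IllegalArgumentException(fmt);
        }
    }
    public static void main(String[] args) {
        System.out.println(operandBytes("bj"));     // 1, e.g. a fast ldc form
        System.out.println(operandBytes("bJJ"));    // 2, e.g. invokevirtual
        System.out.println(operandBytes("bJJJJ"));  // 4, e.g. invokedynamic
    }
}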

View File

@ -61,10 +61,12 @@ public class ClassWriter implements /* imports */ ClassConstants
protected short _signatureIndex; protected short _signatureIndex;
protected static int extractHighShortFromInt(int val) { protected static int extractHighShortFromInt(int val) {
// must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
return (val >> 16) & 0xFFFF; return (val >> 16) & 0xFFFF;
} }
protected static int extractLowShortFromInt(int val) { protected static int extractLowShortFromInt(int val) {
// must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc.
return val & 0xFFFF; return val & 0xFFFF;
} }
@ -297,6 +299,37 @@ public class ClassWriter implements /* imports */ ClassConstants
+ ", type = " + signatureIndex); + ", type = " + signatureIndex);
break; break;
} }
case JVM_CONSTANT_MethodHandle: {
dos.writeByte(cpConstType);
int value = cpool.getIntAt(ci);
short refKind = (short) extractLowShortFromInt(value);
short refIndex = (short) extractHighShortFromInt(value);
dos.writeShort(refKind);
dos.writeShort(refIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = MH ref kind = " +
refKind + ", ref index = " + refIndex);
break;
}
case JVM_CONSTANT_MethodType: {
dos.writeByte(cpConstType);
int value = cpool.getIntAt(ci);
short refIndex = (short) value;
dos.writeShort(refIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = MT index = " + refIndex);
break;
}
case JVM_CONSTANT_InvokeDynamic: {
dos.writeByte(cpConstType);
int value = cpool.getIntAt(ci);
short refIndex = (short) value;
dos.writeShort(refIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = indy index = " + refIndex);
break;
}
default: default:
throw new InternalError("Unknown tag: " + cpConstType); throw new InternalError("Unknown tag: " + cpConstType);
} // switch } // switch
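A worked example of the low/high split used by the new JSR 292 cases, with a made-up packed value; HotSpot stores two 16-bit indices in a single 32-bit constant pool slot:

class LowHighSketch {
    public static void main(String[] args) {
        int value = (42 << 16) | 7;                   // hypothetical packed slot
        System.out.println(value & 0xFFFF);           // low short: 7
        System.out.println((value >> 16) & 0xFFFF);   // high short: 42
    }
}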

View File

@ -572,6 +572,21 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.cell(Integer.toString(cpool.getIntAt(index))); buf.cell(Integer.toString(cpool.getIntAt(index)));
break; break;
case JVM_CONSTANT_MethodHandle:
buf.cell("JVM_CONSTANT_MethodHandle");
buf.cell(genLowHighShort(cpool.getIntAt(index)));
break;
case JVM_CONSTANT_MethodType:
buf.cell("JVM_CONSTANT_MethodType");
buf.cell(Integer.toString(cpool.getIntAt(index)));
break;
case JVM_CONSTANT_InvokeDynamic:
buf.cell("JVM_CONSTANT_InvokeDynamic");
buf.cell(genLowHighShort(cpool.getIntAt(index)));
break;
default: default:
throw new InternalError("unknown tag: " + ctag); throw new InternalError("unknown tag: " + ctag);
} }

View File

@ -38,12 +38,27 @@ public class ConstantTag {
private static int JVM_CONSTANT_Methodref = 10; private static int JVM_CONSTANT_Methodref = 10;
private static int JVM_CONSTANT_InterfaceMethodref = 11; private static int JVM_CONSTANT_InterfaceMethodref = 11;
private static int JVM_CONSTANT_NameAndType = 12; private static int JVM_CONSTANT_NameAndType = 12;
private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292
private static int JVM_CONSTANT_MethodType = 16; // JSR 292
private static int JVM_CONSTANT_InvokeDynamic = 17; // JSR 292
private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization
private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use
private static int JVM_CONSTANT_ClassIndex = 101; // Temporary tag while constructing constant pool private static int JVM_CONSTANT_ClassIndex = 101; // Temporary tag while constructing constant pool
private static int JVM_CONSTANT_UnresolvedString = 102; // Temporary tag until actual use private static int JVM_CONSTANT_UnresolvedString = 102; // Temporary tag until actual use
private static int JVM_CONSTANT_StringIndex = 103; // Temporary tag while constructing constant pool private static int JVM_CONSTANT_StringIndex = 103; // Temporary tag while constructing constant pool
private static int JVM_CONSTANT_UnresolvedClassInError = 104; // Resolution failed private static int JVM_CONSTANT_UnresolvedClassInError = 104; // Resolution failed
private static int JVM_CONSTANT_Object = 105; // Required for BoundMethodHandle arguments.
// JVM_CONSTANT_MethodHandle subtypes //FIXME: connect these to data structure
private static int JVM_REF_getField = 1;
private static int JVM_REF_getStatic = 2;
private static int JVM_REF_putField = 3;
private static int JVM_REF_putStatic = 4;
private static int JVM_REF_invokeVirtual = 5;
private static int JVM_REF_invokeStatic = 6;
private static int JVM_REF_invokeSpecial = 7;
private static int JVM_REF_newInvokeSpecial = 8;
private static int JVM_REF_invokeInterface = 9;
private byte tag; private byte tag;
@ -62,6 +77,9 @@ public class ConstantTag {
public boolean isDouble() { return tag == JVM_CONSTANT_Double; } public boolean isDouble() { return tag == JVM_CONSTANT_Double; }
public boolean isNameAndType() { return tag == JVM_CONSTANT_NameAndType; } public boolean isNameAndType() { return tag == JVM_CONSTANT_NameAndType; }
public boolean isUtf8() { return tag == JVM_CONSTANT_Utf8; } public boolean isUtf8() { return tag == JVM_CONSTANT_Utf8; }
public boolean isMethodHandle() { return tag == JVM_CONSTANT_MethodHandle; }
public boolean isMethodType() { return tag == JVM_CONSTANT_MethodType; }
public boolean isInvokeDynamic() { return tag == JVM_CONSTANT_InvokeDynamic; }
public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; } public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; }
@ -73,6 +91,8 @@ public class ConstantTag {
public boolean isUnresolvedString() { return tag == JVM_CONSTANT_UnresolvedString; } public boolean isUnresolvedString() { return tag == JVM_CONSTANT_UnresolvedString; }
public boolean isStringIndex() { return tag == JVM_CONSTANT_StringIndex; } public boolean isStringIndex() { return tag == JVM_CONSTANT_StringIndex; }
public boolean isObject() { return tag == JVM_CONSTANT_Object; }
public boolean isKlassReference() { return isKlassIndex() || isUnresolvedKlass(); } public boolean isKlassReference() { return isKlassIndex() || isUnresolvedKlass(); }
public boolean isFieldOrMethod() { return isField() || isMethod() || isInterfaceMethod(); } public boolean isFieldOrMethod() { return isField() || isMethod() || isInterfaceMethod(); }
public boolean isSymbol() { return isUtf8(); } public boolean isSymbol() { return isUtf8(); }
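A hypothetical use of the new predicates, in the style of the SA code in this change; ConstantPool, ConstantTag and the getLength()/getTagAt() accessors are assumed to be the existing SA API:

// Sketch only: prints the indices of JSR 292 constants in a constant pool.
static void printJsr292Constants(ConstantPool cp) {
    for (int i = 1; i < cp.getLength(); i++) {
        ConstantTag tag = cp.getTagAt(i);
        if (tag.isMethodHandle() || tag.isMethodType() || tag.isInvokeDynamic()) {
            System.out.println("JSR 292 constant at CP[" + i + "]");
        }
    }
}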

View File

@ -825,6 +825,8 @@ function jdis(method) {
} }
writeln(""); writeln("");
disAsm.decode(new sapkg.interpreter.BytecodeVisitor() { disAsm.decode(new sapkg.interpreter.BytecodeVisitor() {
prologue: function(method) { },
epilogue: function() { },
visit: function(bytecode) { visit: function(bytecode) {
if (hasLines) { if (hasLines) {
var line = method.getLineNumberFromBCI(bci); var line = method.getLineNumberFromBCI(bci);

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2010
HS_MAJOR_VER=19 HS_MAJOR_VER=19
HS_MINOR_VER=0 HS_MINOR_VER=0
HS_BUILD_NUMBER=02 HS_BUILD_NUMBER=04
JDK_MAJOR_VER=1 JDK_MAJOR_VER=1
JDK_MINOR_VER=7 JDK_MINOR_VER=7

View File

@ -138,7 +138,11 @@ ADLCFLAGS += -q -T
# Normally, debugging is done directly on the ad_<arch>*.cpp files. # Normally, debugging is done directly on the ad_<arch>*.cpp files.
# But -g will put #line directives in those files pointing back to <arch>.ad. # But -g will put #line directives in those files pointing back to <arch>.ad.
# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives
# so skip it for 3.2 and earlier.
ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
ADLCFLAGS += -g ADLCFLAGS += -g
endif
ifdef LP64 ifdef LP64
ADLCFLAGS += -D_LP64 ADLCFLAGS += -D_LP64
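The expr test above reduces to a simple version predicate; a small sketch (class and method names are illustrative):

class AdlcDebugFlagSketch {
    // True means ADLCFLAGS gets -g; gcc 3.2 and earlier are skipped.
    static boolean adlcDebugEnabled(int gccMajor, int gccMinor) {
        return gccMajor > 3 || (gccMajor == 3 && gccMinor >= 3);
    }
    public static void main(String[] args) {
        System.out.println(adlcDebugEnabled(3, 2)); // false
        System.out.println(adlcDebugEnabled(3, 3)); // true
        System.out.println(adlcDebugEnabled(4, 4)); // true
    }
}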

View File

@ -40,6 +40,9 @@ GENERATED = $(TOPDIR)/../generated
# tools.jar is needed by the JDI - SA binding # tools.jar is needed by the JDI - SA binding
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
# TODO: if it's a modules image, check if SA module is installed.
MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
# gnumake 3.78.1 does not accept the *s that # gnumake 3.78.1 does not accept the *s that
# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them # are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1)) AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
@ -65,7 +68,7 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
exit 1; \ exit 1; \
fi fi
$(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \ $(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\ echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
echo ""; \ echo ""; \
exit 1; \ exit 1; \

View File

@ -36,6 +36,9 @@ GENERATED = ../generated
# tools.jar is needed by the JDI - SA binding # tools.jar is needed by the JDI - SA binding
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
# TODO: if it's a modules image, check if SA module is installed.
MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
# gnumake 3.78.1 does not accept the *s that # gnumake 3.78.1 does not accept the *s that
# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them # are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1)) AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
@ -59,7 +62,7 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
exit 1; \ exit 1; \
fi fi
$(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \ $(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\ echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
echo ""; \ echo ""; \
exit 1; \ exit 1; \

View File

@ -32,6 +32,17 @@ SLASH_JAVA ?= J:
PATH_SEP = ; PATH_SEP = ;
# Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name # Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name
ifeq ($(ARCH_DATA_MODEL),32)
ARCH_DATA_MODEL=32
PLATFORM=windows-i586
VM_PLATFORM=windows_i486
HS_ARCH=x86
MAKE_ARGS += ARCH=x86
MAKE_ARGS += BUILDARCH=i486
MAKE_ARGS += Platform_arch=x86
MAKE_ARGS += Platform_arch_model=x86_32
endif
ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) x86),) ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) x86),)
ARCH_DATA_MODEL=32 ARCH_DATA_MODEL=32
PLATFORM=windows-i586 PLATFORM=windows-i586
@ -43,43 +54,57 @@ ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) x86),)
MAKE_ARGS += Platform_arch_model=x86_32 MAKE_ARGS += Platform_arch_model=x86_32
endif endif
ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) ia64),) ifneq ($(ARCH_DATA_MODEL),32)
ARCH_DATA_MODEL=64 ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) ia64),)
PLATFORM=windows-ia64 ARCH_DATA_MODEL=64
VM_PLATFORM=windows_ia64 PLATFORM=windows-ia64
HS_ARCH=ia64 VM_PLATFORM=windows_ia64
MAKE_ARGS += LP64=1 HS_ARCH=ia64
MAKE_ARGS += ARCH=ia64 MAKE_ARGS += LP64=1
MAKE_ARGS += BUILDARCH=ia64 MAKE_ARGS += ARCH=ia64
MAKE_ARGS += Platform_arch=ia64 MAKE_ARGS += BUILDARCH=ia64
MAKE_ARGS += Platform_arch_model=ia64 MAKE_ARGS += Platform_arch=ia64
endif MAKE_ARGS += Platform_arch_model=ia64
endif
# http://support.microsoft.com/kb/888731 : this can be either # http://support.microsoft.com/kb/888731 : this can be either
# AMD64 for AMD, or EM64T for Intel chips. # AMD64 for AMD, or EM64T for Intel chips.
ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),) ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),)
ARCH_DATA_MODEL=64 ARCH_DATA_MODEL=64
PLATFORM=windows-amd64 PLATFORM=windows-amd64
VM_PLATFORM=windows_amd64 VM_PLATFORM=windows_amd64
HS_ARCH=x86 HS_ARCH=x86
MAKE_ARGS += LP64=1 MAKE_ARGS += LP64=1
MAKE_ARGS += ARCH=x86 MAKE_ARGS += ARCH=x86
MAKE_ARGS += BUILDARCH=amd64 MAKE_ARGS += BUILDARCH=amd64
MAKE_ARGS += Platform_arch=x86 MAKE_ARGS += Platform_arch=x86
MAKE_ARGS += Platform_arch_model=x86_64 MAKE_ARGS += Platform_arch_model=x86_64
endif endif
ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) EM64T),)
ARCH_DATA_MODEL=64
PLATFORM=windows-amd64
VM_PLATFORM=windows_amd64
HS_ARCH=x86
MAKE_ARGS += LP64=1
MAKE_ARGS += ARCH=x86
MAKE_ARGS += BUILDARCH=amd64
MAKE_ARGS += Platform_arch=x86
MAKE_ARGS += Platform_arch_model=x86_64
endif
# NB: OS versions later than 2003 may report "Intel64"
ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) "EM64T\|Intel64"),) ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) Intel64),)
ARCH_DATA_MODEL=64 ARCH_DATA_MODEL=64
PLATFORM=windows-amd64 PLATFORM=windows-amd64
VM_PLATFORM=windows_amd64 VM_PLATFORM=windows_amd64
HS_ARCH=x86 HS_ARCH=x86
MAKE_ARGS += LP64=1 MAKE_ARGS += LP64=1
MAKE_ARGS += ARCH=x86 MAKE_ARGS += ARCH=x86
MAKE_ARGS += BUILDARCH=amd64 MAKE_ARGS += BUILDARCH=amd64
MAKE_ARGS += Platform_arch=x86 MAKE_ARGS += Platform_arch=x86
MAKE_ARGS += Platform_arch_model=x86_64 MAKE_ARGS += Platform_arch_model=x86_64
endif
endif endif
JDK_INCLUDE_SUBDIR=win32 JDK_INCLUDE_SUBDIR=win32

View File

@ -1007,9 +1007,9 @@ class StubGenerator: public StubCodeGenerator {
__ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp)); __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
__ delayed()->cmp(to_from, byte_count); __ delayed()->cmp(to_from, byte_count);
if (NOLp == NULL) if (NOLp == NULL)
__ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target); __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
else else
__ brx(Assembler::greaterEqual, false, Assembler::pt, (*NOLp)); __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
__ delayed()->nop(); __ delayed()->nop();
} }

View File

@ -318,6 +318,31 @@ void TemplateTable::ldc(bool wide) {
__ bind(exit); __ bind(exit);
} }
// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
transition(vtos, atos);
if (!EnableMethodHandles) {
// We should not encounter this bytecode if !EnableMethodHandles.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_IncompatibleClassChangeError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return;
}
Register Rcache = G3_scratch;
Register Rscratch = G4_scratch;
resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));
__ verify_oop(Otos_i);
}
void TemplateTable::ldc2_w() { void TemplateTable::ldc2_w() {
transition(vtos, vtos); transition(vtos, vtos);
Label retry, resolved, Long, exit; Label retry, resolved, Long, exit;
@ -1994,6 +2019,8 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
default : ShouldNotReachHere(); break; default : ShouldNotReachHere(); break;
} }
// first time invocation - must resolve first // first time invocation - must resolve first

View File

@ -375,6 +375,32 @@ void TemplateTable::ldc(bool wide) {
__ bind(Done); __ bind(Done);
} }
// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
transition(vtos, atos);
if (!EnableMethodHandles) {
// We should not encounter this bytecode if !EnableMethodHandles.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_IncompatibleClassChangeError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return;
}
const Register cache = rcx;
const Register index = rdx;
resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
if (VerifyOops) {
__ verify_oop(rax);
}
}
void TemplateTable::ldc2_w() { void TemplateTable::ldc2_w() {
transition(vtos, vtos); transition(vtos, vtos);
Label Long, Done; Label Long, Done;
@ -2055,6 +2081,8 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokestatic : // fall through
case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
default : ShouldNotReachHere(); break; default : ShouldNotReachHere(); break;
} }
__ movl(temp, (int)bytecode()); __ movl(temp, (int)bytecode());

View File

@ -389,6 +389,32 @@ void TemplateTable::ldc(bool wide) {
__ bind(Done); __ bind(Done);
} }
// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
transition(vtos, atos);
if (!EnableMethodHandles) {
// We should not encounter this bytecode if !EnableMethodHandles.
// The verifier will stop it. However, if we get past the verifier,
// this will stop the thread in a reasonable way, without crashing the JVM.
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_IncompatibleClassChangeError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return;
}
const Register cache = rcx;
const Register index = rdx;
resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
if (VerifyOops) {
__ verify_oop(rax);
}
}
void TemplateTable::ldc2_w() { void TemplateTable::ldc2_w() {
transition(vtos, vtos); transition(vtos, vtos);
Label Long, Done; Label Long, Done;
@ -2063,6 +2089,12 @@ void TemplateTable::resolve_cache_and_index(int byte_no,
case Bytecodes::_invokedynamic: case Bytecodes::_invokedynamic:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
break; break;
case Bytecodes::_fast_aldc:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
break;
case Bytecodes::_fast_aldc_w:
entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
break; break;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ const char* VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, }; VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
static BufferBlob* stub_blob; static BufferBlob* stub_blob;
static const int stub_size = 300; static const int stub_size = 400;
extern "C" { extern "C" {
typedef void (*getPsrInfo_stub_t)(void*); typedef void (*getPsrInfo_stub_t)(void*);
@ -56,7 +56,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT); const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT); const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
Label detect_486, cpu486, detect_586, std_cpuid1; Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
Label ext_cpuid1, ext_cpuid5, done; Label ext_cpuid1, ext_cpuid5, done;
StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub"); StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
@ -131,13 +131,62 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx); __ movl(Address(rsi,12), rdx);
__ cmpl(rax, 3); // Is cpuid(0x4) supported? __ cmpl(rax, 0xa); // Is cpuid(0xB) supported?
__ jccb(Assembler::belowEqual, std_cpuid1); __ jccb(Assembler::belowEqual, std_cpuid4);
//
// cpuid(0xB) Processor Topology
//
__ movl(rax, 0xb);
__ xorl(rcx, rcx); // Threads level
__ cpuid();
__ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx);
__ movl(rax, 0xb);
__ movl(rcx, 1); // Cores level
__ cpuid();
__ push(rax);
__ andl(rax, 0x1f); // Determine if valid topology level
__ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level
__ andl(rax, 0xffff);
__ pop(rax);
__ jccb(Assembler::equal, std_cpuid4);
__ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx);
__ movl(rax, 0xb);
__ movl(rcx, 2); // Packages level
__ cpuid();
__ push(rax);
__ andl(rax, 0x1f); // Determine if valid topology level
__ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level
__ andl(rax, 0xffff);
__ pop(rax);
__ jccb(Assembler::equal, std_cpuid4);
__ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
__ movl(Address(rsi,12), rdx);
// //
// cpuid(0x4) Deterministic cache params // cpuid(0x4) Deterministic cache params
// //
__ bind(std_cpuid4);
__ movl(rax, 4); __ movl(rax, 4);
__ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
__ jccb(Assembler::greater, std_cpuid1);
__ xorl(rcx, rcx); // L1 cache __ xorl(rcx, rcx); // L1 cache
__ cpuid(); __ cpuid();
__ push(rax); __ push(rax);
@ -460,13 +509,18 @@ void VM_Version::get_processor_features() {
AllocatePrefetchDistance = allocate_prefetch_distance(); AllocatePrefetchDistance = allocate_prefetch_distance();
AllocatePrefetchStyle = allocate_prefetch_style(); AllocatePrefetchStyle = allocate_prefetch_style();
if( AllocatePrefetchStyle == 2 && is_intel() && if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
#ifdef _LP64 #ifdef _LP64
AllocatePrefetchDistance = 384; AllocatePrefetchDistance = 384;
#else #else
AllocatePrefetchDistance = 320; AllocatePrefetchDistance = 320;
#endif #endif
}
if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
AllocatePrefetchDistance = 192;
AllocatePrefetchLines = 4;
}
} }
assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value"); assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -114,6 +114,14 @@ public:
} bits; } bits;
}; };
union TplCpuidBEbx {
uint32_t value;
struct {
uint32_t logical_cpus : 16,
: 16;
} bits;
};
union ExtCpuid1Ecx { union ExtCpuid1Ecx {
uint32_t value; uint32_t value;
struct { struct {
@ -211,6 +219,25 @@ protected:
uint32_t dcp_cpuid4_ecx; // unused currently uint32_t dcp_cpuid4_ecx; // unused currently
uint32_t dcp_cpuid4_edx; // unused currently uint32_t dcp_cpuid4_edx; // unused currently
// cpuid function 0xB (processor topology)
// ecx = 0
uint32_t tpl_cpuidB0_eax;
TplCpuidBEbx tpl_cpuidB0_ebx;
uint32_t tpl_cpuidB0_ecx; // unused currently
uint32_t tpl_cpuidB0_edx; // unused currently
// ecx = 1
uint32_t tpl_cpuidB1_eax;
TplCpuidBEbx tpl_cpuidB1_ebx;
uint32_t tpl_cpuidB1_ecx; // unused currently
uint32_t tpl_cpuidB1_edx; // unused currently
// ecx = 2
uint32_t tpl_cpuidB2_eax;
TplCpuidBEbx tpl_cpuidB2_ebx;
uint32_t tpl_cpuidB2_ecx; // unused currently
uint32_t tpl_cpuidB2_edx; // unused currently
// cpuid function 0x80000000 // example, unused // cpuid function 0x80000000 // example, unused
uint32_t ext_max_function; uint32_t ext_max_function;
uint32_t ext_vendor_name_0; uint32_t ext_vendor_name_0;
@ -316,6 +343,9 @@ public:
static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); } static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); } static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); } static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
// Initialization // Initialization
static void initialize(); static void initialize();
@ -346,10 +376,22 @@ public:
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA' static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG' static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
static bool supports_processor_topology() {
return (_cpuid_info.std_max_function >= 0xB) &&
// eax[4:0] | ebx[0:15] == 0 indicates invalid topology level.
// Some cpus have max cpuid >= 0xB but do not support processor topology.
((_cpuid_info.tpl_cpuidB0_eax & 0x1f | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0);
}
static uint cores_per_cpu() { static uint cores_per_cpu() {
uint result = 1; uint result = 1;
if (is_intel()) { if (is_intel()) {
result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1); if (supports_processor_topology()) {
result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
_cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
} else {
result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
}
} else if (is_amd()) { } else if (is_amd()) {
result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1); result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
} }
@ -358,7 +400,9 @@ public:
static uint threads_per_core() { static uint threads_per_core() {
uint result = 1; uint result = 1;
if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) { if (is_intel() && supports_processor_topology()) {
result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
} else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu / result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
cores_per_cpu(); cores_per_cpu();
} }
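A worked example of the topology arithmetic above, using made-up cpuid leaf 0xB readings (not from a real machine): level 0 reports logical processors per core, level 1 reports logical processors per package, and a level is valid only if eax[4:0] | ebx[15:0] is non-zero:

class CpuTopologySketch {
    public static void main(String[] args) {
        int b0Eax = 1, b0LogicalCpus = 2;   // level 0: SMT, 2 logical cpus per core
        int b1LogicalCpus = 8;              // level 1: 8 logical cpus per package
        boolean valid = ((b0Eax & 0x1f) | (b0LogicalCpus & 0xffff)) != 0;
        int coresPerCpu    = b1LogicalCpus / b0LogicalCpus;  // 4
        int threadsPerCore = b0LogicalCpus;                  // 2
        System.out.println(valid + " " + coresPerCpu + " " + threadsPerCore);
    }
}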

View File

@ -820,7 +820,7 @@ int AbstractInterpreter::layout_activation(methodOop method,
bool is_top_frame) { bool is_top_frame) {
assert(popframe_extra_args == 0, "what to do?"); assert(popframe_extra_args == 0, "what to do?");
assert(!is_top_frame || (!callee_locals && !callee_param_count), assert(!is_top_frame || (!callee_locals && !callee_param_count),
"top frame should have no caller") "top frame should have no caller");
// This code must exactly match what InterpreterFrame::build // This code must exactly match what InterpreterFrame::build
// does (the full InterpreterFrame::build, that is, not the // does (the full InterpreterFrame::build, that is, not the

View File

@ -2079,9 +2079,9 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
static char saved_jvm_path[MAXPATHLEN] = {0}; static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so or libjvm_g.so // Find the full path to the current module, libjvm.so or libjvm_g.so
void os::jvm_path(char *buf, jint len) { void os::jvm_path(char *buf, jint buflen) {
// Error checking. // Error checking.
if (len < MAXPATHLEN) { if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer"); assert(false, "must use a large-enough buffer");
buf[0] = '\0'; buf[0] = '\0';
return; return;
@ -2117,6 +2117,9 @@ void os::jvm_path(char *buf, jint len) {
// Look for JAVA_HOME in the environment. // Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME"); char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != NULL && java_home_var[0] != 0) { if (java_home_var != NULL && java_home_var[0] != 0) {
char* jrelib_p;
int len;
// Check the current module name "libjvm.so" or "libjvm_g.so". // Check the current module name "libjvm.so" or "libjvm_g.so".
p = strrchr(buf, '/'); p = strrchr(buf, '/');
assert(strstr(p, "/libjvm") == p, "invalid library name"); assert(strstr(p, "/libjvm") == p, "invalid library name");
@ -2124,14 +2127,24 @@ void os::jvm_path(char *buf, jint len) {
if (realpath(java_home_var, buf) == NULL) if (realpath(java_home_var, buf) == NULL)
return; return;
sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
// determine if this is a legacy image or modules image
// modules image doesn't have "jre" subdirectory
len = strlen(buf);
jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
if (0 != access(buf, F_OK)) {
snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
}
if (0 == access(buf, F_OK)) { if (0 == access(buf, F_OK)) {
// Use current module name "libjvm[_g].so" instead of // Use current module name "libjvm[_g].so" instead of
// "libjvm"debug_only("_g")".so" since for fastdebug version // "libjvm"debug_only("_g")".so" since for fastdebug version
// we should have "libjvm.so" but debug_only("_g") adds "_g"! // we should have "libjvm.so" but debug_only("_g") adds "_g"!
// It is used when we are choosing the HPI library's name // It is used when we are choosing the HPI library's name
// "libhpi[_g].so" in hpi::initialize_get_interface(). // "libhpi[_g].so" in hpi::initialize_get_interface().
sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p); len = strlen(buf);
snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
} else { } else {
// Go back to path of .so // Go back to path of .so
if (realpath(dli_fname, buf) == NULL) if (realpath(dli_fname, buf) == NULL)
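A hedged sketch of the probe order the change introduces: a legacy image has &lt;java_home&gt;/jre/lib/&lt;arch&gt;, a modules image only &lt;java_home&gt;/lib/&lt;arch&gt;. The paths and class are illustrative, not taken from the sources:

import java.io.File;

class JvmPathProbeSketch {
    static File jvmLibDir(String javaHome, String cpuArch) {
        File legacy = new File(javaHome, "jre/lib/" + cpuArch);
        return legacy.exists() ? legacy : new File(javaHome, "lib/" + cpuArch);
    }
    public static void main(String[] args) {
        System.out.println(jvmLibDir("/opt/jdk7", "amd64")); // hypothetical JAVA_HOME
    }
}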

View File

@ -123,7 +123,7 @@
int set_interrupt_callback (Sync_Interrupt_Callback * cb); int set_interrupt_callback (Sync_Interrupt_Callback * cb);
void remove_interrupt_callback(Sync_Interrupt_Callback * cb); void remove_interrupt_callback(Sync_Interrupt_Callback * cb);
void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args); void do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
// *************************************************************** // ***************************************************************
// java.lang.Thread.interrupt state. // java.lang.Thread.interrupt state.

View File

@ -2435,6 +2435,8 @@ void os::jvm_path(char *buf, jint buflen) {
char* java_home_var = ::getenv("JAVA_HOME"); char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != NULL && java_home_var[0] != 0) { if (java_home_var != NULL && java_home_var[0] != 0) {
char cpu_arch[12]; char cpu_arch[12];
char* jrelib_p;
int len;
sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64 #ifdef _LP64
// If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9. // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
@ -2450,14 +2452,23 @@ void os::jvm_path(char *buf, jint buflen) {
p = strstr(p, "_g") ? "_g" : ""; p = strstr(p, "_g") ? "_g" : "";
realpath(java_home_var, buf); realpath(java_home_var, buf);
sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch); // determine if this is a legacy image or modules image
// modules image doesn't have "jre" subdirectory
len = strlen(buf);
jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
if (0 != access(buf, F_OK)) {
snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
}
if (0 == access(buf, F_OK)) { if (0 == access(buf, F_OK)) {
// Use current module name "libjvm[_g].so" instead of // Use current module name "libjvm[_g].so" instead of
// "libjvm"debug_only("_g")".so" since for fastdebug version // "libjvm"debug_only("_g")".so" since for fastdebug version
// we should have "libjvm.so" but debug_only("_g") adds "_g"! // we should have "libjvm.so" but debug_only("_g") adds "_g"!
// It is used when we are choosing the HPI library's name // It is used when we are choosing the HPI library's name
// "libhpi[_g].so" in hpi::initialize_get_interface(). // "libhpi[_g].so" in hpi::initialize_get_interface().
sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p); len = strlen(buf);
snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
} else { } else {
// Go back to path of .so // Go back to path of .so
realpath((char *)dlinfo.dli_fname, buf); realpath((char *)dlinfo.dli_fname, buf);

View File

@ -1,290 +0,0 @@
/*
* Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_vtune_windows.cpp.incl"
static int current_method_ID = 0;
// ------------- iJITProf.h -------------------
// defined by Intel -- do not change
#include "windows.h"
extern "C" {
enum iJITP_Event {
ExceptionOccurred_S, // Java exception
ExceptionOccurred_IDS,
Shutdown, // VM exit
ThreadCreate, // threads
ThreadDestroy,
ThreadSwitch,
ClassLoadStart, // class loading
ClassLoadEnd,
GCStart, // GC
GCEnd,
NMethodCreate = 13, // nmethod creation
NMethodDelete
// rest of event types omitted (call profiling not supported yet)
};
// version number -- 0 if VTune not installed
int WINAPI iJitP_VersionNumber();
enum iJITP_ModeFlags {
NoNotification = 0x0, // don't call vtune
NotifyNMethodCreate = 0x1, // notify NMethod_Create
NotifyNMethodDelete = 0x2, // notify NMethod_Create
NotifyMethodEnter = 0x4, // method entry
NotifyMethodExit = 0x8, // method exit
NotifyShutdown = 0x10, // VM exit
NotifyGC = 0x20, // GC
};
// call back function type
typedef void (WINAPI *ModeChangedFn)(iJITP_ModeFlags flags);
// ------------- VTune method interfaces ----------------------
typedef void (WINAPI *RegisterCallbackFn)(ModeChangedFn fn); // register callback
typedef int (WINAPI *NotifyEventFn)(iJITP_Event, void* event_data);
// specific event data structures
// data for NMethodCreate
struct VTuneObj { // base class for allocation
// (can't use CHeapObj -- has vtable ptr)
void* operator new(size_t size) { return os::malloc(size); }
void operator delete(void* p) { fatal("never delete VTune data"); }
};
struct LineNumberInfo : VTuneObj { // PC-to-line number mapping
unsigned long offset; // byte offset from start of method
unsigned long line_num; // corresponding line number
};
struct MethodLoadInfo : VTuneObj {
unsigned long methodID; // unique method ID
const char* name; // method name
unsigned long instr_start; // start address
unsigned long instr_size; // length in bytes
unsigned long line_number_size; // size of line number table
LineNumberInfo* line_number_table; // line number mapping
unsigned long classID; // unique class ID
char* class_file_name; // fully qualified class file name
char* source_file_name; // fully qualified source file name
MethodLoadInfo(nmethod* nm); // for real nmethods
MethodLoadInfo(const char* vm_name, address start, address end);
// for "nmethods" like stubs, interpreter, etc
};
// data for NMethodDelete
struct MethodInfo : VTuneObj {
unsigned long methodID; // unique method ID
unsigned long classID; // (added for convenience -- not part of Intel interface)
MethodInfo(methodOop m);
};
};
MethodInfo::MethodInfo(methodOop m) {
// just give it a new ID -- we're not compiling methods twice (usually)
// (and even if we did, one might want to see the two versions separately)
methodID = ++current_method_ID;
}
MethodLoadInfo::MethodLoadInfo(const char* vm_name, address start, address end) {
classID = 0;
methodID = ++current_method_ID;
name = vm_name;
instr_start = (unsigned long)start;
instr_size = end - start;
line_number_size = 0;
line_number_table = NULL;
class_file_name = source_file_name = "HotSpot JVM";
}
MethodLoadInfo::MethodLoadInfo(nmethod* nm) {
methodOop m = nm->method();
MethodInfo info(m);
classID = info.classID;
methodID = info.methodID;
name = strdup(m->name()->as_C_string());
instr_start = (unsigned long)nm->instructions_begin();
instr_size = nm->code_size();
line_number_size = 0;
line_number_table = NULL;
klassOop kl = m->method_holder();
char* class_name = Klass::cast(kl)->name()->as_C_string();
char* file_name = NEW_C_HEAP_ARRAY(char, strlen(class_name) + 1);
strcpy(file_name, class_name);
class_file_name = file_name;
char* src_name = NEW_C_HEAP_ARRAY(char, strlen(class_name) + strlen(".java") + 1);
strcpy(src_name, class_name);
strcat(src_name, ".java");
source_file_name = src_name;
}
// --------------------- DLL loading functions ------------------------
#define DLLNAME "iJitProf.dll"
static HINSTANCE load_lib(char* name) {
HINSTANCE lib = NULL;
HKEY hk;
// try to get VTune directory from the registry
if (RegOpenKey(HKEY_CURRENT_USER, "Software\\VB and VBA Program Settings\\VTune\\StartUp", &hk) == ERROR_SUCCESS) {
for (int i = 0; true; i++) {
char szName[MAX_PATH + 1];
char szVal [MAX_PATH + 1];
DWORD cbName, cbVal;
cbName = cbVal = MAX_PATH + 1;
if (RegEnumValue(hk, i, szName, &cbName, NULL, NULL, (LPBYTE)szVal, &cbVal) == ERROR_SUCCESS) {
// get VTune directory
if (!strcmp(szName, name)) {
char*p = szVal;
while (*p == ' ') p++; // trim
char* q = p + strlen(p) - 1;
while (*q == ' ') *(q--) = '\0';
// chdir to the VTune dir
GetCurrentDirectory(MAX_PATH + 1, szName);
SetCurrentDirectory(p);
// load lib
lib = LoadLibrary(strcat(strcat(p, "\\"), DLLNAME));
if (lib != NULL && WizardMode) tty->print_cr("*loaded VTune DLL %s", p);
// restore current dir
SetCurrentDirectory(szName);
break;
}
} else {
break;
}
}
}
return lib;
}
static RegisterCallbackFn iJIT_RegisterCallback = NULL;
static NotifyEventFn iJIT_NotifyEvent = NULL;
static bool load_iJIT_funcs() {
// first try to load from PATH
HINSTANCE lib = LoadLibrary(DLLNAME);
if (lib != NULL && WizardMode) tty->print_cr("*loaded VTune DLL %s via PATH", DLLNAME);
// if not successful, try to look in the VTUNE directory
if (lib == NULL) lib = load_lib("VTUNEDIR30");
if (lib == NULL) lib = load_lib("VTUNEDIR25");
if (lib == NULL) lib = load_lib("VTUNEDIR");
if (lib == NULL) return false; // unsuccessful
// try to load the functions
iJIT_RegisterCallback = (RegisterCallbackFn)GetProcAddress(lib, "iJIT_RegisterCallback");
iJIT_NotifyEvent = (NotifyEventFn) GetProcAddress(lib, "iJIT_NotifyEvent");
if (!iJIT_RegisterCallback) tty->print_cr("*couldn't find VTune entry point iJIT_RegisterCallback");
if (!iJIT_NotifyEvent) tty->print_cr("*couldn't find VTune entry point iJIT_NotifyEvent");
return iJIT_RegisterCallback != NULL && iJIT_NotifyEvent != NULL;
}
// --------------------- VTune class ------------------------
static bool active = false;
static int flags = 0;
void VTune::start_GC() {
if (active && (flags & NotifyGC)) iJIT_NotifyEvent(GCStart, NULL);
}
void VTune::end_GC() {
if (active && (flags & NotifyGC)) iJIT_NotifyEvent(GCEnd, NULL);
}
void VTune::start_class_load() {
// not yet implemented in VTune
}
void VTune::end_class_load() {
// not yet implemented in VTune
}
void VTune::exit() {
if (active && (flags & NotifyShutdown)) iJIT_NotifyEvent(Shutdown, NULL);
}
void VTune::register_stub(const char* name, address start, address end) {
if (flags & NotifyNMethodCreate) {
MethodLoadInfo* info = new MethodLoadInfo(name, start, end);
if (PrintMiscellaneous && WizardMode && Verbose) {
tty->print_cr("NMethodCreate %s (%d): %#x..%#x", info->name, info->methodID,
info->instr_start, info->instr_start + info->instr_size);
}
iJIT_NotifyEvent(NMethodCreate, info);
}
}
void VTune::create_nmethod(nmethod* nm) {
if (flags & NotifyNMethodCreate) {
MethodLoadInfo* info = new MethodLoadInfo(nm);
if (PrintMiscellaneous && WizardMode && Verbose) {
tty->print_cr("NMethodCreate %s (%d): %#x..%#x", info->name, info->methodID,
info->instr_start, info->instr_start + info->instr_size);
}
iJIT_NotifyEvent(NMethodCreate, info);
}
}
void VTune::delete_nmethod(nmethod* nm) {
if (flags & NotifyNMethodDelete) {
MethodInfo* info = new MethodInfo(nm->method());
iJIT_NotifyEvent(NMethodDelete, info);
}
}
static void set_flags(int new_flags) {
flags = new_flags;
// if (WizardMode) tty->print_cr("*new VTune flags: %#x", flags);
}
void vtune_init() {
if (!UseVTune) return;
active = load_iJIT_funcs();
if (active) {
iJIT_RegisterCallback((ModeChangedFn)set_flags);
} else {
assert(flags == 0, "flags shouldn't be set");
}
}

View File

@ -26,7 +26,7 @@ static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64 #ifdef AMD64
(void)memmove(to, from, count * HeapWordSize); (void)memmove(to, from, count * HeapWordSize);
#else #else
// Same as pd_aligned_conjoint_words, except includes a zero-count check. // Includes a zero-count check.
intx temp; intx temp;
__asm__ volatile(" testl %6,%6 ;" __asm__ volatile(" testl %6,%6 ;"
" jz 7f ;" " jz 7f ;"
@ -84,7 +84,7 @@ static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
break; break;
} }
#else #else
// Same as pd_aligned_disjoint_words, except includes a zero-count check. // Includes a zero-count check.
intx temp; intx temp;
__asm__ volatile(" testl %6,%6 ;" __asm__ volatile(" testl %6,%6 ;"
" jz 3f ;" " jz 3f ;"
@ -130,75 +130,18 @@ static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count)
} }
static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64 pd_conjoint_words(from, to, count);
(void)memmove(to, from, count * HeapWordSize);
#else
// Same as pd_conjoint_words, except no zero-count check.
intx temp;
__asm__ volatile(" cmpl %4,%5 ;"
" leal -4(%4,%6,4),%3;"
" jbe 1f ;"
" cmpl %7,%5 ;"
" jbe 4f ;"
"1: cmpl $32,%6 ;"
" ja 3f ;"
" subl %4,%1 ;"
"2: movl (%4),%3 ;"
" movl %7,(%5,%4,1) ;"
" addl $4,%0 ;"
" subl $1,%2 ;"
" jnz 2b ;"
" jmp 7f ;"
"3: rep; smovl ;"
" jmp 7f ;"
"4: cmpl $32,%2 ;"
" movl %7,%0 ;"
" leal -4(%5,%6,4),%1;"
" ja 6f ;"
" subl %4,%1 ;"
"5: movl (%4),%3 ;"
" movl %7,(%5,%4,1) ;"
" subl $4,%0 ;"
" subl $1,%2 ;"
" jnz 5b ;"
" jmp 7f ;"
"6: std ;"
" rep; smovl ;"
" cld ;"
"7: nop "
: "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
: "0" (from), "1" (to), "2" (count), "3" (temp)
: "memory", "flags");
#endif // AMD64
} }
static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
pd_disjoint_words(from, to, count); pd_disjoint_words(from, to, count);
#else
// Same as pd_disjoint_words, except no zero-count check.
intx temp;
__asm__ volatile(" cmpl $32,%6 ;"
" ja 2f ;"
" subl %4,%1 ;"
"1: movl (%4),%3 ;"
" movl %7,(%5,%4,1);"
" addl $4,%0 ;"
" subl $1,%2 ;"
" jnz 1b ;"
" jmp 3f ;"
"2: rep; smovl ;"
"3: nop "
: "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
: "0" (from), "1" (to), "2" (count), "3" (temp)
: "memory", "cc");
#endif // AMD64
} }
static void pd_conjoint_bytes(void* from, void* to, size_t count) { static void pd_conjoint_bytes(void* from, void* to, size_t count) {
#ifdef AMD64 #ifdef AMD64
(void)memmove(to, from, count); (void)memmove(to, from, count);
#else #else
// Includes a zero-count check.
intx temp; intx temp;
__asm__ volatile(" testl %6,%6 ;" __asm__ volatile(" testl %6,%6 ;"
" jz 13f ;" " jz 13f ;"

View File

@ -121,10 +121,10 @@ cb_CopyRight:
jnz 3b jnz 3b
addl %esi,%edi addl %esi,%edi
4: movl %eax,%ecx # byte count less prefix 4: movl %eax,%ecx # byte count less prefix
andl $3,%ecx # suffix byte count 5: andl $3,%ecx # suffix byte count
jz 7f # no suffix jz 7f # no suffix
# copy suffix # copy suffix
5: xorl %eax,%eax xorl %eax,%eax
6: movb (%esi,%eax,1),%dl 6: movb (%esi,%eax,1),%dl
movb %dl,(%edi,%eax,1) movb %dl,(%edi,%eax,1)
addl $1,%eax addl $1,%eax
@ -159,10 +159,10 @@ cb_CopyLeft:
# copy dwords, aligned or not # copy dwords, aligned or not
3: rep; smovl 3: rep; smovl
4: movl %eax,%ecx # byte count 4: movl %eax,%ecx # byte count
andl $3,%ecx # suffix byte count 5: andl $3,%ecx # suffix byte count
jz 7f # no suffix jz 7f # no suffix
# copy suffix # copy suffix
5: subl %esi,%edi subl %esi,%edi
addl $3,%esi addl $3,%esi
6: movb (%esi),%dl 6: movb (%esi),%dl
movb %dl,(%edi,%esi,1) movb %dl,(%edi,%esi,1)
@ -214,10 +214,10 @@ acb_CopyRight:
# copy aligned dwords # copy aligned dwords
3: rep; smovl 3: rep; smovl
4: movl %eax,%ecx 4: movl %eax,%ecx
andl $3,%ecx 5: andl $3,%ecx
jz 7f jz 7f
# copy suffix # copy suffix
5: xorl %eax,%eax xorl %eax,%eax
6: movb (%esi,%eax,1),%dl 6: movb (%esi,%eax,1),%dl
movb %dl,(%edi,%eax,1) movb %dl,(%edi,%eax,1)
addl $1,%eax addl $1,%eax
@ -250,9 +250,9 @@ acb_CopyLeft:
jnz 3b jnz 3b
addl %esi,%edi addl %esi,%edi
4: movl %eax,%ecx 4: movl %eax,%ecx
andl $3,%ecx 5: andl $3,%ecx
jz 7f jz 7f
5: subl %esi,%edi subl %esi,%edi
addl $3,%esi addl $3,%esi
6: movb (%esi),%dl 6: movb (%esi),%dl
movb %dl,(%edi,%esi,1) movb %dl,(%edi,%esi,1)
@ -287,11 +287,12 @@ cs_CopyRight:
andl $3,%eax # either 0 or 2 andl $3,%eax # either 0 or 2
jz 1f # no prefix jz 1f # no prefix
# copy prefix # copy prefix
subl $1,%ecx
jl 5f # zero count
movw (%esi),%dx movw (%esi),%dx
movw %dx,(%edi) movw %dx,(%edi)
addl %eax,%esi # %eax == 2 addl %eax,%esi # %eax == 2
addl %eax,%edi addl %eax,%edi
subl $1,%ecx
1: movl %ecx,%eax # word count less prefix 1: movl %ecx,%eax # word count less prefix
sarl %ecx # dword count sarl %ecx # dword count
jz 4f # no dwords to move jz 4f # no dwords to move
@ -454,12 +455,13 @@ ci_CopyRight:
ret ret
.=.+10 .=.+10
2: subl %esi,%edi 2: subl %esi,%edi
jmp 4f
.p2align 4,,15 .p2align 4,,15
3: movl (%esi),%edx 3: movl (%esi),%edx
movl %edx,(%edi,%esi,1) movl %edx,(%edi,%esi,1)
addl $4,%esi addl $4,%esi
subl $1,%ecx 4: subl $1,%ecx
jnz 3b jge 3b
popl %edi popl %edi
popl %esi popl %esi
ret ret
@ -467,19 +469,20 @@ ci_CopyLeft:
std std
leal -4(%edi,%ecx,4),%edi # to + count*4 - 4 leal -4(%edi,%ecx,4),%edi # to + count*4 - 4
cmpl $32,%ecx cmpl $32,%ecx
ja 3f # > 32 dwords ja 4f # > 32 dwords
subl %eax,%edi # eax == from + count*4 - 4 subl %eax,%edi # eax == from + count*4 - 4
jmp 3f
.p2align 4,,15 .p2align 4,,15
2: movl (%eax),%edx 2: movl (%eax),%edx
movl %edx,(%edi,%eax,1) movl %edx,(%edi,%eax,1)
subl $4,%eax subl $4,%eax
subl $1,%ecx 3: subl $1,%ecx
jnz 2b jge 2b
cld cld
popl %edi popl %edi
popl %esi popl %esi
ret ret
3: movl %eax,%esi # from + count*4 - 4 4: movl %eax,%esi # from + count*4 - 4
rep; smovl rep; smovl
cld cld
popl %edi popl %edi

View File

@ -861,7 +861,7 @@ cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap; cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
add_func_t* os::atomic_add_func = os::atomic_add_bootstrap; add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
extern "C" _solaris_raw_setup_fpu(address ptr); extern "C" void _solaris_raw_setup_fpu(address ptr);
void os::setup_fpu() { void os::setup_fpu() {
address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std(); address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
_solaris_raw_setup_fpu(fpu_cntrl); _solaris_raw_setup_fpu(fpu_cntrl);

View File

@ -154,10 +154,10 @@ cb_CopyRight:
jnz 3b jnz 3b
addl %esi,%edi addl %esi,%edi
4: movl %eax,%ecx / byte count less prefix 4: movl %eax,%ecx / byte count less prefix
andl $3,%ecx / suffix byte count 5: andl $3,%ecx / suffix byte count
jz 7f / no suffix jz 7f / no suffix
/ copy suffix / copy suffix
5: xorl %eax,%eax xorl %eax,%eax
6: movb (%esi,%eax,1),%dl 6: movb (%esi,%eax,1),%dl
movb %dl,(%edi,%eax,1) movb %dl,(%edi,%eax,1)
addl $1,%eax addl $1,%eax
@ -192,10 +192,10 @@ cb_CopyLeft:
/ copy dwords, aligned or not / copy dwords, aligned or not
3: rep; smovl 3: rep; smovl
4: movl %eax,%ecx / byte count 4: movl %eax,%ecx / byte count
andl $3,%ecx / suffix byte count 5: andl $3,%ecx / suffix byte count
jz 7f / no suffix jz 7f / no suffix
/ copy suffix / copy suffix
5: subl %esi,%edi subl %esi,%edi
addl $3,%esi addl $3,%esi
6: movb (%esi),%dl 6: movb (%esi),%dl
movb %dl,(%edi,%esi,1) movb %dl,(%edi,%esi,1)
@ -246,10 +246,10 @@ acb_CopyRight:
/ copy aligned dwords / copy aligned dwords
3: rep; smovl 3: rep; smovl
4: movl %eax,%ecx 4: movl %eax,%ecx
andl $3,%ecx 5: andl $3,%ecx
jz 7f jz 7f
/ copy suffix / copy suffix
5: xorl %eax,%eax xorl %eax,%eax
6: movb (%esi,%eax,1),%dl 6: movb (%esi,%eax,1),%dl
movb %dl,(%edi,%eax,1) movb %dl,(%edi,%eax,1)
addl $1,%eax addl $1,%eax
@ -282,9 +282,9 @@ acb_CopyLeft:
jnz 3b jnz 3b
addl %esi,%edi addl %esi,%edi
4: movl %eax,%ecx 4: movl %eax,%ecx
andl $3,%ecx 5: andl $3,%ecx
jz 7f jz 7f
5: subl %esi,%edi subl %esi,%edi
addl $3,%esi addl $3,%esi
6: movb (%esi),%dl 6: movb (%esi),%dl
movb %dl,(%edi,%esi,1) movb %dl,(%edi,%esi,1)
@ -318,11 +318,12 @@ cs_CopyRight:
andl $3,%eax / either 0 or 2 andl $3,%eax / either 0 or 2
jz 1f / no prefix jz 1f / no prefix
/ copy prefix / copy prefix
subl $1,%ecx
jl 5f / zero count
movw (%esi),%dx movw (%esi),%dx
movw %dx,(%edi) movw %dx,(%edi)
addl %eax,%esi / %eax == 2 addl %eax,%esi / %eax == 2
addl %eax,%edi addl %eax,%edi
subl $1,%ecx
1: movl %ecx,%eax / word count less prefix 1: movl %ecx,%eax / word count less prefix
sarl %ecx / dword count sarl %ecx / dword count
jz 4f / no dwords to move jz 4f / no dwords to move
@ -482,12 +483,13 @@ ci_CopyRight:
ret ret
.=.+10 .=.+10
2: subl %esi,%edi 2: subl %esi,%edi
jmp 4f
.align 16 .align 16
3: movl (%esi),%edx 3: movl (%esi),%edx
movl %edx,(%edi,%esi,1) movl %edx,(%edi,%esi,1)
addl $4,%esi addl $4,%esi
subl $1,%ecx 4: subl $1,%ecx
jnz 3b jge 3b
popl %edi popl %edi
popl %esi popl %esi
ret ret
@ -495,19 +497,20 @@ ci_CopyLeft:
std std
leal -4(%edi,%ecx,4),%edi / to + count*4 - 4 leal -4(%edi,%ecx,4),%edi / to + count*4 - 4
cmpl $32,%ecx cmpl $32,%ecx
ja 3f / > 32 dwords ja 4f / > 32 dwords
subl %eax,%edi / eax == from + count*4 - 4 subl %eax,%edi / eax == from + count*4 - 4
jmp 3f
.align 16 .align 16
2: movl (%eax),%edx 2: movl (%eax),%edx
movl %edx,(%edi,%eax,1) movl %edx,(%edi,%eax,1)
subl $4,%eax subl $4,%eax
subl $1,%ecx 3: subl $1,%ecx
jnz 2b jge 2b
cld cld
popl %edi popl %edi
popl %esi popl %esi
ret ret
3: movl %eax,%esi / from + count*4 - 4 4: movl %eax,%esi / from + count*4 - 4
rep; smovl rep; smovl
cld cld
popl %edi popl %edi

View File

@ -404,7 +404,7 @@ void CodeSection::expand_locs(int new_capacity) {
locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity); locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity);
} else { } else {
locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity); locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity);
Copy::conjoint_bytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo)); Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo));
_locs_own = true; _locs_own = true;
} }
_locs_start = locs_start; _locs_start = locs_start;
@ -581,7 +581,7 @@ csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
(HeapWord*)(buf+buf_offset), (HeapWord*)(buf+buf_offset),
(lsize + HeapWordSize-1) / HeapWordSize); (lsize + HeapWordSize-1) / HeapWordSize);
} else { } else {
Copy::conjoint_bytes(lstart, buf+buf_offset, lsize); Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize);
} }
} }
buf_offset += lsize; buf_offset += lsize;

View File

@ -242,10 +242,10 @@ void Compilation::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
code->insts()->initialize_shared_locs((relocInfo*)locs_buffer, code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
locs_buffer_size / sizeof(relocInfo)); locs_buffer_size / sizeof(relocInfo));
code->initialize_consts_size(Compilation::desired_max_constant_size()); code->initialize_consts_size(Compilation::desired_max_constant_size());
// Call stubs + deopt/exception handler // Call stubs + two deopt handlers (regular and MH) + exception handler
code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) + code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
LIR_Assembler::exception_handler_size + LIR_Assembler::exception_handler_size +
LIR_Assembler::deopt_handler_size); 2 * LIR_Assembler::deopt_handler_size);
} }
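
The hunk above widens the stub-section budget to cover two deoptimization handlers (regular and MethodHandle) alongside the call stubs and the exception handler. A small sketch of that arithmetic follows; the size fields are placeholders standing in for the LIR_Assembler constants, not real values.

    // Sketch of the stub-section budget computed above; the three sizes are
    // placeholders for the LIR_Assembler constants.
    struct StubSizes {
        int call_stub_size;
        int exception_handler_size;
        int deopt_handler_size;
    };

    // Call stubs + two deopt handlers (regular and MH) + exception handler.
    int stubs_section_size(const StubSizes& s, int call_stub_estimate) {
        return call_stub_estimate * s.call_stub_size
             + s.exception_handler_size
             + 2 * s.deopt_handler_size;   // was a single deopt handler before this change
    }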

View File

@ -878,15 +878,12 @@ void GraphBuilder::load_constant() {
case T_OBJECT : case T_OBJECT :
{ {
ciObject* obj = con.as_object(); ciObject* obj = con.as_object();
if (obj->is_klass()) { if (!obj->is_loaded()
ciKlass* klass = obj->as_klass(); || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
if (!klass->is_loaded() || PatchALot) { patch_state = state()->copy();
patch_state = state()->copy(); t = new ObjectConstant(obj);
t = new ObjectConstant(obj);
} else {
t = new InstanceConstant(klass->java_mirror());
}
} else { } else {
assert(!obj->is_klass(), "must be java_mirror of klass");
t = new InstanceConstant(obj->as_instance()); t = new InstanceConstant(obj->as_instance());
} }
break; break;
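
The rewritten branch treats every object constant uniformly (a class constant is now its java.lang.Class mirror) and defers to runtime patching only when the object is not yet loaded, or when PatchALot forces patching and the constant is not a String. Here is a standalone sketch of just that decision, using stand-in types rather than the ci* classes.

    #include <cassert>

    // Stand-in for the information GraphBuilder::load_constant() consults;
    // the field names are illustrative, not the ci* API.
    struct ObjectConstantInfo {
        bool is_loaded;        // obj->is_loaded()
        bool is_string_class;  // obj->klass() == String_klass
    };

    // Mirrors the new branch: patch at runtime when the object is not yet
    // loaded, or when PatchALot asks for patching and it is not a String.
    bool must_patch_object_constant(const ObjectConstantInfo& obj, bool patch_a_lot) {
        return !obj.is_loaded || (patch_a_lot && !obj.is_string_class);
    }

    int main() {
        assert( must_patch_object_constant({false, false}, false));  // unloaded
        assert(!must_patch_object_constant({true,  false}, false));  // loaded, no PatchALot
        assert( must_patch_object_constant({true,  false}, true));   // PatchALot, not a String
        assert(!must_patch_object_constant({true,  true},  true));   // PatchALot exempts Strings
        return 0;
    }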

View File

@ -601,7 +601,7 @@ JRT_END
static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
Bytecode_field* field_access = Bytecode_field_at(caller(), caller->bcp_from(bci)); Bytecode_field* field_access = Bytecode_field_at(caller, bci);
// This can be static or non-static field access // This can be static or non-static field access
Bytecodes::Code code = field_access->code(); Bytecodes::Code code = field_access->code();
@ -721,7 +721,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code
if (stub_id == Runtime1::access_field_patching_id) { if (stub_id == Runtime1::access_field_patching_id) {
Bytecode_field* field_access = Bytecode_field_at(caller_method(), caller_method->bcp_from(bci)); Bytecode_field* field_access = Bytecode_field_at(caller_method, bci);
FieldAccessInfo result; // initialize class if needed FieldAccessInfo result; // initialize class if needed
Bytecodes::Code code = field_access->code(); Bytecodes::Code code = field_access->code();
constantPoolHandle constants(THREAD, caller_method->constants()); constantPoolHandle constants(THREAD, caller_method->constants());
@ -781,11 +781,9 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
case Bytecodes::_ldc: case Bytecodes::_ldc:
case Bytecodes::_ldc_w: case Bytecodes::_ldc_w:
{ {
Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method(), Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci);
caller_method->bcp_from(bci)); k = cc->resolve_constant(CHECK);
klassOop resolved = caller_method->constants()->klass_at(cc->index(), CHECK); assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant");
// ldc wants the java mirror.
k = resolved->klass_part()->java_mirror();
} }
break; break;
default: Unimplemented(); default: Unimplemented();
@ -816,6 +814,15 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// Return to the now deoptimized frame. // Return to the now deoptimized frame.
} }
// If we are patching in a non-perm oop, make sure the nmethod
// is on the right list.
if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) {
MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
guarantee(nm != NULL, "only nmethods can contain non-perm oops");
if (!nm->on_scavenge_root_list())
CodeCache::add_scavenge_root_nmethod(nm);
}
// Now copy code back // Now copy code back
@ -1115,7 +1122,7 @@ JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int l
if (length == 0) return; if (length == 0) return;
// Not guaranteed to be word atomic, but that doesn't matter // Not guaranteed to be word atomic, but that doesn't matter
// for anything but an oop array, which is covered by oop_arraycopy. // for anything but an oop array, which is covered by oop_arraycopy.
Copy::conjoint_bytes(src, dst, length); Copy::conjoint_jbytes(src, dst, length);
JRT_END JRT_END
JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num)) JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))

View File

@ -106,7 +106,7 @@ public:
void BCEscapeAnalyzer::set_returned(ArgumentMap vars) { void BCEscapeAnalyzer::set_returned(ArgumentMap vars) {
for (int i = 0; i < _arg_size; i++) { for (int i = 0; i < _arg_size; i++) {
if (vars.contains(i)) if (vars.contains(i))
_arg_returned.set_bit(i); _arg_returned.set(i);
} }
_return_local = _return_local && !(vars.contains_unknown() || vars.contains_allocated()); _return_local = _return_local && !(vars.contains_unknown() || vars.contains_allocated());
_return_allocated = _return_allocated && vars.contains_allocated() && !(vars.contains_unknown() || vars.contains_vars()); _return_allocated = _return_allocated && vars.contains_allocated() && !(vars.contains_unknown() || vars.contains_vars());
@ -126,16 +126,16 @@ bool BCEscapeAnalyzer::is_arg_stack(ArgumentMap vars){
if (_conservative) if (_conservative)
return true; return true;
for (int i = 0; i < _arg_size; i++) { for (int i = 0; i < _arg_size; i++) {
if (vars.contains(i) && _arg_stack.at(i)) if (vars.contains(i) && _arg_stack.test(i))
return true; return true;
} }
return false; return false;
} }
void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, BitMap &bm) { void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, VectorSet &bm) {
for (int i = 0; i < _arg_size; i++) { for (int i = 0; i < _arg_size; i++) {
if (vars.contains(i)) { if (vars.contains(i)) {
bm.clear_bit(i); bm >>= i;
} }
} }
} }
@ -1157,15 +1157,15 @@ void BCEscapeAnalyzer::initialize() {
ciSignature* sig = method()->signature(); ciSignature* sig = method()->signature();
int j = 0; int j = 0;
if (!method()->is_static()) { if (!method()->is_static()) {
_arg_local.set_bit(0); _arg_local.set(0);
_arg_stack.set_bit(0); _arg_stack.set(0);
j++; j++;
} }
for (i = 0; i < sig->count(); i++) { for (i = 0; i < sig->count(); i++) {
ciType* t = sig->type_at(i); ciType* t = sig->type_at(i);
if (!t->is_primitive_type()) { if (!t->is_primitive_type()) {
_arg_local.set_bit(j); _arg_local.set(j);
_arg_stack.set_bit(j); _arg_stack.set(j);
} }
j += t->size(); j += t->size();
} }
@ -1198,9 +1198,9 @@ void BCEscapeAnalyzer::clear_escape_info() {
set_modified(var, OFFSET_ANY, 4); set_modified(var, OFFSET_ANY, 4);
set_global_escape(var); set_global_escape(var);
} }
_arg_local.clear(); _arg_local.Clear();
_arg_stack.clear(); _arg_stack.Clear();
_arg_returned.clear(); _arg_returned.Clear();
_return_local = false; _return_local = false;
_return_allocated = false; _return_allocated = false;
_allocated_escapes = true; _allocated_escapes = true;
@ -1254,7 +1254,7 @@ void BCEscapeAnalyzer::compute_escape_info() {
// Do not scan method if it has no object parameters and // Do not scan method if it has no object parameters and
// does not return an object (_return_allocated is set in initialize()). // does not return an object (_return_allocated is set in initialize()).
if (_arg_local.is_empty() && !_return_allocated) { if (_arg_local.Size() == 0 && !_return_allocated) {
// Clear all info since method's bytecode was not analysed and // Clear all info since method's bytecode was not analysed and
// set pessimistic escape information. // set pessimistic escape information.
clear_escape_info(); clear_escape_info();
@ -1275,14 +1275,14 @@ void BCEscapeAnalyzer::compute_escape_info() {
// //
if (!has_dependencies() && !methodData()->is_empty()) { if (!has_dependencies() && !methodData()->is_empty()) {
for (i = 0; i < _arg_size; i++) { for (i = 0; i < _arg_size; i++) {
if (_arg_local.at(i)) { if (_arg_local.test(i)) {
assert(_arg_stack.at(i), "inconsistent escape info"); assert(_arg_stack.test(i), "inconsistent escape info");
methodData()->set_arg_local(i); methodData()->set_arg_local(i);
methodData()->set_arg_stack(i); methodData()->set_arg_stack(i);
} else if (_arg_stack.at(i)) { } else if (_arg_stack.test(i)) {
methodData()->set_arg_stack(i); methodData()->set_arg_stack(i);
} }
if (_arg_returned.at(i)) { if (_arg_returned.test(i)) {
methodData()->set_arg_returned(i); methodData()->set_arg_returned(i);
} }
methodData()->set_arg_modified(i, _arg_modified[i]); methodData()->set_arg_modified(i, _arg_modified[i]);
@ -1308,9 +1308,12 @@ void BCEscapeAnalyzer::read_escape_info() {
// read escape information from method descriptor // read escape information from method descriptor
for (int i = 0; i < _arg_size; i++) { for (int i = 0; i < _arg_size; i++) {
_arg_local.at_put(i, methodData()->is_arg_local(i)); if (methodData()->is_arg_local(i))
_arg_stack.at_put(i, methodData()->is_arg_stack(i)); _arg_local.set(i);
_arg_returned.at_put(i, methodData()->is_arg_returned(i)); if (methodData()->is_arg_stack(i))
_arg_stack.set(i);
if (methodData()->is_arg_returned(i))
_arg_returned.set(i);
_arg_modified[i] = methodData()->arg_modified(i); _arg_modified[i] = methodData()->arg_modified(i);
} }
_return_local = methodData()->eflag_set(methodDataOopDesc::return_local); _return_local = methodData()->eflag_set(methodDataOopDesc::return_local);
@ -1358,26 +1361,26 @@ void BCEscapeAnalyzer::dump() {
BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent) BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
: _conservative(method == NULL || !EstimateArgEscape) : _conservative(method == NULL || !EstimateArgEscape)
, _arena(CURRENT_ENV->arena())
, _method(method) , _method(method)
, _methodData(method ? method->method_data() : NULL) , _methodData(method ? method->method_data() : NULL)
, _arg_size(method ? method->arg_size() : 0) , _arg_size(method ? method->arg_size() : 0)
, _stack() , _arg_local(_arena)
, _arg_local(_arg_size) , _arg_stack(_arena)
, _arg_stack(_arg_size) , _arg_returned(_arena)
, _arg_returned(_arg_size) , _dirty(_arena)
, _dirty(_arg_size)
, _return_local(false) , _return_local(false)
, _return_allocated(false) , _return_allocated(false)
, _allocated_escapes(false) , _allocated_escapes(false)
, _unknown_modified(false) , _unknown_modified(false)
, _dependencies() , _dependencies(_arena, 4, 0, NULL)
, _parent(parent) , _parent(parent)
, _level(parent == NULL ? 0 : parent->level() + 1) { , _level(parent == NULL ? 0 : parent->level() + 1) {
if (!_conservative) { if (!_conservative) {
_arg_local.clear(); _arg_local.Clear();
_arg_stack.clear(); _arg_stack.Clear();
_arg_returned.clear(); _arg_returned.Clear();
_dirty.clear(); _dirty.Clear();
Arena* arena = CURRENT_ENV->arena(); Arena* arena = CURRENT_ENV->arena();
_arg_modified = (uint *) arena->Amalloc(_arg_size * sizeof(uint)); _arg_modified = (uint *) arena->Amalloc(_arg_size * sizeof(uint));
Copy::zero_to_bytes(_arg_modified, _arg_size * sizeof(uint)); Copy::zero_to_bytes(_arg_modified, _arg_size * sizeof(uint));
@ -1414,8 +1417,8 @@ void BCEscapeAnalyzer::copy_dependencies(Dependencies *deps) {
deps->assert_evol_method(method()); deps->assert_evol_method(method());
} }
for (int i = 0; i < _dependencies.length(); i+=2) { for (int i = 0; i < _dependencies.length(); i+=2) {
ciKlass *k = _dependencies[i]->as_klass(); ciKlass *k = _dependencies.at(i)->as_klass();
ciMethod *m = _dependencies[i+1]->as_method(); ciMethod *m = _dependencies.at(i+1)->as_method();
deps->assert_unique_concrete_method(k, m); deps->assert_unique_concrete_method(k, m);
} }
} }
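
These hunks move the analyzer's argument flags from fixed-size BitMaps onto arena-allocated VectorSets, so the calls change shape: set_bit(i) becomes set(i), at(i) becomes test(i), clear_bit(i) becomes `>>= i`, clear() becomes Clear(), and the emptiness check reads Size(). The toy class below only mirrors that operation mapping; it is a standalone stand-in, not the HotSpot VectorSet.

    #include <cassert>
    #include <set>

    // Standalone stand-in for the VectorSet operations used in the hunks
    // above (set/test/Clear/>>=/Size).
    class ToyVectorSet {
        std::set<int> bits_;
    public:
        void set(int i)         { bits_.insert(i); }            // was BitMap::set_bit(i)
        bool test(int i) const  { return bits_.count(i) != 0; } // was BitMap::at(i)
        void operator>>=(int i) { bits_.erase(i); }             // was BitMap::clear_bit(i)
        void Clear()            { bits_.clear(); }              // was BitMap::clear()
        size_t Size() const     { return bits_.size(); }        // Size()==0 replaces is_empty()
    };

    int main() {
        ToyVectorSet arg_local;
        arg_local.set(0);
        arg_local.set(2);
        assert(arg_local.test(2));
        arg_local >>= 2;          // clear_bits() now removes elements this way
        assert(!arg_local.test(2));
        arg_local.Clear();
        assert(arg_local.Size() == 0);
        return 0;
    }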

View File

@ -22,9 +22,6 @@
* *
*/ */
define_array(ciObjectArray, ciObject*);
define_stack(ciObjectList, ciObjectArray);
// This class implements a fast, conservative analysis of the effect of methods // This class implements a fast, conservative analysis of the effect of methods
// on the escape state of their arguments. The analysis is at the bytecode // on the escape state of their arguments. The analysis is at the bytecode
// level. // level.
@ -34,18 +31,17 @@ class ciBlock;
class BCEscapeAnalyzer : public ResourceObj { class BCEscapeAnalyzer : public ResourceObj {
private: private:
Arena* _arena; // ciEnv arena
bool _conservative; // If true, return maximally bool _conservative; // If true, return maximally
// conservative results. // conservative results.
ciMethod* _method; ciMethod* _method;
ciMethodData* _methodData; ciMethodData* _methodData;
int _arg_size; int _arg_size;
VectorSet _arg_local;
intStack _stack; VectorSet _arg_stack;
VectorSet _arg_returned;
BitMap _arg_local; VectorSet _dirty;
BitMap _arg_stack;
BitMap _arg_returned;
BitMap _dirty;
enum{ ARG_OFFSET_MAX = 31}; enum{ ARG_OFFSET_MAX = 31};
uint *_arg_modified; uint *_arg_modified;
@ -54,7 +50,7 @@ class BCEscapeAnalyzer : public ResourceObj {
bool _allocated_escapes; bool _allocated_escapes;
bool _unknown_modified; bool _unknown_modified;
ciObjectList _dependencies; GrowableArray<ciObject *> _dependencies;
ciMethodBlocks *_methodBlocks; ciMethodBlocks *_methodBlocks;
@ -68,20 +64,10 @@ class BCEscapeAnalyzer : public ResourceObj {
private: private:
// helper functions // helper functions
bool is_argument(int i) { return i >= 0 && i < _arg_size; } bool is_argument(int i) { return i >= 0 && i < _arg_size; }
void raw_push(int i) { _stack.push(i); }
int raw_pop() { return _stack.is_empty() ? -1 : _stack.pop(); }
void apush(int i) { raw_push(i); }
void spush() { raw_push(-1); }
void lpush() { spush(); spush(); }
int apop() { return raw_pop(); }
void spop() { assert(_stack.is_empty() || _stack.top() == -1, ""); raw_pop(); }
void lpop() { spop(); spop(); }
void set_returned(ArgumentMap vars); void set_returned(ArgumentMap vars);
bool is_argument(ArgumentMap vars); bool is_argument(ArgumentMap vars);
bool is_arg_stack(ArgumentMap vars); bool is_arg_stack(ArgumentMap vars);
void clear_bits(ArgumentMap vars, BitMap &bs); void clear_bits(ArgumentMap vars, VectorSet &bs);
void set_method_escape(ArgumentMap vars); void set_method_escape(ArgumentMap vars);
void set_global_escape(ArgumentMap vars); void set_global_escape(ArgumentMap vars);
void set_dirty(ArgumentMap vars); void set_dirty(ArgumentMap vars);
@ -116,25 +102,25 @@ class BCEscapeAnalyzer : public ResourceObj {
ciMethodData* methodData() const { return _methodData; } ciMethodData* methodData() const { return _methodData; }
BCEscapeAnalyzer* parent() const { return _parent; } BCEscapeAnalyzer* parent() const { return _parent; }
int level() const { return _level; } int level() const { return _level; }
ciObjectList* dependencies() { return &_dependencies; } GrowableArray<ciObject *>* dependencies() { return &_dependencies; }
bool has_dependencies() const { return !_dependencies.is_empty(); } bool has_dependencies() const { return !_dependencies.is_empty(); }
// retrieval of interprocedural escape information // retrieval of interprocedural escape information
// The given argument does not escape the callee. // The given argument does not escape the callee.
bool is_arg_local(int i) const { bool is_arg_local(int i) const {
return !_conservative && _arg_local.at(i); return !_conservative && _arg_local.test(i);
} }
// The given argument escapes the callee, but does not become globally // The given argument escapes the callee, but does not become globally
// reachable. // reachable.
bool is_arg_stack(int i) const { bool is_arg_stack(int i) const {
return !_conservative && _arg_stack.at(i); return !_conservative && _arg_stack.test(i);
} }
// The given argument does not escape globally, and may be returned. // The given argument does not escape globally, and may be returned.
bool is_arg_returned(int i) const { bool is_arg_returned(int i) const {
return !_conservative && _arg_returned.at(i); } return !_conservative && _arg_returned.test(i); }
// True iff only input arguments are returned. // True iff only input arguments are returned.
bool is_return_local() const { bool is_return_local() const {

View File

@ -44,12 +44,22 @@ size_t ciCPCache::get_f1_offset(int index) {
// ciCPCache::is_f1_null_at // ciCPCache::is_f1_null_at
bool ciCPCache::is_f1_null_at(int index) { bool ciCPCache::is_f1_null_at(int index) {
VM_ENTRY_MARK; VM_ENTRY_MARK;
constantPoolCacheOop cpcache = (constantPoolCacheOop) get_oop(); oop f1 = entry_at(index)->f1();
oop f1 = cpcache->secondary_entry_at(index)->f1();
return (f1 == NULL); return (f1 == NULL);
} }
// ------------------------------------------------------------------
// ciCPCache::get_pool_index
int ciCPCache::get_pool_index(int index) {
VM_ENTRY_MARK;
ConstantPoolCacheEntry* e = entry_at(index);
if (e->is_secondary_entry())
e = entry_at(e->main_entry_index());
return e->constant_pool_index();
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciCPCache::print // ciCPCache::print
// //

View File

@ -29,6 +29,18 @@
// Note: This class is called ciCPCache as ciConstantPoolCache is used // Note: This class is called ciCPCache as ciConstantPoolCache is used
// for something different. // for something different.
class ciCPCache : public ciObject { class ciCPCache : public ciObject {
private:
constantPoolCacheOop get_cpCacheOop() { // must be called inside a VM_ENTRY_MARK
return (constantPoolCacheOop) get_oop();
}
ConstantPoolCacheEntry* entry_at(int i) {
int raw_index = i;
if (constantPoolCacheOopDesc::is_secondary_index(i))
raw_index = constantPoolCacheOopDesc::decode_secondary_index(i);
return get_cpCacheOop()->entry_at(raw_index);
}
public: public:
ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {} ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {}
@ -41,5 +53,7 @@ public:
bool is_f1_null_at(int index); bool is_f1_null_at(int index);
int get_pool_index(int index);
void print(); void print();
}; };
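
The new entry_at()/get_pool_index() helpers normalize an incoming index before touching the cache: a secondary (invokedynamic) index is first decoded back to its raw slot, and get_pool_index() additionally hops from a secondary entry to its main entry before reading the original constant-pool index. The sketch below mimics that two-step normalization over a plain array; the negative-number encoding of secondary indexes is invented for the example and is not HotSpot's encoding.

    #include <cassert>
    #include <vector>

    // Illustrative cache entry: a secondary entry points at its main entry;
    // a main entry records the original constant-pool index.
    struct ToyCacheEntry {
        bool is_secondary;
        int  main_entry_index;     // meaningful only when is_secondary
        int  constant_pool_index;  // meaningful only on main entries
    };

    class ToyCPCache {
        std::vector<ToyCacheEntry> entries_;
    public:
        explicit ToyCPCache(const std::vector<ToyCacheEntry>& e) : entries_(e) {}

        // Assumed encoding for this sketch only: secondary indexes arrive as
        // negative numbers.  HotSpot has its own is_secondary_index() /
        // decode_secondary_index() scheme.
        static bool is_secondary_index(int i)  { return i < 0; }
        static int  decode_secondary_index(int i) { return -i - 1; }

        const ToyCacheEntry& entry_at(int i) const {
            int raw = is_secondary_index(i) ? decode_secondary_index(i) : i;
            return entries_[raw];
        }

        // Mirrors ciCPCache::get_pool_index(): hop from a secondary entry to
        // its main entry, then return the original constant-pool index.
        int get_pool_index(int i) const {
            const ToyCacheEntry* e = &entry_at(i);
            if (e->is_secondary)
                e = &entries_[e->main_entry_index];
            return e->constant_pool_index;
        }
    };

    int main() {
        ToyCPCache cache({ {false, 0, 17},      // entry 0: main, CP index 17
                           {true,  0,  0} });   // entry 1: secondary -> entry 0
        assert(cache.get_pool_index(0)  == 17); // plain index
        assert(cache.get_pool_index(-2) == 17); // encoded secondary index for slot 1
        return 0;
    }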

View File

@ -85,6 +85,7 @@ friend class ciCallSite; \
friend class ciConstantPoolCache; \ friend class ciConstantPoolCache; \
friend class ciField; \ friend class ciField; \
friend class ciConstant; \ friend class ciConstant; \
friend class ciCPCache; \
friend class ciFlags; \ friend class ciFlags; \
friend class ciExceptionHandler; \ friend class ciExceptionHandler; \
friend class ciCallProfile; \ friend class ciCallProfile; \

View File

@ -511,9 +511,22 @@ ciKlass* ciEnv::get_klass_by_index(constantPoolHandle cpool,
// //
// Implementation of get_constant_by_index(). // Implementation of get_constant_by_index().
ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool, ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
int index, int pool_index, int cache_index,
ciInstanceKlass* accessor) { ciInstanceKlass* accessor) {
bool ignore_will_link;
EXCEPTION_CONTEXT; EXCEPTION_CONTEXT;
int index = pool_index;
if (cache_index >= 0) {
assert(index < 0, "only one kind of index at a time");
ConstantPoolCacheEntry* cpc_entry = cpool->cache()->entry_at(cache_index);
index = cpc_entry->constant_pool_index();
oop obj = cpc_entry->f1();
if (obj != NULL) {
assert(obj->is_instance(), "must be an instance");
ciObject* ciobj = get_object(obj);
return ciConstant(T_OBJECT, ciobj);
}
}
constantTag tag = cpool->tag_at(index); constantTag tag = cpool->tag_at(index);
if (tag.is_int()) { if (tag.is_int()) {
return ciConstant(T_INT, (jint)cpool->int_at(index)); return ciConstant(T_INT, (jint)cpool->int_at(index));
@ -540,8 +553,7 @@ ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
return ciConstant(T_OBJECT, constant); return ciConstant(T_OBJECT, constant);
} else if (tag.is_klass() || tag.is_unresolved_klass()) { } else if (tag.is_klass() || tag.is_unresolved_klass()) {
// 4881222: allow ldc to take a class type // 4881222: allow ldc to take a class type
bool ignore; ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor);
ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore, accessor);
if (HAS_PENDING_EXCEPTION) { if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION; CLEAR_PENDING_EXCEPTION;
record_out_of_memory_failure(); record_out_of_memory_failure();
@ -549,42 +561,32 @@ ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
} }
assert (klass->is_instance_klass() || klass->is_array_klass(), assert (klass->is_instance_klass() || klass->is_array_klass(),
"must be an instance or array klass "); "must be an instance or array klass ");
return ciConstant(T_OBJECT, klass); return ciConstant(T_OBJECT, klass->java_mirror());
} else if (tag.is_object()) { } else if (tag.is_object()) {
oop obj = cpool->object_at(index); oop obj = cpool->object_at(index);
assert(obj->is_instance(), "must be an instance"); assert(obj->is_instance(), "must be an instance");
ciObject* ciobj = get_object(obj); ciObject* ciobj = get_object(obj);
return ciConstant(T_OBJECT, ciobj); return ciConstant(T_OBJECT, ciobj);
} else if (tag.is_method_type()) {
// must execute Java code to link this CP entry into cache[i].f1
ciSymbol* signature = get_object(cpool->method_type_signature_at(index))->as_symbol();
ciObject* ciobj = get_unloaded_method_type_constant(signature);
return ciConstant(T_OBJECT, ciobj);
} else if (tag.is_method_handle()) {
// must execute Java code to link this CP entry into cache[i].f1
int ref_kind = cpool->method_handle_ref_kind_at(index);
int callee_index = cpool->method_handle_klass_index_at(index);
ciKlass* callee = get_klass_by_index_impl(cpool, callee_index, ignore_will_link, accessor);
ciSymbol* name = get_object(cpool->method_handle_name_ref_at(index))->as_symbol();
ciSymbol* signature = get_object(cpool->method_handle_signature_ref_at(index))->as_symbol();
ciObject* ciobj = get_unloaded_method_handle_constant(callee, name, signature, ref_kind);
return ciConstant(T_OBJECT, ciobj);
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
return ciConstant(); return ciConstant();
} }
} }
// ------------------------------------------------------------------
// ciEnv::is_unresolved_string_impl
//
// Implementation of is_unresolved_string().
bool ciEnv::is_unresolved_string_impl(instanceKlass* accessor, int index) const {
EXCEPTION_CONTEXT;
assert(accessor->is_linked(), "must be linked before accessing constant pool");
constantPoolOop cpool = accessor->constants();
constantTag tag = cpool->tag_at(index);
return tag.is_unresolved_string();
}
// ------------------------------------------------------------------
// ciEnv::is_unresolved_klass_impl
//
// Implementation of is_unresolved_klass().
bool ciEnv::is_unresolved_klass_impl(instanceKlass* accessor, int index) const {
EXCEPTION_CONTEXT;
assert(accessor->is_linked(), "must be linked before accessing constant pool");
constantPoolOop cpool = accessor->constants();
constantTag tag = cpool->tag_at(index);
return tag.is_unresolved_klass();
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciEnv::get_constant_by_index // ciEnv::get_constant_by_index
// //
@ -592,31 +594,9 @@ bool ciEnv::is_unresolved_klass_impl(instanceKlass* accessor, int index) const {
// //
// Implementation note: this query is currently in no way cached. // Implementation note: this query is currently in no way cached.
ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool, ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool,
int index, int pool_index, int cache_index,
ciInstanceKlass* accessor) { ciInstanceKlass* accessor) {
GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, index, accessor);) GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, pool_index, cache_index, accessor);)
}
// ------------------------------------------------------------------
// ciEnv::is_unresolved_string
//
// Check constant pool
//
// Implementation note: this query is currently in no way cached.
bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor,
int index) const {
GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); )
}
// ------------------------------------------------------------------
// ciEnv::is_unresolved_klass
//
// Check constant pool
//
// Implementation note: this query is currently in no way cached.
bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor,
int index) const {
GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); )
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -748,8 +728,8 @@ ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
} }
// Get the invoker methodOop from the constant pool. // Get the invoker methodOop from the constant pool.
intptr_t f2_value = cpool->cache()->main_entry_at(index)->f2(); oop f1_value = cpool->cache()->main_entry_at(index)->f1();
methodOop signature_invoker = methodOop(f2_value); methodOop signature_invoker = methodOop(f1_value);
assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(), assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
"correct result from LinkResolver::resolve_invokedynamic"); "correct result from LinkResolver::resolve_invokedynamic");

View File

@ -116,12 +116,8 @@ private:
bool& is_accessible, bool& is_accessible,
ciInstanceKlass* loading_klass); ciInstanceKlass* loading_klass);
ciConstant get_constant_by_index(constantPoolHandle cpool, ciConstant get_constant_by_index(constantPoolHandle cpool,
int constant_index, int pool_index, int cache_index,
ciInstanceKlass* accessor); ciInstanceKlass* accessor);
bool is_unresolved_string(ciInstanceKlass* loading_klass,
int constant_index) const;
bool is_unresolved_klass(ciInstanceKlass* loading_klass,
int constant_index) const;
ciField* get_field_by_index(ciInstanceKlass* loading_klass, ciField* get_field_by_index(ciInstanceKlass* loading_klass,
int field_index); int field_index);
ciMethod* get_method_by_index(constantPoolHandle cpool, ciMethod* get_method_by_index(constantPoolHandle cpool,
@ -137,12 +133,8 @@ private:
bool& is_accessible, bool& is_accessible,
ciInstanceKlass* loading_klass); ciInstanceKlass* loading_klass);
ciConstant get_constant_by_index_impl(constantPoolHandle cpool, ciConstant get_constant_by_index_impl(constantPoolHandle cpool,
int constant_index, int pool_index, int cache_index,
ciInstanceKlass* loading_klass); ciInstanceKlass* loading_klass);
bool is_unresolved_string_impl (instanceKlass* loading_klass,
int constant_index) const;
bool is_unresolved_klass_impl (instanceKlass* loading_klass,
int constant_index) const;
ciField* get_field_by_index_impl(ciInstanceKlass* loading_klass, ciField* get_field_by_index_impl(ciInstanceKlass* loading_klass,
int field_index); int field_index);
ciMethod* get_method_by_index_impl(constantPoolHandle cpool, ciMethod* get_method_by_index_impl(constantPoolHandle cpool,
@ -190,6 +182,25 @@ private:
return _factory->get_unloaded_klass(accessing_klass, name, true); return _factory->get_unloaded_klass(accessing_klass, name, true);
} }
// Get a ciKlass representing an unloaded klass mirror.
// Result is not necessarily unique, but will be unloaded.
ciInstance* get_unloaded_klass_mirror(ciKlass* type) {
return _factory->get_unloaded_klass_mirror(type);
}
// Get a ciInstance representing an unresolved method handle constant.
ciInstance* get_unloaded_method_handle_constant(ciKlass* holder,
ciSymbol* name,
ciSymbol* signature,
int ref_kind) {
return _factory->get_unloaded_method_handle_constant(holder, name, signature, ref_kind);
}
// Get a ciInstance representing an unresolved method type constant.
ciInstance* get_unloaded_method_type_constant(ciSymbol* signature) {
return _factory->get_unloaded_method_type_constant(signature);
}
// See if we already have an unloaded klass for the given name // See if we already have an unloaded klass for the given name
// or return NULL if not. // or return NULL if not.
ciKlass *check_get_unloaded_klass(ciKlass* accessing_klass, ciSymbol* name) { ciKlass *check_get_unloaded_klass(ciKlass* accessing_klass, ciSymbol* name) {

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -44,9 +44,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
_flags = ciFlags(access_flags); _flags = ciFlags(access_flags);
_has_finalizer = access_flags.has_finalizer(); _has_finalizer = access_flags.has_finalizer();
_has_subklass = ik->subklass() != NULL; _has_subklass = ik->subklass() != NULL;
_is_initialized = ik->is_initialized(); _init_state = (instanceKlass::ClassState)ik->get_init_state();
// Next line must follow and use the result of the previous line:
_is_linked = _is_initialized || ik->is_linked();
_nonstatic_field_size = ik->nonstatic_field_size(); _nonstatic_field_size = ik->nonstatic_field_size();
_has_nonstatic_fields = ik->has_nonstatic_fields(); _has_nonstatic_fields = ik->has_nonstatic_fields();
_nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields: _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
@ -91,8 +89,7 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
: ciKlass(name, ciInstanceKlassKlass::make()) : ciKlass(name, ciInstanceKlassKlass::make())
{ {
assert(name->byte_at(0) != '[', "not an instance klass"); assert(name->byte_at(0) != '[', "not an instance klass");
_is_initialized = false; _init_state = (instanceKlass::ClassState)0;
_is_linked = false;
_nonstatic_field_size = -1; _nonstatic_field_size = -1;
_has_nonstatic_fields = false; _has_nonstatic_fields = false;
_nonstatic_fields = NULL; _nonstatic_fields = NULL;
@ -109,21 +106,10 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciInstanceKlass::compute_shared_is_initialized // ciInstanceKlass::compute_shared_is_initialized
bool ciInstanceKlass::compute_shared_is_initialized() { void ciInstanceKlass::compute_shared_init_state() {
GUARDED_VM_ENTRY( GUARDED_VM_ENTRY(
instanceKlass* ik = get_instanceKlass(); instanceKlass* ik = get_instanceKlass();
_is_initialized = ik->is_initialized(); _init_state = (instanceKlass::ClassState)ik->get_init_state();
return _is_initialized;
)
}
// ------------------------------------------------------------------
// ciInstanceKlass::compute_shared_is_linked
bool ciInstanceKlass::compute_shared_is_linked() {
GUARDED_VM_ENTRY(
instanceKlass* ik = get_instanceKlass();
_is_linked = ik->is_linked();
return _is_linked;
) )
} }
@ -323,8 +309,8 @@ ciInstanceKlass* ciInstanceKlass::super() {
// ciInstanceKlass::java_mirror // ciInstanceKlass::java_mirror
// //
// Get the instance of java.lang.Class corresponding to this klass. // Get the instance of java.lang.Class corresponding to this klass.
// Cache it on this->_java_mirror.
ciInstance* ciInstanceKlass::java_mirror() { ciInstance* ciInstanceKlass::java_mirror() {
assert(is_loaded(), "must be loaded");
if (_java_mirror == NULL) { if (_java_mirror == NULL) {
_java_mirror = ciKlass::java_mirror(); _java_mirror = ciKlass::java_mirror();
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -39,9 +39,8 @@ private:
jobject _loader; jobject _loader;
jobject _protection_domain; jobject _protection_domain;
instanceKlass::ClassState _init_state; // state of class
bool _is_shared; bool _is_shared;
bool _is_initialized;
bool _is_linked;
bool _has_finalizer; bool _has_finalizer;
bool _has_subklass; bool _has_subklass;
bool _has_nonstatic_fields; bool _has_nonstatic_fields;
@ -87,27 +86,34 @@ protected:
bool is_shared() { return _is_shared; } bool is_shared() { return _is_shared; }
bool compute_shared_is_initialized(); void compute_shared_init_state();
bool compute_shared_is_linked();
bool compute_shared_has_subklass(); bool compute_shared_has_subklass();
int compute_shared_nof_implementors(); int compute_shared_nof_implementors();
int compute_nonstatic_fields(); int compute_nonstatic_fields();
GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields); GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields);
// Update the init_state for shared klasses
void update_if_shared(instanceKlass::ClassState expected) {
if (_is_shared && _init_state != expected) {
if (is_loaded()) compute_shared_init_state();
}
}
public: public:
// Has this klass been initialized? // Has this klass been initialized?
bool is_initialized() { bool is_initialized() {
if (_is_shared && !_is_initialized) { update_if_shared(instanceKlass::fully_initialized);
return is_loaded() && compute_shared_is_initialized(); return _init_state == instanceKlass::fully_initialized;
} }
return _is_initialized; // Is this klass being initialized?
bool is_being_initialized() {
update_if_shared(instanceKlass::being_initialized);
return _init_state == instanceKlass::being_initialized;
} }
// Has this klass been linked? // Has this klass been linked?
bool is_linked() { bool is_linked() {
if (_is_shared && !_is_linked) { update_if_shared(instanceKlass::linked);
return is_loaded() && compute_shared_is_linked(); return _init_state >= instanceKlass::linked;
}
return _is_linked;
} }
// General klass information. // General klass information.
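
Collapsing _is_initialized and _is_linked into a single instanceKlass::ClassState value lets the queries become comparisons on one ordered state, with update_if_shared() refreshing the cached value for shared klasses on demand. The sketch below uses its own ClassState enumeration; only the relative order of linked, being_initialized and fully_initialized is taken from the hunk, the rest is assumed.

    #include <cassert>

    // Stand-in for instanceKlass::ClassState; the exact enumerator list is an
    // assumption of this sketch, only the ordering of the named states matters.
    enum class ClassState { allocated, loaded, linked, being_initialized, fully_initialized };

    class ToyInstanceKlass {
        ClassState init_state_;
        bool is_shared_;
    public:
        ToyInstanceKlass(ClassState s, bool shared) : init_state_(s), is_shared_(shared) {}

        // For shared klasses the cached state can be stale, so re-read it
        // before answering: the update_if_shared() idea from the hunk.  The
        // current VM state is passed in here; the real code recomputes it.
        void update_if_shared(ClassState expected, ClassState current_vm_state) {
            if (is_shared_ && init_state_ != expected)
                init_state_ = current_vm_state;
        }

        // One ordered value answers all three questions that previously
        // needed two booleans plus recomputation.
        bool is_linked()            const { return init_state_ >= ClassState::linked; }
        bool is_being_initialized() const { return init_state_ == ClassState::being_initialized; }
        bool is_initialized()       const { return init_state_ == ClassState::fully_initialized; }
    };

    int main() {
        ToyInstanceKlass k(ClassState::being_initialized, /*shared=*/false);
        assert(k.is_linked());
        assert(k.is_being_initialized());
        assert(!k.is_initialized());
        return 0;
    }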

View File

@ -192,8 +192,14 @@ ciKlass* ciKlass::find_klass(ciSymbol* klass_name) {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciKlass::java_mirror // ciKlass::java_mirror
//
// Get the instance of java.lang.Class corresponding to this klass.
// If it is an unloaded instance or array klass, return an unloaded
// mirror object of type Class.
ciInstance* ciKlass::java_mirror() { ciInstance* ciKlass::java_mirror() {
GUARDED_VM_ENTRY( GUARDED_VM_ENTRY(
if (!is_loaded())
return ciEnv::current()->get_unloaded_klass_mirror(this);
oop java_mirror = get_Klass()->java_mirror(); oop java_mirror = get_Klass()->java_mirror();
return CURRENT_ENV->get_object(java_mirror)->as_instance(); return CURRENT_ENV->get_object(java_mirror)->as_instance();
) )

View File

@ -54,10 +54,10 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
_code = NULL; _code = NULL;
_exception_handlers = NULL; _exception_handlers = NULL;
_liveness = NULL; _liveness = NULL;
_bcea = NULL;
_method_blocks = NULL; _method_blocks = NULL;
#ifdef COMPILER2 #ifdef COMPILER2
_flow = NULL; _flow = NULL;
_bcea = NULL;
#endif // COMPILER2 #endif // COMPILER2
ciEnv *env = CURRENT_ENV; ciEnv *env = CURRENT_ENV;
@ -121,11 +121,11 @@ ciMethod::ciMethod(ciInstanceKlass* holder,
_intrinsic_id = vmIntrinsics::_none; _intrinsic_id = vmIntrinsics::_none;
_liveness = NULL; _liveness = NULL;
_can_be_statically_bound = false; _can_be_statically_bound = false;
_bcea = NULL;
_method_blocks = NULL; _method_blocks = NULL;
_method_data = NULL; _method_data = NULL;
#ifdef COMPILER2 #ifdef COMPILER2
_flow = NULL; _flow = NULL;
_bcea = NULL;
#endif // COMPILER2 #endif // COMPILER2
} }
@ -694,30 +694,21 @@ int ciMethod::scale_count(int count, float prof_factor) {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciMethod::is_method_handle_invoke // ciMethod::is_method_handle_invoke
// //
// Return true if the method is a MethodHandle target. // Return true if the method is an instance of one of the two
// signature-polymorphic MethodHandle methods, invokeExact or invokeGeneric.
bool ciMethod::is_method_handle_invoke() const { bool ciMethod::is_method_handle_invoke() const {
bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() && if (!is_loaded()) return false;
methodOopDesc::is_method_handle_invoke_name(name()->sid())); VM_ENTRY_MARK;
#ifdef ASSERT return get_methodOop()->is_method_handle_invoke();
if (is_loaded()) {
bool flag2 = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
{
VM_ENTRY_MARK;
bool flag3 = get_methodOop()->is_method_handle_invoke();
assert(flag2 == flag3, "consistent");
assert(flag == flag3, "consistent");
}
}
#endif //ASSERT
return flag;
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciMethod::is_method_handle_adapter // ciMethod::is_method_handle_adapter
// //
// Return true if the method is a generated MethodHandle adapter. // Return true if the method is a generated MethodHandle adapter.
// These are built by MethodHandleCompiler.
bool ciMethod::is_method_handle_adapter() const { bool ciMethod::is_method_handle_adapter() const {
check_is_loaded(); if (!is_loaded()) return false;
VM_ENTRY_MARK; VM_ENTRY_MARK;
return get_methodOop()->is_method_handle_adapter(); return get_methodOop()->is_method_handle_adapter();
} }
@ -1033,10 +1024,15 @@ bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor)
bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); } bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); }
BCEscapeAnalyzer *ciMethod::get_bcea() { BCEscapeAnalyzer *ciMethod::get_bcea() {
#ifdef COMPILER2
if (_bcea == NULL) { if (_bcea == NULL) {
_bcea = new (CURRENT_ENV->arena()) BCEscapeAnalyzer(this, NULL); _bcea = new (CURRENT_ENV->arena()) BCEscapeAnalyzer(this, NULL);
} }
return _bcea; return _bcea;
#else // COMPILER2
ShouldNotReachHere();
return NULL;
#endif // COMPILER2
} }
ciMethodBlocks *ciMethod::get_method_blocks() { ciMethodBlocks *ciMethod::get_method_blocks() {

View File

@ -48,7 +48,6 @@ class ciMethod : public ciObject {
ciInstanceKlass* _holder; ciInstanceKlass* _holder;
ciSignature* _signature; ciSignature* _signature;
ciMethodData* _method_data; ciMethodData* _method_data;
BCEscapeAnalyzer* _bcea;
ciMethodBlocks* _method_blocks; ciMethodBlocks* _method_blocks;
// Code attributes. // Code attributes.
@ -72,7 +71,8 @@ class ciMethod : public ciObject {
// Optional liveness analyzer. // Optional liveness analyzer.
MethodLiveness* _liveness; MethodLiveness* _liveness;
#ifdef COMPILER2 #ifdef COMPILER2
ciTypeFlow* _flow; ciTypeFlow* _flow;
BCEscapeAnalyzer* _bcea;
#endif #endif
ciMethod(methodHandle h_m); ciMethod(methodHandle h_m);

View File

@ -70,6 +70,7 @@ ciObjectFactory::ciObjectFactory(Arena* arena,
_unloaded_methods = new (arena) GrowableArray<ciMethod*>(arena, 4, 0, NULL); _unloaded_methods = new (arena) GrowableArray<ciMethod*>(arena, 4, 0, NULL);
_unloaded_klasses = new (arena) GrowableArray<ciKlass*>(arena, 8, 0, NULL); _unloaded_klasses = new (arena) GrowableArray<ciKlass*>(arena, 8, 0, NULL);
_unloaded_instances = new (arena) GrowableArray<ciInstance*>(arena, 4, 0, NULL);
_return_addresses = _return_addresses =
new (arena) GrowableArray<ciReturnAddress*>(arena, 8, 0, NULL); new (arena) GrowableArray<ciReturnAddress*>(arena, 8, 0, NULL);
} }
@ -443,6 +444,74 @@ ciKlass* ciObjectFactory::get_unloaded_klass(ciKlass* accessing_klass,
return new_klass; return new_klass;
} }
//------------------------------------------------------------------
// ciObjectFactory::get_unloaded_instance
//
// Get a ciInstance representing an as-yet undetermined instance of a given class.
//
ciInstance* ciObjectFactory::get_unloaded_instance(ciInstanceKlass* instance_klass) {
for (int i=0; i<_unloaded_instances->length(); i++) {
ciInstance* entry = _unloaded_instances->at(i);
if (entry->klass()->equals(instance_klass)) {
// We've found a match.
return entry;
}
}
// This is a new unloaded instance. Create it and stick it in
// the cache.
ciInstance* new_instance = new (arena()) ciInstance(instance_klass);
init_ident_of(new_instance);
_unloaded_instances->append(new_instance);
// make sure it looks the way we want:
assert(!new_instance->is_loaded(), "");
assert(new_instance->klass() == instance_klass, "");
return new_instance;
}
//------------------------------------------------------------------
// ciObjectFactory::get_unloaded_klass_mirror
//
// Get a ciInstance representing an unresolved klass mirror.
//
// Currently, this ignores the parameters and returns a unique unloaded instance.
ciInstance* ciObjectFactory::get_unloaded_klass_mirror(ciKlass* type) {
assert(ciEnv::_Class_klass != NULL, "");
return get_unloaded_instance(ciEnv::_Class_klass->as_instance_klass());
}
//------------------------------------------------------------------
// ciObjectFactory::get_unloaded_method_handle_constant
//
// Get a ciInstance representing an unresolved method handle constant.
//
// Currently, this ignores the parameters and returns a unique unloaded instance.
ciInstance* ciObjectFactory::get_unloaded_method_handle_constant(ciKlass* holder,
ciSymbol* name,
ciSymbol* signature,
int ref_kind) {
if (ciEnv::_MethodHandle_klass == NULL) return NULL;
return get_unloaded_instance(ciEnv::_MethodHandle_klass->as_instance_klass());
}
//------------------------------------------------------------------
// ciObjectFactory::get_unloaded_method_type_constant
//
// Get a ciInstance representing an unresolved method type constant.
//
// Currently, this ignores the parameters and returns a unique unloaded instance.
ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signature) {
if (ciEnv::_MethodType_klass == NULL) return NULL;
return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
}
//------------------------------------------------------------------ //------------------------------------------------------------------
// ciObjectFactory::get_empty_methodData // ciObjectFactory::get_empty_methodData
// //
@ -637,7 +706,8 @@ void ciObjectFactory::print_contents() {
// //
// Print debugging information about the object factory // Print debugging information about the object factory
void ciObjectFactory::print() { void ciObjectFactory::print() {
tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_klasses=%d>", tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_instances=%d unloaded_klasses=%d>",
_ci_objects->length(), _unloaded_methods->length(), _ci_objects->length(), _unloaded_methods->length(),
_unloaded_instances->length(),
_unloaded_klasses->length()); _unloaded_klasses->length());
} }
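
get_unloaded_instance() keeps one canonical placeholder per klass, so repeated lookups for the same unresolved MethodHandle, MethodType or Class constant share an identity. Here is a minimal version of that find-or-create cache, with stand-in types rather than ciInstance/ciInstanceKlass.

    #include <cassert>
    #include <string>
    #include <vector>

    // One placeholder object per klass name; the types are illustrative only.
    struct ToyPlaceholder { std::string klass_name; };

    class ToyUnloadedInstanceCache {
        std::vector<ToyPlaceholder*> instances_;
    public:
        ToyPlaceholder* get_unloaded_instance(const std::string& klass_name) {
            for (ToyPlaceholder* p : instances_)    // linear scan, like the GrowableArray loop
                if (p->klass_name == klass_name)
                    return p;
            ToyPlaceholder* fresh = new ToyPlaceholder{klass_name};
            instances_.push_back(fresh);            // cache it for next time
            return fresh;
        }
        ~ToyUnloadedInstanceCache() { for (ToyPlaceholder* p : instances_) delete p; }
    };

    int main() {
        ToyUnloadedInstanceCache cache;
        ToyPlaceholder* a = cache.get_unloaded_instance("java/dyn/MethodHandle");
        ToyPlaceholder* b = cache.get_unloaded_instance("java/dyn/MethodHandle");
        assert(a == b);   // same placeholder both times
        return 0;
    }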

View File

@ -39,6 +39,7 @@ private:
GrowableArray<ciObject*>* _ci_objects; GrowableArray<ciObject*>* _ci_objects;
GrowableArray<ciMethod*>* _unloaded_methods; GrowableArray<ciMethod*>* _unloaded_methods;
GrowableArray<ciKlass*>* _unloaded_klasses; GrowableArray<ciKlass*>* _unloaded_klasses;
GrowableArray<ciInstance*>* _unloaded_instances;
GrowableArray<ciReturnAddress*>* _return_addresses; GrowableArray<ciReturnAddress*>* _return_addresses;
int _next_ident; int _next_ident;
@ -73,6 +74,8 @@ private:
void print_contents_impl(); void print_contents_impl();
ciInstance* get_unloaded_instance(ciInstanceKlass* klass);
public: public:
static bool is_initialized() { return _initialized; } static bool is_initialized() { return _initialized; }
@ -98,6 +101,18 @@ public:
ciSymbol* name, ciSymbol* name,
bool create_if_not_found); bool create_if_not_found);
// Get a ciInstance representing an unresolved klass mirror.
ciInstance* get_unloaded_klass_mirror(ciKlass* type);
// Get a ciInstance representing an unresolved method handle constant.
ciInstance* get_unloaded_method_handle_constant(ciKlass* holder,
ciSymbol* name,
ciSymbol* signature,
int ref_kind);
// Get a ciInstance representing an unresolved method type constant.
ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
// Get the ciMethodData representing the methodData for a method // Get the ciMethodData representing the methodData for a method
// with none. // with none.

View File

@ -186,12 +186,13 @@ ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciBytecodeStream::get_constant_index // ciBytecodeStream::get_constant_raw_index
// //
// If this bytecode is one of the ldc variants, get the index of the // If this bytecode is one of the ldc variants, get the index of the
// referenced constant. // referenced constant.
int ciBytecodeStream::get_constant_index() const { int ciBytecodeStream::get_constant_raw_index() const {
switch(cur_bc()) { // work-alike for Bytecode_loadconstant::raw_index()
switch (cur_bc()) {
case Bytecodes::_ldc: case Bytecodes::_ldc:
return get_index_u1(); return get_index_u1();
case Bytecodes::_ldc_w: case Bytecodes::_ldc_w:
@ -202,25 +203,52 @@ int ciBytecodeStream::get_constant_index() const {
return 0; return 0;
} }
} }
// ------------------------------------------------------------------
// ciBytecodeStream::get_constant_pool_index
// Decode any CP cache index into a regular pool index.
int ciBytecodeStream::get_constant_pool_index() const {
// work-alike for Bytecode_loadconstant::pool_index()
int index = get_constant_raw_index();
if (has_cache_index()) {
return get_cpcache()->get_pool_index(index);
}
return index;
}
// ------------------------------------------------------------------
// ciBytecodeStream::get_constant_cache_index
// Return the CP cache index, or -1 if there isn't any.
int ciBytecodeStream::get_constant_cache_index() const {
// work-alike for Bytecode_loadconstant::cache_index()
return has_cache_index() ? get_constant_raw_index() : -1;
}
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciBytecodeStream::get_constant // ciBytecodeStream::get_constant
// //
// If this bytecode is one of the ldc variants, get the referenced // If this bytecode is one of the ldc variants, get the referenced
// constant. // constant.
ciConstant ciBytecodeStream::get_constant() { ciConstant ciBytecodeStream::get_constant() {
int pool_index = get_constant_raw_index();
int cache_index = -1;
if (has_cache_index()) {
cache_index = pool_index;
pool_index = -1;
}
VM_ENTRY_MARK; VM_ENTRY_MARK;
constantPoolHandle cpool(_method->get_methodOop()->constants()); constantPoolHandle cpool(_method->get_methodOop()->constants());
return CURRENT_ENV->get_constant_by_index(cpool, get_constant_index(), _holder); return CURRENT_ENV->get_constant_by_index(cpool, pool_index, cache_index, _holder);
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
bool ciBytecodeStream::is_unresolved_string() const { // ciBytecodeStream::get_constant_pool_tag
return CURRENT_ENV->is_unresolved_string(_holder, get_constant_index()); //
} // If this bytecode is one of the ldc variants, get the referenced
// constant.
// ------------------------------------------------------------------ constantTag ciBytecodeStream::get_constant_pool_tag(int index) const {
bool ciBytecodeStream::is_unresolved_klass() const { VM_ENTRY_MARK;
return CURRENT_ENV->is_unresolved_klass(_holder, get_klass_index()); return _method->get_methodOop()->constants()->tag_at(index);
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
@ -378,13 +406,16 @@ int ciBytecodeStream::get_method_signature_index() {
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciBytecodeStream::get_cpcache // ciBytecodeStream::get_cpcache
ciCPCache* ciBytecodeStream::get_cpcache() { ciCPCache* ciBytecodeStream::get_cpcache() const {
VM_ENTRY_MARK; if (_cpcache == NULL) {
// Get the constant pool. VM_ENTRY_MARK;
constantPoolOop cpool = _holder->get_instanceKlass()->constants(); // Get the constant pool.
constantPoolCacheOop cpcache = cpool->cache(); constantPoolOop cpool = _holder->get_instanceKlass()->constants();
constantPoolCacheOop cpcache = cpool->cache();
return CURRENT_ENV->get_object(cpcache)->as_cpcache(); *(ciCPCache**)&_cpcache = CURRENT_ENV->get_object(cpcache)->as_cpcache();
}
return _cpcache;
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
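
get_cpcache() is now a const accessor that memoizes the cache object the first time it is asked for; the hunk writes through a cast on the non-mutable field, while the sketch below reaches for `mutable`, which is purely this example's choice.

    #include <cassert>

    // Toy stand-ins for the stream and the constant-pool cache object.
    struct ToyCPCacheObj { int id; };

    class ToyBytecodeStream {
        mutable const ToyCPCacheObj* cpcache_ = nullptr;   // reset to NULL in reset()

        static const ToyCPCacheObj* expensive_lookup() {
            // Plays the role of the VM_ENTRY_MARK + constant-pool walk above.
            static ToyCPCacheObj the_cache{42};
            return &the_cache;
        }
    public:
        // Const accessor that fills the cache lazily on first call; the real
        // code keeps the field non-mutable and writes through a cast instead.
        const ToyCPCacheObj* get_cpcache() const {
            if (cpcache_ == nullptr)
                cpcache_ = expensive_lookup();
            return cpcache_;
        }
    };

    int main() {
        ToyBytecodeStream s;
        const ToyCPCacheObj* first  = s.get_cpcache();
        const ToyCPCacheObj* second = s.get_cpcache();
        assert(first == second && first->id == 42);   // looked up once, reused afterwards
        return 0;
    }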

View File

@ -46,6 +46,7 @@ private:
ciMethod* _method; // the method ciMethod* _method; // the method
ciInstanceKlass* _holder; ciInstanceKlass* _holder;
ciCPCache* _cpcache;
address _bc_start; // Start of current bytecode for table address _bc_start; // Start of current bytecode for table
address _was_wide; // Address past last wide bytecode address _was_wide; // Address past last wide bytecode
jint* _table_base; // Aligned start of last table or switch jint* _table_base; // Aligned start of last table or switch
@ -58,7 +59,9 @@ private:
void reset( address base, unsigned int size ) { void reset( address base, unsigned int size ) {
_bc_start =_was_wide = 0; _bc_start =_was_wide = 0;
_start = _pc = base; _end = base + size; } _start = _pc = base; _end = base + size;
_cpcache = NULL;
}
void assert_wide(bool require_wide) const { void assert_wide(bool require_wide) const {
if (require_wide) if (require_wide)
@ -136,15 +139,20 @@ public:
bool is_wide() const { return ( _pc == _was_wide ); } bool is_wide() const { return ( _pc == _was_wide ); }
// Does this instruction contain an index which refers into the CP cache? // Does this instruction contain an index which refers into the CP cache?
bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); } bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); }
int get_index_u1() const { int get_index_u1() const {
return bytecode()->get_index_u1(cur_bc_raw()); return bytecode()->get_index_u1(cur_bc_raw());
} }
int get_index_u1_cpcache() const {
return bytecode()->get_index_u1_cpcache(cur_bc_raw());
}
// Get a byte index following this bytecode. // Get a byte index following this bytecode.
// If prefixed with a wide bytecode, get a wide index. // If prefixed with a wide bytecode, get a wide index.
int get_index() const { int get_index() const {
assert(!has_cache_index(), "else use cpcache variant");
return (_pc == _was_wide) // was widened? return (_pc == _was_wide) // was widened?
? get_index_u2(true) // yes, return wide index ? get_index_u2(true) // yes, return wide index
: get_index_u1(); // no, return narrow index : get_index_u1(); // no, return narrow index
@ -207,7 +215,9 @@ public:
return cur_bci() + get_int_table(index); } return cur_bci() + get_int_table(index); }
// --- Constant pool access --- // --- Constant pool access ---
int get_constant_index() const; int get_constant_raw_index() const;
int get_constant_pool_index() const;
int get_constant_cache_index() const;
int get_field_index(); int get_field_index();
int get_method_index(); int get_method_index();
@ -217,12 +227,17 @@ public:
int get_klass_index() const; int get_klass_index() const;
// If this bytecode is one of the ldc variants, get the referenced // If this bytecode is one of the ldc variants, get the referenced
// constant // constant. Do not attempt to resolve it, since that would require
// execution of Java code. If it is not resolved, return an unloaded
// object (ciConstant.as_object()->is_loaded() == false).
ciConstant get_constant(); ciConstant get_constant();
// True if the ldc variant points to an unresolved string constantTag get_constant_pool_tag(int index) const;
bool is_unresolved_string() const;
// True if the ldc variant points to an unresolved klass // True if the klass-using bytecode points to an unresolved klass
bool is_unresolved_klass() const; bool is_unresolved_klass() const {
constantTag tag = get_constant_pool_tag(get_klass_index());
return tag.is_unresolved_klass();
}
// If this bytecode is one of get_field, get_static, put_field, // If this bytecode is one of get_field, get_static, put_field,
// or put_static, get the referenced field. // or put_static, get the referenced field.
@ -238,7 +253,7 @@ public:
int get_method_holder_index(); int get_method_holder_index();
int get_method_signature_index(); int get_method_signature_index();
ciCPCache* get_cpcache(); ciCPCache* get_cpcache() const;
ciCallSite* get_call_site(); ciCallSite* get_call_site();
}; };

View File

@ -712,10 +712,8 @@ void ciTypeFlow::StateVector::do_ldc(ciBytecodeStream* str) {
ciObject* obj = con.as_object(); ciObject* obj = con.as_object();
if (obj->is_null_object()) { if (obj->is_null_object()) {
push_null(); push_null();
} else if (obj->is_klass()) {
// The type of ldc <class> is java.lang.Class
push_object(outer()->env()->Class_klass());
} else { } else {
assert(!obj->is_klass(), "must be java_mirror of klass");
push_object(obj->klass()); push_object(obj->klass());
} }
} else { } else {

View File

@ -117,6 +117,45 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
cp->string_index_at_put(index, string_index); cp->string_index_at_put(index, string_index);
} }
break; break;
case JVM_CONSTANT_MethodHandle :
case JVM_CONSTANT_MethodType :
if (!EnableMethodHandles ||
_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
classfile_parse_error(
(!EnableMethodHandles ?
"This JVM does not support constant tag %u in class file %s" :
"Class file version does not support constant tag %u in class file %s"),
tag, CHECK);
}
if (tag == JVM_CONSTANT_MethodHandle) {
cfs->guarantee_more(4, CHECK); // ref_kind, method_index, tag/access_flags
u1 ref_kind = cfs->get_u1_fast();
u2 method_index = cfs->get_u2_fast();
cp->method_handle_index_at_put(index, ref_kind, method_index);
} else if (tag == JVM_CONSTANT_MethodType) {
cfs->guarantee_more(3, CHECK); // signature_index, tag/access_flags
u2 signature_index = cfs->get_u2_fast();
cp->method_type_index_at_put(index, signature_index);
} else {
ShouldNotReachHere();
}
break;
case JVM_CONSTANT_InvokeDynamic :
{
if (!EnableInvokeDynamic ||
_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
classfile_parse_error(
(!EnableInvokeDynamic ?
"This JVM does not support constant tag %u in class file %s" :
"Class file version does not support constant tag %u in class file %s"),
tag, CHECK);
}
cfs->guarantee_more(5, CHECK); // bsm_index, name_and_type_index, tag/access_flags
u2 bootstrap_method_index = cfs->get_u2_fast();
u2 name_and_type_index = cfs->get_u2_fast();
cp->invoke_dynamic_at_put(index, bootstrap_method_index, name_and_type_index);
}
break;
case JVM_CONSTANT_Integer : case JVM_CONSTANT_Integer :
{ {
cfs->guarantee_more(5, CHECK); // bytes, tag/access_flags cfs->guarantee_more(5, CHECK); // bytes, tag/access_flags
@ -337,6 +376,78 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
cp->unresolved_string_at_put(index, sym); cp->unresolved_string_at_put(index, sym);
} }
break; break;
case JVM_CONSTANT_MethodHandle :
{
int ref_index = cp->method_handle_index_at(index);
check_property(
valid_cp_range(ref_index, length) &&
EnableMethodHandles,
"Invalid constant pool index %u in class file %s",
ref_index, CHECK_(nullHandle));
constantTag tag = cp->tag_at(ref_index);
int ref_kind = cp->method_handle_ref_kind_at(index);
switch (ref_kind) {
case JVM_REF_getField:
case JVM_REF_getStatic:
case JVM_REF_putField:
case JVM_REF_putStatic:
check_property(
tag.is_field(),
"Invalid constant pool index %u in class file %s (not a field)",
ref_index, CHECK_(nullHandle));
break;
case JVM_REF_invokeVirtual:
case JVM_REF_invokeStatic:
case JVM_REF_invokeSpecial:
case JVM_REF_newInvokeSpecial:
check_property(
tag.is_method(),
"Invalid constant pool index %u in class file %s (not a method)",
ref_index, CHECK_(nullHandle));
break;
case JVM_REF_invokeInterface:
check_property(
tag.is_interface_method(),
"Invalid constant pool index %u in class file %s (not an interface method)",
ref_index, CHECK_(nullHandle));
break;
default:
classfile_parse_error(
"Bad method handle kind at constant pool index %u in class file %s",
index, CHECK_(nullHandle));
}
// Keep the ref_index unchanged. It will be indirected at link-time.
}
break;
case JVM_CONSTANT_MethodType :
{
int ref_index = cp->method_type_index_at(index);
check_property(
valid_cp_range(ref_index, length) &&
cp->tag_at(ref_index).is_utf8() &&
EnableMethodHandles,
"Invalid constant pool index %u in class file %s",
ref_index, CHECK_(nullHandle));
}
break;
case JVM_CONSTANT_InvokeDynamic :
{
int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index);
check_property((bootstrap_method_ref_index == 0 && AllowTransitionalJSR292)
||
(valid_cp_range(bootstrap_method_ref_index, length) &&
cp->tag_at(bootstrap_method_ref_index).is_method_handle()),
"Invalid constant pool index %u in class file %s",
bootstrap_method_ref_index,
CHECK_(nullHandle));
check_property(valid_cp_range(name_and_type_ref_index, length) &&
cp->tag_at(name_and_type_ref_index).is_name_and_type(),
"Invalid constant pool index %u in class file %s",
name_and_type_ref_index,
CHECK_(nullHandle));
break;
}
default: default:
fatal(err_msg("bad constant pool tag value %u", fatal(err_msg("bad constant pool tag value %u",
cp->tag_at(index).value())); cp->tag_at(index).value()));
@ -452,6 +563,43 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
} }
break; break;
} }
case JVM_CONSTANT_MethodHandle: {
int ref_index = cp->method_handle_index_at(index);
int ref_kind = cp->method_handle_ref_kind_at(index);
switch (ref_kind) {
case JVM_REF_invokeVirtual:
case JVM_REF_invokeStatic:
case JVM_REF_invokeSpecial:
case JVM_REF_newInvokeSpecial:
{
int name_and_type_ref_index = cp->name_and_type_ref_index_at(ref_index);
int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index);
symbolHandle name(THREAD, cp->symbol_at(name_ref_index));
if (ref_kind == JVM_REF_newInvokeSpecial) {
if (name() != vmSymbols::object_initializer_name()) {
classfile_parse_error(
"Bad constructor name at constant pool index %u in class file %s",
name_ref_index, CHECK_(nullHandle));
}
} else {
if (name() == vmSymbols::object_initializer_name()) {
classfile_parse_error(
"Bad method name at constant pool index %u in class file %s",
name_ref_index, CHECK_(nullHandle));
}
}
}
break;
// Other ref_kinds are already fully checked in previous pass.
}
break;
}
case JVM_CONSTANT_MethodType: {
symbolHandle no_name = vmSymbolHandles::type_name(); // place holder
symbolHandle signature(THREAD, cp->method_type_signature_at(index));
verify_legal_method_signature(no_name, signature, CHECK_(nullHandle));
break;
}
} // end of switch } // end of switch
} // end of for } // end of for
@ -467,7 +615,7 @@ void ClassFileParser::patch_constant_pool(constantPoolHandle cp, int index, Hand
case JVM_CONSTANT_UnresolvedClass : case JVM_CONSTANT_UnresolvedClass :
// Patching a class means pre-resolving it. // Patching a class means pre-resolving it.
// The name in the constant pool is ignored. // The name in the constant pool is ignored.
if (patch->klass() == SystemDictionary::Class_klass()) { // %%% java_lang_Class::is_instance if (java_lang_Class::is_instance(patch())) {
guarantee_property(!java_lang_Class::is_primitive(patch()), guarantee_property(!java_lang_Class::is_primitive(patch()),
"Illegal class patch at %d in class file %s", "Illegal class patch at %d in class file %s",
index, CHECK); index, CHECK);
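For reference, the wire format consumed by the parse_constant_pool_entries hunk above, written out as plain structs (field names follow the parser's local variables, not official spec names; u1/u2 are the usual class-file stream widths):

  struct CONSTANT_MethodHandle_info  { u1 tag; u1 ref_kind;               u2 method_index; };
  struct CONSTANT_MethodType_info    { u1 tag; u2 signature_index; };
  struct CONSTANT_InvokeDynamic_info { u1 tag; u2 bootstrap_method_index; u2 name_and_type_index; };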
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -832,7 +832,6 @@ objArrayOop ClassLoader::get_system_packages(TRAPS) {
instanceKlassHandle ClassLoader::load_classfile(symbolHandle h_name, TRAPS) { instanceKlassHandle ClassLoader::load_classfile(symbolHandle h_name, TRAPS) {
VTuneClassLoadMarker clm;
ResourceMark rm(THREAD); ResourceMark rm(THREAD);
EventMark m("loading class " INTPTR_FORMAT, (address)h_name()); EventMark m("loading class " INTPTR_FORMAT, (address)h_name());
ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion); ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
View File
@ -2454,6 +2454,48 @@ Handle SystemDictionary::find_method_handle_type(symbolHandle signature,
return Handle(THREAD, (oop) result.get_jobject()); return Handle(THREAD, (oop) result.get_jobject());
} }
// Ask Java code to find or construct a method handle constant.
Handle SystemDictionary::link_method_handle_constant(KlassHandle caller,
int ref_kind, //e.g., JVM_REF_invokeVirtual
KlassHandle callee,
symbolHandle name_sym,
symbolHandle signature,
TRAPS) {
Handle empty;
Handle name = java_lang_String::create_from_symbol(name_sym(), CHECK_(empty));
Handle type;
if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
bool ignore_is_on_bcp = false;
type = find_method_handle_type(signature, caller, ignore_is_on_bcp, CHECK_(empty));
} else {
SignatureStream ss(signature(), false);
if (!ss.is_done()) {
oop mirror = ss.as_java_mirror(caller->class_loader(), caller->protection_domain(),
SignatureStream::NCDFError, CHECK_(empty));
type = Handle(THREAD, mirror);
ss.next();
if (!ss.is_done()) type = Handle(); // error!
}
}
if (type.is_null()) {
THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad signature", empty);
}
// call sun.dyn.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle
JavaCallArguments args;
args.push_oop(caller->java_mirror()); // the referring class
args.push_int(ref_kind);
args.push_oop(callee->java_mirror()); // the target class
args.push_oop(name());
args.push_oop(type());
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::linkMethodHandleConstant_name(),
vmSymbols::linkMethodHandleConstant_signature(),
&args, CHECK_(empty));
return Handle(THREAD, (oop) result.get_jobject());
}
// Ask Java code to find or construct a java.dyn.CallSite for the given // Ask Java code to find or construct a java.dyn.CallSite for the given
// name and signature, as interpreted relative to the given class loader. // name and signature, as interpreted relative to the given class loader.
@ -2465,6 +2507,10 @@ Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
int caller_bci, int caller_bci,
TRAPS) { TRAPS) {
Handle empty; Handle empty;
guarantee(bootstrap_method.not_null() &&
java_dyn_MethodHandle::is_instance(bootstrap_method()),
"caller must supply a valid BSM");
Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty)); Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty));
MethodHandles::init_MemberName(caller_mname(), caller_method()); MethodHandles::init_MemberName(caller_mname(), caller_method());
@ -2495,20 +2541,61 @@ Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
return call_site_oop; return call_site_oop;
} }
Handle SystemDictionary::find_bootstrap_method(KlassHandle caller, TRAPS) { Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int caller_bci,
int cache_index, TRAPS) {
Handle empty; Handle empty;
if (!caller->oop_is_instance()) return empty;
instanceKlassHandle ik(THREAD, caller()); constantPoolHandle pool;
{
klassOop caller = caller_method->method_holder();
if (!Klass::cast(caller)->oop_is_instance()) return empty;
pool = constantPoolHandle(THREAD, instanceKlass::cast(caller)->constants());
}
oop boot_method_oop = ik->bootstrap_method(); int constant_pool_index = pool->cache()->entry_at(cache_index)->constant_pool_index();
if (boot_method_oop != NULL) { constantTag tag = pool->tag_at(constant_pool_index);
if (TraceMethodHandles) {
tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop); if (tag.is_invoke_dynamic()) {
// JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type]
// The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
int bsm_index = pool->invoke_dynamic_bootstrap_method_ref_index_at(constant_pool_index);
if (bsm_index != 0) {
int bsm_index_in_cache = pool->cache()->entry_at(cache_index)->bootstrap_method_index_in_cache();
DEBUG_ONLY(int bsm_index_2 = pool->cache()->entry_at(bsm_index_in_cache)->constant_pool_index());
assert(bsm_index == bsm_index_2, "BSM constant lifted to cache");
if (TraceMethodHandles) {
tty->print_cr("resolving bootstrap method for "PTR_FORMAT" at %d at cache[%d]CP[%d]...",
(intptr_t) caller_method(), caller_bci, cache_index, constant_pool_index);
}
oop bsm_oop = pool->resolve_cached_constant_at(bsm_index_in_cache, CHECK_(empty));
if (TraceMethodHandles) {
tty->print_cr("bootstrap method for "PTR_FORMAT" at %d retrieved as "PTR_FORMAT":",
(intptr_t) caller_method(), caller_bci, (intptr_t) bsm_oop);
}
assert(bsm_oop->is_oop()
&& java_dyn_MethodHandle::is_instance(bsm_oop), "must be sane");
return Handle(THREAD, bsm_oop);
} }
assert(boot_method_oop->is_oop() // else null BSM; fall through
&& java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane"); } else if (tag.is_name_and_type()) {
return Handle(THREAD, boot_method_oop); // JSR 292 EDR does not have JVM_CONSTANT_InvokeDynamic
// a bare name&type defaults its BSM to null, so fall through...
} else {
ShouldNotReachHere(); // verifier does not allow this
}
// Fall through to pick up the per-class bootstrap method.
// This mechanism may go away in the PFD.
assert(AllowTransitionalJSR292, "else the verifier should have stopped us already");
oop bsm_oop = instanceKlass::cast(caller_method->method_holder())->bootstrap_method();
if (bsm_oop != NULL) {
if (TraceMethodHandles) {
tty->print_cr("bootstrap method for "PTR_FORMAT" registered as "PTR_FORMAT":",
(intptr_t) caller_method(), (intptr_t) bsm_oop);
}
assert(bsm_oop->is_oop()
&& java_dyn_MethodHandle::is_instance(bsm_oop), "must be sane");
return Handle(THREAD, bsm_oop);
} }
return empty; return empty;
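A hypothetical caller of the new up-call, for example from constant-pool resolution of a CONSTANT_MethodHandle entry; only the link_method_handle_constant() signature is taken from this commit, the wrapper name and argument sourcing are illustrative:

  Handle resolve_method_handle_constant_example(KlassHandle caller, int ref_kind,
                                                KlassHandle callee, symbolHandle name,
                                                symbolHandle signature, TRAPS) {
    Handle empty;
    // Delegates to sun.dyn.MethodHandleNatives.linkMethodHandleConstant(...)
    Handle mh = SystemDictionary::link_method_handle_constant(caller, ref_kind, callee,
                                                              name, signature, CHECK_(empty));
    return mh;  // a java.dyn.MethodHandle, or empty with a pending exception
  }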
View File
@ -473,6 +473,13 @@ public:
KlassHandle accessing_klass, KlassHandle accessing_klass,
bool& return_bcp_flag, bool& return_bcp_flag,
TRAPS); TRAPS);
// ask Java to compute a java.dyn.MethodHandle object for a given CP entry
static Handle link_method_handle_constant(KlassHandle caller,
int ref_kind, //e.g., JVM_REF_invokeVirtual
KlassHandle callee,
symbolHandle name,
symbolHandle signature,
TRAPS);
// ask Java to create a dynamic call site, while linking an invokedynamic op // ask Java to create a dynamic call site, while linking an invokedynamic op
static Handle make_dynamic_call_site(Handle bootstrap_method, static Handle make_dynamic_call_site(Handle bootstrap_method,
// Callee information: // Callee information:
@ -485,7 +492,10 @@ public:
TRAPS); TRAPS);
// coordinate with Java about bootstrap methods // coordinate with Java about bootstrap methods
static Handle find_bootstrap_method(KlassHandle caller, TRAPS); static Handle find_bootstrap_method(methodHandle caller_method,
int caller_bci, // N.B. must be an invokedynamic
int cache_index, // must be corresponding main_entry
TRAPS);
// Utility for printing loader "name" as part of tracing constraints // Utility for printing loader "name" as part of tracing constraints
static const char* loader_name(oop loader) { static const char* loader_name(oop loader) {
View File
@ -1598,7 +1598,10 @@ void ClassVerifier::verify_ldc(
if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) { if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) {
if (!tag.is_unresolved_string() && !tag.is_unresolved_klass()) { if (!tag.is_unresolved_string() && !tag.is_unresolved_klass()) {
types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float) types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float)
| (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class); | (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class)
| (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType);
// Note: The class file parser already verified the legality of
// MethodHandle and MethodType constants.
verify_cp_type(index, cp, types, CHECK_VERIFY(this)); verify_cp_type(index, cp, types, CHECK_VERIFY(this));
} }
} else { } else {
@ -1632,6 +1635,14 @@ void ClassVerifier::verify_ldc(
current_frame->push_stack_2( current_frame->push_stack_2(
VerificationType::long_type(), VerificationType::long_type(),
VerificationType::long2_type(), CHECK_VERIFY(this)); VerificationType::long2_type(), CHECK_VERIFY(this));
} else if (tag.is_method_handle()) {
current_frame->push_stack(
VerificationType::reference_type(
vmSymbols::java_dyn_MethodHandle()), CHECK_VERIFY(this));
} else if (tag.is_method_type()) {
current_frame->push_stack(
VerificationType::reference_type(
vmSymbols::java_dyn_MethodType()), CHECK_VERIFY(this));
} else { } else {
verify_error(bci, "Invalid index in ldc"); verify_error(bci, "Invalid index in ldc");
return; return;
@ -1902,7 +1913,8 @@ void ClassVerifier::verify_invoke_instructions(
unsigned int types = (opcode == Bytecodes::_invokeinterface unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref ? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic : opcode == Bytecodes::_invokedynamic
? 1 << JVM_CONSTANT_NameAndType ? (1 << JVM_CONSTANT_NameAndType
|1 << JVM_CONSTANT_InvokeDynamic)
: 1 << JVM_CONSTANT_Methodref); : 1 << JVM_CONSTANT_Methodref);
verify_cp_type(index, cp, types, CHECK_VERIFY(this)); verify_cp_type(index, cp, types, CHECK_VERIFY(this));
@ -1920,9 +1932,12 @@ void ClassVerifier::verify_invoke_instructions(
// Get referenced class type // Get referenced class type
VerificationType ref_class_type; VerificationType ref_class_type;
if (opcode == Bytecodes::_invokedynamic) { if (opcode == Bytecodes::_invokedynamic) {
if (!EnableInvokeDynamic) { if (!EnableInvokeDynamic ||
_klass->major_version() < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
class_format_error( class_format_error(
"invokedynamic instructions not enabled on this JVM", (!EnableInvokeDynamic ?
"invokedynamic instructions not enabled in this JVM" :
"invokedynamic instructions not supported by this class file version"),
_klass->external_name()); _klass->external_name());
return; return;
} }
View File
@ -25,7 +25,10 @@
// The verifier class // The verifier class
class Verifier : AllStatic { class Verifier : AllStatic {
public: public:
enum { STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50 }; enum {
STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50,
INVOKEDYNAMIC_MAJOR_VERSION = 51
};
typedef enum { ThrowException, NoException } Mode; typedef enum { ThrowException, NoException } Mode;
/** /**
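The new constant is meant to be paired with the existing Enable* flags wherever JSR 292 features are gated, as in the parser and verifier hunks above; condensed into one helper (name is illustrative):

  static bool example_supports_invokedynamic(u2 major_version) {
    return EnableInvokeDynamic &&
           major_version >= Verifier::INVOKEDYNAMIC_MAJOR_VERSION;  // class file version 51
  }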
View File
@ -246,6 +246,8 @@
/* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \ /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \
template(findMethodHandleType_name, "findMethodHandleType") \ template(findMethodHandleType_name, "findMethodHandleType") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \ template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \
template(makeDynamicCallSite_name, "makeDynamicCallSite") \ template(makeDynamicCallSite_name, "makeDynamicCallSite") \
template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \ template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \
NOT_LP64( do_alias(machine_word_signature, int_signature) ) \ NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
View File
@ -202,6 +202,11 @@ void BufferBlob::free( BufferBlob *blob ) {
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob // Implementation of AdapterBlob
AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
BufferBlob("I2C/C2I adapters", size, cb) {
CodeCache::commit(this);
}
AdapterBlob* AdapterBlob::create(CodeBuffer* cb) { AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
@ -281,7 +286,6 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub); tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
Disassembler::decode(stub->instructions_begin(), stub->instructions_end()); Disassembler::decode(stub->instructions_begin(), stub->instructions_end());
} }
VTune::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end()); Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) { if (JvmtiExport::should_post_dynamic_code_generated()) {
@ -356,7 +360,6 @@ DeoptimizationBlob* DeoptimizationBlob::create(
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
} }
VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) { if (JvmtiExport::should_post_dynamic_code_generated()) {
@ -414,7 +417,6 @@ UncommonTrapBlob* UncommonTrapBlob::create(
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
} }
VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) { if (JvmtiExport::should_post_dynamic_code_generated()) {
@ -474,7 +476,6 @@ ExceptionBlob* ExceptionBlob::create(
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
} }
VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) { if (JvmtiExport::should_post_dynamic_code_generated()) {
@ -533,7 +534,6 @@ SafepointBlob* SafepointBlob::create(
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
} }
VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) { if (JvmtiExport::should_post_dynamic_code_generated()) {
View File
@ -219,8 +219,7 @@ class BufferBlob: public CodeBlob {
class AdapterBlob: public BufferBlob { class AdapterBlob: public BufferBlob {
private: private:
AdapterBlob(int size) : BufferBlob("I2C/C2I adapters", size) {} AdapterBlob(int size, CodeBuffer* cb);
AdapterBlob(int size, CodeBuffer* cb) : BufferBlob("I2C/C2I adapters", size, cb) {}
public: public:
// Creation // Creation
View File
@ -93,6 +93,8 @@ class CodeBlob_sizes {
CodeHeap * CodeCache::_heap = new CodeHeap(); CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0; int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0; int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false; bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL; nmethod* CodeCache::_scavenge_root_nmethods = NULL;
@ -176,8 +178,14 @@ void CodeCache::free(CodeBlob* cb) {
verify_if_often(); verify_if_often();
print_trace("free", cb); print_trace("free", cb);
if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) { if (cb->is_nmethod()) {
_number_of_nmethods_with_dependencies--; _number_of_nmethods--;
if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies--;
}
}
if (cb->is_adapter_blob()) {
_number_of_adapters--;
} }
_number_of_blobs--; _number_of_blobs--;
@ -191,9 +199,16 @@ void CodeCache::free(CodeBlob* cb) {
void CodeCache::commit(CodeBlob* cb) { void CodeCache::commit(CodeBlob* cb) {
// this is called by nmethod::nmethod, which must already own CodeCache_lock // this is called by nmethod::nmethod, which must already own CodeCache_lock
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) { if (cb->is_nmethod()) {
_number_of_nmethods_with_dependencies++; _number_of_nmethods++;
if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies++;
}
} }
if (cb->is_adapter_blob()) {
_number_of_adapters++;
}
// flush the hardware I-cache // flush the hardware I-cache
ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size()); ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
} }
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -43,6 +43,8 @@ class CodeCache : AllStatic {
// 4422213 or 4436291 for details. // 4422213 or 4436291 for details.
static CodeHeap * _heap; static CodeHeap * _heap;
static int _number_of_blobs; static int _number_of_blobs;
static int _number_of_adapters;
static int _number_of_nmethods;
static int _number_of_nmethods_with_dependencies; static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean; static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
@ -105,6 +107,8 @@ class CodeCache : AllStatic {
static nmethod* first_nmethod(); static nmethod* first_nmethod();
static nmethod* next_nmethod (CodeBlob* cb); static nmethod* next_nmethod (CodeBlob* cb);
static int nof_blobs() { return _number_of_blobs; } static int nof_blobs() { return _number_of_blobs; }
static int nof_adapters() { return _number_of_adapters; }
static int nof_nmethods() { return _number_of_nmethods; }
// GC support // GC support
static void gc_epilogue(); static void gc_epilogue();
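The two new counters are maintained in CodeCache::commit()/free() (previous file) and exposed through the accessors added here; a small illustrative consumer, assuming the usual tty output stream is in scope:

  static void example_print_code_cache_counts() {
    tty->print_cr("code cache: %d blobs (%d nmethods, %d adapters)",
                  CodeCache::nof_blobs(),
                  CodeCache::nof_nmethods(),
                  CodeCache::nof_adapters());
  }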
View File
@ -397,11 +397,6 @@ void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, add
//-------------end of code for ExceptionCache-------------- //-------------end of code for ExceptionCache--------------
void nmFlags::clear() {
assert(sizeof(nmFlags) == sizeof(int), "using more than one word for nmFlags");
*(jint*)this = 0;
}
int nmethod::total_size() const { int nmethod::total_size() const {
return return
code_size() + code_size() +
@ -419,8 +414,32 @@ const char* nmethod::compile_kind() const {
return NULL; return NULL;
} }
// %%% This variable is no longer used? // Fill in default values for various flag fields
int nmethod::_zombie_instruction_size = NativeJump::instruction_size; void nmethod::init_defaults() {
_state = alive;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
_speculatively_disconnected = 0;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_marked_for_deoptimization = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
_unload_reported = false; // jvmti state
NOT_PRODUCT(_has_debug_info = false);
_oops_do_mark_link = NULL;
_jmethod_id = NULL;
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_saved_nmethod_link = NULL;
_compiler = NULL;
#ifdef HAVE_DTRACE_H
_trap_offset = 0;
#endif // def HAVE_DTRACE_H
}
nmethod* nmethod::new_native_nmethod(methodHandle method, nmethod* nmethod::new_native_nmethod(methodHandle method,
@ -580,24 +599,16 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;) debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
NOT_PRODUCT(_has_debug_info = false); init_defaults();
_oops_do_mark_link = NULL;
_method = method; _method = method;
_entry_bci = InvocationEntryBci; _entry_bci = InvocationEntryBci;
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_saved_nmethod_link = NULL;
_compiler = NULL;
// We have no exception handler or deopt handler make the // We have no exception handler or deopt handler make the
// values something that will never match a pc like the nmethod vtable entry // values something that will never match a pc like the nmethod vtable entry
_exception_offset = 0; _exception_offset = 0;
_deoptimize_offset = 0; _deoptimize_offset = 0;
_deoptimize_mh_offset = 0; _deoptimize_mh_offset = 0;
_orig_pc_offset = 0; _orig_pc_offset = 0;
#ifdef HAVE_DTRACE_H
_trap_offset = 0;
#endif // def HAVE_DTRACE_H
_stub_offset = data_offset(); _stub_offset = data_offset();
_consts_offset = data_offset(); _consts_offset = data_offset();
_oops_offset = data_offset(); _oops_offset = data_offset();
@ -615,17 +626,9 @@ nmethod::nmethod(
_exception_cache = NULL; _exception_cache = NULL;
_pc_desc_cache.reset_to(NULL); _pc_desc_cache.reset_to(NULL);
flags.clear();
flags.state = alive;
_markedForDeoptimization = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
code_buffer->copy_oops_to(this); code_buffer->copy_oops_to(this);
debug_only(verify_scavenge_root_oops()); debug_only(verify_scavenge_root_oops());
CodeCache::commit(this); CodeCache::commit(this);
VTune::create_nmethod(this);
} }
if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
@ -673,14 +676,9 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;) debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
NOT_PRODUCT(_has_debug_info = false); init_defaults();
_oops_do_mark_link = NULL;
_method = method; _method = method;
_entry_bci = InvocationEntryBci; _entry_bci = InvocationEntryBci;
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_compiler = NULL;
// We have no exception handler or deopt handler make the // We have no exception handler or deopt handler make the
// values something that will never match a pc like the nmethod vtable entry // values something that will never match a pc like the nmethod vtable entry
_exception_offset = 0; _exception_offset = 0;
@ -706,17 +704,9 @@ nmethod::nmethod(
_exception_cache = NULL; _exception_cache = NULL;
_pc_desc_cache.reset_to(NULL); _pc_desc_cache.reset_to(NULL);
flags.clear();
flags.state = alive;
_markedForDeoptimization = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
code_buffer->copy_oops_to(this); code_buffer->copy_oops_to(this);
debug_only(verify_scavenge_root_oops()); debug_only(verify_scavenge_root_oops());
CodeCache::commit(this); CodeCache::commit(this);
VTune::create_nmethod(this);
} }
if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
@ -781,20 +771,13 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;) debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
NOT_PRODUCT(_has_debug_info = false); init_defaults();
_oops_do_mark_link = NULL;
_method = method; _method = method;
_entry_bci = entry_bci;
_compile_id = compile_id; _compile_id = compile_id;
_comp_level = comp_level; _comp_level = comp_level;
_entry_bci = entry_bci;
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_compiler = compiler; _compiler = compiler;
_orig_pc_offset = orig_pc_offset; _orig_pc_offset = orig_pc_offset;
#ifdef HAVE_DTRACE_H
_trap_offset = 0;
#endif // def HAVE_DTRACE_H
_stub_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start()); _stub_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start());
// Exception handler and deopt handler are in the stub section // Exception handler and deopt handler are in the stub section
@ -821,15 +804,6 @@ nmethod::nmethod(
_exception_cache = NULL; _exception_cache = NULL;
_pc_desc_cache.reset_to(scopes_pcs_begin()); _pc_desc_cache.reset_to(scopes_pcs_begin());
flags.clear();
flags.state = alive;
_markedForDeoptimization = 0;
_unload_reported = false; // jvmti state
_lock_count = 0;
_stack_traversal_mark = 0;
// Copy contents of ScopeDescRecorder to nmethod // Copy contents of ScopeDescRecorder to nmethod
code_buffer->copy_oops_to(this); code_buffer->copy_oops_to(this);
debug_info->copy_to(this); debug_info->copy_to(this);
@ -841,8 +815,6 @@ nmethod::nmethod(
CodeCache::commit(this); CodeCache::commit(this);
VTune::create_nmethod(this);
// Copy contents of ExceptionHandlerTable to nmethod // Copy contents of ExceptionHandlerTable to nmethod
handler_table->copy_to(this); handler_table->copy_to(this);
nul_chk_table->copy_to(this); nul_chk_table->copy_to(this);
@ -988,11 +960,6 @@ void nmethod::print_nmethod(bool printmethod) {
} }
void nmethod::set_version(int v) {
flags.version = v;
}
// Promote one word from an assembly-time handle to a live embedded oop. // Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) { inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
if (handle == NULL || if (handle == NULL ||
@ -1139,6 +1106,8 @@ void nmethod::cleanup_inline_caches() {
// This is a private interface with the sweeper. // This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() { void nmethod::mark_as_seen_on_stack() {
assert(is_not_entrant(), "must be a non-entrant method"); assert(is_not_entrant(), "must be a non-entrant method");
// Set the traversal mark to ensure that the sweeper does 2
// cleaning passes before moving to zombie.
set_stack_traversal_mark(NMethodSweeper::traversal_count()); set_stack_traversal_mark(NMethodSweeper::traversal_count());
} }
@ -1207,7 +1176,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
// for later on. // for later on.
CodeCache::set_needs_cache_clean(true); CodeCache::set_needs_cache_clean(true);
} }
flags.state = unloaded; _state = unloaded;
// Log the unloading. // Log the unloading.
log_state_change(); log_state_change();
@ -1233,21 +1202,21 @@ void nmethod::log_state_change() const {
if (LogCompilation) { if (LogCompilation) {
if (xtty != NULL) { if (xtty != NULL) {
ttyLocker ttyl; // keep the following output all in one block ttyLocker ttyl; // keep the following output all in one block
if (flags.state == unloaded) { if (_state == unloaded) {
xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'", xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
os::current_thread_id()); os::current_thread_id());
} else { } else {
xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s", xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
os::current_thread_id(), os::current_thread_id(),
(flags.state == zombie ? " zombie='1'" : "")); (_state == zombie ? " zombie='1'" : ""));
} }
log_identity(xtty); log_identity(xtty);
xtty->stamp(); xtty->stamp();
xtty->end_elem(); xtty->end_elem();
} }
} }
if (PrintCompilation && flags.state != unloaded) { if (PrintCompilation && _state != unloaded) {
print_on(tty, flags.state == zombie ? "made zombie " : "made not entrant "); print_on(tty, _state == zombie ? "made zombie " : "made not entrant ");
tty->cr(); tty->cr();
} }
} }
@ -1258,8 +1227,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
bool was_alive = false; bool was_alive = false;
// Make sure the nmethod is not flushed in case of a safepoint in code below. // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
nmethodLocker nml(this); nmethodLocker nml(this);
methodHandle the_method(method());
{ {
// If the method is already zombie there is nothing to do // If the method is already zombie there is nothing to do
@ -1279,7 +1249,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// Enter critical section. Does not block for safepoint. // Enter critical section. Does not block for safepoint.
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
if (flags.state == state) { if (_state == state) {
// another thread already performed this transition so nothing // another thread already performed this transition so nothing
// to do, but return false to indicate this. // to do, but return false to indicate this.
return false; return false;
@ -1290,17 +1260,37 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
if (!is_osr_method() && !is_not_entrant()) { if (!is_osr_method() && !is_not_entrant()) {
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub()); SharedRuntime::get_handle_wrong_method_stub());
assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
} }
was_alive = is_in_use(); // Read state under lock if (is_in_use()) {
// It's a true state change, so mark the method as decompiled.
// Do it only for transition from alive.
inc_decompile_count();
}
// Change state // Change state
flags.state = state; _state = state;
// Log the transition once // Log the transition once
log_state_change(); log_state_change();
// Remove nmethod from method.
// We need to check if both the _code and _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in methodOop as seen in bugid 4947125.
// If the vep() points to the zombie nmethod, the memory for the nmethod
// could be flushed and the compiler and vtable stubs could still call
// through it.
if (method() != NULL && (method()->code() == this ||
method()->from_compiled_entry() == verified_entry_point())) {
HandleMark hm;
method()->clear_code();
}
if (state == not_entrant) {
mark_as_seen_on_stack();
}
} // leave critical region under Patching_lock } // leave critical region under Patching_lock
// When the nmethod becomes zombie it is no longer alive so the // When the nmethod becomes zombie it is no longer alive so the
@ -1308,18 +1298,17 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// state will be flushed later when the transition to zombie // state will be flushed later when the transition to zombie
// happens or they get unloaded. // happens or they get unloaded.
if (state == zombie) { if (state == zombie) {
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earilier if the GC marked it for unloading).
post_compiled_method_unload();
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
flush_dependencies(NULL); flush_dependencies(NULL);
} else { } else {
assert(state == not_entrant, "other cases may need to be handled differently"); assert(state == not_entrant, "other cases may need to be handled differently");
} }
if (state == not_entrant) {
Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
} else {
Events::log("Make nmethod zombie " INTPTR_FORMAT, this);
}
if (TraceCreateZombies) { if (TraceCreateZombies) {
tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie"); tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
} }
@ -1327,47 +1316,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// Make sweeper aware that there is a zombie method that needs to be removed // Make sweeper aware that there is a zombie method that needs to be removed
NMethodSweeper::notify(this); NMethodSweeper::notify(this);
// not_entrant only stuff
if (state == not_entrant) {
mark_as_seen_on_stack();
}
if (was_alive) {
// It's a true state change, so mark the method as decompiled.
// Do it only for transition from alive.
inc_decompile_count();
}
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earilier if the GC marked it for unloading).
if (state == zombie) {
post_compiled_method_unload();
}
// Zombie only stuff
if (state == zombie) {
VTune::delete_nmethod(this);
}
// Check whether method got unloaded at a safepoint before this,
// if so we can skip the flushing steps below
if (method() == NULL) return true;
// Remove nmethod from method.
// We need to check if both the _code and _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in methodOop as seen in bugid 4947125.
// If the vep() points to the zombie nmethod, the memory for the nmethod
// could be flushed and the compiler and vtable stubs could still call
// through it.
if (method()->code() == this ||
method()->from_compiled_entry() == verified_entry_point()) {
HandleMark hm;
method()->clear_code();
}
return true; return true;
} }
@ -1488,11 +1436,25 @@ void nmethod::post_compiled_method_load_event() {
moop->signature()->utf8_length(), moop->signature()->utf8_length(),
code_begin(), code_size()); code_begin(), code_size());
if (JvmtiExport::should_post_compiled_method_load() ||
JvmtiExport::should_post_compiled_method_unload()) {
get_and_cache_jmethod_id();
}
if (JvmtiExport::should_post_compiled_method_load()) { if (JvmtiExport::should_post_compiled_method_load()) {
JvmtiExport::post_compiled_method_load(this); JvmtiExport::post_compiled_method_load(this);
} }
} }
jmethodID nmethod::get_and_cache_jmethod_id() {
if (_jmethod_id == NULL) {
// Cache the jmethod_id since it can no longer be looked up once the
// method itself has been marked for unloading.
_jmethod_id = method()->jmethod_id();
}
return _jmethod_id;
}
void nmethod::post_compiled_method_unload() { void nmethod::post_compiled_method_unload() {
if (unload_reported()) { if (unload_reported()) {
// During unloading we transition to unloaded and then to zombie // During unloading we transition to unloaded and then to zombie
@ -1504,12 +1466,17 @@ void nmethod::post_compiled_method_unload() {
DTRACE_METHOD_UNLOAD_PROBE(method()); DTRACE_METHOD_UNLOAD_PROBE(method());
// If a JVMTI agent has enabled the CompiledMethodUnload event then // If a JVMTI agent has enabled the CompiledMethodUnload event then
// post the event. Sometime later this nmethod will be made a zombie by // post the event. Sometime later this nmethod will be made a zombie
// the sweeper but the methodOop will not be valid at that point. // by the sweeper but the methodOop will not be valid at that point.
if (JvmtiExport::should_post_compiled_method_unload()) { // If the _jmethod_id is null then no load event was ever requested
// so don't bother posting the unload. The main reason for this is
// that the jmethodID is a weak reference to the methodOop so if
// it's being unloaded there's no way to look it up since the weak
// ref will have been cleared.
if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
assert(!unload_reported(), "already unloaded"); assert(!unload_reported(), "already unloaded");
HandleMark hm; HandleMark hm;
JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin()); JvmtiExport::post_compiled_method_unload(_jmethod_id, code_begin());
} }
// The JVMTI CompiledMethodUnload event can be enabled or disabled at // The JVMTI CompiledMethodUnload event can be enabled or disabled at
@ -2087,7 +2054,6 @@ address nmethod::continuation_for_implicit_exception(address pc) {
void nmethod_init() { void nmethod_init() {
// make sure you didn't forget to adjust the filler fields // make sure you didn't forget to adjust the filler fields
assert(sizeof(nmFlags) <= 4, "nmFlags occupies more than a word");
assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word"); assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
} }
@ -2323,7 +2289,6 @@ void nmethod::print() const {
tty->print("((nmethod*) "INTPTR_FORMAT ") ", this); tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
tty->print(" for method " INTPTR_FORMAT , (address)method()); tty->print(" for method " INTPTR_FORMAT , (address)method());
tty->print(" { "); tty->print(" { ");
if (version()) tty->print("v%d ", version());
if (is_in_use()) tty->print("in_use "); if (is_in_use()) tty->print("in_use ");
if (is_not_entrant()) tty->print("not_entrant "); if (is_not_entrant()) tty->print("not_entrant ");
if (is_zombie()) tty->print("zombie "); if (is_zombie()) tty->print("zombie ");
@ -2659,13 +2624,10 @@ void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin,
case Bytecodes::_getstatic: case Bytecodes::_getstatic:
case Bytecodes::_putstatic: case Bytecodes::_putstatic:
{ {
methodHandle sdm = sd->method(); Bytecode_field* field = Bytecode_field_at(sd->method(), sd->bci());
Bytecode_field* field = Bytecode_field_at(sdm(), sdm->bcp_from(sd->bci()));
constantPoolOop sdmc = sdm->constants();
symbolOop name = sdmc->name_ref_at(field->index());
st->print(" "); st->print(" ");
if (name != NULL) if (field->name() != NULL)
name->print_symbol_on(st); field->name()->print_symbol_on(st);
else else
st->print("<UNKNOWN>"); st->print("<UNKNOWN>");
} }
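Taken together, these hunks collapse the old nmFlags/version machinery into a single _state value with the lifecycle below; the example query is illustrative and uses only accessors kept by this commit:

  // alive --> not_entrant --> zombie --> flushed from the code cache
  //   \--> unloaded (class unloaded by GC) --> zombie
  static bool example_ready_to_flush(nmethod* nm) {
    return nm->is_zombie() && nm->is_marked_for_reclamation();
  }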
View File
@ -78,29 +78,8 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
// nmethods (native methods) are the compiled code versions of Java methods. // nmethods (native methods) are the compiled code versions of Java methods.
//
struct nmFlags { // An nmethod contains:
friend class VMStructs;
unsigned int version:8; // version number (0 = first version)
unsigned int age:4; // age (in # of sweep steps)
unsigned int state:2; // {alive, zombie, unloaded)
unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
unsigned int markedForReclamation:1; // Used by NMethodSweeper
unsigned int has_unsafe_access:1; // May fault due to unsafe access.
unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int speculatively_disconnected:1; // Marked for potential unload
void clear();
};
// A nmethod contains:
// - header (the nmethod structure) // - header (the nmethod structure)
// [Relocation] // [Relocation]
// - relocation information // - relocation information
@ -131,10 +110,9 @@ class nmethod : public CodeBlob {
friend class CodeCache; // non-perm oops friend class CodeCache; // non-perm oops
private: private:
// Shared fields for all nmethod's // Shared fields for all nmethod's
static int _zombie_instruction_size;
methodOop _method; methodOop _method;
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
jmethodID _jmethod_id; // Cache of method()->jmethod_id()
// To support simple linked-list chaining of nmethods: // To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from instanceKlass::osr_nmethods_head nmethod* _osr_link; // from instanceKlass::osr_nmethods_head
@ -146,6 +124,11 @@ class nmethod : public CodeBlob {
AbstractCompiler* _compiler; // The compiler which compiled this nmethod AbstractCompiler* _compiler; // The compiler which compiled this nmethod
// offsets for entry points
address _entry_point; // entry point with class check
address _verified_entry_point; // entry point without class check
address _osr_entry_point; // entry point for on stack replacement
// Offsets for different nmethod parts // Offsets for different nmethod parts
int _exception_offset; int _exception_offset;
// All deoptee's will resume execution at this location described by // All deoptee's will resume execution at this location described by
@ -174,23 +157,31 @@ class nmethod : public CodeBlob {
// pc during a deopt. // pc during a deopt.
int _orig_pc_offset; int _orig_pc_offset;
int _compile_id; // which compilation made this nmethod int _compile_id; // which compilation made this nmethod
int _comp_level; // compilation level int _comp_level; // compilation level
// offsets for entry points // protected by CodeCache_lock
address _entry_point; // entry point with class check bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
address _verified_entry_point; // entry point without class check bool _speculatively_disconnected; // Marked for potential unload
address _osr_entry_point; // entry point for on stack replacement
bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
bool _marked_for_deoptimization; // Used for stack deoptimization
// used by jvmti to track if an unload event has been posted for this nmethod.
bool _unload_reported;
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
// Protected by Patching_lock
unsigned char _state; // {alive, not_entrant, zombie, unloaded)
nmFlags flags; // various flags to keep track of nmethod state
bool _markedForDeoptimization; // Used for stack deoptimization
enum { alive = 0, enum { alive = 0,
not_entrant = 1, // uncommon trap has happened but activations may still exist not_entrant = 1, // uncommon trap has happened but activations may still exist
zombie = 2, zombie = 2,
unloaded = 3 }; unloaded = 3 };
// used by jvmti to track if an unload event has been posted for this nmethod.
bool _unload_reported;
jbyte _scavenge_root_state; jbyte _scavenge_root_state;
@ -269,15 +260,15 @@ class nmethod : public CodeBlob {
bool make_not_entrant_or_zombie(unsigned int state); bool make_not_entrant_or_zombie(unsigned int state);
void inc_decompile_count(); void inc_decompile_count();
// used to check that writes to nmFlags are done consistently.
static void check_safepoint() PRODUCT_RETURN;
// Used to manipulate the exception cache // Used to manipulate the exception cache
void add_exception_cache_entry(ExceptionCache* new_entry); void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception); ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// Inform external interfaces that a compiled method has been unloaded // Inform external interfaces that a compiled method has been unloaded
inline void post_compiled_method_unload(); void post_compiled_method_unload();
// Initialize fields to their default values
void init_defaults();
public: public:
// create nmethod with entry_bci // create nmethod with entry_bci
@ -392,11 +383,11 @@ class nmethod : public CodeBlob {
address verified_entry_point() const { return _verified_entry_point; } // if klass is correct address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
// flag accessing and manipulation // flag accessing and manipulation
bool is_in_use() const { return flags.state == alive; } bool is_in_use() const { return _state == alive; }
bool is_alive() const { return flags.state == alive || flags.state == not_entrant; } bool is_alive() const { return _state == alive || _state == not_entrant; }
bool is_not_entrant() const { return flags.state == not_entrant; } bool is_not_entrant() const { return _state == not_entrant; }
bool is_zombie() const { return flags.state == zombie; } bool is_zombie() const { return _state == zombie; }
bool is_unloaded() const { return flags.state == unloaded; } bool is_unloaded() const { return _state == unloaded; }
// Make the nmethod non entrant. The nmethod will continue to be // Make the nmethod non entrant. The nmethod will continue to be
// alive. It is used when an uncommon trap happens. Returns true // alive. It is used when an uncommon trap happens. Returns true
@ -409,37 +400,33 @@ class nmethod : public CodeBlob {
bool unload_reported() { return _unload_reported; } bool unload_reported() { return _unload_reported; }
void set_unload_reported() { _unload_reported = true; } void set_unload_reported() { _unload_reported = true; }
bool is_marked_for_deoptimization() const { return _markedForDeoptimization; } bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
void mark_for_deoptimization() { _markedForDeoptimization = true; } void mark_for_deoptimization() { _marked_for_deoptimization = true; }
void make_unloaded(BoolObjectClosure* is_alive, oop cause); void make_unloaded(BoolObjectClosure* is_alive, oop cause);
bool has_dependencies() { return dependencies_size() != 0; } bool has_dependencies() { return dependencies_size() != 0; }
void flush_dependencies(BoolObjectClosure* is_alive); void flush_dependencies(BoolObjectClosure* is_alive);
bool has_flushed_dependencies() { return flags.hasFlushedDependencies; } bool has_flushed_dependencies() { return _has_flushed_dependencies; }
void set_has_flushed_dependencies() { void set_has_flushed_dependencies() {
assert(!has_flushed_dependencies(), "should only happen once"); assert(!has_flushed_dependencies(), "should only happen once");
flags.hasFlushedDependencies = 1; _has_flushed_dependencies = 1;
} }
bool is_marked_for_reclamation() const { return flags.markedForReclamation; } bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
void mark_for_reclamation() { flags.markedForReclamation = 1; } void mark_for_reclamation() { _marked_for_reclamation = 1; }
void unmark_for_reclamation() { flags.markedForReclamation = 0; }
bool has_unsafe_access() const { return flags.has_unsafe_access; } bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; } void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; } bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; } void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; } bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; } void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }
int comp_level() const { return _comp_level; } int comp_level() const { return _comp_level; }
int version() const { return flags.version; }
void set_version(int v);
// Support for oops in scopes and relocs: // Support for oops in scopes and relocs:
// Note: index 0 is reserved for null. // Note: index 0 is reserved for null.
oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); } oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }
@ -599,6 +586,7 @@ public:
// jvmti support: // jvmti support:
void post_compiled_method_load_event(); void post_compiled_method_load_event();
jmethodID get_and_cache_jmethod_id();
// verify operations // verify operations
void verify(); void verify();
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -50,7 +50,6 @@ void* VtableStub::operator new(size_t size, int code_size) {
} }
_chunk = blob->instructions_begin(); _chunk = blob->instructions_begin();
_chunk_end = _chunk + bytes; _chunk_end = _chunk + bytes;
VTune::register_stub("vtable stub", _chunk, _chunk_end);
Forte::register_stub("vtable stub", _chunk, _chunk_end); Forte::register_stub("vtable stub", _chunk, _chunk_end);
// Notify JVMTI about this stub. The event will be recorded by the enclosing // Notify JVMTI about this stub. The event will be recorded by the enclosing
// JvmtiDynamicCodeEventCollector and posted when this thread has released // JvmtiDynamicCodeEventCollector and posted when this thread has released
View File
@ -664,19 +664,14 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
return; return;
} }
// XXX use a global constant instead of 64! typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
typedef struct OopTaskQueuePadded {
OopTaskQueue work_queue;
char pad[64 - sizeof(OopTaskQueue)]; // prevent false sharing
} OopTaskQueuePadded;
for (i = 0; i < num_queues; i++) { for (i = 0; i < num_queues; i++) {
OopTaskQueuePadded *q_padded = new OopTaskQueuePadded(); PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
if (q_padded == NULL) { if (q == NULL) {
warning("work_queue allocation failure."); warning("work_queue allocation failure.");
return; return;
} }
_task_queues->register_queue(i, &q_padded->work_queue); _task_queues->register_queue(i, q);
} }
for (i = 0; i < num_queues; i++) { for (i = 0; i < num_queues; i++) {
_task_queues->queue(i)->initialize(); _task_queues->queue(i)->initialize();
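
This hunk replaces the hand-rolled 64-byte pad around each OopTaskQueue with a Padded<OopTaskQueue> typedef. As a rough standalone illustration of the padding idea (not HotSpot's actual Padded template), something like the following keeps neighbouring queues on separate cache lines, assuming a 64-byte line:

    #include <cstddef>

    // Minimal stand-in for a Padded<T> wrapper (illustrative, not HotSpot's
    // template): round each object up to a whole number of cache lines so that
    // adjacent task queues never share a line and cause false sharing.
    static const size_t CACHE_LINE_BYTES = 64;   // assumed line size

    template <typename T>
    struct Padded : public T {
      char _pad[CACHE_LINE_BYTES - (sizeof(T) % CACHE_LINE_BYTES)];
    };

    struct WorkQueue {       // hypothetical stand-in for OopTaskQueue
      long _bottom;
      long _age;
    };

    int main() {
      // Each element now starts on its own cache line.
      Padded<WorkQueue>* queues = new Padded<WorkQueue>[4];
      delete[] queues;
      return 0;
    }
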
View File
@ -234,6 +234,11 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
if (_gc_cause != GCCause::_gc_locker && if (_gc_cause != GCCause::_gc_locker &&
gch->total_full_collections_completed() <= _full_gc_count_before) { gch->total_full_collections_completed() <= _full_gc_count_before) {
// maybe we should change the condition to test _gc_cause ==
// GCCause::_java_lang_system_gc, instead of
// _gc_cause != GCCause::_gc_locker
assert(_gc_cause == GCCause::_java_lang_system_gc,
"the only way to get here if this was a System.gc()-induced GC");
assert(ExplicitGCInvokesConcurrent, "Error"); assert(ExplicitGCInvokesConcurrent, "Error");
// Now, wait for witnessing concurrent gc cycle to complete, // Now, wait for witnessing concurrent gc cycle to complete,
// but do so in native mode, because we want to lock the // but do so in native mode, because we want to lock the
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -271,21 +271,16 @@ jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* def
if (cas_res == prev_epoch_entry) { if (cas_res == prev_epoch_entry) {
// We successfully updated the card num value in the epoch entry // We successfully updated the card num value in the epoch entry
count_ptr->_count = 0; // initialize counter for new card num count_ptr->_count = 0; // initialize counter for new card num
jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
// Even though the region containing the card at old_card_num was not // in the young list when old_card_num was recorded in the epoch
// in the young list when old_card_num was recorded in the epoch // in the young list when old_card_num was recorded in the epoch
// cache it could have been added to the free list and subsequently // cache it could have been added to the free list and subsequently
// added to the young list in the intervening time. If the evicted // added to the young list in the intervening time. See CR 6817995.
// card is in a young region just return the card_ptr and the evicted // We do not deal with this case here - it will be handled in
// card will not be cleaned. See CR 6817995. // HeapRegion::oops_on_card_seq_iterate_careful after it has been
// determined that the region containing the card has been allocated
jbyte* old_card_ptr = card_num_2_ptr(old_card_num); // to, and it's safe to check the young type of the region.
if (is_young_card(old_card_ptr)) {
*count = 0;
// We can defer the processing of card_ptr
*defer = true;
return card_ptr;
}
// We do not want to defer processing of card_ptr in this case // We do not want to defer processing of card_ptr in this case
// (we need to refine old_card_ptr and card_ptr) // (we need to refine old_card_ptr and card_ptr)
@ -301,22 +296,22 @@ jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
jbyte* cached_ptr = add_card_count(card_ptr, &count, defer); jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
assert(cached_ptr != NULL, "bad cached card ptr"); assert(cached_ptr != NULL, "bad cached card ptr");
if (is_young_card(cached_ptr)) { // We've just inserted a card pointer into the card count cache
// The region containing cached_ptr has been freed during a clean up // and got back the card that we just inserted or (evicted) the
// pause, reallocated, and tagged as young. // previous contents of that count slot.
assert(cached_ptr != card_ptr, "shouldn't be");
// We've just inserted a new old-gen card pointer into the card count // The card we got back could be in a young region. When the
// cache and evicted the previous contents of that count slot. // returned card (if evicted) was originally inserted, we had
// The evicted card pointer has been determined to be in a young region // determined that its containing region was not young. However
// and so cannot be the newly inserted card pointer (that will be // it is possible for the region to be freed during a cleanup
// in an old region). // pause, then reallocated and tagged as young which will result
// The count for newly inserted card will be set to zero during the // in the returned card residing in a young region.
// insertion, so we don't want to defer the cleaning of the newly //
// inserted card pointer. // We do not deal with this case here - the change from non-young
assert(*defer == false, "deferring non-hot card"); // to young could be observed at any time - it will be handled in
return NULL; // HeapRegion::oops_on_card_seq_iterate_careful after it has been
} // determined that the region containing the card has been allocated
// to.
// The card pointer we obtained from card count cache is not hot // The card pointer we obtained from card count cache is not hot
// so do not store it in the cache; return it for immediate // so do not store it in the cache; return it for immediate
@ -325,7 +320,7 @@ jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
return cached_ptr; return cached_ptr;
} }
// Otherwise, the pointer we got from the _card_counts is hot. // Otherwise, the pointer we got from the _card_counts cache is hot.
jbyte* res = NULL; jbyte* res = NULL;
MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag); MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
if (_n_hot == _hot_cache_size) { if (_n_hot == _hot_cache_size) {
@ -338,17 +333,8 @@ jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0; if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
_n_hot++; _n_hot++;
if (res != NULL) { // The card obtained from the hot card cache could be in a young
// Even though the region containg res was not in the young list // region. See above on how this can happen.
// when it was recorded in the hot cache it could have been added
// to the free list and subsequently added to the young list in
// the intervening time. If res is in a young region, return NULL
// so that res is not cleaned. See CR 6817995.
if (is_young_card(res)) {
res = NULL;
}
}
return res; return res;
} }
View File
@ -266,6 +266,12 @@ void ConcurrentMarkThread::run() {
_cm->clearNextBitmap(); _cm->clearNextBitmap();
_sts.leave(); _sts.leave();
} }
// Update the number of full collections that have been
// completed. This will also notify the FullGCCount_lock in case a
// Java thread is waiting for a full GC to happen (e.g., it
// called System.gc() with +ExplicitGCInvokesConcurrent).
g1->increment_full_collections_completed(true /* outer */);
} }
assert(_should_terminate, "just checking"); assert(_should_terminate, "just checking");
View File
@ -638,6 +638,11 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// Now retry the allocation. // Now retry the allocation.
if (_cur_alloc_region != NULL) { if (_cur_alloc_region != NULL) {
if (allocated_young_region != NULL) {
// We need to ensure that the store to top does not
// float above the setting of the young type.
OrderAccess::storestore();
}
res = _cur_alloc_region->allocate(word_size); res = _cur_alloc_region->allocate(word_size);
} }
} }
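
The added OrderAccess::storestore() keeps the store that publishes the region's new top from being reordered before the store that tags the region as young, so a concurrent reader that sees the new top also sees the young tag. A rough equivalent of that ordering, expressed with C++11 atomics rather than HotSpot's OrderAccess (all names below are illustrative):

    #include <atomic>

    // Illustrative equivalent of the storestore ordering; the fields below are
    // stand-ins, not the real HeapRegion layout.
    struct Region {
      std::atomic<bool>  is_young;   // "young type" tag
      std::atomic<char*> top;        // allocation top seen by concurrent refinement
    };

    static char heap_words[1024];

    void publish_allocation(Region& r) {
      r.is_young.store(true, std::memory_order_relaxed);   // tag the region first

      // storestore: the store to top below may not be reordered above the tag store.
      std::atomic_thread_fence(std::memory_order_release);

      // A reader that observes this top with acquire ordering is then guaranteed
      // to also observe is_young == true.
      r.top.store(heap_words + 128, std::memory_order_relaxed);
    }
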
@ -809,7 +814,8 @@ public:
} }
}; };
void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs, void G1CollectedHeap::do_collection(bool explicit_gc,
bool clear_all_soft_refs,
size_t word_size) { size_t word_size) {
if (GC_locker::check_active_before_gc()) { if (GC_locker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation) return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@ -821,10 +827,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
Universe::print_heap_before_gc(); Universe::print_heap_before_gc();
} }
if (full && DisableExplicitGC) {
return;
}
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread"); assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
@ -837,9 +839,11 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
IsGCActiveMark x; IsGCActiveMark x;
// Timing // Timing
bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
assert(!system_gc || explicit_gc, "invariant");
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t(full ? "Full GC (System.gc())" : "Full GC", TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
PrintGC, true, gclog_or_tty); PrintGC, true, gclog_or_tty);
TraceMemoryManagerStats tms(true /* fullGC */); TraceMemoryManagerStats tms(true /* fullGC */);
@ -944,7 +948,7 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
heap_region_iterate(&rs_clear); heap_region_iterate(&rs_clear);
// Resize the heap if necessary. // Resize the heap if necessary.
resize_if_necessary_after_full_collection(full ? 0 : word_size); resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
if (_cg1r->use_cache()) { if (_cg1r->use_cache()) {
_cg1r->clear_and_record_card_counts(); _cg1r->clear_and_record_card_counts();
@ -1009,13 +1013,18 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
"young list should be empty at this point"); "young list should be empty at this point");
} }
// Update the number of full collections that have been completed.
increment_full_collections_completed(false /* outer */);
if (PrintHeapAtGC) { if (PrintHeapAtGC) {
Universe::print_heap_after_gc(); Universe::print_heap_after_gc();
} }
} }
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) { void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
do_collection(true, clear_all_soft_refs, 0); do_collection(true, /* explicit_gc */
clear_all_soft_refs,
0 /* word_size */);
} }
// This code is mostly copied from TenuredGeneration. // This code is mostly copied from TenuredGeneration.
@ -1331,6 +1340,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_young_list(new YoungList(this)), _young_list(new YoungList(this)),
_gc_time_stamp(0), _gc_time_stamp(0),
_surviving_young_words(NULL), _surviving_young_words(NULL),
_full_collections_completed(0),
_in_cset_fast_test(NULL), _in_cset_fast_test(NULL),
_in_cset_fast_test_base(NULL), _in_cset_fast_test_base(NULL),
_dirty_cards_region_list(NULL) { _dirty_cards_region_list(NULL) {
@ -1689,6 +1699,51 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
return car->free(); return car->free();
} }
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
return
((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
(cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
void G1CollectedHeap::increment_full_collections_completed(bool outer) {
MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
// We have already incremented _total_full_collections at the start
// of the GC, so total_full_collections() represents how many full
// collections have been started.
unsigned int full_collections_started = total_full_collections();
// Given that this method is called at the end of a Full GC or of a
// concurrent cycle, and those can be nested (i.e., a Full GC can
// interrupt a concurrent cycle), the number of full collections
// completed should be either one (in the case where there was no
// nesting) or two (when a Full GC interrupted a concurrent cycle)
// behind the number of full collections started.
// This is the case for the inner caller, i.e. a Full GC.
assert(outer ||
(full_collections_started == _full_collections_completed + 1) ||
(full_collections_started == _full_collections_completed + 2),
err_msg("for inner caller: full_collections_started = %u "
"is inconsistent with _full_collections_completed = %u",
full_collections_started, _full_collections_completed));
// This is the case for the outer caller, i.e. the concurrent cycle.
assert(!outer ||
(full_collections_started == _full_collections_completed + 1),
err_msg("for outer caller: full_collections_started = %u "
"is inconsistent with _full_collections_completed = %u",
full_collections_started, _full_collections_completed));
_full_collections_completed += 1;
// This notify_all() will ensure that a thread that called
// System.gc() (with ExplicitGCInvokesConcurrent set or not)
// and it's waiting for a full GC to finish will be woken up. It is
// waiting in VM_G1IncCollectionPause::doit_epilogue().
FullGCCount_lock->notify_all();
}
void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
assert(Thread::current()->is_VM_thread(), "Precondition#1"); assert(Thread::current()->is_VM_thread(), "Precondition#1");
assert(Heap_lock->is_locked(), "Precondition#2"); assert(Heap_lock->is_locked(), "Precondition#2");
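
increment_full_collections_completed() bumps the completed counter under FullGCCount_lock and wakes any waiter; the asserts encode that completions may trail the number of started collections by at most one (outer caller) or two (inner caller, when a Full GC interrupts a concurrent cycle). A minimal sketch of that producer side, using std::mutex/std::condition_variable in place of HotSpot's MonitorLockerEx (all names illustrative):

    #include <cassert>
    #include <condition_variable>
    #include <mutex>

    // Illustrative stand-ins for the G1 bookkeeping (not the HotSpot types).
    static std::mutex              full_gc_count_lock;
    static std::condition_variable full_gc_count_cv;
    static unsigned int            full_collections_started   = 0;
    static unsigned int            full_collections_completed = 0;

    void increment_full_collections_completed(bool outer) {
      std::lock_guard<std::mutex> x(full_gc_count_lock);

      // Completions may trail starts by 1 (no nesting) or by 2 when a Full GC
      // interrupted a concurrent cycle; the outer caller (the cycle) can only
      // ever trail by exactly 1.
      if (outer) {
        assert(full_collections_started == full_collections_completed + 1);
      } else {
        assert(full_collections_started == full_collections_completed + 1 ||
               full_collections_started == full_collections_completed + 2);
      }

      full_collections_completed += 1;

      // Wake any Java thread blocked waiting for a full collection to finish.
      full_gc_count_cv.notify_all();
    }
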
@ -1709,25 +1764,41 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
// The caller doesn't have the Heap_lock // The caller doesn't have the Heap_lock
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
int gc_count_before; unsigned int gc_count_before;
unsigned int full_gc_count_before;
{ {
MutexLocker ml(Heap_lock); MutexLocker ml(Heap_lock);
// Read the GC count while holding the Heap_lock // Read the GC count while holding the Heap_lock
gc_count_before = SharedHeap::heap()->total_collections(); gc_count_before = SharedHeap::heap()->total_collections();
full_gc_count_before = SharedHeap::heap()->total_full_collections();
// Don't want to do a GC until cleanup is completed. // Don't want to do a GC until cleanup is completed.
wait_for_cleanup_complete(); wait_for_cleanup_complete();
} // We give up heap lock; VMThread::execute gets it back below
switch (cause) { // We give up heap lock; VMThread::execute gets it back below
case GCCause::_scavenge_alot: { }
// Do an incremental pause, which might sometimes be abandoned.
VM_G1IncCollectionPause op(gc_count_before, cause); if (should_do_concurrent_full_gc(cause)) {
// Schedule an initial-mark evacuation pause that will start a
// concurrent cycle.
VM_G1IncCollectionPause op(gc_count_before,
true, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
cause);
VMThread::execute(&op);
} else {
if (cause == GCCause::_gc_locker
DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
// Schedule a standard evacuation pause.
VM_G1IncCollectionPause op(gc_count_before,
false, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
cause);
VMThread::execute(&op); VMThread::execute(&op);
break; } else {
} // Schedule a Full GC.
default: { VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
// In all other cases, we currently do a full gc.
VM_G1CollectFull op(gc_count_before, cause);
VMThread::execute(&op); VMThread::execute(&op);
} }
} }
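
With this change G1CollectedHeap::collect() picks one of three VM operations from the GC cause: an initial-mark evacuation pause when the cause should start a concurrent cycle, a standard evacuation pause for the GC locker (and ScavengeALot in debug builds), and a Full GC otherwise. A condensed sketch of that dispatch, with illustrative enum values and flags standing in for the HotSpot definitions:

    #include <cstdio>

    // Illustrative cause values and flags; not the HotSpot definitions.
    enum GcCause { cause_gc_locker, cause_system_gc, cause_scavenge_alot, cause_other };
    static const bool GCLockerInvokesConcurrent   = false;
    static const bool ExplicitGCInvokesConcurrent = true;

    static bool should_do_concurrent_full_gc(GcCause cause) {
      return (cause == cause_gc_locker && GCLockerInvokesConcurrent) ||
             (cause == cause_system_gc && ExplicitGCInvokesConcurrent);
    }

    void collect(GcCause cause) {
      if (should_do_concurrent_full_gc(cause)) {
        std::printf("schedule an initial-mark pause (starts a concurrent cycle)\n");
      } else if (cause == cause_gc_locker || cause == cause_scavenge_alot) {
        std::printf("schedule a standard evacuation pause\n");
      } else {
        std::printf("schedule a Full GC\n");
      }
    }
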
@ -1989,6 +2060,11 @@ void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r, void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
HeapRegionClosure *cl) { HeapRegionClosure *cl) {
if (r == NULL) {
// The CSet is empty so there's nothing to do.
return;
}
assert(r->in_collection_set(), assert(r->in_collection_set(),
"Start region must be a member of the collection set."); "Start region must be a member of the collection set.");
HeapRegion* cur = r; HeapRegion* cur = r;
@ -2481,11 +2557,13 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
} }
void G1CollectedHeap::do_collection_pause() { void G1CollectedHeap::do_collection_pause() {
assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
// Read the GC count while holding the Heap_lock // Read the GC count while holding the Heap_lock
// we need to do this _before_ wait_for_cleanup_complete(), to // we need to do this _before_ wait_for_cleanup_complete(), to
// ensure that we do not give up the heap lock and potentially // ensure that we do not give up the heap lock and potentially
// pick up the wrong count // pick up the wrong count
int gc_count_before = SharedHeap::heap()->total_collections(); unsigned int gc_count_before = SharedHeap::heap()->total_collections();
// Don't want to do a GC pause while cleanup is being completed! // Don't want to do a GC pause while cleanup is being completed!
wait_for_cleanup_complete(); wait_for_cleanup_complete();
@ -2493,7 +2571,10 @@ void G1CollectedHeap::do_collection_pause() {
g1_policy()->record_stop_world_start(); g1_policy()->record_stop_world_start();
{ {
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
VM_G1IncCollectionPause op(gc_count_before); VM_G1IncCollectionPause op(gc_count_before,
false, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
GCCause::_g1_inc_collection_pause);
VMThread::execute(&op); VMThread::execute(&op);
} }
} }
@ -2612,7 +2693,7 @@ struct PrepareForRSScanningClosure : public HeapRegionClosure {
}; };
void void
G1CollectedHeap::do_collection_pause_at_safepoint() { G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
if (GC_locker::check_active_before_gc()) { if (GC_locker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation) return; // GC is disabled (e.g. JNI GetXXXCritical operation)
} }
@ -2637,8 +2718,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
else else
strcat(verbose_str, "(partial)"); strcat(verbose_str, "(partial)");
} }
if (g1_policy()->during_initial_mark_pause()) if (g1_policy()->during_initial_mark_pause()) {
strcat(verbose_str, " (initial-mark)"); strcat(verbose_str, " (initial-mark)");
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_total_full_collections();
}
// if PrintGCDetails is on, we'll print long statistics information // if PrintGCDetails is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output // in the collector policy code, so let's not print this as the output
@ -2661,7 +2746,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
"young list should be well formed"); "young list should be well formed");
} }
bool abandoned = false;
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC { // Call to jvmpi::post_class_unload_events must occur outside of active GC
IsGCActiveMark x; IsGCActiveMark x;
@ -2743,7 +2827,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
// Now choose the CS. We may abandon a pause if we find no // Now choose the CS. We may abandon a pause if we find no
// region that will fit in the MMU pause. // region that will fit in the MMU pause.
bool abandoned = g1_policy()->choose_collection_set(); bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms);
// Nothing to do if we were unable to choose a collection set. // Nothing to do if we were unable to choose a collection set.
if (!abandoned) { if (!abandoned) {
@ -3972,6 +4056,10 @@ public:
void work(int i) { void work(int i) {
if (i >= _n_workers) return; // no work needed this round if (i >= _n_workers) return; // no work needed this round
double start_time_ms = os::elapsedTime() * 1000.0;
_g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
ResourceMark rm; ResourceMark rm;
HandleMark hm; HandleMark hm;
@ -4019,7 +4107,7 @@ public:
double elapsed_ms = (os::elapsedTime()-start)*1000.0; double elapsed_ms = (os::elapsedTime()-start)*1000.0;
double term_ms = pss.term_time()*1000.0; double term_ms = pss.term_time()*1000.0;
_g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
_g1h->g1_policy()->record_termination_time(i, term_ms); _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
} }
_g1h->g1_policy()->record_thread_age_table(pss.age_table()); _g1h->g1_policy()->record_thread_age_table(pss.age_table());
_g1h->update_surviving_young_words(pss.surviving_young_words()+1); _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@ -4043,7 +4131,8 @@ public:
double term = pss.term_time(); double term = pss.term_time();
gclog_or_tty->print(" Elapsed: %7.2f ms.\n" gclog_or_tty->print(" Elapsed: %7.2f ms.\n"
" Strong roots: %7.2f ms (%6.2f%%)\n" " Strong roots: %7.2f ms (%6.2f%%)\n"
" Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", " Termination: %7.2f ms (%6.2f%%) "
"(in "SIZE_FORMAT" entries)\n",
elapsed * 1000.0, elapsed * 1000.0,
strong_roots * 1000.0, (strong_roots*100.0/elapsed), strong_roots * 1000.0, (strong_roots*100.0/elapsed),
term * 1000.0, (term*100.0/elapsed), term * 1000.0, (term*100.0/elapsed),
@ -4059,6 +4148,8 @@ public:
assert(pss.refs_to_scan() == 0, "Task queue should be empty"); assert(pss.refs_to_scan() == 0, "Task queue should be empty");
assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
double end_time_ms = os::elapsedTime() * 1000.0;
_g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
} }
}; };
View File
@ -277,6 +277,18 @@ private:
void update_surviving_young_words(size_t* surv_young_words); void update_surviving_young_words(size_t* surv_young_words);
void cleanup_surviving_young_words(); void cleanup_surviving_young_words();
// It decides whether an explicit GC should start a concurrent cycle
// instead of doing a STW GC. Currently, a concurrent cycle is
// explicitly started if:
// (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
// (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
bool should_do_concurrent_full_gc(GCCause::Cause cause);
// Keeps track of how many "full collections" (i.e., Full GCs or
// concurrent cycles) we have completed. The number of them we have
// started is maintained in _total_full_collections in CollectedHeap.
volatile unsigned int _full_collections_completed;
protected: protected:
// Returns "true" iff none of the gc alloc regions have any allocations // Returns "true" iff none of the gc alloc regions have any allocations
@ -356,13 +368,14 @@ protected:
// GC pause. // GC pause.
void retire_alloc_region(HeapRegion* alloc_region, bool par); void retire_alloc_region(HeapRegion* alloc_region, bool par);
// Helper function for two callbacks below. // - if explicit_gc is true, the GC is for a System.gc() or a heap
// "full", if true, indicates that the GC is for a System.gc() request, // inspection request and should collect the entire heap
// and should collect the entire heap. If "clear_all_soft_refs" is true, // - if clear_all_soft_refs is true, all soft references are cleared
// all soft references are cleared during the GC. If "full" is false, // during the GC
// "word_size" describes the allocation that the GC should // - if explicit_gc is false, word_size describes the allocation that
// attempt (at least) to satisfy. // the GC should attempt (at least) to satisfy
void do_collection(bool full, bool clear_all_soft_refs, void do_collection(bool explicit_gc,
bool clear_all_soft_refs,
size_t word_size); size_t word_size);
// Callback from VM_G1CollectFull operation. // Callback from VM_G1CollectFull operation.
@ -431,6 +444,26 @@ public:
_in_cset_fast_test_length * sizeof(bool)); _in_cset_fast_test_length * sizeof(bool));
} }
// This is called at the end of either a concurrent cycle or a Full
// GC to update the number of full collections completed. Those two
// can happen in a nested fashion, i.e., we start a concurrent
// cycle, a Full GC happens half-way through it which ends first,
// and then the cycle notices that a Full GC happened and ends
// too. The outer parameter is a boolean to help us do a bit tighter
// consistency checking in the method. If outer is false, the caller
// is the inner caller in the nesting (i.e., the Full GC). If outer
// is true, the caller is the outer caller in this nesting (i.e.,
// the concurrent cycle). Further nesting is not currently
// supported. The end of this call also notifies the
// FullGCCount_lock in case a Java thread is waiting for a full GC
// to happen (e.g., it called System.gc() with
// +ExplicitGCInvokesConcurrent).
void increment_full_collections_completed(bool outer);
unsigned int full_collections_completed() {
return _full_collections_completed;
}
protected: protected:
// Shrink the garbage-first heap by at most the given size (in bytes!). // Shrink the garbage-first heap by at most the given size (in bytes!).
@ -444,7 +477,7 @@ protected:
// The guts of the incremental collection pause, executed by the vm // The guts of the incremental collection pause, executed by the vm
// thread. // thread.
virtual void do_collection_pause_at_safepoint(); virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
// Actually do the work of evacuating the collection set. // Actually do the work of evacuating the collection set.
virtual void evacuate_collection_set(); virtual void evacuate_collection_set();
@ -1549,7 +1582,7 @@ protected:
int _hash_seed; int _hash_seed;
int _queue_num; int _queue_num;
int _term_attempts; size_t _term_attempts;
#if G1_DETAILED_STATS #if G1_DETAILED_STATS
int _pushes, _pops, _steals, _steal_attempts; int _pushes, _pops, _steals, _steal_attempts;
int _overflow_pushes; int _overflow_pushes;
@ -1727,8 +1760,8 @@ public:
int* hash_seed() { return &_hash_seed; } int* hash_seed() { return &_hash_seed; }
int queue_num() { return _queue_num; } int queue_num() { return _queue_num; }
int term_attempts() { return _term_attempts; } size_t term_attempts() { return _term_attempts; }
void note_term_attempt() { _term_attempts++; } void note_term_attempt() { _term_attempts++; }
#if G1_DETAILED_STATS #if G1_DETAILED_STATS
int pushes() { return _pushes; } int pushes() { return _pushes; }
View File
@ -154,7 +154,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_known_garbage_bytes(0), _known_garbage_bytes(0),
_young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)), _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
_target_pause_time_ms(-1.0),
_recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)), _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
@ -231,20 +230,21 @@ G1CollectorPolicy::G1CollectorPolicy() :
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
_par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
_par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads]; _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
_par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads]; _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_times_ms = new double[_parallel_gc_threads]; _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_processed_buffers = new double[_parallel_gc_threads]; _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
_par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads];
_par_last_scan_rs_times_ms = new double[_parallel_gc_threads]; _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
_par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads]; _par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads];
_par_last_obj_copy_times_ms = new double[_parallel_gc_threads]; _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
_par_last_termination_times_ms = new double[_parallel_gc_threads]; _par_last_termination_times_ms = new double[_parallel_gc_threads];
_par_last_termination_attempts = new double[_parallel_gc_threads];
_par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
// start conservatively // start conservatively
_expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis; _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
@ -274,10 +274,64 @@ G1CollectorPolicy::G1CollectorPolicy() :
// </NEW PREDICTION> // </NEW PREDICTION>
double time_slice = (double) GCPauseIntervalMillis / 1000.0; // Below, we might need to calculate the pause time target based on
// the pause interval. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
// arrange that the pause interval to be pause time target + 1 to
// ensure that a) the pause time target is maximized with respect to
// the pause interval and b) we maintain the invariant that pause
// time target < pause interval. If the user does not want this
// maximum flexibility, they will have to set the pause interval
// explicitly.
// First make sure that, if either parameter is set, its value is
// reasonable.
if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
if (MaxGCPauseMillis < 1) {
vm_exit_during_initialization("MaxGCPauseMillis should be "
"greater than 0");
}
}
if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
if (GCPauseIntervalMillis < 1) {
vm_exit_during_initialization("GCPauseIntervalMillis should be "
"greater than 0");
}
}
// Then, if the pause time target parameter was not set, set it to
// the default value.
if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
// The default pause time target in G1 is 200ms
FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
} else {
// We do not allow the pause interval to be set without the
// pause time target
vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
"without setting MaxGCPauseMillis");
}
}
// Then, if the interval parameter was not set, set it according to
// the pause time target (this will also deal with the case when the
// pause time target is the default value).
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
}
// Finally, make sure that the two parameters are consistent.
if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
char buffer[256];
jio_snprintf(buffer, 256,
"MaxGCPauseMillis (%u) should be less than "
"GCPauseIntervalMillis (%u)",
MaxGCPauseMillis, GCPauseIntervalMillis);
vm_exit_during_initialization(buffer);
}
double max_gc_time = (double) MaxGCPauseMillis / 1000.0; double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
guarantee(max_gc_time < time_slice, double time_slice = (double) GCPauseIntervalMillis / 1000.0;
"Max GC time should not be greater than the time slice");
_mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
_sigma = (double) G1ConfidencePercent / 100.0; _sigma = (double) G1ConfidencePercent / 100.0;
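
The new constructor code validates MaxGCPauseMillis and GCPauseIntervalMillis: both must be positive, the interval may not be set without the target, the target defaults to 200 ms, the interval defaults to target + 1, and the target must stay below the interval. A compact sketch of that decision table, with plain booleans standing in for the FLAG_IS_DEFAULT / FLAG_SET_DEFAULT machinery:

    #include <cstdio>
    #include <cstdlib>

    // Resolve the pause target / interval pair, or exit on an inconsistent
    // combination. The *_set parameters stand in for !FLAG_IS_DEFAULT(...).
    void resolve_pause_flags(bool target_set,   unsigned target_ms,
                             bool interval_set, unsigned interval_ms,
                             unsigned* out_target, unsigned* out_interval) {
      if (target_set && target_ms < 1)     { std::puts("MaxGCPauseMillis must be > 0");      std::exit(1); }
      if (interval_set && interval_ms < 1) { std::puts("GCPauseIntervalMillis must be > 0"); std::exit(1); }
      if (!target_set) {
        if (interval_set) { std::puts("interval cannot be set without a pause target"); std::exit(1); }
        target_ms = 200;              // G1's default pause target
      }
      if (!interval_set) {
        interval_ms = target_ms + 1;  // keep target < interval, with maximum flexibility
      }
      if (target_ms >= interval_ms)   { std::puts("pause target must be below the interval"); std::exit(1); }
      *out_target   = target_ms;
      *out_interval = interval_ms;
    }
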
@ -782,16 +836,17 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
// if they are not set properly // if they are not set properly
for (int i = 0; i < _parallel_gc_threads; ++i) { for (int i = 0; i < _parallel_gc_threads; ++i) {
_par_last_ext_root_scan_times_ms[i] = -666.0; _par_last_gc_worker_start_times_ms[i] = -1234.0;
_par_last_mark_stack_scan_times_ms[i] = -666.0; _par_last_ext_root_scan_times_ms[i] = -1234.0;
_par_last_update_rs_start_times_ms[i] = -666.0; _par_last_mark_stack_scan_times_ms[i] = -1234.0;
_par_last_update_rs_times_ms[i] = -666.0; _par_last_update_rs_times_ms[i] = -1234.0;
_par_last_update_rs_processed_buffers[i] = -666.0; _par_last_update_rs_processed_buffers[i] = -1234.0;
_par_last_scan_rs_start_times_ms[i] = -666.0; _par_last_scan_rs_times_ms[i] = -1234.0;
_par_last_scan_rs_times_ms[i] = -666.0; _par_last_scan_new_refs_times_ms[i] = -1234.0;
_par_last_scan_new_refs_times_ms[i] = -666.0; _par_last_obj_copy_times_ms[i] = -1234.0;
_par_last_obj_copy_times_ms[i] = -666.0; _par_last_termination_times_ms[i] = -1234.0;
_par_last_termination_times_ms[i] = -666.0; _par_last_termination_attempts[i] = -1234.0;
_par_last_gc_worker_end_times_ms[i] = -1234.0;
} }
#endif #endif
@ -942,9 +997,9 @@ T sum_of(T* sum_arr, int start, int n, int N) {
return sum; return sum;
} }
void G1CollectorPolicy::print_par_stats (int level, void G1CollectorPolicy::print_par_stats(int level,
const char* str, const char* str,
double* data, double* data,
bool summary) { bool summary) {
double min = data[0], max = data[0]; double min = data[0], max = data[0];
double total = 0.0; double total = 0.0;
@ -973,10 +1028,10 @@ void G1CollectorPolicy::print_par_stats (int level,
gclog_or_tty->print_cr("]"); gclog_or_tty->print_cr("]");
} }
void G1CollectorPolicy::print_par_buffers (int level, void G1CollectorPolicy::print_par_sizes(int level,
const char* str, const char* str,
double* data, double* data,
bool summary) { bool summary) {
double min = data[0], max = data[0]; double min = data[0], max = data[0];
double total = 0.0; double total = 0.0;
int j; int j;
@ -1321,15 +1376,22 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
} }
if (parallel) { if (parallel) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms); print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false); print_par_stats(2, "GC Worker Start Time",
_par_last_gc_worker_start_times_ms, false);
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
print_par_buffers(3, "Processed Buffers", print_par_sizes(3, "Processed Buffers",
_par_last_update_rs_processed_buffers, true); _par_last_update_rs_processed_buffers, true);
print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); print_par_stats(2, "Ext Root Scanning",
print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); _par_last_ext_root_scan_times_ms);
print_par_stats(2, "Mark Stack Scanning",
_par_last_mark_stack_scan_times_ms);
print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
print_par_stats(2, "Termination", _par_last_termination_times_ms); print_par_stats(2, "Termination", _par_last_termination_times_ms);
print_par_sizes(3, "Termination Attempts",
_par_last_termination_attempts, true);
print_par_stats(2, "GC Worker End Time",
_par_last_gc_worker_end_times_ms, false);
print_stats(2, "Other", parallel_other_time); print_stats(2, "Other", parallel_other_time);
print_stats(1, "Clear CT", _cur_clear_ct_time_ms); print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
} else { } else {
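
The new "GC Worker Start Time", "GC Worker End Time" and "Termination Attempts" rows are emitted through the same per-worker summary helpers as the existing rows. A small illustration of that kind of min/avg/max summary over a per-thread array (purely illustrative, not the G1CollectorPolicy printer):

    #include <cstdio>

    // Summarize a per-GC-worker metric the way the log rows do: min / avg / max
    // over one value per worker thread.
    void print_worker_summary(const char* label, const double* data, int n) {
      double min = data[0], max = data[0], total = 0.0;
      for (int i = 0; i < n; ++i) {
        if (data[i] < min) min = data[i];
        if (data[i] > max) max = data[i];
        total += data[i];
      }
      std::printf("  [%s: min %.1f, avg %.1f, max %.1f]\n", label, min, total / n, max);
    }

    int main() {
      const double termination_ms[4] = {0.4, 0.7, 0.5, 0.6};
      print_worker_summary("Termination (ms)", termination_ms, 4);
      return 0;
    }
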
@ -1572,8 +1634,6 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms); adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
// </NEW PREDICTION> // </NEW PREDICTION>
_target_pause_time_ms = -1.0;
} }
// <NEW PREDICTION> // <NEW PREDICTION>
@ -2303,7 +2363,6 @@ G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
if (reached_target_length) { if (reached_target_length) {
assert( young_list_length > 0 && _g1->young_list()->length() > 0, assert( young_list_length > 0 && _g1->young_list()->length() > 0,
"invariant" ); "invariant" );
_target_pause_time_ms = max_pause_time_ms;
return true; return true;
} }
} else { } else {
@ -2335,6 +2394,17 @@ bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
} }
#endif #endif
bool
G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
if (!during_cycle) {
set_initiate_conc_mark_if_possible();
return true;
} else {
return false;
}
}
void void
G1CollectorPolicy::decide_on_conc_mark_initiation() { G1CollectorPolicy::decide_on_conc_mark_initiation() {
// We are about to decide on whether this pause will be an // We are about to decide on whether this pause will be an
@ -2801,7 +2871,8 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream
#endif // !PRODUCT #endif // !PRODUCT
bool bool
G1CollectorPolicy_BestRegionsFirst::choose_collection_set() { G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
double target_pause_time_ms) {
// Set this here - in case we're not doing young collections. // Set this here - in case we're not doing young collections.
double non_young_start_time_sec = os::elapsedTime(); double non_young_start_time_sec = os::elapsedTime();
@ -2814,26 +2885,19 @@ G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
start_recording_regions(); start_recording_regions();
guarantee(_target_pause_time_ms > -1.0 guarantee(target_pause_time_ms > 0.0,
NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot), err_msg("target_pause_time_ms = %1.6lf should be positive",
"_target_pause_time_ms should have been set!"); target_pause_time_ms));
#ifndef PRODUCT guarantee(_collection_set == NULL, "Precondition");
if (_target_pause_time_ms <= -1.0) {
assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
_target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
}
#endif
assert(_collection_set == NULL, "Precondition");
double base_time_ms = predict_base_elapsed_time_ms(_pending_cards); double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
double predicted_pause_time_ms = base_time_ms; double predicted_pause_time_ms = base_time_ms;
double target_time_ms = _target_pause_time_ms; double time_remaining_ms = target_pause_time_ms - base_time_ms;
double time_remaining_ms = target_time_ms - base_time_ms;
// the 10% and 50% values are arbitrary... // the 10% and 50% values are arbitrary...
if (time_remaining_ms < 0.10*target_time_ms) { if (time_remaining_ms < 0.10 * target_pause_time_ms) {
time_remaining_ms = 0.50 * target_time_ms; time_remaining_ms = 0.50 * target_pause_time_ms;
_within_target = false; _within_target = false;
} else { } else {
_within_target = true; _within_target = true;
@ -2996,7 +3060,18 @@ choose_collection_set_end:
_recorded_non_young_cset_choice_time_ms = _recorded_non_young_cset_choice_time_ms =
(non_young_end_time_sec - non_young_start_time_sec) * 1000.0; (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
return abandon_collection; // Here we are supposed to return whether the pause should be
// abandoned or not (i.e., whether the collection set is empty or
// not). However, this introduces a subtle issue when a pause is
// initiated explicitly with System.gc() and
// +ExplicitGCInvokesConcurrent (see Comment #2 in CR 6944166), it's
// supposed to start a marking cycle, and it's abandoned. So, by
// returning false here we are telling the caller never to consider
// a pause to be abandoned. We'll actually remove all the code
// associated with abandoned pauses as part of CR 6963209, but we are
// just disabling them this way for the moment to avoid increasing
// further the amount of changes for CR 6944166.
return false;
} }
void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() { void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
View File
@ -171,16 +171,17 @@ protected:
double* _cur_aux_times_ms; double* _cur_aux_times_ms;
bool* _cur_aux_times_set; bool* _cur_aux_times_set;
double* _par_last_gc_worker_start_times_ms;
double* _par_last_ext_root_scan_times_ms; double* _par_last_ext_root_scan_times_ms;
double* _par_last_mark_stack_scan_times_ms; double* _par_last_mark_stack_scan_times_ms;
double* _par_last_update_rs_start_times_ms;
double* _par_last_update_rs_times_ms; double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers; double* _par_last_update_rs_processed_buffers;
double* _par_last_scan_rs_start_times_ms;
double* _par_last_scan_rs_times_ms; double* _par_last_scan_rs_times_ms;
double* _par_last_scan_new_refs_times_ms; double* _par_last_scan_new_refs_times_ms;
double* _par_last_obj_copy_times_ms; double* _par_last_obj_copy_times_ms;
double* _par_last_termination_times_ms; double* _par_last_termination_times_ms;
double* _par_last_termination_attempts;
double* _par_last_gc_worker_end_times_ms;
// indicates that we are in young GC mode // indicates that we are in young GC mode
bool _in_young_gc_mode; bool _in_young_gc_mode;
@ -198,8 +199,6 @@ protected:
size_t _young_cset_length; size_t _young_cset_length;
bool _last_young_gc_full; bool _last_young_gc_full;
double _target_pause_time_ms;
unsigned _full_young_pause_num; unsigned _full_young_pause_num;
unsigned _partial_young_pause_num; unsigned _partial_young_pause_num;
@ -525,6 +524,10 @@ public:
return _mmu_tracker; return _mmu_tracker;
} }
double max_pause_time_ms() {
return _mmu_tracker->max_gc_time() * 1000.0;
}
double predict_init_time_ms() { double predict_init_time_ms() {
return get_new_prediction(_concurrent_mark_init_times_ms); return get_new_prediction(_concurrent_mark_init_times_ms);
} }
@ -559,13 +562,14 @@ public:
} }
protected: protected:
void print_stats (int level, const char* str, double value); void print_stats(int level, const char* str, double value);
void print_stats (int level, const char* str, int value); void print_stats(int level, const char* str, int value);
void print_par_stats (int level, const char* str, double* data) {
void print_par_stats(int level, const char* str, double* data) {
print_par_stats(level, str, data, true); print_par_stats(level, str, data, true);
} }
void print_par_stats (int level, const char* str, double* data, bool summary); void print_par_stats(int level, const char* str, double* data, bool summary);
void print_par_buffers (int level, const char* str, double* data, bool summary); void print_par_sizes(int level, const char* str, double* data, bool summary);
void check_other_times(int level, void check_other_times(int level,
NumberSeq* other_times_ms, NumberSeq* other_times_ms,
@ -891,6 +895,10 @@ public:
virtual void record_full_collection_start(); virtual void record_full_collection_start();
virtual void record_full_collection_end(); virtual void record_full_collection_end();
void record_gc_worker_start_time(int worker_i, double ms) {
_par_last_gc_worker_start_times_ms[worker_i] = ms;
}
void record_ext_root_scan_time(int worker_i, double ms) { void record_ext_root_scan_time(int worker_i, double ms) {
_par_last_ext_root_scan_times_ms[worker_i] = ms; _par_last_ext_root_scan_times_ms[worker_i] = ms;
} }
@ -912,10 +920,6 @@ public:
_all_mod_union_times_ms->add(ms); _all_mod_union_times_ms->add(ms);
} }
void record_update_rs_start_time(int thread, double ms) {
_par_last_update_rs_start_times_ms[thread] = ms;
}
void record_update_rs_time(int thread, double ms) { void record_update_rs_time(int thread, double ms) {
_par_last_update_rs_times_ms[thread] = ms; _par_last_update_rs_times_ms[thread] = ms;
} }
@ -925,10 +929,6 @@ public:
_par_last_update_rs_processed_buffers[thread] = processed_buffers; _par_last_update_rs_processed_buffers[thread] = processed_buffers;
} }
void record_scan_rs_start_time(int thread, double ms) {
_par_last_scan_rs_start_times_ms[thread] = ms;
}
void record_scan_rs_time(int thread, double ms) { void record_scan_rs_time(int thread, double ms) {
_par_last_scan_rs_times_ms[thread] = ms; _par_last_scan_rs_times_ms[thread] = ms;
} }
@ -953,16 +953,13 @@ public:
_par_last_obj_copy_times_ms[thread] += ms; _par_last_obj_copy_times_ms[thread] += ms;
} }
void record_obj_copy_time(double ms) { void record_termination(int thread, double ms, size_t attempts) {
record_obj_copy_time(0, ms);
}
void record_termination_time(int thread, double ms) {
_par_last_termination_times_ms[thread] = ms; _par_last_termination_times_ms[thread] = ms;
_par_last_termination_attempts[thread] = (double) attempts;
} }
void record_termination_time(double ms) { void record_gc_worker_end_time(int worker_i, double ms) {
record_termination_time(0, ms); _par_last_gc_worker_end_times_ms[worker_i] = ms;
} }
void record_pause_time_ms(double ms) { void record_pause_time_ms(double ms) {
@ -1013,7 +1010,7 @@ public:
// Choose a new collection set. Marks the chosen regions as being // Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of // "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods. // the collection set are available via access methods.
virtual bool choose_collection_set() = 0; virtual bool choose_collection_set(double target_pause_time_ms) = 0;
// The head of the list (via "next_in_collection_set()") representing the // The head of the list (via "next_in_collection_set()") representing the
// current collection set. // current collection set.
@ -1082,6 +1079,12 @@ public:
void set_during_initial_mark_pause() { _during_initial_mark_pause = true; } void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; } void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It's best if it
// is called during a safepoint when the test whether a cycle is in
// progress or not is stable.
bool force_initial_mark_if_outside_cycle();
// This is called at the very beginning of an evacuation pause (it // This is called at the very beginning of an evacuation pause (it
// has to be the first thing that the pause does). If // has to be the first thing that the pause does). If
// initiate_conc_mark_if_possible() is true, and the concurrent // initiate_conc_mark_if_possible() is true, and the concurrent
@ -1264,7 +1267,7 @@ class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
// If the estimate is less than desirable, resize if possible. // If the estimate is less than desirable, resize if possible.
void expand_if_possible(size_t numRegions); void expand_if_possible(size_t numRegions);
virtual bool choose_collection_set(); virtual bool choose_collection_set(double target_pause_time_ms);
virtual void record_collection_pause_start(double start_time_sec, virtual void record_collection_pause_start(double start_time_sec,
size_t start_used); size_t start_used);
virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes, virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
View File
@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -303,7 +303,6 @@ void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
assert( _cards_scanned != NULL, "invariant" ); assert( _cards_scanned != NULL, "invariant" );
_cards_scanned[worker_i] = scanRScl.cards_done(); _cards_scanned[worker_i] = scanRScl.cards_done();
_g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
_g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0); _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
} }
@ -311,8 +310,6 @@ void HRInto_G1RemSet::updateRS(int worker_i) {
ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
double start = os::elapsedTime(); double start = os::elapsedTime();
_g1p->record_update_rs_start_time(worker_i, start * 1000.0);
// Apply the appropriate closure to all remaining log entries. // Apply the appropriate closure to all remaining log entries.
_g1->iterate_dirty_card_closure(false, worker_i); _g1->iterate_dirty_card_closure(false, worker_i);
// Now there should be no dirty cards. // Now there should be no dirty cards.
@ -471,7 +468,6 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
updateRS(worker_i); updateRS(worker_i);
scanNewRefsRS(oc, worker_i); scanNewRefsRS(oc, worker_i);
} else { } else {
_g1p->record_update_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
_g1p->record_update_rs_processed_buffers(worker_i, 0.0); _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
_g1p->record_update_rs_time(worker_i, 0.0); _g1p->record_update_rs_time(worker_i, 0.0);
_g1p->record_scan_new_refs_time(worker_i, 0.0); _g1p->record_scan_new_refs_time(worker_i, 0.0);
@ -479,7 +475,6 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
if (G1UseParallelRSetScanning || (worker_i == 0)) { if (G1UseParallelRSetScanning || (worker_i == 0)) {
scanRS(oc, worker_i); scanRS(oc, worker_i);
} else { } else {
_g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
_g1p->record_scan_rs_time(worker_i, 0.0); _g1p->record_scan_rs_time(worker_i, 0.0);
} }
} else { } else {
@ -681,9 +676,27 @@ void HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i
// We must complete this write before we do any of the reads below. // We must complete this write before we do any of the reads below.
OrderAccess::storeload(); OrderAccess::storeload();
// And process it, being careful of unallocated portions of TLAB's. // And process it, being careful of unallocated portions of TLAB's.
// The region for the current card may be a young region. The
// current card may have been a card that was evicted from the
// card cache. When the card was inserted into the cache, we had
// determined that its region was non-young. While in the cache,
// the region may have been freed during a cleanup pause, reallocated
// and tagged as young.
//
// We wish to filter out cards for such a region but the current
// thread, if we're running concurrently, may "see" the young type
// change at any time (so an earlier "is_young" check may pass or
// fail arbitrarily). We tell the iteration code to perform this
// filtering when it has been determined that there has been an actual
// allocation in this region and making it safe to check the young type.
bool filter_young = true;
HeapWord* stop_point = HeapWord* stop_point =
r->oops_on_card_seq_iterate_careful(dirtyRegion, r->oops_on_card_seq_iterate_careful(dirtyRegion,
&filter_then_update_rs_oop_cl); &filter_then_update_rs_oop_cl,
filter_young);
// If stop_point is non-null, then we encountered an unallocated region // If stop_point is non-null, then we encountered an unallocated region
// (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
// card and re-enqueue: if we put off the card until a GC pause, then the // card and re-enqueue: if we put off the card until a GC pause, then the
@ -794,8 +807,14 @@ void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
if (r == NULL) { if (r == NULL) {
assert(_g1->is_in_permanent(start), "Or else where?"); assert(_g1->is_in_permanent(start), "Or else where?");
} else { } else {
guarantee(!r->is_young(), "It was evicted in the current minor cycle."); // Checking whether the region we got back from the cache
// Process card pointer we get back from the hot card cache // is young here is inappropriate. The region could have been
// freed, reallocated and tagged as young while in the cache.
// Hence we could see its young type change at any time.
//
// Process card pointer we get back from the hot card cache. This
// will check whether the region containing the card is young
// _after_ checking that the region has been allocated from.
concurrentRefineOneCard_impl(res, worker_i); concurrentRefineOneCard_impl(res, worker_i);
} }
} }
View File
@ -658,7 +658,8 @@ HeapRegion::object_iterate_mem_careful(MemRegion mr,
HeapWord* HeapWord*
HeapRegion:: HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr, oops_on_card_seq_iterate_careful(MemRegion mr,
FilterOutOfRegionClosure* cl) { FilterOutOfRegionClosure* cl,
bool filter_young) {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If we're within a stop-world GC, then we might look at a card in a // If we're within a stop-world GC, then we might look at a card in a
@ -672,6 +673,16 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
if (mr.is_empty()) return NULL; if (mr.is_empty()) return NULL;
// Otherwise, find the obj that extends onto mr.start(). // Otherwise, find the obj that extends onto mr.start().
// The intersection of the incoming mr (for the card) and the
// allocated part of the region is non-empty. This implies that
// we have actually allocated into this region. The code in
// G1CollectedHeap.cpp that allocates a new region sets the
// is_young tag on the region before allocating. Thus we
// safely know if this region is young.
if (is_young() && filter_young) {
return NULL;
}
// We used to use "block_start_careful" here. But we're actually happy // We used to use "block_start_careful" here. But we're actually happy
// to update the BOT while we do this... // to update the BOT while we do this...
HeapWord* cur = block_start(mr.start()); HeapWord* cur = block_start(mr.start());
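
oops_on_card_seq_iterate_careful() now takes a filter_young flag: once the card intersects the allocated part of the region, the young tag is safe to read, so concurrent refinement can skip young regions by returning NULL, while GC-time callers pass filter_young = false. A simplified sketch of that control flow (the types and fields are stand-ins, not HeapRegion's real layout):

    #include <cstddef>

    struct MemRegion {
      const char* start;
      const char* end;
      bool is_empty() const { return start >= end; }
    };

    struct Region {
      const char* top;     // allocation top
      bool        young;   // set before the region is allocated into

      // Returns NULL when the card needs no further work here: either the card
      // does not intersect allocated space, or it lies in a young region and the
      // caller asked for young regions to be filtered (concurrent refinement).
      const char* oops_on_card_iterate_careful(MemRegion mr, bool filter_young) {
        if (mr.end > top) mr.end = top;        // clip the card to allocated space
        if (mr.is_empty()) return NULL;

        // The intersection is non-empty, so the region has been allocated into
        // and its young tag (published before allocation) is safe to consult.
        if (young && filter_young) return NULL;

        // ... otherwise iterate the objects spanning the card ...
        return NULL;
      }
    };
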
View File
@@ -252,7 +252,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
     // survivor
   };
-  YoungType _young_type;
+  volatile YoungType _young_type;
   int _young_index_in_cset;
   SurvRateGroup* _surv_rate_group;
   int _age_index;
@@ -726,9 +726,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
   HeapWord*
   object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
+  // In this version - if filter_young is true and the region
+  // is a young region then we skip the iteration.
   HeapWord*
   oops_on_card_seq_iterate_careful(MemRegion mr,
-                                   FilterOutOfRegionClosure* cl);
+                                   FilterOutOfRegionClosure* cl,
+                                   bool filter_young);
   // The region "mr" is entirely in "this", and starts and ends at block
   // boundaries. The caller declares that all the contained blocks are
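Making _young_type volatile matches the comment added in the g1RemSet.cpp hunk: refinement threads can observe the tag changing, so the compiler must not cache it across checks. A tiny self-contained illustration of a volatile tag (a sketch only; portable modern code would normally use std::atomic, while HotSpot of this vintage pairs volatile with its own OrderAccess barriers):

    #include <cstdio>

    enum YoungType { NotYoung, Young, Survivor };

    struct RegionTag {
      // volatile forces every is_young() call to re-read the tag from memory,
      // since another thread (the allocator) may retag the region at any time.
      volatile YoungType young_type;

      RegionTag() : young_type(NotYoung) {}
      bool is_young() const { return young_type != NotYoung; }
      void set_young()      { young_type = Young; }
    };

    int main() {
      RegionTag r;
      r.set_young();   // allocator tags the region before handing it out
      std::printf("is_young: %d\n", r.is_young() ? 1 : 0);
      return 0;
    }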

View File

@@ -42,8 +42,65 @@ void VM_G1CollectFull::doit() {
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(!_should_initiate_conc_mark ||
+         ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
+          (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
+         "only a GC locker or a System.gc() induced GC should start a cycle");
   GCCauseSetter x(g1h, _gc_cause);
-  g1h->do_collection_pause_at_safepoint();
+  if (_should_initiate_conc_mark) {
+    // It's safer to read full_collections_completed() here, given
+    // that noone else will be updating it concurrently. Since we'll
+    // only need it if we're initiating a marking cycle, no point in
+    // setting it earlier.
+    _full_collections_completed_before = g1h->full_collections_completed();
+    // At this point we are supposed to start a concurrent cycle. We
+    // will do so if one is not already in progress.
+    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+  }
+  g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+}
+
+void VM_G1IncCollectionPause::doit_epilogue() {
+  VM_GC_Operation::doit_epilogue();
+  // If the pause was initiated by a System.gc() and
+  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
+  // that just started (or maybe one that was already in progress) to
+  // finish.
+  if (_gc_cause == GCCause::_java_lang_system_gc &&
+      _should_initiate_conc_mark) {
+    assert(ExplicitGCInvokesConcurrent,
+           "the only way to be here is if ExplicitGCInvokesConcurrent is set");
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    // In the doit() method we saved g1h->full_collections_completed()
+    // in the _full_collections_completed_before field. We have to
+    // wait until we observe that g1h->full_collections_completed()
+    // has increased by at least one. This can happen if a) we started
+    // a cycle and it completes, b) a cycle already in progress
+    // completes, or c) a Full GC happens.
+    // If the condition has already been reached, there's no point in
+    // actually taking the lock and doing the wait.
+    if (g1h->full_collections_completed() <=
+        _full_collections_completed_before) {
+      // The following is largely copied from CMS
+      Thread* thr = Thread::current();
+      assert(thr->is_Java_thread(), "invariant");
+      JavaThread* jt = (JavaThread*)thr;
+      ThreadToNativeFromVM native(jt);
+      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+      while (g1h->full_collections_completed() <=
+             _full_collections_completed_before) {
+        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
+      }
+    }
+  }
 }
 void VM_CGC_Operation::doit() {
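The epilogue above follows a classic pattern: snapshot a monotonically increasing "full collections completed" counter before the pause is scheduled, then, when the caller must observe a finished cycle (System.gc() with +ExplicitGCInvokesConcurrent), block on a monitor until the counter moves past the snapshot. A self-contained sketch of that pattern using standard C++ primitives (the diff itself uses FullGCCount_lock with MutexLockerEx instead):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Stand-in for g1h->full_collections_completed() plus FullGCCount_lock:
    // GC threads bump the counter when a marking cycle or Full GC finishes,
    // and requesters can wait for it to pass a previously taken snapshot.
    class CycleTracker {
      std::mutex              _lock;
      std::condition_variable _cv;
      unsigned int            _full_collections_completed = 0;

    public:
      unsigned int completed() {
        std::lock_guard<std::mutex> g(_lock);
        return _full_collections_completed;
      }

      // Called by the thread that finishes a cycle.
      void notify_cycle_completed() {
        {
          std::lock_guard<std::mutex> g(_lock);
          ++_full_collections_completed;
        }
        _cv.notify_all();
      }

      // Called by the requester with the snapshot it took before starting.
      void wait_until_completed(unsigned int completed_before) {
        std::unique_lock<std::mutex> g(_lock);
        _cv.wait(g, [&] { return _full_collections_completed > completed_before; });
      }
    };

    int main() {
      CycleTracker tracker;
      unsigned int before = tracker.completed();                 // snapshot in doit()
      std::thread gc([&] { tracker.notify_cycle_completed(); }); // cycle finishes later
      tracker.wait_until_completed(before);                      // doit_epilogue() waits
      gc.join();
      std::printf("observed a completed cycle\n");
      return 0;
    }

The real code additionally switches the waiting JavaThread to native state (ThreadToNativeFromVM) and waits with _no_safepoint_check_flag, so the blocked requester does not hold up safepoints; the sketch leaves those VM-specific details out.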

View File

@@ -31,13 +31,12 @@
 //   - VM_G1PopRegionCollectionPause
 class VM_G1CollectFull: public VM_GC_Operation {
- private:
  public:
-  VM_G1CollectFull(int gc_count_before,
-                   GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before)
-  {
-    _gc_cause = gc_cause;
+  VM_G1CollectFull(unsigned int gc_count_before,
+                   unsigned int full_gc_count_before,
+                   GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before) {
+    _gc_cause = cause;
   }
   ~VM_G1CollectFull() {}
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -67,12 +66,28 @@ class VM_G1CollectForAllocation: public VM_GC_Operation {
 };
 class VM_G1IncCollectionPause: public VM_GC_Operation {
-public:
-  VM_G1IncCollectionPause(int gc_count_before,
-                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
-    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
+private:
+  bool _should_initiate_conc_mark;
+  double _target_pause_time_ms;
+  unsigned int _full_collections_completed_before;
+public:
+  VM_G1IncCollectionPause(unsigned int gc_count_before,
+                          bool should_initiate_conc_mark,
+                          double target_pause_time_ms,
+                          GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before),
+      _full_collections_completed_before(0),
+      _should_initiate_conc_mark(should_initiate_conc_mark),
+      _target_pause_time_ms(target_pause_time_ms) {
+    guarantee(target_pause_time_ms > 0.0,
+              err_msg("target_pause_time_ms = %1.6lf should be positive",
+                      target_pause_time_ms));
+    _gc_cause = cause;
+  }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
+  virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
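The reworked VM_G1IncCollectionPause constructor now carries everything the pause needs (whether to initiate marking, the pause-time target, the cause) and rejects a non-positive target up front with guarantee(). A standalone sketch of that validate-in-the-constructor idea (guarantee()/err_msg() are HotSpot macros, so a plain exception stands in here, and the class below is an invented stand-in rather than the real VM op):

    #include <cstdio>
    #include <stdexcept>

    class IncCollectionPauseOp {
      bool         _should_initiate_conc_mark;
      double       _target_pause_time_ms;
      unsigned int _full_collections_completed_before;

    public:
      IncCollectionPauseOp(unsigned int gc_count_before,
                           bool should_initiate_conc_mark,
                           double target_pause_time_ms)
        : _should_initiate_conc_mark(should_initiate_conc_mark),
          _target_pause_time_ms(target_pause_time_ms),
          _full_collections_completed_before(0) {
        // Reject bad arguments as early as possible, like the guarantee() above.
        if (target_pause_time_ms <= 0.0) {
          throw std::invalid_argument("target_pause_time_ms should be positive");
        }
        (void)gc_count_before;  // the real op forwards this to its base class
      }
    };

    int main() {
      IncCollectionPauseOp ok(42, true, 200.0);     // accepted
      (void)ok;
      try {
        IncCollectionPauseOp bad(43, false, 0.0);   // rejected
      } catch (const std::invalid_argument& e) {
        std::printf("rejected: %s\n", e.what());
      }
      return 0;
    }

One detail worth keeping in mind when reading the real initializer list: C++ always initializes members in declaration order, regardless of the order written in the list, so listing _full_collections_completed_before(0) first does not change when it is initialized.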

View File

@@ -367,4 +367,6 @@ vm_operations_g1.hpp    vmGCOperations.hpp
 vm_operations_g1.cpp    vm_operations_g1.hpp
 vm_operations_g1.cpp    g1CollectedHeap.inline.hpp
+vm_operations_g1.cpp    g1CollectorPolicy.hpp
+vm_operations_g1.cpp    interfaceSupport.hpp
 vm_operations_g1.cpp    isGCActiveMark.hpp

Some files were not shown because too many files have changed in this diff.