This commit is contained in:
J. Duke 2017-07-05 17:38:16 +02:00
commit 69b987746b
120 changed files with 2837 additions and 1626 deletions

View File

@ -108,3 +108,4 @@ cc58c11af15411042719e9c82707fdbef60a9e0f jdk7-b130
5d86d951426aaf340b1ba84ae2d5ab5da65a71e2 jdk7-b131
0f62a65fb666b337caa585015ab6ea2e60e709ca jdk7-b132
c6f380693342feadccc5fe2c5adf500e861361aa jdk7-b133
ddc2fcb3682ffd27f44354db666128827be7e3c3 jdk7-b134

README
View File

@ -29,13 +29,14 @@ Simple Build Instructions:
Set the environment variable ALT_BOOTDIR to the location of JDK 6 (see the example after these instructions).
2. Check the sanity of doing a build with your current system:
gnumake sanity
make sanity
See README-builds.html if you run into problems.
3. Do a complete build of the OpenJDK:
gnumake all
make all
The resulting JDK image should be found in build/*/j2sdk-image
where gnumake is GNU make 3.81 or newer, /usr/bin/make on Linux and
/usr/sfw/bin/gmake or /opt/sfw/bin/gmake on Solaris.
where make is GNU make 3.81 or newer; on Linux, /usr/bin/make is usually
3.81 or newer.
Complete details are available in README-builds.html.
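A minimal Linux session following these steps might look like
this (the JDK 6 path is illustrative; adjust for your system):

  export ALT_BOOTDIR=/usr/java/jdk1.6.0
  make sanity
  make all
  ls build/*/j2sdk-image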

View File

@ -54,7 +54,11 @@
<li><a href="#opensolaris">OpenSolaris</a></li>
</ul>
</li>
<li><a href="#directories">Source Directory Structure</a> </li>
<li><a href="#directories">Source Directory Structure</a>
<ul>
<li><a href="#drops">Managing the Source Drops</a></li>
</ul>
</li>
<li><a href="#building">Build Information</a>
<ul>
<li><a href="#gmake">GNU Make (<tt><i>gmake</i></tt>)</a> </li>
@ -65,7 +69,7 @@
<ul>
<li><a href="#bootjdk">Bootstrap JDK</a> </li>
<li><a href="#importjdk">Optional Import JDK</a> </li>
<li><a href="#ant">Ant</a> </li>
<li><a href="#ant">Ant 1.7.1</a> </li>
<li><a href="#cacerts">Certificate Authority File (cacert)</a> </li>
<li><a href="#compilers">Compilers</a>
<ul>
@ -114,13 +118,13 @@
<a href="http://mercurial.selenic.com/wiki/Mercurial">Mercurial</a>.
If you are new to Mercurial, please see the
<a href="http://mercurial.selenic.com/wiki/BeginnersGuides">Beginner Guides</a>
or refer to the <a href=""http://hgbook.red-bean.com/">Mercurial Book</a>.
or refer to the <a href="http://hgbook.red-bean.com/">Mercurial Book</a>.
The first few chapters of the book provide an excellent overview of
Mercurial, what it is and how it works.
<br>
For using Mercurial with the OpenJDK refer to the
<a href=""http://openjdk.java.net/guide/repositories.html#installConfig">
Developer Guide: Installing and Configuring Mercurial</a>
<a href="http://openjdk.java.net/guide/repositories.html#installConfig">
Developer Guide: Installing and Configuring Mercurial</a>
section for more information.
The Forest Extension is not part of the Mercurial install,
and is optional,
@ -146,14 +150,14 @@
using the Forest Extension:
<blockquote>
<tt>
hg fclone http://openjdk.java.net/jdk7/jdk7 <i>YourOpenJDK</i>
hg fclone http://hg.openjdk.java.net/jdk7/jdk7 <i>YourOpenJDK</i>
</tt>
</blockquote>
To get the entire set of OpenJDK Mercurial repositories
without using the Forest Extension:
<blockquote>
<tt>
hg clone http://openjdk.java.net/jdk7/jdk7 <i>YourOpenJDK</i>
hg clone http://hg.openjdk.java.net/jdk7/jdk7 <i>YourOpenJDK</i>
<br>cd <i>YourOpenJDK</i>
<br>sh ./get_source.sh
</tt>
@ -207,66 +211,50 @@
<tr>
<td>Linux X86 (32-bit)</td>
<td>Fedora 9</td>
<td>gcc 4 </td>
<td>JDK 6u14 FCS </td>
<td>gcc 4.3 </td>
<td>JDK 6u18</td>
</tr>
<tr>
<td>Linux X64 (64-bit)</td>
<td>Fedora 9</td>
<td>gcc 4 </td>
<td>JDK 6u14 FCS </td>
<td>gcc 4.3 </td>
<td>JDK 6u18</td>
</tr>
<tr>
<td>Solaris SPARC (32-bit)</td>
<td>Solaris 10u2 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
SunSolve</a> for patch downloads.
</td>
<td>Solaris 10 Update 6</td>
<td>Sun Studio 12 Update 1 + patches</td>
<td>JDK 6u14 FCS </td>
<td>JDK 6u18</td>
</tr>
<tr>
<td>Solaris SPARCV9 (64-bit)</td>
<td>Solaris 10u2 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
SunSolve</a> for patch downloads.
</td>
<td>Solaris 10 Update 6</td>
<td>Sun Studio 12 Update 1 + patches</td>
<td>JDK 6u14 FCS </td>
<td>JDK 6u18</td>
</tr>
<tr>
<td>Solaris X86 (32-bit)</td>
<td>Solaris 10u2 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
SunSolve</a> for patch downloads.
</td>
<td>Solaris 10 Update 6</td>
<td>Sun Studio 12 Update 1 + patches</td>
<td>JDK 6u14 FCS </td>
<td>JDK 6u18</td>
</tr>
<tr>
<td>Solaris X64 (64-bit)</td>
<td>Solaris 10u2 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
SunSolve</a> for patch downloads.
</td>
<td>Solaris 10 Update 6</td>
<td>Sun Studio 12 Update 1 + patches</td>
<td>JDK 6u14 FCS </td>
<td>JDK 6u18</td>
</tr>
<tr>
<td>Windows X86 (32-bit)</td>
<td>Windows XP</td>
<td>Microsoft Visual Studio C++ 2010 Professional Edition</td>
<td>JDK 6u14 FCS </td>
<td>JDK 6u18</td>
</tr>
<tr>
<td>Windows X64 (64-bit)</td>
<td>Windows Server 2003 - Enterprise x64 Edition</td>
<td>Microsoft Visual Studio C++ 2010 Professional Edition</td>
<td>JDK 6u14 FCS </td>
<td>JDK 6u18</td>
</tr>
</tbody>
</table>
@ -300,7 +288,7 @@
way to do it is to execute the following commands as user
<tt>root</tt>:
<p/>
<code>yum-builddep java-openjdk</code>
<code>yum-builddep java-1.6.0-openjdk</code>
<p/>
<code>yum install gcc gcc-c++</code>
<p/>
@ -345,10 +333,10 @@
</blockquote>
</blockquote>
<!-- ------------------------------------------------------ -->
<h3><a name="centos">CentOS 5.2</a></h3>
<h3><a name="centos">CentOS 5.5</a></h3>
<blockquote>
After installing
<a href="http://www.centos.org/">CentOS 5.2</a>
<a href="http://www.centos.org/">CentOS 5.5</a>
you need to make sure you have
the following Development bundles installed:
<blockquote>
@ -356,7 +344,7 @@
<li>Development Libraries</li>
<li>Development Tools</li>
<li>Java Development</li>
<li>X Software Development</li>
<li>X Software Development (Including XFree86-devel)</li>
</ul>
</blockquote>
<p>
@ -552,15 +540,105 @@
building the OpenJDK runtime libraries and misc files.
The top level <tt>Makefile</tt>
is used to build the entire OpenJDK.
<h3><a name="drops">Managing the Source Drops</a></h3>
<blockquote>
<p>
The repositories <tt>jaxp</tt> and <tt>jaxws</tt> actually
do not contain the sources for JAXP or JAX-WS.
These products have their own open source procedures at their
<a href="http://jaxp.java.net/">JAXP</a> and
<a href="http://jax-ws.java.net/">JAX-WS</a> home pages.
The OpenJDK project does need access to these sources to build
a complete JDK image because JAXP and JAX-WS are part of the JDK.
The current process for delivering the JAXP and JAX-WS sources
involves so-called "source drop bundles" downloaded from a public
website.
There are many reasons for this current mechanism, and it is
understood that this is not ideal for the open source community.
It is possible this process could change in the future.
<br>
<b>NOTE:</b> The <a href="http://download.java.net/openjdk/jdk7/">
Complete OpenJDK Source Bundles</a> <u>will</u> contain the JAXP and
JAX-WS sources.
</p>
<h4><a name="dropcreation">Creation of New Source Drop Bundles</a></h4>
<blockquote>
<ol>
<li>
The JAXP or JAX-WS team prepares a new zip bundle,
places a copy in a public download area on java.net,
sends us a link and a list of CRs (Change Request Numbers).
The older download bundles should not be deleted.
It is the responsibility of the JAXP and JAX-WS team to
place the proper GPL legal notices on the sources
and do any filtering or java re-packaging for the
OpenJDK instances of these classes.
</li>
<li>
The OpenJDK team copies this new bundle into a shared
area (e.g. <tt>/java/devtools/share/jdk7-drops</tt>).
Older bundles are never deleted, so the history is retained.
</li>
<li>
The OpenJDK team edits the ant property file
<tt>jaxp/jaxp.properties</tt> or
<tt>jaxws/jaxws.properties</tt> to update the
base URL, the zip bundle name, and the MD5 checksum
of the zip bundle
(on Solaris: <tt>sum -c md5 <i>bundlename</i></tt>);
a sketch of such an update follows this list.
</li>
<li>
The OpenJDK team reviews and commits those changes with the
given CRs.
</li>
</ol>
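<p>
A hypothetical sketch of such a properties update (the property
names, bundle name, URL, and checksum below are illustrative,
not the actual keys or values):
</p>
<pre><tt>
# jaxws/jaxws.properties (illustrative entries only)
drops.base.url=http://download.java.net/path/to/drops
jaxws_src.bundle.name=jdk7-jaxws-2011_01_28.zip
jaxws_src.bundle.md5.checksum=a1b2c3d4e5f60718293a4b5c6d7e8f90
</tt></pre>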
</blockquote>
<h4><a name="dropusage">Using Source Drop Bundles</a></h4>
<blockquote>
<p>
The ant scripts that build <tt>jaxp</tt> and <tt>jaxws</tt>
will attempt to locate these zip bundles from the directory
in the environment variable
<tt><a href="#ALT_DROPS_DIR">ALT_DROPS_DIR</a></tt>.
The checksums protect against wrong, corrupted, or
improperly modified sources.
Once the sources are made available, they will not be
populated again unless a <tt>make clobber</tt> is requested
or the <tt>jaxp/drop/</tt> or <tt>jaxws/drop/</tt>
directory is explicitly deleted.
<br>
<b>NOTE:</b> The default Makefile and ant script behavior
is to NOT download these bundles from the public http site.
In general, downloading
during the build process is not advised; it creates too much
unpredictability in the build.
However, you can use <tt>make ALLOW_DOWNLOADS=true</tt> to
tell the ant script that the download of the zip bundle is
acceptable.
</p>
<p>
The recommended procedure for keeping a cache of these
source bundles would be to download them once, place them
in a directory outside the repositories, and then set
<tt><a href="#ALT_DROPS_DIR">ALT_DROPS_DIR</a></tt> to refer
to that directory.
These drop bundles do change occasionally, so the newer
bundles may need to be added to this area from time to time.
</p>
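<p>
A minimal sketch of that procedure on a unix-like system
(directory and bundle names are illustrative):
</p>
<pre><tt>
mkdir -p /java/devtools/share/jdk7-drops
cd /java/devtools/share/jdk7-drops
# download the current bundles once (URL is illustrative)
wget http://download.java.net/path/to/jdk7-jaxws-2011_01_28.zip
export ALT_DROPS_DIR=/java/devtools/share/jdk7-drops
cd <i>YourOpenJDK</i>
make sanity &amp;&amp; make
</tt></pre>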
</blockquote>
</blockquote>
</blockquote>
<!-- ------------------------------------------------------ -->
<hr>
<h2><a name="building">Build Information</a></h2>
<blockquote>
Building the OpenJDK
is done with a <tt><i>gmake</i></tt>
command line and various
environment or make variable settings that direct the make rules
is done with a <a href="#gmake">GNU <tt>make</tt></a> command line
and various
environment or make variable settings that direct the makefile rules
to where various components have been installed.
Where possible the makefiles will attempt to locate the various
components in the default locations or any component-specific
@ -578,7 +656,7 @@
<pre><tt>
bash
. jdk/make/jdk_generic_profile.sh
<i>gmake</i> sanity &amp;&amp; <i>gmake</i>
<a href="#gmake"><tt>make</tt></a> sanity &amp;&amp; <a href="#gmake"><tt>make</tt></a>
</tt></pre>
</blockquote>
<p>
@ -599,25 +677,31 @@
A few notes about using GNU make:
<ul>
<li>
In general, you need GNU make version 3.81 or newer.
You need GNU make version 3.81 or newer.
</li>
<li>
Place the location of the GNU make binary in the <tt>PATH</tt>.
</li>
<li>
<strong>Linux:</strong>
The <tt>/usr/bin/make</tt> command should work fine for you.
The <tt>/usr/bin/make</tt> command should be 3.81 or newer
and should work fine for you (you can verify this with the
version check shown after this list).
If this version is not 3.81 or newer,
see the <a href="#buildgmake">"Building GNU make"</a> section.
</li>
<li>
<strong>Solaris:</strong>
Do NOT use <tt>/usr/bin/make</tt> on Solaris.
If your Solaris system has the software
from the Solaris Companion CD installed,
you should use <tt>gmake</tt>
you should try to use <tt>gmake</tt>
which will be located in either the <tt>/opt/sfw/bin</tt> or
<tt>/usr/sfw/bin</tt> directory.
In more recent versions of Solaris GNU make can be found
at <tt>/usr/bin/gmake</tt>.
In more recent versions of Solaris GNU make might be found
at <tt>/usr/bin/gmake</tt>.<br>
<b>NOTE:</b> It is very likely that this <tt>gmake</tt>
is 3.80; you need 3.81, in which case
see the <a href="#buildgmake">"Building GNU make"</a> section.
</li>
<li>
<strong>Windows:</strong>
@ -627,30 +711,25 @@
as a <tt>make.exe</tt> built for something like
<a href="http://www.mkssoftware.com/">MKS</a>).
<br>
<b>WARNING:</b> Watch out for make version 3.81, it may
<b>WARNING:</b> Watch out for some make 3.81 versions; they may
not work due to a lack of support for MS-DOS drive letter paths
like <tt>C:/</tt> or <tt>C:\</tt>.
Use a 3.80 version, or find a newer
version that has this problem fixed.
The older 3.80 version of make.exe can be downloaded with this
<a href="http://cygwin.paracoda.com/release/make/make-3.80-1.tar.bz2" target="_blank">
link</a>.
Use of this older 3.80 make.exe may require that you install the
libintl2.dll library or libintl2 cygwin package which is
no longer installed by default by the cygwin installer.
<br>
Also see the
You may be able to use the information at the
<a href="http://developer.mozilla.org/en/docs/Windows_build_prerequisites_using_cygwin#make" target="_blank">
mozilla developer center</a>
on this topic.
<br>
It's hoped that when make 3.82 starts shipping in a future cygwin
release that this MS-DOS path issue will be fixed.
In addition to the above 3.80 make.exe you can download
this
<br>
It may be possible to download the version at
<a href="http://www.cmake.org/files/cygwin/make.exe">
www.cmake.org make.exe</a> which will not have a libintl2.dll
dependency.
www.cmake.org make.exe</a>.
<br>
It might be necessary to build your own GNU make 3.81;
in that case, see the <a href="#buildgmake">"Building GNU make"</a>
section.
</li>
</ul>
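<p>
A quick way to check which version of GNU make a binary is
(the <tt>--version</tt> flag is standard GNU make):
</p>
<pre><tt>
make --version | head -1
gmake --version | head -1
</tt></pre>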
<p>
@ -662,6 +741,34 @@
The latest source to GNU make is available at
<a href="http://ftp.gnu.org/pub/gnu/make/" target="_blank">
ftp.gnu.org/pub/gnu/make/</a>.
</p>
<!-- ------------------------------------------------------ -->
<h4><a name="buildgmake">Building GNU make</a></h4>
<blockquote>
The first step is to get the GNU make 3.81 source from
<a href="http://ftp.gnu.org/pub/gnu/make/" target="_blank">
ftp.gnu.org/pub/gnu/make/</a>.
Building differs a little depending on the OS and, on Windows,
the unix toolset:
<ul>
<li>
<strong>Linux:</strong>
<tt>./configure && make</tt>
</li>
<li>
<strong>Solaris:</strong>
<tt>./configure && gmake CC=gcc</tt>
</li>
<li>
<strong>Windows for CYGWIN:</strong>
<tt>./configure && make</tt>
</li>
<li>
<strong>Windows for MKS: (CYGWIN is recommended)</strong>
<tt>./configure && make -f Makefile.win32</tt>
</li>
</ul>
</blockquote>
</blockquote>
<!-- ------------------------------------------------------ -->
<hr>
@ -713,7 +820,7 @@
</li>
<li>
Install
<a href="#ant">Ant</a>,
<a href="#ant">Ant 1.7.1 or newer</a>,
make sure it is in your PATH.
</li>
</ol>
@ -776,7 +883,7 @@
</li>
<li>
Install
<a href="#ant">Ant</a>,
<a href="#ant">Ant 1.7.1 or newer</a>,
make sure it is in your PATH.
</li>
</ol>
@ -862,7 +969,7 @@
</li>
<li>
Install
<a href="#ant">Ant</a>,
<a href="#ant">Ant 1.7.1 or newer</a>,
make sure it is in your PATH and set
<tt><a href="#ANT_HOME">ANT_HOME</a></tt>.
</li>
@ -923,14 +1030,20 @@
<blockquote>
All OpenJDK builds require access to at least Ant 1.7.1.
The Ant tool is available from the
<a href="http://ant.apache.org" target="_blank">
Ant download site</a>.
<a href="http://archive.apache.org/dist/ant/binaries/apache-ant-1.7.1-bin.zip" target="_blank">
Ant 1.7.1 archive download site</a>.
You should always make sure <tt>ant</tt> is in your PATH, and
on Windows you may also need to set
<tt><a href="#ANT_HOME">ANT_HOME</a></tt>
to point to the location of
the Ant installation; this is the directory
that contains the <tt>bin</tt> and <tt>lib</tt> directories.
<br>
<b>WARNING:</b> Ant versions used from IDE tools like NetBeans
or installed via system packages may not behave the same
as the one obtained from the Ant download bundles;
these system and IDE installers sometimes modify
the Ant installation enough to cause differences.
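<p>
A quick sanity check of an Ant setup (the installation path is
illustrative):
</p>
<pre><tt>
export ANT_HOME=/opt/apache-ant-1.7.1
export PATH=$ANT_HOME/bin:$PATH
which ant
ant -version
</tt></pre>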
</blockquote>
<!-- ------------------------------------------------------ -->
<h4><a name="cacerts">Certificate Authority File (cacert)</a></h4>
@ -962,7 +1075,7 @@
<blockquote>
<strong><a name="gcc">Linux gcc/binutils</a></strong>
<blockquote>
The GNU gcc compiler version should be 4 or newer.
The GNU gcc compiler version should be 4.3 or newer.
The compiler used should be the default compiler installed
in <tt>/usr/bin</tt>.
</blockquote>
@ -1047,21 +1160,16 @@
<strong><a name="msvc32">Windows i586: Microsoft Visual Studio 2010 Compilers</a></strong>
<blockquote>
<p>
<b>BEGIN WARNING</b>: At this time (Spring/Summer 2010) JDK 7 is starting a transition to
use the newest VS2010 Microsoft compilers. These build instructions are updated
to show where we are going. We have a QA process to go through before
official builds actually use VS2010. So for now, official builds are
still using VS2003. No other compilers are known to build the entire JDK,
<b>BEGIN WARNING</b>: JDK 7 has transitioned to
use the newest VS2010 Microsoft compilers.
No other compilers are known to build the entire JDK,
including non-open portions.
So for now you should be able to build with either VS2003 or VS2010.
We do not guarantee that VS2008 will work, although there is sufficient
makefile support to make at least basic JDK builds plausible.
Visual Studio 2010 Express compilers are now able to build all the
open source repositories, but this is 32 bit only. To build 64 bit
Windows binaries use the the 7.1 Windows SDK.<b>END WARNING.</b>
Windows binaries, use the 7.1 Windows SDK.
<b>END WARNING.</b>
<p>
The 32-bit OpenJDK Windows build
requires
The 32-bit OpenJDK Windows build requires
Microsoft Visual Studio C++ 2010 (VS2010) Professional
Edition or Express compiler.
The compiler and other tools are expected to reside
@ -1088,11 +1196,10 @@
</blockquote>
<strong><a name="msvc64">Windows x64: Microsoft Visual Studio 2010 Professional Compiler</a></strong>
<blockquote>
For <b>X64</b>, builds, when using the VS2010 Professional
compiler, the 64 bit build set up is much the same as 32 bit
For <b>X64</b>, the setup is much the same as for 32 bit,
except that you run <tt>amd64\VCVARS64.BAT</tt>
to set the compiler environment variables.
Previously 64 bit builds had used the 64 bit compiler in
Previously 64 bit builds had to use the 64 bit compiler in
an unbundled Windows SDK but this is no longer necessary if
you have VS2010 Professional.
</blockquote>
@ -1614,6 +1721,13 @@
and on Windows with CYGWIN
<tt>/usr/bin</tt>.
</dd>
<dt><tt><a name="ALT_DROPS_DIR">ALT_DROPS_DIR</a></tt> </dt>
<dd>
The location of any source drop bundles
(see <a href="#drops">Managing the Source Drops</a>).
The default will be
<tt>$(ALT_JDK_DEVTOOLS_PATH)/share/jdk7-drops</tt>.
</dd>
<dt><a name="ALT_UNIXCCS_PATH"><tt>ALT_UNIXCCS_PATH</tt></a></dt>
<dd>
<strong>Solaris only:</strong>
@ -1659,9 +1773,8 @@
located.
The default is whatever WINDOWSSDKDIR is set to
(or WindowsSdkDir) or the path
<pre>
c:\Program Files\Microsoft SDKs\Windows\v6.1a
</pre>
<br>
<tt>c:\Program Files\Microsoft SDKs\Windows\v7.0a</tt>
</dd>
<dt><tt><a name="ALT_DXSDK_PATH">ALT_DXSDK_PATH</a></tt> </dt>
<dd>
@ -1682,6 +1795,81 @@
</blockquote>
<!-- ------------------------------------------------------ -->
<hr>
<h2><a name="hints">Hints and Tips</a></h2>
<blockquote>
You don't have to follow all of these hints and tips; people do
build with systems that contradict them, but they may prove
helpful to some.
<ul>
<li>
If <tt>make sanity</tt> does not work, find out why and fix it
before going any further, or at least understand what it is
complaining about.
</li>
<li>
JDK: Keep in mind that you are building a JDK, but you need
a JDK (BOOTDIR JDK) to build this JDK.
</li>
<li>
Ant: The ant utility is a Java application; besides having
ant available to you, it is important that ant finds the right
java to run with. Make sure you can type <tt>ant -version</tt>
and get clean results with no error messages.
</li>
<li>
Linux: Try to favor the system packages over building your own
or getting packages from other areas.
Most Linux builds should be possible with the system's
available packages.
</li>
<li>
Solaris: Typically you will need to install compilers on your systems,
and occasionally GNU make 3.81 if a gmake binary is not available.
The gmake binary might not be 3.81; be careful.
</li>
<li>
Windows VS2010:
<ul>
<li>
Only the C++ part of VS2010 is needed.
Try to let the installation go to the default install directory.
Always reboot your system after installing VS2010.
The system environment variable VS100COMNTOOLS should be
set in your environment.
</li>
<li>
Make sure that TMP and TEMP are also set in the environment
and refer to Windows paths that exist, like <tt>C:\temp</tt>,
not <tt>/tmp</tt>, not <tt>/cygdrive/c/temp</tt>, and not <tt>C:/temp</tt>.
<tt>C:\temp</tt> is just an example; it is assumed that this area is
private to the user, so by default after installs you should
see a unique user path in these variables.
</li>
<li>
You need to use vsvars32.bat or vsvars64.bat to get the
PATH, INCLUDE, LIB, LIBPATH, and WINDOWSSDKDIR
variables set in your shell environment.
These bat files are not easy to use from a shell environment.
However, there is a script in the root jdk7 repository called
vsvars.sh that can help; it should be run only once in a shell
that will be doing the build, e.g.<br>
<tt>sh ./make/scripts/vsvars.sh -v10 > settings<br>
eval `cat settings`</tt><br>
Or just <tt>eval `sh ./make/scripts/vsvars.sh -v10`</tt>.
</li>
</ul>
</li>
<li>
Windows: PATH order is critical, see the
<a href="#paths">paths</a> section for more information.
</li>
<li>
Windows 64-bit builds: Use ARCH_DATA_MODEL=64 (see the sample invocation after this list).
</li>
</ul>
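<p>
A representative (hypothetical) 64-bit Windows build invocation
from a CYGWIN shell, pulling the above together (the bootstrap
JDK path is illustrative):
</p>
<pre><tt>
eval `sh ./make/scripts/vsvars.sh -v10`
export ALT_BOOTDIR=C:/jdk1.6.0
make ARCH_DATA_MODEL=64 sanity &amp;&amp; make ARCH_DATA_MODEL=64
</tt></pre>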
</blockquote>
<!-- ------------------------------------------------------ -->
<hr>
<h2><a name="troubleshooting">Troubleshooting</a></h2>
<blockquote>
A build can fail for any number of reasons.
@ -1706,6 +1894,19 @@
Some of the more common problems with builds are briefly described
below, with suggestions for remedies.
<ul>
<li>
<b>Corrupted Bundles on Windows:</b>
<blockquote>
Some virus scanning software has been known to corrupt the
downloading of zip bundles.
It may be necessary to disable the 'on access' or 'real time'
virus scanning features to prevent this corruption.
This type of "real time" virus scanning can also slow down the
build process significantly.
Temporarily disabling the feature or excluding the build
output directory may be necessary to get correct and faster builds.
</blockquote>
</li>
<li>
<b>Slow Builds:</b>
<blockquote>
@ -1801,7 +2002,11 @@
</blockquote>
</li>
<li>
<b>Windows Error Message: <tt>*** fatal error - couldn't allocate heap, ... </tt></b>
<b>Windows Error Messages:</b><br>
<tt>*** fatal error - couldn't allocate heap, ... </tt><br>
<tt>rm fails with "Directory not empty"</tt><br>
<tt>unzip fails with "cannot create ... Permission denied"</tt><br>
<tt>unzip fails with "cannot create ... Error 50"</tt><br>
<blockquote>
The CYGWIN software can conflict with other non-CYGWIN
software. See the CYGWIN FAQ section on
@ -1810,12 +2015,11 @@
</blockquote>
</li>
<li>
<b>Windows Error Message: <tt>*** multiple target patterns. Stop.</tt></b>
<b>Windows Error Message: <tt>spawn failed</tt></b>
<blockquote>
The CYGWIN make version 3.81 may not like the Windows <tt>C:/</tt>
style paths, it may not like the ':' character in the path
when used in a makefile target definition.
See the <a href="#gmake"><tt><i>gmake</i></tt></a> section.
Try rebooting the system, or there could be some kind of
issue with the disk or disk partition being used.
Sometimes it comes with a "Permission Denied" message.
</blockquote>
</li>
</ul>

View File

@ -154,3 +154,5 @@ e9aa2ca89ad6c53420623d579765f9706ec523d7 hs21-b02
0e531ab5ba04967a0e9aa6aef65e6eb3a0dcf632 jdk7-b132
a8d643a4db47c7b58e0bcb49c77b5c3610de86a8 hs21-b03
1b3a350709e4325d759bb453ff3fb6a463270488 jdk7-b133
447e6faab4a8755d4860c2366630729dbaec111c jdk7-b134
3c76374706ea8a77e15aec8310e831e5734f8775 hs21-b04

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=21
HS_MINOR_VER=0
HS_BUILD_NUMBER=04
HS_BUILD_NUMBER=05
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@ -3179,7 +3179,7 @@ void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_re
Label& wrong_method_type) {
assert_different_registers(mtype_reg, mh_reg, temp_reg);
// compare method type against that of the receiver
RegisterOrConstant mhtype_offset = delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg);
RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg);
load_heap_oop(mh_reg, mhtype_offset, temp_reg);
cmp(temp_reg, mtype_reg);
br(Assembler::notEqual, false, Assembler::pn, wrong_method_type);
@ -3195,14 +3195,14 @@ void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register m
Register temp_reg) {
assert_different_registers(vmslots_reg, mh_reg, temp_reg);
// load mh.type.form.vmslots
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) {
// hoist vmslots into every mh to avoid dependent load chain
ld( Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
ld( Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
} else {
Register temp2_reg = vmslots_reg;
load_heap_oop(Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
load_heap_oop(Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
ld( Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
load_heap_oop(Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)), temp2_reg);
load_heap_oop(Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)), temp2_reg);
ld( Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)), vmslots_reg);
}
}
@ -3213,7 +3213,7 @@ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_
// pick out the interpreted side of the handler
// NOTE: vmentry is not an oop!
ld_ptr(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
ld_ptr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg), temp_reg);
// off we go...
ld_ptr(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes(), temp_reg);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1188,7 +1188,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
__ st_ptr(O2, XXX_STATE(_stack)); // PREPUSH
__ lduh(max_stack, O3); // Full size expression stack
guarantee(!EnableMethodHandles, "no support yet for java.dyn.MethodHandle"); //6815692
guarantee(!EnableMethodHandles, "no support yet for java.lang.invoke.MethodHandle"); //6815692
//6815692//if (EnableMethodHandles)
//6815692// __ inc(O3, methodOopDesc::extra_stack_entries());
__ sll(O3, LogBytesPerWord, O3);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,6 +51,7 @@ define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize
define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
define_pd_global(intx, InlineSmallCode, 1500);
#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, ThreadStackSize, 1024);
@ -71,4 +72,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
#endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -260,7 +260,7 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Method handle invoker
// Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableMethodHandles) {
return generate_abstract_entry();

View File

@ -112,8 +112,8 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
}
// given the MethodType, find out where the MH argument is buried
__ load_heap_oop(Address(O0_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot);
__ ldsw( Address(O4_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
__ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot);
__ ldsw( Address(O4_argslot, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
__ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase);
// Note: argument_address uses its input as a scratch register!
__ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle);
@ -141,10 +141,10 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
// load up an adapter from the calling type (Java weaves this)
Register O2_form = O2_scratch;
Register O3_adapter = O3_scratch;
__ load_heap_oop(Address(O0_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O2_form);
// load_heap_oop(Address(O2_form, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
__ load_heap_oop(Address(O0_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, O1_scratch)), O2_form);
// load_heap_oop(Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
// deal with old JDK versions:
__ add( Address(O2_form, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
__ add( Address(O2_form, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
__ cmp(O3_adapter, O2_form);
Label sorry_no_invoke_generic;
__ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic);
@ -376,16 +376,16 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt
// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
);
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
@ -413,22 +413,22 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
const Register O1_actual = O1;
const Register O2_required = O2;
guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
// Some handy addresses:
Address G5_method_fie( G5_method, in_bytes(methodOopDesc::from_interpreted_offset()));
Address G5_method_fce( G5_method, in_bytes(methodOopDesc::from_compiled_offset()));
Address G3_mh_vmtarget( G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
Address G3_mh_vmtarget( G3_method_handle, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes());
Address G3_dmh_vmindex( G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
Address G3_dmh_vmindex( G3_method_handle, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes());
Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
Address G3_bmh_argument( G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
Address G3_bmh_vmargslot( G3_method_handle, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes());
Address G3_bmh_argument( G3_method_handle, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes());
Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
Address G3_amh_vmargslot( G3_method_handle, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes());
Address G3_amh_argument ( G3_method_handle, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes());
Address G3_amh_conversion(G3_method_handle, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes());
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
@ -453,7 +453,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ mov(O5_savedSP, SP); // Cut the stack back to where the caller started.
Label L_no_method;
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
// FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
__ set(AddressLiteral((address) &_raise_exception_method), G5_method);
__ ld_ptr(Address(G5_method, 0), G5_method);
__ tst(G5_method);

View File

@ -3303,7 +3303,7 @@ void TemplateTable::invokedynamic(int byte_no) {
__ sll(Rret, LogBytesPerWord, Rret);
__ ld_ptr(Rtemp, Rret, Rret); // get return address
__ load_heap_oop(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
__ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
__ null_check(G3_method_handle);
// Adjust Rret first so Llast_SP can be same as Rret

View File

@ -7831,7 +7831,7 @@ RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_ad
void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
Register temp_reg,
Label& wrong_method_type) {
Address type_addr(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg));
Address type_addr(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg));
// compare method type against that of the receiver
if (UseCompressedOops) {
load_heap_oop(temp_reg, type_addr);
@ -7851,14 +7851,14 @@ void MacroAssembler::load_method_handle_vmslots(Register vmslots_reg, Register m
Register temp_reg) {
assert_different_registers(vmslots_reg, mh_reg, temp_reg);
// load mh.type.form.vmslots
if (java_dyn_MethodHandle::vmslots_offset_in_bytes() != 0) {
if (java_lang_invoke_MethodHandle::vmslots_offset_in_bytes() != 0) {
// hoist vmslots into every mh to avoid dependent load chain
movl(vmslots_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmslots_offset_in_bytes, temp_reg)));
movl(vmslots_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmslots_offset_in_bytes, temp_reg)));
} else {
Register temp2_reg = vmslots_reg;
load_heap_oop(temp2_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::type_offset_in_bytes, temp_reg)));
load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_dyn_MethodType::form_offset_in_bytes, temp_reg)));
movl(vmslots_reg, Address(temp2_reg, delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)));
load_heap_oop(temp2_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg)));
load_heap_oop(temp2_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, temp_reg)));
movl(vmslots_reg, Address(temp2_reg, delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, temp_reg)));
}
}
@ -7873,7 +7873,7 @@ void MacroAssembler::jump_to_method_handle_entry(Register mh_reg, Register temp_
// pick out the interpreted side of the handler
// NOTE: vmentry is not an oop!
movptr(temp_reg, Address(mh_reg, delayed_value(java_dyn_MethodHandle::vmentry_offset_in_bytes, temp_reg)));
movptr(temp_reg, Address(mh_reg, delayed_value(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes, temp_reg)));
// off we go...
jmp(Address(temp_reg, MethodHandleEntry::from_interpreted_entry_offset_in_bytes()));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,4 +72,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
#endif // CPU_X86_VM_GLOBALS_X86_HPP

View File

@ -231,7 +231,7 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Method handle invoker
// Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableMethodHandles) {
return generate_abstract_entry();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -318,7 +318,7 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// Method handle invoker
// Dispatch a method of the form java.dyn.MethodHandles::invoke(...)
// Dispatch a method of the form java.lang.invoke.MethodHandles::invoke(...)
address InterpreterGenerator::generate_method_handle_entry(void) {
if (!EnableMethodHandles) {
return generate_abstract_entry();

View File

@ -125,9 +125,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
}
// given the MethodType, find out where the MH argument is buried
__ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
__ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
Register rdx_vmslots = rdx_temp;
__ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
__ movl(rdx_vmslots, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
__ movptr(rcx_recv, __ argument_address(rdx_vmslots));
trace_method_handle(_masm, "invokeExact");
@ -154,11 +154,11 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
rcx_argslot, rbx_temp, rdx_temp);
// load up an adapter from the calling type (Java weaves this)
__ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
__ load_heap_oop(rdx_temp, Address(rax_mtype, __ delayed_value(java_lang_invoke_MethodType::form_offset_in_bytes, rdi_temp)));
Register rdx_adapter = rdx_temp;
// __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes()));
// __ load_heap_oop(rdx_adapter, Address(rdx_temp, java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes()));
// deal with old JDK versions:
__ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
__ lea(rdi_temp, Address(rdx_temp, __ delayed_value(java_lang_invoke_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
__ cmpptr(rdi_temp, rdx_temp);
Label sorry_no_invoke_generic;
__ jcc(Assembler::below, sorry_no_invoke_generic);
@ -371,16 +371,16 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt
// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
|(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
//|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
);
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}
@ -415,21 +415,21 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp);
guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
// some handy addresses
Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() );
Address rbx_method_fce( rbx, methodOopDesc::from_compiled_offset() );
Address rcx_mh_vmtarget( rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() );
Address rcx_dmh_vmindex( rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() );
Address rcx_mh_vmtarget( rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
Address rcx_dmh_vmindex( rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );
Address rcx_bmh_vmargslot( rcx_recv, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes() );
Address rcx_bmh_argument( rcx_recv, sun_dyn_BoundMethodHandle::argument_offset_in_bytes() );
Address rcx_bmh_vmargslot( rcx_recv, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes() );
Address rcx_bmh_argument( rcx_recv, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes() );
Address rcx_amh_vmargslot( rcx_recv, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes() );
Address rcx_amh_argument( rcx_recv, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes() );
Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
Address rcx_amh_vmargslot( rcx_recv, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes() );
Address rcx_amh_argument( rcx_recv, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes() );
Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
Address vmarg; // __ argument_address(vmargslot)
const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
@ -460,7 +460,7 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
Register rbx_method = rbx_temp;
Label L_no_method;
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
// FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
__ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
__ testptr(rbx_method, rbx_method);
__ jccb(Assembler::zero, L_no_method);

View File

@ -3110,7 +3110,7 @@ void TemplateTable::invokedynamic(int byte_no) {
__ profile_call(rsi);
}
__ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
__ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
__ null_check(rcx_method_handle);
__ prepare_to_jump_from_interpreted();
__ jump_to_method_handle_entry(rcx_method_handle, rdx);

View File

@ -3145,7 +3145,7 @@ void TemplateTable::invokedynamic(int byte_no) {
__ profile_call(r13);
}
__ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
__ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
__ null_check(rcx_method_handle);
__ prepare_to_jump_from_interpreted();
__ jump_to_method_handle_entry(rcx_method_handle, rdx);

View File

@ -436,6 +436,13 @@ void VM_Version::get_processor_features() {
UseCountLeadingZerosInstruction = true;
}
}
// On family 21 processors default is no sw prefetch
if ( cpu_family() == 21 ) {
if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
AllocatePrefetchStyle = 0;
}
}
}
if( is_intel() ) { // Intel cpus specific settings

View File

@ -524,6 +524,8 @@ name_for_methodOop(jvm_agent_t* J, uint64_t methodOopPtr, char * result, size_t
CHECK_FAIL(err);
err = read_pointer(J, constantPool + nameIndex * POINTER_SIZE + SIZE_constantPoolOopDesc, &nameSymbol);
CHECK_FAIL(err);
// The symbol is a CPSlot and has lower bit set to indicate metadata
nameSymbol &= (~1); // remove metadata lsb
err = ps_pread(J->P, nameSymbol + OFFSET_Symbol_length, &nameSymbolLength, 2);
CHECK_FAIL(err);
nameString = (char*)calloc(nameSymbolLength + 1, 1);
@ -535,6 +537,7 @@ name_for_methodOop(jvm_agent_t* J, uint64_t methodOopPtr, char * result, size_t
CHECK_FAIL(err);
err = read_pointer(J, constantPool + signatureIndex * POINTER_SIZE + SIZE_constantPoolOopDesc, &signatureSymbol);
CHECK_FAIL(err);
signatureSymbol &= (~1); // remove metadata lsb
err = ps_pread(J->P, signatureSymbol + OFFSET_Symbol_length, &signatureSymbolLength, 2);
CHECK_FAIL(err);
signatureString = (char*)calloc(signatureSymbolLength + 1, 1);

View File

@ -209,7 +209,7 @@ void Canonicalizer::do_StoreField (StoreField* x) {
// limit this optimization to current block
if (value != NULL && in_current_block(conv)) {
set_canonical(new StoreField(x->obj(), x->offset(), x->field(), value, x->is_static(),
x->state_before(), x->is_loaded(), x->is_initialized()));
x->state_before(), x->needs_patching()));
return;
}
}

View File

@ -1456,12 +1456,12 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
BasicType field_type = field->type()->basic_type();
ValueType* type = as_ValueType(field_type);
// call will_link again to determine if the field is valid.
const bool is_loaded = holder->is_loaded() &&
field->will_link(method()->holder(), code);
const bool is_initialized = is_loaded && holder->is_initialized();
const bool needs_patching = !holder->is_loaded() ||
!field->will_link(method()->holder(), code) ||
PatchALot;
ValueStack* state_before = NULL;
if (!is_initialized || PatchALot) {
if (!holder->is_initialized() || needs_patching) {
// save state before instruction for debug info when
// deoptimization happens during patching
state_before = copy_state_before();
@ -1469,10 +1469,6 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
Value obj = NULL;
if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) {
// commoning of class constants should only occur if the class is
// fully initialized and resolved in this constant pool. The will_link test
// above essentially checks if this class is resolved in this constant pool
// so, the is_initialized flag should be suffiect.
if (state_before != NULL) {
// build a patching constant
obj = new Constant(new ClassConstant(holder), state_before);
@ -1482,7 +1478,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
}
const int offset = is_loaded ? field->offset() : -1;
const int offset = !needs_patching ? field->offset() : -1;
switch (code) {
case Bytecodes::_getstatic: {
// check for compile-time constants, i.e., initialized static final fields
@ -1509,7 +1505,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
state_before = copy_state_for_exception();
}
push(type, append(new LoadField(append(obj), offset, field, true,
state_before, is_loaded, is_initialized)));
state_before, needs_patching)));
}
break;
}
@ -1518,7 +1514,7 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
append(new StoreField(append(obj), offset, field, val, true, state_before, is_loaded, is_initialized));
append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching));
}
break;
case Bytecodes::_getfield :
@ -1526,8 +1522,8 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
LoadField* load = new LoadField(apop(), offset, field, false, state_before, is_loaded, true);
Value replacement = is_loaded ? _memory->load(load) : load;
LoadField* load = new LoadField(apop(), offset, field, false, state_before, needs_patching);
Value replacement = !needs_patching ? _memory->load(load) : load;
if (replacement != load) {
assert(replacement->is_linked() || !replacement->can_be_linked(), "should already by linked");
push(type, replacement);
@ -1542,8 +1538,8 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
if (state_before == NULL) {
state_before = copy_state_for_exception();
}
StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, is_loaded, true);
if (is_loaded) store = _memory->store(store);
StoreField* store = new StoreField(apop(), offset, field, val, false, state_before, needs_patching);
if (!needs_patching) store = _memory->store(store);
if (store != NULL) {
append(store);
}

View File

@ -323,8 +323,6 @@ class Instruction: public CompilationResourceObj {
CanTrapFlag,
DirectCompareFlag,
IsEliminatedFlag,
IsInitializedFlag,
IsLoadedFlag,
IsSafepointFlag,
IsStaticFlag,
IsStrictfpFlag,
@ -693,7 +691,7 @@ BASE(AccessField, Instruction)
public:
// creation
AccessField(Value obj, int offset, ciField* field, bool is_static,
ValueStack* state_before, bool is_loaded, bool is_initialized)
ValueStack* state_before, bool needs_patching)
: Instruction(as_ValueType(field->type()->basic_type()), state_before)
, _obj(obj)
, _offset(offset)
@ -701,16 +699,9 @@ BASE(AccessField, Instruction)
, _explicit_null_check(NULL)
{
set_needs_null_check(!is_static);
set_flag(IsLoadedFlag, is_loaded);
set_flag(IsInitializedFlag, is_initialized);
set_flag(IsStaticFlag, is_static);
set_flag(NeedsPatchingFlag, needs_patching);
ASSERT_VALUES
if (!is_loaded || (PatchALot && !field->is_volatile())) {
// need to patch if the holder wasn't loaded or we're testing
// using PatchALot. Don't allow PatchALot for fields which are
// known to be volatile they aren't patchable.
set_flag(NeedsPatchingFlag, true);
}
// pin of all instructions with memory access
pin();
}
@ -721,11 +712,14 @@ BASE(AccessField, Instruction)
ciField* field() const { return _field; }
BasicType field_type() const { return _field->type()->basic_type(); }
bool is_static() const { return check_flag(IsStaticFlag); }
bool is_loaded() const { return check_flag(IsLoadedFlag); }
bool is_initialized() const { return check_flag(IsInitializedFlag); }
NullCheck* explicit_null_check() const { return _explicit_null_check; }
bool needs_patching() const { return check_flag(NeedsPatchingFlag); }
// Unresolved getstatic and putstatic can cause initialization.
// Technically it occurs at the Constant that materializes the base
// of the static fields but it's simpler to model it here.
bool is_init_point() const { return is_static() && (needs_patching() || !_field->holder()->is_initialized()); }
// manipulation
// Under certain circumstances, if a previous NullCheck instruction
@ -745,15 +739,15 @@ LEAF(LoadField, AccessField)
public:
// creation
LoadField(Value obj, int offset, ciField* field, bool is_static,
ValueStack* state_before, bool is_loaded, bool is_initialized)
: AccessField(obj, offset, field, is_static, state_before, is_loaded, is_initialized)
ValueStack* state_before, bool needs_patching)
: AccessField(obj, offset, field, is_static, state_before, needs_patching)
{}
ciType* declared_type() const;
ciType* exact_type() const;
// generic
HASHING2(LoadField, is_loaded() && !field()->is_volatile(), obj()->subst(), offset()) // cannot be eliminated if not yet loaded or if volatile
HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset()) // cannot be eliminated if needs patching or if volatile
};
@ -764,8 +758,8 @@ LEAF(StoreField, AccessField)
public:
// creation
StoreField(Value obj, int offset, ciField* field, Value value, bool is_static,
ValueStack* state_before, bool is_loaded, bool is_initialized)
: AccessField(obj, offset, field, is_static, state_before, is_loaded, is_initialized)
ValueStack* state_before, bool needs_patching)
: AccessField(obj, offset, field, is_static, state_before, needs_patching)
, _value(value)
{
set_flag(NeedsWriteBarrierFlag, as_ValueType(field_type())->is_object());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1156,7 +1156,7 @@ class LIR_OpJavaCall: public LIR_OpCall {
return
is_invokedynamic() // An invokedynamic is always a MethodHandle call site.
||
(method()->holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
(method()->holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() &&
methodOopDesc::is_method_handle_invoke_name(method()->name()->sid()));
}

View File

@ -1559,9 +1559,7 @@ void LIRGenerator::do_StoreField(StoreField* x) {
(info ? new CodeEmitInfo(info) : NULL));
}
if (is_volatile) {
assert(!needs_patching && x->is_loaded(),
"how do we know it's volatile if it's not loaded");
if (is_volatile && !needs_patching) {
volatile_field_store(value.result(), address, info);
} else {
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
@ -1627,9 +1625,7 @@ void LIRGenerator::do_LoadField(LoadField* x) {
address = generate_address(object.result(), x->offset(), field_type);
}
if (is_volatile) {
assert(!needs_patching && x->is_loaded(),
"how do we know it's volatile if it's not loaded");
if (is_volatile && !needs_patching) {
volatile_field_load(address, reg, info);
} else {
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
@ -2516,7 +2512,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
__ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
// Load target MethodHandle from CallSite object.
__ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
__ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
__ call_dynamic(target, receiver, result_register,
SharedRuntime::get_resolve_opt_virtual_call_stub(),

View File

@ -2703,7 +2703,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
assert(_fpu_stack_allocator != NULL, "must be present");
opr = _fpu_stack_allocator->to_fpu_stack(opr);
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
#endif
#ifdef SPARC
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
@ -2715,7 +2715,12 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
#endif
#ifdef VM_LITTLE_ENDIAN
VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
#else
VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
#endif
#ifdef _LP64
first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
second = &_int_0_scope_value;

View File

@ -141,7 +141,8 @@ class ValueNumberingVisitor: public InstructionVisitor {
// visitor functions
void do_StoreField (StoreField* x) {
if (!x->is_initialized()) {
if (x->is_init_point()) {
// putstatic is an initialization point so treat it as a wide kill
kill_memory();
} else {
kill_field(x->field());
@ -159,7 +160,8 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_Local (Local* x) { /* nothing to do */ }
void do_Constant (Constant* x) { /* nothing to do */ }
void do_LoadField (LoadField* x) {
if (!x->is_initialized()) {
if (x->is_init_point()) {
// getstatic is an initialization point so treat it as a wide kill
kill_memory();
}
}
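
The two branches above pick an invalidation strength for the value-numbering table: a store to a resolved field kills only cached loads of that field, while a get/putstatic that is an initialization point may run arbitrary <clinit> code and so must kill everything. A hedged sketch of those two kill operations over a table like the one sketched earlier:

    #include <map>
    #include <utility>

    // (receiver value number, field offset) -> value number of a cached load.
    static std::map<std::pair<int,int>, int> g_cached;

    // Ordinary store: only loads of this field can now be stale.
    void kill_field(int offset) {
      for (auto it = g_cached.begin(); it != g_cached.end(); ) {
        if (it->first.second == offset) it = g_cached.erase(it);
        else ++it;
      }
    }

    // Initialization point: <clinit> may read or write anything, so drop it all.
    void kill_memory() {
      g_cached.clear();
    }
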

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
// Return the target MethodHandle of this CallSite.
ciMethodHandle* ciCallSite::get_target() const {
VM_ENTRY_MARK;
oop method_handle_oop = java_dyn_CallSite::target(get_oop());
oop method_handle_oop = java_lang_invoke_CallSite::target(get_oop());
return CURRENT_ENV->get_object(method_handle_oop)->as_method_handle();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@
// ciCallSite
//
// The class represents a java.dyn.CallSite object.
// The class represents a java.lang.invoke.CallSite object.
class ciCallSite : public ciInstance {
public:
ciCallSite(instanceHandle h_i) : ciInstance(h_i) {}

View File

@ -177,7 +177,7 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Never trust strangely unstable finals: System.out, etc.
return false;
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/dyn") || holder->is_in_package("sun/dyn"))
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke"))
return true;
return TrustFinalNonStaticFields;
}
@ -191,8 +191,9 @@ void ciField::initialize_from(fieldDescriptor* fd) {
// Check to see if the field is constant.
if (_holder->is_initialized() && this->is_final()) {
if (!this->is_static()) {
// A field can be constant if it's a final static field or if it's
// a final non-static field of a trusted class ({java,sun}.dyn).
// A field can be constant if it's a final static field or if
// it's a final non-static field of a trusted class (classes in
// java.lang.invoke and sun.invoke packages and subpackages).
if (trust_final_non_static_fields(_holder)) {
_is_constant = true;
return;
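
The gate above controls constant folding of finals: a static final of an initialized class is always safe, but an instance final is folded only for system-built classes under java.lang.invoke or sun.invoke (or when the global trust switch is on), since ordinary code can still write finals reflectively. A standalone sketch of the decision, using a hypothetical FieldInfo record in place of ciField:

    #include <string>

    // Hypothetical summary of the facts the compiler consults.
    struct FieldInfo {
      bool holder_initialized;
      bool is_final;
      bool is_static;
      std::string holder_package;   // e.g. "java/lang/invoke"
      bool trust_final_nonstatics;  // the TrustFinalNonStaticFields-style switch
    };

    bool can_fold_to_constant(const FieldInfo& f) {
      if (!f.holder_initialized || !f.is_final) return false;
      if (f.is_static) return true;  // static final of an initialized class
      // Instance finals: only system-built closures, or global opt-in.
      if (f.holder_package.rfind("java/lang/invoke", 0) == 0 ||
          f.holder_package.rfind("sun/invoke", 0) == 0)
        return true;
      return f.trust_final_nonstatics;
    }
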

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -769,7 +769,7 @@ int ciMethod::scale_count(int count, float prof_factor) {
// signature-polymorphic MethodHandle methods, invokeExact or invokeGeneric.
bool ciMethod::is_method_handle_invoke() const {
if (!is_loaded()) {
bool flag = (holder()->name() == ciSymbol::java_dyn_MethodHandle() &&
bool flag = (holder()->name() == ciSymbol::java_lang_invoke_MethodHandle() &&
methodOopDesc::is_method_handle_invoke_name(name()->sid()));
return flag;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
// ciMethodHandle
//
// The class represents a java.dyn.MethodHandle object.
// The class represents a java.lang.invoke.MethodHandle object.
class ciMethodHandle : public ciInstance {
private:
ciMethod* _callee;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -342,9 +342,9 @@ ciObject* ciObjectFactory::create_new_object(oop o) {
return new (arena()) ciMethodData(h_md);
} else if (o->is_instance()) {
instanceHandle h_i(THREAD, (instanceOop)o);
if (java_dyn_CallSite::is_instance(o))
if (java_lang_invoke_CallSite::is_instance(o))
return new (arena()) ciCallSite(h_i);
else if (java_dyn_MethodHandle::is_instance(o))
else if (java_lang_invoke_MethodHandle::is_instance(o))
return new (arena()) ciMethodHandle(h_i);
else
return new (arena()) ciInstance(h_i);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -380,7 +380,7 @@ ciKlass* ciBytecodeStream::get_declared_method_holder() {
bool ignore;
// report as InvokeDynamic for invokedynamic, which is syntactically classless
if (cur_bc() == Bytecodes::_invokedynamic)
return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_InvokeDynamic(), false);
return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_lang_invoke_InvokeDynamic(), false);
return CURRENT_ENV->get_klass_by_index(cpool, get_method_holder_index(), ignore, _holder);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,12 +146,14 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
break;
case JVM_CONSTANT_MethodHandle :
case JVM_CONSTANT_MethodType :
if (!EnableMethodHandles ||
_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
classfile_parse_error(
(!EnableMethodHandles ?
"This JVM does not support constant tag %u in class file %s" :
"Class file version does not support constant tag %u in class file %s"),
"Class file version does not support constant tag %u in class file %s",
tag, CHECK);
}
if (!EnableMethodHandles) {
classfile_parse_error(
"This JVM does not support constant tag %u in class file %s",
tag, CHECK);
}
if (tag == JVM_CONSTANT_MethodHandle) {
@ -170,12 +172,14 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
case JVM_CONSTANT_InvokeDynamicTrans : // this tag appears only in old classfiles
case JVM_CONSTANT_InvokeDynamic :
{
if (!EnableInvokeDynamic ||
_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
if (_major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
classfile_parse_error(
(!EnableInvokeDynamic ?
"This JVM does not support constant tag %u in class file %s" :
"Class file version does not support constant tag %u in class file %s"),
"Class file version does not support constant tag %u in class file %s",
tag, CHECK);
}
if (!EnableInvokeDynamic) {
classfile_parse_error(
"This JVM does not support constant tag %u in class file %s",
tag, CHECK);
}
cfs->guarantee_more(5, CHECK); // bsm_index, nt, tag/access_flags
@ -1616,8 +1620,13 @@ methodHandle ClassFileParser::parse_method(constantPoolHandle cp, bool is_interf
AccessFlags access_flags;
if (name == vmSymbols::class_initializer_name()) {
// We ignore the access flags for a class initializer. (JVM Spec. p. 116)
flags = JVM_ACC_STATIC;
// We ignore the other access flags for a valid class initializer.
// (JVM Spec 2nd ed., chapter 4.6)
if (_major_version < 51) { // backward compatibility
flags = JVM_ACC_STATIC;
} else if ((flags & JVM_ACC_STATIC) == JVM_ACC_STATIC) {
flags &= JVM_ACC_STATIC | JVM_ACC_STRICT;
}
} else {
verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle));
}
@ -2797,11 +2806,11 @@ void ClassFileParser::java_lang_Class_fix_post(int* next_nonstatic_oop_offset_pt
// Force MethodHandle.vmentry to be an unmanaged pointer.
// There is no way for a classfile to express this, so we must help it.
void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
void ClassFileParser::java_lang_invoke_MethodHandle_fix_pre(constantPoolHandle cp,
typeArrayHandle fields,
FieldAllocationCount *fac_ptr,
TRAPS) {
// Add fake fields for java.dyn.MethodHandle instances
// Add fake fields for java.lang.invoke.MethodHandle instances
//
// This is not particularly nice, but since there is no way to express
// a native wordSize field in Java, we must do it at this level.
@ -2818,9 +2827,10 @@ void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
}
}
if (AllowTransitionalJSR292 && word_sig_index == 0) return;
if (word_sig_index == 0)
THROW_MSG(vmSymbols::java_lang_VirtualMachineError(),
"missing I or J signature (for vmentry) in java.dyn.MethodHandle");
"missing I or J signature (for vmentry) in java.lang.invoke.MethodHandle");
// Find vmentry field and change the signature.
bool found_vmentry = false;
@ -2857,9 +2867,10 @@ void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
}
}
if (AllowTransitionalJSR292 && !found_vmentry) return;
if (!found_vmentry)
THROW_MSG(vmSymbols::java_lang_VirtualMachineError(),
"missing vmentry byte field in java.dyn.MethodHandle");
"missing vmentry byte field in java.lang.invoke.MethodHandle");
}
@ -3224,9 +3235,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
java_lang_Class_fix_pre(&methods, &fac, CHECK_(nullHandle));
}
// adjust the vmentry field declaration in java.dyn.MethodHandle
if (EnableMethodHandles && class_name == vmSymbols::sun_dyn_MethodHandleImpl() && class_loader.is_null()) {
java_dyn_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
// adjust the vmentry field declaration in java.lang.invoke.MethodHandle
if (EnableMethodHandles && class_name == vmSymbols::java_lang_invoke_MethodHandle() && class_loader.is_null()) {
java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
}
if (AllowTransitionalJSR292 &&
EnableMethodHandles && class_name == vmSymbols::java_dyn_MethodHandle() && class_loader.is_null()) {
java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
}
if (AllowTransitionalJSR292 &&
EnableMethodHandles && class_name == vmSymbols::sun_dyn_MethodHandleImpl() && class_loader.is_null()) {
// allow vmentry field in MethodHandleImpl also
java_lang_invoke_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
}
// Add a fake "discovered" field if it is not present
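
The parse_method change above tightens <clinit> handling by class file version: before major version 51 every declared flag is discarded in favor of ACC_STATIC, while in version 51 and later a static <clinit> keeps only ACC_STATIC|ACC_STRICT and a non-static one falls through to normal modifier checking. A compact sketch of the rule (the flag values are the standard JVMS constants):

    #include <cstdint>

    // Standard JVMS access-flag constants.
    const uint16_t ACC_STATIC = 0x0008;
    const uint16_t ACC_STRICT = 0x0800;

    // Normalize the declared flags of a method named <clinit>.
    uint16_t clinit_flags(uint16_t flags, int major_version) {
      if (major_version < 51)            // pre-Java-7 class files: ignore all flags
        return ACC_STATIC;
      if (flags & ACC_STATIC)            // newer files: keep only STATIC|STRICT
        return flags & (ACC_STATIC | ACC_STRICT);
      return flags;                      // non-static <clinit>: left to later checks
    }
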

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -162,9 +162,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
// Adjust the next_nonstatic_oop_offset to place the fake fields
// before any Java fields.
void java_lang_Class_fix_post(int* next_nonstatic_oop_offset);
// Adjust the field allocation counts for java.dyn.MethodHandle to add
// Adjust the field allocation counts for java.lang.invoke.MethodHandle to add
// a fake address (void*) field.
void java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
void java_lang_invoke_MethodHandle_fix_pre(constantPoolHandle cp,
typeArrayHandle fields,
FieldAllocationCount *fac_ptr, TRAPS);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,6 +66,28 @@ static bool find_field(instanceKlass* ik,
return ik->find_local_field(name_symbol, signature_symbol, fd);
}
static bool find_hacked_field(instanceKlass* ik,
Symbol* name_symbol, Symbol* signature_symbol,
fieldDescriptor* fd,
bool allow_super = false) {
bool found = find_field(ik, name_symbol, signature_symbol, fd, allow_super);
if (!found && AllowTransitionalJSR292) {
Symbol* backup_sig = SystemDictionary::find_backup_signature(signature_symbol);
if (backup_sig != NULL) {
found = find_field(ik, name_symbol, backup_sig, fd, allow_super);
if (TraceMethodHandles) {
ResourceMark rm;
tty->print_cr("MethodHandles: %s.%s: backup for %s => %s%s",
ik->name()->as_C_string(), name_symbol->as_C_string(),
signature_symbol->as_C_string(), backup_sig->as_C_string(),
(found ? "" : " (NOT FOUND)"));
}
}
}
return found;
}
#define find_field find_hacked_field /* remove after AllowTransitionalJSR292 */
// Helpful routine for computing field offsets at run time rather than hardcoding them
static void
compute_offset(int &dest_offset,
@ -2176,41 +2198,43 @@ void java_lang_ref_SoftReference::set_clock(jlong value) {
}
// Support for java_dyn_MethodHandle
// Support for java_lang_invoke_MethodHandle
int java_dyn_MethodHandle::_type_offset;
int java_dyn_MethodHandle::_vmtarget_offset;
int java_dyn_MethodHandle::_vmentry_offset;
int java_dyn_MethodHandle::_vmslots_offset;
int java_lang_invoke_MethodHandle::_type_offset;
int java_lang_invoke_MethodHandle::_vmtarget_offset;
int java_lang_invoke_MethodHandle::_vmentry_offset;
int java_lang_invoke_MethodHandle::_vmslots_offset;
int sun_dyn_MemberName::_clazz_offset;
int sun_dyn_MemberName::_name_offset;
int sun_dyn_MemberName::_type_offset;
int sun_dyn_MemberName::_flags_offset;
int sun_dyn_MemberName::_vmtarget_offset;
int sun_dyn_MemberName::_vmindex_offset;
int java_lang_invoke_MemberName::_clazz_offset;
int java_lang_invoke_MemberName::_name_offset;
int java_lang_invoke_MemberName::_type_offset;
int java_lang_invoke_MemberName::_flags_offset;
int java_lang_invoke_MemberName::_vmtarget_offset;
int java_lang_invoke_MemberName::_vmindex_offset;
int sun_dyn_DirectMethodHandle::_vmindex_offset;
int java_lang_invoke_DirectMethodHandle::_vmindex_offset;
int sun_dyn_BoundMethodHandle::_argument_offset;
int sun_dyn_BoundMethodHandle::_vmargslot_offset;
int java_lang_invoke_BoundMethodHandle::_argument_offset;
int java_lang_invoke_BoundMethodHandle::_vmargslot_offset;
int sun_dyn_AdapterMethodHandle::_conversion_offset;
int java_lang_invoke_AdapterMethodHandle::_conversion_offset;
void java_dyn_MethodHandle::compute_offsets() {
void java_lang_invoke_MethodHandle::compute_offsets() {
klassOop k = SystemDictionary::MethodHandle_klass();
if (k != NULL && EnableMethodHandles) {
compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_dyn_MethodType_signature(), true);
compute_offset(_vmtarget_offset, k, vmSymbols::vmtarget_name(), vmSymbols::object_signature(), true);
compute_offset(_vmentry_offset, k, vmSymbols::vmentry_name(), vmSymbols::machine_word_signature(), true);
bool allow_super = false;
if (AllowTransitionalJSR292) allow_super = true; // temporary, to access java.dyn.MethodHandleImpl
compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_lang_invoke_MethodType_signature(), allow_super);
compute_offset(_vmtarget_offset, k, vmSymbols::vmtarget_name(), vmSymbols::object_signature(), allow_super);
compute_offset(_vmentry_offset, k, vmSymbols::vmentry_name(), vmSymbols::machine_word_signature(), allow_super);
// Note: MH.vmslots (if it is present) is a hoisted copy of MH.type.form.vmslots.
// It is optional pending experiments to keep or toss.
compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true);
compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), allow_super);
}
}
void sun_dyn_MemberName::compute_offsets() {
void java_lang_invoke_MemberName::compute_offsets() {
klassOop k = SystemDictionary::MemberName_klass();
if (k != NULL && EnableMethodHandles) {
compute_offset(_clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
@ -2222,14 +2246,14 @@ void sun_dyn_MemberName::compute_offsets() {
}
}
void sun_dyn_DirectMethodHandle::compute_offsets() {
void java_lang_invoke_DirectMethodHandle::compute_offsets() {
klassOop k = SystemDictionary::DirectMethodHandle_klass();
if (k != NULL && EnableMethodHandles) {
compute_offset(_vmindex_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature(), true);
}
}
void sun_dyn_BoundMethodHandle::compute_offsets() {
void java_lang_invoke_BoundMethodHandle::compute_offsets() {
klassOop k = SystemDictionary::BoundMethodHandle_klass();
if (k != NULL && EnableMethodHandles) {
compute_offset(_vmargslot_offset, k, vmSymbols::vmargslot_name(), vmSymbols::int_signature(), true);
@ -2237,22 +2261,22 @@ void sun_dyn_BoundMethodHandle::compute_offsets() {
}
}
void sun_dyn_AdapterMethodHandle::compute_offsets() {
void java_lang_invoke_AdapterMethodHandle::compute_offsets() {
klassOop k = SystemDictionary::AdapterMethodHandle_klass();
if (k != NULL && EnableMethodHandles) {
compute_offset(_conversion_offset, k, vmSymbols::conversion_name(), vmSymbols::int_signature(), true);
}
}
oop java_dyn_MethodHandle::type(oop mh) {
oop java_lang_invoke_MethodHandle::type(oop mh) {
return mh->obj_field(_type_offset);
}
void java_dyn_MethodHandle::set_type(oop mh, oop mtype) {
void java_lang_invoke_MethodHandle::set_type(oop mh, oop mtype) {
mh->obj_field_put(_type_offset, mtype);
}
int java_dyn_MethodHandle::vmslots(oop mh) {
int java_lang_invoke_MethodHandle::vmslots(oop mh) {
int vmslots_offset = _vmslots_offset;
if (vmslots_offset != 0) {
#ifdef ASSERT
@ -2267,7 +2291,7 @@ int java_dyn_MethodHandle::vmslots(oop mh) {
}
// if MH.vmslots exists, hoist into it the value of type.form.vmslots
void java_dyn_MethodHandle::init_vmslots(oop mh) {
void java_lang_invoke_MethodHandle::init_vmslots(oop mh) {
int vmslots_offset = _vmslots_offset;
if (vmslots_offset != 0) {
mh->int_field_put(vmslots_offset, compute_vmslots(mh));
@ -2276,20 +2300,20 @@ void java_dyn_MethodHandle::init_vmslots(oop mh) {
// fetch type.form.vmslots, which is the number of JVM stack slots
// required to carry the arguments of this MH
int java_dyn_MethodHandle::compute_vmslots(oop mh) {
int java_lang_invoke_MethodHandle::compute_vmslots(oop mh) {
oop mtype = type(mh);
if (mtype == NULL) return 0; // Java code would get NPE
oop form = java_dyn_MethodType::form(mtype);
oop form = java_lang_invoke_MethodType::form(mtype);
if (form == NULL) return 0; // Java code would get NPE
return java_dyn_MethodTypeForm::vmslots(form);
return java_lang_invoke_MethodTypeForm::vmslots(form);
}
// fetch the low-level entry point for this mh
MethodHandleEntry* java_dyn_MethodHandle::vmentry(oop mh) {
MethodHandleEntry* java_lang_invoke_MethodHandle::vmentry(oop mh) {
return (MethodHandleEntry*) mh->address_field(_vmentry_offset);
}
void java_dyn_MethodHandle::set_vmentry(oop mh, MethodHandleEntry* me) {
void java_lang_invoke_MethodHandle::set_vmentry(oop mh, MethodHandleEntry* me) {
assert(_vmentry_offset != 0, "must be present");
// This is always the final step that initializes a valid method handle:
@ -2303,123 +2327,123 @@ void java_dyn_MethodHandle::set_vmentry(oop mh, MethodHandleEntry* me) {
/// MemberName accessors
oop sun_dyn_MemberName::clazz(oop mname) {
oop java_lang_invoke_MemberName::clazz(oop mname) {
assert(is_instance(mname), "wrong type");
return mname->obj_field(_clazz_offset);
}
void sun_dyn_MemberName::set_clazz(oop mname, oop clazz) {
void java_lang_invoke_MemberName::set_clazz(oop mname, oop clazz) {
assert(is_instance(mname), "wrong type");
mname->obj_field_put(_clazz_offset, clazz);
}
oop sun_dyn_MemberName::name(oop mname) {
oop java_lang_invoke_MemberName::name(oop mname) {
assert(is_instance(mname), "wrong type");
return mname->obj_field(_name_offset);
}
void sun_dyn_MemberName::set_name(oop mname, oop name) {
void java_lang_invoke_MemberName::set_name(oop mname, oop name) {
assert(is_instance(mname), "wrong type");
mname->obj_field_put(_name_offset, name);
}
oop sun_dyn_MemberName::type(oop mname) {
oop java_lang_invoke_MemberName::type(oop mname) {
assert(is_instance(mname), "wrong type");
return mname->obj_field(_type_offset);
}
void sun_dyn_MemberName::set_type(oop mname, oop type) {
void java_lang_invoke_MemberName::set_type(oop mname, oop type) {
assert(is_instance(mname), "wrong type");
mname->obj_field_put(_type_offset, type);
}
int sun_dyn_MemberName::flags(oop mname) {
int java_lang_invoke_MemberName::flags(oop mname) {
assert(is_instance(mname), "wrong type");
return mname->int_field(_flags_offset);
}
void sun_dyn_MemberName::set_flags(oop mname, int flags) {
void java_lang_invoke_MemberName::set_flags(oop mname, int flags) {
assert(is_instance(mname), "wrong type");
mname->int_field_put(_flags_offset, flags);
}
oop sun_dyn_MemberName::vmtarget(oop mname) {
oop java_lang_invoke_MemberName::vmtarget(oop mname) {
assert(is_instance(mname), "wrong type");
return mname->obj_field(_vmtarget_offset);
}
void sun_dyn_MemberName::set_vmtarget(oop mname, oop ref) {
void java_lang_invoke_MemberName::set_vmtarget(oop mname, oop ref) {
assert(is_instance(mname), "wrong type");
mname->obj_field_put(_vmtarget_offset, ref);
}
int sun_dyn_MemberName::vmindex(oop mname) {
int java_lang_invoke_MemberName::vmindex(oop mname) {
assert(is_instance(mname), "wrong type");
return mname->int_field(_vmindex_offset);
}
void sun_dyn_MemberName::set_vmindex(oop mname, int index) {
void java_lang_invoke_MemberName::set_vmindex(oop mname, int index) {
assert(is_instance(mname), "wrong type");
mname->int_field_put(_vmindex_offset, index);
}
oop java_dyn_MethodHandle::vmtarget(oop mh) {
oop java_lang_invoke_MethodHandle::vmtarget(oop mh) {
assert(is_instance(mh), "MH only");
return mh->obj_field(_vmtarget_offset);
}
void java_dyn_MethodHandle::set_vmtarget(oop mh, oop ref) {
void java_lang_invoke_MethodHandle::set_vmtarget(oop mh, oop ref) {
assert(is_instance(mh), "MH only");
mh->obj_field_put(_vmtarget_offset, ref);
}
int sun_dyn_DirectMethodHandle::vmindex(oop mh) {
int java_lang_invoke_DirectMethodHandle::vmindex(oop mh) {
assert(is_instance(mh), "DMH only");
return mh->int_field(_vmindex_offset);
}
void sun_dyn_DirectMethodHandle::set_vmindex(oop mh, int index) {
void java_lang_invoke_DirectMethodHandle::set_vmindex(oop mh, int index) {
assert(is_instance(mh), "DMH only");
mh->int_field_put(_vmindex_offset, index);
}
int sun_dyn_BoundMethodHandle::vmargslot(oop mh) {
int java_lang_invoke_BoundMethodHandle::vmargslot(oop mh) {
assert(is_instance(mh), "BMH only");
return mh->int_field(_vmargslot_offset);
}
oop sun_dyn_BoundMethodHandle::argument(oop mh) {
oop java_lang_invoke_BoundMethodHandle::argument(oop mh) {
assert(is_instance(mh), "BMH only");
return mh->obj_field(_argument_offset);
}
int sun_dyn_AdapterMethodHandle::conversion(oop mh) {
int java_lang_invoke_AdapterMethodHandle::conversion(oop mh) {
assert(is_instance(mh), "AMH only");
return mh->int_field(_conversion_offset);
}
void sun_dyn_AdapterMethodHandle::set_conversion(oop mh, int conv) {
void java_lang_invoke_AdapterMethodHandle::set_conversion(oop mh, int conv) {
assert(is_instance(mh), "AMH only");
mh->int_field_put(_conversion_offset, conv);
}
// Support for java_dyn_MethodType
// Support for java_lang_invoke_MethodType
int java_dyn_MethodType::_rtype_offset;
int java_dyn_MethodType::_ptypes_offset;
int java_dyn_MethodType::_form_offset;
int java_lang_invoke_MethodType::_rtype_offset;
int java_lang_invoke_MethodType::_ptypes_offset;
int java_lang_invoke_MethodType::_form_offset;
void java_dyn_MethodType::compute_offsets() {
void java_lang_invoke_MethodType::compute_offsets() {
klassOop k = SystemDictionary::MethodType_klass();
if (k != NULL) {
compute_offset(_rtype_offset, k, vmSymbols::rtype_name(), vmSymbols::class_signature());
compute_offset(_ptypes_offset, k, vmSymbols::ptypes_name(), vmSymbols::class_array_signature());
compute_offset(_form_offset, k, vmSymbols::form_name(), vmSymbols::java_dyn_MethodTypeForm_signature());
compute_offset(_form_offset, k, vmSymbols::form_name(), vmSymbols::java_lang_invoke_MethodTypeForm_signature());
}
}
void java_dyn_MethodType::print_signature(oop mt, outputStream* st) {
void java_lang_invoke_MethodType::print_signature(oop mt, outputStream* st) {
st->print("(");
objArrayOop pts = ptypes(mt);
for (int i = 0, limit = pts->length(); i < limit; i++) {
@ -2429,7 +2453,7 @@ void java_dyn_MethodType::print_signature(oop mt, outputStream* st) {
java_lang_Class::print_signature(rtype(mt), st);
}
Symbol* java_dyn_MethodType::as_signature(oop mt, bool intern_if_not_found, TRAPS) {
Symbol* java_lang_invoke_MethodType::as_signature(oop mt, bool intern_if_not_found, TRAPS) {
ResourceMark rm;
stringStream buffer(128);
print_signature(mt, &buffer);
@ -2444,103 +2468,83 @@ Symbol* java_dyn_MethodType::as_signature(oop mt, bool intern_if_not_found, TRAP
return name;
}
oop java_dyn_MethodType::rtype(oop mt) {
oop java_lang_invoke_MethodType::rtype(oop mt) {
assert(is_instance(mt), "must be a MethodType");
return mt->obj_field(_rtype_offset);
}
objArrayOop java_dyn_MethodType::ptypes(oop mt) {
objArrayOop java_lang_invoke_MethodType::ptypes(oop mt) {
assert(is_instance(mt), "must be a MethodType");
return (objArrayOop) mt->obj_field(_ptypes_offset);
}
oop java_dyn_MethodType::form(oop mt) {
oop java_lang_invoke_MethodType::form(oop mt) {
assert(is_instance(mt), "must be a MethodType");
return mt->obj_field(_form_offset);
}
oop java_dyn_MethodType::ptype(oop mt, int idx) {
oop java_lang_invoke_MethodType::ptype(oop mt, int idx) {
return ptypes(mt)->obj_at(idx);
}
int java_dyn_MethodType::ptype_count(oop mt) {
int java_lang_invoke_MethodType::ptype_count(oop mt) {
return ptypes(mt)->length();
}
// Support for java_dyn_MethodTypeForm
// Support for java_lang_invoke_MethodTypeForm
int java_dyn_MethodTypeForm::_vmslots_offset;
int java_dyn_MethodTypeForm::_erasedType_offset;
int java_dyn_MethodTypeForm::_genericInvoker_offset;
int java_lang_invoke_MethodTypeForm::_vmslots_offset;
int java_lang_invoke_MethodTypeForm::_erasedType_offset;
int java_lang_invoke_MethodTypeForm::_genericInvoker_offset;
void java_dyn_MethodTypeForm::compute_offsets() {
void java_lang_invoke_MethodTypeForm::compute_offsets() {
klassOop k = SystemDictionary::MethodTypeForm_klass();
if (k != NULL) {
compute_optional_offset(_vmslots_offset, k, vmSymbols::vmslots_name(), vmSymbols::int_signature(), true);
compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_dyn_MethodType_signature(), true);
compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_lang_invoke_MethodType_signature(), true);
compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_lang_invoke_MethodHandle_signature(), true);
if (_genericInvoker_offset == 0) _genericInvoker_offset = -1; // set to explicit "empty" value
}
}
int java_dyn_MethodTypeForm::vmslots(oop mtform) {
int java_lang_invoke_MethodTypeForm::vmslots(oop mtform) {
assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
return mtform->int_field(_vmslots_offset);
}
oop java_dyn_MethodTypeForm::erasedType(oop mtform) {
oop java_lang_invoke_MethodTypeForm::erasedType(oop mtform) {
assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
return mtform->obj_field(_erasedType_offset);
}
oop java_dyn_MethodTypeForm::genericInvoker(oop mtform) {
oop java_lang_invoke_MethodTypeForm::genericInvoker(oop mtform) {
assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
return mtform->obj_field(_genericInvoker_offset);
}
// Support for java_dyn_CallSite
// Support for java_lang_invoke_CallSite
int java_dyn_CallSite::_target_offset;
int java_dyn_CallSite::_caller_method_offset;
int java_dyn_CallSite::_caller_bci_offset;
int java_lang_invoke_CallSite::_target_offset;
void java_dyn_CallSite::compute_offsets() {
void java_lang_invoke_CallSite::compute_offsets() {
if (!EnableInvokeDynamic) return;
klassOop k = SystemDictionary::CallSite_klass();
if (k != NULL) {
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature());
compute_offset(_caller_method_offset, k, vmSymbols::vmmethod_name(), vmSymbols::sun_dyn_MemberName_signature());
compute_offset(_caller_bci_offset, k, vmSymbols::vmindex_name(), vmSymbols::int_signature());
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
}
}
oop java_dyn_CallSite::target(oop site) {
oop java_lang_invoke_CallSite::target(oop site) {
return site->obj_field(_target_offset);
}
void java_dyn_CallSite::set_target(oop site, oop target) {
void java_lang_invoke_CallSite::set_target(oop site, oop target) {
site->obj_field_put(_target_offset, target);
}
oop java_dyn_CallSite::caller_method(oop site) {
return site->obj_field(_caller_method_offset);
}
void java_dyn_CallSite::set_caller_method(oop site, oop ref) {
site->obj_field_put(_caller_method_offset, ref);
}
jint java_dyn_CallSite::caller_bci(oop site) {
return site->int_field(_caller_bci_offset);
}
void java_dyn_CallSite::set_caller_bci(oop site, jint bci) {
site->int_field_put(_caller_bci_offset, bci);
}
// Support for java_security_AccessControlContext
@ -2877,16 +2881,16 @@ void JavaClasses::compute_offsets() {
java_lang_Thread::compute_offsets();
java_lang_ThreadGroup::compute_offsets();
if (EnableMethodHandles) {
java_dyn_MethodHandle::compute_offsets();
sun_dyn_MemberName::compute_offsets();
sun_dyn_DirectMethodHandle::compute_offsets();
sun_dyn_BoundMethodHandle::compute_offsets();
sun_dyn_AdapterMethodHandle::compute_offsets();
java_dyn_MethodType::compute_offsets();
java_dyn_MethodTypeForm::compute_offsets();
java_lang_invoke_MethodHandle::compute_offsets();
java_lang_invoke_MemberName::compute_offsets();
java_lang_invoke_DirectMethodHandle::compute_offsets();
java_lang_invoke_BoundMethodHandle::compute_offsets();
java_lang_invoke_AdapterMethodHandle::compute_offsets();
java_lang_invoke_MethodType::compute_offsets();
java_lang_invoke_MethodTypeForm::compute_offsets();
}
if (EnableInvokeDynamic) {
java_dyn_CallSite::compute_offsets();
java_lang_invoke_CallSite::compute_offsets();
}
java_security_AccessControlContext::compute_offsets();
// Initialize reflection classes. The layouts of these classes
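
Nearly everything in this file follows one pattern: compute_offsets() resolves each Java field's byte offset once during VM startup, and the accessors then read or write raw memory at the cached offset. A standalone sketch of that pattern, with a plain byte buffer standing in for an oop:

    #include <cassert>
    #include <cstring>

    // 'Obj' stands in for an oop; the buffer stands in for the instance body.
    struct Obj { char data[64]; };

    static int g_target_offset = -1;     // computed once, like _target_offset

    void compute_offsets(int resolved_offset) {  // normally found by field lookup
      g_target_offset = resolved_offset;
    }

    Obj* target(Obj* site) {             // like java_lang_invoke_CallSite::target
      assert(g_target_offset >= 0 && "compute_offsets must run first");
      Obj* value;
      std::memcpy(&value, site->data + g_target_offset, sizeof(value));
      return value;
    }

    void set_target(Obj* site, Obj* value) {
      assert(g_target_offset >= 0 && "compute_offsets must run first");
      std::memcpy(site->data + g_target_offset, &value, sizeof(value));
    }

The assert mirrors the real accessors' reliance on JavaClasses::compute_offsets() having already run during initialization.
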

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -794,11 +794,11 @@ class java_lang_ref_SoftReference: public java_lang_ref_Reference {
};
// Interface to java.dyn.MethodHandle objects
// Interface to java.lang.invoke.MethodHandle objects
class MethodHandleEntry;
class java_dyn_MethodHandle: AllStatic {
class java_lang_invoke_MethodHandle: AllStatic {
friend class JavaClasses;
private:
@ -839,7 +839,7 @@ class java_dyn_MethodHandle: AllStatic {
static int vmslots_offset_in_bytes() { return _vmslots_offset; }
};
class sun_dyn_DirectMethodHandle: public java_dyn_MethodHandle {
class java_lang_invoke_DirectMethodHandle: public java_lang_invoke_MethodHandle {
friend class JavaClasses;
private:
@ -864,7 +864,7 @@ class sun_dyn_DirectMethodHandle: public java_dyn_MethodHandle {
static int vmindex_offset_in_bytes() { return _vmindex_offset; }
};
class sun_dyn_BoundMethodHandle: public java_dyn_MethodHandle {
class java_lang_invoke_BoundMethodHandle: public java_lang_invoke_MethodHandle {
friend class JavaClasses;
private:
@ -891,7 +891,7 @@ public:
static int vmargslot_offset_in_bytes() { return _vmargslot_offset; }
};
class sun_dyn_AdapterMethodHandle: public sun_dyn_BoundMethodHandle {
class java_lang_invoke_AdapterMethodHandle: public java_lang_invoke_BoundMethodHandle {
friend class JavaClasses;
private:
@ -942,14 +942,14 @@ class sun_dyn_AdapterMethodHandle: public sun_dyn_BoundMethodHandle {
};
// Interface to sun.dyn.MemberName objects
// Interface to java.lang.invoke.MemberName objects
// (These are a private interface for Java code to query the class hierarchy.)
class sun_dyn_MemberName: AllStatic {
class java_lang_invoke_MemberName: AllStatic {
friend class JavaClasses;
private:
// From java.dyn.MemberName:
// From java.lang.invoke.MemberName:
// private Class<?> clazz; // class in which the method is defined
// private String name; // may be null if not yet materialized
// private Object type; // may be null if not yet materialized
@ -1018,9 +1018,9 @@ class sun_dyn_MemberName: AllStatic {
};
// Interface to java.dyn.MethodType objects
// Interface to java.lang.invoke.MethodType objects
class java_dyn_MethodType: AllStatic {
class java_lang_invoke_MethodType: AllStatic {
friend class JavaClasses;
private:
@ -1052,7 +1052,7 @@ class java_dyn_MethodType: AllStatic {
static int form_offset_in_bytes() { return _form_offset; }
};
class java_dyn_MethodTypeForm: AllStatic {
class java_lang_invoke_MethodTypeForm: AllStatic {
friend class JavaClasses;
private:
@ -1075,9 +1075,9 @@ class java_dyn_MethodTypeForm: AllStatic {
};
// Interface to java.dyn.CallSite objects
// Interface to java.lang.invoke.CallSite objects
class java_dyn_CallSite: AllStatic {
class java_lang_invoke_CallSite: AllStatic {
friend class JavaClasses;
private:

View File

@ -88,7 +88,7 @@ int SymbolTable::symbols_counted = 0;
void SymbolTable::unlink() {
int removed = 0;
int total = 0;
int memory_total = 0;
size_t memory_total = 0;
for (int i = 0; i < the_table()->table_size(); ++i) {
for (HashtableEntry<Symbol*>** p = the_table()->bucket_addr(i); *p != NULL; ) {
HashtableEntry<Symbol*>* entry = *p;
@ -112,8 +112,10 @@ void SymbolTable::unlink() {
}
symbols_removed += removed;
symbols_counted += total;
if (PrintGCDetails) {
gclog_or_tty->print(" [Symbols=%d size=%dK] ", total,
// Exclude printing for normal PrintGCDetails because people parse
// this output.
if (PrintGCDetails && Verbose && WizardMode) {
gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", total,
(memory_total*HeapWordSize)/1024);
}
}
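
Two independent fixes sit in this hunk: memory_total is widened from int to size_t so large symbol tables cannot overflow the accounting, and the SIZE_FORMAT macro keeps the print portable across 32- and 64-bit targets. In standalone C++, outside HotSpot's format macros, the equivalent portable conversion is %zu:

    #include <cstdio>
    #include <cstddef>

    int main() {
      size_t memory_total = 3u << 20;              // e.g. 3M words reclaimed
      const size_t heap_word_size = sizeof(void*); // stands in for HeapWordSize
      // %zu is the standard size_t conversion; HotSpot spells it SIZE_FORMAT.
      std::printf(" [Symbols size=%zuK] \n",
                  (memory_total * heap_word_size) / 1024);
      return 0;
    }
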

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1887,27 +1887,99 @@ static const short wk_init_info[] = {
0
};
Symbol* SystemDictionary::find_backup_symbol(Symbol* symbol,
const char* from_prefix,
const char* to_prefix) {
assert(AllowTransitionalJSR292, ""); // delete this subroutine
Symbol* backup_symbol = NULL;
size_t from_len = strlen(from_prefix);
if (strncmp((const char*) symbol->base(), from_prefix, from_len) != 0)
return NULL;
char buf[100];
size_t to_len = strlen(to_prefix);
size_t tail_len = symbol->utf8_length() - from_len;
size_t new_len = to_len + tail_len;
guarantee(new_len < sizeof(buf), "buf too small");
memcpy(buf, to_prefix, to_len);
memcpy(buf + to_len, symbol->base() + from_len, tail_len);
buf[new_len] = '\0';
vmSymbols::SID backup_sid = vmSymbols::find_sid(buf);
if (backup_sid != vmSymbols::NO_SID) {
backup_symbol = vmSymbols::symbol_at(backup_sid);
}
return backup_symbol;
}
Symbol* SystemDictionary::find_backup_class_name(Symbol* symbol) {
assert(AllowTransitionalJSR292, ""); // delete this subroutine
if (symbol == NULL) return NULL;
Symbol* backup_symbol = find_backup_symbol(symbol, "java/lang/invoke/", "java/dyn/"); // AllowTransitionalJSR292 ONLY
if (backup_symbol == NULL)
backup_symbol = find_backup_symbol(symbol, "java/dyn/", "sun/dyn/"); // AllowTransitionalJSR292 ONLY
return backup_symbol;
}
Symbol* SystemDictionary::find_backup_signature(Symbol* symbol) {
assert(AllowTransitionalJSR292, ""); // delete this subroutine
if (symbol == NULL) return NULL;
return find_backup_symbol(symbol, "Ljava/lang/invoke/", "Ljava/dyn/");
}
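
These three subroutines are a transitional shim: they rewrite a symbol under the new java/lang/invoke prefix back to the older java/dyn (and then sun/dyn) spelling, so classes compiled against the pre-rename packages keep resolving while AllowTransitionalJSR292 is enabled, and initialize_wk_klass below walks that chain until some spelling loads. A self-contained sketch of the rewrite and the chain walk, with a toy resolver in place of the real dictionary:

    #include <set>
    #include <string>

    // Rewrite "java/lang/invoke/Foo" -> "java/dyn/Foo" -> "sun/dyn/Foo";
    // an empty result means the backup chain has ended.
    std::string next_backup(const std::string& n) {
      if (n.rfind("java/lang/invoke/", 0) == 0) return "java/dyn/" + n.substr(17);
      if (n.rfind("java/dyn/", 0) == 0)         return "sun/dyn/"  + n.substr(9);
      return std::string();
    }

    // Toy "class path": names that resolve; stands in for resolve_or_null.
    static const std::set<std::string> g_loadable = { "java/dyn/MethodHandle" };

    // Walk name -> backup -> backup(backup) until one spelling loads.
    bool load_well_known(std::string name) {
      while (!name.empty()) {
        if (g_loadable.count(name)) return true;  // loaded under this spelling
        name = next_backup(name);  // e.g. MemberName's three-step chain above
      }
      return false;
    }
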
bool SystemDictionary::initialize_wk_klass(WKID id, int init_opt, TRAPS) {
assert(id >= (int)FIRST_WKID && id < (int)WKID_LIMIT, "oob");
int info = wk_init_info[id - FIRST_WKID];
int sid = (info >> CEIL_LG_OPTION_LIMIT);
Symbol* symbol = vmSymbols::symbol_at((vmSymbols::SID)sid);
klassOop* klassp = &_well_known_klasses[id];
bool must_load = (init_opt < SystemDictionary::Opt);
bool try_load = true;
bool pre_load = (init_opt < SystemDictionary::Opt);
bool try_load = true;
if (init_opt == SystemDictionary::Opt_Kernel) {
#ifndef KERNEL
try_load = false;
#endif //KERNEL
}
if ((*klassp) == NULL && try_load) {
Symbol* backup_symbol = NULL; // symbol to try if the current symbol fails
if (init_opt == SystemDictionary::Pre_JSR292) {
if (!EnableMethodHandles) try_load = false; // do not bother to load such classes
if (AllowTransitionalJSR292) {
backup_symbol = find_backup_class_name(symbol);
if (try_load && PreferTransitionalJSR292) {
while (backup_symbol != NULL) {
(*klassp) = resolve_or_null(backup_symbol, CHECK_0); // try backup early
if (TraceMethodHandles) {
ResourceMark rm;
tty->print_cr("MethodHandles: try backup first for %s => %s (%s)",
symbol->as_C_string(), backup_symbol->as_C_string(),
((*klassp) == NULL) ? "no such class" : "backup load succeeded");
}
if ((*klassp) != NULL) return true;
backup_symbol = find_backup_class_name(backup_symbol); // find next backup
}
}
}
}
if ((*klassp) != NULL) return true;
if (!try_load) return false;
while (symbol != NULL) {
bool must_load = (pre_load && (backup_symbol == NULL));
if (must_load) {
(*klassp) = resolve_or_fail(symbol, true, CHECK_0); // load required class
} else {
(*klassp) = resolve_or_null(symbol, CHECK_0); // load optional klass
}
if ((*klassp) != NULL) return true;
// Go around again. Example of long backup sequence:
// java.lang.invoke.MemberName, java.dyn.MemberName, sun.dyn.MemberName, ONLY if AllowTransitionalJSR292
if (TraceMethodHandles && (backup_symbol != NULL)) {
ResourceMark rm;
tty->print_cr("MethodHandles: backup for %s => %s",
symbol->as_C_string(), backup_symbol->as_C_string());
}
symbol = backup_symbol;
if (AllowTransitionalJSR292)
backup_symbol = find_backup_class_name(symbol);
}
return ((*klassp) != NULL);
return false;
}
void SystemDictionary::initialize_wk_klasses_until(WKID limit_id, WKID &start_id, TRAPS) {
@ -2348,6 +2420,8 @@ methodOop SystemDictionary::find_method_handle_invoke(Symbol* name,
if (THREAD->is_Compiler_thread())
return NULL; // do not attempt from within compiler
bool for_invokeGeneric = (name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name));
if (AllowInvokeForInvokeGeneric && name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name))
for_invokeGeneric = true;
bool found_on_bcp = false;
Handle mt = find_method_handle_type(signature, accessing_klass,
for_invokeGeneric,
@ -2376,7 +2450,7 @@ methodOop SystemDictionary::find_method_handle_invoke(Symbol* name,
}
}
// Ask Java code to find or construct a java.dyn.MethodType for the given
// Ask Java code to find or construct a java.lang.invoke.MethodType for the given
// signature, as interpreted relative to the given class loader.
// Because of class loader constraints, all method handle usage must be
// consistent with this loader.
@ -2430,25 +2504,33 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
}
assert(arg == npts, "");
// call sun.dyn.MethodHandleNatives::findMethodType(Class rt, Class[] pts) -> MethodType
// call java.lang.invoke.MethodHandleNatives::findMethodType(Class rt, Class[] pts) -> MethodType
JavaCallArguments args(Handle(THREAD, rt()));
args.push_oop(pts());
JavaValue result(T_OBJECT);
Symbol* findMethodHandleType_signature = vmSymbols::findMethodHandleType_signature();
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodType_klass()->name() == vmSymbols::java_dyn_MethodType()) {
findMethodHandleType_signature = vmSymbols::findMethodHandleType_TRANS_signature();
}
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::findMethodHandleType_name(),
vmSymbols::findMethodHandleType_signature(),
findMethodHandleType_signature,
&args, CHECK_(empty));
Handle method_type(THREAD, (oop) result.get_jobject());
if (for_invokeGeneric) {
// call sun.dyn.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void
// call java.lang.invoke.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void
JavaCallArguments args(Handle(THREAD, method_type()));
JavaValue no_result(T_VOID);
Symbol* notifyGenericMethodType_signature = vmSymbols::notifyGenericMethodType_signature();
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodType_klass()->name() == vmSymbols::java_dyn_MethodType()) {
notifyGenericMethodType_signature = vmSymbols::notifyGenericMethodType_TRANS_signature();
}
JavaCalls::call_static(&no_result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::notifyGenericMethodType_name(),
vmSymbols::notifyGenericMethodType_signature(),
notifyGenericMethodType_signature,
&args, THREAD);
if (HAS_PENDING_EXCEPTION) {
// If the notification fails, just kill it.
@ -2489,7 +2571,7 @@ Handle SystemDictionary::link_method_handle_constant(KlassHandle caller,
THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad signature", empty);
}
// call sun.dyn.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle
// call java.lang.invoke.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle
JavaCallArguments args;
args.push_oop(caller->java_mirror()); // the referring class
args.push_int(ref_kind);
@ -2497,15 +2579,19 @@ Handle SystemDictionary::link_method_handle_constant(KlassHandle caller,
args.push_oop(name());
args.push_oop(type());
JavaValue result(T_OBJECT);
Symbol* linkMethodHandleConstant_signature = vmSymbols::linkMethodHandleConstant_signature();
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandle_klass()->name() == vmSymbols::java_dyn_MethodHandle()) {
linkMethodHandleConstant_signature = vmSymbols::linkMethodHandleConstant_TRANS_signature();
}
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::linkMethodHandleConstant_name(),
vmSymbols::linkMethodHandleConstant_signature(),
linkMethodHandleConstant_signature,
&args, CHECK_(empty));
return Handle(THREAD, (oop) result.get_jobject());
}
// Ask Java code to find or construct a java.dyn.CallSite for the given
// Ask Java code to find or construct a java.lang.invoke.CallSite for the given
// name and signature, as interpreted relative to the given class loader.
Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
Symbol* name,
@ -2516,13 +2602,13 @@ Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
TRAPS) {
Handle empty;
guarantee(bootstrap_method.not_null() &&
java_dyn_MethodHandle::is_instance(bootstrap_method()),
java_lang_invoke_MethodHandle::is_instance(bootstrap_method()),
"caller must supply a valid BSM");
Handle caller_mname = MethodHandles::new_MemberName(CHECK_(empty));
MethodHandles::init_MemberName(caller_mname(), caller_method());
// call sun.dyn.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos)
// call java.lang.invoke.MethodHandleNatives::makeDynamicCallSite(bootm, name, mtype, info, caller_mname, caller_pos)
oop name_str_oop = StringTable::intern(name, CHECK_(empty)); // not a handle!
JavaCallArguments args(Handle(THREAD, bootstrap_method()));
args.push_oop(name_str_oop);
@ -2531,14 +2617,21 @@ Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
args.push_oop(caller_mname());
args.push_int(caller_bci);
JavaValue result(T_OBJECT);
Symbol* makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_signature();
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandleNatives_klass()->name() == vmSymbols::sun_dyn_MethodHandleNatives()) {
makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_TRANS_signature();
}
if (AllowTransitionalJSR292 && SystemDictionaryHandles::MethodHandleNatives_klass()->name() == vmSymbols::java_dyn_MethodHandleNatives()) {
makeDynamicCallSite_signature = vmSymbols::makeDynamicCallSite_TRANS2_signature();
}
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::makeDynamicCallSite_name(),
vmSymbols::makeDynamicCallSite_signature(),
makeDynamicCallSite_signature,
&args, CHECK_(empty));
oop call_site_oop = (oop) result.get_jobject();
assert(call_site_oop->is_oop()
/*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane");
/*&& java_lang_invoke_CallSite::is_instance(call_site_oop)*/, "must be sane");
if (TraceMethodHandles) {
#ifndef PRODUCT
tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);

View File

@ -144,18 +144,18 @@ class SymbolPropertyTable;
template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
template(MethodHandle_klass, java_dyn_MethodHandle, Opt) \
template(MemberName_klass, sun_dyn_MemberName, Opt) \
template(MethodHandleImpl_klass, sun_dyn_MethodHandleImpl, Opt) \
template(MethodHandleNatives_klass, sun_dyn_MethodHandleNatives, Opt) \
template(AdapterMethodHandle_klass, sun_dyn_AdapterMethodHandle, Opt) \
template(BoundMethodHandle_klass, sun_dyn_BoundMethodHandle, Opt) \
template(DirectMethodHandle_klass, sun_dyn_DirectMethodHandle, Opt) \
template(MethodType_klass, java_dyn_MethodType, Opt) \
template(MethodTypeForm_klass, java_dyn_MethodTypeForm, Opt) \
template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \
template(Linkage_klass, java_dyn_Linkage, Opt) \
template(CallSite_klass, java_dyn_CallSite, Opt) \
template(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292) \
template(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292) \
template(MethodHandleImpl_klass, sun_dyn_MethodHandleImpl, Opt) /* AllowTransitionalJSR292 ONLY */ \
template(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292) \
template(AdapterMethodHandle_klass, java_lang_invoke_AdapterMethodHandle, Pre_JSR292) \
template(BoundMethodHandle_klass, java_lang_invoke_BoundMethodHandle, Pre_JSR292) \
template(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Pre_JSR292) \
template(MethodType_klass, java_lang_invoke_MethodType, Pre_JSR292) \
template(MethodTypeForm_klass, java_lang_invoke_MethodTypeForm, Pre_JSR292) \
template(WrongMethodTypeException_klass, java_lang_invoke_WrongMethodTypeException, Pre_JSR292) \
template(Linkage_klass, java_lang_invoke_Linkage, Opt) /* AllowTransitionalJSR292 ONLY */ \
template(CallSite_klass, java_lang_invoke_CallSite, Pre_JSR292) \
/* Note: MethodHandle must be first, and CallSite last in group */ \
\
template(StringBuffer_klass, java_lang_StringBuffer, Pre) \
@ -207,6 +207,7 @@ class SystemDictionary : AllStatic {
enum InitOption {
Pre, // preloaded; error if not present
Pre_JSR292, // preloaded if EnableMethodHandles
// Order is significant. Options before this point require resolve_or_fail.
// Options after this point will use resolve_or_null instead.
@ -401,6 +402,7 @@ public:
}
static klassOop check_klass_Pre(klassOop k) { return check_klass(k); }
static klassOop check_klass_Pre_JSR292(klassOop k) { return EnableInvokeDynamic ? check_klass(k) : k; }
static klassOop check_klass_Opt(klassOop k) { return k; }
static klassOop check_klass_Opt_Kernel(klassOop k) { return k; } //== Opt
static klassOop check_klass_Opt_Only_JDK15(klassOop k) {
@ -420,6 +422,8 @@ public:
initialize_wk_klasses_until((WKID) limit, start_id, THREAD);
}
static Symbol* find_backup_symbol(Symbol* symbol, const char* from_prefix, const char* to_prefix);
public:
#define WK_KLASS_DECLARE(name, ignore_symbol, option) \
static klassOop name() { return check_klass_##option(_well_known_klasses[WK_KLASS_ENUM_NAME(name)]); }
@ -441,6 +445,9 @@ public:
static void load_abstract_ownable_synchronizer_klass(TRAPS);
static Symbol* find_backup_class_name(Symbol* class_name_symbol);
static Symbol* find_backup_signature(Symbol* signature_symbol);
private:
// Tells whether ClassLoader.loadClassInternal is present
static bool has_loadClassInternal() { return _has_loadClassInternal; }
@ -475,18 +482,18 @@ public:
Handle loader2, bool is_method, TRAPS);
// JSR 292
// find the java.dyn.MethodHandles::invoke method for a given signature
// find the java.lang.invoke.MethodHandles::invoke method for a given signature
static methodOop find_method_handle_invoke(Symbol* name,
Symbol* signature,
KlassHandle accessing_klass,
TRAPS);
// ask Java to compute a java.dyn.MethodType object for a given signature
// ask Java to compute a java.lang.invoke.MethodType object for a given signature
static Handle find_method_handle_type(Symbol* signature,
KlassHandle accessing_klass,
bool for_invokeGeneric,
bool& return_bcp_flag,
TRAPS);
// ask Java to compute a java.dyn.MethodHandle object for a given CP entry
// ask Java to compute a java.lang.invoke.MethodHandle object for a given CP entry
static Handle link_method_handle_constant(KlassHandle caller,
int ref_kind, //e.g., JVM_REF_invokeVirtual
KlassHandle callee,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1671,13 +1671,19 @@ void ClassVerifier::verify_ldc(
VerificationType::long_type(),
VerificationType::long2_type(), CHECK_VERIFY(this));
} else if (tag.is_method_handle()) {
Symbol* methodHandle_name = vmSymbols::java_lang_invoke_MethodHandle();
if (AllowTransitionalJSR292 && !Universe::is_bootstrapping())
methodHandle_name = SystemDictionaryHandles::MethodHandle_klass()->name();
current_frame->push_stack(
VerificationType::reference_type(
vmSymbols::java_dyn_MethodHandle()), CHECK_VERIFY(this));
methodHandle_name), CHECK_VERIFY(this));
} else if (tag.is_method_type()) {
Symbol* methodType_name = vmSymbols::java_lang_invoke_MethodType();
if (AllowTransitionalJSR292 && !Universe::is_bootstrapping())
methodType_name = SystemDictionaryHandles::MethodType_klass()->name();
current_frame->push_stack(
VerificationType::reference_type(
vmSymbols::java_dyn_MethodType()), CHECK_VERIFY(this));
methodType_name), CHECK_VERIFY(this));
} else {
verify_error(bci, "Invalid index in ldc");
return;
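
verify_ldc above models what an ldc of a CONSTANT_MethodHandle or CONSTANT_MethodType pushes on the type stack: a reference whose class name is normally the java.lang.invoke one, but which under AllowTransitionalJSR292 follows whichever class the dictionary actually loaded. A sketch of the tag-to-type-name step, with hypothetical parameters standing in for the dictionary lookups:

    #include <string>

    enum class Tag { MethodHandle, MethodType, Other };

    // Pick the reference type name pushed by ldc for a JSR 292 constant.
    // 'transitional' stands in for AllowTransitionalJSR292 after bootstrap;
    // the loaded_* names stand in for the SystemDictionaryHandles lookups.
    std::string ldc_type_name(Tag tag, bool transitional,
                              const std::string& loaded_mh_name,
                              const std::string& loaded_mt_name) {
      switch (tag) {
        case Tag::MethodHandle:
          return transitional ? loaded_mh_name : "java/lang/invoke/MethodHandle";
        case Tag::MethodType:
          return transitional ? loaded_mt_name : "java/lang/invoke/MethodType";
        default:
          return "";  // other constant tags are handled elsewhere
      }
    }
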

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -277,6 +277,12 @@ vmSymbols::SID vmSymbols::find_sid(Symbol* symbol) {
return sid;
}
vmSymbols::SID vmSymbols::find_sid(const char* symbol_name) {
Symbol* symbol = SymbolTable::probe(symbol_name, (int) strlen(symbol_name));
if (symbol == NULL) return NO_SID;
return find_sid(symbol);
}
static vmIntrinsics::ID wrapper_intrinsic(BasicType type, bool unboxing) {
#define TYPE2(type, unboxing) ((int)(type)*2 + ((unboxing) ? 1 : 0))
switch (TYPE2(type, unboxing)) {

View File

@ -229,33 +229,60 @@
template(base_name, "base") \
\
/* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \
template(java_dyn_InvokeDynamic, "java/dyn/InvokeDynamic") \
template(java_dyn_Linkage, "java/dyn/Linkage") \
template(java_dyn_CallSite, "java/dyn/CallSite") \
template(java_dyn_MethodHandle, "java/dyn/MethodHandle") \
template(java_dyn_MethodType, "java/dyn/MethodType") \
template(java_dyn_WrongMethodTypeException, "java/dyn/WrongMethodTypeException") \
template(java_dyn_MethodType_signature, "Ljava/dyn/MethodType;") \
template(java_dyn_MethodHandle_signature, "Ljava/dyn/MethodHandle;") \
template(java_lang_invoke_InvokeDynamic, "java/lang/invoke/InvokeDynamic") \
template(java_lang_invoke_Linkage, "java/lang/invoke/Linkage") \
template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \
template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \
template(java_lang_invoke_MethodType, "java/lang/invoke/MethodType") \
template(java_lang_invoke_WrongMethodTypeException, "java/lang/invoke/WrongMethodTypeException") \
template(java_lang_invoke_MethodType_signature, "Ljava/lang/invoke/MethodType;") \
template(java_lang_invoke_MethodHandle_signature, "Ljava/lang/invoke/MethodHandle;") \
/* internal classes known only to the JVM: */ \
template(java_dyn_MethodTypeForm, "java/dyn/MethodTypeForm") \
template(java_dyn_MethodTypeForm_signature, "Ljava/dyn/MethodTypeForm;") \
template(sun_dyn_MemberName, "sun/dyn/MemberName") \
template(sun_dyn_MemberName_signature, "Lsun/dyn/MemberName;") \
template(sun_dyn_MethodHandleImpl, "sun/dyn/MethodHandleImpl") \
template(sun_dyn_MethodHandleNatives, "sun/dyn/MethodHandleNatives") \
template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") \
template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") \
template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") \
/* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \
template(java_lang_invoke_MethodTypeForm, "java/lang/invoke/MethodTypeForm") \
template(java_lang_invoke_MethodTypeForm_signature, "Ljava/lang/invoke/MethodTypeForm;") \
template(java_lang_invoke_MemberName, "java/lang/invoke/MemberName") \
template(java_lang_invoke_MethodHandleNatives, "java/lang/invoke/MethodHandleNatives") \
template(java_lang_invoke_AdapterMethodHandle, "java/lang/invoke/AdapterMethodHandle") \
template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
/* temporary transitional public names from 6839872: */ \
template(java_dyn_InvokeDynamic, "java/dyn/InvokeDynamic") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_Linkage, "java/dyn/Linkage") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_CallSite, "java/dyn/CallSite") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodHandle, "java/dyn/MethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodType, "java/dyn/MethodType") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_WrongMethodTypeException, "java/dyn/WrongMethodTypeException") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodType_signature, "Ljava/dyn/MethodType;") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodHandle_signature, "Ljava/dyn/MethodHandle;") /* AllowTransitionalJSR292 ONLY */ \
/* temporary transitional internal names from 6839872: */ \
template(java_dyn_MethodTypeForm, "java/dyn/MethodTypeForm") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodTypeForm_signature, "Ljava/dyn/MethodTypeForm;") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MemberName, "java/dyn/MemberName") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_MethodHandleNatives, "java/dyn/MethodHandleNatives") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_AdapterMethodHandle, "java/dyn/AdapterMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_BoundMethodHandle, "java/dyn/BoundMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(java_dyn_DirectMethodHandle, "java/dyn/DirectMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
/* temporary transitional internal names from EDR: */ \
template(sun_dyn_MemberName, "sun/dyn/MemberName") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_MethodHandleImpl, "sun/dyn/MethodHandleImpl") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_MethodHandleNatives, "sun/dyn/MethodHandleNatives") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") /* AllowTransitionalJSR292 ONLY */ \
/* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \
template(findMethodHandleType_name, "findMethodHandleType") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
template(findMethodHandleType_TRANS_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") /* AllowTransitionalJSR292 ONLY */ \
template(notifyGenericMethodType_name, "notifyGenericMethodType") \
template(notifyGenericMethodType_signature, "(Ljava/dyn/MethodType;)V") \
template(notifyGenericMethodType_signature, "(Ljava/lang/invoke/MethodType;)V") \
template(notifyGenericMethodType_TRANS_signature, "(Ljava/dyn/MethodType;)V") /* AllowTransitionalJSR292 ONLY */ \
template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \
template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/lang/invoke/MethodHandle;") \
template(linkMethodHandleConstant_TRANS_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") /* AllowTransitionalJSR292 ONLY */ \
template(makeDynamicCallSite_name, "makeDynamicCallSite") \
template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \
template(makeDynamicCallSite_signature, "(Ljava/lang/invoke/MethodHandle;Ljava/lang/String;Ljava/lang/invoke/MethodType;Ljava/lang/Object;Ljava/lang/invoke/MemberName;I)Ljava/lang/invoke/CallSite;") \
template(makeDynamicCallSite_TRANS_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") /* AllowTransitionalJSR292 ONLY */ \
template(makeDynamicCallSite_TRANS2_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Ljava/dyn/MemberName;I)Ljava/dyn/CallSite;") /* AllowTransitionalJSR292 ONLY */ \
NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
LP64_ONLY( do_alias(machine_word_signature, long_signature) ) \
\
@ -882,13 +909,15 @@
\
do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_object_array_object_signature, F_R) \
/* (symbols invoke_name and invoke_signature defined above) */ \
do_intrinsic(_checkSpreadArgument, sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
do_intrinsic(_checkSpreadArgument, java_lang_invoke_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
do_intrinsic(_checkSpreadArgument_TRANS,sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) /* AllowTransitionalJSR292 ONLY */ \
do_intrinsic(_checkSpreadArgument_TRANS2,java_dyn_MethodHandleNatives, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) /* AllowTransitionalJSR292 ONLY */ \
do_name( checkSpreadArgument_name, "checkSpreadArgument") \
do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \
do_intrinsic(_invokeExact, java_dyn_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \
do_intrinsic(_invokeGeneric, java_dyn_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \
do_intrinsic(_invokeVarargs, java_dyn_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \
do_intrinsic(_invokeDynamic, java_dyn_InvokeDynamic, star_name, object_array_object_signature, F_SN) \
do_intrinsic(_invokeExact, java_lang_invoke_MethodHandle, invokeExact_name, object_array_object_signature, F_RN) \
do_intrinsic(_invokeGeneric, java_lang_invoke_MethodHandle, invokeGeneric_name, object_array_object_signature, F_RN) \
do_intrinsic(_invokeVarargs, java_lang_invoke_MethodHandle, invokeVarargs_name, object_array_object_signature, F_R) \
do_intrinsic(_invokeDynamic, java_lang_invoke_InvokeDynamic, star_name, object_array_object_signature, F_SN) \
\
/* unboxing methods: */ \
do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \
@ -995,6 +1024,7 @@ class vmSymbols: AllStatic {
// Returns symbol's SID if one is assigned, else NO_SID.
static SID find_sid(Symbol* symbol);
static SID find_sid(const char* symbol_name);
#ifndef PRODUCT
// No need for this in the product:
View File
@ -939,9 +939,16 @@ void CodeCache::print_bounds(outputStream* st) {
_heap->high(),
_heap->high_boundary());
st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
" adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT
" adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT "Kb"
" largest_free_block=" SIZE_FORMAT,
CodeCache::nof_blobs(), CodeCache::nof_nmethods(),
CodeCache::nof_adapters(), CodeCache::unallocated_capacity(),
CodeCache::largest_free_block());
nof_blobs(), nof_nmethods(), nof_adapters(),
unallocated_capacity()/K, largest_free_block());
}
void CodeCache::log_state(outputStream* st) {
st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
" adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'"
" largest_free_block='" SIZE_FORMAT "'",
nof_blobs(), nof_nmethods(), nof_adapters(),
unallocated_capacity(), largest_free_block());
}
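
For reference, the code_cache_full element that CompileBroker::handle_full_code_cache() (further down) now emits via log_state() should look roughly like the line below; the attribute names follow the format strings above, and the values are placeholders:

<code_cache_full total_blobs='...' nmethods='...' adapters='...' free_code_cache='...' largest_free_block='...' stamp='...'/>
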
View File
@ -147,6 +147,7 @@ class CodeCache : AllStatic {
static void verify(); // verifies the code cache
static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
static void print_bounds(outputStream* st); // Prints a summary of the bounds of the code cache
static void log_state(outputStream* st);
// The full limits of the codeCache
static address low_bound() { return (address) _heap->low_boundary(); }
@ -159,7 +160,7 @@ class CodeCache : AllStatic {
static size_t max_capacity() { return _heap->max_capacity(); }
static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
static size_t largest_free_block() { return _heap->largest_free_block(); }
static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
static bool needs_flushing() { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; }
static bool needs_cache_clean() { return _needs_cache_clean; }
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
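
This header change, nmethod::operator new() and the CompileBroker check below all move from unallocated_capacity() to largest_free_block(): an nmethod must be allocated in one contiguous chunk, so a fragmented code cache can report ample total free space while no single block is large enough. A minimal standalone sketch of the distinction (illustrative values and names, not HotSpot code):

#include <cstddef>
#include <iostream>

int main() {
  // Assumed free-block sizes in a fragmented code heap.
  const std::size_t free_blocks[] = {64, 128, 96, 32};
  std::size_t unallocated = 0, largest = 0;
  for (std::size_t b : free_blocks) {
    unallocated += b;
    if (b > largest) largest = b;
  }
  const std::size_t minimum_free = 256;  // stand-in for CodeCacheMinimumFreeSpace
  // Old predicate: not full (320 >= 256); new predicate: full (128 < 256).
  std::cout << "old says full: " << (unallocated < minimum_free)
            << ", new says full: " << (largest < minimum_free) << std::endl;
  return 0;
}
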
View File
@ -170,7 +170,7 @@ struct nmethod_stats_struct {
int pc_desc_resets; // number of resets (= number of caches)
int pc_desc_queries; // queries to nmethod::find_pc_desc
int pc_desc_approx; // number of those which have approximate true
int pc_desc_repeats; // number of _last_pc_desc hits
int pc_desc_repeats; // number of _pc_descs[0] hits
int pc_desc_hits; // number of LRU cache hits
int pc_desc_tests; // total number of PcDesc examinations
int pc_desc_searches; // total number of quasi-binary search steps
@ -278,40 +278,44 @@ static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
if (initial_pc_desc == NULL) {
_last_pc_desc = NULL; // native method
_pc_descs[0] = NULL; // native method; no PcDescs at all
return;
}
NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
// reset the cache by filling it with benign (non-null) values
assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
_last_pc_desc = initial_pc_desc + 1; // first valid one is after sentinel
for (int i = 0; i < cache_size; i++)
_pc_descs[i] = initial_pc_desc;
}
PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
// Note: one might think that caching the most recently
// read value separately would be a win, but one would be
// wrong. When many threads are updating it, the cache
// line it's in would bounce between caches, negating
// any benefit.
// In order to prevent race conditions do not load cache elements
// repeatedly, but use a local copy:
PcDesc* res;
// Step one: Check the most recently returned value.
res = _last_pc_desc;
if (res == NULL) return NULL; // native method; no PcDescs at all
// Step one: Check the most recently added value.
res = _pc_descs[0];
if (res == NULL) return NULL; // native method; no PcDescs at all
if (match_desc(res, pc_offset, approximate)) {
NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
return res;
}
// Step two: Check the LRU cache.
for (int i = 0; i < cache_size; i++) {
// Step two: Check the rest of the LRU cache.
for (int i = 1; i < cache_size; ++i) {
res = _pc_descs[i];
if (res->pc_offset() < 0) break; // optimization: skip empty cache
if (match_desc(res, pc_offset, approximate)) {
NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
_last_pc_desc = res; // record this cache hit in case of repeat
return res;
}
}
@ -322,24 +326,23 @@ PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
// Update the LRU cache by shifting pc_desc forward:
// Update the LRU cache by shifting pc_desc forward.
for (int i = 0; i < cache_size; i++) {
PcDesc* next = _pc_descs[i];
_pc_descs[i] = pc_desc;
pc_desc = next;
}
// Note: Do not update _last_pc_desc. It fronts for the LRU cache.
}
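
The rewrite above replaces the separate _last_pc_desc field with slot 0 of the cache array and loads each shared slot exactly once into a local, per the race-condition comment. A simplified standalone analogue of the pattern (assumed shape, not the real PcDescCache):

struct Desc { int pc_offset; };  // stand-in for PcDesc

template <int N>
struct MruCache {
  Desc* volatile _slots[N];  // written by many threads; races are benign
  Desc* find(int pc_offset) {
    for (int i = 0; i < N; ++i) {
      Desc* res = _slots[i];             // single load per slot (local copy)
      if (res == nullptr) return nullptr;
      if (res->pc_offset == pc_offset) return res;
    }
    return nullptr;
  }
  void add(Desc* d) {                    // newest entry always lands in slot 0
    for (int i = 0; i < N; ++i) {
      Desc* next = _slots[i];
      _slots[i] = d;
      d = next;
    }
  }
};

int main() {
  MruCache<4> c = {};
  Desc d1{10}, d2{20};
  c.add(&d1);
  c.add(&d2);
  return (c.find(20) == &d2 && c._slots[0] == &d2) ? 0 : 1;
}
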
// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
static int adjust_pcs_size(int pcs_size) {
int nsize = round_to(pcs_size, oopSize);
if ((nsize % sizeof(PcDesc)) != 0) {
nsize = pcs_size + sizeof(PcDesc);
}
assert((nsize % oopSize) == 0, "correct alignment");
return nsize;
}
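
A worked instance of the rounding, under the comment's stated assumption (values assumed: oopSize == 8, sizeof(PcDesc) == 12, so 2*sizeof(PcDesc) == 24 is a multiple of 8; pcs_size arrives as a whole number of PcDescs):

#include <cassert>

int main() {
  const int oop_size = 8, pc_desc_size = 12;
  int pcs_size = 3 * pc_desc_size;                                // 36
  int nsize = ((pcs_size + oop_size - 1) / oop_size) * oop_size;  // round_to -> 40
  if (nsize % pc_desc_size != 0) {
    nsize = pcs_size + pc_desc_size;                              // -> 48
  }
  assert(nsize % oop_size == 0 && nsize % pc_desc_size == 0);     // 48 satisfies both
  return 0;
}
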
@ -762,7 +765,7 @@ nmethod::nmethod(
void* nmethod::operator new(size_t size, int nmethod_size) {
// Always leave some room in the CodeCache for I2C/C2I adapters
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) return NULL;
if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) return NULL;
return CodeCache::allocate(nmethod_size);
}
@ -1180,14 +1183,17 @@ void nmethod::mark_as_seen_on_stack() {
set_stack_traversal_mark(NMethodSweeper::traversal_count());
}
// Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_not_entrant_be_converted() {
assert(is_not_entrant(), "must be a non-entrant method");
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
// count can be greater than the stack traversal count before it hits the
// nmethod for the second time.
return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
!is_locked_by_vm();
}
void nmethod::inc_decompile_count() {
@ -1294,6 +1300,7 @@ void nmethod::log_state_change() const {
// Common functionality for both make_not_entrant and make_zombie
bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
assert(!is_zombie(), "should not already be a zombie");
// Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
nmethodLocker nml(this);
@ -1301,11 +1308,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
No_Safepoint_Verifier nsv;
{
// If the method is already zombie there is nothing to do
if (is_zombie()) {
return false;
}
// invalidate osr nmethod before acquiring the patching lock since
// they both acquire leaf locks and we don't want a deadlock.
// This logic is equivalent to the logic below for patching the
@ -1375,13 +1377,12 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
flush_dependencies(NULL);
}
{
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earlier if the GC marked it for unloading).
Pause_No_Safepoint_Verifier pnsv(&nsv);
post_compiled_method_unload();
}
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
// event and it hasn't already been reported for this nmethod then
// report it now. The event may have been reported earlier if the GC
// marked it for unloading. JvmtiDeferredEventQueue support means
// we no longer go to a safepoint here.
post_compiled_method_unload();
#ifdef ASSERT
// It's no longer safe to access the oops section since zombie
@ -1566,7 +1567,7 @@ void nmethod::post_compiled_method_unload() {
if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
assert(!unload_reported(), "already unloaded");
JvmtiDeferredEvent event =
JvmtiDeferredEvent::compiled_method_unload_event(
JvmtiDeferredEvent::compiled_method_unload_event(this,
_jmethod_id, insts_begin());
if (SafepointSynchronize::is_at_safepoint()) {
// Don't want to take the queueing lock. Add it as pending and
@ -1881,7 +1882,7 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
oop nmethod::embeddedOop_at(u_char* p) {
RelocIterator iter(this, p, p + oopSize);
RelocIterator iter(this, p, p + 1);
while (iter.next())
if (iter.type() == relocInfo::oop_type) {
return iter.oop_reloc()->oop_value();
@ -2171,10 +2172,12 @@ nmethodLocker::nmethodLocker(address pc) {
lock_nmethod(_nm);
}
void nmethodLocker::lock_nmethod(nmethod* nm) {
// Only JvmtiDeferredEvent::compiled_method_unload_event()
// should pass zombie_ok == true.
void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
if (nm == NULL) return;
Atomic::inc(&nm->_lock_count);
guarantee(!nm->is_zombie(), "cannot lock a zombie method");
guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
}
void nmethodLocker::unlock_nmethod(nmethod* nm) {
View File
@ -69,14 +69,13 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
enum { cache_size = 4 };
PcDesc* _last_pc_desc; // most recent pc_desc found
PcDesc* _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_last_pc_desc = NULL); }
PcDescCache() { debug_only(_pc_descs[0] = NULL); }
void reset_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);
PcDesc* last_pc_desc() { return _last_pc_desc; }
PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
@ -178,7 +177,7 @@ class nmethod : public CodeBlob {
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
// Protected by Patching_lock
unsigned char _state; // {alive, not_entrant, zombie, unloaded)
unsigned char _state; // {alive, not_entrant, zombie, unloaded}
#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
@ -194,7 +193,10 @@ class nmethod : public CodeBlob {
NOT_PRODUCT(bool _has_debug_info; )
// Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
// Nmethod Flushing lock. If non-zero, then the nmethod is not removed
// and is not made into a zombie. However, once the nmethod is made into
// a zombie, it will be locked one final time if CompiledMethodUnload
// event processing needs to be done.
jint _lock_count;
// not_entrant method removal. Each mark_sweep pass will update
@ -522,8 +524,9 @@ public:
void flush();
public:
// If returning true, it is unsafe to remove this nmethod even though it is a zombie
// nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
// When true is returned, it is unsafe to remove this nmethod even if
// it is a zombie, since the VM or the ServiceThread might still be
// using it.
bool is_locked_by_vm() const { return _lock_count >0; }
// See comment at definition of _last_seen_on_stack
@ -689,13 +692,20 @@ public:
};
// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
nmethod* _nm;
public:
static void lock_nmethod(nmethod* nm); // note: nm can be NULL
// note: nm can be NULL
// Only JvmtiDeferredEvent::compiled_method_unload_event()
// should pass zombie_ok == true.
static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
static void unlock_nmethod(nmethod* nm); // (ditto)
nmethodLocker(address pc); // derive nm from pc
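
A standalone analogue (assumed names, not the real declarations) of the locking discipline these comments describe: a non-zero count pins the nmethod, the RAII wrapper scopes that pin, and zombie_ok models the one caller allowed to lock an existing zombie:

#include <atomic>
#include <cassert>

struct FakeNmethod {
  std::atomic<int> lock_count{0};
  bool zombie = false;
};

struct Locker {
  FakeNmethod* _nm;
  static void lock(FakeNmethod* nm, bool zombie_ok = false) {
    if (nm == nullptr) return;
    nm->lock_count.fetch_add(1);
    assert(zombie_ok || !nm->zombie);  // normally cannot lock a zombie
  }
  static void unlock(FakeNmethod* nm) {
    if (nm != nullptr) nm->lock_count.fetch_sub(1);
  }
  explicit Locker(FakeNmethod* nm) : _nm(nm) { lock(nm); }
  ~Locker() { unlock(_nm); }
};

int main() {
  FakeNmethod nm;
  { Locker l(&nm); assert(nm.lock_count == 1); }  // pinned for the scope
  nm.zombie = true;
  Locker::lock(&nm, true /* zombie_ok */);        // unload-event path only
  Locker::unlock(&nm);
  return 0;
}
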
View File
@ -1364,7 +1364,7 @@ void CompileBroker::compiler_thread_loop() {
// We need this HandleMark to avoid leaking VM handles.
HandleMark hm(thread);
if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) {
// the code cache is really full
handle_full_code_cache();
} else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
@ -1645,11 +1645,13 @@ void CompileBroker::handle_full_code_cache() {
if (UseCompiler || AlwaysCompileLoopMethods ) {
if (xtty != NULL) {
xtty->begin_elem("code_cache_full");
CodeCache::log_state(xtty);
xtty->stamp();
xtty->end_elem();
}
warning("CodeCache is full. Compiler has been disabled.");
warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
CodeCache::print_bounds(tty);
#ifndef PRODUCT
if (CompileTheWorld || ExitOnFullCodeCache) {
before_exit(JavaThread::current());
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -292,13 +292,15 @@ void ConcurrentMarkSweepGeneration::ref_processor_init() {
void CMSCollector::ref_processor_init() {
if (_ref_processor == NULL) {
// Allocate and initialize a reference processor
_ref_processor = ReferenceProcessor::create_ref_processor(
_span, // span
_cmsGen->refs_discovery_is_atomic(), // atomic_discovery
_cmsGen->refs_discovery_is_mt(), // mt_discovery
&_is_alive_closure,
ParallelGCThreads,
ParallelRefProcEnabled);
_ref_processor =
new ReferenceProcessor(_span, // span
(ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
(int) ParallelGCThreads, // mt processing degree
_cmsGen->refs_discovery_is_mt(), // mt discovery
(int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
_cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
&_is_alive_closure, // closure for liveness info
false); // next field updates do not need write barrier
// Initialize the _ref_processor field of CMSGen
_cmsGen->set_ref_processor(_ref_processor);
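
The factory call gives way to a constructor that configures MT reference processing and MT discovery independently, each with its own degree. A standalone sketch of the parameter shape (assumed stand-in types, not HotSpot's ReferenceProcessor):

#include <algorithm>

struct RefProcConfig {
  bool mt_processing;  int processing_degree;
  bool mt_discovery;   int discovery_degree;
  bool atomic_discovery;
};

RefProcConfig cms_style(int parallel_gc_threads, int conc_gc_threads,
                        bool par_ref_proc_enabled,
                        bool discovery_is_mt, bool discovery_is_atomic) {
  return RefProcConfig{
    par_ref_proc_enabled && parallel_gc_threads > 1,  // mt processing
    parallel_gc_threads,                              // mt processing degree
    discovery_is_mt,                                  // mt discovery
    std::max(conc_gc_threads, parallel_gc_threads),   // mt discovery degree
    discovery_is_atomic
  };
}

int main() {
  RefProcConfig c = cms_style(4, 2, true, true, false);
  return (c.mt_processing && c.discovery_degree == 4) ? 0 : 1;
}
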
@ -641,7 +643,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
}
// Support for multi-threaded concurrent phases
if (CollectedHeap::use_parallel_gc_threads() && CMSConcurrentMTEnabled) {
if (CMSConcurrentMTEnabled) {
if (FLAG_IS_DEFAULT(ConcGCThreads)) {
// just for now
FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
@ -1689,6 +1691,8 @@ void CMSCollector::request_full_gc(unsigned int full_gc_count) {
MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
_full_gc_requested = true;
CGC_lock->notify(); // nudge CMS thread
} else {
assert(gc_count > full_gc_count, "Error: causal loop");
}
}
@ -1988,17 +1992,16 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
// Temporarily widen the span of the weak reference processing to
// the entire heap.
MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
ReferenceProcessorSpanMutator x(ref_processor(), new_span);
ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
// Temporarily, clear the "is_alive_non_header" field of the
// reference processor.
ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
// Temporarily make reference _processing_ single threaded (non-MT).
ReferenceProcessorMTProcMutator z(ref_processor(), false);
ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
// Temporarily make refs discovery atomic
ReferenceProcessorAtomicMutator w(ref_processor(), true);
ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
// Temporarily make reference _discovery_ single threaded (non-MT)
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
ref_processor()->set_enqueuing_is_done(false);
ref_processor()->enable_discovery();
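
The rp_mut_* locals above are all instances of one RAII idiom: save a ReferenceProcessor flag, override it for the scope, restore it on destruction. A minimal standalone version of the idiom (assumed names):

struct Flags { bool mt_discovery = true; };  // stand-in state

class MTDiscoveryMutator {
  Flags* _f;
  bool   _saved;
public:
  MTDiscoveryMutator(Flags* f, bool v) : _f(f), _saved(f->mt_discovery) {
    f->mt_discovery = v;                     // override for this scope
  }
  ~MTDiscoveryMutator() { _f->mt_discovery = _saved; }  // restore on exit
};

int main() {
  Flags flags;
  { MTDiscoveryMutator m(&flags, false); }   // e.g. single-threaded compaction
  return flags.mt_discovery ? 0 : 1;         // back to true afterwards
}
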
@ -4263,9 +4266,7 @@ bool CMSCollector::do_marking_mt(bool asynch) {
// Refs discovery is already non-atomic.
assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
// Mutate the Refs discovery so it is MT during the
// multi-threaded marking phase.
ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
conc_workers()->start_task(&tsk);
while (tsk.yielded()) {
@ -4318,6 +4319,8 @@ bool CMSCollector::do_marking_st(bool asynch) {
ResourceMark rm;
HandleMark hm;
// Temporarily make refs discovery single threaded (non-MT)
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
&_markStack, &_revisitStack, CMSYield && asynch);
// the last argument to iterate indicates whether the iteration
@ -4356,10 +4359,6 @@ void CMSCollector::preclean() {
verify_overflow_empty();
_abort_preclean = false;
if (CMSPrecleaningEnabled) {
// Precleaning is currently not MT but the reference processor
// may be set for MT. Disable it temporarily here.
ReferenceProcessor* rp = ref_processor();
ReferenceProcessorMTProcMutator z(rp, false);
_eden_chunk_index = 0;
size_t used = get_eden_used();
size_t capacity = get_eden_capacity();
@ -4502,11 +4501,16 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
_collectorState == AbortablePreclean, "incorrect state");
ResourceMark rm;
HandleMark hm;
// Precleaning is currently not MT but the reference processor
// may be set for MT. Disable it temporarily here.
ReferenceProcessor* rp = ref_processor();
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
// Do one pass of scrubbing the discovered reference lists
// to remove any reference objects with strongly-reachable
// referents.
if (clean_refs) {
ReferenceProcessor* rp = ref_processor();
CMSPrecleanRefsYieldClosure yield_cl(this);
assert(rp->span().equals(_span), "Spans should be equal");
CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
@ -5576,8 +5580,10 @@ void CMSCollector::do_remark_parallel() {
// in the multi-threaded case, but we special-case n=1 here to get
// repeatable measurements of the 1-thread overhead of the parallel code.
if (n_workers > 1) {
// Make refs discovery MT-safe
ReferenceProcessorMTMutator mt(ref_processor(), true);
// Make refs discovery MT-safe, if it isn't already: it may not
// necessarily be so, since it's possible that we are doing
// ST marking.
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
GenCollectedHeap::StrongRootsScope srs(gch);
workers->run_task(&tsk);
} else {
@ -5703,14 +5709,19 @@ public:
CMSBitMap* mark_bit_map,
AbstractWorkGang* workers,
OopTaskQueueSet* task_queues):
// XXX Should superclass AGTWOQ also know about AWG since it knows
// about the task_queues used by the AWG? Then it could initialize
// the terminator() object. See 6984287. The set_for_termination()
// below is a temporary band-aid for the regression in 6984287.
AbstractGangTaskWOopQueues("Process referents by policy in parallel",
task_queues),
_task(task),
_collector(collector), _span(span), _mark_bit_map(mark_bit_map)
{
assert(_collector->_span.equals(_span) && !_span.is_empty(),
"Inconsistency in _span");
}
{
assert(_collector->_span.equals(_span) && !_span.is_empty(),
"Inconsistency in _span");
set_for_termination(workers->active_workers());
}
OopTaskQueueSet* task_queues() { return queues(); }
@ -5872,8 +5883,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
rp->set_mt_degree(ParallelGCThreads);
rp->set_active_mt_degree(ParallelGCThreads);
CMSRefProcTaskExecutor task_executor(*this);
rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1133,7 +1133,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// rare that the cost of the CAS's involved is in the
// noise. That's a measurement that should be done, and
// the code simplified if that turns out to be the case.
return false;
return ConcGCThreads > 1;
}
// Override
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,7 +51,7 @@ int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0;
volatile bool ConcurrentMarkSweepThread::_icms_enabled = false;
volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0;
volatile bool ConcurrentMarkSweepThread::_should_run = false;
// When icms is enabled, the icms thread is stopped until explicitly
// started.
@ -84,7 +84,7 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
}
}
_sltMonitor = SLT_lock;
set_icms_enabled(CMSIncrementalMode);
assert(!CMSIncrementalMode || icms_is_enabled(), "Error");
}
void ConcurrentMarkSweepThread::run() {
@ -341,11 +341,11 @@ void ConcurrentMarkSweepThread::stop_icms() {
void ConcurrentMarkSweepThread::icms_wait() {
assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
if (_should_stop && icms_enabled()) {
if (_should_stop && icms_is_enabled()) {
MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
trace_state("pause_icms");
_collector->stats().stop_cms_timer();
while(!_should_run && icms_enabled()) {
while(!_should_run && icms_is_enabled()) {
iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
}
_collector->stats().start_cms_timer();
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@
class ConcurrentMarkSweepGeneration;
class CMSCollector;
// The Concurrent Mark Sweep GC Thread (could be several in the future).
// The Concurrent Mark Sweep GC Thread
class ConcurrentMarkSweepThread: public ConcurrentGCThread {
friend class VMStructs;
friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship
@ -55,8 +55,6 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
static SurrogateLockerThread::SLT_msg_type _sltBuffer;
static Monitor* _sltMonitor;
ConcurrentMarkSweepThread* _next;
static bool _should_terminate;
enum CMS_flag_type {
@ -84,7 +82,7 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// Tracing messages, enabled by CMSTraceThreadState.
static inline void trace_state(const char* desc);
static volatile bool _icms_enabled; // iCMS enabled?
static volatile int _icms_disabled; // a counter to track #iCMS disable & enable
static volatile bool _should_run; // iCMS may run
static volatile bool _should_stop; // iCMS should stop
@ -214,10 +212,25 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// Incremental mode is enabled globally by the flag CMSIncrementalMode. It
// must also be enabled/disabled dynamically to allow foreground collections.
static inline void enable_icms() { _icms_enabled = true; }
static inline void disable_icms() { _icms_enabled = false; }
static inline void set_icms_enabled(bool val) { _icms_enabled = val; }
static inline bool icms_enabled() { return _icms_enabled; }
#define ICMS_ENABLING_ASSERT \
assert((CMSIncrementalMode && _icms_disabled >= 0) || \
(!CMSIncrementalMode && _icms_disabled <= 0), "Error")
static inline void enable_icms() {
ICMS_ENABLING_ASSERT;
Atomic::dec(&_icms_disabled);
}
static inline void disable_icms() {
ICMS_ENABLING_ASSERT;
Atomic::inc(&_icms_disabled);
}
static inline bool icms_is_disabled() {
ICMS_ENABLING_ASSERT;
return _icms_disabled > 0;
}
static inline bool icms_is_enabled() {
return !icms_is_disabled();
}
};
inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
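
Replacing the _icms_enabled bool with a signed _icms_disabled counter lets disable/enable requests nest, so overlapping foreground collections cannot re-enable iCMS prematurely. A standalone sketch of the counting behavior (assumed free functions):

#include <atomic>
#include <cassert>

static std::atomic<int> icms_disabled{0};  // analogue of _icms_disabled

void disable_icms() { icms_disabled.fetch_add(1); }
void enable_icms()  { icms_disabled.fetch_sub(1); }
bool icms_is_enabled() { return icms_disabled.load() <= 0; }

int main() {
  disable_icms();             // foreground collection A starts
  disable_icms();             // overlapping foreground collection B
  enable_icms();              // A finishes; iCMS must stay off for B
  assert(!icms_is_enabled());
  enable_icms();              // B finishes; iCMS may run again
  assert(icms_is_enabled());
  return 0;
}
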
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -192,14 +192,18 @@ void VM_GenCollectFullConcurrent::doit() {
"total_collections() should be monotonically increasing");
MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
if (gch->total_full_collections() == _full_gc_count_before) {
// Disable iCMS until the full collection is done.
// Disable iCMS until the full collection is done, and
// remember that we did so.
CMSCollector::disable_icms();
_disabled_icms = true;
// In case CMS thread was in icms_wait(), wake it up.
CMSCollector::start_icms();
// Nudge the CMS thread to start a concurrent collection.
CMSCollector::request_full_gc(_full_gc_count_before);
} else {
assert(_full_gc_count_before < gch->total_full_collections(), "Error");
FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
}
}
@ -259,6 +263,8 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
}
}
// Enable iCMS back.
CMSCollector::enable_icms();
// Enable iCMS back if we disabled it earlier.
if (_disabled_icms) {
CMSCollector::enable_icms();
}
}
View File
@ -128,11 +128,14 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
// VM operation to invoke a concurrent collection of the heap as a
// GenCollectedHeap heap.
class VM_GenCollectFullConcurrent: public VM_GC_Operation {
bool _disabled_icms;
public:
VM_GenCollectFullConcurrent(unsigned int gc_count_before,
unsigned int full_gc_count_before,
GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */) {
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
_disabled_icms(false)
{
assert(FullGCCount_lock != NULL, "Error");
assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
}
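
The new _disabled_icms field implements a remember-what-you-did pattern: only the operation that actually disabled iCMS re-enables it in its epilogue. A compact sketch (assumed shape, not the real VM operation):

struct FullGCOp {
  bool _disabled_icms = false;
  void doit(bool gc_still_pending) {
    if (gc_still_pending) {
      // disable_icms() would be called here...
      _disabled_icms = true;          // ...and we remember that we did it
    }
  }
  void doit_epilogue() {
    if (_disabled_icms) {
      // enable_icms();               // undo only what this op changed
    }
  }
};

int main() {
  FullGCOp op;
  op.doit(false);                     // another thread already did the full GC
  op.doit_epilogue();                 // nothing to undo; iCMS state untouched
  return 0;
}
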
View File
@ -373,7 +373,7 @@ void ConcurrentG1Refine::clean_up_cache(int worker_i,
// RSet updating while within an evacuation pause.
// In this case worker_i should be the id of a GC worker thread
assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "incorrect worker id");
assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
into_cset_dcq->enqueue(entry);
}
}
View File
@ -1828,7 +1828,7 @@ void ConcurrentMark::completeCleanup() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_cleanup_list.verify_optional();
FreeRegionList local_free_list("Local Cleanup List");
FreeRegionList tmp_free_list("Tmp Free List");
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
@ -1842,7 +1842,7 @@ void ConcurrentMark::completeCleanup() {
HeapRegion* hr = _cleanup_list.remove_head();
assert(hr != NULL, "the list was not empty");
hr->rem_set()->clear();
local_free_list.add_as_tail(hr);
tmp_free_list.add_as_tail(hr);
// Instead of adding one region at a time to the secondary_free_list,
// we accumulate them in the local list and move them a few at a
@ -1850,20 +1850,20 @@ void ConcurrentMark::completeCleanup() {
// we do during this process. We'll also append the local list when
// _cleanup_list is empty (which means we just removed the last
// region from the _cleanup_list).
if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
_cleanup_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
"appending "SIZE_FORMAT" entries to the "
"secondary_free_list, clean list still has "
SIZE_FORMAT" entries",
local_free_list.length(),
tmp_free_list.length(),
_cleanup_list.length());
}
{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
g1h->secondary_free_list_add_as_tail(&local_free_list);
g1h->secondary_free_list_add_as_tail(&tmp_free_list);
SecondaryFreeList_lock->notify_all();
}
@ -1874,7 +1874,7 @@ void ConcurrentMark::completeCleanup() {
}
}
}
assert(local_free_list.is_empty(), "post-condition");
assert(tmp_free_list.is_empty(), "post-condition");
}
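
The hunk above batches lock acquisitions: freed regions accumulate in tmp_free_list and are appended to the shared secondary_free_list only every G1SecondaryFreeListAppendLength regions, or when the cleanup list empties. A standalone sketch of the batching (assumed value for the append length):

#include <mutex>
#include <vector>

int main() {
  const std::size_t kAppendLength = 5;  // stand-in for G1SecondaryFreeListAppendLength
  std::vector<int> cleanup_list(23, 0);
  std::vector<int> secondary_free_list, tmp_free_list;
  std::mutex secondary_free_list_lock;

  while (!cleanup_list.empty()) {
    tmp_free_list.push_back(cleanup_list.back());
    cleanup_list.pop_back();
    // Take the shared lock only every kAppendLength regions, or at the end.
    if (tmp_free_list.size() % kAppendLength == 0 || cleanup_list.empty()) {
      std::lock_guard<std::mutex> g(secondary_free_list_lock);
      secondary_free_list.insert(secondary_free_list.end(),
                                 tmp_free_list.begin(), tmp_free_list.end());
      tmp_free_list.clear();
    }
  }
  return secondary_free_list.size() == 23 ? 0 : 1;
}
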
// Support closures for reference processing in G1
@ -2141,21 +2141,22 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
G1CMDrainMarkingStackClosure
g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
// We use the work gang from the G1CollectedHeap and we utilize all
// the worker threads.
int active_workers = MAX2(MIN2(g1h->workers()->total_workers(), (int)_max_task_num), 1);
int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
g1h->workers(), active_workers);
if (rp->processing_is_mt()) {
// Set the degree of MT here. If the discovery is done MT, there
// may have been a different number of threads doing the discovery
// and a different number of discovered lists may have Ref objects.
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
rp->set_mt_degree(active_workers);
rp->set_active_mt_degree(active_workers);
rp->process_discovered_references(&g1_is_alive,
&g1_keep_alive,
@ -3182,7 +3183,7 @@ public:
template <class T> void do_oop_work(T* p) {
assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
assert(!_g1h->is_on_free_list(
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) p)), "invariant");
oop obj = oopDesc::load_decode_heap_oop(p);
@ -3403,7 +3404,7 @@ void CMTask::deal_with_reference(oop obj) {
void CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj;
assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
assert(!_g1h->is_on_free_list(
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
assert(!_g1h->is_obj_ill(obj), "invariant");
assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
@ -3649,7 +3650,7 @@ void CMTask::drain_local_queue(bool partially) {
(void*) obj);
assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
assert(!_g1h->is_on_free_list(
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) obj)), "invariant");
scan_object(obj);
View File
@ -237,9 +237,9 @@ void ConcurrentMarkThread::run() {
// The following will finish freeing up any regions that we
// found to be empty during cleanup. We'll do this part
// without joining the suspendible set. If an evacuation pause
// takes places, then we would carry on freeing regions in
// takes place, then we would carry on freeing regions in
// case they are needed by the pause. If a Full GC takes
// places, it would wait for us to process the regions
// place, it would wait for us to process the regions
// reclaimed by cleanup.
double cleanup_start_sec = os::elapsedTime();
View File
@ -479,7 +479,7 @@ G1CollectedHeap* G1CollectedHeap::_g1h;
// Private methods.
HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
G1CollectedHeap::new_region_try_secondary_free_list() {
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
while (!_secondary_free_list.is_empty() || free_regions_coming()) {
if (!_secondary_free_list.is_empty()) {
@ -531,7 +531,7 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"forced to look at the secondary_free_list");
}
res = new_region_try_secondary_free_list(word_size);
res = new_region_try_secondary_free_list();
if (res != NULL) {
return res;
}
@ -543,7 +543,7 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"res == NULL, trying the secondary_free_list");
}
res = new_region_try_secondary_free_list(word_size);
res = new_region_try_secondary_free_list();
}
if (res == NULL && do_expand) {
if (expand(word_size * HeapWordSize)) {
@ -579,6 +579,9 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
size_t word_size) {
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
int first = -1;
if (num_regions == 1) {
// Only one region to allocate, no need to go through the slower
@ -600,7 +603,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
// request. If we are only allocating one region we use the common
// region allocation code (see above).
wait_while_free_regions_coming();
append_secondary_free_list_if_not_empty();
append_secondary_free_list_if_not_empty_with_lock();
if (free_regions() >= num_regions) {
first = _hrs->find_contiguous(num_regions);
@ -608,7 +611,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
for (int i = first; i < first + (int) num_regions; ++i) {
HeapRegion* hr = _hrs->at(i);
assert(hr->is_empty(), "sanity");
assert(is_on_free_list(hr), "sanity");
assert(is_on_master_free_list(hr), "sanity");
hr->set_pending_removal(true);
}
_free_list.remove_all_pending(num_regions);
@ -618,6 +621,126 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
return first;
}
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
size_t num_regions,
size_t word_size) {
assert(first != -1, "pre-condition");
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
// Index of last region in the series + 1.
int last = first + (int) num_regions;
// We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with
// refinement threads refining cards on these regions and
// potentially wanting to refine the BOT as they are scanning
// those cards (this can happen shortly after a cleanup; see CR
// 6991377). So we have to set up the region(s) carefully and in
// a specific order.
// The word size sum of all the regions we will allocate.
size_t word_size_sum = num_regions * HeapRegion::GrainWords;
assert(word_size <= word_size_sum, "sanity");
// This will be the "starts humongous" region.
HeapRegion* first_hr = _hrs->at(first);
// The header of the new object will be placed at the bottom of
// the first region.
HeapWord* new_obj = first_hr->bottom();
// This will be the new end of the first region in the series that
// should also match the end of the last region in the series.
HeapWord* new_end = new_obj + word_size_sum;
// This will be the new top of the first region that will reflect
// this allocation.
HeapWord* new_top = new_obj + word_size;
// First, we need to zero the header of the space that we will be
// allocating. When we update top further down, some refinement
// threads might try to scan the region. By zeroing the header we
// ensure that any thread that will try to scan the region will
// come across the zero klass word and bail out.
//
// NOTE: It would not have been correct to have used
// CollectedHeap::fill_with_object() and make the space look like
// an int array. The thread that is doing the allocation will
// later update the object header to a potentially different array
// type and, for a very short period of time, the klass and length
// fields will be inconsistent. This could cause a refinement
// thread to calculate the object size incorrectly.
Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
// We will set up the first region as "starts humongous". This
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_startsHumongous(new_top, new_end);
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
for (int i = first + 1; i < last; ++i) {
hr = _hrs->at(i);
hr->set_continuesHumongous(first_hr);
}
// If we have "continues humongous" regions (hr != NULL), then the
// end of the last one should match new_end.
assert(hr == NULL || hr->end() == new_end, "sanity");
// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
// fields still point to bottom, so the intersection between
// [bottom,top] and [card_start,card_end] will be empty. Before we
// update the top fields, we'll do a storestore to make sure that
// no thread sees the update to top before the zeroing of the
// object header and the BOT initialization.
OrderAccess::storestore();
// Now that the BOT and the object header have been initialized,
// we can update top of the "starts humongous" region.
assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
"new_top should be in this region");
first_hr->set_top(new_top);
// Now, we will update the top fields of the "continues humongous"
// regions. The reason we need to do this is that, otherwise,
// these regions would look empty and this will confuse parts of
// G1. For example, the code that looks for a consecutive number
// of empty regions will consider them empty and try to
// re-allocate them. We can extend is_empty() to also include
// !continuesHumongous(), but it is easier to just update the top
// fields here. The way we set top for all regions (i.e., top ==
// end for all regions but the last one, top == new_top for the
// last one) is actually used when we will free up the humongous
// region in free_humongous_region().
hr = NULL;
for (int i = first + 1; i < last; ++i) {
hr = _hrs->at(i);
if ((i + 1) == last) {
// last continues humongous region
assert(hr->bottom() < new_top && new_top <= hr->end(),
"new_top should fall on this region");
hr->set_top(new_top);
} else {
// not last one
assert(new_top > hr->end(), "new_top should be above this region");
hr->set_top(hr->end());
}
}
// If we have continues humongous regions (hr != NULL), then the
// end of the last one should match new_end and its top should
// match new_top.
assert(hr == NULL ||
(hr->end() == new_end && hr->top() == new_top), "sanity");
assert(first_hr->used() == word_size * HeapWordSize, "invariant");
_summary_bytes_used += first_hr->used();
_humongous_set.add(first_hr);
return new_obj;
}
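
The ordering constraints spelled out in the comments reduce to publish-after-initialize: zero the header, emit a storestore barrier, then update top. A minimal standalone rendering of just that ordering (assumed types; std::atomic_thread_fence stands in for OrderAccess::storestore()):

#include <atomic>

struct Region {
  long* bottom;
  std::atomic<long*> top;
};

void publish_humongous(Region& r, long* new_top) {
  *r.bottom = 0;                                        // zero the klass word first
  std::atomic_thread_fence(std::memory_order_release);  // ~ OrderAccess::storestore()
  r.top.store(new_top, std::memory_order_relaxed);      // now scanners may see it
}

int main() {
  static long words[16] = {1};
  Region r{words, {nullptr}};
  r.top.store(words);                                   // top == bottom: looks empty
  publish_humongous(r, words + 8);
  return (*words == 0 && r.top.load() == words + 8) ? 0 : 1;
}
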
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
@ -653,121 +776,16 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
}
}
HeapWord* result = NULL;
if (first != -1) {
// Index of last region in the series + 1.
int last = first + (int) num_regions;
// We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with
// refinement threads refining cards on these regions and
// potentially wanting to refine the BOT as they are scanning
// those cards (this can happen shortly after a cleanup; see CR
// 6991377). So we have to set up the region(s) carefully and in
// a specific order.
// The word size sum of all the regions we will allocate.
size_t word_size_sum = num_regions * HeapRegion::GrainWords;
assert(word_size <= word_size_sum, "sanity");
// This will be the "starts humongous" region.
HeapRegion* first_hr = _hrs->at(first);
// The header of the new object will be placed at the bottom of
// the first region.
HeapWord* new_obj = first_hr->bottom();
// This will be the new end of the first region in the series that
// should also match the end of the last region in the series.
HeapWord* new_end = new_obj + word_size_sum;
// This will be the new top of the first region that will reflect
// this allocation.
HeapWord* new_top = new_obj + word_size;
// First, we need to zero the header of the space that we will be
// allocating. When we update top further down, some refinement
// threads might try to scan the region. By zeroing the header we
// ensure that any thread that will try to scan the region will
// come across the zero klass word and bail out.
//
// NOTE: It would not have been correct to have used
// CollectedHeap::fill_with_object() and make the space look like
// an int array. The thread that is doing the allocation will
// later update the object header to a potentially different array
// type and, for a very short period of time, the klass and length
// fields will be inconsistent. This could cause a refinement
// thread to calculate the object size incorrectly.
Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
// We will set up the first region as "starts humongous". This
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_startsHumongous(new_top, new_end);
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
for (int i = first + 1; i < last; ++i) {
hr = _hrs->at(i);
hr->set_continuesHumongous(first_hr);
}
// If we have "continues humongous" regions (hr != NULL), then the
// end of the last one should match new_end.
assert(hr == NULL || hr->end() == new_end, "sanity");
// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
// fields still point to bottom, so the intersection between
// [bottom,top] and [card_start,card_end] will be empty. Before we
// update the top fields, we'll do a storestore to make sure that
// no thread sees the update to top before the zeroing of the
// object header and the BOT initialization.
OrderAccess::storestore();
// Now that the BOT and the object header have been initialized,
// we can update top of the "starts humongous" region.
assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
"new_top should be in this region");
first_hr->set_top(new_top);
// Now, we will update the top fields of the "continues humongous"
// regions. The reason we need to do this is that, otherwise,
// these regions would look empty and this will confuse parts of
// G1. For example, the code that looks for a consecutive number
// of empty regions will consider them empty and try to
// re-allocate them. We can extend is_empty() to also include
// !continuesHumongous(), but it is easier to just update the top
// fields here. The way we set top for all regions (i.e., top ==
// end for all regions but the last one, top == new_top for the
// last one) is actually used when we will free up the humongous
// region in free_humongous_region().
hr = NULL;
for (int i = first + 1; i < last; ++i) {
hr = _hrs->at(i);
if ((i + 1) == last) {
// last continues humongous region
assert(hr->bottom() < new_top && new_top <= hr->end(),
"new_top should fall on this region");
hr->set_top(new_top);
} else {
// not last one
assert(new_top > hr->end(), "new_top should be above this region");
hr->set_top(hr->end());
}
}
// If we have continues humongous regions (hr != NULL), then the
// end of the last one should match new_end and its top should
// match new_top.
assert(hr == NULL ||
(hr->end() == new_end && hr->top() == new_top), "sanity");
assert(first_hr->used() == word_size * HeapWordSize, "invariant");
_summary_bytes_used += first_hr->used();
_humongous_set.add(first_hr);
return new_obj;
result =
humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
assert(result != NULL, "it should always return a valid result");
}
verify_region_sets_optional();
return NULL;
return result;
}
void
@ -1389,7 +1407,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
g1_policy()->record_full_collection_start();
wait_while_free_regions_coming();
append_secondary_free_list_if_not_empty();
append_secondary_free_list_if_not_empty_with_lock();
gc_prologue(true);
increment_total_collections(true /* full gc */);
@ -1444,7 +1462,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
// how reference processing currently works in G1.
// Temporarily make reference _discovery_ single threaded (non-MT).
ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
// Temporarily make refs discovery atomic
ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
@ -2201,16 +2219,16 @@ void G1CollectedHeap::ref_processing_init() {
SharedHeap::ref_processing_init();
MemRegion mr = reserved_region();
_ref_processor = ReferenceProcessor::create_ref_processor(
mr, // span
false, // Reference discovery is not atomic
true, // mt_discovery
&_is_alive_closure, // is alive closure
// for efficiency
ParallelGCThreads,
ParallelRefProcEnabled,
true); // Setting next fields of discovered
// lists requires a barrier.
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // degree of mt processing
ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery
(int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
false, // Reference discovery is not atomic
&_is_alive_closure, // is alive closure for efficiency
true); // Setting next fields of discovered
// lists requires a barrier.
}
size_t G1CollectedHeap::capacity() const {
@ -3377,15 +3395,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TraceMemoryManagerStats tms(false /* fullGC */);
// If there are any free regions available on the secondary_free_list
// make sure we append them to the free_list. However, we don't
// have to wait for the rest of the cleanup operation to
// finish. If it's still going on that's OK. If we run out of
// regions, the region allocation code will check the
// secondary_free_list and potentially wait if more free regions
// are coming (see new_region_try_secondary_free_list()).
// If the secondary_free_list is not empty, append it to the
// free_list. No need to wait for the cleanup operation to finish;
// the region allocation code will check the secondary_free_list
// and wait if necessary. If the G1StressConcRegionFreeing flag is
// set, skip this step so that the region allocation code has to
// get entries from the secondary_free_list.
if (!G1StressConcRegionFreeing) {
append_secondary_free_list_if_not_empty();
append_secondary_free_list_if_not_empty_with_lock();
}
increment_gc_time_stamp();
@ -5199,7 +5216,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
size_t rs_lengths = 0;
while (cur != NULL) {
assert(!is_on_free_list(cur), "sanity");
assert(!is_on_master_free_list(cur), "sanity");
if (non_young) {
if (cur->is_young()) {
@ -5543,13 +5560,10 @@ void G1CollectedHeap::verify_region_sets() {
return;
}
{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
// Make sure we append the secondary_free_list on the free_list so
// that all free regions we will come across can be safely
// attributed to the free_list.
append_secondary_free_list();
}
// Make sure we append the secondary_free_list on the free_list so
// that all free regions we will come across can be safely
// attributed to the free_list.
append_secondary_free_list_if_not_empty_with_lock();
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
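
The repeated *_if_not_empty_with_lock calls suggest a helper that does a cheap unlocked emptiness check before taking SecondaryFreeList_lock. A guess at its shape as a standalone sketch (assumed names; the real helper presumably re-checks or tolerates the benign race):

#include <mutex>
#include <vector>

static std::vector<int> secondary_free_list{1, 2, 3};
static std::vector<int> master_free_list;
static std::mutex secondary_free_list_lock;

void append_secondary_free_list_if_not_empty_with_lock() {
  if (secondary_free_list.empty()) return;  // cheap unlocked check
  std::lock_guard<std::mutex> g(secondary_free_list_lock);
  master_free_list.insert(master_free_list.end(),
                          secondary_free_list.begin(),
                          secondary_free_list.end());
  secondary_free_list.clear();
}

int main() {
  append_secondary_free_list_if_not_empty_with_lock();
  return master_free_list.size() == 3 ? 0 : 1;
}
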
View File
@ -56,7 +56,6 @@ class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentZFThread;
typedef OverflowTaskQueue<StarTask> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
@ -64,12 +63,6 @@ typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
enum G1GCThreadGroups {
G1CRGroup = 0,
G1ZFGroup = 1,
G1CMGroup = 2
};
enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
@ -294,9 +287,9 @@ private:
// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.
#define heap_locking_asserts_err_msg(__extra_message) \
#define heap_locking_asserts_err_msg(_extra_message_) \
err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
(__extra_message), \
(_extra_message_), \
BOOL_TO_STR(Heap_lock->owned_by_self()), \
BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
BOOL_TO_STR(Thread::current()->is_VM_thread()))
@ -307,11 +300,11 @@ private:
heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
} while (0)
#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \
#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
do { \
assert(Heap_lock->owned_by_self() || \
(SafepointSynchronize::is_at_safepoint() && \
((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
"should be at a safepoint")); \
} while (0)
@ -338,10 +331,10 @@ private:
"should not be at a safepoint")); \
} while (0)
#define assert_at_safepoint(__should_be_vm_thread) \
#define assert_at_safepoint(_should_be_vm_thread_) \
do { \
assert(SafepointSynchronize::is_at_safepoint() && \
((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
heap_locking_asserts_err_msg("should be at a safepoint")); \
} while (0)
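
The macro-parameter renames in these hunks (double-underscore names to trailing-underscore names) avoid identifiers that C++ reserves for the implementation: any name containing __ is formally off-limits to user code. A one-line reminder:

// #define demo(__extra_message) ...       // reserved: contains a double underscore
#define demo(_extra_message_) (_extra_message_)  // fine: no __ and no leading _Capital
int main() { return demo(0); }
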
@ -371,35 +364,40 @@ protected:
// will check whether there's anything available in the
// secondary_free_list and/or wait for more regions to appear in that
// list, if _free_regions_coming is set.
HeapRegion* new_region_try_secondary_free_list(size_t word_size);
HeapRegion* new_region_try_secondary_free_list();
// It will try to allocate a single non-humongous HeapRegion
// sufficient for an allocation of the given word_size. If
// do_expand is true, it will attempt to expand the heap if
// necessary to satisfy the allocation request. Note that word_size
// is only used to make sure that we expand sufficiently but, given
// that the allocation request is assumed not to be humongous,
// having word_size is not strictly necessary (expanding by a single
// region will always be sufficient). But let's keep that parameter
// in case we need it in the future.
// Try to allocate a single non-humongous HeapRegion sufficient for
// an allocation of the given word_size. If do_expand is true,
// attempt to expand the heap if necessary to satisfy the allocation
// request.
HeapRegion* new_region_work(size_t word_size, bool do_expand);
// It will try to allocate a new region to be used for allocation by
// mutator threads. It will not try to expand the heap if not region
// is available.
// Try to allocate a new region to be used for allocation by a
// mutator thread. Attempt to expand the heap if no region is
// available.
HeapRegion* new_alloc_region(size_t word_size) {
return new_region_work(word_size, false /* do_expand */);
}
// It will try to allocate a new region to be used for allocation by
// a GC thread. It will try to expand the heap if no region is
// available.
// Try to allocate a new region to be used for allocation by a GC
// thread. Attempt to expand the heap if no region is available.
HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
// Attempt to satisfy a humongous allocation request of the given
// size by finding a contiguous set of free regions of num_regions
// length and remove them from the master free list. Return the
// index of the first region or -1 if the search was unsuccessful.
int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
// Attempt to allocate an object of the given (very large) "word_size".
// Returns "NULL" on failure.
// Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single
// humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(int first,
size_t num_regions,
size_t word_size);
// Attempt to allocate a humongous object of the given size. Return
// NULL if unsuccessful.
HeapWord* humongous_obj_allocate(size_t word_size);
// The following two methods, allocate_new_tlab() and
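Taken together, the two helpers declared above suggest how humongous_obj_allocate() is assembled. The following is a hedged sketch of that composition; the actual body is not part of this hunk, and region_words stands in for the real region size constant:
// Sketch only: plausible composition of the two helpers declared above.
HeapWord* humongous_obj_allocate_sketch(size_t word_size, size_t region_words) {
  // Round the request up to whole regions.
  size_t num_regions = (word_size + region_words - 1) / region_words;
  int first = humongous_obj_allocate_find_first(num_regions, word_size);
  if (first == -1) {
    return NULL;  // no contiguous run of free regions was found
  }
  // Stitch the run together so it appears as a single humongous region.
  return humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
}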
@ -776,7 +774,7 @@ protected:
// Invoke "save_marks" on all heap regions.
void save_marks();
// It frees a non-humongous region by initializing its contents and
// Frees a non-humongous region by initializing its contents and
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
@ -787,13 +785,13 @@ protected:
FreeRegionList* free_list,
bool par);
// It frees a humongous region by collapsing it into individual
// regions and calling free_region() for each of them. The freed
// regions will be added to the free list that's passed as a parameter
// (this is usually a local list which will be appended to the
// master free list later). The used bytes of freed regions are
// accumulated in pre_used. If par is true, the region's RSet will
// not be freed up. The assumption is that this will be done later.
// Frees a humongous region by collapsing it into individual regions
// and calling free_region() for each of them. The freed regions
// will be added to the free list that's passed as a parameter (this
// is usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
// pre_used. If par is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
void free_humongous_region(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
@ -1046,13 +1044,13 @@ public:
#endif // HEAP_REGION_SET_FORCE_VERIFY
#ifdef ASSERT
bool is_on_free_list(HeapRegion* hr) {
bool is_on_master_free_list(HeapRegion* hr) {
return hr->containing_set() == &_free_list;
}
bool is_on_humongous_set(HeapRegion* hr) {
bool is_in_humongous_set(HeapRegion* hr) {
return hr->containing_set() == &_humongous_set;
}
}
#endif // ASSERT
// Wrapper for the region list operations that can be called from
@ -1066,7 +1064,9 @@ public:
_free_list.add_as_tail(&_secondary_free_list);
}
void append_secondary_free_list_if_not_empty() {
void append_secondary_free_list_if_not_empty_with_lock() {
// If the secondary free list looks empty there's no reason to
// take the lock and then try to append it.
if (!_secondary_free_list.is_empty()) {
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
append_secondary_free_list();
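The _with_lock suffix makes the locking behavior explicit in the name, and the unlocked emptiness test is a deliberate fast path. A self-contained sketch of the same check-then-lock pattern in portable C++; std::mutex and std::list stand in for SecondaryFreeList_lock and the region lists, and every name here is illustrative:
#include <list>
#include <mutex>

struct RegionList {
  std::list<int> regions;                   // stand-in for a linked list of HeapRegions
  bool is_empty() const { return regions.empty(); }
};

static std::mutex secondary_free_list_lock; // stand-in for SecondaryFreeList_lock
static RegionList free_list, secondary_free_list;

void append_secondary_if_not_empty_with_lock() {
  // Unlocked fast path: if the list looks empty, skip the lock entirely.
  // A racy read is acceptable: regions added concurrently will simply be
  // picked up by a later call that does take the lock.
  if (secondary_free_list.is_empty()) return;
  std::lock_guard<std::mutex> x(secondary_free_list_lock);
  free_list.regions.splice(free_list.regions.end(), secondary_free_list.regions);
}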

View File

@ -81,6 +81,57 @@ static double non_young_other_cost_per_region_ms_defaults[] = {
// </NEW PREDICTION>
// Help class for avoiding interleaved logging
class LineBuffer: public StackObj {
private:
static const int BUFFER_LEN = 1024;
static const int INDENT_CHARS = 3;
char _buffer[BUFFER_LEN];
int _indent_level;
int _cur;
void vappend(const char* format, va_list ap) {
int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
if (res != -1) {
_cur += res;
} else {
DEBUG_ONLY(warning("buffer too small in LineBuffer");)
_buffer[BUFFER_LEN -1] = 0;
_cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
}
}
public:
explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
_buffer[_cur] = ' ';
}
}
#ifndef PRODUCT
~LineBuffer() {
assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
}
#endif
void append(const char* format, ...) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
}
void append_and_print_cr(const char* format, ...) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
gclog_or_tty->print_cr("%s", _buffer);
_cur = _indent_level * INDENT_CHARS;
}
};
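A short usage sketch of the class above (the values are hypothetical): each GC worker builds its whole line locally and emits it with a single print_cr(), so output from concurrent threads can no longer interleave mid-line:
LineBuffer buf(2);                     // indent level 2 => 6 leading spaces
buf.append("[%s (ms):", "GC Worker");
for (int i = 0; i < 4; ++i) {
  buf.append(" %3.1lf", 1.5 * i);      // accumulate locally, no I/O yet
}
buf.append_and_print_cr("]");          // one atomic print_cr() on gclog_or_tty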
G1CollectorPolicy::G1CollectorPolicy() :
_parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
? ParallelGCThreads : 1),
@ -1016,10 +1067,8 @@ void G1CollectorPolicy::print_par_stats(int level,
bool summary) {
double min = data[0], max = data[0];
double total = 0.0;
int j;
for (j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print("[%s (ms):", str);
LineBuffer buf(level);
buf.append("[%s (ms):", str);
for (uint i = 0; i < ParallelGCThreads; ++i) {
double val = data[i];
if (val < min)
@ -1027,18 +1076,16 @@ void G1CollectorPolicy::print_par_stats(int level,
if (val > max)
max = val;
total += val;
gclog_or_tty->print(" %3.1lf", val);
buf.append(" %3.1lf", val);
}
if (summary) {
gclog_or_tty->print_cr("");
buf.append_and_print_cr("");
double avg = total / (double) ParallelGCThreads;
gclog_or_tty->print(" ");
for (j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
buf.append(" ");
buf.append("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
avg, min, max);
}
gclog_or_tty->print_cr("]");
buf.append_and_print_cr("]");
}
void G1CollectorPolicy::print_par_sizes(int level,
@ -1047,10 +1094,8 @@ void G1CollectorPolicy::print_par_sizes(int level,
bool summary) {
double min = data[0], max = data[0];
double total = 0.0;
int j;
for (j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print("[%s :", str);
LineBuffer buf(level);
buf.append("[%s :", str);
for (uint i = 0; i < ParallelGCThreads; ++i) {
double val = data[i];
if (val < min)
@ -1058,34 +1103,28 @@ void G1CollectorPolicy::print_par_sizes(int level,
if (val > max)
max = val;
total += val;
gclog_or_tty->print(" %d", (int) val);
buf.append(" %d", (int) val);
}
if (summary) {
gclog_or_tty->print_cr("");
buf.append_and_print_cr("");
double avg = total / (double) ParallelGCThreads;
gclog_or_tty->print(" ");
for (j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d",
buf.append(" ");
buf.append("Sum: %d, Avg: %d, Min: %d, Max: %d",
(int)total, (int)avg, (int)min, (int)max);
}
gclog_or_tty->print_cr("]");
buf.append_and_print_cr("]");
}
void G1CollectorPolicy::print_stats (int level,
const char* str,
double value) {
for (int j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value);
LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}
void G1CollectorPolicy::print_stats (int level,
const char* str,
int value) {
for (int j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print_cr("[%s: %d]", str, value);
LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}
double G1CollectorPolicy::avg_value (double* data) {
@ -2060,17 +2099,11 @@ void G1CollectorPolicy::count_CS_bytes_used() {
_g1->collection_set_iterate(&cs_closure);
}
static void print_indent(int level) {
for (int j = 0; j < level+1; ++j)
gclog_or_tty->print(" ");
}
void G1CollectorPolicy::print_summary (int level,
const char* str,
NumberSeq* seq) const {
double sum = seq->sum();
print_indent(level);
gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
str, sum / 1000.0, seq->avg());
}
@ -2078,8 +2111,7 @@ void G1CollectorPolicy::print_summary_sd (int level,
const char* str,
NumberSeq* seq) const {
print_summary(level, str, seq);
print_indent(level + 5);
gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
seq->num(), seq->sd(), seq->maximum());
}
@ -2087,6 +2119,7 @@ void G1CollectorPolicy::check_other_times(int level,
NumberSeq* other_times_ms,
NumberSeq* calc_other_times_ms) const {
bool should_print = false;
LineBuffer buf(level + 2);
double max_sum = MAX2(fabs(other_times_ms->sum()),
fabs(calc_other_times_ms->sum()));
@ -2095,8 +2128,7 @@ void G1CollectorPolicy::check_other_times(int level,
double sum_ratio = max_sum / min_sum;
if (sum_ratio > 1.1) {
should_print = true;
print_indent(level + 1);
gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
}
double max_avg = MAX2(fabs(other_times_ms->avg()),
@ -2106,30 +2138,25 @@ void G1CollectorPolicy::check_other_times(int level,
double avg_ratio = max_avg / min_avg;
if (avg_ratio > 1.1) {
should_print = true;
print_indent(level + 1);
gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
}
if (other_times_ms->sum() < -0.01) {
print_indent(level + 1);
gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
}
if (other_times_ms->avg() < -0.01) {
print_indent(level + 1);
gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
}
if (calc_other_times_ms->sum() < -0.01) {
should_print = true;
print_indent(level + 1);
gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
}
if (calc_other_times_ms->avg() < -0.01) {
should_print = true;
print_indent(level + 1);
gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
}
if (should_print)
@ -2210,10 +2237,9 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
}
}
} else {
print_indent(0);
gclog_or_tty->print_cr("none");
LineBuffer(1).append_and_print_cr("none");
}
gclog_or_tty->print_cr("");
LineBuffer(0).append_and_print_cr("");
}
void G1CollectorPolicy::print_tracing_info() const {
@ -2532,7 +2558,7 @@ public:
jint regions_added = parKnownGarbageCl.marked_regions_added();
_hrSorted->incNumMarkedHeapRegions(regions_added);
if (G1PrintParCleanupStats) {
gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n",
gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
i, parKnownGarbageCl.invokes(), regions_added);
}
}

View File

@ -185,22 +185,22 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
size_t _pre_used;
FreeRegionList _free_list;
HumongousRegionSet _humongous_proxy_set;
void free_humongous_region(HeapRegion* hr) {
HeapWord* end = hr->end();
size_t dummy_pre_used;
FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
assert(hr->startsHumongous(),
"Only the start of a humongous region should be freed.");
_g1h->free_humongous_region(hr, &_pre_used, &_free_list,
_g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
&_humongous_proxy_set, false /* par */);
// Do we also need to do this for the continues humongous regions
// we just collapsed?
hr->prepare_for_compaction(&_cp);
// Also clear the part of the card table that will be unused after
// compaction.
_mrbs->clear(MemRegion(hr->compaction_top(), end));
dummy_free_list.remove_all();
}
public:
@ -208,8 +208,6 @@ public:
: _g1h(G1CollectedHeap::heap()),
_mrbs(G1CollectedHeap::heap()->mr_bs()),
_cp(NULL, cs, cs->initialize_threshold()),
_pre_used(0),
_free_list("Local Free List for G1MarkSweep"),
_humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
void update_sets() {
@ -219,7 +217,6 @@ public:
NULL, /* free_list */
&_humongous_proxy_set,
false /* par */);
_free_list.remove_all();
}
bool doHeapRegion(HeapRegion* hr) {

View File

@ -86,28 +86,6 @@ public:
bool idempotent() { return true; }
};
class IntoCSRegionClosure: public HeapRegionClosure {
IntoCSOopClosure _blk;
G1CollectedHeap* _g1;
public:
IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
_g1(g1), _blk(g1, blk) {}
bool doHeapRegion(HeapRegion* r) {
if (!r->in_collection_set()) {
_blk.set_region(r);
if (r->isHumongous()) {
if (r->startsHumongous()) {
oop obj = oop(r->bottom());
obj->oop_iterate(&_blk);
}
} else {
r->oop_before_save_marks_iterate(&_blk);
}
}
return false;
}
};
class VerifyRSCleanCardOopClosure: public OopClosure {
G1CollectedHeap* _g1;
public:
@ -329,7 +307,7 @@ public:
// is during RSet updating within an evacuation pause.
// In this case worker_i should be the id of a GC worker thread.
assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "should be a GC worker");
assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
// 'card_ptr' contains references that point into the collection

View File

@ -53,8 +53,8 @@ class HeapRegion;
class HeapRegionSetBase;
#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
(__hr)->top(), (__hr)->end()
#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
(_hr_)->top(), (_hr_)->end()
// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
@ -518,13 +518,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
containing_set, _containing_set));
_containing_set = containing_set;
}
}
HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) { }
// containing_set() is only used in asserts so there's not reason
// containing_set() is only used in asserts so there's no reason
// to provide a dummy version of it.
#endif // ASSERT
@ -535,14 +535,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
bool pending_removal() { return _pending_removal; }
void set_pending_removal(bool pending_removal) {
// We can only set pending_removal to true, if it's false and the
// region belongs to a set.
assert(!pending_removal ||
(!_pending_removal && containing_set() != NULL), "pre-condition");
// We can only set pending_removal to false, if it's true and the
// region does not belong to a set.
assert( pending_removal ||
( _pending_removal && containing_set() == NULL), "pre-condition");
if (pending_removal) {
assert(!_pending_removal && containing_set() != NULL,
"can only set pending removal to true if it's false and "
"the region belongs to a region set");
} else {
assert( _pending_removal && containing_set() == NULL,
"can only set pending removal to false if it's true and "
"the region does not belong to a region set");
}
_pending_removal = pending_removal;
}

View File

@ -165,7 +165,7 @@ int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
assert(num_so_far <= num, "post-condition");
if (num_so_far == num) {
// we find enough space for the humongous object
// we found enough space for the humongous object
assert(from <= first && first < _regions.length(), "post-condition");
assert(first < curr && (curr - first) == (int) num, "post-condition");
for (int i = first; i < first + (int) num; ++i) {

View File

@ -76,7 +76,8 @@ class HeapRegionSeq: public CHeapObj {
// that are available for allocation.
size_t free_suffix();
// Finds a contiguous set of empty regions of length num.
// Find a contiguous set of empty regions of length num and return
// the index of the first region or -1 if the search was unsuccessful.
int find_contiguous(size_t num);
// Apply the "doHeapRegion" method of "blk" to all regions in "this",

View File

@ -42,7 +42,7 @@ size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
return region_num;
}
void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) {
void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
msg->append("[%s] %s "
"ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
"cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
@ -109,30 +109,30 @@ void HeapRegionSetBase::verify() {
// for the verification calls. If we do verification without the
// appropriate locks and the set changes underneath our feet
// verification might fail and send us on a wild goose chase.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
(!is_empty() && length() >= 0 && region_num() >= 0 &&
total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
guarantee((!regions_humongous() && region_num() == length()) ||
( regions_humongous() && region_num() >= length()),
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
guarantee(!regions_empty() || total_used_bytes() == 0,
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
guarantee(total_used_bytes() <= total_capacity_bytes(),
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
}
void HeapRegionSetBase::verify_start() {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
assert(!_verify_in_progress,
hrl_ext_msg(this, "verification should not be in progress"));
hrs_ext_msg(this, "verification should not be in progress"));
// Do the basic verification first before we do the checks over the regions.
HeapRegionSetBase::verify();
@ -146,11 +146,11 @@ void HeapRegionSetBase::verify_start() {
void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
assert(_verify_in_progress,
hrl_ext_msg(this, "verification should be in progress"));
hrs_ext_msg(this, "verification should be in progress"));
guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification"));
guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification"));
_calc_length += 1;
if (!hr->isHumongous()) {
@ -164,28 +164,28 @@ void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
void HeapRegionSetBase::verify_end() {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
assert(_verify_in_progress,
hrl_ext_msg(this, "verification should be in progress"));
hrs_ext_msg(this, "verification should be in progress"));
guarantee(length() == _calc_length,
hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == "
"calc length: "SIZE_FORMAT,
name(), length(), _calc_length));
guarantee(region_num() == _calc_region_num,
hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
"calc region num: "SIZE_FORMAT,
name(), region_num(), _calc_region_num));
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
"calc capacity bytes: "SIZE_FORMAT,
name(),
total_capacity_bytes(), _calc_total_capacity_bytes));
guarantee(total_used_bytes() == _calc_total_used_bytes,
hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
"calc used bytes: "SIZE_FORMAT,
name(), total_used_bytes(), _calc_total_used_bytes));
@ -221,9 +221,9 @@ HeapRegionSetBase::HeapRegionSetBase(const char* name)
//////////////////// HeapRegionSet ////////////////////
void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
hrl_assert_mt_safety_ok(this);
hrl_assert_mt_safety_ok(proxy_set);
hrl_assert_sets_match(this, proxy_set);
hrs_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(proxy_set);
hrs_assert_sets_match(this, proxy_set);
verify_optional();
proxy_set->verify_optional();
@ -231,19 +231,19 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
if (proxy_set->is_empty()) return;
assert(proxy_set->length() <= _length,
hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
"should be <= length: "SIZE_FORMAT,
name(), proxy_set->length(), _length));
_length -= proxy_set->length();
assert(proxy_set->region_num() <= _region_num,
hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
"should be <= region num: "SIZE_FORMAT,
name(), proxy_set->region_num(), _region_num));
_region_num -= proxy_set->region_num();
assert(proxy_set->total_used_bytes() <= _total_used_bytes,
hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
hrs_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
"should be <= used bytes: "SIZE_FORMAT,
name(), proxy_set->total_used_bytes(),
_total_used_bytes));
@ -257,13 +257,13 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
//////////////////// HeapRegionLinkedList ////////////////////
void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) {
void HeapRegionLinkedList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
}
void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
hrl_assert_mt_safety_ok(this);
hrl_assert_mt_safety_ok(from_list);
hrs_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(from_list);
verify_optional();
from_list->verify_optional();
@ -283,10 +283,10 @@ void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
#endif // ASSERT
if (_tail != NULL) {
assert(length() > 0 && _head != NULL, hrl_ext_msg(this, "invariant"));
assert(length() > 0 && _head != NULL, hrs_ext_msg(this, "invariant"));
_tail->set_next(from_list->_head);
} else {
assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant"));
assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant"));
_head = from_list->_head;
}
_tail = from_list->_tail;
@ -301,12 +301,12 @@ void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
}
void HeapRegionLinkedList::remove_all() {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
verify_optional();
HeapRegion* curr = _head;
while (curr != NULL) {
hrl_assert_region_ok(this, curr, this);
hrs_assert_region_ok(this, curr, this);
HeapRegion* next = curr->next();
curr->set_next(NULL);
@ -319,9 +319,9 @@ void HeapRegionLinkedList::remove_all() {
}
void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
hrl_assert_mt_safety_ok(this);
assert(target_count > 1, hrl_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrl_ext_msg(this, "pre-condition"));
hrs_assert_mt_safety_ok(this);
assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
verify_optional();
DEBUG_ONLY(size_t old_length = length();)
@ -330,27 +330,27 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
HeapRegion* prev = NULL;
size_t count = 0;
while (curr != NULL) {
hrl_assert_region_ok(this, curr, this);
hrs_assert_region_ok(this, curr, this);
HeapRegion* next = curr->next();
if (curr->pending_removal()) {
assert(count < target_count,
hrl_err_msg("[%s] should not come across more regions "
hrs_err_msg("[%s] should not come across more regions "
"pending for removal than target_count: "SIZE_FORMAT,
name(), target_count));
if (prev == NULL) {
assert(_head == curr, hrl_ext_msg(this, "invariant"));
assert(_head == curr, hrs_ext_msg(this, "invariant"));
_head = next;
} else {
assert(_head != curr, hrl_ext_msg(this, "invariant"));
assert(_head != curr, hrs_ext_msg(this, "invariant"));
prev->set_next(next);
}
if (next == NULL) {
assert(_tail == curr, hrl_ext_msg(this, "invariant"));
assert(_tail == curr, hrs_ext_msg(this, "invariant"));
_tail = prev;
} else {
assert(_tail != curr, hrl_ext_msg(this, "invariant"));
assert(_tail != curr, hrs_ext_msg(this, "invariant"));
}
curr->set_next(NULL);
@ -371,10 +371,10 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
}
assert(count == target_count,
hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == "
"target_count: "SIZE_FORMAT, name(), count, target_count));
assert(length() + target_count == old_length,
hrl_err_msg("[%s] new length should be consistent "
hrs_err_msg("[%s] new length should be consistent "
"new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
"target_count: "SIZE_FORMAT,
name(), length(), old_length, target_count));
@ -385,7 +385,7 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
void HeapRegionLinkedList::verify() {
// See comment in HeapRegionSetBase::verify() about MT safety and
// verification.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
// This will also do the basic verification too.
verify_start();
@ -399,7 +399,7 @@ void HeapRegionLinkedList::verify() {
count += 1;
guarantee(count < _unrealistically_long_length,
hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
"seems very long, is there maybe a cycle? "
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
"prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
@ -410,7 +410,7 @@ void HeapRegionLinkedList::verify() {
curr = curr->next();
}
guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition"));
guarantee(_tail == prev0, hrs_ext_msg(this, "post-condition"));
verify_end();
}

View File

@ -28,8 +28,8 @@
#include "gc_implementation/g1/heapRegion.hpp"
// Large buffer for some cases where the output might be larger than normal.
#define HRL_ERR_MSG_BUFSZ 512
typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
#define HRS_ERR_MSG_BUFSZ 512
typedef FormatBuffer<HRS_ERR_MSG_BUFSZ> hrs_err_msg;
// Set verification will be forced either if someone defines
// HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which
@ -45,10 +45,10 @@ typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
// (e.g., length, region num, used bytes sum) plus any shared
// functionality (e.g., verification).
class hrl_ext_msg;
class hrs_ext_msg;
class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
friend class hrl_ext_msg;
friend class hrs_ext_msg;
protected:
static size_t calculate_region_num(HeapRegion* hr);
@ -104,10 +104,10 @@ protected:
virtual bool check_mt_safety() { return true; }
// fill_in_ext_msg() writes the values of the set's attributes
// in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra()
// in the custom err_msg (hrs_ext_msg). fill_in_ext_msg_extra()
// allows subclasses to append further information.
virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { }
void fill_in_ext_msg(hrl_ext_msg* msg, const char* message);
virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { }
void fill_in_ext_msg(hrs_ext_msg* msg, const char* message);
// It updates the fields of the set to reflect hr being added to
// the set.
@ -170,9 +170,9 @@ public:
// the fields of the associated set. This can be very helpful in
// diagnosing failures.
class hrl_ext_msg : public hrl_err_msg {
class hrs_ext_msg : public hrs_err_msg {
public:
hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") {
hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("") {
set->fill_in_ext_msg(this, message);
}
};
@ -180,25 +180,25 @@ public:
// These two macros are provided for convenience, to keep the uses of
// these two asserts a bit more concise.
#define hrl_assert_mt_safety_ok(_set_) \
#define hrs_assert_mt_safety_ok(_set_) \
do { \
assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety")); \
assert((_set_)->check_mt_safety(), hrs_ext_msg((_set_), "MT safety")); \
} while (0)
#define hrl_assert_region_ok(_set_, _hr_, _expected_) \
#define hrs_assert_region_ok(_set_, _hr_, _expected_) \
do { \
assert((_set_)->verify_region((_hr_), (_expected_)), \
hrl_ext_msg((_set_), "region verification")); \
hrs_ext_msg((_set_), "region verification")); \
} while (0)
//////////////////// HeapRegionSet ////////////////////
#define hrl_assert_sets_match(_set1_, _set2_) \
#define hrs_assert_sets_match(_set1_, _set2_) \
do { \
assert(((_set1_)->regions_humongous() == \
(_set2_)->regions_humongous()) && \
((_set1_)->regions_empty() == (_set2_)->regions_empty()), \
hrl_err_msg("the contents of set %s and set %s should match", \
hrs_err_msg("the contents of set %s and set %s should match", \
(_set1_)->name(), (_set2_)->name())); \
} while (0)
@ -267,7 +267,7 @@ private:
HeapRegion* tail() { return _tail; }
protected:
virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg);
virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
// See the comment for HeapRegionSetBase::clear()
virtual void clear();
@ -309,10 +309,10 @@ public:
virtual void print_on(outputStream* out, bool print_contents = false);
};
//////////////////// HeapRegionLinkedList ////////////////////
//////////////////// HeapRegionLinkedListIterator ////////////////////
// Iterator class that provides a convenient way to iterator over the
// regions in a HeapRegionLinkedList instance.
// Iterator class that provides a convenient way to iterate over the
// regions of a HeapRegionLinkedList instance.
class HeapRegionLinkedListIterator : public StackObj {
private:

View File

@ -42,8 +42,8 @@ inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) {
}
inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
hrl_assert_region_ok(this, hr, NULL);
assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked"));
hrs_assert_region_ok(this, hr, NULL);
assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
update_for_addition(hr);
hr->set_containing_set(this);
@ -51,7 +51,7 @@ inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
// Assumes the caller has already verified the region.
assert(_length > 0, hrl_ext_msg(this, "pre-condition"));
assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
_length -= 1;
size_t region_num_diff;
@ -61,22 +61,22 @@ inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
region_num_diff = calculate_region_num(hr);
}
assert(region_num_diff <= _region_num,
hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" "
hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" "
"should be <= region num: "SIZE_FORMAT,
name(), region_num_diff, _region_num));
_region_num -= region_num_diff;
size_t used_bytes = hr->used();
assert(used_bytes <= _total_used_bytes,
hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
hrs_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
"should be <= used bytes: "SIZE_FORMAT,
name(), used_bytes, _total_used_bytes));
_total_used_bytes -= used_bytes;
}
inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
hrl_assert_region_ok(this, hr, this);
assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked"));
hrs_assert_region_ok(this, hr, this);
assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
hr->set_containing_set(NULL);
update_for_removal(hr);
@ -85,13 +85,13 @@ inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
//////////////////// HeapRegionSet ////////////////////
inline void HeapRegionSet::add(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
// add_internal() will verify the region.
add_internal(hr);
}
inline void HeapRegionSet::remove(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
// remove_internal() will verify the region.
remove_internal(hr);
}
@ -101,8 +101,8 @@ inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
// No need to do the MT safety check here given that this method
// does not update the contents of the set but instead accumulates
// the changes in proxy_set which is assumed to be thread-local.
hrl_assert_sets_match(this, proxy_set);
hrl_assert_region_ok(this, hr, this);
hrs_assert_sets_match(this, proxy_set);
hrs_assert_region_ok(this, hr, this);
hr->set_containing_set(NULL);
proxy_set->update_for_addition(hr);
@ -111,10 +111,10 @@ inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
//////////////////// HeapRegionLinkedList ////////////////////
inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
assert((length() == 0 && _head == NULL && _tail == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
// add_internal() will verify the region.
add_internal(hr);
@ -128,10 +128,10 @@ inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
}
inline HeapRegion* HeapRegionLinkedList::remove_head() {
hrl_assert_mt_safety_ok(this);
assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty"));
hrs_assert_mt_safety_ok(this);
assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
assert(length() > 0 && _head != NULL && _tail != NULL,
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
// We need to unlink it first.
HeapRegion* hr = _head;
@ -147,7 +147,7 @@ inline HeapRegion* HeapRegionLinkedList::remove_head() {
}
inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
if (!is_empty()) {
return remove_head();

View File

@ -52,7 +52,7 @@ bool MasterFreeRegionList::check_mt_safety() {
FreeList_lock->owned_by_self())) ||
(!SafepointSynchronize::is_at_safepoint() &&
Heap_lock->owned_by_self()),
hrl_ext_msg(this, "master free list MT safety protocol"));
hrs_ext_msg(this, "master free list MT safety protocol"));
return FreeRegionList::check_mt_safety();
}
@ -65,7 +65,7 @@ bool SecondaryFreeRegionList::check_mt_safety() {
// while holding the SecondaryFreeList_lock.
guarantee(SecondaryFreeList_lock->owned_by_self(),
hrl_ext_msg(this, "secondary free list MT safety protocol"));
hrs_ext_msg(this, "secondary free list MT safety protocol"));
return FreeRegionList::check_mt_safety();
}
@ -81,7 +81,7 @@ const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
return HeapRegionSet::verify_region_extra(hr);
}
//////////////////// HumongousRegionSet ////////////////////
//////////////////// MasterHumongousRegionSet ////////////////////
bool MasterHumongousRegionSet::check_mt_safety() {
// Master Humongous Set MT safety protocol:
@ -97,6 +97,6 @@ bool MasterHumongousRegionSet::check_mt_safety() {
OldSets_lock->owned_by_self())) ||
(!SafepointSynchronize::is_at_safepoint() &&
Heap_lock->owned_by_self()),
hrl_ext_msg(this, "master humongous set MT safety protocol"));
hrs_ext_msg(this, "master humongous set MT safety protocol"));
return HumongousRegionSet::check_mt_safety();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1530,13 +1530,15 @@ void ParNewGeneration::ref_processor_init()
{
if (_ref_processor == NULL) {
// Allocate and initialize a reference processor
_ref_processor = ReferenceProcessor::create_ref_processor(
_reserved, // span
refs_discovery_is_atomic(), // atomic_discovery
refs_discovery_is_mt(), // mt_discovery
NULL, // is_alive_non_header
ParallelGCThreads,
ParallelRefProcEnabled);
_ref_processor =
new ReferenceProcessor(_reserved, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
refs_discovery_is_mt(), // mt discovery
(int) ParallelGCThreads, // mt discovery degree
refs_discovery_is_atomic(), // atomic_discovery
NULL, // is_alive_non_header
false); // write barrier for next field updates
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,9 +58,7 @@ CollectorCounters* PSMarkSweep::_counters = NULL;
void PSMarkSweep::initialize() {
MemRegion mr = Universe::heap()->reserved_region();
_ref_processor = new ReferenceProcessor(mr,
true, // atomic_discovery
false); // mt_discovery
_ref_processor = new ReferenceProcessor(mr); // a vanilla ref proc
_counters = new CollectorCounters("PSMarkSweep", 1);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -827,13 +827,15 @@ void PSParallelCompact::post_initialize() {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
MemRegion mr = heap->reserved_region();
_ref_processor = ReferenceProcessor::create_ref_processor(
mr, // span
true, // atomic_discovery
true, // mt_discovery
&_is_alive_closure,
ParallelGCThreads,
ParallelRefProcEnabled);
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
true, // mt discovery
(int) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
&_is_alive_closure, // non-header is alive closure
false); // write barrier for next field updates
_counters = new CollectorCounters("PSParallelCompact", 1);
// Initialize static fields in ParCompactionManager.

View File

@ -411,7 +411,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
template <class T> void PSPromotionManager::process_array_chunk_work(
oop obj,
int start, int end) {
assert(start < end, "invariant");
assert(start <= end, "invariant");
T* const base = (T*)objArrayOop(obj)->base();
T* p = base + start;
T* const chunk_end = base + end;
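Relaxing the assertion from start < end to start <= end admits an empty chunk, which the loop already handles as a no-op. A self-contained sketch of the loop shape; the names are illustrative, not the actual PSPromotionManager code:
template <class T>
void process_chunk_sketch(T* base, int start, int end) {
  T* p = base + start;
  T* const chunk_end = base + end;     // p == chunk_end when start == end
  while (p < chunk_end) {              // zero iterations for an empty chunk
    // ... process *p ...
    ++p;
  }
}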

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -796,13 +796,15 @@ void PSScavenge::initialize() {
// Initialize ref handling object for scavenging.
MemRegion mr = young_gen->reserved();
_ref_processor = ReferenceProcessor::create_ref_processor(
mr, // span
true, // atomic_discovery
true, // mt_discovery
NULL, // is_alive_non_header
ParallelGCThreads,
ParallelRefProcEnabled);
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
true, // mt discovery
(int) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
NULL, // header provides liveness info
false); // next field updates do not need write barrier
// Cache the cardtable
BarrierSet* bs = Universe::heap()->barrier_set();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,7 +96,7 @@ class AbstractInterpreter: AllStatic {
empty, // empty method (code: _return)
accessor, // accessor method (code: _aload_0, _getfield, _(a|i)return)
abstract, // abstract method (throws an AbstractMethodException)
method_handle, // java.dyn.MethodHandles::invoke
method_handle, // java.lang.invoke.MethodHandles::invoke
java_lang_math_sin, // implementation of java.lang.Math.sin (x)
java_lang_math_cos, // implementation of java.lang.Math.cos (x)
java_lang_math_tan, // implementation of java.lang.Math.tan (x)

View File

@ -369,7 +369,10 @@ IRT_ENTRY(void, InterpreterRuntime::throw_WrongMethodTypeException(JavaThread* t
}
// create exception
THROW_MSG(vmSymbols::java_dyn_WrongMethodTypeException(), message);
Symbol* java_lang_invoke_WrongMethodTypeException = vmSymbols::java_lang_invoke_WrongMethodTypeException();
if (AllowTransitionalJSR292)
java_lang_invoke_WrongMethodTypeException = SystemDictionaryHandles::WrongMethodTypeException_klass()->name();
THROW_MSG(java_lang_invoke_WrongMethodTypeException, message);
}
IRT_END
@ -794,7 +797,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
Handle info; // optional argument(s) in JVM_CONSTANT_InvokeDynamic
Handle bootm = SystemDictionary::find_bootstrap_method(caller_method, caller_bci,
main_index, info, CHECK);
if (!java_dyn_MethodHandle::is_instance(bootm())) {
if (!java_lang_invoke_MethodHandle::is_instance(bootm())) {
THROW_MSG(vmSymbols::java_lang_IllegalStateException(),
"no bootstrap method found for invokedynamic");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -217,11 +217,13 @@ void LinkResolver::lookup_implicit_method(methodHandle& result,
if (EnableMethodHandles &&
klass() == SystemDictionary::MethodHandle_klass() &&
methodOopDesc::is_method_handle_invoke_name(name)) {
if (!MethodHandles::enabled()) {
if (!THREAD->is_Compiler_thread() && !MethodHandles::enabled()) {
// Make sure the Java part of the runtime has been booted up.
klassOop natives = SystemDictionary::MethodHandleNatives_klass();
if (natives == NULL || instanceKlass::cast(natives)->is_not_initialized()) {
SystemDictionary::resolve_or_fail(vmSymbols::sun_dyn_MethodHandleNatives(),
Symbol* natives_name = vmSymbols::java_lang_invoke_MethodHandleNatives();
if (natives != NULL && AllowTransitionalJSR292) natives_name = Klass::cast(natives)->name();
SystemDictionary::resolve_or_fail(natives_name,
Handle(),
Handle(),
true,
@ -298,7 +300,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle& re
}
void LinkResolver::resolve_dynamic_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) {
// The class is java.dyn.MethodHandle
// The class is java.lang.invoke.MethodHandle
resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
Symbol* method_name = vmSymbols::invokeExact_name();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -293,10 +293,11 @@ void GenCollectorPolicy::initialize_size_info() {
// Determine maximum size of gen0
size_t max_new_size = 0;
if (FLAG_IS_CMDLINE(MaxNewSize)) {
if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
if (MaxNewSize < min_alignment()) {
max_new_size = min_alignment();
} else if (MaxNewSize >= max_heap_byte_size()) {
}
if (MaxNewSize >= max_heap_byte_size()) {
max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
min_alignment());
warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
@ -333,7 +334,7 @@ void GenCollectorPolicy::initialize_size_info() {
assert(max_new_size > 0, "All paths should set max_new_size");
// Given the maximum gen0 size, determine the initial and
// minimum sizes.
// minimum gen0 sizes.
if (max_heap_byte_size() == min_heap_byte_size()) {
// The maximum and minimum heap sizes are the same so
@ -396,7 +397,7 @@ void GenCollectorPolicy::initialize_size_info() {
}
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
}
@ -448,7 +449,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// At this point the minimum, initial and maximum sizes
// of the overall heap and of gen0 have been determined.
// The maximum gen1 size can be determined from the maximum gen0
// and maximum heap size since not explicit flags exits
// and maximum heap size since no explicit flags exist
// for setting the gen1 maximum.
_max_gen1_size = max_heap_byte_size() - _max_gen0_size;
_max_gen1_size =
@ -494,13 +495,13 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
"generation sizes: using maximum heap = " SIZE_FORMAT
" -XX:OldSize flag is being ignored",
max_heap_byte_size());
}
}
// If there is an inconsistency between the OldSize and the minimum and/or
// initial size of gen0, since OldSize was explicitly set, OldSize wins.
if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
min_heap_byte_size(), OldSize)) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
}
@ -509,7 +510,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
initial_heap_byte_size(), OldSize)) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,14 +83,11 @@ void Generation::print_heap_change(size_t prev_used) const {
}
// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs discovery override this method.
// generations needing multi-threaded refs processing or discovery override this method.
void Generation::ref_processor_init() {
assert(_ref_processor == NULL, "a reference processor already exists");
assert(!_reserved.is_empty(), "empty generation?");
_ref_processor =
new ReferenceProcessor(_reserved, // span
refs_discovery_is_atomic(), // atomic_discovery
refs_discovery_is_mt()); // mt_discovery
_ref_processor = new ReferenceProcessor(_reserved); // a vanilla reference processor
if (_ref_processor == NULL) {
vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
}

View File

@ -316,12 +316,19 @@ size_t CodeHeap::allocated_capacity() const {
}
size_t CodeHeap::largest_free_block() const {
// First check unused space excluding free blocks.
size_t free_sz = size(_free_segments);
size_t unused = max_capacity() - allocated_capacity() - free_sz;
if (unused >= free_sz)
return unused;
// Now check largest free block.
size_t len = 0;
for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
if (b->length() > len)
len = b->length();
}
return size(len);
return MAX2(unused, size(len));
}
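The rewritten function also accounts for the never-allocated tail of the code heap, not just the free list: the largest free block is whichever of the two is bigger. A standalone toy version of the same arithmetic (illustrative only, not the CodeHeap API):
#include <algorithm>
#include <cstddef>
#include <vector>

size_t largest_free_block_sketch(size_t max_capacity,
                                 size_t allocated_capacity,
                                 const std::vector<size_t>& free_blocks) {
  size_t free_sz = 0, largest = 0;
  for (size_t b : free_blocks) {
    free_sz += b;                        // total bytes on the free list
    largest = std::max(largest, b);      // biggest recycled block
  }
  // Capacity never handed out at the heap's tail, excluding free blocks.
  size_t unused = max_capacity - allocated_capacity - free_sz;
  return std::max(unused, largest);      // mirrors MAX2(unused, size(len))
}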
// Free list management

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,40 +102,17 @@ void ReferenceProcessor::init_statics() {
"Unrecongnized RefDiscoveryPolicy");
}
ReferenceProcessor*
ReferenceProcessor::create_ref_processor(MemRegion span,
bool atomic_discovery,
bool mt_discovery,
BoolObjectClosure* is_alive_non_header,
int parallel_gc_threads,
bool mt_processing,
bool dl_needs_barrier) {
int mt_degree = 1;
if (parallel_gc_threads > 1) {
mt_degree = parallel_gc_threads;
}
ReferenceProcessor* rp =
new ReferenceProcessor(span, atomic_discovery,
mt_discovery, mt_degree,
mt_processing && (parallel_gc_threads > 0),
dl_needs_barrier);
if (rp == NULL) {
vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
}
rp->set_is_alive_non_header(is_alive_non_header);
rp->setup_policy(false /* default soft ref policy */);
return rp;
}
ReferenceProcessor::ReferenceProcessor(MemRegion span,
bool atomic_discovery,
bool mt_discovery,
int mt_degree,
bool mt_processing,
int mt_processing_degree,
bool mt_discovery,
int mt_discovery_degree,
bool atomic_discovery,
BoolObjectClosure* is_alive_non_header,
bool discovered_list_needs_barrier) :
_discovering_refs(false),
_enqueuing_is_done(false),
_is_alive_non_header(NULL),
_is_alive_non_header(is_alive_non_header),
_discovered_list_needs_barrier(discovered_list_needs_barrier),
_bs(NULL),
_processing_is_mt(mt_processing),
@ -144,8 +121,8 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
_span = span;
_discovery_is_atomic = atomic_discovery;
_discovery_is_mt = mt_discovery;
_num_q = mt_degree;
_max_num_q = mt_degree;
_num_q = MAX2(1, mt_processing_degree);
_max_num_q = MAX2(_num_q, mt_discovery_degree);
_discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
if (_discoveredSoftRefs == NULL) {
vm_exit_during_initialization("Could not allocated RefProc Array");
@ -163,6 +140,7 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
if (discovered_list_needs_barrier) {
_bs = Universe::heap()->barrier_set();
}
setup_policy(false /* default soft ref policy */);
}
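With setup_policy() now called at the end of the constructor, a directly constructed processor is fully initialized, so the create_ref_processor() factory removed above becomes unnecessary. A hedged before/after usage sketch:
// Before (factory applied the default soft-ref policy):
//   ReferenceProcessor* rp = ReferenceProcessor::create_ref_processor(span, ...);
// After (constructor applies it; the defaults give a vanilla processor):
ReferenceProcessor* rp = new ReferenceProcessor(span);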
#ifndef PRODUCT
@ -405,15 +383,14 @@ public:
{ }
virtual void work(unsigned int work_id) {
assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
// Simplest first cut: static partitioning.
int index = work_id;
// The increment on "index" must correspond to the maximum number of queues
// (n_queues) with which that ReferenceProcessor was created. That
// is because of the "clever" way the discovered references lists were
// allocated and are indexed into. That number is ParallelGCThreads
// currently. Assert that.
assert(_n_queues == (int) ParallelGCThreads, "Different number not expected");
// allocated and are indexed into.
assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
for (int j = 0;
j < subclasses_of_ref;
j++, index += _n_queues) {
@ -672,7 +649,7 @@ ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
}
}
NOT_PRODUCT(
if (PrintGCDetails && TraceReferenceGC) {
if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
"Refs in discovered list " INTPTR_FORMAT,
iter.removed(), iter.processed(), (address)refs_list.head());
@ -711,7 +688,7 @@ ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
// Now close the newly reachable set
complete_gc->do_void();
NOT_PRODUCT(
if (PrintGCDetails && TraceReferenceGC) {
if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
"Refs in discovered list " INTPTR_FORMAT,
iter.removed(), iter.processed(), (address)refs_list.head());
@ -951,7 +928,7 @@ ReferenceProcessor::process_discovered_reflist(
}
if (PrintReferenceGC && PrintGCDetails) {
size_t total = 0;
for (int i = 0; i < _num_q; ++i) {
for (int i = 0; i < _max_num_q; ++i) {
total += refs_lists[i].length();
}
gclog_or_tty->print(", %u refs", total);
@ -967,7 +944,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
task_executor->execute(phase1);
} else {
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
process_phase1(refs_lists[i], policy,
is_alive, keep_alive, complete_gc);
}
@ -983,7 +960,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
task_executor->execute(phase2);
} else {
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
}
}
@ -994,7 +971,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
task_executor->execute(phase3);
} else {
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
process_phase3(refs_lists[i], clear_referent,
is_alive, keep_alive, complete_gc);
}
@ -1008,7 +985,7 @@ void ReferenceProcessor::clean_up_discovered_references() {
// for (int j = 0; j < _num_q; j++) {
// int index = i * _max_num_q + j;
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
gclog_or_tty->print_cr(
"\nScrubbing %s discovered list of Null referents",
list_name(i));
@ -1350,7 +1327,7 @@ void ReferenceProcessor::preclean_discovered_references(
{
TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
}
@ -1363,7 +1340,7 @@ void ReferenceProcessor::preclean_discovered_references(
{
TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
}
@ -1376,7 +1353,7 @@ void ReferenceProcessor::preclean_discovered_references(
{
TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
}
@ -1433,7 +1410,7 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
complete_gc->do_void();
NOT_PRODUCT(
if (PrintGCDetails && PrintReferenceGC) {
if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
"Refs in discovered list " INTPTR_FORMAT,
iter.removed(), iter.processed(), (address)refs_list.head());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@ class ReferenceProcessor : public CHeapObj {
bool _enqueuing_is_done; // true if all weak references enqueued
bool _processing_is_mt; // true during phases when
// reference processing is MT.
int _next_id; // round-robin counter in
int _next_id; // round-robin mod _num_q counter in
// support of work distribution
// For collectors that do not keep GC marking information
@ -103,7 +103,8 @@ class ReferenceProcessor : public CHeapObj {
public:
int num_q() { return _num_q; }
void set_mt_degree(int v) { _num_q = v; }
int max_num_q() { return _max_num_q; }
void set_active_mt_degree(int v) { _num_q = v; }
DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
static oop sentinel_ref() { return _sentinelRef; }
static oop* adr_sentinel_ref() { return &_sentinelRef; }
@ -216,6 +217,7 @@ class ReferenceProcessor : public CHeapObj {
VoidClosure* complete_gc,
YieldClosure* yield);
// round-robin mod _num_q (note: _not_ mod _max_num_q)
int next_id() {
int id = _next_id;
if (++_next_id == _num_q) {
@ -256,24 +258,16 @@ class ReferenceProcessor : public CHeapObj {
_max_num_q(0),
_processing_is_mt(false),
_next_id(0)
{}
{ }
ReferenceProcessor(MemRegion span, bool atomic_discovery,
bool mt_discovery,
int mt_degree = 1,
bool mt_processing = false,
// Default parameters give you a vanilla reference processor.
ReferenceProcessor(MemRegion span,
bool mt_processing = false, int mt_processing_degree = 1,
bool mt_discovery = false, int mt_discovery_degree = 1,
bool atomic_discovery = true,
BoolObjectClosure* is_alive_non_header = NULL,
bool discovered_list_needs_barrier = false);
// Allocates and initializes a reference processor.
static ReferenceProcessor* create_ref_processor(
MemRegion span,
bool atomic_discovery,
bool mt_discovery,
BoolObjectClosure* is_alive_non_header = NULL,
int parallel_gc_threads = 1,
bool mt_processing = false,
bool discovered_list_needs_barrier = false);
// RefDiscoveryPolicy values
enum DiscoveryPolicy {
ReferenceBasedDiscovery = 0,
@ -397,20 +391,20 @@ class ReferenceProcessorSpanMutator: StackObj {
// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTMutator: StackObj {
class ReferenceProcessorMTDiscoveryMutator: StackObj {
private:
ReferenceProcessor* _rp;
bool _saved_mt;
public:
ReferenceProcessorMTMutator(ReferenceProcessor* rp,
bool mt):
ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
bool mt):
_rp(rp) {
_saved_mt = _rp->discovery_is_mt();
_rp->set_mt_discovery(mt);
}
~ReferenceProcessorMTMutator() {
~ReferenceProcessorMTDiscoveryMutator() {
_rp->set_mt_discovery(_saved_mt);
}
};
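
The renamed ReferenceProcessorMTDiscoveryMutator is a scope guard: it saves discovery_is_mt(), overrides it, and restores the saved value in the destructor. A self-contained model of the same pattern (the types below are stand-ins, not HotSpot classes):

#include <cassert>

struct ProcessorModel { bool mt_discovery = false; };

class ScopedMTDiscovery {
  ProcessorModel* _p;
  bool _saved;
 public:
  ScopedMTDiscovery(ProcessorModel* p, bool mt) : _p(p), _saved(p->mt_discovery) {
    _p->mt_discovery = mt;           // override for this scope
  }
  ~ScopedMTDiscovery() {
    _p->mt_discovery = _saved;       // restore, even on early return
  }
};

int main() {
  ProcessorModel p;
  {
    ScopedMTDiscovery guard(&p, true);
    assert(p.mt_discovery);          // overridden inside the scope
  }
  assert(!p.mt_discovery);           // restored on scope exit
}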

View File

@ -1175,8 +1175,15 @@ void constantPoolOopDesc::copy_entry_to(constantPoolHandle from_cp, int from_i,
case JVM_CONSTANT_UnresolvedClass:
{
Symbol* k = from_cp->unresolved_klass_at(from_i);
to_cp->unresolved_klass_at_put(to_i, k);
// Can be resolved after checking tag, so check the slot first.
CPSlot entry = from_cp->slot_at(from_i);
if (entry.is_oop()) {
assert(entry.get_oop()->is_klass(), "must be");
// Already resolved
to_cp->klass_at_put(to_i, (klassOop)entry.get_oop());
} else {
to_cp->unresolved_klass_at_put(to_i, entry.get_symbol());
}
} break;
case JVM_CONSTANT_UnresolvedClassInError:
@ -1189,8 +1196,14 @@ void constantPoolOopDesc::copy_entry_to(constantPoolHandle from_cp, int from_i,
case JVM_CONSTANT_UnresolvedString:
{
Symbol* s = from_cp->unresolved_string_at(from_i);
to_cp->unresolved_string_at_put(to_i, s);
// Can be resolved after checking tag, so check the slot first.
CPSlot entry = from_cp->slot_at(from_i);
if (entry.is_oop()) {
// Already resolved (either string or pseudo-string)
to_cp->string_at_put(to_i, entry.get_oop());
} else {
to_cp->unresolved_string_at_put(to_i, entry.get_symbol());
}
} break;
case JVM_CONSTANT_Utf8:
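
Both hunks apply the same fix: the tag may be checked before another thread finishes resolving the entry, so copy_entry_to() now decides from the slot contents rather than the possibly stale tag. A standalone model of that slot-based dispatch (std::variant stands in for CPSlot's tagged pointer; the real encoding is elided):

#include <cstdio>
#include <string>
#include <variant>

struct KlassModel { std::string name; };

// A slot holds either a symbolic name (unresolved) or a resolved klass.
using Slot = std::variant<std::string, const KlassModel*>;

void copy_entry(const Slot& from, Slot& to) {
  if (std::holds_alternative<const KlassModel*>(from)) {
    to = std::get<const KlassModel*>(from);   // already resolved: keep the klass
  } else {
    to = std::get<std::string>(from);         // still symbolic: copy the name
  }
}

int main() {
  KlassModel k{"java/lang/String"};
  Slot resolved = static_cast<const KlassModel*>(&k);
  Slot symbolic = std::string("java/util/List");
  Slot dst;
  copy_entry(resolved, dst);
  std::printf("resolved copy: %s\n", std::get<const KlassModel*>(dst)->name.c_str());
  copy_entry(symbolic, dst);
  std::printf("symbolic copy: %s\n", std::get<std::string>(dst).c_str());
}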

View File

@ -190,7 +190,7 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
);
void set_dynamic_call(
Handle call_site, // Resolved java.dyn.CallSite (f1)
Handle call_site, // Resolved java.lang.invoke.CallSite (f1)
methodHandle signature_invoker // determines signature information
);

View File

@ -735,7 +735,12 @@ void instanceKlass::call_class_initializer(TRAPS) {
static int call_class_initializer_impl_counter = 0; // for debugging
methodOop instanceKlass::class_initializer() {
return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
methodOop clinit = find_method(
vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
if (clinit != NULL && clinit->has_valid_initializer_flags()) {
return clinit;
}
return NULL;
}
void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
@ -2415,7 +2420,7 @@ void instanceKlass::oop_print_on(oop obj, outputStream* st) {
st->cr();
} else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
st->print(BULLET"signature: ");
java_dyn_MethodType::print_signature(obj, st);
java_lang_invoke_MethodType::print_signature(obj, st);
st->cr();
}
}
@ -2446,7 +2451,7 @@ void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
}
} else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
st->print(" = ");
java_dyn_MethodType::print_signature(obj, st);
java_lang_invoke_MethodType::print_signature(obj, st);
} else if (java_lang_boxing_object::is_instance(obj)) {
st->print(" = ");
java_lang_boxing_object::print(obj, st);

View File

@ -193,8 +193,8 @@ class instanceKlass: public Klass {
typeArrayOop _inner_classes;
// Implementors of this interface (not valid if it overflows)
klassOop _implementors[implementors_limit];
// invokedynamic bootstrap method (a java.dyn.MethodHandle)
oop _bootstrap_method;
// invokedynamic bootstrap method (a java.lang.invoke.MethodHandle)
oop _bootstrap_method; // AllowTransitionalJSR292 ONLY
// Annotations for this class, or null if none.
typeArrayOop _class_annotations;
// Annotation objects (byte arrays) for fields, or null if no annotations.
@ -529,7 +529,7 @@ class instanceKlass: public Klass {
_enclosing_method_method_index = method_index; }
// JSR 292 support
oop bootstrap_method() const { return _bootstrap_method; }
oop bootstrap_method() const { return _bootstrap_method; } // AllowTransitionalJSR292 ONLY
void set_bootstrap_method(oop mh) { oop_store(&_bootstrap_method, mh); }
// jmethodID support
@ -817,7 +817,7 @@ private:
oop* adr_signers() const { return (oop*)&this->_signers;}
oop* adr_inner_classes() const { return (oop*)&this->_inner_classes;}
oop* adr_implementors() const { return (oop*)&this->_implementors[0];}
oop* adr_bootstrap_method() const { return (oop*)&this->_bootstrap_method;}
oop* adr_bootstrap_method() const { return (oop*)&this->_bootstrap_method;} // AllowTransitionalJSR292 ONLY
oop* adr_methods_jmethod_ids() const { return (oop*)&this->_methods_jmethod_ids;}
oop* adr_methods_cached_itable_indices() const { return (oop*)&this->_methods_cached_itable_indices;}
oop* adr_class_annotations() const { return (oop*)&this->_class_annotations;}

View File

@ -883,7 +883,7 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
int ime_num = 0;
// Skip first methodOop if it is a class initializer
int i = ((methodOop)methods()->obj_at(0))->name() != vmSymbols::class_initializer_name() ? 0 : 1;
int i = ((methodOop)methods()->obj_at(0))->is_static_initializer() ? 1 : 0;
// m, method_name, method_signature, klass reset each loop so they
// don't need preserving across check_signature_loaders call
@ -1121,7 +1121,7 @@ int klassItable::compute_itable_index(methodOop m) {
assert(index < methods->length(), "should find index for resolve_invoke");
}
// Adjust for <clinit>, which is left out of table if first method
if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->name() == vmSymbols::class_initializer_name()) {
if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->is_static_initializer()) {
index--;
}
return index;
@ -1135,7 +1135,7 @@ methodOop klassItable::method_for_itable_index(klassOop intf, int itable_index)
int index = itable_index;
// Adjust for <clinit>, which is left out of table if first method
if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->name() == vmSymbols::class_initializer_name()) {
if (methods->length() > 0 && ((methodOop)methods->obj_at(0))->is_static_initializer()) {
index++;
}
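
Both adjustments rely on the itable omitting a leading <clinit> from the methods array, so converting between array position and itable index shifts by one; the change is that the leading method is now recognized with is_static_initializer() rather than by name alone. A worked standalone example of the shift:

#include <cstdio>
#include <string>
#include <vector>

// methods array: [ <clinit>, m0, m1 ]  ->  itable rows: [ m0, m1 ]
int itable_index(const std::vector<std::string>& methods, int array_pos) {
  bool skip_first = !methods.empty() && methods[0] == "<clinit>";
  return skip_first ? array_pos - 1 : array_pos;
}

int array_pos_for(const std::vector<std::string>& methods, int itable_idx) {
  bool skip_first = !methods.empty() && methods[0] == "<clinit>";
  return skip_first ? itable_idx + 1 : itable_idx;
}

int main() {
  std::vector<std::string> methods = {"<clinit>", "m0", "m1"};
  std::printf("itable index of m1: %d\n", itable_index(methods, 2));    // 1
  std::printf("array pos for row 1: %d\n", array_pos_for(methods, 1));  // 2
}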

View File

@ -228,7 +228,7 @@ public:
return byte_offset_of(DataLayout, _header._struct._bci);
}
static ByteSize cell_offset(int index) {
return byte_offset_of(DataLayout, _cells[index]);
return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
}
// Return a value which, when or-ed as a byte into _flags, sets the flag.
static int flag_number_to_byte_constant(int flag_number) {
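
The old cell_offset() applied byte_offset_of (an offsetof wrapper) to _cells[index] with a runtime index, which offsetof does not portably support; the fix takes the constant offset of the array itself and adds the scaled index. A standalone illustration (LayoutModel is a stand-in, not DataLayout):

#include <cstddef>
#include <cstdio>

struct LayoutModel {
  int  _header;
  long _cells[1];    // trailing array, variably sized in practice
};

size_t cell_offset(int index) {
  // constant member designator only; scale the runtime index by hand
  return offsetof(LayoutModel, _cells) + index * sizeof(long);
}

int main() {
  std::printf("cell 0 at %zu, cell 2 at %zu\n", cell_offset(0), cell_offset(2));
}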

View File

@ -103,6 +103,12 @@ methodOop methodKlass::allocate(constMethodHandle xconst,
m->backedge_counter()->init();
m->clear_number_of_breakpoints();
#ifdef TIERED
m->set_rate(0);
m->set_prev_event_count(0);
m->set_prev_time(0);
#endif
assert(m->is_parsable(), "must be parsable here.");
assert(m->size() == size, "wrong size for object");
// We should not publish an unparsable object's reference

View File

@ -466,7 +466,20 @@ bool methodOopDesc::is_accessor() const {
bool methodOopDesc::is_initializer() const {
return name() == vmSymbols::object_initializer_name() || name() == vmSymbols::class_initializer_name();
return name() == vmSymbols::object_initializer_name() || is_static_initializer();
}
bool methodOopDesc::has_valid_initializer_flags() const {
return (is_static() ||
instanceKlass::cast(method_holder())->major_version() < 51);
}
bool methodOopDesc::is_static_initializer() const {
// For classfiles version 51 or greater, ensure that the clinit method is
// static. Non-static methods with the name "<clinit>" are not static
// initializers. (older classfiles exempted for backward compatibility)
return name() == vmSymbols::class_initializer_name() &&
has_valid_initializer_flags();
}
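
Restated, the rule these two predicates encode: from class-file major version 51 (JDK 7) on, a method named <clinit> counts as the static initializer only if it is declared static; older class files are exempt for compatibility. A self-contained sketch:

#include <cassert>
#include <string_view>

bool has_valid_initializer_flags(bool is_static, int major_version) {
  return is_static || major_version < 51;
}

bool is_static_initializer(std::string_view name, bool is_static, int major_version) {
  return name == "<clinit>" && has_valid_initializer_flags(is_static, major_version);
}

int main() {
  assert( is_static_initializer("<clinit>", true,  51));  // v51: must be static
  assert(!is_static_initializer("<clinit>", false, 51));  // v51 non-static: rejected
  assert( is_static_initializer("<clinit>", false, 50));  // legacy: exempt
}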
@ -842,7 +855,7 @@ bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
return true;
}
if (AllowTransitionalJSR292
if ((AllowTransitionalJSR292 || AllowInvokeForInvokeGeneric)
&& name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name))
return true;
return false;
@ -852,7 +865,7 @@ bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) {
enum {
_imcp_invoke_name = 1, // utf8: 'invokeExact' or 'invokeGeneric'
_imcp_invoke_signature, // utf8: (variable Symbol*)
_imcp_method_type_value, // string: (variable java/dyn/MethodType, sic)
_imcp_method_type_value, // string: (variable java/lang/invoke/MethodType, sic)
_imcp_limit
};
@ -1078,7 +1091,8 @@ void methodOopDesc::init_intrinsic_id() {
vmSymbols::SID name_id = vmSymbols::find_sid(name());
if (name_id == vmSymbols::NO_SID) return;
vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle)
if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle)
&& !(klass_id == vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle) && AllowTransitionalJSR292)
&& sig_id == vmSymbols::NO_SID) return;
jshort flags = access_flags().as_short();
@ -1104,7 +1118,8 @@ void methodOopDesc::init_intrinsic_id() {
break;
// Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*.
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle):
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle): // AllowTransitionalJSR292 ONLY
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle):
if (is_static() || !is_native()) break;
switch (name_id) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
@ -1114,11 +1129,12 @@ void methodOopDesc::init_intrinsic_id() {
id = vmIntrinsics::_invokeExact;
break;
case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):
if (AllowTransitionalJSR292) id = vmIntrinsics::_invokeExact;
if (AllowInvokeForInvokeGeneric) id = vmIntrinsics::_invokeGeneric;
else if (AllowTransitionalJSR292) id = vmIntrinsics::_invokeExact;
break;
}
break;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_InvokeDynamic):
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_InvokeDynamic):
if (!is_static() || !is_native()) break;
id = vmIntrinsics::_invokeDynamic;
break;
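
The flag handling in these hunks reads as a small dispatch table: invokeExact and invokeGeneric map directly to their intrinsics, while the legacy name invoke is honored only under a compatibility flag. A standalone restatement (flag names as in the diff; the direct mappings for the first two names are inferred from the surrounding context):

#include <cstdio>
#include <string_view>

enum class Intrinsic { None, InvokeExact, InvokeGeneric };

Intrinsic intrinsic_for(std::string_view name,
                        bool allow_invoke_for_invoke_generic,   // AllowInvokeForInvokeGeneric
                        bool allow_transitional_jsr292) {       // AllowTransitionalJSR292
  if (name == "invokeExact")   return Intrinsic::InvokeExact;
  if (name == "invokeGeneric") return Intrinsic::InvokeGeneric;
  if (name == "invoke") {                       // legacy spelling, flag-gated
    if (allow_invoke_for_invoke_generic) return Intrinsic::InvokeGeneric;
    if (allow_transitional_jsr292)       return Intrinsic::InvokeExact;
  }
  return Intrinsic::None;
}

int main() {
  std::printf("invoke -> %d\n", (int)intrinsic_for("invoke", true,  false));  // InvokeGeneric
  std::printf("invoke -> %d\n", (int)intrinsic_for("invoke", false, true));   // InvokeExact
}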

View File

@ -84,6 +84,11 @@
// | invocation_counter |
// | backedge_counter |
// |------------------------------------------------------|
// | prev_time (tiered only, 64 bit wide) |
// | |
// |------------------------------------------------------|
// | rate (tiered) |
// |------------------------------------------------------|
// | code (pointer) |
// | i2i (pointer) |
// | adapter (pointer) |
@ -124,6 +129,11 @@ class methodOopDesc : public oopDesc {
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
#ifdef TIERED
jlong _prev_time; // Previous time the rate was acquired
float _rate; // Events (invocation and backedge counter increments) per millisecond
#endif
#ifndef PRODUCT
int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
#endif
@ -304,6 +314,17 @@ class methodOopDesc : public oopDesc {
InvocationCounter* invocation_counter() { return &_invocation_counter; }
InvocationCounter* backedge_counter() { return &_backedge_counter; }
#ifdef TIERED
// We are reusing interpreter_invocation_count as a holder for the previous event count!
// We can do that since interpreter_invocation_count is not used in tiered.
int prev_event_count() const { return _interpreter_invocation_count; }
void set_prev_event_count(int count) { _interpreter_invocation_count = count; }
jlong prev_time() const { return _prev_time; }
void set_prev_time(jlong time) { _prev_time = time; }
float rate() const { return _rate; }
void set_rate(float rate) { _rate = rate; }
#endif
int invocation_count();
int backedge_count();
@ -497,6 +518,13 @@ class methodOopDesc : public oopDesc {
// returns true if the method is an initializer (<init> or <clinit>).
bool is_initializer() const;
// returns true if the method is static OR if the classfile version < 51
bool has_valid_initializer_flags() const;
// returns true if the method name is <clinit> and the method has
// valid static initializer flags.
bool is_static_initializer() const;
// compiled code support
// NOTE: code() is inherently racy as deopt can be clearing code
// simultaneously. Use with caution.
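
The header only declares the storage; _rate itself is maintained by the tiered compilation policy. A hedged sketch of how such a policy could derive events-per-millisecond from prev_time and prev_event_count (an illustrative model, not the actual policy code):

#include <cstdio>

struct CountersModel {
  long  prev_time = 0;         // ms timestamp of the last sample
  int   prev_event_count = 0;  // invocations + backedges at the last sample
  float rate = 0.0f;           // events per millisecond
};

void update_rate(CountersModel& m, long now_ms, int event_count) {
  long dt = now_ms - m.prev_time;
  if (dt > 0) {
    m.rate = float(event_count - m.prev_event_count) / float(dt);
    m.prev_time = now_ms;
    m.prev_event_count = event_count;
  }
}

int main() {
  CountersModel m;
  update_rate(m, 100, 5000);                        // 5000 events in 100 ms
  std::printf("rate = %.1f events/ms\n", m.rate);   // 50.0
}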

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -487,7 +487,7 @@ InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, J
if (caller_jvms->method()->is_method_handle_adapter())
new_depth_adjust -= 1; // don't count actions in MH or indy adapter frames
else if (callee_method->is_method_handle_invoke()) {
new_depth_adjust -= 1; // don't count method handle calls from java.dyn implem
new_depth_adjust -= 1; // don't count method handle calls from java.lang.invoke implem
}
if (new_depth_adjust != 0 && PrintInlining) {
stringStream nm1; caller_jvms->method()->print_name(&nm1);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -199,7 +199,7 @@ JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
// Load the target MethodHandle from the CallSite object.
Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);
address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();
@ -725,7 +725,7 @@ JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
// Load the target MethodHandle from the CallSite object.
Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
// Check if the MethodHandle is still the same.

View File

@ -1028,7 +1028,7 @@ void NonSafepointEmitter::emit_non_safepoint() {
// helper for Fill_buffer bailout logic
static void turn_off_compiler(Compile* C) {
if (CodeCache::unallocated_capacity() >= CodeCacheMinimumFreeSpace*10) {
if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) {
// Do not turn off compilation if a single giant method has
// blown the code cache size.
C->record_failure("excessive request to CodeCache");
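
The check moves from total unallocated capacity to the largest contiguous free block because a fragmented code cache can report ample free space while no single block fits the next request. A standalone illustration:

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  std::vector<int> free_blocks = {64, 48, 32, 64};   // KB, fragmented
  int total   = std::accumulate(free_blocks.begin(), free_blocks.end(), 0);
  int largest = *std::max_element(free_blocks.begin(), free_blocks.end());
  int request = 128;                                 // KB
  std::printf("total free %d KB, largest block %d KB\n", total, largest);
  if (largest < request) {
    // the old total-capacity test (208 KB) would pass, yet placement fails
    std::printf("request of %d KB cannot be placed\n", request);
  }
}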

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -143,8 +143,8 @@ void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool
return;
}
else {
// final non-static field of a trusted class ({java,sun}.dyn
// classes).
// final non-static field of a trusted class (classes in
// java.lang.invoke and sun.invoke packages and subpackages).
if (obj->is_Con()) {
const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
ciObject* constant_oop = oop_ptr->const_oop();

View File

@ -3386,7 +3386,22 @@ const Type *TypeAryPtr::xmeet( const Type *t ) const {
instance_id = InstanceBot;
tary = TypeAry::make(Type::BOTTOM, tary->_size);
}
} else // Non integral arrays.
// Must fall to bottom if exact klasses in upper lattice
// are not equal or super klass is exact.
if ( above_centerline(ptr) && klass() != tap->klass() &&
// meets with top[] and bottom[] are processed further down:
tap ->_klass != NULL && this->_klass != NULL &&
// both are exact and not equal:
((tap ->_klass_is_exact && this->_klass_is_exact) ||
// 'tap' is exact and super or unrelated:
(tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
// 'this' is exact and super or unrelated:
(this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
tary = TypeAry::make(Type::BOTTOM, tary->_size);
return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot );
}
bool xk = false;
switch (tap->ptr()) {
case AnyNull:
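
A simplified standalone model of the guard being added: above the centerline, two exact but different element klasses, or an exact klass that is not a subtype of its counterpart, cannot yield an exact meet, so the result falls to bottom[] (nullptr below; the non-bottom branch is a deliberate simplification of the full lattice):

#include <cstdio>
#include <string>

struct K { std::string name; const K* super; };

bool is_subtype_of(const K* a, const K* b) {
  for (const K* k = a; k != nullptr; k = k->super)
    if (k == b) return true;
  return false;
}

const K* meet_exact(const K* a, bool a_exact, const K* b, bool b_exact) {
  if ((a_exact && b_exact && a != b) ||          // both exact, not equal
      (a_exact && !is_subtype_of(a, b)) ||       // 'a' exact, super or unrelated
      (b_exact && !is_subtype_of(b, a))) {       // 'b' exact, super or unrelated
    return nullptr;                              // falls to bottom[]
  }
  return is_subtype_of(a, b) ? b : a;            // simplified non-bottom result
}

int main() {
  K object{"Object", nullptr}, string{"String", &object}, integer{"Integer", &object};
  const K* m = meet_exact(&string, true, &integer, true);
  std::printf("meet(String[] exact, Integer[] exact) -> %s\n",
              m ? m->name.c_str() : "bottom[]");
}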
@ -3766,7 +3781,7 @@ ciKlass* TypeAryPtr::klass() const {
// Oops, need to compute _klass and cache it
ciKlass* k_ary = compute_klass();
if( this != TypeAryPtr::OOPS ) {
if( this != TypeAryPtr::OOPS && this->dual() != TypeAryPtr::OOPS ) {
// The _klass field acts as a cache of the underlying
// ciKlass for this array type. In order to set the field,
// we need to cast away const-ness.

View File

@ -919,15 +919,24 @@ JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
nmethod* nm) {
JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
event._event_data.compiled_method_load = nm;
nmethodLocker::lock_nmethod(nm); // will be unlocked when posted
// Keep the nmethod alive until the ServiceThread can process
// this deferred event.
nmethodLocker::lock_nmethod(nm);
return event;
}
JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
jmethodID id, const void* code) {
nmethod* nm, jmethodID id, const void* code) {
JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
event._event_data.compiled_method_unload.nm = nm;
event._event_data.compiled_method_unload.method_id = id;
event._event_data.compiled_method_unload.code_begin = code;
// Keep the nmethod alive until the ServiceThread can process
// this deferred event. This will keep the memory for the
// generated code from being reused too early. We pass
// zombie_ok == true here so that our nmethod that was just
// made into a zombie can be locked.
nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
return event;
}
JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event(
@ -946,14 +955,19 @@ void JvmtiDeferredEvent::post() {
case TYPE_COMPILED_METHOD_LOAD: {
nmethod* nm = _event_data.compiled_method_load;
JvmtiExport::post_compiled_method_load(nm);
// done with the deferred event so unlock the nmethod
nmethodLocker::unlock_nmethod(nm);
break;
}
case TYPE_COMPILED_METHOD_UNLOAD:
case TYPE_COMPILED_METHOD_UNLOAD: {
nmethod* nm = _event_data.compiled_method_unload.nm;
JvmtiExport::post_compiled_method_unload(
_event_data.compiled_method_unload.method_id,
_event_data.compiled_method_unload.code_begin);
// done with the deferred event so unlock the nmethod
nmethodLocker::unlock_nmethod(nm);
break;
}
case TYPE_DYNAMIC_CODE_GENERATED:
JvmtiExport::post_dynamic_code_generated_internal(
_event_data.dynamic_code_generated.name,
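
The pattern in both factory methods is lock-until-posted: the thread that queues a deferred event pins the nmethod, and the ServiceThread unpins it only after the event is posted, so the generated code cannot be reclaimed while the event still references it. A standalone model of that lifecycle:

#include <cstdio>

struct NMethodModel { int lock_count = 0; };

struct DeferredEventModel {
  NMethodModel* nm;

  static DeferredEventModel make(NMethodModel* nm) {
    nm->lock_count++;                 // pin: keep alive until posted
    return DeferredEventModel{nm};
  }

  void post() {
    std::printf("posting; nmethod pinned (count=%d)\n", nm->lock_count);
    nm->lock_count--;                 // done with the deferred event
  }
};

int main() {
  NMethodModel nm;
  DeferredEventModel e = DeferredEventModel::make(&nm);  // producer side
  e.post();                                              // ServiceThread side
  std::printf("final lock count: %d\n", nm.lock_count);  // 0: reclaimable
}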

View File

@ -458,6 +458,7 @@ class JvmtiDeferredEvent VALUE_OBJ_CLASS_SPEC {
union {
nmethod* compiled_method_load;
struct {
nmethod* nm;
jmethodID method_id;
const void* code_begin;
} compiled_method_unload;
@ -477,7 +478,7 @@ class JvmtiDeferredEvent VALUE_OBJ_CLASS_SPEC {
// Factory methods
static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
KERNEL_RETURN_(JvmtiDeferredEvent());
static JvmtiDeferredEvent compiled_method_unload_event(
static JvmtiDeferredEvent compiled_method_unload_event(nmethod* nm,
jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent());
static JvmtiDeferredEvent dynamic_code_generated_event(
const char* name, const void* begin, const void* end)

View File

@ -1084,7 +1084,10 @@ bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp,
jbyte old_tag = old_cp->tag_at(old_i).value();
switch (old_tag) {
case JVM_CONSTANT_Class:
case JVM_CONSTANT_UnresolvedClass:
// revert the copy to JVM_CONSTANT_UnresolvedClass
// May be resolving while calling this so do the same for
// JVM_CONSTANT_UnresolvedClass (klass_name_at() deals with transition)
(*merge_cp_p)->unresolved_klass_at_put(old_i,
old_cp->klass_name_at(old_i));
break;

Some files were not shown because too many files have changed in this diff.