Lana Steuck 2009-01-29 18:33:52 -08:00
commit 0d1c16da0e
1450 changed files with 92783 additions and 38660 deletions

View File

@ -12,3 +12,10 @@ a9f1805e3ba9ca520cad199d522c84af5433e85a jdk7-b32
6d909d5803e3a22850e6c4e5a75b888742ee7e20 jdk7-b35
d718a441936196b93d8bc9f084933af9a4c2a350 jdk7-b36
c2036bf76829c03b99108fffab52e20910a9be4f jdk7-b37
a2879b2837f5a4c87e9542efe69ef138194af8ff jdk7-b38
126f365cec6c3c2c72de934fa1c64b5f082b55b5 jdk7-b39
3c53424bbe3bb77e01b468b4b0140deec33e11fc jdk7-b40
3cb2a607c347934f8e7e86f840a094c28b08d9ea jdk7-b41
caf58ffa084568990cbb3441f9ae188e36b31770 jdk7-b42
41bd0a702bc8ec6feebd725a63e7c3227f82ab11 jdk7-b43
5843778bda89b1d5ac8e1aa05e26930ac90b3145 jdk7-b44

View File

@ -12,3 +12,10 @@ bb1ef4ee3d2c8cbf43a37d372325a7952be590b9 jdk7-b33
143c1abedb7d3095eff0f9ee5fec9bf48e3490fc jdk7-b35
4b4f5fea8d7d0743f0c30d91fcd9bf9d96e5d2ad jdk7-b36
744554f5a3290e11c71cd2ddb1aff49e431f9ed0 jdk7-b37
cc47a76899ed33a2c513cb688348244c9b5a1288 jdk7-b38
ab523b49de1fc73fefe6855ce1e0349bdbd7af29 jdk7-b39
44be42de6693063fb191989bf0e188de2fa51e7c jdk7-b40
541bdc5ad32fc33255944d0a044ad992f3d915e8 jdk7-b41
94052b87287303527125026fe4b2698cf867ea83 jdk7-b42
848e684279d2ba42577d9621d5b2e5af3823d12d jdk7-b43
a395e3aac4744cc9033fcd819fad1239a45add52 jdk7-b44

View File

@ -98,7 +98,8 @@
<h2><a name="MBE">Minimum Build Environments</a></h2>
<blockquote>
This file often describes specific requirements for what we call the
"minimum build environments" (MBE) for the JDK.
"minimum build environments" (MBE) for this
specific release of the JDK.
Building with the MBE will generate the most compatible
bits that install on, and run correctly on, the most variations
of the same base OS and hardware architecture.
@ -116,22 +117,22 @@
<tr>
<th>Base OS and Architecture</th>
<th>OS</th>
<th>Compiler</th>
<th>C/C++ Compiler</th>
</tr>
</thead>
<tbody>
<tr>
<td>Linux X86 (32bit)</td>
<td>Red Hat Enterprise Linux 4 </td>
<td>Linux X86 (32-bit)</td>
<td>Fedora 9</td>
<td>gcc 4 </td>
</tr>
<tr>
<td>Linux X64 (64bit)</td>
<td>Red Hat Enterprise Linux 4 </td>
<td>Linux X64 (64-bit)</td>
<td>Fedora 9</td>
<td>gcc 4 </td>
</tr>
<tr>
<td>Solaris SPARC (32bit)</td>
<td>Solaris SPARC (32-bit)</td>
<td>Solaris 10 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
@ -140,7 +141,7 @@
<td>Sun Studio 12</td>
</tr>
<tr>
<td>Solaris SPARCV9 (64bit)</td>
<td>Solaris SPARCV9 (64-bit)</td>
<td>Solaris 10 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
@ -149,7 +150,7 @@
<td>Sun Studio 12</td>
</tr>
<tr>
<td>Solaris X86 (32bit)</td>
<td>Solaris X86 (32-bit)</td>
<td>Solaris 10 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
@ -158,7 +159,7 @@
<td>Sun Studio 12</td>
</tr>
<tr>
<td>Solaris X64 (64bit)</td>
<td>Solaris X64 (64-bit)</td>
<td>Solaris 10 + patches
<br>
See <a href="http://sunsolve.sun.com/pub-cgi/show.pl?target=patches/JavaSE" target="_blank">
@ -167,17 +168,28 @@
<td>Sun Studio 12</td>
</tr>
<tr>
<td>Windows X86 (32bit)</td>
<td>Windows X86 (32-bit)</td>
<td>Windows XP</td>
<td>Microsoft Visual Studio .NET 2003 Professional</td>
<td>Microsoft Visual Studio C++ 2008 Standard Edition</td>
</tr>
<tr>
<td>Windows X64 (64bit)</td>
<td>Windows X64 (64-bit)</td>
<td>Windows Server 2003 - Enterprise x64 Edition</td>
<td>Microsoft Platform SDK - April 2005</td>
</tr>
</tbody>
</table>
<p>
These same sources do indeed build on many more systems than the
older generation systems listed above; again, the above is just a minimum.
<p>
Compilation problems with newer or different C/C++ compilers are
common.
Similarly, compilation problems related to changes to the
<tt>/usr/include</tt> or system header files are also
common with newer or unreleased OS versions.
Please report these types of problems as bugs so that they
can be dealt with accordingly.
</blockquote>
<!-- ------------------------------------------------------ -->
<hr>
@ -488,7 +500,7 @@
not work due to a lack of support for MS-DOS drive letter paths
like <tt>C:/</tt> or <tt>C:\</tt>.
Use a 3.80 version, or find a newer
version that has this problem fixed, like 3.82.
version that has this problem fixed.
The older 3.80 version of make.exe can be downloaded with this
<a href="http://cygwin.paracoda.com/release/make/make-3.80-1.tar.bz2" target="_blank">
link</a>.
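<p>
For example, a quick way to check which <tt>make</tt> the build will pick up
(a sketch; assumes <tt>make</tt> is already on the <tt>PATH</tt>):
<blockquote>
<pre>
# Print the version of the make found first on the PATH; avoid versions
# with the drive letter path problem described above.
make --version
</pre>
</blockquote>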
@ -575,8 +587,8 @@
</li>
<li>
Install
<a href="#ant">Ant</a>, set
<tt><a href="#ANT_HOME">ANT_HOME</a></tt>.
<a href="#ant">Ant</a>,
make sure it is in your PATH.
</li>
</ol>
</blockquote>
@ -592,7 +604,7 @@
Approximately 1.4 GB of free disk
space is needed for a 32-bit build.
<p>
If you are building the 64bit version, you should
If you are building the 64-bit version, you should
run the command "isainfo -v" to verify that you have a
64-bit installation; it should say <tt>sparcv9</tt> or
<tt>amd64</tt>.
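<p>
For example, from a Solaris shell (a minimal check; uses the standard
<tt>isainfo</tt> utility):
<blockquote>
<pre>
# A 64-bit capable Solaris installation lists sparcv9 or amd64 here.
isainfo -v
</pre>
</blockquote>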
@ -640,8 +652,8 @@
</li>
<li>
Install
<a href="#ant">Ant</a>, set
<tt><a href="#ANT_HOME">ANT_HOME</a></tt>.
<a href="#ant">Ant</a>,
make sure it is in your PATH.
</li>
</ol>
</blockquote>
@ -650,11 +662,11 @@
<h3><a name="windows">Basic Windows System Setup</a></h3>
<blockquote>
<strong>i586 only:</strong>
The minimum recommended hardware for building the 32bit or X86
The minimum recommended hardware for building the 32-bit or X86
Windows version is a Pentium class processor or better, at least
512 MB of RAM, and approximately 600 MB of free disk space.
<strong>
NOTE: The Windows 2000 build machines need to use the
NOTE: The Windows build machines need to use the
file system NTFS.
Build machines formatted to FAT32 will not work
because FAT32 doesn't support case-sensitivity in file names.
@ -719,8 +731,11 @@
</li>
<li>
Install the
<a href="#msvc">Microsoft Visual Studio .NET 2003 Professional</a> (32bit) or the
<a href="#mssdk">Microsoft Platform SDK</a> (64bit).
<a href="#msvc">Microsoft Visual Studio Compilers</a> (32-bit).
</li>
<li>
Install the
<a href="#mssdk">Microsoft Platform SDK</a>.
</li>
<li>
Setup all environment variables for compilers
@ -732,7 +747,8 @@
</li>
<li>
Install
<a href="#ant">Ant</a>, set
<a href="#ant">Ant</a>,
make sure it is in your PATH and set
<tt><a href="#ANT_HOME">ANT_HOME</a></tt>.
</li>
</ol>
@ -787,7 +803,9 @@
you must first download and install the appropriate
binary plug bundles for the OpenJDK; go to the
<a href="http://openjdk.java.net" target="_blank">OpenJDK</a> site and select
the "<b>Bundles(7)</b>" link and download the binaryplugs for
the
"<b>Bundles(7)</b>"
link and download the binaryplugs for
your particular platform.
The file downloaded is a jar file that must be extracted by running
the jar file with:
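<p>
(A sketch of that extraction step; the bundle name below is hypothetical,
substitute the file actually downloaded for your platform.)
<blockquote>
<pre>
# Run from the directory where the binary plugs should be installed.
java -jar jdk-7-ea-plugs-bNN-linux-i586.jar
</pre>
</blockquote>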
@ -821,16 +839,14 @@
<blockquote>
All OpenJDK builds require access to at least Ant 1.6.5.
The Ant tool is available from the
<a href="http://ant.apache.org/antlibs/bindownload.cgi" target="_blank">
<a href="http://ant.apache.org" target="_blank">
Ant download site</a>.
You should always set
You should always make sure <tt>ant</tt> is in your PATH, and
on Windows you may also need to set
<tt><a href="#ANT_HOME">ANT_HOME</a></tt>
to point to the location of
the Ant installation; this is the directory pathname
that contains the <tt>bin</tt> and <tt>lib</tt> directories.
It's also a good idea to also place its <tt>bin</tt> directory
in the <tt>PATH</tt> environment variable, although it's
not absolutely required.
</blockquote>
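<p>
For example, a minimal sketch of setting up Ant from a shell (the install
location below is an assumption; adjust it to wherever Ant was unpacked):
<blockquote>
<pre>
# Hypothetical Ant install location.
export ANT_HOME=/opt/apache-ant-1.6.5
export PATH=$ANT_HOME/bin:$PATH
# Sanity check: should report Ant 1.6.5 or newer.
ant -version
</pre>
</blockquote>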
<!-- ------------------------------------------------------ -->
<h4><a name="cacerts">Certificate Authority File (cacert)</a></h4>
@ -862,25 +878,9 @@
<blockquote>
<strong><a name="gcc">Linux gcc/binutils</a></strong>
<blockquote>
The GNU gcc compiler version should be 3.2.2 or newer.
The binutils package should be 2.11.93.0.2-11 or newer.
The GNU gcc compiler version should be 4 or newer.
The compiler used should be the default compiler installed
in <tt>/usr/bin</tt>.
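<p>
For example (a quick sanity check that the default compiler meets this):
<blockquote>
<pre>
# The default compiler in /usr/bin should report gcc 4 or newer.
/usr/bin/gcc --version
</pre>
</blockquote>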
<p>
Older Linux systems may require a gcc and binutils update.
The Redhat Enterprise Advanced Server 2.1 update 2 system
is one of these systems.
RedHat Linux users can obtain this binutils package from
<a href="http://www.redhat.com"
target="_blank">Redhat web site</a>.
You will need to remove the default compiler and binutils
packages and install the required packages
into the default location on the system.
However if you have a new video card driver, like
Geforce 4 it is best to use
the same compiler as the kernel was built with to
build the new video card driver module.
So you should build the modules before making this change.
</blockquote>
<strong><a name="studio">Solaris: Sun Studio</a></strong>
<blockquote>
@ -903,19 +903,20 @@
are also an option, although these compilers have not
been extensively used yet.
</blockquote>
<strong><a name="msvc">Windows i586: Microsoft Visual Studio .NET 2003 Professional</a></strong>
<strong><a name="msvc">Windows i586: Microsoft Visual Studio Compilers</a></strong>
<blockquote>
The 32-bit OpenJDK Windows build
requires Microsoft Visual Studio .NET 2003 (VS2003) Professional
requires
Microsoft Visual Studio C++ 2008 (VS2008) Standard
Edition compiler.
The compiler and other tools are expected to reside
in the location defined by the variable <tt>VS71COMNTOOLS</tt> which
is set by the Microsoft Visual Studio .NET installer.
in the location defined by the variable
<tt>VS90COMNTOOLS</tt> which
is set by the Microsoft Visual Studio installer.
<p>
Once the compiler is installed,
it is recommended that you run <tt>VCVARS32.BAT</tt>
to set the compiler environment variables
<tt>MSVCDIR</tt>,
<tt>INCLUDE</tt>,
<tt>LIB</tt>, and
<tt>PATH</tt>
@ -923,16 +924,12 @@
OpenJDK.
The above environment variables <b>MUST</b> be set.
<p>
The Microsoft Visual Studio .NET 2005 (VS2005) compiler
will not work at this time due to the new runtime dll
and the manifest requirements.
<p>
<b>WARNING:</b> Make sure you check out the
<a href="#cygwin">CYGWIN link.exe WARNING</a>.
The path <tt>/usr/bin</tt> must be after the path to the
Visual Studio product.
</blockquote>
<strong><a name="mssdk">Windows X64: Microsoft Platform SDK April 2005</a></strong>
<strong><a name="mssdk">Windows: Microsoft Platform SDK</a></strong>
<blockquote>
On <b>X64</b>, the Microsoft Platform Software
Development Kit (SDK), April 2005 Edition compiler,
@ -953,10 +950,9 @@
OpenJDK.
The above environment variables <b>MUST</b> be set.
<p>
Note that this compiler may say its version is a
Microsoft Visual Studio .NET 2005 (VS2005), but be careful,
it will not match the official VS2005 product.
This Platform SDK compiler is only used on X64 builds.
This Platform SDK compiler is only used on X64 builds
but other parts of the Platform SDK may be used
for the X86 builds.
</blockquote>
</blockquote>
<!-- ------------------------------------------------------ -->
@ -1241,37 +1237,37 @@
<strong><a name="msvcrt"><tt>MSVCRT.DLL</tt></a></strong>
<blockquote>
<strong>i586 only:</strong>
The OpenJDK 32bit build requires access to
<tt>MSVCRT.DLL</tt> version 6.00.8337.0 or newer.
The OpenJDK 32-bit build requires access to a redistributable
<tt>MSVCRT.DLL</tt>.
If the <tt>MSVCRT.DLL</tt> is not installed in
the system32 directory set the
<a href="#ALT_MSVCRT_DLL_PATH"><tt>ALT_MSVCRT_DLL_PATH</tt></a>
variable to the location.
variable to the location of this file.
<p>
<strong>X64 only:</strong>
The OpenJDK 64bit build requires access to
<tt>MSVCRT.DLL</tt> version 7.0.3790.0 or newer, which is
The OpenJDK 64-bit build requires access to a redistributable
<tt>MSVCRT.DLL</tt>, which is
usually supplied by the
<a href="#mssdk">Platform SDK</a>.
If it is not available from the Platform SDK,
set the
<a href="#ALT_MSVCRT_DLL_PATH"><tt>ALT_MSVCRT_DLL_PATH</tt></a>
variable to the location.
variable to the location of this file.
</blockquote>
<strong><tt><a name="msvcr71">MSVCR71.DLL</a></tt></strong>
<strong><tt><a name="msvcr90">MSVCR90.DLL</a></tt></strong>
<blockquote>
<strong>i586 only:</strong>
The
OpenJDK
build requires access to
MSVCR71.DLL version 7.10.3052.4 or newer which should be
build requires access to a redistributable
<tt>MSVCR90.DLL</tt> which should be
supplied by the
<a href="#msvc">Visual Studio product</a>
If the <tt>MSVCR71.DLL</tt> is not available from the
<a href="#msvc">Visual Studio product</a>.
If the <tt>MSVCR90.DLL</tt> is not available from the
Visual Studio product
set the
<a href="#ALT_MSVCR71_DLL_PATH"><tt>ALT_MSVCR71_DLL_PATH</tt></a>
variable to the location.
<a href="#ALT_MSVCR90_DLL_PATH"><tt>ALT_MSVCR90_DLL_PATH</tt></a>
variable to the location of this file.
</blockquote>
</blockquote>
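<p>
If the build cannot locate these DLLs on its own, a minimal sketch of pointing
it at them from a Cygwin shell (both directory paths below are assumptions;
use wherever the DLLs actually live on the build machine):
<blockquote>
<pre>
# Hypothetical directory containing MSVCRT.DLL:
export ALT_MSVCRT_DLL_PATH="C:/WINDOWS/system32"
# Hypothetical directory containing the VS2008 MSVCR90.DLL redistributable:
export ALT_MSVCR90_DLL_PATH="C:/Program Files/Microsoft Visual Studio 9.0/VC/redist/x86/Microsoft.VC90.CRT"
</pre>
</blockquote>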
<!-- ------------------------------------------------------ -->
@ -1359,13 +1355,38 @@
document) that can impact the build are:
<blockquote>
<dl>
<dt><a name="ALT_BINARY_PLUGS_PATH"><tt>ALT_BINARY_PLUGS_PATH</tt></a></dt>
<dt><a name="path"><tt>PATH</tt></a> </dt>
<dd>Typically you want to set the <tt>PATH</tt> to include:
<ul>
<li>The location of the GNU make binary</li>
<li>The location of the Bootstrap JDK <tt>java</tt>
(see <a href="#bootjdk">Bootstrap JDK</a>)</li>
<li>The location of the C/C++ compilers
(see <a href="#compilers"><tt>compilers</tt></a>)</li>
<li>The location or locations for the Unix command utilities
(e.g. <tt>/usr/bin</tt>)</li>
</ul>
</dd>
<dt><tt>MILESTONE</tt> </dt>
<dd>
The location of the binary plugs installation.
See <a href="#binaryplugs">Binary Plugs</a> for more information.
You should always have a local copy of a
recent Binary Plugs install image
and set this variable to that location.
The milestone name for the build (<i>e.g.</i>"beta").
The default value is "internal".
</dd>
<dt><tt>BUILD_NUMBER</tt> </dt>
<dd>
The build number for the build (<i>e.g.</i> "b27").
The default value is "b00".
</dd>
<dt><a name="arch_data_model"><tt>ARCH_DATA_MODEL</tt></a></dt>
<dd>The <tt>ARCH_DATA_MODEL</tt> variable
is used to specify whether the build is to generate 32-bit or 64-bit
binaries.
The Solaris build supports either 32-bit or 64-bit builds, but
Windows and Linux will support only one, depending on the specific
OS being used.
Normally, setting this variable is only necessary on Solaris.
Set <tt>ARCH_DATA_MODEL</tt> to <tt>32</tt> for generating 32-bit binaries,
or to <tt>64</tt> for generating 64-bit binaries.
</dd>
<dt><a name="ALT_BOOTDIR"><tt>ALT_BOOTDIR</tt></a></dt>
<dd>
@ -1374,25 +1395,89 @@
You should always install your own local Bootstrap JDK and
always set <tt>ALT_BOOTDIR</tt> explicitly.
</dd>
<dt><a name="ALT_BUILD_BINARY_PLUGS_PATH"><tt>ALT_BUILD_BINARY_PLUGS_PATH</tt></a></dt>
<dt><a name="ALT_BINARY_PLUGS_PATH"><tt>ALT_BINARY_PLUGS_PATH</tt></a></dt>
<dd>
These are useful in managing builds on multiple platforms.
The default network location for all of the binary plug images
for all platforms.
If <tt><a href="#ALT_BINARY_PLUGS_PATH">ALT_BINARY_PLUGS_PATH</a></tt>
is not set, this directory will be used and should contain
the following directories:
<tt>solaris-sparc</tt>,
<tt>solaris-i586</tt>,
<tt>solaris-sparcv9</tt>,
<tt>solaris-amd64</tt>,
<tt>linux-i586</tt>,
<tt>linux-amd64</tt>,
<tt>windows-i586</tt>,
and
<tt>windows-amd64</tt>.
Where each of these directories contain the binary plugs image
for that platform.
The location of the binary plugs installation.
See <a href="#binaryplugs">Binary Plugs</a> for more information.
You should always have a local copy of a
recent Binary Plugs install image
and set this variable to that location.
</dd>
<dt><a name="ALT_JDK_IMPORT_PATH"><tt>ALT_JDK_IMPORT_PATH</tt></a></dt>
<dd>
The location of a previously built JDK installation.
See <a href="#importjdk">Optional Import JDK</a> for more information.
</dd>
<dt><a name="ALT_OUTPUTDIR"><tt>ALT_OUTPUTDIR</tt></a> </dt>
<dd>
An override for specifying the (absolute) path of where the
build output is to go.
The default output directory will be build/<i>platform</i>.
</dd>
<dt><a name="ALT_COMPILER_PATH"><tt>ALT_COMPILER_PATH</tt></a> </dt>
<dd>
The location of the C/C++ compiler.
The default varies depending on the platform.
</dd>
<dt><tt><a name="ALT_CACERTS_FILE">ALT_CACERTS_FILE</a></tt></dt>
<dd>
The location of the <a href="#cacerts">cacerts</a> file.
The default will refer to
<tt>jdk/src/share/lib/security/cacerts</tt>.
</dd>
<dt><a name="ALT_CUPS_HEADERS_PATH"><tt>ALT_CUPS_HEADERS_PATH</tt></a> </dt>
<dd>
The location of the CUPS header files.
See <a href="#cups">CUPS information</a> for more information.
If this path does not exist the fallback path is
<tt>/usr/include</tt>.
</dd>
<dt><a name="ALT_FREETYPE_LIB_PATH"><tt>ALT_FREETYPE_LIB_PATH</tt></a></dt>
<dd>
The location of the FreeType shared library.
See <a href="#freetype">FreeType information</a> for details.
</dd>
<dt><a name="ALT_FREETYPE_HEADERS_PATH"><tt>ALT_FREETYPE_HEADERS_PATH</tt></a></dt>
<dd>
The location of the FreeType header files.
See <a href="#freetype">FreeType information</a> for details.
</dd>
<dt><a name="ALT_JDK_DEVTOOLS_PATH"><tt>ALT_JDK_DEVTOOLS_PATH</tt></a></dt>
<dd>
The default root location of the devtools.
The default value is
<tt>$(ALT_SLASH_JAVA)/devtools</tt>.
</dd>
<dt><tt><a name="ALT_DEVTOOLS_PATH">ALT_DEVTOOLS_PATH</a></tt> </dt>
<dd>
The location of tools like the
<a href="#zip"><tt>zip</tt> and <tt>unzip</tt></a>
binaries, but might also contain the GNU make utility
(<tt><i>gmake</i></tt>).
So this area is a bit of a grab bag, especially on Windows.
The default value depends on the platform and
Unix Commands being used.
On Linux the default will be
<tt>$(ALT_JDK_DEVTOOLS_PATH)/linux/bin</tt>,
on Solaris
<tt>$(ALT_JDK_DEVTOOLS_PATH)/<i>{sparc,i386}</i>/bin</tt>,
and on Windows with CYGWIN
<tt>/usr/bin</tt>.
</dd>
<dt><a name="ALT_UNIXCCS_PATH"><tt>ALT_UNIXCCS_PATH</tt></a></dt>
<dd>
<strong>Solaris only:</strong>
An override for specifying where the Unix CCS
command set is located.
The default location is <tt>/usr/ccs/bin</tt>.
</dd>
<dt><a name="ALT_SLASH_JAVA"><tt>ALT_SLASH_JAVA</tt></a></dt>
<dd>
The default root location for many of the ALT path locations
of the following ALT variables.
The default value is
<tt>"/java"</tt> on Solaris and Linux,
<tt>"J:"</tt> on Windows.
</dd>
<dt><a name="ALT_BUILD_JDK_IMPORT_PATH"><tt>ALT_BUILD_JDK_IMPORT_PATH</tt></a></dt>
<dd>
@ -1414,166 +1499,57 @@
Where each of these directories contain the import JDK image
for that platform.
</dd>
<dt><tt><a name="ALT_CACERTS_FILE">ALT_CACERTS_FILE</a></tt></dt>
<dt><a name="ALT_BUILD_BINARY_PLUGS_PATH"><tt>ALT_BUILD_BINARY_PLUGS_PATH</tt></a></dt>
<dd>
The location of the <a href="#cacerts">cacerts</a> file.
The default will refer to
<tt>jdk/src/share/lib/security/cacerts</tt>.
These are useful in managing builds on multiple platforms.
The default network location for all of the binary plug images
for all platforms.
If <tt><a href="#ALT_BINARY_PLUGS_PATH">ALT_BINARY_PLUGS_PATH</a></tt>
is not set, this directory will be used and should contain
the following directories:
<tt>solaris-sparc</tt>,
<tt>solaris-i586</tt>,
<tt>solaris-sparcv9</tt>,
<tt>solaris-amd64</tt>,
<tt>linux-i586</tt>,
<tt>linux-amd64</tt>,
<tt>windows-i586</tt>,
and
<tt>windows-amd64</tt>.
Where each of these directories contain the binary plugs image
for that platform.
</dd>
<dt><a name="ALT_COMPILER_PATH"><tt>ALT_COMPILER_PATH</tt></a> </dt>
<dt><strong>Windows specific:</strong></dt>
<dd>
The location of the C/C++ compiler.
The default varies depending on the platform.
</dd>
<dt><a name="ALT_CUPS_HEADERS_PATH"><tt>ALT_CUPS_HEADERS_PATH</tt></a> </dt>
<dd>
The location of the CUPS header files.
See <a href="#cups">CUPS information</a> for more information.
If this path does not exist the fallback path is
<tt>/usr/include</tt>.
</dd>
<dt><tt><a name="ALT_DEVTOOLS_PATH">ALT_DEVTOOLS_PATH</a></tt> </dt>
<dd>
The location of tools like the
<a href="#zip"><tt>zip</tt> and <tt>unzip</tt></a>
binaries, but might also contain the GNU make utility
(<tt><i>gmake</i></tt>).
So this area is a bit of a grab bag, especially on Windows.
The default value depends on the platform and
Unix Commands being used.
On Linux the default will be
<tt>$(ALT_JDK_DEVTOOLS_PATH)/linux/bin</tt>,
on Solaris
<tt>$(ALT_JDK_DEVTOOLS_PATH)/<i>{sparc,i386}</i>/bin</tt>,
on Windows with MKS
<tt>%SYSTEMDRIVE%/UTILS</tt>,
and on Windows with CYGWIN
<tt>/usr/bin</tt>.
</dd>
<dt><tt><a name="ALT_DXSDK_PATH">ALT_DXSDK_PATH</a></tt> </dt>
<dd>
<strong>Windows Only:</strong>
The location of the
<a href="#dxsdk">Microsoft DirectX 9 SDK</a>.
The default will be to try and use the DirectX environment
variable <tt>DXSDK_DIR</tt>,
failing that, look in <tt>C:/DXSDK</tt>.
</dd>
<dt><a name="ALT_FREETYPE_HEADERS_PATH"><tt>ALT_FREETYPE_HEADERS_PATH</tt></a></dt>
<dd>
The location of the FreeType header files.
See <a href="#freetype">FreeType information</a> for details.
</dd>
<dt><a name="ALT_FREETYPE_LIB_PATH"><tt>ALT_FREETYPE_LIB_PATH</tt></a></dt>
<dd>
The location of the FreeType shared library.
See <a href="#freetype">FreeType information</a> for details.
</dd>
<dt><a name="ALT_JDK_DEVTOOLS_PATH"><tt>ALT_JDK_DEVTOOLS_PATH</tt></a></dt>
<dd>
The default root location of the devtools.
The default value is
<tt>$(ALT_SLASH_JAVA)/devtools</tt>.
</dd>
<dt><a name="ALT_JDK_IMPORT_PATH"><tt>ALT_JDK_IMPORT_PATH</tt></a></dt>
<dd>
The location of a previously built JDK installation.
See <a href="#importjdk">Optional Import JDK</a> for more information.
</dd>
<dt><a name="ALT_MSDEVTOOLS_PATH"><tt>ALT_MSDEVTOOLS_PATH</tt></a> </dt>
<dd>
<strong>Windows Only:</strong>
The location of the Microsoft Visual Studio .NET 2003
tools 'bin' directory.
The default is usually derived from
<a href="#ALT_COMPILER_PATH"><tt>ALT_COMPILER_PATH</tt></a>.
</dd>
<dt><tt><a name="ALT_MSVCR71_DLL_PATH">ALT_MSVCR71_DLL_PATH</a></tt> </dt>
<dd>
<strong>Windows i586 only:</strong>
The location of the
<a href="#msvcr71"><tt>MSVCR71.DLL</tt></a>.
</dd>
<dt><tt><a name="ALT_MSVCRT_DLL_PATH">ALT_MSVCRT_DLL_PATH</a></tt> </dt>
<dd>
<strong>Windows Only:</strong>
The location of the
<a href="#msvcrt"><tt>MSVCRT.DLL</tt></a>.
</dd>
<dt><a name="ALT_OUTPUTDIR"><tt>ALT_OUTPUTDIR</tt></a> </dt>
<dd>
An override for specifying the (absolute) path of where the
build output is to go.
The default output directory will be build/<i>platform</i>.
</dd>
<dt><a name="ALT_SLASH_JAVA"><tt>ALT_SLASH_JAVA</tt></a></dt>
<dd>
The default root location for many of the ALT path locations
of the following ALT variables.
The default value is
<tt>"/java"</tt> on Solaris and Linux,
<tt>"J:"</tt> on Windows.
</dd>
<dt><a name="ALT_UNIXCCS_PATH"><tt>ALT_UNIXCCS_PATH</tt></a></dt>
<dd>
<strong>Solaris only:</strong>
An override for specifying where the Unix CCS
command set are located.
The default location is <tt>/usr/ccs/bin</tt>
</dd>
<dt><a name="ALT_UNIXCOMMAND_PATH"><tt>ALT_UNIXCOMMAND_PATH</tt></a> </dt>
<dd>
An override for specifying where the
Unix command set are located.
The default location varies depending on the platform,
<tt>"%SYSTEMDRIVE%/MKSNT"</tt> or
<tt>$(ROOTDIR)</tt> on Windows with MKS, otherwise it's
<tt>"/bin"</tt> or <tt>/usr/bin</tt>.
</dd>
<dt><a name="ALT_USRBIN_PATH"><tt>ALT_USRBIN_PATH</tt></a></dt>
<dd>
An override for specifying where the
Unix <tt>/usr/bin</tt> commands are located. You usually do not need
to set this variable (the default location is <tt>/usr/bin</tt>).
</dd>
<dt><a name="ANT_HOME"><tt>ANT_HOME</tt></a></dt>
<dd>
The location of the Ant installation.
See <a href="#ant">Ant</a> for more information.
You should always set <tt>ANT_HOME</tt> explicitly.
</dd>
<dt><a name="arch_data_model"><tt>ARCH_DATA_MODEL</tt></a></dt>
<dd>The <tt>ARCH_DATA_MODEL</tt> variable
is used to specify whether the build is to generate 32-bit or 64-bit
binaries.
The Solaris build supports either 32-bit or 64-bit builds, but
Windows and Linux will support only one, depending on the specific
OS being used.
Normally, setting this variable is only necessary on Solaris.
Set <tt>ARCH_DATA_MODEL</tt> to <tt>32</tt> for generating 32-bit binaries,
or to <tt>64</tt> for generating 64-bit binaries.
</dd>
<dt><tt>BUILD_NUMBER</tt> </dt>
<dd>
The build number for the build (<i>e.g.</i> "b27").
The default value is "b00".
</dd>
<dt><tt>MILESTONE</tt> </dt>
<dd>
The milestone name for the build (<i>e.g.</i>"beta").
The default value is "internal".
</dd>
<dt><a name="path"><tt>PATH</tt></a> </dt>
<dd>Typically you want to set the <tt>PATH</tt> to include:
<ul>
<li>The location of the GNU make binary</li>
<li>The location of the Bootstrap JDK <tt>java</tt>
(see <a href="#bootjdk">Bootstrap JDK</a>)</li>
<li>The location of the C/C++ compilers
(see <a href="#compilers"><tt>compilers</tt></a>)</li>
<li>The location or locations for the Unix command utilities
(e.g. <tt>/usr/bin</tt>)</li>
</ul>
<dl>
<dt><a name="ALT_MSDEVTOOLS_PATH"><tt>ALT_MSDEVTOOLS_PATH</tt></a> </dt>
<dd>
The location of the
Microsoft Visual Studio
tools 'bin' directory.
The default is usually derived from
<a href="#ALT_COMPILER_PATH"><tt>ALT_COMPILER_PATH</tt></a>.
</dd>
<dt><tt><a name="ALT_DXSDK_PATH">ALT_DXSDK_PATH</a></tt> </dt>
<dd>
The location of the
<a href="#dxsdk">Microsoft DirectX 9 SDK</a>.
The default will be to try and use the DirectX environment
variable <tt>DXSDK_DIR</tt>,
failing that, look in <tt>C:/DXSDK</tt>.
</dd>
<dt><tt><a name="ALT_MSVCRT_DLL_PATH">ALT_MSVCRT_DLL_PATH</a></tt> </dt>
<dd>
The location of the
<a href="#msvcrt"><tt>MSVCRT.DLL</tt></a>.
</dd>
<dt><tt><a name="ALT_MSVCR90_DLL_PATH">ALT_MSVCR90_DLL_PATH</a></tt> </dt>
<dd>
<strong>i586 only:</strong>
The location of the
<a href="#msvcr90"><tt>MSVCR90.DLL</tt></a>.
</dd>
</dl>
</dd>
</dl>
</blockquote>
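<p>
As a worked example, a minimal sketch of driving a build with a few of the
variables described above (all paths are assumptions; substitute the local
locations):
<blockquote>
<pre>
# Hypothetical locations for the Bootstrap JDK and the binary plugs.
export ALT_BOOTDIR=/opt/java/jdk1.6.0
export ALT_BINARY_PLUGS_PATH=/opt/java/openjdk-binary-plugs
# Only needed on Solaris, to select a 64-bit build:
export ARCH_DATA_MODEL=64
# Check the setup first, then build.
gmake sanity
gmake
</pre>
</blockquote>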
@ -1661,8 +1637,8 @@
This is caused by a missing libstdc++.a library.
This is installed as part of a specific package
(e.g. libstdc++.so.devel.386).
By default some 64bit Linux versions (e.g. Fedora)
only install the 64bit version of the libstdc++ package.
By default some 64-bit Linux versions (e.g. Fedora)
only install the 64-bit version of the libstdc++ package.
Various parts of the JDK build require a static
link of the C++ runtime libraries to allow for maximum
portability of the built images.
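<p>
The usual fix is to install the 32-bit libstdc++ development package alongside
the 64-bit one (a sketch; the exact package name is an assumption and varies
by distribution and release):
<blockquote>
<pre>
# Run as root; hypothetical package name for Fedora-style systems.
yum install libstdc++-devel.i386
</pre>
</blockquote>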

View File

@ -12,3 +12,10 @@ ef6af34d75a7b44e77083f1d4ee47631fa09d3b4 jdk7-b31
3867c4d14a5bfdbb37c97b4874ccb0ee5343111c jdk7-b35
0723891eb8d1c27e67c54163af0b4cea05a4e036 jdk7-b36
59d5848bdedebe91cc2753acce78911bcb4a66db jdk7-b37
08be802754b0296c91a7713b6d85a015dbcd5349 jdk7-b38
55078b6661e286e90387d1d9950bd865f5cc436e jdk7-b39
184e21992f47a8d730df1adc5b21a108f3125489 jdk7-b40
c90eeda9594ed2983403e2049aed8d503126c62e jdk7-b41
ccd6a16502e0650d91d85c4b86be05cbcd461a87 jdk7-b42
9cd740d48a4855321d69f137a7109c00bcda76be jdk7-b43
9803dac7254041b30ca65e3852d4c566b9757c3b jdk7-b44

View File

@ -1,5 +1,5 @@
#
# Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
# Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
#
# Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

View File

@ -12,3 +12,10 @@ b727c32788a906c04839516ae7443a085185a300 jdk7-b32
5fa96a5a7e76da7c8dad12486293a0456c2c116c jdk7-b35
e91159f921a58af3698e6479ea1fc5818da66d09 jdk7-b36
9ee9cf798b59e7d51f8c0a686959f313867a55d6 jdk7-b37
d9bc824aa078573829bb66572af847e26e1bd12e jdk7-b38
49ca90d77f34571b0757ebfcb8a7848ef2696b88 jdk7-b39
81a0cbe3b28460ce836109934ece03db7afaf9cc jdk7-b40
f9d938ede1960d18cb7cf23c645b026519c1a678 jdk7-b41
ad8c8ca4ab0f4c86e74c061958f44a8f4a930f2c jdk7-b42
fc6a5ae3fef5ebacfa896dbb3ae37715e388e282 jdk7-b43
809e899c638bd9b21836abf9d09ab2a30ff3900b jdk7-b44

View File

@ -1,4 +1,4 @@
#
#
# Copyright 2006-2008 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
@ -19,7 +19,7 @@
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
#
#
# This file format must remain compatible with both

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2008
HS_MAJOR_VER=14
HS_MINOR_VER=0
HS_BUILD_NUMBER=05
HS_BUILD_NUMBER=10
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@ -7,5 +7,13 @@
#
# adlc-updater <file> <source-dir> <target-dir>
#
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ]; echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
mv $1+ $1
}
[ -f $3/$1 ] && (fix_lines $2/$1 $3/$1; cmp -s $2/$1 $3/$1) || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )
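# For reference, a sketch of how this updater is invoked by the adlc makefiles
# (hypothetical file and directory names, following the usage line above):
#   ./adlc_updater ad_x86_32.cpp tmp adfiles
# This repairs bare "#line 999999" markers in tmp/ad_x86_32.cpp so they refer
# to adfiles/ad_x86_32.cpp, then installs the file only when the target copy
# is missing or differs.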

View File

@ -54,10 +54,12 @@ VPATH += $(Src_Dirs_V:%=%:)
Src_Dirs_I = ${Src_Dirs} $(GENERATED)
INCLUDES += $(Src_Dirs_I:%=-I%)
# Force assertions on.
SYSDEFS += -DASSERT
# set flags for adlc compilation
CPPFLAGS = $(SYSDEFS) $(INCLUDES)
# Force assertions on.
CPPFLAGS += -DASSERT
# CFLAGS_WARN holds compiler options to suppress/enable warnings.
# Suppress warnings (for now)
CFLAGS_WARN = -w
@ -125,7 +127,15 @@ $(GENERATEDFILES): refresh_adfiles
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
ADLCFLAGS = -q -T
# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)
# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO.
ADLCFLAGS += -q -T
# Normally, debugging is done directly on the ad_<arch>*.cpp files.
# But -g will put #line directives in those files pointing back to <arch>.ad.
#ADLCFLAGS += -g
ifdef LP64
ADLCFLAGS += -D_LP64
@ -140,6 +150,8 @@ endif
#
ADLC_UPDATER_DIRECTORY = $(GAMMADIR)/make/$(OS)
ADLC_UPDATER = adlc_updater
$(ADLC_UPDATER): $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER)
$(QUIETLY) cp $< $@; chmod +x $@
# This action refreshes all generated adlc files simultaneously.
# The way it works is this:
@ -149,9 +161,8 @@ ADLC_UPDATER = adlc_updater
# 4) call $(ADLC_UPDATER) on each generated adlc file. It will selectively update changed or missing files.
# 5) If we actually updated any files, echo a notice.
#
refresh_adfiles: $(EXEC) $(SOURCE.AD)
refresh_adfiles: $(EXEC) $(SOURCE.AD) $(ADLC_UPDATER)
@rm -rf $(TEMPDIR); mkdir $(TEMPDIR)
$(QUIETLY) [ -f $(ADLC_UPDATER) ] || ( cp $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER) . ; chmod +x $(ADLC_UPDATER) )
$(QUIETLY) $(EXEC) $(ADLCFLAGS) $(SOURCE.AD) \
-c$(TEMPDIR)/ad_$(Platform_arch_model).cpp -h$(TEMPDIR)/ad_$(Platform_arch_model).hpp -a$(TEMPDIR)/dfa_$(Platform_arch_model).cpp -v$(TEMPDIR)/adGlobals_$(Platform_arch_model).hpp \
|| { rm -rf $(TEMPDIR); exit 1; }
@ -174,7 +185,15 @@ refresh_adfiles: $(EXEC) $(SOURCE.AD)
# #########################################################################
$(SOURCE.AD): $(SOURCES.AD)
$(QUIETLY) cat $(SOURCES.AD) > $(SOURCE.AD)
$(QUIETLY) $(PROCESS_AD_FILES) $(SOURCES.AD) > $(SOURCE.AD)
#PROCESS_AD_FILES = cat
# Pass through #line directives, in case user enables -g option above:
PROCESS_AD_FILES = awk '{ \
if (CUR_FN != FILENAME) { CUR_FN=FILENAME; NR_BASE=NR-1; need_lineno=1 } \
if (need_lineno && $$0 !~ /\/\//) \
{ print "\n\n\#line " (NR-NR_BASE) " \"" FILENAME "\""; need_lineno=0 }; \
print }'
$(OUTDIR)/%.o: %.cpp
@echo Compiling $<

View File

@ -64,6 +64,7 @@ Include_DBs/GC = $(VM)/includeDB_gc \
$(VM)/gc_implementation/includeDB_gc_parallelScavenge \
$(VM)/gc_implementation/includeDB_gc_concurrentMarkSweep \
$(VM)/gc_implementation/includeDB_gc_parNew \
$(VM)/gc_implementation/includeDB_gc_g1 \
$(VM)/gc_implementation/includeDB_gc_serial \
$(VM)/gc_implementation/includeDB_gc_shared
@ -84,9 +85,9 @@ Incremental_Lists = $(Cached_db)
AD_Dir = $(GENERATED)/adfiles
ADLC = $(AD_Dir)/adlc
AD_Spec = $(GAMMADIR)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad
AD_Spec = $(GAMMADIR)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad
AD_Src = $(GAMMADIR)/src/share/vm/adlc
AD_Names = ad_$(Platform_arch).hpp ad_$(Platform_arch).cpp
AD_Names = ad_$(Platform_arch_model).hpp ad_$(Platform_arch_model).cpp
AD_Files = $(AD_Names:%=$(AD_Dir)/%)
# AD_Files_If_Required/COMPILER1 = ad_stuff

View File

@ -7,5 +7,13 @@
#
# adlc-updater <file> <source-dir> <target-dir>
#
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ]; echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
mv $1+ $1
}
[ -f $3/$1 ] && (fix_lines $2/$1 $3/$1; cmp -s $2/$1 $3/$1) || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )

View File

@ -54,10 +54,12 @@ VPATH += $(Src_Dirs_V:%=%:)
Src_Dirs_I = ${Src_Dirs} $(GENERATED)
INCLUDES += $(Src_Dirs_I:%=-I%)
# Force assertions on.
SYSDEFS += -DASSERT
# set flags for adlc compilation
CPPFLAGS = $(SYSDEFS) $(INCLUDES)
# Force assertions on.
CPPFLAGS += -DASSERT
ifndef USE_GCC
# We need libCstd.so for adlc
CFLAGS += -library=Cstd -g
@ -141,7 +143,15 @@ $(GENERATEDFILES): refresh_adfiles
# Note that product files are updated via "mv", which is atomic.
TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
ADLCFLAGS = -q -T
# Pass -D flags into ADLC.
ADLCFLAGS += $(SYSDEFS)
# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO.
ADLCFLAGS += -q -T
# Normally, debugging is done directly on the ad_<arch>*.cpp files.
# But -g will put #line directives in those files pointing back to <arch>.ad.
#ADLCFLAGS += -g
ifdef LP64
ADLCFLAGS += -D_LP64
@ -156,6 +166,8 @@ endif
#
ADLC_UPDATER_DIRECTORY = $(GAMMADIR)/make/$(OS)
ADLC_UPDATER = adlc_updater
$(ADLC_UPDATER): $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER)
$(QUIETLY) cp $< $@; chmod +x $@
# This action refreshes all generated adlc files simultaneously.
# The way it works is this:
@ -165,9 +177,8 @@ ADLC_UPDATER = adlc_updater
# 4) call $(ADLC_UPDATER) on each generated adlc file. It will selectively update changed or missing files.
# 5) If we actually updated any files, echo a notice.
#
refresh_adfiles: $(EXEC) $(SOURCE.AD)
refresh_adfiles: $(EXEC) $(SOURCE.AD) $(ADLC_UPDATER)
@rm -rf $(TEMPDIR); mkdir $(TEMPDIR)
$(QUIETLY) [ -f $(ADLC_UPDATER) ] || ( cp $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER) . ; chmod +x $(ADLC_UPDATER) )
$(QUIETLY) $(EXEC) $(ADLCFLAGS) $(SOURCE.AD) \
-c$(TEMPDIR)/ad_$(Platform_arch_model).cpp -h$(TEMPDIR)/ad_$(Platform_arch_model).hpp -a$(TEMPDIR)/dfa_$(Platform_arch_model).cpp -v$(TEMPDIR)/adGlobals_$(Platform_arch_model).hpp \
|| { rm -rf $(TEMPDIR); exit 1; }
@ -190,7 +201,15 @@ refresh_adfiles: $(EXEC) $(SOURCE.AD)
# #########################################################################
$(SOURCE.AD): $(SOURCES.AD)
$(QUIETLY) cat $(SOURCES.AD) > $(SOURCE.AD)
$(QUIETLY) $(PROCESS_AD_FILES) $(SOURCES.AD) > $(SOURCE.AD)
#PROCESS_AD_FILES = cat
# Pass through #line directives, in case user enables -g option above:
PROCESS_AD_FILES = awk '{ \
if (CUR_FN != FILENAME) { CUR_FN=FILENAME; NR_BASE=NR-1; need_lineno=1 } \
if (need_lineno && $$0 !~ /\/\//) \
{ print "\n\n\#line " (NR-NR_BASE) " \"" FILENAME "\""; need_lineno=0 }; \
print }'
$(OUTDIR)/%.o: %.cpp
@echo Compiling $<

View File

@ -26,7 +26,6 @@
CFLAGS += -DVM_LITTLE_ENDIAN
# Not included in includeDB because it has no dependencies
# Obj_Files += solaris_amd64.o
Obj_Files += solaris_x86_64.o
#
@ -38,8 +37,6 @@ ifeq ("${Platform_compiler}", "sparcWorks")
# _lwp_create_interpose must have a frame
OPT_CFLAGS/os_solaris_x86_64.o = -xO1
# force C++ interpreter to be full optimization
#OPT_CFLAGS/interpret.o = -fast -O4
# Temporary until SS10 C++ compiler is fixed
OPT_CFLAGS/generateOptoStub.o = -xO2
@ -51,8 +48,6 @@ ifeq ("${Platform_compiler}", "gcc")
# gcc
# The serviceability agent relies on frame pointer (%rbp) to walk thread stack
CFLAGS += -fno-omit-frame-pointer
# force C++ interpreter to be full optimization
#OPT_CFLAGS/interpret.o = -O3
else
# error

View File

@ -30,7 +30,7 @@ DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)
ifeq ("${Platform_compiler}", "sparcWorks")
ifeq ($(COMPILER_REV),5.8)
ifeq ($(COMPILER_REV_NUMERIC),508)
# SS11 SEGV when compiling with -g and -xarch=v8, using different backend
DEBUG_CFLAGS/compileBroker.o = $(DEBUG_CFLAGS) -xO0
DEBUG_CFLAGS/jvmtiTagMap.o = $(DEBUG_CFLAGS) -xO0

View File

@ -87,17 +87,16 @@ ifneq ("${ISA}","${BUILDARCH}")
XLIBJVM_DB = 64/$(LIBJVM_DB)
XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
XARCH = $(subst sparcv9,v9,$(shell echo $(ISA)))
$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
@echo Making $@
$(QUIETLY) mkdir -p 64/ ; \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(XARCH)) -D$(TYPE) -I. -I$(GENERATED) \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
@echo Making $@
$(QUIETLY) mkdir -p 64/ ; \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(XARCH)) -D$(TYPE) -I. \
$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
endif # ifneq ("${ISA}","${BUILDARCH}")
@ -116,27 +115,25 @@ $(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).so
$(QUIETLY) $(LINK.CC) -z nodefs -o $@ $(DTRACE_SRCDIR)/$(GENOFFS)Main.c \
./lib$(GENOFFS).so
# $@.tmp is created first. It's to avoid empty $(JVMOFFS).h produced in error case.
CONDITIONALLY_UPDATE_JVMOFFS_TARGET = \
cmp -s $@ $@.tmp; \
case $$? in \
0) rm -f $@.tmp;; \
*) rm -f $@ && mv $@.tmp $@ && echo Updated $@;; \
esac
# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
$(JVMOFFS).h: $(GENOFFS)
$(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -header > $@.tmp ; \
if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
then rm -f $@; mv $@.tmp $@; echo Updated $@ ; \
else rm -f $@.tmp; \
fi
$(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -header > $@.tmp
$(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET)
$(JVMOFFS)Index.h: $(GENOFFS)
$(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -index > $@.tmp ; \
if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
then rm -f $@; mv $@.tmp $@; echo Updated $@ ; \
else rm -f $@.tmp; \
fi
$(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -index > $@.tmp
$(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET)
$(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h
$(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -table > $@.tmp ; \
if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
then rm -f $@; mv $@.tmp $@; echo Updated $@ ; \
else rm -f $@.tmp; \
fi
$(QUIETLY) LD_LIBRARY_PATH=. ./$(GENOFFS) -table > $@.tmp
$(QUIETLY) $(CONDITIONALLY_UPDATE_JVMOFFS_TARGET)
$(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp
$(QUIETLY) $(CCC) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp

View File

@ -37,7 +37,7 @@ ifeq ("${Platform_compiler}", "sparcWorks")
OPT_CFLAGS/SLOWER = -xO2
# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
ifeq ($(COMPILER_REV), 5.9)
ifeq ($(COMPILER_REV_NUMERIC), 509)
# To avoid jvm98 crash
OPT_CFLAGS/instanceKlass.o = $(OPT_CFLAGS/SLOWER)
# Not clear this workaround could be skipped in some cases.
@ -46,47 +46,41 @@ ifeq ($(COMPILER_REV), 5.9)
OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER)
endif
ifeq ($(COMPILER_REV), 5.5)
ifeq ($(COMPILER_REV_NUMERIC), 505)
# CC 5.5 has bug 4908364 with -xO4 (Fixed in 5.6)
OPT_CFLAGS/library_call.o = $(OPT_CFLAGS/SLOWER)
endif # COMPILER_REV == 5.5
endif # COMPILER_REV_NUMERIC == 505
ifeq ($(shell expr $(COMPILER_REV) \<= 5.4), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \<= 504), 1)
# Compilation of *_<arch>.cpp can take an hour or more at O3. Use O2
# See comments at top of sparc.make.
OPT_CFLAGS/ad_$(Platform_arch).o = $(OPT_CFLAGS/SLOWER)
OPT_CFLAGS/dfa_$(Platform_arch).o = $(OPT_CFLAGS/SLOWER)
endif # COMPILER_REV <= 5.4
OPT_CFLAGS/ad_$(Platform_arch_model).o = $(OPT_CFLAGS/SLOWER)
OPT_CFLAGS/dfa_$(Platform_arch_model).o = $(OPT_CFLAGS/SLOWER)
endif # COMPILER_REV_NUMERIC <= 504
ifeq (${COMPILER_REV}, 5.0)
# Avoid a compiler bug caused by using -xO<level> -g<level>
# Since the bug also occurs with -xO0, use an innocuous value (must not be null)
OPT_CFLAGS/c1_LIROptimizer_i486.o = -c
endif
ifeq ($(shell expr $(COMPILER_REV) \< 5.5), 1)
# Same problem with Solaris/x86 compiler (both 5.0 and 5.2) on ad_i486.cpp.
# CC build time is also too long for ad_i486_{gen,misc}.o
OPT_CFLAGS/ad_i486.o = -c
OPT_CFLAGS/ad_i486_gen.o = -c
OPT_CFLAGS/ad_i486_misc.o = -c
ifeq ($(Platform_arch), i486)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 505), 1)
# Same problem with Solaris/x86 compiler (both 5.0 and 5.2) on ad_x86_{32,64}.cpp.
# CC build time is also too long for ad_$(Platform_arch_model)_{gen,misc}.o
OPT_CFLAGS/ad_$(Platform_arch_model).o = -c
OPT_CFLAGS/ad_$(Platform_arch_model)_gen.o = -c
OPT_CFLAGS/ad_$(Platform_arch_model)_misc.o = -c
ifeq ($(Platform_arch), x86)
# Same problem for the wrapper roosts: jni.o jvm.o
OPT_CFLAGS/jni.o = -c
OPT_CFLAGS/jvm.o = -c
# Same problem in parse2.o (probably the Big Switch over bytecodes)
OPT_CFLAGS/parse2.o = -c
endif # Platform_arch == i486
endif # Platform_arch == x86
endif
# Frame size > 100k if we allow inlining via -g0!
DEBUG_CFLAGS/bytecodeInterpreter.o = -g
DEBUG_CFLAGS/bytecodeInterpreterWithChecks.o = -g
ifeq ($(Platform_arch), i486)
ifeq ($(Platform_arch), x86)
# ube explodes on x86
OPT_CFLAGS/bytecodeInterpreter.o = -xO1
OPT_CFLAGS/bytecodeInterpreterWithChecks.o = -xO1
endif # Platform_arch == i486
endif # Platform_arch == x86
endif # Platform_compiler == sparcWorks

View File

@ -35,17 +35,13 @@ Obj_Files += solaris_x86_32.o
ifeq ("${Platform_compiler}", "sparcWorks")
# _lwp_create_interpose must have a frame
OPT_CFLAGS/os_solaris_i486.o = -xO1
# force C++ interpreter to be full optimization
OPT_CFLAGS/interpret.o = -fast -O4
OPT_CFLAGS/os_solaris_x86.o = -xO1
else
ifeq ("${Platform_compiler}", "gcc")
# gcc
# _lwp_create_interpose must have a frame
OPT_CFLAGS/os_solaris_i486.o = -fno-omit-frame-pointer
# force C++ interpreter to be full optimization
OPT_CFLAGS/interpret.o = -O3
OPT_CFLAGS/os_solaris_x86.o = -fno-omit-frame-pointer
#
else
# error
@ -57,7 +53,7 @@ endif
ifeq ("${Platform_compiler}", "sparcWorks")
# ILD is gone as of SS11 (5.8), not supported in SS10 (5.7)
ifeq ($(shell expr $(COMPILER_REV) \< 5.7), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 507), 1)
#
# Bug in ild causes it to fail randomly. Until we get a fix we can't
# use ild.

View File

@ -30,7 +30,7 @@ DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)
ifeq ("${Platform_compiler}", "sparcWorks")
ifeq ($(COMPILER_REV),5.8)
ifeq ($(COMPILER_REV_NUMERIC),508)
# SS11 SEGV when compiling with -g and -xarch=v8, using different backend
DEBUG_CFLAGS/compileBroker.o = $(DEBUG_CFLAGS) -xO0
DEBUG_CFLAGS/jvmtiTagMap.o = $(DEBUG_CFLAGS) -xO0

View File

@ -33,7 +33,7 @@ OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
ifeq ("${Platform_compiler}", "sparcWorks")
# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
ifeq ($(COMPILER_REV),5.9)
ifeq ($(COMPILER_REV_NUMERIC),509)
# Not clear this workaround could be skipped in some cases.
OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) -g
OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) -g
@ -41,9 +41,9 @@ ifeq ($(COMPILER_REV),5.9)
endif
# Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
ifeq ($(COMPILER_REV),5.8))
ifeq ($(COMPILER_REV_NUMERIC),508))
OPT_CFLAGS/ciTypeFlow.o = $(OPT_CFLAGS/O2)
endif # COMPILER_REV == 5.8
endif # COMPILER_REV_NUMERIC == 508
endif # Platform_compiler == sparcWorks

View File

@ -41,7 +41,7 @@ endif
ifeq ("${Platform_compiler}", "sparcWorks")
# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
ifeq ($(COMPILER_REV),5.9)
ifeq ($(COMPILER_REV_NUMERIC),509)
# Not clear this workaround could be skipped in some cases.
OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) -g
OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) -g
@ -49,9 +49,9 @@ ifeq ($(COMPILER_REV),5.9)
endif
# Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
ifeq ($(COMPILER_REV),5.8)
ifeq ($(COMPILER_REV_NUMERIC),508)
OPT_CFLAGS/ciTypeFlow.o = $(OPT_CFLAGS/O2)
endif # COMPILER_REV == 5.8
endif # COMPILER_REV_NUMERIC == 508
endif # Platform_compiler == sparcWorks

View File

@ -26,7 +26,7 @@ Obj_Files += solaris_sparc.o
ASFLAGS += $(AS_ARCHFLAG)
ifeq ("${Platform_compiler}", "sparcWorks")
ifeq ($(shell expr $(COMPILER_REV) \< 5.5), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 505), 1)
# For 5.2 ad_sparc file is compiled with -O2 %%%% remove when adlc is fixed
OPT_CFLAGS/ad_sparc.o = $(OPT_CFLAGS/SLOWER)
OPT_CFLAGS/dfa_sparc.o = $(OPT_CFLAGS/SLOWER)
@ -39,7 +39,7 @@ OPT_CFLAGS/carRememberedSet.o = $(OPT_CFLAGS/O2)
OPT_CFLAGS/jniHandles.o = $(OPT_CFLAGS/O2)
# CC brings an US-II to its knees compiling the vmStructs asserts under -xO4
OPT_CFLAGS/vmStructs.o = $(OPT_CFLAGS/O2)
endif
endif # COMPILER_REV_NUMERIC < 505
else
# Options for gcc
OPT_CFLAGS/ad_sparc.o = $(OPT_CFLAGS/SLOWER)

View File

@ -41,9 +41,9 @@ REORDER_FLAG = -xF
# Get the last thing on the line that looks like x.x+ (x is a digit).
COMPILER_REV := \
$(shell $(CPP) -V 2>&1 | sed -e 's/^.*\([1-9]\.[0-9][0-9]*\).*/\1/')
$(shell $(CPP) -V 2>&1 | sed -n 's/^.*[ ,\t]C++[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
C_COMPILER_REV := \
$(shell $(CC) -V 2>&1 | grep -i "cc:" | sed -e 's/^.*\([1-9]\.[0-9][0-9]*\).*/\1/')
$(shell $(CC) -V 2>&1 | sed -n 's/^.*[ ,\t]C[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p')
# Pick which compiler is validated
ifeq ($(JDK_MINOR_VERSION),6)
@ -60,17 +60,19 @@ endif
ENFORCE_COMPILER_REV${ENFORCE_COMPILER_REV} := ${VALIDATED_COMPILER_REV}
ifneq (${COMPILER_REV},${ENFORCE_COMPILER_REV})
dummy_target_to_enforce_compiler_rev:=\
$(info WARNING: You are using CC version ${COMPILER_REV} \
and should be using version ${ENFORCE_COMPILER_REV})
$(shell echo >&2 WARNING: You are using CC version ${COMPILER_REV} \
and should be using version ${ENFORCE_COMPILER_REV}. Set ENFORCE_COMPILER_REV=${COMPILER_REV} to avoid this warning.)
endif
ENFORCE_C_COMPILER_REV${ENFORCE_C_COMPILER_REV} := ${VALIDATED_C_COMPILER_REV}
ifneq (${C_COMPILER_REV},${ENFORCE_C_COMPILER_REV})
dummy_target_to_enforce_c_compiler_rev:=\
$(info WARNING: You are using cc version ${C_COMPILER_REV} \
and should be using version ${ENFORCE_C_COMPILER_REV})
$(shell echo >&2 WARNING: You are using cc version ${C_COMPILER_REV} \
and should be using version ${ENFORCE_C_COMPILER_REV}. Set ENFORCE_C_COMPILER_REV=${C_COMPILER_REV} to avoid this warning.)
endif
COMPILER_REV_NUMERIC := $(shell echo $(COMPILER_REV) | awk -F. '{ print $$1 * 100 + $$2 }')
# Fail the build if __fabsf is used. __fabsf exists only in Solaris 8 2/04
# and newer; objects with a dependency on this symbol will not run on older
# Solaris 8.
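# A quick illustration of the COMPILER_REV_NUMERIC conversion above (a sketch;
# assumes a POSIX shell and awk):
#   echo 5.9  | awk -F. '{ print $1 * 100 + $2 }'   # prints 509
#   echo 5.10 | awk -F. '{ print $1 * 100 + $2 }'   # prints 510
# With plain string comparison "5.10" would sort before "5.9", which is why the
# version tests in this file compare the numeric form instead.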
@ -120,7 +122,7 @@ ARCHFLAG_OLD/amd64 = -xarch=amd64
ARCHFLAG_NEW/amd64 = -m64
# Select the ARCHFLAGs and other SS12 (5.9) options
ifeq ($(shell expr $(COMPILER_REV) \>= 5.9), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
ARCHFLAG/sparc = $(ARCHFLAG_NEW/sparc)
ARCHFLAG/sparcv9 = $(ARCHFLAG_NEW/sparcv9)
ARCHFLAG/i486 = $(ARCHFLAG_NEW/i486)
@ -150,7 +152,7 @@ OPT_CFLAGS/NOOPT=-xO1
# Begin current (>=5.6) Forte compiler options #
#################################################
ifeq ($(shell expr $(COMPILER_REV) \>= 5.6), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 506), 1)
ifeq ("${Platform_arch}", "sparc")
@ -167,7 +169,7 @@ endif
# Begin current (>=5.5) Forte compiler options #
#################################################
ifeq ($(shell expr $(COMPILER_REV) \>= 5.5), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1)
CFLAGS += $(ARCHFLAG)
AOUT_FLAGS += $(ARCHFLAG)
@ -255,7 +257,7 @@ LFLAGS += -library=%none
LFLAGS += -mt
endif # COMPILER_REV >= 5.5
endif # COMPILER_REV_NUMERIC >= 505
######################################
# End 5.5 Forte compiler options #
@ -265,7 +267,7 @@ endif # COMPILER_REV >= 5.5
# Begin 5.2 Forte compiler options #
######################################
ifeq ($(COMPILER_REV), 5.2)
ifeq ($(COMPILER_REV_NUMERIC), 502)
CFLAGS += $(ARCHFLAG)
AOUT_FLAGS += $(ARCHFLAG)
@ -324,7 +326,7 @@ PICFLAG/BYFILE = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@))
LFLAGS += -library=Crun
LIBS += -library=Crun -lCrun
endif # COMPILER_REV == 5.2
endif # COMPILER_REV_NUMERIC == 502
##################################
# End 5.2 Forte compiler options #
@ -333,7 +335,7 @@ endif # COMPILER_REV == 5.2
##################################
# Begin old 5.1 compiler options #
##################################
ifeq ($(COMPILER_REV), 5.1)
ifeq ($(COMPILER_REV_NUMERIC), 501)
_JUNK_ := $(shell echo >&2 \
"*** ERROR: sparkWorks.make incomplete for 5.1 compiler")
@ -347,7 +349,7 @@ endif
# Begin old 5.0 compiler options #
##################################
ifeq (${COMPILER_REV}, 5.0)
ifeq (${COMPILER_REV_NUMERIC}, 500)
# Had to hoist this higher apparently because of other changes. Must
# come before -xarch specification.
@ -379,7 +381,7 @@ endif # sparc
ifeq ("${Platform_arch_model}", "x86_32")
OPT_CFLAGS=-xtarget=pentium $(EXTRA_OPT_CFLAGS)
ifeq ("${COMPILER_REV}", "5.0")
ifeq ("${COMPILER_REV_NUMERIC}", "500")
# SC5.0 tools on x86 are flakey at -xO4
OPT_CFLAGS+=-xO3
else
@ -405,13 +407,13 @@ PICFLAG/DEFAULT = $(PICFLAG)
PICFLAG/BETTER = $(PICFLAG/DEFAULT)
PICFLAG/BYFILE = $(PICFLAG/$@)$(PICFLAG/DEFAULT$(PICFLAG/$@))
endif # COMPILER_REV = 5.0
endif # COMPILER_REV_NUMERIC = 500
################################
# End old 5.0 compiler options #
################################
ifeq ("${COMPILER_REV}", "4.2")
ifeq ("${COMPILER_REV_NUMERIC}", "402")
# 4.2 COMPILERS SHOULD NO LONGER BE USED
_JUNK_ := $(shell echo >&2 \
"*** ERROR: SC4.2 compilers are not supported by this code base!")
@ -443,7 +445,7 @@ LINK_MODE/debug =
LINK_MODE/optimized = -Bsymbolic -znodefs
# Have thread local errnos
ifeq ($(shell expr $(COMPILER_REV) \>= 5.5), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1)
CFLAGS += -mt
else
CFLAGS += -D_REENTRANT
@ -460,7 +462,7 @@ FASTDEBUG_CFLAGS = -g0
# The -g0 setting allows the C++ frontend to inline, which is a big win.
# Special global options for SS12
ifeq ($(COMPILER_REV),5.9)
ifeq ($(COMPILER_REV_NUMERIC),509)
# There appears to be multiple issues with the new Dwarf2 debug format, so
# we tell the compiler to use the older 'stabs' debug format all the time.
# Note that this needs to be used in optimized compiles too to be 100%.
@ -479,8 +481,8 @@ endif
#DEBUG_CFLAGS += -Qoption ccfe -xglobalstatic
#FASTDEBUG_CFLAGS += -Qoption ccfe -xglobalstatic
ifeq (${COMPILER_REV}, 5.2)
COMPILER_DATE := $(shell $(CPP) -V 2>&1 | awk '{ print $$NF; }')
ifeq (${COMPILER_REV_NUMERIC}, 502)
COMPILER_DATE := $(shell $(CPP) -V 2>&1 | sed -n '/^.*[ ]C++[ ]\([1-9]\.[0-9][0-9]*\)/p' | awk '{ print $$NF; }')
ifeq (${COMPILER_DATE}, 2001/01/31)
# disable -g0 in fastdebug since SC6.1 dated 2001/01/31 seems to be buggy
# use an innocuous value because it will get -g if it's empty
@ -493,7 +495,7 @@ endif
CFLAGS += $(CFLAGS_BROWSE)
# ILD is gone as of SS11 (5.8), not supported in SS10 (5.7)
ifeq ($(shell expr $(COMPILER_REV) \< 5.7), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 507), 1)
# use ild when debugging (but when optimizing we want reproducible results)
ILDFLAG = $(ILDFLAG/$(VERSION))
ILDFLAG/debug = -xildon

View File

@ -26,7 +26,7 @@ Obj_Files += solaris_sparc.o
ASFLAGS += $(AS_ARCHFLAG)
ifeq ("${Platform_compiler}", "sparcWorks")
ifeq ($(shell expr $(COMPILER_REV) \< 5.5), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 505), 1)
# When optimized fully, stubGenerator_sparc.cpp
# has bogus code for the routine
# StubGenerator::generate_flush_callers_register_windows()

View File

@ -54,6 +54,7 @@ Include_DBs/GC = $(VM)/includeDB_gc \
$(VM)/gc_implementation/includeDB_gc_parallelScavenge \
$(VM)/gc_implementation/includeDB_gc_concurrentMarkSweep \
$(VM)/gc_implementation/includeDB_gc_parNew \
$(VM)/gc_implementation/includeDB_gc_g1 \
$(VM)/gc_implementation/includeDB_gc_serial \
$(VM)/gc_implementation/includeDB_gc_shared
@ -82,9 +83,9 @@ Incremental_Lists =$(GENERATED)/$(Cached_db)
AD_Dir = $(GENERATED)/adfiles
ADLC = $(AD_Dir)/adlc
AD_Spec = $(GAMMADIR)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad
AD_Spec = $(GAMMADIR)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad
AD_Src = $(GAMMADIR)/src/share/vm/adlc
AD_Names = ad_$(Platform_arch).hpp ad_$(Platform_arch).cpp
AD_Names = ad_$(Platform_arch_model).hpp ad_$(Platform_arch_model).cpp
AD_Files = $(AD_Names:%=$(AD_Dir)/%)
# AD_Files_If_Required/COMPILER1 = ad_stuff

View File

@ -101,7 +101,7 @@ LIBM=/usr/lib$(ISA_DIR)/libm.so.1
ifeq ("${Platform_compiler}", "sparcWorks")
# The whole megilla:
ifeq ($(shell expr $(COMPILER_REV) \>= 5.5), 1)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 505), 1)
# Old Comment: List the libraries in the order the compiler was designed for
# Not sure what the 'designed for' comment is referring to above.
# The order may not be too significant anymore, but I have placed this

View File

@ -200,29 +200,6 @@ BUILD_WIN_SA = 0
checkSA::
@echo Not building SA: ARCH = ia64
!elseif exist("$(MSVCDIR)\PlatformSDK\Include\dbgeng.h")
# These don't have to be set because the default
# setting of INCLUDE and LIB already contain the needed dirs.
SA_INCLUDE =
SA_LIB =
!elseif exist("$(SYSTEMROOT)\..\Program Files\Microsoft SDK\include\dbgeng.h")
# These don't have to be set because the default
# setting of INCLUDE and LIB already contain the needed dirs.
SA_INCLUDE =
SA_LIB =
!else
checkSA::
@echo .
@echo ERROR: Can't build SA because dbgeng.h does not exist here:
@echo $(MSVCDIR)\PlatformSDK\Include\dbgeng.h
@echo nor here:
@echo $(SYSTEMROOT)\..\Program Files\Microsoft SDK\include\dbgeng.h
@echo You must use Vis. Studio .Net 2003 on Win 32, and you must
@echo have the Microsoft SDK installed on Win amd64.
@echo You can disable building of SA by specifying BUILD_WIN_SA = 0
@echo . && false
!endif # ! "$(BUILD_WIN_SA)" != "1"
#########################################################################

View File

@ -102,6 +102,12 @@ GENERATED_NAMES_IN_INCL=\
adlc.exe: main.obj adlparse.obj archDesc.obj arena.obj dfa.obj dict2.obj filebuff.obj \
forms.obj formsopt.obj formssel.obj opcodes.obj output_c.obj output_h.obj
$(LINK) $(LINK_FLAGS) /subsystem:console /out:$@ $**
!if "$(MT)" != ""
# The previous link command created a .manifest file that we want to
# insert into the linked artifact so we do not need to track it
# separately. Use ";#2" for .dll and ";#1" for .exe:
$(MT) /manifest $@.manifest /outputresource:$@;#1
!endif
$(GENERATED_NAMES_IN_INCL): $(Platform_arch_model).ad adlc.exe includeDB.current
rm -f $(GENERATED_NAMES)

View File

@ -30,7 +30,7 @@ CPP=cl.exe
# /W3 Warning level 3
# /Zi Include debugging information
# /WX Treat any warning error as a fatal error
# /MD Use dynamic multi-threaded runtime (msvcrt.dll or msvc*71.dll)
# /MD Use dynamic multi-threaded runtime (msvcrt.dll or msvc*NN.dll)
# /MTd Use static multi-threaded runtime debug versions
# /O1 Optimize for size (/Os), skips /Oi
# /O2 Optimize for speed (/Ot), adds /Oi to /O1
@ -80,8 +80,10 @@ CPP_FLAGS=$(CPP_FLAGS) /D "IA32"
CPP=ARCH_ERROR
!endif
# MSC_VER is a 4 digit number that tells us what compiler is being used, it is
# generated when the local.make file is created by the script gen_msc_ver.sh.
# MSC_VER is a 4 digit number that tells us what compiler is being used
# and is generated when the local.make file is created by build.make
# via the script get_msc_ver.sh
#
# If MSC_VER is set, it overrides the above default setting.
# But it should be set.
# Possible values:
@ -89,13 +91,14 @@ CPP=ARCH_ERROR
# 1300 and 1310 is VS2003 or VC7
# 1399 is our fake number for the VS2005 compiler that really isn't 1400
# 1400 is for VS2005
# 1500 is for VS2008
# Do not confuse this MSC_VER with the predefined macro _MSC_VER that the
# compiler provides, when MSC_VER==1399, _MSC_VER will be 1400.
# Normally they are the same, but a pre-release of the VS2005 compilers
# in the Windows 64bit Platform SDK said it was 1400 when it was really
# closer to VS2003 in terms of option spellings, so we use 1399 for that
# 1400 version that really isn't 1400.
# See the file gen_msc_ver.sh for more info.
# See the file get_msc_ver.sh for more info.
!if "x$(MSC_VER)" == "x"
COMPILER_NAME=$(DEFAULT_COMPILER_NAME)
!else
@ -115,6 +118,9 @@ COMPILER_NAME=VS2003
!if "$(MSC_VER)" == "1400"
COMPILER_NAME=VS2005
!endif
!if "$(MSC_VER)" == "1500"
COMPILER_NAME=VS2008
!endif
!endif
# Add what version of the compiler we think this is to the compile line
@ -160,7 +166,25 @@ GX_OPTION = /EHsc
# externals at link time. Even with /GS-, you need bufferoverflowU.lib.
# NOTE: Currently we decided to not use /GS-
BUFFEROVERFLOWLIB = bufferoverflowU.lib
LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB)
LINK_FLAGS = /manifest $(LINK_FLAGS) $(BUFFEROVERFLOWLIB)
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
MT=mt.exe
!if "$(BUILDARCH)" == "i486"
# VS2005 on x86 restricts the use of certain libc functions without this
CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_DEPRECATE
!endif
!endif
!if "$(COMPILER_NAME)" == "VS2008"
PRODUCT_OPT_OPTION = /O2 /Oy-
FASTDEBUG_OPT_OPTION = /O2 /Oy-
DEBUG_OPT_OPTION = /Od
GX_OPTION = /EHsc
LINK_FLAGS = /manifest $(LINK_FLAGS)
# Manifest Tool - used in VS2005 and later to adjust manifests stored
# as resources inside build artifacts.
MT=mt.exe
!if "$(BUILDARCH)" == "i486"
# VS2005 on x86 restricts the use of certain libc functions without this
CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_DEPRECATE

View File

@ -50,6 +50,12 @@ $(AOUT): $(Res_Files) $(Obj_Files)
$(LINK) @<<
$(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
<<
!if "$(MT)" != ""
# The previous link command created a .manifest file that we want to
# insert into the linked artifact so we do not need to track it
# separately. Use ";#2" for .dll and ";#1" for .exe:
$(MT) /manifest $@.manifest /outputresource:$@;#2
!endif
!include $(WorkSpace)/make/windows/makefiles/shared.make
!include $(WorkSpace)/make/windows/makefiles/sa.make

View File

@ -25,7 +25,7 @@
# The common definitions for hotspot windows builds.
# Include the top level defs.make under make directory instead of this one.
# This file is included into make/defs.make.
# On windows it is only used to construct parameters for
# make/windows/build.make when make/Makefile is used to build VM.
SLASH_JAVA ?= J:
@ -69,7 +69,7 @@ endif
JDK_INCLUDE_SUBDIR=win32
# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
# and added to MAKE_ARGS list in $(GAMMADIR)/make/defs.make.
# next parameters are defined in $(GAMMADIR)/make/defs.make.
@ -119,13 +119,13 @@ endif
# we want to release it. If we build it here,
# the SDK makefiles will copy it over and put it into
# the created image.
BUILD_WIN_SA = 0
BUILD_WIN_SA = 1
ifneq ($(ALT_BUILD_WIN_SA),)
BUILD_WIN_SA = $(ALT_BUILD_WIN_SA)
endif
ifeq ($(BUILD_WIN_SA), 1)
ifeq ($(ARCH),ia64)
BUILD_WIN_SA = 0
endif
endif
@ -154,7 +154,7 @@ ifeq ($(BUILD_WIN_SA), 1)
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.dll
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.pdb
EXPORT_LIST += $(EXPORT_JRE_BIN_DIR)/sawindbg.map
EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar
# Must pass this down to nmake.
MAKE_ARGS += BUILD_WIN_SA=1
endif

View File

@ -50,6 +50,13 @@ $(AOUT): $(Res_Files) $(Obj_Files)
$(LINK) @<<
$(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
<<
!if "$(MT)" != ""
# The previous link command created a .manifest file that we want to
# insert into the linked artifact so we do not need to track it
# separately. Use ";#2" for .dll and ";#1" for .exe:
$(MT) /manifest $@.manifest /outputresource:$@;#2
!endif
!include $(WorkSpace)/make/windows/makefiles/shared.make
!include $(WorkSpace)/make/windows/makefiles/sa.make

View File

@ -50,7 +50,8 @@ IncludeDBs_gc= $(WorkSpace)/src/share/vm/includeDB_gc_parallel \
$(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge \
$(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_shared \
$(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_parNew \
$(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
$(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep \
$(WorkSpace)/src/share/vm/gc_implementation/includeDB_gc_g1
IncludeDBs_core=$(IncludeDBs_base) $(IncludeDBs_gc) \
$(WorkSpace)/src/share/vm/includeDB_features

View File

@ -64,6 +64,7 @@ MakeDepsIncludesPRIVATE=\
-relativeInclude src\share\vm\gc_implementation\shared \
-relativeInclude src\share\vm\gc_implementation\parNew \
-relativeInclude src\share\vm\gc_implementation\concurrentMarkSweep \
-relativeInclude src\share\vm\gc_implementation\g1 \
-relativeInclude src\share\vm\gc_interface \
-relativeInclude src\share\vm\asm \
-relativeInclude src\share\vm\memory \
@ -115,6 +116,7 @@ MakeDepsIDEOptions=\
-additionalFile includeDB_gc_parallel \
-additionalFile includeDB_gc_parallelScavenge \
-additionalFile includeDB_gc_concurrentMarkSweep \
-additionalFile includeDB_gc_g1 \
-additionalFile includeDB_gc_parNew \
-additionalFile includeDB_gc_shared \
-additionalFile includeDB_gc_serial \

View File

@ -61,6 +61,12 @@ $(AOUT): $(Res_Files) $(Obj_Files)
$(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
<<
!endif
!if "$(MT)" != ""
# The previous link command created a .manifest file that we want to
# insert into the linked artifact so we do not need to track it
# separately. Use ";#2" for .dll and ";#1" for .exe:
$(MT) /manifest $@.manifest /outputresource:$@;#2
!endif
!include $(WorkSpace)/make/windows/makefiles/shared.make
!include $(WorkSpace)/make/windows/makefiles/sa.make

View File

@ -49,6 +49,9 @@ SA_PROPERTIES = $(SA_CLASSDIR)\sa.properties
default:: $(GENERATED)\sa-jdi.jar
# Remove the space between $(SA_BUILD_VERSION_PROP) and > below, as it adds trailing
# whitespace to the SA version string and causes a version mismatch with the target VM version.
$(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\)
@if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR)
@echo ...Building sa-jdi.jar
@ -56,15 +59,15 @@ $(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\)
@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\)
@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\)
$(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo $(SA_BUILD_VERSION_PROP) > $(SA_PROPERTIES)
$(RUN_JAR) cf $@ -C saclasses .
$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
$(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES)
$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
$(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
$(QUIETLY) rm -rf $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
$(QUIETLY) mkdir $(SA_CLASSDIR)\sun\jvm\hotspot\ui\resources
$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)
$(RUN_JAR) cf $@ -C saclasses .
$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
@ -92,13 +95,18 @@ SA_LINK_FLAGS = bufferoverflowU.lib
!else
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /ZI /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
!endif
!if "$(MT)" != ""
SA_LINK_FLAGS = /manifest $(SA_LINK_FLAGS)
!endif
SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
SA_LFLAGS = $(SA_LINK_FLAGS) /nologo /subsystem:console /map /debug /machine:$(MACHINE)
# Note that we do not keep sawindbg.obj around as it would then
# get included in the dumpbin command in build_vm_def.sh
# In VS2005 or VS2008 the link command creates a .manifest file that we want
# to insert into the linked artifact so we do not need to track it separately.
# Use ";#2" for .dll and ";#1" for .exe in the MT command below:
$(SAWINDBG): $(SASRCFILE)
set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
$(CPP) @<<
@ -109,6 +117,9 @@ $(SAWINDBG): $(SASRCFILE)
<<
set LIB=$(SA_LIB)$(LIB)
$(LINK) /out:$@ /DLL sawindbg.obj dbgeng.lib $(SA_LFLAGS)
!if "$(MT)" != ""
$(MT) /manifest $(@F).manifest /outputresource:$(@F);#2
!endif
-@rm -f sawindbg.obj
cleanall :

View File

@ -117,6 +117,7 @@ CPP_INCLUDE_DIRS=\
/I "$(WorkSpace)\src\share\vm\gc_implementation\shared"\
/I "$(WorkSpace)\src\share\vm\gc_implementation\parNew"\
/I "$(WorkSpace)\src\share\vm\gc_implementation\concurrentMarkSweep"\
/I "$(WorkSpace)\src\share\vm\gc_implementation\g1"\
/I "$(WorkSpace)\src\share\vm\gc_interface"\
/I "$(WorkSpace)\src\share\vm\asm" \
/I "$(WorkSpace)\src\share\vm\memory" \
@ -146,6 +147,7 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/parallelScavenge
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/shared
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/parNew
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/concurrentMarkSweep
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/g1
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_interface
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/asm
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/memory
@ -222,6 +224,9 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{$(WorkSpace)\src\share\vm\gc_implementation\concurrentMarkSweep}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
{$(WorkSpace)\src\share\vm\gc_implementation\g1}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<
{$(WorkSpace)\src\share\vm\gc_interface}.cpp.obj::
$(CPP) $(CPP_FLAGS) $(CPP_USE_PCH) /c $<

View File

@ -56,7 +56,8 @@ IncludeDBs_gc=$(HOTSPOTWORKSPACE)/src/share/vm/includeDB_gc_parallel \
$(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_shared \
$(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_parNew \
$(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge \
$(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep
$(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep \
$(HOTSPOTWORKSPACE)/src/share/vm/gc_implementation/includeDB_gc_g1
IncludeDBs_kernel =$(IncludeDBs_base) \

View File

@ -130,6 +130,20 @@ int AbstractAssembler::code_fill_byte() {
return 0x00; // illegal instruction 0x00000000
}
Assembler::Condition Assembler::reg_cond_to_cc_cond(Assembler::RCondition in) {
switch (in) {
case rc_z: return equal;
case rc_lez: return lessEqual;
case rc_lz: return less;
case rc_nz: return notEqual;
case rc_gz: return greater;
case rc_gez: return greaterEqual;
default:
ShouldNotReachHere();
}
return equal;
}
// Generate a bunch 'o stuff (including v9's
#ifndef PRODUCT
void Assembler::test_v9() {
@ -1213,31 +1227,19 @@ void MacroAssembler::set_vm_result(Register oop_result) {
}
void MacroAssembler::store_check(Register tmp, Register obj) {
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
/* $$$ This stuff needs to go into one of the BarrierSet generator
functions. (The particular barrier sets will have to be friends of
MacroAssembler, I guess.) */
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
void MacroAssembler::card_table_write(jbyte* byte_map_base,
Register tmp, Register obj) {
#ifdef _LP64
srlx(obj, CardTableModRefBS::card_shift, obj);
#else
srl(obj, CardTableModRefBS::card_shift, obj);
#endif
assert( tmp != obj, "need separate temp reg");
Address rs(tmp, (address)ct->byte_map_base);
Address rs(tmp, (address)byte_map_base);
load_address(rs);
stb(G0, rs.base(), obj);
}
void MacroAssembler::store_check(Register tmp, Register obj, Register offset) {
store_check(tmp, obj);
}
// %%% Note: The following six instructions have been moved,
// unchanged, from assembler_sparc.inline.hpp.
// They will be refactored at a later date.
@ -1663,11 +1665,21 @@ void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * fil
if (reg == G0) return; // always NULL, which is always an oop
char buffer[16];
char buffer[64];
#ifdef COMPILER1
if (CommentedAssembly) {
snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
block_comment(buffer);
}
#endif
int len = strlen(file) + strlen(msg) + 1 + 4;
sprintf(buffer, "%d", line);
int len = strlen(file) + strlen(msg) + 1 + 4 + strlen(buffer);
len += strlen(buffer);
sprintf(buffer, " at offset %d ", offset());
len += strlen(buffer);
char * real_msg = new char[len];
sprintf(real_msg, "%s (%s:%d)", msg, file, line);
sprintf(real_msg, "%s%s(%s:%d)", msg, buffer, file, line);
// Call indirectly to solve generation ordering problem
Address a(O7, (address)StubRoutines::verify_oop_subroutine_entry_address());
@ -2059,6 +2071,27 @@ void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
#endif
}
void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
Register s1, address d,
relocInfo::relocType rt ) {
if (VM_Version::v9_instructions_work()) {
bpr(rc, a, p, s1, d, rt);
} else {
tst(s1);
br(reg_cond_to_cc_cond(rc), a, p, d, rt);
}
}
void MacroAssembler::br_on_reg_cond( RCondition rc, bool a, Predict p,
Register s1, Label& L ) {
if (VM_Version::v9_instructions_work()) {
bpr(rc, a, p, s1, L);
} else {
tst(s1);
br(reg_cond_to_cc_cond(rc), a, p, L);
}
}
// instruction sequences factored across compiler & interpreter
@ -2582,7 +2615,8 @@ void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Regi
}
}
void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
Register temp_reg,
Label& done, Label* slow_case,
BiasedLockingCounters* counters) {
assert(UseBiasedLocking, "why call this otherwise?");
@ -2658,8 +2692,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, R
markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
mark_reg);
or3(G2_thread, mark_reg, temp_reg);
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
casn(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
@ -2688,8 +2721,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, R
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
or3(G2_thread, temp_reg, temp_reg);
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
casn(mark_addr.base(), mark_reg, temp_reg);
// If the biasing toward our thread failed, this means that
// another thread succeeded in biasing it toward itself and we
// need to revoke that bias. The revocation will occur in the
@ -2719,8 +2751,7 @@ void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg, R
// bits in this situation. Should attempt to preserve them.
load_klass(obj_reg, temp_reg);
ld_ptr(Address(temp_reg, 0, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg);
casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
(address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
casn(mark_addr.base(), mark_reg, temp_reg);
// Fall through to the normal CAS-based lock, because no matter what
// the result of the above CAS, some thread must have succeeded in
// removing the bias bit from the object's header.
@ -2782,8 +2813,10 @@ void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg
// effect).
void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch,
BiasedLockingCounters* counters) {
void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
Register Rbox, Register Rscratch,
BiasedLockingCounters* counters,
bool try_bias) {
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
verify_oop(Roop);
@ -2805,7 +2838,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Registe
// Fetch object's markword
ld_ptr(mark_addr, Rmark);
if (UseBiasedLocking) {
if (try_bias) {
biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
}
@ -2848,7 +2881,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Registe
ld_ptr (mark_addr, Rmark); // fetch obj->mark
// Triage: biased, stack-locked, neutral, inflated
if (UseBiasedLocking) {
if (try_bias) {
biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
// Invariant: if control reaches this point in the emitted stream
// then Rmark has not been modified.
@ -2912,7 +2945,7 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Registe
ld_ptr (mark_addr, Rmark); // fetch obj->mark
// Triage: biased, stack-locked, neutral, inflated
if (UseBiasedLocking) {
if (try_bias) {
biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
// Invariant: if control reaches this point in the emitted stream
// then Rmark has not been modified.
@ -3006,7 +3039,9 @@ void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark, Registe
bind (done) ;
}
void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch) {
void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
Register Rbox, Register Rscratch,
bool try_bias) {
Address mark_addr(Roop, 0, oopDesc::mark_offset_in_bytes());
Label done ;
@ -3017,7 +3052,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, Regis
}
if (EmitSync & 8) {
if (UseBiasedLocking) {
if (try_bias) {
biased_locking_exit(mark_addr, Rscratch, done);
}
@ -3044,7 +3079,7 @@ void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, Regis
// I$ effects.
Label LStacked ;
if (UseBiasedLocking) {
if (try_bias) {
// TODO: eliminate redundant LDs of obj->mark
biased_locking_exit(mark_addr, Rscratch, done);
}
@ -3241,68 +3276,74 @@ void MacroAssembler::eden_allocate(
assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
// get eden boundaries
// note: we need both top & top_addr!
const Register top_addr = t1;
const Register end = t2;
CollectedHeap* ch = Universe::heap();
set((intx)ch->top_addr(), top_addr);
intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
ld_ptr(top_addr, delta, end);
ld_ptr(top_addr, 0, obj);
// try to allocate
Label retry;
bind(retry);
#ifdef ASSERT
// make sure eden top is properly aligned
{
Label L;
btst(MinObjAlignmentInBytesMask, obj);
br(Assembler::zero, false, Assembler::pt, L);
if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
br(Assembler::always, false, Assembler::pt, slow_case);
delayed()->nop();
stop("eden top is not properly aligned");
bind(L);
}
#endif // ASSERT
const Register free = end;
sub(end, obj, free); // compute amount of free space
if (var_size_in_bytes->is_valid()) {
// size is unknown at compile time
cmp(free, var_size_in_bytes);
br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
delayed()->add(obj, var_size_in_bytes, end);
} else {
// size is known at compile time
cmp(free, con_size_in_bytes);
br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
delayed()->add(obj, con_size_in_bytes, end);
}
// Compare obj with the value at top_addr; if still equal, swap the value of
// end with the value at top_addr. If not equal, read the value at top_addr
// into end.
casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
// if someone beat us on the allocation, try again, otherwise continue
cmp(obj, end);
brx(Assembler::notEqual, false, Assembler::pn, retry);
delayed()->mov(end, obj); // nop if successful since obj == end
// get eden boundaries
// note: we need both top & top_addr!
const Register top_addr = t1;
const Register end = t2;
CollectedHeap* ch = Universe::heap();
set((intx)ch->top_addr(), top_addr);
intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
ld_ptr(top_addr, delta, end);
ld_ptr(top_addr, 0, obj);
// try to allocate
Label retry;
bind(retry);
#ifdef ASSERT
// make sure eden top is properly aligned
{
Label L;
btst(MinObjAlignmentInBytesMask, obj);
br(Assembler::zero, false, Assembler::pt, L);
delayed()->nop();
stop("eden top is not properly aligned");
bind(L);
}
#endif // ASSERT
const Register free = end;
sub(end, obj, free); // compute amount of free space
if (var_size_in_bytes->is_valid()) {
// size is unknown at compile time
cmp(free, var_size_in_bytes);
br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
delayed()->add(obj, var_size_in_bytes, end);
} else {
// size is known at compile time
cmp(free, con_size_in_bytes);
br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go the slow case
delayed()->add(obj, con_size_in_bytes, end);
}
// Compare obj with the value at top_addr; if still equal, swap the value of
// end with the value at top_addr. If not equal, read the value at top_addr
// into end.
casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
// if someone beat us on the allocation, try again, otherwise continue
cmp(obj, end);
brx(Assembler::notEqual, false, Assembler::pn, retry);
delayed()->mov(end, obj); // nop if successful since obj == end
#ifdef ASSERT
// make sure eden top is properly aligned
{
Label L;
const Register top_addr = t1;
// make sure eden top is properly aligned
{
Label L;
const Register top_addr = t1;
set((intx)ch->top_addr(), top_addr);
ld_ptr(top_addr, 0, top_addr);
btst(MinObjAlignmentInBytesMask, top_addr);
br(Assembler::zero, false, Assembler::pt, L);
delayed()->nop();
stop("eden top is not properly aligned");
bind(L);
}
set((intx)ch->top_addr(), top_addr);
ld_ptr(top_addr, 0, top_addr);
btst(MinObjAlignmentInBytesMask, top_addr);
br(Assembler::zero, false, Assembler::pt, L);
delayed()->nop();
stop("eden top is not properly aligned");
bind(L);
}
#endif // ASSERT
}
}
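For readers following the allocation path: the sequence above is a standard CAS-based bump-pointer allocation in a shared eden. A standalone C++ sketch of the same protocol (illustrative only; the names below are not HotSpot API, and the slow case is modelled by returning nullptr):

#include <atomic>
#include <cstddef>

// "top" models the shared eden top pointer, "end" the eden limit,
// "size" the object size in bytes.
char* eden_allocate_sketch(std::atomic<char*>& top, char* end, std::size_t size) {
  char* obj = top.load();
  for (;;) {                                      // the "retry" label above
    if (static_cast<std::size_t>(end - obj) < size)
      return nullptr;                             // not enough space: take the slow case
    char* new_top = obj + size;
    // Plays the role of casx_under_lock on top_addr: if *top still equals obj,
    // install new_top; otherwise obj is reloaded and we retry.
    if (top.compare_exchange_weak(obj, new_top))
      return obj;                                 // allocation succeeded
  }
}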
@ -3554,6 +3595,468 @@ void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
}
}
///////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
static uint num_stores = 0;
static uint num_null_pre_stores = 0;
static void count_null_pre_vals(void* pre_val) {
num_stores++;
if (pre_val == NULL) num_null_pre_stores++;
if ((num_stores % 1000000) == 0) {
tty->print_cr(UINT32_FORMAT " stores, " UINT32_FORMAT " (%5.2f%%) with null pre-vals.",
num_stores, num_null_pre_stores,
100.0*(float)num_null_pre_stores/(float)num_stores);
}
}
static address satb_log_enqueue_with_frame = 0;
static u_char* satb_log_enqueue_with_frame_end = 0;
static address satb_log_enqueue_frameless = 0;
static u_char* satb_log_enqueue_frameless_end = 0;
static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
// The calls to this don't work. We'd need to do a fair amount of work to
// make it work.
static void check_index(int ind) {
assert(0 <= ind && ind <= 64*K && ((ind % oopSize) == 0),
"Invariants.")
}
static void generate_satb_log_enqueue(bool with_frame) {
BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
CodeBuffer buf(bb->instructions_begin(), bb->instructions_size());
MacroAssembler masm(&buf);
address start = masm.pc();
Register pre_val;
Label refill, restart;
if (with_frame) {
masm.save_frame(0);
pre_val = I0; // Was O0 before the save.
} else {
pre_val = O0;
}
int satb_q_index_byte_offset =
in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_index());
int satb_q_buf_byte_offset =
in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_buf());
assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
"check sizes in assembly below");
masm.bind(restart);
masm.ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill);
// If the branch is taken, no harm in executing this in the delay slot.
masm.delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
masm.sub(L0, oopSize, L0);
masm.st_ptr(pre_val, L1, L0); // [_buf + index] := I0
if (!with_frame) {
// Use return-from-leaf
masm.retl();
masm.delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
} else {
// Not delayed.
masm.st_ptr(L0, G2_thread, satb_q_index_byte_offset);
}
if (with_frame) {
masm.ret();
masm.delayed()->restore();
}
masm.bind(refill);
address handle_zero =
CAST_FROM_FN_PTR(address,
&SATBMarkQueueSet::handle_zero_index_for_thread);
// This should be rare enough that we can afford to save all the
// scratch registers that the calling context might be using.
masm.mov(G1_scratch, L0);
masm.mov(G3_scratch, L1);
masm.mov(G4, L2);
// We need the value of O0 above (for the write into the buffer), so we
// save and restore it.
masm.mov(O0, L3);
// Since the call will overwrite O7, we save and restore that, as well.
masm.mov(O7, L4);
masm.call_VM_leaf(L5, handle_zero, G2_thread);
masm.mov(L0, G1_scratch);
masm.mov(L1, G3_scratch);
masm.mov(L2, G4);
masm.mov(L3, O0);
masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
masm.delayed()->mov(L4, O7);
if (with_frame) {
satb_log_enqueue_with_frame = start;
satb_log_enqueue_with_frame_end = masm.pc();
} else {
satb_log_enqueue_frameless = start;
satb_log_enqueue_frameless_end = masm.pc();
}
}
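In outline, the stub generated above maintains a per-thread log buffer whose byte index counts down toward zero; index == 0 means the buffer is full and must be handed off ("refill") before the store can be logged. A minimal standalone sketch of that protocol, with illustrative names rather than HotSpot API:

#include <cstddef>
#include <cstdint>

struct SatbQueueSketch {
  std::intptr_t index;   // byte offset of the next free slot; 0 means "buffer full"
  void**        buf;     // log buffer, filled from the top down
};

// Stand-in for SATBMarkQueueSet::handle_zero_index_for_thread: hands the full
// buffer to the collector and installs a fresh one (declared only, as a sketch).
void refill(SatbQueueSketch& q);

void satb_enqueue_sketch(SatbQueueSketch& q, void* pre_val) {
  while (q.index == 0)                          // the "refill" path above
    refill(q);
  q.index -= sizeof(void*);                     // masm.sub(L0, oopSize, L0)
  q.buf[q.index / sizeof(void*)] = pre_val;     // masm.st_ptr(pre_val, L1, L0)
}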
static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
if (with_frame) {
if (satb_log_enqueue_with_frame == 0) {
generate_satb_log_enqueue(with_frame);
assert(satb_log_enqueue_with_frame != 0, "postcondition.");
if (G1SATBPrintStubs) {
tty->print_cr("Generated with-frame satb enqueue:");
Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
satb_log_enqueue_with_frame_end,
tty);
}
}
} else {
if (satb_log_enqueue_frameless == 0) {
generate_satb_log_enqueue(with_frame);
assert(satb_log_enqueue_frameless != 0, "postcondition.");
if (G1SATBPrintStubs) {
tty->print_cr("Generated frameless satb enqueue:");
Disassembler::decode((u_char*)satb_log_enqueue_frameless,
satb_log_enqueue_frameless_end,
tty);
}
}
}
}
void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs) {
assert(offset == 0 || index == noreg, "choose one");
if (G1DisablePreBarrier) return;
// satb_log_barrier(tmp, obj, offset, preserve_o_regs);
Label filtered;
// satb_log_barrier_work0(tmp, filtered);
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
ld(G2,
in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()),
tmp);
} else {
guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
"Assumption");
ldsb(G2,
in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()),
tmp);
}
// Check on whether to annul.
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
delayed() -> nop();
// satb_log_barrier_work1(tmp, offset);
if (index == noreg) {
if (Assembler::is_simm13(offset)) {
ld_ptr(obj, offset, tmp);
} else {
set(offset, tmp);
ld_ptr(obj, tmp, tmp);
}
} else {
ld_ptr(obj, index, tmp);
}
// satb_log_barrier_work2(obj, tmp, offset);
// satb_log_barrier_work3(tmp, filtered, preserve_o_regs);
const Register pre_val = tmp;
if (G1SATBBarrierPrintNullPreVals) {
save_frame(0);
mov(pre_val, O0);
// Save G-regs that target may use.
mov(G1, L1);
mov(G2, L2);
mov(G3, L3);
mov(G4, L4);
mov(G5, L5);
call(CAST_FROM_FN_PTR(address, &count_null_pre_vals));
delayed()->nop();
// Restore G-regs that target may have used.
mov(L1, G1);
mov(L2, G2);
mov(L3, G3);
mov(L4, G4);
mov(L5, G5);
restore(G0, G0, G0);
}
// Check on whether to annul.
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered);
delayed() -> nop();
// OK, it's not filtered, so we'll need to call enqueue. In the normal
// case, pre_val will be a scratch G-reg, but there are some cases in which
// it's an O-reg. In the former case, do a normal call; in the latter,
// do a save here and call the frameless version.
guarantee(pre_val->is_global() || pre_val->is_out(),
"Or we need to think harder.");
if (pre_val->is_global() && !preserve_o_regs) {
generate_satb_log_enqueue_if_necessary(true); // with frame.
call(satb_log_enqueue_with_frame);
delayed()->mov(pre_val, O0);
} else {
generate_satb_log_enqueue_if_necessary(false); // with frameless.
save_frame(0);
call(satb_log_enqueue_frameless);
delayed()->mov(pre_val->after_save(), O0);
restore();
}
bind(filtered);
}
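Stripped of the register shuffling and the optional statistics code, the pre-barrier emitted above applies two filters and then logs the value being overwritten. A condensed sketch that reuses SatbQueueSketch from the sketch after generate_satb_log_enqueue (illustrative only; the field names are placeholders for the thread-local PtrQueue state read via the offsets above):

struct ThreadSketch {
  bool            satb_active;   // the PtrQueue "active" flag tested first
  SatbQueueSketch satb_queue;    // per-thread SATB log
};

void g1_pre_barrier_sketch(ThreadSketch& t, void** field_addr) {
  if (!t.satb_active) return;                  // filter 1: marking not in progress
  void* pre_val = *field_addr;                 // the value about to be overwritten
  if (pre_val == nullptr) return;              // filter 2: nothing worth logging
  satb_enqueue_sketch(t.satb_queue, pre_val);  // hand it to the concurrent marker
}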
static jint num_ct_writes = 0;
static jint num_ct_writes_filtered_in_hr = 0;
static jint num_ct_writes_filtered_null = 0;
static jint num_ct_writes_filtered_pop = 0;
static G1CollectedHeap* g1 = NULL;
static Thread* count_ct_writes(void* filter_val, void* new_val) {
Atomic::inc(&num_ct_writes);
if (filter_val == NULL) {
Atomic::inc(&num_ct_writes_filtered_in_hr);
} else if (new_val == NULL) {
Atomic::inc(&num_ct_writes_filtered_null);
} else {
if (g1 == NULL) {
g1 = G1CollectedHeap::heap();
}
if ((HeapWord*)new_val < g1->popular_object_boundary()) {
Atomic::inc(&num_ct_writes_filtered_pop);
}
}
if ((num_ct_writes % 1000000) == 0) {
jint num_ct_writes_filtered =
num_ct_writes_filtered_in_hr +
num_ct_writes_filtered_null +
num_ct_writes_filtered_pop;
tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
" (%5.2f%% intra-HR, %5.2f%% null, %5.2f%% popular).",
num_ct_writes,
100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_in_hr/
(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_null/
(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_pop/
(float)num_ct_writes);
}
return Thread::current();
}
static address dirty_card_log_enqueue = 0;
static u_char* dirty_card_log_enqueue_end = 0;
// This gets to assume that o0 contains the object address.
static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
CodeBuffer buf(bb->instructions_begin(), bb->instructions_size());
MacroAssembler masm(&buf);
address start = masm.pc();
Label not_already_dirty, restart, refill;
#ifdef _LP64
masm.srlx(O0, CardTableModRefBS::card_shift, O0);
#else
masm.srl(O0, CardTableModRefBS::card_shift, O0);
#endif
Address rs(O1, (address)byte_map_base);
masm.load_address(rs); // O1 := <card table base>
masm.ldub(O0, O1, O2); // O2 := [O0 + O1]
masm.br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
O2, not_already_dirty);
// Get O1 + O2 into a reg by itself -- useful in the take-the-branch
// case, harmless if not.
masm.delayed()->add(O0, O1, O3);
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
masm.retl();
masm.delayed()->nop();
// Not dirty.
masm.bind(not_already_dirty);
// First, dirty it.
masm.stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
int dirty_card_q_index_byte_offset =
in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_index());
int dirty_card_q_buf_byte_offset =
in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_buf());
masm.bind(restart);
masm.ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
masm.br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
L0, refill);
// If the branch is taken, no harm in executing this in the delay slot.
masm.delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
masm.sub(L0, oopSize, L0);
masm.st_ptr(O3, L1, L0); // [_buf + index] := O3 (the card address)
// Use return-from-leaf
masm.retl();
masm.delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
masm.bind(refill);
address handle_zero =
CAST_FROM_FN_PTR(address,
&DirtyCardQueueSet::handle_zero_index_for_thread);
// This should be rare enough that we can afford to save all the
// scratch registers that the calling context might be using.
masm.mov(G1_scratch, L3);
masm.mov(G3_scratch, L5);
// We need the value of O3 above (for the write into the buffer), so we
// save and restore it.
masm.mov(O3, L6);
// Since the call will overwrite O7, we save and restore that, as well.
masm.mov(O7, L4);
masm.call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
masm.mov(L3, G1_scratch);
masm.mov(L5, G3_scratch);
masm.mov(L6, O3);
masm.br(Assembler::always, /*annul*/false, Assembler::pt, restart);
masm.delayed()->mov(L4, O7);
dirty_card_log_enqueue = start;
dirty_card_log_enqueue_end = masm.pc();
// XXX Should have a guarantee here about not going off the end!
// Does it already do so? Do an experiment...
}
static inline void
generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
if (dirty_card_log_enqueue == 0) {
generate_dirty_card_log_enqueue(byte_map_base);
assert(dirty_card_log_enqueue != 0, "postcondition.");
if (G1SATBPrintStubs) {
tty->print_cr("Generated dirty_card enqueue:");
Disassembler::decode((u_char*)dirty_card_log_enqueue,
dirty_card_log_enqueue_end,
tty);
}
}
}
void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
Label filtered;
MacroAssembler* post_filter_masm = this;
if (new_val == G0) return;
if (G1DisablePostBarrier) return;
G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::G1SATBCT ||
bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
if (G1RSBarrierRegionFilter) {
xor3(store_addr, new_val, tmp);
#ifdef _LP64
srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#else
srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
#endif
if (G1PrintCTFilterStats) {
guarantee(tmp->is_global(), "Or stats won't work...");
// This is a sleazy hack: I'm temporarily hijacking G2, which I
// promise to restore.
mov(new_val, G2);
save_frame(0);
mov(tmp, O0);
mov(G2, O1);
// Save G-regs that target may use.
mov(G1, L1);
mov(G2, L2);
mov(G3, L3);
mov(G4, L4);
mov(G5, L5);
call(CAST_FROM_FN_PTR(address, &count_ct_writes));
delayed()->nop();
mov(O0, G2);
// Restore G-regs that target may have used.
mov(L1, G1);
mov(L3, G3);
mov(L4, G4);
mov(L5, G5);
restore(G0, G0, G0);
}
// XXX Should I predict this taken or not? Does it matter?
br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered);
delayed()->nop();
}
// Now we decide how to generate the card table write. If we're
// enqueueing, we call out to a generated function. Otherwise, we do it
// inline here.
if (G1RSBarrierUseQueue) {
// If the "store_addr" register is an "in" or "local" register, move it to
// a scratch reg so we can pass it as an argument.
bool use_scr = !(store_addr->is_global() || store_addr->is_out());
// Pick a scratch register different from "tmp".
Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
// Make sure we use up the delay slot!
if (use_scr) {
post_filter_masm->mov(store_addr, scr);
} else {
post_filter_masm->nop();
}
generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
save_frame(0);
call(dirty_card_log_enqueue);
if (use_scr) {
delayed()->mov(scr, O0);
} else {
delayed()->mov(store_addr->after_save(), O0);
}
restore();
} else {
#ifdef _LP64
post_filter_masm->srlx(store_addr, CardTableModRefBS::card_shift, store_addr);
#else
post_filter_masm->srl(store_addr, CardTableModRefBS::card_shift, store_addr);
#endif
assert( tmp != store_addr, "need separate temp reg");
Address rs(tmp, (address)bs->byte_map_base);
load_address(rs);
stb(G0, rs.base(), store_addr);
}
bind(filtered);
}
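For orientation, the post-barrier above reduces to a cross-region filter followed by either an inline card-table write or a dirty-card enqueue, depending on G1RSBarrierUseQueue. A condensed sketch under the same conventions as the earlier sketches (illustrative only: log_of_region_size stands in for HeapRegion::LogOfHRGrainBytes, card_shift for CardTableModRefBS::card_shift, the dirty-card queue reuses SatbQueueSketch since both queues share the PtrQueue layout, and the region filter is assumed enabled as in the G1RSBarrierRegionFilter path):

#include <cstdint>

void g1_post_barrier_sketch(void** store_addr, void* new_val,
                            unsigned char* byte_map_base,
                            int log_of_region_size, int card_shift,
                            bool use_queue, SatbQueueSketch& dirty_card_queue) {
  if (new_val == nullptr) return;                       // storing NULL: no barrier needed
  std::uintptr_t a = reinterpret_cast<std::uintptr_t>(store_addr);
  std::uintptr_t v = reinterpret_cast<std::uintptr_t>(new_val);
  if (((a ^ v) >> log_of_region_size) == 0) return;     // same region: filter out
  unsigned char* card = byte_map_base + (a >> card_shift);
  if (use_queue) {
    if (*card != 0) {                                   // not already dirty (0 is the dirty value)
      *card = 0;                                        // dirty it ...
      satb_enqueue_sketch(dirty_card_queue, card);      // ... and log it for refinement
    }
  } else {
    *card = 0;                                          // plain card-table write, as in the else branch
  }
}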
#endif // SERIALGC
///////////////////////////////////////////////////////////////////////////////////
void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
// If we're writing constant NULL, we can skip the write barrier.
if (new_val == G0) return;
CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
assert(bs->kind() == BarrierSet::CardTableModRef ||
bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
card_table_write(bs->byte_map_base, tmp, store_addr);
}
void MacroAssembler::load_klass(Register src_oop, Register klass) {
// The number of bytes in this code is used by
// MachCallDynamicJavaNode::ret_addr_offset()

View File

@ -1439,7 +1439,11 @@ public:
// pp 214
void save( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2) ); }
void save( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void save( Register s1, int simm13a, Register d ) {
// make sure frame is at least large enough for the register save area
assert(-simm13a >= 16 * wordSize, "frame too small");
emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) );
}
void restore( Register s1 = G0, Register s2 = G0, Register d = G0 ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2) ); }
void restore( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
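A quick sanity check on the new assertion, assuming the LP64 case (wordSize == 8): the register window spills the 8 in and 8 local registers into the frame, so save() must reserve at least 16 * 8 = 128 bytes.

// Worked example for assert(-simm13a >= 16 * wordSize, ...), LP64 assumed:
//   save(SP, -176, SP)  ->  -(-176) = 176 >= 128  : accepted (conventional minimum V9 frame)
//   save(SP,  -64, SP)  ->  -(-64)  =  64 <  128  : the assert now fires at code-generation
//                                                   time instead of silently corrupting the stack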
@ -1594,6 +1598,11 @@ public:
inline void wrasi( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
// For a given register condition, return the appropriate condition code
// Condition (the one you would use to get the same effect after "tst" on
// the target register.)
Assembler::Condition reg_cond_to_cc_cond(RCondition in);
// Creation
Assembler(CodeBuffer* code) : AbstractAssembler(code) {
@ -1630,6 +1639,8 @@ class RegistersForDebugging : public StackObj {
// restore global registers in case C code disturbed them
static void restore_registers(MacroAssembler* a, Register r);
};
@ -1722,6 +1733,12 @@ class MacroAssembler: public Assembler {
void br_null ( Register s1, bool a, Predict p, Label& L );
void br_notnull( Register s1, bool a, Predict p, Label& L );
// These versions will do the most efficient thing on v8 and v9. Perhaps
// this is what the routine above was meant to do, but it didn't (and
// didn't cover both target address kinds.)
void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, Label& L);
inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
@ -2056,9 +2073,23 @@ class MacroAssembler: public Assembler {
#endif // ASSERT
public:
// Stores
void store_check(Register tmp, Register obj); // store check for obj - register is destroyed afterwards
void store_check(Register tmp, Register obj, Register offset); // store check for obj - register is destroyed afterwards
// Write to card table for obj - register is destroyed afterwards.
void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
#ifndef SERIALGC
// Array store and offset
void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);
void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
// May do filtering, depending on the boolean arguments.
void g1_card_table_write(jbyte* byte_map_base,
Register tmp, Register obj, Register new_val,
bool region_filter, bool null_filter);
#endif // SERIALGC
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void push_fTOS();
@ -2189,9 +2220,13 @@ class MacroAssembler: public Assembler {
// These set the icc condition code to equal if the lock succeeded
// and notEqual if it failed and requires a slow case
void compiler_lock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch,
BiasedLockingCounters* counters = NULL);
void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox, Register Rscratch);
void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
Register Rscratch,
BiasedLockingCounters* counters = NULL,
bool try_bias = UseBiasedLocking);
void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
Register Rscratch,
bool try_bias = UseBiasedLocking);
// Biased locking support
// Upon entry, lock_reg must point to the lock record on the stack,

View File

@ -404,4 +404,55 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
}
///////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
pre_val_reg, _continuation);
__ delayed()->nop();
__ call(Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id));
__ delayed()->mov(pre_val_reg, G4);
__ br(Assembler::always, false, Assembler::pt, _continuation);
__ delayed()->nop();
}
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->is_a(BarrierSet::G1SATBCTLogging),
"Must be if we're using this.");
return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register addr_reg = addr()->as_pointer_register();
Register new_val_reg = new_val()->as_register();
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
new_val_reg, _continuation);
__ delayed()->nop();
__ call(Runtime1::entry_for(Runtime1::Runtime1::g1_post_barrier_slow_id));
__ delayed()->mov(addr_reg, G4);
__ br(Assembler::always, false, Assembler::pt, _continuation);
__ delayed()->nop();
}
#endif // SERIALGC
///////////////////////////////////////////////////////////////////////////////////
#undef __

View File

@ -2093,7 +2093,11 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// the known type isn't loaded since the code sanity checks
// in debug mode and the type isn't required when we know the exact type
// also check that the type is an array type.
if (op->expected_type() == NULL) {
// We also, for now, always call the stub if the barrier set requires a
// write_ref_pre barrier (which the stub does, but none of the optimized
// cases currently does).
if (op->expected_type() == NULL ||
Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) {
__ mov(src, O0);
__ mov(src_pos, O1);
__ mov(dst, O2);

View File

@ -365,6 +365,10 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
__ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info);
}
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
}
__ move(value.result(), array_addr, null_check_info);
if (obj_store) {
// Is this precise?
@ -663,6 +667,10 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
__ add(obj.result(), offset.result(), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
pre_barrier(obj.result(), false, NULL);
}
if (type == objectType)
__ cas_obj(addr, cmp.result(), val.result(), t1, t2);
else if (type == intType)
@ -677,7 +685,11 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
if (type == objectType) { // Write-barrier needed for Object fields.
#ifdef PRECISE_CARDMARK
post_barrier(addr, val.result());
#else
post_barrier(obj.result(), val.result());
#endif // PRECISE_CARDMARK
}
}
@ -1154,6 +1166,10 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
addr = new LIR_Address(base_op, index_op, type);
}
if (is_obj) {
pre_barrier(LIR_OprFact::address(addr), false, NULL);
// _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
}
__ move(data, addr);
if (is_obj) {
// This address is precise
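Taken together, the LIRGenerator changes above bracket every oop store with the two G1 barriers. An outline of the resulting code shape (pre_barrier/post_barrier name the generator helpers called above; which address the post-barrier receives depends on PRECISE_CARDMARK, as in the CompareAndSwap case):

// pre_barrier(field_addr)            -- enqueue the old value (SATB), emitted before the move
// *field_addr = new_val              -- the store itself
// post_barrier(card_target, new_val) -- dirty-card logging, emitted after the move, where
//   card_target is the field address when PRECISE_CARDMARK is defined (precise card marking)
//   and the containing object otherwise (imprecise card marking).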

View File

@ -832,6 +832,163 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
#ifndef SERIALGC
case g1_pre_barrier_slow_id:
{ // G4: previous value of memory
BarrierSet* bs = Universe::heap()->barrier_set();
if (bs->kind() != BarrierSet::G1SATBCTLogging) {
__ save_frame(0);
__ set((int)id, O1);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
__ should_not_reach_here();
break;
}
__ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
Register pre_val = G4;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
Label refill, restart;
bool with_frame = false; // I don't know if we can do with-frame.
int satb_q_index_byte_offset =
in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_index());
int satb_q_buf_byte_offset =
in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_buf());
__ bind(restart);
__ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false,
Assembler::pn, tmp, refill);
// If the branch is taken, no harm in executing this in the delay slot.
__ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
__ sub(tmp, oopSize, tmp);
__ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := pre_val
// Use return-from-leaf
__ retl();
__ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);
__ bind(refill);
__ save_frame(0);
__ mov(pre_val, L0);
__ mov(tmp, L1);
__ mov(tmp2, L2);
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address,
SATBMarkQueueSet::handle_zero_index_for_thread),
G2_thread);
__ mov(L0, pre_val);
__ mov(L1, tmp);
__ mov(L2, tmp2);
__ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
__ delayed()->restore();
}
break;
case g1_post_barrier_slow_id:
{
BarrierSet* bs = Universe::heap()->barrier_set();
if (bs->kind() != BarrierSet::G1SATBCTLogging) {
__ save_frame(0);
__ set((int)id, O1);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
__ should_not_reach_here();
break;
}
__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
Register addr = G4;
Register cardtable = G5;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
Label not_already_dirty, restart, refill;
#ifdef _LP64
__ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
__ srl(addr, CardTableModRefBS::card_shift, addr);
#endif
Address rs(cardtable, (address)byte_map_base);
__ load_address(rs); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
__ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
tmp, not_already_dirty);
// Get cardtable + tmp into a reg by itself -- useful in the take-the-branch
// case, harmless if not.
__ delayed()->add(addr, cardtable, tmp2);
// We didn't take the branch, so we're already dirty: return.
// Use return-from-leaf
__ retl();
__ delayed()->nop();
// Not dirty.
__ bind(not_already_dirty);
// First, dirty it.
__ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).
Register tmp3 = cardtable;
Register tmp4 = tmp;
// these registers are now dead
addr = cardtable = tmp = noreg;
int dirty_card_q_index_byte_offset =
in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_index());
int dirty_card_q_buf_byte_offset =
in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_buf());
__ bind(restart);
__ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
tmp3, refill);
// If the branch is taken, no harm in executing this in the delay slot.
__ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
__ sub(tmp3, oopSize, tmp3);
__ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>
// Use return-from-leaf
__ retl();
__ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);
__ bind(refill);
__ save_frame(0);
__ mov(tmp2, L0);
__ mov(tmp3, L1);
__ mov(tmp4, L2);
__ call_VM_leaf(L7_thread_cache,
CAST_FROM_FN_PTR(address,
DirtyCardQueueSet::handle_zero_index_for_thread),
G2_thread);
__ mov(L0, tmp2);
__ mov(L1, tmp3);
__ mov(L2, tmp4);
__ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
__ delayed()->restore();
}
break;
#endif // !SERIALGC
default:
{ __ set_info("unimplemented entry", dont_gc_arguments);
__ save_frame(0);

View File

@ -906,7 +906,7 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
// load next super to check
if (UseCompressedOops) {
ld( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
lduw( Rtmp2, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rtmp3);
// Bump array pointer forward one oop
add( Rtmp2, 4, Rtmp2 );
} else {

View File

@ -395,6 +395,7 @@ reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4,
);
reg_class g1_regL(R_G1H,R_G1);
reg_class g3_regL(R_G3H,R_G3);
reg_class o2_regL(R_O2H,R_O2);
reg_class o7_regL(R_O7H,R_O7);
@ -1743,7 +1744,7 @@ const bool Matcher::convL2FSupported(void) {
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int offset) {
bool Matcher::is_short_branch_offset(int rule, int offset) {
return false;
}
@ -1926,18 +1927,23 @@ encode %{
$mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
%}
enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
emit_form3_mem_reg(cbuf, this, $primary, -1,
$mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
%}
enc_class form3_mem_reg_little( memory mem, iRegI dst) %{
emit_form3_mem_reg_asi(cbuf, this, $primary, $tertiary,
emit_form3_mem_reg_asi(cbuf, this, $primary, -1,
$mem$$base, $mem$$disp, $mem$$index, $dst$$reg, Assembler::ASI_PRIMARY_LITTLE);
%}
enc_class form3_mem_prefetch_read( memory mem ) %{
emit_form3_mem_reg(cbuf, this, $primary, $tertiary,
emit_form3_mem_reg(cbuf, this, $primary, -1,
$mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
%}
enc_class form3_mem_prefetch_write( memory mem ) %{
emit_form3_mem_reg(cbuf, this, $primary, $tertiary,
emit_form3_mem_reg(cbuf, this, $primary, -1,
$mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
%}
@ -1945,8 +1951,8 @@ encode %{
assert( Assembler::is_simm13($mem$$disp ), "need disp and disp+4" );
assert( Assembler::is_simm13($mem$$disp+4), "need disp and disp+4" );
guarantee($mem$$index == R_G0_enc, "double index?");
emit_form3_mem_reg(cbuf, this, $primary, $tertiary, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
emit_form3_mem_reg(cbuf, this, $primary, $tertiary, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg );
emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg );
emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
%}
@ -1956,14 +1962,14 @@ encode %{
assert( Assembler::is_simm13($mem$$disp+4), "need disp and disp+4" );
guarantee($mem$$index == R_G0_enc, "double index?");
// Load long with 2 instructions
emit_form3_mem_reg(cbuf, this, $primary, $tertiary, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 );
emit_form3_mem_reg(cbuf, this, $primary, $tertiary, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 );
emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
%}
//%%% form3_mem_plus_4_reg is a hack--get rid of it
enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
emit_form3_mem_reg(cbuf, this, $primary, $tertiary, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
%}
enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
@ -2683,7 +2689,7 @@ enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
assert(Rbox != Rscratch, "");
assert(Rbox != Rmark, "");
__ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters);
__ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
%}
enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
@ -2699,7 +2705,7 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
assert(Rbox != Rscratch, "");
assert(Rbox != Rmark, "");
__ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch);
__ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
%}
enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
@ -2711,8 +2717,7 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
// casx_under_lock picks 1 of 3 encodings:
// For 32-bit pointers you get a 32-bit CAS
// For 64-bit pointers you get a 64-bit CASX
__ casx_under_lock(Rmem, Rold, Rnew, // Swap(*Rmem,Rnew) if *Rmem == Rold
(address) StubRoutines::Sparc::atomic_memory_operation_lock_addr());
__ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
__ cmp( Rold, Rnew );
%}
@ -3761,6 +3766,14 @@ operand g1RegL() %{
interface(REG_INTER);
%}
operand g3RegL() %{
constraint(ALLOC_IN_RC(g3_regL));
match(iRegL);
format %{ %}
interface(REG_INTER);
%}
// Int Register safe
// This is 64bit safe
operand iRegIsafe() %{
@ -5062,7 +5075,7 @@ instruct stkI_to_regF(regF dst, stackSlotI src) %{
size(4);
format %{ "LDF $src,$dst\t! stkI to regF" %}
opcode(Assembler::ldf_op3);
ins_encode(form3_mem_reg(src, dst));
ins_encode(simple_form3_mem_reg(src, dst));
ins_pipe(floadF_stk);
%}
@ -5073,7 +5086,7 @@ instruct stkL_to_regD(regD dst, stackSlotL src) %{
size(4);
format %{ "LDDF $src,$dst\t! stkL to regD" %}
opcode(Assembler::lddf_op3);
ins_encode(form3_mem_reg(src, dst));
ins_encode(simple_form3_mem_reg(src, dst));
ins_pipe(floadD_stk);
%}
@ -5084,7 +5097,7 @@ instruct regF_to_stkI(stackSlotI dst, regF src) %{
size(4);
format %{ "STF $src,$dst\t! regF to stkI" %}
opcode(Assembler::stf_op3);
ins_encode(form3_mem_reg(dst, src));
ins_encode(simple_form3_mem_reg(dst, src));
ins_pipe(fstoreF_stk_reg);
%}
@ -5095,7 +5108,7 @@ instruct regD_to_stkL(stackSlotL dst, regD src) %{
size(4);
format %{ "STDF $src,$dst\t! regD to stkL" %}
opcode(Assembler::stdf_op3);
ins_encode(form3_mem_reg(dst, src));
ins_encode(simple_form3_mem_reg(dst, src));
ins_pipe(fstoreD_stk_reg);
%}
@ -5106,7 +5119,7 @@ instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
format %{ "STW $src,$dst.hi\t! long\n\t"
"STW R_G0,$dst.lo" %}
opcode(Assembler::stw_op3);
ins_encode(form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
ins_pipe(lstoreI_stk_reg);
%}
@ -5117,7 +5130,7 @@ instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
size(4);
format %{ "STX $src,$dst\t! regL to stkD" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( dst, src ) );
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_stk_reg);
%}
@ -5131,7 +5144,7 @@ instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
size(4);
format %{ "LDUW $src,$dst\t!stk" %}
opcode(Assembler::lduw_op3);
ins_encode( form3_mem_reg( src, dst ) );
ins_encode(simple_form3_mem_reg( src, dst ) );
ins_pipe(iload_mem);
%}
@ -5143,7 +5156,7 @@ instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
size(4);
format %{ "STW $src,$dst\t!stk" %}
opcode(Assembler::stw_op3);
ins_encode( form3_mem_reg( dst, src ) );
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
@ -5155,7 +5168,7 @@ instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
size(4);
format %{ "LDX $src,$dst\t! long" %}
opcode(Assembler::ldx_op3);
ins_encode( form3_mem_reg( src, dst ) );
ins_encode(simple_form3_mem_reg( src, dst ) );
ins_pipe(iload_mem);
%}
@ -5167,7 +5180,7 @@ instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
size(4);
format %{ "STX $src,$dst\t! long" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( dst, src ) );
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
@ -5179,7 +5192,7 @@ instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
size(4);
format %{ "LDX $src,$dst\t!ptr" %}
opcode(Assembler::ldx_op3);
ins_encode( form3_mem_reg( src, dst ) );
ins_encode(simple_form3_mem_reg( src, dst ) );
ins_pipe(iload_mem);
%}
@ -5190,7 +5203,7 @@ instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
size(4);
format %{ "STX $src,$dst\t!ptr" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( dst, src ) );
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
#else // _LP64
@ -5200,7 +5213,7 @@ instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
ins_cost(MEMORY_REF_COST);
format %{ "LDUW $src,$dst\t!ptr" %}
opcode(Assembler::lduw_op3, Assembler::ldst_op);
ins_encode( form3_mem_reg( src, dst ) );
ins_encode(simple_form3_mem_reg( src, dst ) );
ins_pipe(iload_mem);
%}
@ -5210,7 +5223,7 @@ instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
ins_cost(MEMORY_REF_COST);
format %{ "STW $src,$dst\t!ptr" %}
opcode(Assembler::stw_op3, Assembler::ldst_op);
ins_encode( form3_mem_reg( dst, src ) );
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
#endif // _LP64
@ -5273,7 +5286,7 @@ instruct loadB(iRegI dst, memory mem) %{
size(4);
format %{ "LDSB $mem,$dst" %}
opcode(Assembler::ldsb_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mask_mem);
%}
@ -5285,7 +5298,7 @@ instruct loadUB(iRegI dst, memory mem, immI_255 bytemask) %{
size(4);
format %{ "LDUB $mem,$dst" %}
opcode(Assembler::ldub_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mask_mem);
%}
@ -5297,7 +5310,7 @@ instruct loadUBL(iRegL dst, memory mem, immL_FF bytemask) %{
size(4);
format %{ "LDUB $mem,$dst" %}
opcode(Assembler::ldub_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mask_mem);
%}
@ -5309,7 +5322,7 @@ instruct loadUCL(iRegL dst, memory mem, immL_FFFF bytemask) %{
size(4);
format %{ "LDUH $mem,$dst" %}
opcode(Assembler::lduh_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mask_mem);
%}
@ -5321,7 +5334,7 @@ instruct loadC(iRegI dst, memory mem) %{
size(4);
format %{ "LDUH $mem,$dst" %}
opcode(Assembler::lduh_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mask_mem);
%}
@ -5333,7 +5346,7 @@ instruct loadI(iRegI dst, memory mem) %{
format %{ "LDUW $mem,$dst" %}
opcode(Assembler::lduw_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
@ -5344,7 +5357,7 @@ instruct loadL(iRegL dst, memory mem ) %{
size(4);
format %{ "LDX $mem,$dst\t! long" %}
opcode(Assembler::ldx_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
@ -5359,7 +5372,7 @@ instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
"\tSLLX #32, $dst, $dst\n"
"\tOR $dst, R_O7, $dst" %}
opcode(Assembler::lduw_op3);
ins_encode( form3_mem_reg_long_unaligned_marshal( mem, dst ));
ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
ins_pipe(iload_mem);
%}
@ -5370,7 +5383,7 @@ instruct loadA8B(regD dst, memory mem) %{
size(4);
format %{ "LDDF $mem,$dst\t! packed8B" %}
opcode(Assembler::lddf_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(floadD_mem);
%}
@ -5381,7 +5394,7 @@ instruct loadA4C(regD dst, memory mem) %{
size(4);
format %{ "LDDF $mem,$dst\t! packed4C" %}
opcode(Assembler::lddf_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(floadD_mem);
%}
@ -5392,7 +5405,7 @@ instruct loadA4S(regD dst, memory mem) %{
size(4);
format %{ "LDDF $mem,$dst\t! packed4S" %}
opcode(Assembler::lddf_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(floadD_mem);
%}
@ -5403,7 +5416,7 @@ instruct loadA2I(regD dst, memory mem) %{
size(4);
format %{ "LDDF $mem,$dst\t! packed2I" %}
opcode(Assembler::lddf_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(floadD_mem);
%}
@ -5415,7 +5428,7 @@ instruct loadRange(iRegI dst, memory mem) %{
size(4);
format %{ "LDUW $mem,$dst\t! range" %}
opcode(Assembler::lduw_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
@ -5427,7 +5440,7 @@ instruct loadI_freg(regF dst, memory mem) %{
format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
opcode(Assembler::ldf_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(floadF_mem);
%}
@ -5514,7 +5527,7 @@ instruct loadS(iRegI dst, memory mem) %{
size(4);
format %{ "LDSH $mem,$dst" %}
opcode(Assembler::ldsh_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mask_mem);
%}
@ -5526,7 +5539,7 @@ instruct loadD(regD dst, memory mem) %{
size(4);
format %{ "LDDF $mem,$dst" %}
opcode(Assembler::lddf_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(floadD_mem);
%}
@ -5550,7 +5563,7 @@ instruct loadF(regF dst, memory mem) %{
size(4);
format %{ "LDF $mem,$dst" %}
opcode(Assembler::ldf_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(floadF_mem);
%}
@ -5719,7 +5732,7 @@ instruct storeB(memory mem, iRegI src) %{
size(4);
format %{ "STB $src,$mem\t! byte" %}
opcode(Assembler::stb_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(istore_mem_reg);
%}
@ -5730,7 +5743,7 @@ instruct storeB0(memory mem, immI0 src) %{
size(4);
format %{ "STB $src,$mem\t! byte" %}
opcode(Assembler::stb_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(istore_mem_zero);
%}
@ -5741,7 +5754,7 @@ instruct storeCM0(memory mem, immI0 src) %{
size(4);
format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
opcode(Assembler::stb_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(istore_mem_zero);
%}
@ -5753,7 +5766,7 @@ instruct storeC(memory mem, iRegI src) %{
size(4);
format %{ "STH $src,$mem\t! short" %}
opcode(Assembler::sth_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(istore_mem_reg);
%}
@ -5764,7 +5777,7 @@ instruct storeC0(memory mem, immI0 src) %{
size(4);
format %{ "STH $src,$mem\t! short" %}
opcode(Assembler::sth_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(istore_mem_zero);
%}
@ -5776,7 +5789,7 @@ instruct storeI(memory mem, iRegI src) %{
size(4);
format %{ "STW $src,$mem" %}
opcode(Assembler::stw_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(istore_mem_reg);
%}
@ -5787,7 +5800,7 @@ instruct storeL(memory mem, iRegL src) %{
size(4);
format %{ "STX $src,$mem\t! long" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(istore_mem_reg);
%}
@ -5798,7 +5811,7 @@ instruct storeI0(memory mem, immI0 src) %{
size(4);
format %{ "STW $src,$mem" %}
opcode(Assembler::stw_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(istore_mem_zero);
%}
@ -5809,7 +5822,7 @@ instruct storeL0(memory mem, immL0 src) %{
size(4);
format %{ "STX $src,$mem" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(istore_mem_zero);
%}
@ -5821,7 +5834,7 @@ instruct storeI_Freg(memory mem, regF src) %{
size(4);
format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
opcode(Assembler::stf_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(fstoreF_mem_reg);
%}
@ -5904,7 +5917,7 @@ instruct storeD( memory mem, regD src) %{
size(4);
format %{ "STDF $src,$mem" %}
opcode(Assembler::stdf_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(fstoreD_mem_reg);
%}
@ -5915,7 +5928,7 @@ instruct storeD0( memory mem, immD0 src) %{
size(4);
format %{ "STX $src,$mem" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(fstoreD_mem_zero);
%}
@ -5927,7 +5940,7 @@ instruct storeF( memory mem, regF src) %{
size(4);
format %{ "STF $src,$mem" %}
opcode(Assembler::stf_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(fstoreF_mem_reg);
%}
@ -5938,7 +5951,7 @@ instruct storeF0( memory mem, immF0 src) %{
size(4);
format %{ "STW $src,$mem\t! storeF0" %}
opcode(Assembler::stw_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(fstoreF_mem_zero);
%}
@ -5949,7 +5962,7 @@ instruct storeA8B(memory mem, regD src) %{
size(4);
format %{ "STDF $src,$mem\t! packed8B" %}
opcode(Assembler::stdf_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(fstoreD_mem_reg);
%}
@ -6004,7 +6017,7 @@ instruct storeA8B0(memory mem, immI0 zero) %{
size(4);
format %{ "STX $zero,$mem\t! packed8B" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(fstoreD_mem_zero);
%}
@ -6015,7 +6028,7 @@ instruct storeA4C(memory mem, regD src) %{
size(4);
format %{ "STDF $src,$mem\t! packed4C" %}
opcode(Assembler::stdf_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(fstoreD_mem_reg);
%}
@ -6026,7 +6039,7 @@ instruct storeA4C0(memory mem, immI0 zero) %{
size(4);
format %{ "STX $zero,$mem\t! packed4C" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(fstoreD_mem_zero);
%}
@ -6037,7 +6050,7 @@ instruct storeA2I(memory mem, regD src) %{
size(4);
format %{ "STDF $src,$mem\t! packed2I" %}
opcode(Assembler::stdf_op3);
ins_encode( form3_mem_reg( mem, src ) );
ins_encode(simple_form3_mem_reg( mem, src ) );
ins_pipe(fstoreD_mem_reg);
%}
@ -6048,7 +6061,7 @@ instruct storeA2I0(memory mem, immI0 zero) %{
size(4);
format %{ "STX $zero,$mem\t! packed2I" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( mem, R_G0 ) );
ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
ins_pipe(fstoreD_mem_zero);
%}
@ -6162,7 +6175,7 @@ instruct stfSSD(stackSlotD stkSlot, regD src) %{
ins_cost(MEMORY_REF_COST);
format %{ "STDF $src,$stkSlot\t!stk" %}
opcode(Assembler::stdf_op3);
ins_encode(form3_mem_reg(stkSlot, src));
ins_encode(simple_form3_mem_reg(stkSlot, src));
ins_pipe(fstoreD_stk_reg);
%}
@ -6172,7 +6185,7 @@ instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
ins_cost(MEMORY_REF_COST);
format %{ "LDDF $stkSlot,$dst\t!stk" %}
opcode(Assembler::lddf_op3);
ins_encode(form3_mem_reg(stkSlot, dst));
ins_encode(simple_form3_mem_reg(stkSlot, dst));
ins_pipe(floadD_stk);
%}
@ -6182,7 +6195,7 @@ instruct stfSSF(stackSlotF stkSlot, regF src) %{
ins_cost(MEMORY_REF_COST);
format %{ "STF $src,$stkSlot\t!stk" %}
opcode(Assembler::stf_op3);
ins_encode(form3_mem_reg(stkSlot, src));
ins_encode(simple_form3_mem_reg(stkSlot, src));
ins_pipe(fstoreF_stk_reg);
%}
@ -6584,7 +6597,7 @@ instruct loadLLocked(iRegL dst, memory mem) %{
size(4);
format %{ "LDX $mem,$dst\t! long" %}
opcode(Assembler::ldx_op3);
ins_encode( form3_mem_reg( mem, dst ) );
ins_encode(simple_form3_mem_reg( mem, dst ) );
ins_pipe(iload_mem);
%}
@ -6597,32 +6610,23 @@ instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, fla
ins_pipe( long_memory_op );
%}
instruct storeLConditional_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
match(Set res (StoreLConditional mem_ptr (Binary oldval newval)));
effect( USE mem_ptr, KILL ccr, KILL tmp1);
// Marshal the register pairs into V9 64-bit registers, then do the compare-and-swap
format %{
"MOV $newval,R_O7\n\t"
"CASXA [$mem_ptr],$oldval,R_O7\t! If $oldval==[$mem_ptr] Then store R_O7 into [$mem_ptr], set R_O7=[$mem_ptr] in any case\n\t"
"CMP $oldval,R_O7\t\t! See if we made progress\n\t"
"MOV 1,$res\n\t"
"MOVne xcc,R_G0,$res"
%}
ins_encode( enc_casx(mem_ptr, oldval, newval),
enc_lflags_ne_to_boolean(res) );
// Conditional-store of an int value.
instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
effect( KILL newval );
format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
"CMP $oldval,$newval\t\t! See if we made progress" %}
ins_encode( enc_cas(mem_ptr,oldval,newval) );
ins_pipe( long_memory_op );
%}
instruct storeLConditional_flags(iRegP mem_ptr, iRegL oldval, iRegL newval, flagsRegL xcc, o7RegI tmp1, immI0 zero) %{
match(Set xcc (CmpI (StoreLConditional mem_ptr (Binary oldval newval)) zero));
effect( USE mem_ptr, KILL tmp1);
// Marshal the register pairs into V9 64-bit registers, then do the compare-and-swap
format %{
"MOV $newval,R_O7\n\t"
"CASXA [$mem_ptr],$oldval,R_O7\t! If $oldval==[$mem_ptr] Then store R_O7 into [$mem_ptr], set R_O7=[$mem_ptr] in any case\n\t"
"CMP $oldval,R_O7\t\t! See if we made progress"
%}
ins_encode( enc_casx(mem_ptr, oldval, newval));
// Conditional-store of a long value.
instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
effect( KILL newval );
format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
"CMP $oldval,$newval\t\t! See if we made progress" %}
ins_encode( enc_cas(mem_ptr,oldval,newval) );
ins_pipe( long_memory_op );
%}
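
The conditional-store patterns above boil down to a single compare-and-swap: newval is written to [mem_ptr] only if it still holds oldval, and the following CMP sets the condition codes the compiled code branches on. A minimal sketch of that semantics in plain C++, with std::atomic standing in for CASA/CASXA (illustrative only, not HotSpot code):

#include <atomic>
#include <cstdint>

// True iff *mem still held 'oldval' and was replaced by 'newval'; on failure
// compare_exchange_strong writes the current contents of *mem back into
// 'oldval', just as CASXA leaves [mem_ptr] in the newval register.
inline bool store_long_conditional(std::atomic<int64_t>* mem,
                                   int64_t oldval, int64_t newval) {
  return mem->compare_exchange_strong(oldval, newval);
}
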
@ -7405,6 +7409,34 @@ instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
ins_pipe(ialu_reg_imm);
%}
#ifndef _LP64
// Use sp_ptr_RegP to match G2 (TLS register) without spilling.
instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
match(Set dst (OrI src1 (CastP2X src2)));
size(4);
format %{ "OR $src1,$src2,$dst" %}
opcode(Assembler::or_op3, Assembler::arith_op);
ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_reg);
%}
#else
instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
match(Set dst (OrL src1 (CastP2X src2)));
ins_cost(DEFAULT_COST);
size(4);
format %{ "OR $src1,$src2,$dst\t! long" %}
opcode(Assembler::or_op3, Assembler::arith_op);
ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
ins_pipe(ialu_reg_reg);
%}
#endif
// Xor Instructions
// Register Xor
instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
@ -7666,7 +7698,7 @@ instruct convI2D_mem( regD_low dst, memory mem ) %{
format %{ "LDF $mem,$dst\n\t"
"FITOD $dst,$dst" %}
opcode(Assembler::ldf_op3, Assembler::fitod_opf);
ins_encode( form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
ins_pipe(floadF_mem);
%}
@ -7696,7 +7728,7 @@ instruct convI2F_mem( regF dst, memory mem ) %{
format %{ "LDF $mem,$dst\n\t"
"FITOS $dst,$dst" %}
opcode(Assembler::ldf_op3, Assembler::fitos_opf);
ins_encode( form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
ins_pipe(floadF_mem);
%}
@ -7738,7 +7770,7 @@ instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
size(4);
format %{ "LDUW $src,$dst\t! MoveF2I" %}
opcode(Assembler::lduw_op3);
ins_encode( form3_mem_reg( src, dst ) );
ins_encode(simple_form3_mem_reg( src, dst ) );
ins_pipe(iload_mem);
%}
@ -7750,7 +7782,7 @@ instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
size(4);
format %{ "LDF $src,$dst\t! MoveI2F" %}
opcode(Assembler::ldf_op3);
ins_encode(form3_mem_reg(src, dst));
ins_encode(simple_form3_mem_reg(src, dst));
ins_pipe(floadF_stk);
%}
@ -7762,7 +7794,7 @@ instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{
size(4);
format %{ "LDX $src,$dst\t! MoveD2L" %}
opcode(Assembler::ldx_op3);
ins_encode( form3_mem_reg( src, dst ) );
ins_encode(simple_form3_mem_reg( src, dst ) );
ins_pipe(iload_mem);
%}
@ -7774,7 +7806,7 @@ instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
size(4);
format %{ "LDDF $src,$dst\t! MoveL2D" %}
opcode(Assembler::lddf_op3);
ins_encode(form3_mem_reg(src, dst));
ins_encode(simple_form3_mem_reg(src, dst));
ins_pipe(floadD_stk);
%}
@ -7786,7 +7818,7 @@ instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
size(4);
format %{ "STF $src,$dst\t!MoveF2I" %}
opcode(Assembler::stf_op3);
ins_encode(form3_mem_reg(dst, src));
ins_encode(simple_form3_mem_reg(dst, src));
ins_pipe(fstoreF_stk_reg);
%}
@ -7798,7 +7830,7 @@ instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
size(4);
format %{ "STW $src,$dst\t!MoveI2F" %}
opcode(Assembler::stw_op3);
ins_encode( form3_mem_reg( dst, src ) );
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}
@ -7810,7 +7842,7 @@ instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
size(4);
format %{ "STDF $src,$dst\t!MoveD2L" %}
opcode(Assembler::stdf_op3);
ins_encode(form3_mem_reg(dst, src));
ins_encode(simple_form3_mem_reg(dst, src));
ins_pipe(fstoreD_stk_reg);
%}
@ -7822,7 +7854,7 @@ instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
size(4);
format %{ "STX $src,$dst\t!MoveL2D" %}
opcode(Assembler::stx_op3);
ins_encode( form3_mem_reg( dst, src ) );
ins_encode(simple_form3_mem_reg( dst, src ) );
ins_pipe(istore_mem_reg);
%}

View File

@ -956,7 +956,7 @@ class StubGenerator: public StubCodeGenerator {
// Load a little early; will load 1 off the end of the array.
// Ok for now; revisit if we have other uses of this routine.
if (UseCompressedOops) {
__ ld(L1_ary_ptr,0,L2_super);// Will load a little early
__ lduw(L1_ary_ptr,0,L2_super);// Will load a little early
} else {
__ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
}
@ -973,7 +973,7 @@ class StubGenerator: public StubCodeGenerator {
#ifdef _LP64
__ subcc(L2_super,L4_ooptmp,Rret); // Check for match; zero in Rret for a hit
__ br( Assembler::notEqual, false, Assembler::pt, loop );
__ delayed()->ld(L1_ary_ptr,0,L2_super);// Will load a little early
__ delayed()->lduw(L1_ary_ptr,0,L2_super);// Will load a little early
#else
ShouldNotReachHere();
#endif
@ -1110,30 +1110,31 @@ class StubGenerator: public StubCodeGenerator {
// The input registers are overwritten.
//
void gen_write_ref_array_pre_barrier(Register addr, Register count) {
#if 0 // G1 only
BarrierSet* bs = Universe::heap()->barrier_set();
if (bs->has_write_ref_pre_barrier()) {
assert(bs->has_write_ref_array_pre_opt(),
"Else unsupported barrier set.");
assert(addr->is_global() && count->is_global(),
"If not, then we have to fix this code to handle more "
"general cases.");
// Get some new fresh output registers.
__ save_frame(0);
// Save the necessary global regs... will be used after.
__ mov(addr, L0);
__ mov(count, L1);
__ mov(addr, O0);
if (addr->is_global()) {
__ mov(addr, L0);
}
if (count->is_global()) {
__ mov(count, L1);
}
__ mov(addr->after_save(), O0);
// Get the count into O1
__ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
__ delayed()->mov(count, O1);
__ mov(L0, addr);
__ mov(L1, count);
__ delayed()->mov(count->after_save(), O1);
if (addr->is_global()) {
__ mov(L0, addr);
}
if (count->is_global()) {
__ mov(L1, count);
}
__ restore();
}
#endif // 0
}
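
The rewrite above stops assuming addr and count are global registers: it saves a new register window, copies only genuine globals into locals, and reaches the arguments through after_save(). On SPARC, save renames the caller's %o registers to %i registers in the new window while globals are shared, which is all after_save() has to model. A toy sketch of that mapping (the register numbering here is an assumption for illustration, not HotSpot's encoding):

enum RegClass { GLOBAL, OUT, IN, LOCAL };
struct Reg { RegClass cls; int idx; };

// After a SPARC `save`, the caller's %oN is visible as %iN in the callee's
// window; globals and locals keep their names.
inline Reg after_save(Reg r) {
  if (r.cls == OUT) return Reg{IN, r.idx};
  return r;
}
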
//
// Generate post-write barrier for array.
@ -1150,22 +1151,17 @@ class StubGenerator: public StubCodeGenerator {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
#if 0 // G1 - only
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
assert(addr->is_global() && count->is_global(),
"If not, then we have to fix this code to handle more "
"general cases.");
// Get some new fresh output registers.
__ save_frame(0);
__ mov(addr, O0);
__ mov(addr->after_save(), O0);
__ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
__ delayed()->mov(count, O1);
__ delayed()->mov(count->after_save(), O1);
__ restore();
}
break;
#endif // 0 G1 - only
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
@ -2412,8 +2408,7 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
gen_write_ref_array_pre_barrier(G1, G5);
gen_write_ref_array_pre_barrier(O1, O2);
#ifdef ASSERT
// We sometimes save a frame (see partial_subtype_check below).

View File

@ -28,6 +28,79 @@
#ifndef CC_INTERP
#define __ _masm->
// Misc helpers
// Do an oop store like *(base + index + offset) = val
// index can be noreg,
static void do_oop_store(InterpreterMacroAssembler* _masm,
Register base,
Register index,
int offset,
Register val,
Register tmp,
BarrierSet::Name barrier,
bool precise) {
assert(tmp != val && tmp != base && tmp != index, "register collision");
assert(index == noreg || offset == 0, "only one offset");
switch (barrier) {
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
__ g1_write_barrier_pre( base, index, offset, tmp, /*preserve_o_regs*/true);
if (index == noreg ) {
assert(Assembler::is_simm13(offset), "fix this code");
__ store_heap_oop(val, base, offset);
} else {
__ store_heap_oop(val, base, index);
}
// No need for post barrier if storing NULL
if (val != G0) {
if (precise) {
if (index == noreg) {
__ add(base, offset, base);
} else {
__ add(base, index, base);
}
}
__ g1_write_barrier_post(base, val, tmp);
}
}
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
if (index == noreg ) {
assert(Assembler::is_simm13(offset), "fix this code");
__ store_heap_oop(val, base, offset);
} else {
__ store_heap_oop(val, base, index);
}
// No need for post barrier if storing NULL
if (val != G0) {
if (precise) {
if (index == noreg) {
__ add(base, offset, base);
} else {
__ add(base, index, base);
}
}
__ card_write_barrier_post(base, val, tmp);
}
}
break;
case BarrierSet::ModRef:
case BarrierSet::Other:
ShouldNotReachHere();
break;
default :
ShouldNotReachHere();
}
}
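
do_oop_store above centralizes the "store an oop plus the right GC barriers" pattern: G1 needs a SATB pre-barrier before the store and a post-barrier afterwards (skipped when storing NULL), while card-table collectors only need the post-store card mark. The shape of that dispatch in plain C++ (BarrierKind, pre_barrier, post_barrier and card_mark are placeholder names for the sketch, not HotSpot APIs):

enum class BarrierKind { G1SATB, CardTable };

inline void pre_barrier(void** /*slot*/)               {}  // stand-in: record old value (SATB)
inline void post_barrier(void** /*addr*/, void* /*v*/) {}  // stand-in: enqueue dirty card (G1)
inline void card_mark(void** /*addr*/)                 {}  // stand-in: dirty the covering card

inline void do_oop_store(void** obj_base, void** slot, void* val,
                         BarrierKind kind, bool precise) {
  switch (kind) {
    case BarrierKind::G1SATB:
      pre_barrier(slot);                          // before the store
      *slot = val;
      if (val != nullptr)                         // no post barrier for NULL stores
        post_barrier(precise ? slot : obj_base, val);
      break;
    case BarrierKind::CardTable:
      *slot = val;
      if (val != nullptr)
        card_mark(precise ? slot : obj_base);     // 'precise' marks the exact element's card
      break;
  }
}
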
//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization
@ -758,6 +831,8 @@ void TemplateTable::aastore() {
// O4: array element klass
// O5: value klass
// Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Generate a fast subtype check. Branch to store_ok if no
// failure. Throw if failure.
__ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );
@ -767,18 +842,14 @@ void TemplateTable::aastore() {
// Store is OK.
__ bind(store_ok);
__ store_heap_oop(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Quote from rememberedSet.hpp: For objArrays, the precise card
// corresponding to the pointer store is dirtied so we don't need to
// scavenge the entire array.
Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ add(element, O1); // address the element precisely
__ store_check(G3_scratch, O1);
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
__ ba(false,done);
__ delayed()->inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
__ bind(is_null);
__ store_heap_oop(Otos_i, element);
do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
__ profile_null_seen(G3_scratch);
__ inc(Lesp, 3* Interpreter::stackElementSize()); // adj sp (pops array, index and value)
__ bind(done);
@ -2014,7 +2085,7 @@ void TemplateTable::jvmti_post_field_access(Register Rcache,
} else {
if (has_tos) {
// save object pointer before call_VM() clobbers it
__ mov(Otos_i, Lscratch);
__ push_ptr(Otos_i); // put object on tos where GC wants it.
} else {
// Load top of stack (do not pop the value off the stack);
__ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
@ -2026,7 +2097,7 @@ void TemplateTable::jvmti_post_field_access(Register Rcache,
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
Otos_i, Rcache);
if (!is_static && has_tos) {
__ mov(Lscratch, Otos_i); // restore object pointer
__ pop_ptr(Otos_i); // restore object pointer
__ verify_oop(Otos_i);
}
__ get_cache_and_index_at_bcp(Rcache, index, 1);
@ -2449,8 +2520,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
// atos
__ pop_ptr();
__ verify_oop(Otos_i);
__ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
__ ba(false, checkVolatile);
__ delayed()->tst(Lscratch);
@ -2491,8 +2563,9 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop_ptr();
pop_and_check_object(Rclass);
__ verify_oop(Otos_i);
__ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
__ ba(false, checkVolatile);
__ delayed()->tst(Lscratch);
@ -2646,8 +2719,7 @@ void TemplateTable::fast_storefield(TosState state) {
__ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
break;
case Bytecodes::_fast_aputfield:
__ store_heap_oop(Otos_i, Rclass, Roffset);
__ store_check(G1_scratch, Rclass, Roffset);
do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
break;
default:
ShouldNotReachHere();

View File

@ -621,6 +621,10 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
debug_only(has_disp32 = true);
break;
case 0xF0: // Lock
assert(os::is_MP(), "only on MP");
goto again_after_prefix;
case 0xF3: // For SSE
case 0xF2: // For SSE2
switch (0xFF & *ip++) {
@ -1575,6 +1579,35 @@ void Assembler::movdqa(Address dst, XMMRegister src) {
emit_operand(src, dst);
}
void Assembler::movdqu(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
emit_byte(0xF3);
prefix(src, dst);
emit_byte(0x0F);
emit_byte(0x6F);
emit_operand(dst, src);
}
void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_byte(0xF3);
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
emit_byte(0x6F);
emit_byte(0xC0 | encode);
}
void Assembler::movdqu(Address dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
emit_byte(0xF3);
prefix(dst, src);
emit_byte(0x0F);
emit_byte(0x7F);
emit_operand(src, dst);
}
// Uses zero extension on 64bit
void Assembler::movl(Register dst, int32_t imm32) {
@ -5935,26 +5968,30 @@ void MacroAssembler::eden_allocate(Register obj,
Label& slow_case) {
assert(obj == rax, "obj must be in rax, for cmpxchg");
assert_different_registers(obj, var_size_in_bytes, t1);
Register end = t1;
Label retry;
bind(retry);
ExternalAddress heap_top((address) Universe::heap()->top_addr());
movptr(obj, heap_top);
if (var_size_in_bytes == noreg) {
lea(end, Address(obj, con_size_in_bytes));
if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
jmp(slow_case);
} else {
lea(end, Address(obj, var_size_in_bytes, Address::times_1));
Register end = t1;
Label retry;
bind(retry);
ExternalAddress heap_top((address) Universe::heap()->top_addr());
movptr(obj, heap_top);
if (var_size_in_bytes == noreg) {
lea(end, Address(obj, con_size_in_bytes));
} else {
lea(end, Address(obj, var_size_in_bytes, Address::times_1));
}
// if end < obj then we wrapped around => object too long => slow case
cmpptr(end, obj);
jcc(Assembler::below, slow_case);
cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
jcc(Assembler::above, slow_case);
// Compare obj with the top addr, and if still equal, store the new top addr in
// end at the address of the top addr pointer. Sets ZF if was equal, and clears
// it otherwise. Use lock prefix for atomicity on MPs.
locked_cmpxchgptr(end, heap_top);
jcc(Assembler::notEqual, retry);
}
// if end < obj then we wrapped around => object too long => slow case
cmpptr(end, obj);
jcc(Assembler::below, slow_case);
cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
jcc(Assembler::above, slow_case);
// Compare obj with the top addr, and if still equal, store the new top addr in
// end at the address of the top addr pointer. Sets ZF if was equal, and clears
// it otherwise. Use lock prefix for atomicity on MPs.
locked_cmpxchgptr(end, heap_top);
jcc(Assembler::notEqual, retry);
}
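
The reworked eden_allocate jumps straight to the slow case when inline contiguous allocation is unavailable (or CMSIncrementalMode is on); otherwise it is the usual bump-the-pointer loop: read top, compute end, reject wrap-around and overflow past the heap end, then CAS top from obj to end and retry on contention. The same loop in plain C++ (heap_top and heap_end are assumed stand-ins for Universe::heap()->top_addr()/end_addr(); a sketch, not the HotSpot code):

#include <atomic>
#include <cstddef>

inline char* eden_allocate(std::atomic<char*>& heap_top, char* heap_end,
                           size_t size_in_bytes) {
  for (;;) {
    char* obj = heap_top.load();
    char* end = obj + size_in_bytes;
    if (end < obj)      return nullptr;  // wrapped around => object too long => slow case
    if (end > heap_end) return nullptr;  // past the end of eden => slow case
    // CAS top from obj to end; if another thread won the race, retry.
    if (heap_top.compare_exchange_weak(obj, end)) return obj;
  }
}
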
void MacroAssembler::enter() {
@ -6491,6 +6528,179 @@ void MacroAssembler::sign_extend_short(Register reg) {
}
}
//////////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
void MacroAssembler::g1_write_barrier_pre(Register obj,
#ifndef _LP64
Register thread,
#endif
Register tmp,
Register tmp2,
bool tosca_live) {
LP64_ONLY(Register thread = r15_thread;)
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_index()));
Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_buf()));
Label done;
Label runtime;
// if (!marking_in_progress) goto done;
if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
cmpl(in_progress, 0);
} else {
assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
cmpb(in_progress, 0);
}
jcc(Assembler::equal, done);
// if (x.f == NULL) goto done;
cmpptr(Address(obj, 0), NULL_WORD);
jcc(Assembler::equal, done);
// Can we store original value in the thread's buffer?
LP64_ONLY(movslq(tmp, index);)
movptr(tmp2, Address(obj, 0));
#ifdef _LP64
cmpq(tmp, 0);
#else
cmpl(index, 0);
#endif
jcc(Assembler::equal, runtime);
#ifdef _LP64
subq(tmp, wordSize);
movl(index, tmp);
addq(tmp, buffer);
#else
subl(index, wordSize);
movl(tmp, buffer);
addl(tmp, index);
#endif
movptr(Address(tmp, 0), tmp2);
jmp(done);
bind(runtime);
// save the live input values
if(tosca_live) push(rax);
push(obj);
#ifdef _LP64
movq(c_rarg0, Address(obj, 0));
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, r15_thread);
#else
push(thread);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
pop(thread);
#endif
pop(obj);
if(tosca_live) pop(rax);
bind(done);
}
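
g1_write_barrier_pre implements the snapshot-at-the-beginning invariant: while marking is active, the value about to be overwritten is recorded before the store. The fast path pushes it into a per-thread SATB buffer whose index counts down in bytes; only a full buffer (index == 0) falls through to the runtime call. The same logic in plain C++ (SATBQueue and enqueue_full_buffer are illustrative names, not the PtrQueue API):

#include <cstddef>

struct SATBQueue {
  bool   active;   // is concurrent marking in progress?
  size_t index;    // byte offset of the next free slot, counts down to 0
  void** buffer;   // per-thread buffer of recorded old values
};

inline void enqueue_full_buffer(SATBQueue&, void*) {}  // stand-in for the runtime call (g1_wb_pre)

inline void g1_write_barrier_pre(SATBQueue& q, void** field) {
  if (!q.active) return;              // not marking: nothing to record
  void* old_val = *field;
  if (old_val == nullptr) return;     // overwriting NULL needs no record
  if (q.index == 0) {                 // buffer full: take the slow path
    enqueue_full_buffer(q, old_val);
    return;
  }
  q.index -= sizeof(void*);           // claim the next slot (index counts down)
  q.buffer[q.index / sizeof(void*)] = old_val;
}
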
void MacroAssembler::g1_write_barrier_post(Register store_addr,
Register new_val,
#ifndef _LP64
Register thread,
#endif
Register tmp,
Register tmp2) {
LP64_ONLY(Register thread = r15_thread;)
Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_index()));
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
Label done;
Label runtime;
// Does store cross heap regions?
movptr(tmp, store_addr);
xorptr(tmp, new_val);
shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
jcc(Assembler::equal, done);
// crosses regions, storing NULL?
cmpptr(new_val, (int32_t) NULL_WORD);
jcc(Assembler::equal, done);
// storing region crossing non-NULL, is card already dirty?
ExternalAddress cardtable((address) ct->byte_map_base);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
#ifdef _LP64
const Register card_addr = tmp;
movq(card_addr, store_addr);
shrq(card_addr, CardTableModRefBS::card_shift);
lea(tmp2, cardtable);
// get the address of the card
addq(card_addr, tmp2);
#else
const Register card_index = tmp;
movl(card_index, store_addr);
shrl(card_index, CardTableModRefBS::card_shift);
Address index(noreg, card_index, Address::times_1);
const Register card_addr = tmp;
lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
#endif
cmpb(Address(card_addr, 0), 0);
jcc(Assembler::equal, done);
// storing a region crossing, non-NULL oop, card is clean.
// dirty card and log.
movb(Address(card_addr, 0), 0);
cmpl(queue_index, 0);
jcc(Assembler::equal, runtime);
subl(queue_index, wordSize);
movptr(tmp2, buffer);
#ifdef _LP64
movslq(rscratch1, queue_index);
addq(tmp2, rscratch1);
movq(Address(tmp2, 0), card_addr);
#else
addl(tmp2, queue_index);
movl(Address(tmp2, 0), card_index);
#endif
jmp(done);
bind(runtime);
// save the live input values
push(store_addr);
push(new_val);
#ifdef _LP64
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
#else
push(thread);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
pop(thread);
#endif
pop(new_val);
pop(store_addr);
bind(done);
}
#endif // SERIALGC
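
g1_write_barrier_post filters out stores that stay within one heap region and stores of NULL, then dirties the card covering the store address and logs that card in the thread's dirty card queue, again only calling the runtime when the queue is full. A compact C++ rendering of that filter-then-log shape (the region grain, card size and DirtyCardQueue type are assumptions for the sketch):

#include <cstddef>
#include <cstdint>

constexpr size_t kLogRegionBytes = 20;  // assumed 1 MB HeapRegion grain
constexpr size_t kCardShift      = 9;   // 512-byte cards, as in CardTableModRefBS

struct DirtyCardQueue {
  size_t    index;   // byte offset of the next free slot, counts down
  uint8_t** buffer;  // logged card addresses
};

inline void flush_queue(DirtyCardQueue&, uint8_t*) {}  // stand-in for the runtime call (g1_wb_post)

inline void g1_write_barrier_post(DirtyCardQueue& q, uint8_t* card_table_base,
                                  void** store_addr, void* new_val) {
  uintptr_t a = reinterpret_cast<uintptr_t>(store_addr);
  uintptr_t b = reinterpret_cast<uintptr_t>(new_val);
  if (((a ^ b) >> kLogRegionBytes) == 0) return;   // same region: nothing to remember
  if (new_val == nullptr) return;                  // storing NULL: nothing to remember
  uint8_t* card = card_table_base + (a >> kCardShift);
  if (*card == 0) return;                          // card already dirty (0 means dirty here)
  *card = 0;                                       // dirty it
  if (q.index == 0) { flush_queue(q, card); return; }
  q.index -= sizeof(void*);
  q.buffer[q.index / sizeof(void*)] = card;        // log the card for concurrent refinement
}
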
//////////////////////////////////////////////////////////////////////////////////
void MacroAssembler::store_check(Register obj) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.

View File

@ -227,9 +227,11 @@ class Address VALUE_OBJ_CLASS_SPEC {
#endif // ASSERT
// accessors
bool uses(Register reg) const {
return _base == reg || _index == reg;
}
bool uses(Register reg) const { return _base == reg || _index == reg; }
Register base() const { return _base; }
Register index() const { return _index; }
ScaleFactor scale() const { return _scale; }
int disp() const { return _disp; }
// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
@ -1053,6 +1055,11 @@ private:
void movdqa(XMMRegister dst, Address src);
void movdqa(XMMRegister dst, XMMRegister src);
// Move Unaligned Double Quadword
void movdqu(Address dst, XMMRegister src);
void movdqu(XMMRegister dst, Address src);
void movdqu(XMMRegister dst, XMMRegister src);
void movl(Register dst, int32_t imm32);
void movl(Address dst, int32_t imm32);
void movl(Register dst, Register src);
@ -1310,7 +1317,8 @@ private:
// on arguments should also go in here.
class MacroAssembler: public Assembler {
friend class LIR_Assembler;
friend class LIR_Assembler;
friend class Runtime1; // as_Address()
protected:
Address as_Address(AddressLiteral adr);
@ -1453,6 +1461,7 @@ class MacroAssembler: public Assembler {
// The pointer will be loaded into the thread register.
void get_thread(Register thread);
// Support for VM calls
//
// It is imperative that all calls into the VM are handled via the call_VM macros.
@ -1527,6 +1536,22 @@ class MacroAssembler: public Assembler {
void store_check(Register obj); // store check for obj - register is destroyed afterwards
void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
void g1_write_barrier_pre(Register obj,
#ifndef _LP64
Register thread,
#endif
Register tmp,
Register tmp2,
bool tosca_live);
void g1_write_barrier_post(Register store_addr,
Register new_val,
#ifndef _LP64
Register thread,
#endif
Register tmp,
Register tmp2);
// split store_check(Register obj) to enhance instruction interleaving
void store_check_part_1(Register obj);
void store_check_part_2(Register obj);
@ -1755,7 +1780,8 @@ class MacroAssembler: public Assembler {
// check info (currently consumed only by C1). If
// swap_reg_contains_mark is true then returns -1 as it is assumed
// the calling code has already passed any potential faults.
int biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
int biased_locking_enter(Register lock_reg, Register obj_reg,
Register swap_reg, Register tmp_reg,
bool swap_reg_contains_mark,
Label& done, Label* slow_case = NULL,
BiasedLockingCounters* counters = NULL);

View File

@ -456,5 +456,50 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ jmp(_continuation);
}
/////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
// At this point we know that marking is in progress
__ bind(_entry);
assert(pre_val()->is_register(), "Precondition.");
Register pre_val_reg = pre_val()->as_register();
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
__ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(pre_val()->as_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
__ jmp(_continuation);
}
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
jbyte* G1PostBarrierStub::byte_map_base_slow() {
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->is_a(BarrierSet::G1SATBCTLogging),
"Must be if we're using this.");
return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
assert(addr()->is_register(), "Precondition.");
assert(new_val()->is_register(), "Precondition.");
Register new_val_reg = new_val()->as_register();
__ cmpptr(new_val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(addr()->as_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
__ jmp(_continuation);
}
#endif // SERIALGC
/////////////////////////////////////////////////////////////////////////////
#undef __

View File

@ -302,6 +302,8 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
}
if (obj_store) {
// Needs GC write barriers.
pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
__ move(value.result(), array_addr, null_check_info);
// Seems to be a precise address
post_barrier(LIR_OprFact::address(array_addr), value.result());
@ -756,7 +758,10 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
__ move(obj.result(), addr);
__ add(addr, offset.result(), addr);
if (type == objectType) { // Write-barrier needed for Object fields.
// Do the pre-write barrier, if any.
pre_barrier(addr, false, NULL);
}
LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
if (type == objectType)
@ -1286,6 +1291,8 @@ void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
LIR_Address* addr = new LIR_Address(src, offset, type);
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
if (is_obj) {
// Do the pre-write barrier, if any.
pre_barrier(LIR_OprFact::address(addr), false, NULL);
__ move(data, addr);
assert(src->is_register(), "must be register");
// Seems to be a precise address

View File

@ -1583,6 +1583,166 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
#ifndef SERIALGC
case g1_pre_barrier_slow_id:
{
StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
// arg0 : previous value of memory
BarrierSet* bs = Universe::heap()->barrier_set();
if (bs->kind() != BarrierSet::G1SATBCTLogging) {
__ movptr(rax, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
__ should_not_reach_here();
break;
}
__ push(rax);
__ push(rdx);
const Register pre_val = rax;
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
const Register tmp = rdx;
NOT_LP64(__ get_thread(thread);)
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_active()));
Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_index()));
Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
PtrQueue::byte_offset_of_buf()));
Label done;
Label runtime;
// Can we store original value in the thread's buffer?
LP64_ONLY(__ movslq(tmp, queue_index);)
#ifdef _LP64
__ cmpq(tmp, 0);
#else
__ cmpl(queue_index, 0);
#endif
__ jcc(Assembler::equal, runtime);
#ifdef _LP64
__ subq(tmp, wordSize);
__ movl(queue_index, tmp);
__ addq(tmp, buffer);
#else
__ subl(queue_index, wordSize);
__ movl(tmp, buffer);
__ addl(tmp, queue_index);
#endif
// prev_val (rax)
f.load_argument(0, pre_val);
__ movptr(Address(tmp, 0), pre_val);
__ jmp(done);
__ bind(runtime);
// load the pre-value
__ push(rcx);
f.load_argument(0, rcx);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
__ pop(rcx);
__ bind(done);
__ pop(rdx);
__ pop(rax);
}
break;
case g1_post_barrier_slow_id:
{
StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
// arg0: store_address
Address store_addr(rbp, 2*BytesPerWord);
BarrierSet* bs = Universe::heap()->barrier_set();
CardTableModRefBS* ct = (CardTableModRefBS*)bs;
Label done;
Label runtime;
// At this point we know new_value is non-NULL and the new_value crosses regions.
// Must check to see if card is already dirty
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_index()));
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
PtrQueue::byte_offset_of_buf()));
__ push(rax);
__ push(rdx);
NOT_LP64(__ get_thread(thread);)
ExternalAddress cardtable((address)ct->byte_map_base);
assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
const Register card_addr = rdx;
#ifdef _LP64
const Register tmp = rscratch1;
f.load_argument(0, card_addr);
__ shrq(card_addr, CardTableModRefBS::card_shift);
__ lea(tmp, cardtable);
// get the address of the card
__ addq(card_addr, tmp);
#else
const Register card_index = rdx;
f.load_argument(0, card_index);
__ shrl(card_index, CardTableModRefBS::card_shift);
Address index(noreg, card_index, Address::times_1);
__ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
#endif
__ cmpb(Address(card_addr, 0), 0);
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
__ movb(Address(card_addr, 0), 0);
__ cmpl(queue_index, 0);
__ jcc(Assembler::equal, runtime);
__ subl(queue_index, wordSize);
const Register buffer_addr = rbx;
__ push(rbx);
__ movptr(buffer_addr, buffer);
#ifdef _LP64
__ movslq(rscratch1, queue_index);
__ addptr(buffer_addr, rscratch1);
#else
__ addptr(buffer_addr, queue_index);
#endif
__ movptr(Address(buffer_addr, 0), card_addr);
__ pop(rbx);
__ jmp(done);
__ bind(runtime);
NOT_LP64(__ push(rcx);)
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
NOT_LP64(__ pop(rcx);)
__ bind(done);
__ pop(rdx);
__ pop(rax);
}
break;
#endif // !SERIALGC
default:
{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
__ movptr(rax, (int)id);

View File

@ -44,8 +44,13 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
// Note: No need to save/restore bcp & locals (r13 & r14) pointer
// since these are callee saved registers and no blocking/
// GC can happen in leaf calls.
// Further Note: DO NOT save/restore bcp/locals. If a caller has
// already saved them so that it can use esi/edi as temporaries
// then a save/restore here will DESTROY the copy the caller
// saved! There used to be a save_bcp() that only happened in
// the ASSERT path (no restore_bcp), which caused bizarre failures
// when the JVM was built with ASSERTs.
#ifdef ASSERT
save_bcp();
{
Label L;
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
@ -58,24 +63,9 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
// super call
MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
// interpreter specific
#ifdef ASSERT
{
Label L;
cmpptr(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" r13 not callee saved?");
bind(L);
}
{
Label L;
cmpptr(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" r14 not callee saved?");
bind(L);
}
#endif
// Used to ASSERT that r13/r14 were equal to frame's bcp/locals
// but since they may not have been saved (and we don't want to
// save them here; see note above) the assert is invalid.
}
void InterpreterMacroAssembler::call_VM_base(Register oop_result,

View File

@ -22,9 +22,6 @@
*
*/
// make sure the defines don't screw up the declarations later on in this file
#define DONT_USE_REGISTER_DEFINES
#include "incls/_precompiled.incl"
#include "incls/_register_definitions_x86.cpp.incl"

View File

@ -712,7 +712,6 @@ class StubGenerator: public StubCodeGenerator {
// end - element count
void gen_write_ref_array_pre_barrier(Register start, Register count) {
assert_different_registers(start, count);
#if 0 // G1 only
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1SATBCT:
@ -721,8 +720,8 @@ class StubGenerator: public StubCodeGenerator {
__ pusha(); // push registers
__ push(count);
__ push(start);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
__ addl(esp, wordSize * 2);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
__ addptr(rsp, 2*wordSize);
__ popa();
}
break;
@ -734,7 +733,6 @@ class StubGenerator: public StubCodeGenerator {
ShouldNotReachHere();
}
#endif // 0 - G1 only
}
@ -750,20 +748,18 @@ class StubGenerator: public StubCodeGenerator {
BarrierSet* bs = Universe::heap()->barrier_set();
assert_different_registers(start, count);
switch (bs->kind()) {
#if 0 // G1 only
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
__ pusha(); // push registers
__ push(count);
__ push(start);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
__ addl(esp, wordSize * 2);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
__ addptr(rsp, 2*wordSize);
__ popa();
}
break;
#endif // 0 G1 only
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
@ -795,6 +791,69 @@ class StubGenerator: public StubCodeGenerator {
}
}
// Copy 64 bytes chunks
//
// Inputs:
// from - source array address
// to_from - destination array address - from
// qword_count - 8-bytes element count, negative
//
void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
assert( UseSSE >= 2, "supported cpu only" );
Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
// Copy 64-byte chunks
__ jmpb(L_copy_64_bytes);
__ align(16);
__ BIND(L_copy_64_bytes_loop);
if(UseUnalignedLoadStores) {
__ movdqu(xmm0, Address(from, 0));
__ movdqu(Address(from, to_from, Address::times_1, 0), xmm0);
__ movdqu(xmm1, Address(from, 16));
__ movdqu(Address(from, to_from, Address::times_1, 16), xmm1);
__ movdqu(xmm2, Address(from, 32));
__ movdqu(Address(from, to_from, Address::times_1, 32), xmm2);
__ movdqu(xmm3, Address(from, 48));
__ movdqu(Address(from, to_from, Address::times_1, 48), xmm3);
} else {
__ movq(xmm0, Address(from, 0));
__ movq(Address(from, to_from, Address::times_1, 0), xmm0);
__ movq(xmm1, Address(from, 8));
__ movq(Address(from, to_from, Address::times_1, 8), xmm1);
__ movq(xmm2, Address(from, 16));
__ movq(Address(from, to_from, Address::times_1, 16), xmm2);
__ movq(xmm3, Address(from, 24));
__ movq(Address(from, to_from, Address::times_1, 24), xmm3);
__ movq(xmm4, Address(from, 32));
__ movq(Address(from, to_from, Address::times_1, 32), xmm4);
__ movq(xmm5, Address(from, 40));
__ movq(Address(from, to_from, Address::times_1, 40), xmm5);
__ movq(xmm6, Address(from, 48));
__ movq(Address(from, to_from, Address::times_1, 48), xmm6);
__ movq(xmm7, Address(from, 56));
__ movq(Address(from, to_from, Address::times_1, 56), xmm7);
}
__ addl(from, 64);
__ BIND(L_copy_64_bytes);
__ subl(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
__ addl(qword_count, 8);
__ jccb(Assembler::zero, L_exit);
//
// length is too short, just copy qwords
//
__ BIND(L_copy_8_bytes);
__ movq(xmm0, Address(from, 0));
__ movq(Address(from, to_from, Address::times_1), xmm0);
__ addl(from, 8);
__ decrement(qword_count);
__ jcc(Assembler::greater, L_copy_8_bytes);
__ BIND(L_exit);
}
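
xmm_copy_forward copies the bulk of the array in 64-byte chunks: four unaligned 16-byte MOVDQU moves per iteration when UseUnalignedLoadStores is set, otherwise eight 8-byte MOVQ moves, with a trailing loop for the leftover qwords. The unaligned path, sketched with SSE2 intrinsics (separate from/to pointers here instead of the stub's from/to_from addressing; an illustration, not the generated stub):

#include <emmintrin.h>   // SSE2 intrinsics
#include <cstddef>
#include <cstdint>

inline void xmm_copy_forward(const uint8_t* from, uint8_t* to, size_t qword_count) {
  while (qword_count >= 8) {           // 64-byte chunks: 4 x 16-byte unaligned moves
    for (int i = 0; i < 64; i += 16) {
      __m128i v = _mm_loadu_si128(reinterpret_cast<const __m128i*>(from + i));
      _mm_storeu_si128(reinterpret_cast<__m128i*>(to + i), v);
    }
    from += 64; to += 64; qword_count -= 8;
  }
  while (qword_count > 0) {            // leftover qwords, 8 bytes at a time
    __m128i v = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
    _mm_storel_epi64(reinterpret_cast<__m128i*>(to), v);
    from += 8; to += 8; --qword_count;
  }
}
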
// Copy 64 bytes chunks
//
// Inputs:
@ -803,6 +862,7 @@ class StubGenerator: public StubCodeGenerator {
// qword_count - 8-bytes element count, negative
//
void mmx_copy_forward(Register from, Register to_from, Register qword_count) {
assert( VM_Version::supports_mmx(), "supported cpu only" );
Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
// Copy 64-byte chunks
__ jmpb(L_copy_64_bytes);
@ -880,7 +940,7 @@ class StubGenerator: public StubCodeGenerator {
__ subptr(to, from); // to --> to_from
__ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
__ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
if (!aligned && (t == T_BYTE || t == T_SHORT)) {
if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
// align source address at 4 bytes address boundary
if (t == T_BYTE) {
// One byte misalignment happens only for byte arrays
@ -910,20 +970,26 @@ class StubGenerator: public StubCodeGenerator {
__ mov(count, rax); // restore 'count'
__ jmpb(L_copy_2_bytes); // all dwords were copied
} else {
// align to 8 bytes, we know we are 4 byte aligned to start
__ testptr(from, 4);
__ jccb(Assembler::zero, L_copy_64_bytes);
__ movl(rax, Address(from, 0));
__ movl(Address(from, to_from, Address::times_1, 0), rax);
__ addptr(from, 4);
__ subl(count, 1<<shift);
if (!UseUnalignedLoadStores) {
// align to 8 bytes, we know we are 4 byte aligned to start
__ testptr(from, 4);
__ jccb(Assembler::zero, L_copy_64_bytes);
__ movl(rax, Address(from, 0));
__ movl(Address(from, to_from, Address::times_1, 0), rax);
__ addptr(from, 4);
__ subl(count, 1<<shift);
}
__ BIND(L_copy_64_bytes);
__ mov(rax, count);
__ shrl(rax, shift+1); // 8 bytes chunk count
//
// Copy 8-byte chunks through MMX registers, 8 per iteration of the loop
//
mmx_copy_forward(from, to_from, rax);
if (UseXMMForArrayCopy) {
xmm_copy_forward(from, to_from, rax);
} else {
mmx_copy_forward(from, to_from, rax);
}
}
// copy tailing dword
__ BIND(L_copy_4_bytes);
@ -1073,13 +1139,20 @@ class StubGenerator: public StubCodeGenerator {
__ align(16);
// Move 8 bytes
__ BIND(L_copy_8_bytes_loop);
__ movq(mmx0, Address(from, count, sf, 0));
__ movq(Address(to, count, sf, 0), mmx0);
if (UseXMMForArrayCopy) {
__ movq(xmm0, Address(from, count, sf, 0));
__ movq(Address(to, count, sf, 0), xmm0);
} else {
__ movq(mmx0, Address(from, count, sf, 0));
__ movq(Address(to, count, sf, 0), mmx0);
}
__ BIND(L_copy_8_bytes);
__ subl(count, 2<<shift);
__ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
__ addl(count, 2<<shift);
__ emms();
if (!UseXMMForArrayCopy) {
__ emms();
}
}
__ BIND(L_copy_4_bytes);
// copy prefix qword
@ -1147,7 +1220,11 @@ class StubGenerator: public StubCodeGenerator {
__ subptr(to, from); // to --> to_from
if (VM_Version::supports_mmx()) {
mmx_copy_forward(from, to_from, count);
if (UseXMMForArrayCopy) {
xmm_copy_forward(from, to_from, count);
} else {
mmx_copy_forward(from, to_from, count);
}
} else {
__ jmpb(L_copy_8_bytes);
__ align(16);
@ -1200,8 +1277,13 @@ class StubGenerator: public StubCodeGenerator {
__ align(16);
__ BIND(L_copy_8_bytes_loop);
if (VM_Version::supports_mmx()) {
__ movq(mmx0, Address(from, count, Address::times_8));
__ movq(Address(to, count, Address::times_8), mmx0);
if (UseXMMForArrayCopy) {
__ movq(xmm0, Address(from, count, Address::times_8));
__ movq(Address(to, count, Address::times_8), xmm0);
} else {
__ movq(mmx0, Address(from, count, Address::times_8));
__ movq(Address(to, count, Address::times_8), mmx0);
}
} else {
__ fild_d(Address(from, count, Address::times_8));
__ fistp_d(Address(to, count, Address::times_8));
@ -1210,7 +1292,7 @@ class StubGenerator: public StubCodeGenerator {
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
if (VM_Version::supports_mmx()) {
if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
__ emms();
}
inc_copy_counter_np(T_LONG);
@ -1378,9 +1460,9 @@ class StubGenerator: public StubCodeGenerator {
Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
// Copy from low to high addresses, indexed from the end of each array.
gen_write_ref_array_pre_barrier(to, count);
__ lea(end_from, end_from_addr);
__ lea(end_to, end_to_addr);
gen_write_ref_array_pre_barrier(to, count);
assert(length == count, ""); // else fix next line:
__ negptr(count); // negate and test the length
__ jccb(Assembler::notZero, L_load_element);

View File

@ -1153,18 +1153,26 @@ class StubGenerator: public StubCodeGenerator {
// Destroy no registers!
//
void gen_write_ref_array_pre_barrier(Register addr, Register count) {
#if 0 // G1 - only
assert_different_registers(addr, c_rarg1);
assert_different_registers(count, c_rarg0);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
__ pusha(); // push registers
__ movptr(c_rarg0, addr);
__ movptr(c_rarg1, count);
__ call(RuntimeAddress(BarrierSet::static_write_ref_array_pre));
if (count == c_rarg0) {
if (addr == c_rarg1) {
// exactly backwards!!
__ xchgptr(c_rarg1, c_rarg0);
} else {
__ movptr(c_rarg1, count);
__ movptr(c_rarg0, addr);
}
} else {
__ movptr(c_rarg0, addr);
__ movptr(c_rarg1, count);
}
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
__ popa();
}
break;
@ -1172,11 +1180,10 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::CardTableExtension:
case BarrierSet::ModRef:
break;
default :
default:
ShouldNotReachHere();
}
#endif // 0 G1 - only
}
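
The fix above is about getting (addr, count) into the fixed argument registers c_rarg0/c_rarg1 without clobbering either value when the inputs already alias those registers; the "exactly backwards" case needs an exchange rather than two moves, and otherwise the move order matters. The same parallel-move problem in miniature (Slot is a stand-in for a machine register; purely illustrative):

#include <utility>

using Slot = long;  // pretend machine register

// Place the values of 'addr' and 'count' into arg0/arg1 even when the inputs
// alias the destinations (mirrors the c_rarg0/c_rarg1 shuffle above).
inline void move_into_arg_regs(Slot& arg0, Slot& arg1, Slot& addr, Slot& count) {
  if (&count == &arg0) {
    if (&addr == &arg1) {
      std::swap(arg0, arg1);  // exactly backwards: must swap, two moves would clobber
    } else {
      arg1 = count;           // rescue count out of arg0 before overwriting it
      arg0 = addr;
    }
  } else {
    arg0 = addr;              // safe order: count does not live in arg0
    arg1 = count;
  }
}
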
//
@ -1193,7 +1200,6 @@ class StubGenerator: public StubCodeGenerator {
assert_different_registers(start, end, scratch);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
#if 0 // G1 - only
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
@ -1206,11 +1212,10 @@ class StubGenerator: public StubCodeGenerator {
__ shrptr(scratch, LogBytesPerWord);
__ mov(c_rarg0, start);
__ mov(c_rarg1, scratch);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
__ popa();
}
break;
#endif // 0 G1 - only
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
@ -1239,8 +1244,13 @@ class StubGenerator: public StubCodeGenerator {
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
}
}
}
break;
default:
ShouldNotReachHere();
}
}
// Copy big chunks forward
//
@ -1259,14 +1269,22 @@ class StubGenerator: public StubCodeGenerator {
Label L_loop;
__ align(16);
__ BIND(L_loop);
__ movq(to, Address(end_from, qword_count, Address::times_8, -24));
__ movq(Address(end_to, qword_count, Address::times_8, -24), to);
__ movq(to, Address(end_from, qword_count, Address::times_8, -16));
__ movq(Address(end_to, qword_count, Address::times_8, -16), to);
__ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
__ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
__ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
__ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
if(UseUnalignedLoadStores) {
__ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
__ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
__ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
__ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
} else {
__ movq(to, Address(end_from, qword_count, Address::times_8, -24));
__ movq(Address(end_to, qword_count, Address::times_8, -24), to);
__ movq(to, Address(end_from, qword_count, Address::times_8, -16));
__ movq(Address(end_to, qword_count, Address::times_8, -16), to);
__ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
__ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
__ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
__ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
}
__ BIND(L_copy_32_bytes);
__ addptr(qword_count, 4);
__ jcc(Assembler::lessEqual, L_loop);
@ -1292,14 +1310,22 @@ class StubGenerator: public StubCodeGenerator {
Label L_loop;
__ align(16);
__ BIND(L_loop);
__ movq(to, Address(from, qword_count, Address::times_8, 24));
__ movq(Address(dest, qword_count, Address::times_8, 24), to);
__ movq(to, Address(from, qword_count, Address::times_8, 16));
__ movq(Address(dest, qword_count, Address::times_8, 16), to);
__ movq(to, Address(from, qword_count, Address::times_8, 8));
__ movq(Address(dest, qword_count, Address::times_8, 8), to);
__ movq(to, Address(from, qword_count, Address::times_8, 0));
__ movq(Address(dest, qword_count, Address::times_8, 0), to);
if(UseUnalignedLoadStores) {
__ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
__ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
__ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
__ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
} else {
__ movq(to, Address(from, qword_count, Address::times_8, 24));
__ movq(Address(dest, qword_count, Address::times_8, 24), to);
__ movq(to, Address(from, qword_count, Address::times_8, 16));
__ movq(Address(dest, qword_count, Address::times_8, 16), to);
__ movq(to, Address(from, qword_count, Address::times_8, 8));
__ movq(Address(dest, qword_count, Address::times_8, 8), to);
__ movq(to, Address(from, qword_count, Address::times_8, 0));
__ movq(Address(dest, qword_count, Address::times_8, 0), to);
}
__ BIND(L_copy_32_bytes);
__ subptr(qword_count, 4);
__ jcc(Assembler::greaterEqual, L_loop);
@ -2282,7 +2308,7 @@ class StubGenerator: public StubCodeGenerator {
// and report their number to the caller.
assert_different_registers(rax, r14_length, count, to, end_to, rcx);
__ lea(end_to, to_element_addr);
gen_write_ref_array_post_barrier(to, end_to, rcx);
gen_write_ref_array_post_barrier(to, end_to, rscratch1);
__ movptr(rax, r14_length); // original oops
__ addptr(rax, count); // K = (original - remaining) oops
__ notptr(rax); // report (-1^K) to caller
@ -2291,7 +2317,7 @@ class StubGenerator: public StubCodeGenerator {
// Come here on success only.
__ BIND(L_do_card_marks);
__ addptr(end_to, -wordSize); // make an inclusive end pointer
gen_write_ref_array_post_barrier(to, end_to, rcx);
gen_write_ref_array_post_barrier(to, end_to, rscratch1);
__ xorptr(rax, rax); // return 0 on success
// Common exit point (success or failure).
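// A minimal sketch (hypothetical helper, not HotSpot code) of the register shuffle
// the pre-barrier hunk above performs: `addr` and `count` must land in the fixed
// argument slots even when they already occupy c_rarg0/c_rarg1, so a naive pair of
// moves could clobber a source and the fully swapped case needs an exchange.
// The same aliasing-safe logic modelled with plain C++ references:
#include <utility>

void shuffle_args(int& arg0, int& arg1, int* addr_src, int* count_src) {
  if (count_src == &arg0) {          // count already lives in the arg0 slot
    if (addr_src == &arg1) {
      std::swap(arg0, arg1);         // exactly backwards: exchange, no temp needed
    } else {
      arg1 = *count_src;             // save count before arg0 is overwritten
      arg0 = *addr_src;
    }
  } else {
    arg0 = *addr_src;                // count is not in arg0, so this cannot clobber it
    arg1 = *count_src;
  }
}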

View File

@ -107,6 +107,78 @@ static Assembler::Condition j_not(TemplateTable::Condition cc) {
//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL
static void do_oop_store(InterpreterMacroAssembler* _masm,
Address obj,
Register val,
BarrierSet::Name barrier,
bool precise) {
assert(val == noreg || val == rax, "parameter is just for looks");
switch (barrier) {
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
// flatten object address if needed
// We do it regardless of precise because we need the registers
if (obj.index() == noreg && obj.disp() == 0) {
if (obj.base() != rdx) {
__ movl(rdx, obj.base());
}
} else {
__ leal(rdx, obj);
}
__ get_thread(rcx);
__ save_bcp();
__ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
// Do the actual store
// noreg means NULL
if (val == noreg) {
__ movl(Address(rdx, 0), NULL_WORD);
// No post barrier for NULL
} else {
__ movl(Address(rdx, 0), val);
__ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
}
__ restore_bcp();
}
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
if (val == noreg) {
__ movl(obj, NULL_WORD);
} else {
__ movl(obj, val);
// flatten object address if needed
if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
__ store_check(obj.base());
} else {
__ leal(rdx, obj);
__ store_check(rdx);
}
}
}
break;
case BarrierSet::ModRef:
case BarrierSet::Other:
if (val == noreg) {
__ movl(obj, NULL_WORD);
} else {
__ movl(obj, val);
}
break;
default :
ShouldNotReachHere();
}
}
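// A minimal sketch of the post barrier that store_check() emits for the
// CardTableModRef cases above (the G1 cases use the pre/post barrier pair instead).
// Names and sizes are assumptions for illustration: HotSpot uses 512-byte cards and
// biases the real table base so the shifted address indexes it directly.
#include <cstdint>
#include <cstddef>

static const int     kCardShift = 9;            // assumed: 2^9 = 512-byte cards
static uint8_t       g_card_table[1u << 20];    // illustrative sizing only
static const uint8_t kDirtyCard = 0;

inline void post_write_barrier(const void* field_addr) {
  uintptr_t idx = reinterpret_cast<uintptr_t>(field_addr) >> kCardShift;
  g_card_table[idx & ((1u << 20) - 1)] = kDirtyCard;   // dirty the covering card
}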
Address TemplateTable::at_bcp(int offset) {
assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
return Address(rsi, offset);
@ -876,6 +948,8 @@ void TemplateTable::aastore() {
__ movptr(rax, at_tos()); // Value
__ movl(rcx, at_tos_p1()); // Index
__ movptr(rdx, at_tos_p2()); // Array
Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
index_check_without_pop(rdx, rcx); // kills rbx,
// do array store check - check for NULL value first
__ testptr(rax, rax);
@ -887,7 +961,7 @@ void TemplateTable::aastore() {
__ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
__ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
// Compress array+index*wordSize+12 into a single register. Frees ECX.
__ lea(rdx, Address(rdx, rcx, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
__ lea(rdx, element_address);
// Generate subtype check. Blows ECX. Resets EDI to locals.
// Superklass in EAX. Subklass in EBX.
@ -899,15 +973,20 @@ void TemplateTable::aastore() {
// Come here on success
__ bind(ok_is_subtype);
__ movptr(rax, at_rsp()); // Value
__ movptr(Address(rdx, 0), rax);
__ store_check(rdx);
__ jmpb(done);
// Get the value to store
__ movptr(rax, at_rsp());
// and store it with appropriate barrier
do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
__ jmp(done);
// Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
__ bind(is_null);
__ profile_null_seen(rbx);
__ movptr(Address(rdx, rcx, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), rax);
// Store NULL, (noreg means NULL to do_oop_store)
do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
// Pop stack arguments
__ bind(done);
@ -1515,7 +1594,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// compute return address as bci in rax,
__ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
__ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
// Adjust the bcp in ESI by the displacement in EDX
// Adjust the bcp in RSI by the displacement in EDX
__ addptr(rsi, rdx);
// Push return address
__ push_i(rax);
@ -1526,7 +1605,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// Normal (non-jsr) branch handling
// Adjust the bcp in ESI by the displacement in EDX
// Adjust the bcp in RSI by the displacement in EDX
__ addptr(rsi, rdx);
assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
@ -2439,11 +2518,12 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(atos);
if (!is_static) pop_and_check_object(obj);
__ movptr(lo, rax );
__ store_check(obj, lo); // Need to mark card
do_oop_store(_masm, lo, rax, _bs->kind(), false);
if (!is_static) {
patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
}
__ jmp(Done);
__ bind(notObj);
@ -2664,7 +2744,10 @@ void TemplateTable::fast_storefield(TosState state) {
break;
case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
case Bytecodes::_fast_aputfield: __ movptr(lo, rax); __ store_check(rcx, lo); break;
case Bytecodes::_fast_aputfield: {
do_oop_store(_masm, lo, rax, _bs->kind(), false);
break;
}
default:
ShouldNotReachHere();
}
@ -2672,7 +2755,8 @@ void TemplateTable::fast_storefield(TosState state) {
Label done;
volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
Assembler::StoreStore));
__ jmpb(done);
// Barriers are so large that short branch doesn't reach!
__ jmp(done);
// Same code as above, but don't need rdx to test for volatile.
__ bind(notVolatile);
@ -2694,7 +2778,10 @@ void TemplateTable::fast_storefield(TosState state) {
break;
case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
case Bytecodes::_fast_aputfield: __ movptr(lo, rax); __ store_check(rcx, lo); break;
case Bytecodes::_fast_aputfield: {
do_oop_store(_masm, lo, rax, _bs->kind(), false);
break;
}
default:
ShouldNotReachHere();
}
@ -3054,8 +3141,6 @@ void TemplateTable::_new() {
Label initialize_object; // including clearing the fields
Label allocate_shared;
ExternalAddress heap_top((address)Universe::heap()->top_addr());
__ get_cpool_and_tags(rcx, rax);
// get instanceKlass
__ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
@ -3112,6 +3197,8 @@ void TemplateTable::_new() {
if (allow_shared_alloc) {
__ bind(allocate_shared);
ExternalAddress heap_top((address)Universe::heap()->top_addr());
Label retry;
__ bind(retry);
__ movptr(rax, heap_top);

View File

@ -115,6 +115,69 @@ static Assembler::Condition j_not(TemplateTable::Condition cc) {
// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL
static void do_oop_store(InterpreterMacroAssembler* _masm,
Address obj,
Register val,
BarrierSet::Name barrier,
bool precise) {
assert(val == noreg || val == rax, "parameter is just for looks");
switch (barrier) {
#ifndef SERIALGC
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
// flatten object address if needed
if (obj.index() == noreg && obj.disp() == 0) {
if (obj.base() != rdx) {
__ movq(rdx, obj.base());
}
} else {
__ leaq(rdx, obj);
}
__ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
if (val == noreg) {
__ store_heap_oop(Address(rdx, 0), NULL_WORD);
} else {
__ store_heap_oop(Address(rdx, 0), val);
__ g1_write_barrier_post(rdx, val, r8, rbx);
}
}
break;
#endif // SERIALGC
case BarrierSet::CardTableModRef:
case BarrierSet::CardTableExtension:
{
if (val == noreg) {
__ store_heap_oop(obj, NULL_WORD);
} else {
__ store_heap_oop(obj, val);
// flatten object address if needed
if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
__ store_check(obj.base());
} else {
__ leaq(rdx, obj);
__ store_check(rdx);
}
}
}
break;
case BarrierSet::ModRef:
case BarrierSet::Other:
if (val == noreg) {
__ store_heap_oop(obj, NULL_WORD);
} else {
__ store_heap_oop(obj, val);
}
break;
default :
ShouldNotReachHere();
}
}
Address TemplateTable::at_bcp(int offset) {
assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
@ -560,8 +623,8 @@ void TemplateTable::aaload() {
// rdx: array
index_check(rdx, rax); // kills rbx
__ load_heap_oop(rax, Address(rdx, rax,
UseCompressedOops ? Address::times_4 : Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
UseCompressedOops ? Address::times_4 : Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
void TemplateTable::baload() {
@ -866,6 +929,11 @@ void TemplateTable::aastore() {
__ movptr(rax, at_tos()); // value
__ movl(rcx, at_tos_p1()); // index
__ movptr(rdx, at_tos_p2()); // array
Address element_address(rdx, rcx,
UseCompressedOops? Address::times_4 : Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_OBJECT));
index_check(rdx, rcx); // kills rbx
// do array store check - check for NULL value first
__ testptr(rax, rax);
@ -879,9 +947,7 @@ void TemplateTable::aastore() {
sizeof(oopDesc) +
objArrayKlass::element_klass_offset_in_bytes()));
// Compress array + index*oopSize + 12 into a single register. Frees rcx.
__ lea(rdx, Address(rdx, rcx,
UseCompressedOops ? Address::times_4 : Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
__ lea(rdx, element_address);
// Generate subtype check. Blows rcx, rdi
// Superklass in rax. Subklass in rbx.
@ -893,18 +959,19 @@ void TemplateTable::aastore() {
// Come here on success
__ bind(ok_is_subtype);
__ movptr(rax, at_tos()); // Value
__ store_heap_oop(Address(rdx, 0), rax);
__ store_check(rdx);
// Get the value we will store
__ movptr(rax, at_tos());
// Now store using the appropriate barrier
do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
__ jmp(done);
// Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx]
__ bind(is_null);
__ profile_null_seen(rbx);
__ store_heap_oop(Address(rdx, rcx,
UseCompressedOops ? Address::times_4 : Address::times_8,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
rax);
// Store a NULL
do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
// Pop stack arguments
__ bind(done);
@ -2396,8 +2463,10 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
// atos
__ pop(atos);
if (!is_static) pop_and_check_object(obj);
__ store_heap_oop(field, rax);
__ store_check(obj, field); // Need to mark card
// Store into the field
do_oop_store(_masm, field, rax, _bs->kind(), false);
if (!is_static) {
patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx);
}
@ -2584,8 +2653,7 @@ void TemplateTable::fast_storefield(TosState state) {
// access field
switch (bytecode()) {
case Bytecodes::_fast_aputfield:
__ store_heap_oop(field, rax);
__ store_check(rcx, field);
do_oop_store(_masm, field, rax, _bs->kind(), false);
break;
case Bytecodes::_fast_lputfield:
__ movq(field, rax);
@ -3044,8 +3112,6 @@ void TemplateTable::_new() {
Label initialize_header;
Label initialize_object; // including clearing the fields
Label allocate_shared;
ExternalAddress top((address)Universe::heap()->top_addr());
ExternalAddress end((address)Universe::heap()->end_addr());
__ get_cpool_and_tags(rsi, rax);
// get instanceKlass
@ -3106,6 +3172,9 @@ void TemplateTable::_new() {
if (allow_shared_alloc) {
__ bind(allocate_shared);
ExternalAddress top((address)Universe::heap()->top_addr());
ExternalAddress end((address)Universe::heap()->end_addr());
const Register RtopAddr = rscratch1;
const Register RendAddr = rscratch2;

View File

@ -242,9 +242,11 @@ void VM_Version::get_processor_features() {
_supports_cx8 = supports_cmpxchg8();
// if the OS doesn't support SSE, we can't use this feature even if the HW does
if( !os::supports_sse())
_cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4|CPU_SSE4A);
if (UseSSE < 4)
_cpuFeatures &= ~CPU_SSE4;
_cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);
if (UseSSE < 4) {
_cpuFeatures &= ~CPU_SSE4_1;
_cpuFeatures &= ~CPU_SSE4_2;
}
if (UseSSE < 3) {
_cpuFeatures &= ~CPU_SSE3;
_cpuFeatures &= ~CPU_SSSE3;
@ -261,7 +263,7 @@ void VM_Version::get_processor_features() {
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
@ -272,7 +274,8 @@ void VM_Version::get_processor_features() {
(supports_sse2() ? ", sse2" : ""),
(supports_sse3() ? ", sse3" : ""),
(supports_ssse3()? ", ssse3": ""),
(supports_sse4() ? ", sse4" : ""),
(supports_sse4_1() ? ", sse4.1" : ""),
(supports_sse4_2() ? ", sse4.2" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow() ? ", 3dnow" : ""),
(supports_3dnow2() ? ", 3dnowext" : ""),
@ -285,7 +288,7 @@ void VM_Version::get_processor_features() {
// older Pentiums which do not support it.
if( UseSSE > 4 ) UseSSE=4;
if( UseSSE < 0 ) UseSSE=0;
if( !supports_sse4() ) // Drop to 3 if no SSE4 support
if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support
UseSSE = MIN2((intx)3,UseSSE);
if( !supports_sse3() ) // Drop to 2 if no SSE3 support
UseSSE = MIN2((intx)2,UseSSE);
@ -375,6 +378,14 @@ void VM_Version::get_processor_features() {
MaxLoopPad = 11;
}
#endif // COMPILER2
if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
}
if( supports_sse4_2() && supports_ht() ) { // Newest Intel cpus
if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
}
}
}
}
@ -413,7 +424,7 @@ void VM_Version::get_processor_features() {
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per package: %u",
tty->print_cr("Logical CPUs per core: %u",
logical_processors_per_package());
tty->print_cr("UseSSE=%d",UseSSE);
tty->print("Allocation: ");

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,9 +68,9 @@ public:
cmpxchg16: 1,
: 4,
dca : 1,
: 4,
popcnt : 1,
: 8;
sse4_1 : 1,
sse4_2 : 1,
: 11;
} bits;
};
@ -177,8 +177,9 @@ protected:
CPU_SSE2 = (1 << 7),
CPU_SSE3 = (1 << 8), // sse3 comes from cpuid 1 (ECX)
CPU_SSSE3= (1 << 9),
CPU_SSE4 = (1 <<10),
CPU_SSE4A= (1 <<11)
CPU_SSE4A= (1 <<10),
CPU_SSE4_1 = (1 << 11),
CPU_SSE4_2 = (1 << 12)
} cpuFeatureFlags;
// cpuid information block. All info derived from executing cpuid with
@ -240,22 +241,14 @@ protected:
static CpuidInfo _cpuid_info;
// Extractors and predicates
static bool is_extended_cpu_family() {
const uint32_t Extended_Cpu_Family = 0xf;
return _cpuid_info.std_cpuid1_rax.bits.family == Extended_Cpu_Family;
}
static uint32_t extended_cpu_family() {
uint32_t result = _cpuid_info.std_cpuid1_rax.bits.family;
if (is_extended_cpu_family()) {
result += _cpuid_info.std_cpuid1_rax.bits.ext_family;
}
result += _cpuid_info.std_cpuid1_rax.bits.ext_family;
return result;
}
static uint32_t extended_cpu_model() {
uint32_t result = _cpuid_info.std_cpuid1_rax.bits.model;
if (is_extended_cpu_family()) {
result |= _cpuid_info.std_cpuid1_rax.bits.ext_model << 4;
}
result |= _cpuid_info.std_cpuid1_rax.bits.ext_model << 4;
return result;
}
static uint32_t cpu_stepping() {
@ -293,6 +286,10 @@ protected:
result |= CPU_SSSE3;
if (is_amd() && _cpuid_info.ext_cpuid1_rcx.bits.sse4a != 0)
result |= CPU_SSE4A;
if (_cpuid_info.std_cpuid1_rcx.bits.sse4_1 != 0)
result |= CPU_SSE4_1;
if (_cpuid_info.std_cpuid1_rcx.bits.sse4_2 != 0)
result |= CPU_SSE4_2;
return result;
}
@ -380,7 +377,8 @@ public:
static bool supports_sse2() { return (_cpuFeatures & CPU_SSE2) != 0; }
static bool supports_sse3() { return (_cpuFeatures & CPU_SSE3) != 0; }
static bool supports_ssse3() { return (_cpuFeatures & CPU_SSSE3)!= 0; }
static bool supports_sse4() { return (_cpuFeatures & CPU_SSE4) != 0; }
static bool supports_sse4_1() { return (_cpuFeatures & CPU_SSE4_1) != 0; }
static bool supports_sse4_2() { return (_cpuFeatures & CPU_SSE4_2) != 0; }
//
// AMD features
//

View File

@ -186,8 +186,10 @@ void VM_Version::get_processor_features() {
if (!VM_Version::supports_sse2()) {
vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
}
if (UseSSE < 4)
_cpuFeatures &= ~CPU_SSE4;
if (UseSSE < 4) {
_cpuFeatures &= ~CPU_SSE4_1;
_cpuFeatures &= ~CPU_SSE4_2;
}
if (UseSSE < 3) {
_cpuFeatures &= ~CPU_SSE3;
_cpuFeatures &= ~CPU_SSSE3;
@ -204,7 +206,7 @@ void VM_Version::get_processor_features() {
}
char buf[256];
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
@ -215,7 +217,8 @@ void VM_Version::get_processor_features() {
(supports_sse2() ? ", sse2" : ""),
(supports_sse3() ? ", sse3" : ""),
(supports_ssse3()? ", ssse3": ""),
(supports_sse4() ? ", sse4" : ""),
(supports_sse4_1() ? ", sse4.1" : ""),
(supports_sse4_2() ? ", sse4.2" : ""),
(supports_mmx_ext() ? ", mmxext" : ""),
(supports_3dnow() ? ", 3dnow" : ""),
(supports_3dnow2() ? ", 3dnowext" : ""),
@ -228,7 +231,7 @@ void VM_Version::get_processor_features() {
// older Pentiums which do not support it.
if( UseSSE > 4 ) UseSSE=4;
if( UseSSE < 0 ) UseSSE=0;
if( !supports_sse4() ) // Drop to 3 if no SSE4 support
if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support
UseSSE = MIN2((intx)3,UseSSE);
if( !supports_sse3() ) // Drop to 2 if no SSE3 support
UseSSE = MIN2((intx)2,UseSSE);
@ -314,6 +317,14 @@ void VM_Version::get_processor_features() {
MaxLoopPad = 11;
}
#endif // COMPILER2
if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
}
if( supports_sse4_2() && supports_ht() ) { // Newest Intel cpus
if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
}
}
}
}
@ -355,7 +366,7 @@ void VM_Version::get_processor_features() {
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per package: %u",
tty->print_cr("Logical CPUs per core: %u",
logical_processors_per_package());
tty->print_cr("UseSSE=%d",UseSSE);
tty->print("Allocation: ");

View File

@ -1,5 +1,5 @@
/*
* Copyright 2003-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,9 +68,9 @@ public:
cmpxchg16: 1,
: 4,
dca : 1,
: 4,
popcnt : 1,
: 8;
sse4_1 : 1,
sse4_2 : 1,
: 11;
} bits;
};
@ -177,8 +177,9 @@ protected:
CPU_SSE2 = (1 << 7),
CPU_SSE3 = (1 << 8),
CPU_SSSE3= (1 << 9),
CPU_SSE4 = (1 <<10),
CPU_SSE4A= (1 <<11)
CPU_SSE4A= (1 <<10),
CPU_SSE4_1 = (1 << 11),
CPU_SSE4_2 = (1 << 12)
} cpuFeatureFlags;
// cpuid information block. All info derived from executing cpuid with
@ -240,22 +241,14 @@ protected:
static CpuidInfo _cpuid_info;
// Extractors and predicates
static bool is_extended_cpu_family() {
const uint32_t Extended_Cpu_Family = 0xf;
return _cpuid_info.std_cpuid1_eax.bits.family == Extended_Cpu_Family;
}
static uint32_t extended_cpu_family() {
uint32_t result = _cpuid_info.std_cpuid1_eax.bits.family;
if (is_extended_cpu_family()) {
result += _cpuid_info.std_cpuid1_eax.bits.ext_family;
}
result += _cpuid_info.std_cpuid1_eax.bits.ext_family;
return result;
}
static uint32_t extended_cpu_model() {
uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model;
if (is_extended_cpu_family()) {
result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4;
}
result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4;
return result;
}
static uint32_t cpu_stepping() {
@ -293,6 +286,10 @@ protected:
result |= CPU_SSSE3;
if (is_amd() && _cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
result |= CPU_SSE4A;
if (_cpuid_info.std_cpuid1_ecx.bits.sse4_1 != 0)
result |= CPU_SSE4_1;
if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0)
result |= CPU_SSE4_2;
return result;
}
@ -380,7 +377,8 @@ public:
static bool supports_sse2() { return (_cpuFeatures & CPU_SSE2) != 0; }
static bool supports_sse3() { return (_cpuFeatures & CPU_SSE3) != 0; }
static bool supports_ssse3() { return (_cpuFeatures & CPU_SSSE3)!= 0; }
static bool supports_sse4() { return (_cpuFeatures & CPU_SSE4) != 0; }
static bool supports_sse4_1() { return (_cpuFeatures & CPU_SSE4_1) != 0; }
static bool supports_sse4_2() { return (_cpuFeatures & CPU_SSE4_2) != 0; }
//
// AMD features
//

View File

@ -495,8 +495,8 @@ void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
Compile* C = ra_->C;
if( C->in_24_bit_fp_mode() ) {
tty->print("FLDCW 24 bit fpu control word");
tty->print_cr(""); tty->print("\t");
st->print("FLDCW 24 bit fpu control word");
st->print_cr(""); st->print("\t");
}
int framesize = C->frame_slots() << LogBytesPerInt;
@ -510,22 +510,22 @@ void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
// stack. But the stack safety zone should account for that.
// See bugs 4446381, 4468289, 4497237.
if (C->need_stack_bang(framesize)) {
tty->print_cr("# stack bang"); tty->print("\t");
st->print_cr("# stack bang"); st->print("\t");
}
tty->print_cr("PUSHL EBP"); tty->print("\t");
st->print_cr("PUSHL EBP"); st->print("\t");
if( VerifyStackAtCalls ) { // Majik cookie to verify stack depth
tty->print("PUSH 0xBADB100D\t# Majik cookie for stack depth check");
tty->print_cr(""); tty->print("\t");
st->print("PUSH 0xBADB100D\t# Majik cookie for stack depth check");
st->print_cr(""); st->print("\t");
framesize -= wordSize;
}
if ((C->in_24_bit_fp_mode() || VerifyStackAtCalls ) && framesize < 128 ) {
if (framesize) {
tty->print("SUB ESP,%d\t# Create frame",framesize);
st->print("SUB ESP,%d\t# Create frame",framesize);
}
} else {
tty->print("SUB ESP,%d\t# Create frame",framesize);
st->print("SUB ESP,%d\t# Create frame",framesize);
}
}
#endif
@ -725,18 +725,19 @@ static enum RC rc_class( OptoReg::Name reg ) {
return rc_xmm;
}
static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size ) {
static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset, int reg,
int opcode, const char *op_str, int size, outputStream* st ) {
if( cbuf ) {
emit_opcode (*cbuf, opcode );
encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, false);
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) tty->print("\n\t");
if( size != 0 ) st->print("\n\t");
if( opcode == 0x8B || opcode == 0x89 ) { // MOV
if( is_load ) tty->print("%s %s,[ESP + #%d]",op_str,Matcher::regName[reg],offset);
else tty->print("%s [ESP + #%d],%s",op_str,offset,Matcher::regName[reg]);
if( is_load ) st->print("%s %s,[ESP + #%d]",op_str,Matcher::regName[reg],offset);
else st->print("%s [ESP + #%d],%s",op_str,offset,Matcher::regName[reg]);
} else { // FLD, FST, PUSH, POP
tty->print("%s [ESP + #%d]",op_str,offset);
st->print("%s [ESP + #%d]",op_str,offset);
}
#endif
}
@ -746,7 +747,7 @@ static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset
// Helper for XMM registers. Extra opcode bits, limited syntax.
static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
int offset, int reg_lo, int reg_hi, int size ) {
int offset, int reg_lo, int reg_hi, int size, outputStream* st ) {
if( cbuf ) {
if( reg_lo+1 == reg_hi ) { // double move?
if( is_load && !UseXmmLoadAndClearUpper )
@ -764,17 +765,17 @@ static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
encode_RegMem(*cbuf, Matcher::_regEncode[reg_lo], ESP_enc, 0x4, 0, offset, false);
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) tty->print("\n\t");
if( size != 0 ) st->print("\n\t");
if( reg_lo+1 == reg_hi ) { // double move?
if( is_load ) tty->print("%s %s,[ESP + #%d]",
if( is_load ) st->print("%s %s,[ESP + #%d]",
UseXmmLoadAndClearUpper ? "MOVSD " : "MOVLPD",
Matcher::regName[reg_lo], offset);
else tty->print("MOVSD [ESP + #%d],%s",
else st->print("MOVSD [ESP + #%d],%s",
offset, Matcher::regName[reg_lo]);
} else {
if( is_load ) tty->print("MOVSS %s,[ESP + #%d]",
if( is_load ) st->print("MOVSS %s,[ESP + #%d]",
Matcher::regName[reg_lo], offset);
else tty->print("MOVSS [ESP + #%d],%s",
else st->print("MOVSS [ESP + #%d],%s",
offset, Matcher::regName[reg_lo]);
}
#endif
@ -785,7 +786,7 @@ static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, int size ) {
int src_hi, int dst_hi, int size, outputStream* st ) {
if( UseXmmRegToRegMoveAll ) {//Use movaps,movapd to move between xmm registers
if( cbuf ) {
if( (src_lo+1 == src_hi && dst_lo+1 == dst_hi) ) {
@ -796,11 +797,11 @@ static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst
emit_rm (*cbuf, 0x3, Matcher::_regEncode[dst_lo], Matcher::_regEncode[src_lo] );
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) tty->print("\n\t");
if( size != 0 ) st->print("\n\t");
if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double move?
tty->print("MOVAPD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
st->print("MOVAPD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
} else {
tty->print("MOVAPS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
st->print("MOVAPS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
}
#endif
}
@ -813,11 +814,11 @@ static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst
emit_rm (*cbuf, 0x3, Matcher::_regEncode[dst_lo], Matcher::_regEncode[src_lo] );
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) tty->print("\n\t");
if( size != 0 ) st->print("\n\t");
if( src_lo+1 == src_hi && dst_lo+1 == dst_hi ) { // double move?
tty->print("MOVSD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
st->print("MOVSD %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
} else {
tty->print("MOVSS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
st->print("MOVSS %s,%s",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
}
#endif
}
@ -825,28 +826,29 @@ static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst
}
}
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size ) {
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
if( cbuf ) {
emit_opcode(*cbuf, 0x8B );
emit_rm (*cbuf, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) tty->print("\n\t");
tty->print("MOV %s,%s",Matcher::regName[dst],Matcher::regName[src]);
if( size != 0 ) st->print("\n\t");
st->print("MOV %s,%s",Matcher::regName[dst],Matcher::regName[src]);
#endif
}
return size+2;
}
static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi, int offset, int size ) {
static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
int offset, int size, outputStream* st ) {
if( src_lo != FPR1L_num ) { // Move value to top of FP stack, if not already there
if( cbuf ) {
emit_opcode( *cbuf, 0xD9 ); // FLD (i.e., push it)
emit_d8( *cbuf, 0xC0-1+Matcher::_regEncode[src_lo] );
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) tty->print("\n\t");
tty->print("FLD %s",Matcher::regName[src_lo]);
if( size != 0 ) st->print("\n\t");
st->print("FLD %s",Matcher::regName[src_lo]);
#endif
}
size += 2;
@ -864,7 +866,7 @@ static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
assert( !OptoReg::is_valid(src_hi) && !OptoReg::is_valid(dst_hi), "no non-adjacent float-stores" );
}
return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size);
return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size, st);
}
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
@ -892,16 +894,16 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
if( src_second == dst_first ) { // overlapping stack copy ranges
assert( src_second_rc == rc_stack && dst_second_rc == rc_stack, "we only expect a stk-stk copy here" );
size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size);
size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size);
size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
src_second_rc = dst_second_rc = rc_bad; // flag as already moved the second bits
}
// move low bits
size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH ",size);
size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP ",size);
size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH ",size, st);
size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP ",size, st);
if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { // mov second bits
size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size);
size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size);
size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
}
return size;
}
@ -909,15 +911,15 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// --------------------------------------
// Check for integer reg-reg copy
if( src_first_rc == rc_int && dst_first_rc == rc_int )
size = impl_mov_helper(cbuf,do_size,src_first,dst_first,size);
size = impl_mov_helper(cbuf,do_size,src_first,dst_first,size, st);
// Check for integer store
if( src_first_rc == rc_int && dst_first_rc == rc_stack )
size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size);
size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size, st);
// Check for integer load
if( dst_first_rc == rc_int && src_first_rc == rc_stack )
size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size);
size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);
// --------------------------------------
// Check for float reg-reg copy
@ -951,7 +953,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// Check for float store
if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
return impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size);
return impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size, st);
}
// Check for float load
@ -987,17 +989,17 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
(src_first+1 == src_second && dst_first+1 == dst_second),
"no non-adjacent float-moves" );
return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size);
return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
}
// Check for xmm store
if( src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first, src_second, size);
return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first, src_second, size, st);
}
// Check for float xmm load
if( dst_first_rc == rc_xmm && src_first_rc == rc_stack ) {
return impl_x_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size);
return impl_x_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size, st);
}
// Copy from float reg to xmm reg
@ -1017,10 +1019,10 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
}
size += 4;
size = impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,0,size);
size = impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,0,size, st);
// Copy from the temp memory to the xmm reg.
size = impl_x_helper(cbuf,do_size,true ,0,dst_first, dst_second, size);
size = impl_x_helper(cbuf,do_size,true ,0,dst_first, dst_second, size, st);
if( cbuf ) {
emit_opcode(*cbuf,0x8D); // LEA ESP,[ESP+8]
@ -1047,15 +1049,15 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// Check for second word int-int move
if( src_second_rc == rc_int && dst_second_rc == rc_int )
return impl_mov_helper(cbuf,do_size,src_second,dst_second,size);
return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);
// Check for second word integer store
if( src_second_rc == rc_int && dst_second_rc == rc_stack )
return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size);
return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
// Check for second word integer load
if( dst_second_rc == rc_int && src_second_rc == rc_stack )
return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size);
return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
Unimplemented();
@ -1318,7 +1320,11 @@ const uint Matcher::vector_ideal_reg(void) {
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int offset) {
bool Matcher::is_short_branch_offset(int rule, int offset) {
// the short version of jmpConUCF2 contains multiple branches,
// making the reach slightly less
if (rule == jmpConUCF2_rule)
return (-126 <= offset && offset <= 125);
return (-128 <= offset && offset <= 127);
}
@ -3307,7 +3313,7 @@ encode %{
// Beware -- there's a subtle invariant that fetch of the markword
// at [FETCH], below, will never observe a biased encoding (*101b).
// If this invariant is not held we risk exclusion (safety) failure.
if (UseBiasedLocking) {
if (UseBiasedLocking && !UseOptoBiasInlining) {
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
}
@ -3528,7 +3534,7 @@ encode %{
// Critically, the biased locking test must have precedence over
// and appear before the (box->dhw == 0) recursive stack-lock test.
if (UseBiasedLocking) {
if (UseBiasedLocking && !UseOptoBiasInlining) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
@ -4810,6 +4816,16 @@ operand immL0() %{
interface(CONST_INTER);
%}
// Long Immediate zero
operand immL_M1() %{
predicate( n->get_long() == -1L );
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
@ -5262,6 +5278,15 @@ operand eFlagsRegU() %{
interface(REG_INTER);
%}
operand eFlagsRegUCF() %{
constraint(ALLOC_IN_RC(int_flags));
match(RegFlags);
predicate(false);
format %{ "EFLAGS_U_CF" %}
interface(REG_INTER);
%}
// Condition Code Register used by long compare
operand flagsReg_long_LTGE() %{
constraint(ALLOC_IN_RC(int_flags));
@ -5739,12 +5764,12 @@ operand cmpOp() %{
format %{ "" %}
interface(COND_INTER) %{
equal(0x4);
not_equal(0x5);
less(0xC);
greater_equal(0xD);
less_equal(0xE);
greater(0xF);
equal(0x4, "e");
not_equal(0x5, "ne");
less(0xC, "l");
greater_equal(0xD, "ge");
less_equal(0xE, "le");
greater(0xF, "g");
%}
%}
@ -5756,12 +5781,47 @@ operand cmpOpU() %{
format %{ "" %}
interface(COND_INTER) %{
equal(0x4);
not_equal(0x5);
less(0x2);
greater_equal(0x3);
less_equal(0x6);
greater(0x7);
equal(0x4, "e");
not_equal(0x5, "ne");
less(0x2, "b");
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
%}
%}
// Floating comparisons that don't require any fixup for the unordered case
operand cmpOpUCF() %{
match(Bool);
predicate(n->as_Bool()->_test._test == BoolTest::lt ||
n->as_Bool()->_test._test == BoolTest::ge ||
n->as_Bool()->_test._test == BoolTest::le ||
n->as_Bool()->_test._test == BoolTest::gt);
format %{ "" %}
interface(COND_INTER) %{
equal(0x4, "e");
not_equal(0x5, "ne");
less(0x2, "b");
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
%}
%}
// Floating comparisons that can be fixed up with extra conditional jumps
operand cmpOpUCF2() %{
match(Bool);
predicate(n->as_Bool()->_test._test == BoolTest::ne ||
n->as_Bool()->_test._test == BoolTest::eq);
format %{ "" %}
interface(COND_INTER) %{
equal(0x4, "e");
not_equal(0x5, "ne");
less(0x2, "b");
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
%}
%}
@ -5786,12 +5846,12 @@ operand cmpOp_commute() %{
format %{ "" %}
interface(COND_INTER) %{
equal(0x4);
not_equal(0x5);
less(0xF);
greater_equal(0xE);
less_equal(0xD);
greater(0xC);
equal(0x4, "e");
not_equal(0x5, "ne");
less(0xF, "g");
greater_equal(0xE, "le");
less_equal(0xD, "ge");
greater(0xC, "l");
%}
%}
@ -7347,7 +7407,7 @@ instruct cmovI_reg(eRegI dst, eRegI src, eFlagsReg cr, cmpOp cop ) %{
ins_pipe( pipe_cmov_reg );
%}
instruct cmovI_regU( eRegI dst, eRegI src, eFlagsRegU cr, cmpOpU cop ) %{
instruct cmovI_regU( cmpOpU cop, eFlagsRegU cr, eRegI dst, eRegI src ) %{
predicate(VM_Version::supports_cmov() );
match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
ins_cost(200);
@ -7357,6 +7417,15 @@ instruct cmovI_regU( eRegI dst, eRegI src, eFlagsRegU cr, cmpOpU cop ) %{
ins_pipe( pipe_cmov_reg );
%}
instruct cmovI_regUCF( cmpOpUCF cop, eFlagsRegUCF cr, eRegI dst, eRegI src ) %{
predicate(VM_Version::supports_cmov() );
match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovI_regU(cop, cr, dst, src);
%}
%}
// Conditional move
instruct cmovI_mem(cmpOp cop, eFlagsReg cr, eRegI dst, memory src) %{
predicate(VM_Version::supports_cmov() );
@ -7369,7 +7438,7 @@ instruct cmovI_mem(cmpOp cop, eFlagsReg cr, eRegI dst, memory src) %{
%}
// Conditional move
instruct cmovI_memu(cmpOpU cop, eFlagsRegU cr, eRegI dst, memory src) %{
instruct cmovI_memU(cmpOpU cop, eFlagsRegU cr, eRegI dst, memory src) %{
predicate(VM_Version::supports_cmov() );
match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
ins_cost(250);
@ -7379,6 +7448,15 @@ instruct cmovI_memu(cmpOpU cop, eFlagsRegU cr, eRegI dst, memory src) %{
ins_pipe( pipe_cmov_mem );
%}
instruct cmovI_memUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegI dst, memory src) %{
predicate(VM_Version::supports_cmov() );
match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
ins_cost(250);
expand %{
cmovI_memU(cop, cr, dst, src);
%}
%}
// Conditional move
instruct cmovP_reg(eRegP dst, eRegP src, eFlagsReg cr, cmpOp cop ) %{
predicate(VM_Version::supports_cmov() );
@ -7406,7 +7484,7 @@ instruct cmovP_reg_nonP6(eRegP dst, eRegP src, eFlagsReg cr, cmpOp cop ) %{
%}
// Conditional move
instruct cmovP_regU(eRegP dst, eRegP src, eFlagsRegU cr, cmpOpU cop ) %{
instruct cmovP_regU(cmpOpU cop, eFlagsRegU cr, eRegP dst, eRegP src ) %{
predicate(VM_Version::supports_cmov() );
match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
ins_cost(200);
@ -7416,6 +7494,15 @@ instruct cmovP_regU(eRegP dst, eRegP src, eFlagsRegU cr, cmpOpU cop ) %{
ins_pipe( pipe_cmov_reg );
%}
instruct cmovP_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegP dst, eRegP src ) %{
predicate(VM_Version::supports_cmov() );
match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovP_regU(cop, cr, dst, src);
%}
%}
// DISABLED: Requires the ADLC to emit a bottom_type call that
// correctly meets the two pointer arguments; one is an incoming
// register but the other is a memory operand. ALSO appears to
@ -7545,6 +7632,15 @@ instruct fcmovX_regU(cmpOpU cop, eFlagsRegU cr, regX dst, regX src) %{
ins_pipe( pipe_slow );
%}
instruct fcmovX_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, regX dst, regX src) %{
predicate (UseSSE>=1);
match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
fcmovX_regU(cop, cr, dst, src);
%}
%}
// unsigned version
instruct fcmovXD_regU(cmpOpU cop, eFlagsRegU cr, regXD dst, regXD src) %{
predicate (UseSSE>=2);
@ -7563,6 +7659,15 @@ instruct fcmovXD_regU(cmpOpU cop, eFlagsRegU cr, regXD dst, regXD src) %{
ins_pipe( pipe_slow );
%}
instruct fcmovXD_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, regXD dst, regXD src) %{
predicate (UseSSE>=2);
match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
fcmovXD_regU(cop, cr, dst, src);
%}
%}
instruct cmovL_reg(cmpOp cop, eFlagsReg cr, eRegL dst, eRegL src) %{
predicate(VM_Version::supports_cmov() );
match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
@ -7585,6 +7690,15 @@ instruct cmovL_regU(cmpOpU cop, eFlagsRegU cr, eRegL dst, eRegL src) %{
ins_pipe( pipe_cmov_reg_long );
%}
instruct cmovL_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegL dst, eRegL src) %{
predicate(VM_Version::supports_cmov() );
match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovL_regU(cop, cr, dst, src);
%}
%}
//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------
// Integer Addition Instructions
@ -7816,33 +7930,36 @@ instruct storePConditional( memory heap_top_ptr, eAXRegP oldval, eRegP newval, e
ins_pipe( pipe_cmpxchg );
%}
// Conditional-store of a long value
// Returns a boolean value (0/1) on success. Implemented with a CMPXCHG8 on Intel.
// mem_ptr can actually be in either ESI or EDI
instruct storeLConditional( eRegI res, eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
match(Set res (StoreLConditional mem_ptr (Binary oldval newval)));
effect(KILL cr);
// EDX:EAX is killed if there is contention, but then it's also unused.
// In the common case of no contention, EDX:EAX holds the new oop address.
format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EDX:EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t"
"MOV $res,0\n\t"
"JNE,s fail\n\t"
"MOV $res,1\n"
"fail:" %}
ins_encode( enc_cmpxchg8(mem_ptr),
enc_flags_ne_to_boolean(res) );
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
instruct storeIConditional( memory mem, eAXRegI oldval, eRegI newval, eFlagsReg cr ) %{
match(Set cr (StoreIConditional mem (Binary oldval newval)));
effect(KILL oldval);
format %{ "CMPXCHG $mem,$newval\t# If EAX==$mem Then store $newval into $mem" %}
ins_encode( lock_prefix, Opcode(0x0F), Opcode(0xB1), RegMem(newval, mem) );
ins_pipe( pipe_cmpxchg );
%}
// Conditional-store of a long value
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG8 on Intel.
// mem_ptr can actually be in either ESI or EDI
instruct storeLConditional_flags( eSIRegP mem_ptr, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr, immI0 zero ) %{
match(Set cr (CmpI (StoreLConditional mem_ptr (Binary oldval newval)) zero));
// EDX:EAX is killed if there is contention, but then it's also unused.
// In the common case of no contention, EDX:EAX holds the new oop address.
format %{ "CMPXCHG8 [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" %}
ins_encode( enc_cmpxchg8(mem_ptr) );
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG8 on Intel.
instruct storeLConditional( memory mem, eADXRegL oldval, eBCXRegL newval, eFlagsReg cr ) %{
match(Set cr (StoreLConditional mem (Binary oldval newval)));
effect(KILL oldval);
format %{ "XCHG EBX,ECX\t# correct order for CMPXCHG8 instruction\n\t"
"CMPXCHG8 $mem,ECX:EBX\t# If EDX:EAX==$mem Then store ECX:EBX into $mem\n\t"
"XCHG EBX,ECX"
%}
ins_encode %{
// Note: we need to swap rbx, and rcx before and after the
// cmpxchg8 instruction because the instruction uses
// rcx as the high order word of the new value to store but
// our register encoding uses rbx.
__ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
if( os::is_MP() )
__ lock();
__ cmpxchg8(Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp));
__ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
%}
ins_pipe( pipe_cmpxchg );
%}
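// A portable analogue of the storeLConditional pattern above: a 64-bit
// compare-and-swap whose success/failure plays the role of the ZF result.
// On 32-bit x86 this typically lowers to LOCK CMPXCHG8B, which is exactly why
// the encoding has to juggle EDX:EAX (expected) and ECX:EBX (new value).
#include <atomic>
#include <cstdint>

bool store_long_conditional(std::atomic<int64_t>& mem,
                            int64_t expected, int64_t new_value) {
  // Returns true iff mem still held `expected` and was replaced by `new_value`.
  return mem.compare_exchange_strong(expected, new_value);
}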
@ -8309,6 +8426,7 @@ instruct shrI_eReg_imm(eRegI dst, immI8 shift, eFlagsReg cr) %{
ins_pipe( ialu_reg );
%}
// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
instruct i2b(eRegI dst, xRegI src, immI_24 twentyfour, eFlagsReg cr) %{
@ -8426,6 +8544,18 @@ instruct orI_eReg(eRegI dst, eRegI src, eFlagsReg cr) %{
ins_pipe( ialu_reg_reg );
%}
instruct orI_eReg_castP2X(eRegI dst, eRegP src, eFlagsReg cr) %{
match(Set dst (OrI dst (CastP2X src)));
effect(KILL cr);
size(2);
format %{ "OR $dst,$src" %}
opcode(0x0B);
ins_encode( OpcP, RegReg( dst, src) );
ins_pipe( ialu_reg_reg );
%}
// Or Register with Immediate
instruct orI_eReg_imm(eRegI dst, immI src, eFlagsReg cr) %{
match(Set dst (OrI dst src));
@ -8621,6 +8751,18 @@ instruct xorI_eReg(eRegI dst, eRegI src, eFlagsReg cr) %{
ins_pipe( ialu_reg_reg );
%}
// Xor Register with Immediate -1
instruct xorI_eReg_im1(eRegI dst, immI_M1 imm) %{
match(Set dst (XorI dst imm));
size(2);
format %{ "NOT $dst" %}
ins_encode %{
__ notl($dst$$Register);
%}
ins_pipe( ialu_reg );
%}
// Xor Register with Immediate
instruct xorI_eReg_imm(eRegI dst, immI src, eFlagsReg cr) %{
match(Set dst (XorI dst src));
@ -8938,6 +9080,18 @@ instruct xorl_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
ins_pipe( ialu_reg_reg_long );
%}
// Xor Long Register with Immediate -1
instruct xorl_eReg_im1(eRegL dst, immL_M1 imm) %{
match(Set dst (XorL dst imm));
format %{ "NOT $dst.lo\n\t"
"NOT $dst.hi" %}
ins_encode %{
__ notl($dst$$Register);
__ notl(HIGH_FROM_LOW($dst$$Register));
%}
ins_pipe( ialu_reg_long );
%}
// Xor Long Register with Immediate
instruct xorl_eReg_imm(eRegL dst, immL src, eFlagsReg cr) %{
match(Set dst (XorL dst src));
@ -9166,6 +9320,18 @@ instruct cmpD_cc_P6(eFlagsRegU cr, regD src1, regD src2, eAXRegI rax) %{
ins_pipe( pipe_slow );
%}
instruct cmpD_cc_P6CF(eFlagsRegUCF cr, regD src1, regD src2) %{
predicate(VM_Version::supports_cmov() && UseSSE <=1);
match(Set cr (CmpD src1 src2));
ins_cost(150);
format %{ "FLD $src1\n\t"
"FUCOMIP ST,$src2 // P6 instruction" %}
opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
ins_encode( Push_Reg_D(src1),
OpcP, RegOpc(src2));
ins_pipe( pipe_slow );
%}
// Compare & branch
instruct cmpD_cc(eFlagsRegU cr, regD src1, regD src2, eAXRegI rax) %{
predicate(UseSSE<=1);
@ -9230,6 +9396,16 @@ instruct cmpXD_cc(eFlagsRegU cr, regXD dst, regXD src, eAXRegI rax) %{
ins_pipe( pipe_slow );
%}
instruct cmpXD_ccCF(eFlagsRegUCF cr, regXD dst, regXD src) %{
predicate(UseSSE>=2);
match(Set cr (CmpD dst src));
ins_cost(100);
format %{ "COMISD $dst,$src" %}
opcode(0x66, 0x0F, 0x2F);
ins_encode(OpcP, OpcS, Opcode(tertiary), RegReg(dst, src));
ins_pipe( pipe_slow );
%}
// float compare and set condition codes in EFLAGS by XMM regs
instruct cmpXD_ccmem(eFlagsRegU cr, regXD dst, memory src, eAXRegI rax) %{
predicate(UseSSE>=2);
@ -9246,6 +9422,16 @@ instruct cmpXD_ccmem(eFlagsRegU cr, regXD dst, memory src, eAXRegI rax) %{
ins_pipe( pipe_slow );
%}
instruct cmpXD_ccmemCF(eFlagsRegUCF cr, regXD dst, memory src) %{
predicate(UseSSE>=2);
match(Set cr (CmpD dst (LoadD src)));
ins_cost(100);
format %{ "COMISD $dst,$src" %}
opcode(0x66, 0x0F, 0x2F);
ins_encode(OpcP, OpcS, Opcode(tertiary), RegMem(dst, src));
ins_pipe( pipe_slow );
%}
// Compare into -1,0,1 in XMM
instruct cmpXD_reg(eRegI dst, regXD src1, regXD src2, eFlagsReg cr) %{
predicate(UseSSE>=2);
@ -10133,6 +10319,18 @@ instruct cmpF_cc_P6(eFlagsRegU cr, regF src1, regF src2, eAXRegI rax) %{
ins_pipe( pipe_slow );
%}
instruct cmpF_cc_P6CF(eFlagsRegUCF cr, regF src1, regF src2) %{
predicate(VM_Version::supports_cmov() && UseSSE == 0);
match(Set cr (CmpF src1 src2));
ins_cost(100);
format %{ "FLD $src1\n\t"
"FUCOMIP ST,$src2 // P6 instruction" %}
opcode(0xDF, 0x05); /* DF E8+i or DF /5 */
ins_encode( Push_Reg_D(src1),
OpcP, RegOpc(src2));
ins_pipe( pipe_slow );
%}
// Compare & branch
instruct cmpF_cc(eFlagsRegU cr, regF src1, regF src2, eAXRegI rax) %{
@ -10198,6 +10396,16 @@ instruct cmpX_cc(eFlagsRegU cr, regX dst, regX src, eAXRegI rax) %{
ins_pipe( pipe_slow );
%}
instruct cmpX_ccCF(eFlagsRegUCF cr, regX dst, regX src) %{
predicate(UseSSE>=1);
match(Set cr (CmpF dst src));
ins_cost(100);
format %{ "COMISS $dst,$src" %}
opcode(0x0F, 0x2F);
ins_encode(OpcP, OpcS, RegReg(dst, src));
ins_pipe( pipe_slow );
%}
// float compare and set condition codes in EFLAGS by XMM regs
instruct cmpX_ccmem(eFlagsRegU cr, regX dst, memory src, eAXRegI rax) %{
predicate(UseSSE>=1);
@ -10214,6 +10422,16 @@ instruct cmpX_ccmem(eFlagsRegU cr, regX dst, memory src, eAXRegI rax) %{
ins_pipe( pipe_slow );
%}
instruct cmpX_ccmemCF(eFlagsRegUCF cr, regX dst, memory src) %{
predicate(UseSSE>=1);
match(Set cr (CmpF dst (LoadF src)));
ins_cost(100);
format %{ "COMISS $dst,$src" %}
opcode(0x0F, 0x2F);
ins_encode(OpcP, OpcS, RegMem(dst, src));
ins_pipe( pipe_slow );
%}
// Compare into -1,0,1 in XMM
instruct cmpX_reg(eRegI dst, regX src1, regX src2, eFlagsReg cr) %{
predicate(UseSSE>=1);
@ -12065,6 +12283,19 @@ instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
ins_pc_relative(1);
%}
instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
match(CountedLoopEnd cop cmp);
effect(USE labl);
ins_cost(200);
format %{ "J$cop,u $labl\t# Loop end" %}
size(6);
opcode(0x0F, 0x80);
ins_encode( Jcc( cop, labl) );
ins_pipe( pipe_jcc );
ins_pc_relative(1);
%}
// Jump Direct Conditional - using unsigned comparison
instruct jmpConU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
match(If cop cmp);
@ -12074,8 +12305,63 @@ instruct jmpConU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
format %{ "J$cop,u $labl" %}
size(6);
opcode(0x0F, 0x80);
ins_encode( Jcc( cop, labl) );
ins_pipe( pipe_jcc );
ins_encode(Jcc(cop, labl));
ins_pipe(pipe_jcc);
ins_pc_relative(1);
%}
instruct jmpConUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(200);
format %{ "J$cop,u $labl" %}
size(6);
opcode(0x0F, 0x80);
ins_encode(Jcc(cop, labl));
ins_pipe(pipe_jcc);
ins_pc_relative(1);
%}
instruct jmpConUCF2(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(200);
format %{ $$template
if ($cop$$cmpcode == Assembler::notEqual) {
$$emit$$"JP,u $labl\n\t"
$$emit$$"J$cop,u $labl"
} else {
$$emit$$"JP,u done\n\t"
$$emit$$"J$cop,u $labl\n\t"
$$emit$$"done:"
}
%}
size(12);
opcode(0x0F, 0x80);
ins_encode %{
Label* l = $labl$$label;
$$$emit8$primary;
emit_cc(cbuf, $secondary, Assembler::parity);
int parity_disp = -1;
bool ok = false;
if ($cop$$cmpcode == Assembler::notEqual) {
// the two jumps are emitted 6 bytes apart, so their displacements to the label differ by 6 as well
parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
} else if ($cop$$cmpcode == Assembler::equal) {
parity_disp = 6;
ok = true;
} else {
ShouldNotReachHere();
}
emit_d32(cbuf, parity_disp);
$$$emit8$primary;
emit_cc(cbuf, $secondary, $cop$$cmpcode);
int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
emit_d32(cbuf, disp);
%}
ins_pipe(pipe_jcc);
ins_pc_relative(1);
%}
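// Why cmpOpUCF2 needs the extra JP,u: after COMISS/COMISD an unordered result
// (either operand NaN) sets the parity flag, and IEEE-754 requires x != NaN to be
// true while x == NaN is false. A minimal sketch of that semantics in plain C++
// (assumes strict IEEE comparisons, i.e. no -ffast-math):
#include <cmath>
#include <cassert>

int main() {
  double nan = std::nan("");
  assert(!(nan == 1.0));   // eq must fall through on unordered operands
  assert(nan != 1.0);      // ne must still branch, hence the leading parity jump
  return 0;
}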
@ -12174,7 +12460,7 @@ instruct jmpLoopEnd_short(cmpOp cop, eFlagsReg cr, label labl) %{
effect(USE labl);
ins_cost(300);
format %{ "J$cop,s $labl" %}
format %{ "J$cop,s $labl\t# Loop end" %}
size(2);
opcode(0x70);
ins_encode( JccShort( cop, labl) );
@ -12189,7 +12475,21 @@ instruct jmpLoopEndU_short(cmpOpU cop, eFlagsRegU cmp, label labl) %{
effect(USE labl);
ins_cost(300);
format %{ "J$cop,us $labl" %}
format %{ "J$cop,us $labl\t# Loop end" %}
size(2);
opcode(0x70);
ins_encode( JccShort( cop, labl) );
ins_pipe( pipe_jcc );
ins_pc_relative(1);
ins_short_branch(1);
%}
instruct jmpLoopEndUCF_short(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
match(CountedLoopEnd cop cmp);
effect(USE labl);
ins_cost(300);
format %{ "J$cop,us $labl\t# Loop end" %}
size(2);
opcode(0x70);
ins_encode( JccShort( cop, labl) );
@ -12213,6 +12513,60 @@ instruct jmpConU_short(cmpOpU cop, eFlagsRegU cmp, label labl) %{
ins_short_branch(1);
%}
instruct jmpConUCF_short(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(300);
format %{ "J$cop,us $labl" %}
size(2);
opcode(0x70);
ins_encode( JccShort( cop, labl) );
ins_pipe( pipe_jcc );
ins_pc_relative(1);
ins_short_branch(1);
%}
instruct jmpConUCF2_short(cmpOpUCF2 cop, eFlagsRegUCF cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(300);
format %{ $$template
if ($cop$$cmpcode == Assembler::notEqual) {
$$emit$$"JP,u,s $labl\n\t"
$$emit$$"J$cop,u,s $labl"
} else {
$$emit$$"JP,u,s done\n\t"
$$emit$$"J$cop,u,s $labl\n\t"
$$emit$$"done:"
}
%}
size(4);
opcode(0x70);
ins_encode %{
Label* l = $labl$$label;
emit_cc(cbuf, $primary, Assembler::parity);
int parity_disp = -1;
if ($cop$$cmpcode == Assembler::notEqual) {
parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
} else if ($cop$$cmpcode == Assembler::equal) {
parity_disp = 2;
} else {
ShouldNotReachHere();
}
emit_d8(cbuf, parity_disp);
emit_cc(cbuf, $primary, $cop$$cmpcode);
int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
emit_d8(cbuf, disp);
assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
%}
ins_pipe(pipe_jcc);
ins_pc_relative(1);
ins_short_branch(1);
%}
// ============================================================================
// Long Compare
//

View File

@ -2004,9 +2004,12 @@ const uint Matcher::vector_ideal_reg(void) {
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int offset)
{
return -0x80 <= offset && offset < 0x80;
bool Matcher::is_short_branch_offset(int rule, int offset) {
// the short version of jmpConUCF2 contains multiple branches,
// making the reach slightly less
if (rule == jmpConUCF2_rule)
return (-126 <= offset && offset <= 125);
return (-128 <= offset && offset <= 127);
}
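
A small, self-contained check of the reach argument above (illustrative only; the constants come from the rule, everything else is assumed): the short jmpConUCF2 emits two 2-byte jumps, so whichever displacement the matcher measures, the other jump to the same label differs by roughly 2 bytes, and clamping the accepted range to [-126, 125] keeps both 8-bit displacements legal.

#include <cassert>

static bool fits_in_byte(int d) { return -128 <= d && d <= 127; }

static bool short_reach_ok(bool two_branches, int offset) {
  if (two_branches)                               // jmpConUCF2-style JP + Jcc pair
    return -126 <= offset && offset <= 125;
  return fits_in_byte(offset);                    // single short Jcc
}

int main() {
  assert(short_reach_ok(false, 127) && !short_reach_ok(true, 127));
  assert(short_reach_ok(true, 125)  && fits_in_byte(125 + 2));   // the other jump still fits
  assert(short_reach_ok(true, -126) && fits_in_byte(-126 - 2));
  return 0;
}
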
const bool Matcher::isSimpleConstant64(jlong value) {
@ -3569,7 +3572,7 @@ encode %{
// at [FETCH], below, will never observe a biased encoding (*101b).
// If this invariant is not held we'll suffer exclusion (safety) failure.
if (UseBiasedLocking) {
if (UseBiasedLocking && !UseOptoBiasInlining) {
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
}
@ -3657,7 +3660,7 @@ encode %{
} else {
Label DONE_LABEL, Stacked, CheckSucc ;
if (UseBiasedLocking) {
if (UseBiasedLocking && !UseOptoBiasInlining) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
@ -5134,6 +5137,15 @@ operand rFlagsRegU()
interface(REG_INTER);
%}
operand rFlagsRegUCF() %{
constraint(ALLOC_IN_RC(int_flags));
match(RegFlags);
predicate(false);
format %{ "RFLAGS_U_CF" %}
interface(REG_INTER);
%}
// Float register operands
operand regF()
%{
@ -5405,12 +5417,12 @@ operand cmpOp()
format %{ "" %}
interface(COND_INTER) %{
equal(0x4);
not_equal(0x5);
less(0xC);
greater_equal(0xD);
less_equal(0xE);
greater(0xF);
equal(0x4, "e");
not_equal(0x5, "ne");
less(0xC, "l");
greater_equal(0xD, "ge");
less_equal(0xE, "le");
greater(0xF, "g");
%}
%}
@ -5423,12 +5435,48 @@ operand cmpOpU()
format %{ "" %}
interface(COND_INTER) %{
equal(0x4);
not_equal(0x5);
less(0x2);
greater_equal(0x3);
less_equal(0x6);
greater(0x7);
equal(0x4, "e");
not_equal(0x5, "ne");
less(0x2, "b");
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
%}
%}
// Floating comparisons that don't require any fixup for the unordered case
operand cmpOpUCF() %{
match(Bool);
predicate(n->as_Bool()->_test._test == BoolTest::lt ||
n->as_Bool()->_test._test == BoolTest::ge ||
n->as_Bool()->_test._test == BoolTest::le ||
n->as_Bool()->_test._test == BoolTest::gt);
format %{ "" %}
interface(COND_INTER) %{
equal(0x4, "e");
not_equal(0x5, "ne");
less(0x2, "b");
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
%}
%}
// Floating comparisons that can be fixed up with extra conditional jumps
operand cmpOpUCF2() %{
match(Bool);
predicate(n->as_Bool()->_test._test == BoolTest::ne ||
n->as_Bool()->_test._test == BoolTest::eq);
format %{ "" %}
interface(COND_INTER) %{
equal(0x4, "e");
not_equal(0x5, "ne");
less(0x2, "b");
greater_equal(0x3, "nb");
less_equal(0x6, "be");
greater(0x7, "nbe");
%}
%}
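
A hedged sketch of the split these two operands make (the enum below is a hypothetical stand-in for BoolTest): after ucomiss/ucomisd an unordered result sets ZF, PF and CF, so a bare JE would treat NaN as equal and a bare JNE would miss it; eq/ne therefore go through cmpOpUCF2 and get the extra JP, while the ordering tests can be matched by cmpOpUCF without any fixup.

#include <cassert>

enum class Test { eq, ne, lt, le, gt, ge };   // hypothetical stand-in for BoolTest

static bool needs_parity_fixup(Test t) {
  return t == Test::eq || t == Test::ne;      // cmpOpUCF2 cases; the rest map to cmpOpUCF
}

int main() {
  assert(needs_parity_fixup(Test::eq) && needs_parity_fixup(Test::ne));
  assert(!needs_parity_fixup(Test::lt) && !needs_parity_fixup(Test::ge));
  return 0;
}
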
@ -7176,8 +7224,7 @@ instruct cmovI_reg(rRegI dst, rRegI src, rFlagsReg cr, cmpOp cop)
ins_pipe(pipe_cmov_reg);
%}
instruct cmovI_regU(rRegI dst, rRegI src, rFlagsRegU cr, cmpOpU cop)
%{
instruct cmovI_regU(cmpOpU cop, rFlagsRegU cr, rRegI dst, rRegI src) %{
match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
ins_cost(200); // XXX
@ -7187,9 +7234,16 @@ instruct cmovI_regU(rRegI dst, rRegI src, rFlagsRegU cr, cmpOpU cop)
ins_pipe(pipe_cmov_reg);
%}
instruct cmovI_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{
match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovI_regU(cop, cr, dst, src);
%}
%}
// Conditional move
instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src)
%{
instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) %{
match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
ins_cost(250); // XXX
@ -7211,6 +7265,14 @@ instruct cmovI_memU(cmpOpU cop, rFlagsRegU cr, rRegI dst, memory src)
ins_pipe(pipe_cmov_mem);
%}
instruct cmovI_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, memory src) %{
match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
ins_cost(250);
expand %{
cmovI_memU(cop, cr, dst, src);
%}
%}
// Conditional move
instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop)
%{
@ -7224,7 +7286,7 @@ instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop)
%}
// Conditional move
instruct cmovN_regU(rRegN dst, rRegN src, rFlagsRegU cr, cmpOpU cop)
instruct cmovN_regU(cmpOpU cop, rFlagsRegU cr, rRegN dst, rRegN src)
%{
match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
@ -7235,6 +7297,14 @@ instruct cmovN_regU(rRegN dst, rRegN src, rFlagsRegU cr, cmpOpU cop)
ins_pipe(pipe_cmov_reg);
%}
instruct cmovN_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegN dst, rRegN src) %{
match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovN_regU(cop, cr, dst, src);
%}
%}
// Conditional move
instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop)
%{
@ -7248,7 +7318,7 @@ instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop)
%}
// Conditional move
instruct cmovP_regU(rRegP dst, rRegP src, rFlagsRegU cr, cmpOpU cop)
instruct cmovP_regU(cmpOpU cop, rFlagsRegU cr, rRegP dst, rRegP src)
%{
match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
@ -7259,6 +7329,14 @@ instruct cmovP_regU(rRegP dst, rRegP src, rFlagsRegU cr, cmpOpU cop)
ins_pipe(pipe_cmov_reg); // XXX
%}
instruct cmovP_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{
match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovP_regU(cop, cr, dst, src);
%}
%}
// DISABLED: Requires the ADLC to emit a bottom_type call that
// correctly meets the two pointer arguments; one is an incoming
// register but the other is a memory operand. ALSO appears to
@ -7319,6 +7397,14 @@ instruct cmovL_regU(cmpOpU cop, rFlagsRegU cr, rRegL dst, rRegL src)
ins_pipe(pipe_cmov_reg); // XXX
%}
instruct cmovL_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{
match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovL_regU(cop, cr, dst, src);
%}
%}
instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
%{
match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
@ -7330,6 +7416,14 @@ instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
ins_pipe(pipe_cmov_mem); // XXX
%}
instruct cmovL_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, memory src) %{
match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
ins_cost(200);
expand %{
cmovL_memU(cop, cr, dst, src);
%}
%}
instruct cmovF_reg(cmpOp cop, rFlagsReg cr, regF dst, regF src)
%{
match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
@ -7366,6 +7460,14 @@ instruct cmovF_regU(cmpOpU cop, rFlagsRegU cr, regF dst, regF src)
ins_pipe(pipe_slow);
%}
instruct cmovF_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regF dst, regF src) %{
match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovF_regU(cop, cr, dst, src);
%}
%}
instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src)
%{
match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
@ -7390,6 +7492,14 @@ instruct cmovD_regU(cmpOpU cop, rFlagsRegU cr, regD dst, regD src)
ins_pipe(pipe_slow);
%}
instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
ins_cost(200);
expand %{
cmovD_regU(cop, cr, dst, src);
%}
%}
//----------Arithmetic Instructions--------------------------------------------
//----------Addition Instructions----------------------------------------------
@ -7735,7 +7845,7 @@ instruct storePConditional(memory heap_top_ptr,
rFlagsReg cr)
%{
match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
"If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %}
opcode(0x0F, 0xB1);
@ -7746,53 +7856,40 @@ instruct storePConditional(memory heap_top_ptr,
ins_pipe(pipe_cmpxchg);
%}
// Conditional-store of a long value
// Returns a boolean value (0/1) on success. Implemented with a
// CMPXCHG8 on Intel. mem_ptr can actually be in either RSI or RDI
instruct storeLConditional(rRegI res,
memory mem_ptr,
rax_RegL oldval, rRegL newval,
rFlagsReg cr)
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
instruct storeIConditional(memory mem, rax_RegI oldval, rRegI newval, rFlagsReg cr)
%{
match(Set res (StoreLConditional mem_ptr (Binary oldval newval)));
effect(KILL cr);
match(Set cr (StoreIConditional mem (Binary oldval newval)));
effect(KILL oldval);
format %{ "cmpxchgq $mem_ptr, $newval\t# (long) "
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
"sete $res\n\t"
"movzbl $res, $res" %}
format %{ "cmpxchgl $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
REX_reg_mem_wide(newval, mem_ptr),
REX_reg_mem(newval, mem),
OpcP, OpcS,
reg_mem(newval, mem_ptr),
REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
REX_reg_breg(res, res), // movzbl
Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
reg_mem(newval, mem));
ins_pipe(pipe_cmpxchg);
%}
// Conditional-store of a long value
// ZF flag is set on success, reset otherwise. Implemented with a
// CMPXCHG8 on Intel. mem_ptr can actually be in either RSI or RDI
instruct storeLConditional_flags(memory mem_ptr,
rax_RegL oldval, rRegL newval,
rFlagsReg cr,
immI0 zero)
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
instruct storeLConditional(memory mem, rax_RegL oldval, rRegL newval, rFlagsReg cr)
%{
match(Set cr (CmpI (StoreLConditional mem_ptr (Binary oldval newval)) zero));
match(Set cr (StoreLConditional mem (Binary oldval newval)));
effect(KILL oldval);
format %{ "cmpxchgq $mem_ptr, $newval\t# (long) "
"If rax == $mem_ptr then store $newval into $mem_ptr" %}
format %{ "cmpxchgq $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
opcode(0x0F, 0xB1);
ins_encode(lock_prefix,
REX_reg_mem_wide(newval, mem_ptr),
REX_reg_mem_wide(newval, mem),
OpcP, OpcS,
reg_mem(newval, mem_ptr));
reg_mem(newval, mem));
ins_pipe(pipe_cmpxchg);
%}
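
For orientation, a minimal sketch of the semantics the storeIConditional/storeLConditional rules rely on, with std::atomic standing in for LOCK CMPXCHG (that substitution and the boolean-for-ZF convention are assumptions): the compare-exchange stores newval and "sets ZF" only when the memory word equals the value in rax; otherwise the current value is loaded back.

#include <atomic>
#include <cassert>

static bool store_conditional(std::atomic<long>& mem, long expected, long newval) {
  // On mismatch, compare_exchange_strong writes the current value back into
  // 'expected', mirroring CMPXCHG loading the memory word into rax.
  return mem.compare_exchange_strong(expected, newval);
}

int main() {
  std::atomic<long> word{42};
  assert(store_conditional(word, 42, 7) && word.load() == 7);    // success: "ZF set"
  assert(!store_conditional(word, 42, 9) && word.load() == 7);   // failure: "ZF clear"
  return 0;
}
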
// XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
instruct compareAndSwapP(rRegI res,
memory mem_ptr,
rax_RegP oldval, rRegP newval,
@ -7816,7 +7913,6 @@ instruct compareAndSwapP(rRegI res,
ins_pipe( pipe_cmpxchg );
%}
// XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
instruct compareAndSwapL(rRegI res,
memory mem_ptr,
rax_RegL oldval, rRegL newval,
@ -8766,6 +8862,7 @@ instruct shrL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
ins_pipe(ialu_reg);
%}
// Logical Shift Right by 8-bit immediate
instruct shrL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
@ -9309,6 +9406,17 @@ instruct xorI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
ins_pipe(ialu_reg_reg);
%}
// Xor Register with Immediate -1
instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{
match(Set dst (XorI dst imm));
format %{ "not $dst" %}
ins_encode %{
__ notl($dst$$Register);
%}
ins_pipe(ialu_reg);
%}
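
A one-line sanity check of the identity the xorI_rReg_im1 rule (and the matching xorL_rReg_im1 further down) depends on, assuming ordinary two's-complement integers: XOR with an all-ones immediate is the same as bitwise NOT, so the encoder can emit notl/notq instead of xorl/xorq.

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = 0x12345678;
  int64_t y = 0x0123456789abcdefLL;
  assert((x ^ -1) == ~x);      // xorI_rReg_im1 -> notl
  assert((y ^ -1LL) == ~y);    // xorL_rReg_im1 -> notq
  return 0;
}
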
// Xor Register with Immediate
instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
@ -9464,6 +9572,18 @@ instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
ins_pipe(ialu_reg_reg);
%}
// Use any_RegP to match R15 (TLS register) without spilling.
instruct orL_rReg_castP2X(rRegL dst, any_RegP src, rFlagsReg cr) %{
match(Set dst (OrL dst (CastP2X src)));
effect(KILL cr);
format %{ "orq $dst, $src\t# long" %}
opcode(0x0B);
ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
ins_pipe(ialu_reg_reg);
%}
// Or Register with Immediate
instruct orL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
@ -9529,6 +9649,17 @@ instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
ins_pipe(ialu_reg_reg);
%}
// Xor Register with Immediate -1
instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{
match(Set dst (XorL dst imm));
format %{ "notq $dst" %}
ins_encode %{
__ notq($dst$$Register);
%}
ins_pipe(ialu_reg);
%}
// Xor Register with Immediate
instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
@ -9694,6 +9825,17 @@ instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
ins_pipe(pipe_slow);
%}
instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{
match(Set cr (CmpF src1 src2));
ins_cost(145);
format %{ "ucomiss $src1, $src2" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2)
%{
match(Set cr (CmpF src1 (LoadF src2)));
@ -9711,6 +9853,16 @@ instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2)
ins_pipe(pipe_slow);
%}
instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{
match(Set cr (CmpF src1 (LoadF src2)));
ins_cost(100);
format %{ "ucomiss $src1, $src2" %}
opcode(0x0F, 0x2E);
ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2));
ins_pipe(pipe_slow);
%}
instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2)
%{
match(Set cr (CmpF src1 src2));
@ -9728,6 +9880,16 @@ instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2)
ins_pipe(pipe_slow);
%}
instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src1, immF src2) %{
match(Set cr (CmpF src1 src2));
ins_cost(100);
format %{ "ucomiss $src1, $src2" %}
opcode(0x0F, 0x2E);
ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2));
ins_pipe(pipe_slow);
%}
instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
%{
match(Set cr (CmpD src1 src2));
@ -9745,6 +9907,17 @@ instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
ins_pipe(pipe_slow);
%}
instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{
match(Set cr (CmpD src1 src2));
ins_cost(100);
format %{ "ucomisd $src1, $src2 test" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}
instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2)
%{
match(Set cr (CmpD src1 (LoadD src2)));
@ -9762,6 +9935,16 @@ instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2)
ins_pipe(pipe_slow);
%}
instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{
match(Set cr (CmpD src1 (LoadD src2)));
ins_cost(100);
format %{ "ucomisd $src1, $src2" %}
opcode(0x66, 0x0F, 0x2E);
ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2));
ins_pipe(pipe_slow);
%}
instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2)
%{
match(Set cr (CmpD src1 src2));
@ -9779,6 +9962,16 @@ instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2)
ins_pipe(pipe_slow);
%}
instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src1, immD src2) %{
match(Set cr (CmpD src1 src2));
ins_cost(100);
format %{ "ucomisd $src1, [$src2]" %}
opcode(0x66, 0x0F, 0x2E);
ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2));
ins_pipe(pipe_slow);
%}
// Compare into -1,0,1
instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr)
%{
@ -11384,8 +11577,7 @@ instruct jmpLoopEnd(cmpOp cop, rFlagsReg cr, label labl)
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl)
%{
instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
match(CountedLoopEnd cop cmp);
effect(USE labl);
@ -11398,14 +11590,26 @@ instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl)
ins_pc_relative(1);
%}
instruct jmpLoopEndUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
match(CountedLoopEnd cop cmp);
effect(USE labl);
ins_cost(200);
format %{ "j$cop,u $labl\t# loop end" %}
size(6);
opcode(0x0F, 0x80);
ins_encode(Jcc(cop, labl));
ins_pipe(pipe_jcc);
ins_pc_relative(1);
%}
// Jump Direct Conditional - using unsigned comparison
instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl)
%{
instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(300);
format %{ "j$cop,u $labl" %}
format %{ "j$cop,u $labl" %}
size(6);
opcode(0x0F, 0x80);
ins_encode(Jcc(cop, labl));
@ -11413,6 +11617,59 @@ instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl)
ins_pc_relative(1);
%}
instruct jmpConUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(200);
format %{ "j$cop,u $labl" %}
size(6);
opcode(0x0F, 0x80);
ins_encode(Jcc(cop, labl));
ins_pipe(pipe_jcc);
ins_pc_relative(1);
%}
instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(200);
format %{ $$template
if ($cop$$cmpcode == Assembler::notEqual) {
$$emit$$"jp,u $labl\n\t"
$$emit$$"j$cop,u $labl"
} else {
$$emit$$"jp,u done\n\t"
$$emit$$"j$cop,u $labl\n\t"
$$emit$$"done:"
}
%}
size(12);
opcode(0x0F, 0x80);
ins_encode %{
Label* l = $labl$$label;
$$$emit8$primary;
emit_cc(cbuf, $secondary, Assembler::parity);
int parity_disp = -1;
if ($cop$$cmpcode == Assembler::notEqual) {
// the two jumps are 6 bytes apart, so the jump distances are too
parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
} else if ($cop$$cmpcode == Assembler::equal) {
parity_disp = 6;
} else {
ShouldNotReachHere();
}
emit_d32(cbuf, parity_disp);
$$$emit8$primary;
emit_cc(cbuf, $secondary, $cop$$cmpcode);
int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0;
emit_d32(cbuf, disp);
%}
ins_pipe(pipe_jcc);
ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary
// superklass array for an instance of the superklass. Set a hidden
@ -11483,8 +11740,7 @@ instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
// specific code section of the file.
// Jump Direct - Label defines a relative address from JMP+1
instruct jmpDir_short(label labl)
%{
instruct jmpDir_short(label labl) %{
match(Goto);
effect(USE labl);
@ -11499,8 +11755,7 @@ instruct jmpDir_short(label labl)
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl)
%{
instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl) %{
match(If cop cr);
effect(USE labl);
@ -11515,13 +11770,12 @@ instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl)
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl)
%{
instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl) %{
match(CountedLoopEnd cop cr);
effect(USE labl);
ins_cost(300);
format %{ "j$cop,s $labl" %}
format %{ "j$cop,s $labl\t# loop end" %}
size(2);
opcode(0x70);
ins_encode(JccShort(cop, labl));
@ -11531,11 +11785,39 @@ instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl)
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl)
%{
instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
match(CountedLoopEnd cop cmp);
effect(USE labl);
ins_cost(300);
format %{ "j$cop,us $labl\t# loop end" %}
size(2);
opcode(0x70);
ins_encode(JccShort(cop, labl));
ins_pipe(pipe_jcc);
ins_pc_relative(1);
ins_short_branch(1);
%}
instruct jmpLoopEndUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
match(CountedLoopEnd cop cmp);
effect(USE labl);
ins_cost(300);
format %{ "j$cop,us $labl\t# loop end" %}
size(2);
opcode(0x70);
ins_encode(JccShort(cop, labl));
ins_pipe(pipe_jcc);
ins_pc_relative(1);
ins_short_branch(1);
%}
// Jump Direct Conditional - using unsigned comparison
instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(300);
format %{ "j$cop,us $labl" %}
size(2);
@ -11546,9 +11828,7 @@ instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl)
ins_short_branch(1);
%}
// Jump Direct Conditional - using unsigned comparison
instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl)
%{
instruct jmpConUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
@ -11562,6 +11842,46 @@ instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl)
ins_short_branch(1);
%}
instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
match(If cop cmp);
effect(USE labl);
ins_cost(300);
format %{ $$template
if ($cop$$cmpcode == Assembler::notEqual) {
$$emit$$"jp,u,s $labl\n\t"
$$emit$$"j$cop,u,s $labl"
} else {
$$emit$$"jp,u,s done\n\t"
$$emit$$"j$cop,u,s $labl\n\t"
$$emit$$"done:"
}
%}
size(4);
opcode(0x70);
ins_encode %{
Label* l = $labl$$label;
emit_cc(cbuf, $primary, Assembler::parity);
int parity_disp = -1;
if ($cop$$cmpcode == Assembler::notEqual) {
parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
} else if ($cop$$cmpcode == Assembler::equal) {
parity_disp = 2;
} else {
ShouldNotReachHere();
}
emit_d8(cbuf, parity_disp);
emit_cc(cbuf, $primary, $cop$$cmpcode);
int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
emit_d8(cbuf, disp);
assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
%}
ins_pipe(pipe_jcc);
ins_pc_relative(1);
ins_short_branch(1);
%}
// ============================================================================
// inlined locking and unlocking

View File

@ -1,5 +1,5 @@
/*
* Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1110,7 +1110,7 @@ static jstring getPlatformEncoding(JNIEnv *env) {
if (propname) {
jclass cls;
jmethodID mid;
NULL_CHECK0 (cls = (*env)->FindClass(env, "java/lang/System"));
NULL_CHECK0 (cls = FindBootStrapClass(env, "java/lang/System"));
NULL_CHECK0 (mid = (*env)->GetStaticMethodID(
env, cls,
"getProperty",
@ -1125,7 +1125,7 @@ static jstring getPlatformEncoding(JNIEnv *env) {
static jboolean isEncodingSupported(JNIEnv *env, jstring enc) {
jclass cls;
jmethodID mid;
NULL_CHECK0 (cls = (*env)->FindClass(env, "java/nio/charset/Charset"));
NULL_CHECK0 (cls = FindBootStrapClass(env, "java/nio/charset/Charset"));
NULL_CHECK0 (mid = (*env)->GetStaticMethodID(
env, cls,
"isSupported",
@ -1161,7 +1161,7 @@ NewPlatformString(JNIEnv *env, char *s)
#else
if (isEncodingSupported(env, enc) == JNI_TRUE) {
#endif
NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String"));
NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
"([BLjava/lang/String;)V"));
str = (*env)->NewObject(env, cls, mid, ary, enc);
@ -1172,7 +1172,7 @@ NewPlatformString(JNIEnv *env, char *s)
the encoding name, in which the StringCoding class will
pick up the iso-8859-1 as the fallback converter for us.
*/
NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String"));
NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
"([B)V"));
str = (*env)->NewObject(env, cls, mid, ary);
@ -1195,7 +1195,7 @@ NewPlatformStringArray(JNIEnv *env, char **strv, int strc)
jarray ary;
int i;
NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String"));
NULL_CHECK0(ary = (*env)->NewObjectArray(env, strc, cls, 0));
for (i = 0; i < strc; i++) {
jstring str = NewPlatformString(env, *strv++);
@ -1224,6 +1224,7 @@ LoadClass(JNIEnv *env, char *name)
c = *t++;
*s++ = (c == '.') ? '/' : c;
} while (c != '\0');
// use the application class loader for main-class
cls = (*env)->FindClass(env, buf);
free(buf);
@ -1250,7 +1251,7 @@ GetMainClassName(JNIEnv *env, char *jarname)
jobject jar, man, attr;
jstring str, result = 0;
NULL_CHECK0(cls = (*env)->FindClass(env, "java/util/jar/JarFile"));
NULL_CHECK0(cls = FindBootStrapClass(env, "java/util/jar/JarFile"));
NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
"(Ljava/lang/String;)V"));
NULL_CHECK0(str = NewPlatformString(env, jarname));
@ -1471,7 +1472,7 @@ PrintJavaVersion(JNIEnv *env)
jclass ver;
jmethodID print;
NULL_CHECK(ver = (*env)->FindClass(env, "sun/misc/Version"));
NULL_CHECK(ver = FindBootStrapClass(env, "sun/misc/Version"));
NULL_CHECK(print = (*env)->GetStaticMethodID(env, ver, "print", "()V"));
(*env)->CallStaticVoidMethod(env, ver, print);

View File

@ -1,5 +1,5 @@
/*
* Copyright 1999-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -100,5 +100,15 @@ void* MemAlloc(size_t size);
* Make launcher spit debug output.
*/
extern jboolean _launcher_debug;
/*
* This allows classes to be found from the VM's bootstrap class loader
* directly; FindClass uses the application class loader internally, which
* causes unnecessary searching of the classpath for the required classes.
*/
typedef jclass (JNICALL FindClassFromBootLoader_t(JNIEnv *env,
const char *name,
jboolean throwError));
jclass FindBootStrapClass(JNIEnv *env, const char *classname);
#endif /* _JAVA_H_ */

View File

@ -1,5 +1,5 @@
/*
* Copyright 1999-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1826,3 +1826,23 @@ UnsetEnv(char *name)
{
return(borrowed_unsetenv(name));
}
/*
* The implementation for finding classes from the bootstrap
* class loader; refer to java.h.
*/
static FindClassFromBootLoader_t *findBootClass = NULL;
jclass
FindBootStrapClass(JNIEnv *env, const char* classname)
{
if (findBootClass == NULL) {
findBootClass = (FindClassFromBootLoader_t *)dlsym(RTLD_DEFAULT,
"JVM_FindClassFromBootLoader");
if (findBootClass == NULL) {
fprintf(stderr, "Error: could load method JVM_FindClassFromBootLoader");
return NULL;
}
}
return findBootClass(env, classname, JNI_FALSE);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,5 +38,6 @@
// platforms, but they may have different default values on other platforms.
//
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseOSErrorReporting, false);
define_pd_global(bool, UseThreadPriorities, true) ;

View File

@ -1261,6 +1261,17 @@ jlong os::elapsed_frequency() {
return (1000 * 1000);
}
// For now, we say that linux does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).
bool os::supports_vtime() { return false; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }
double os::elapsedVTime() {
// better than nothing, but not much
return elapsedTime();
}
jlong os::javaTimeMillis() {
timeval time;
int status = gettimeofday(&time, NULL);
@ -2261,7 +2272,9 @@ void os::free_memory(char *addr, size_t bytes) {
uncommit_memory(addr, bytes);
}
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_global(char *addr, size_t bytes) {
Linux::numa_interleave_memory(addr, bytes);
}
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
@ -2303,7 +2316,7 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info
extern "C" void numa_warn(int number, char *where, ...) { }
extern "C" void numa_error(char *where) { }
void os::Linux::libnuma_init() {
bool os::Linux::libnuma_init() {
// sched_getcpu() should be in libc.
set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
dlsym(RTLD_DEFAULT, "sched_getcpu")));
@ -2319,31 +2332,51 @@ void os::Linux::libnuma_init() {
dlsym(handle, "numa_available")));
set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
dlsym(handle, "numa_tonode_memory")));
set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
dlsym(handle, "numa_interleave_memory")));
if (numa_available() != -1) {
set_numa_all_nodes((unsigned long*)dlsym(handle, "numa_all_nodes"));
// Create a cpu -> node mapping
_cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
rebuild_cpu_to_node_map();
return true;
}
}
}
return false;
}
// rebuild_cpu_to_node_map() constructs a table mapping cpu id to node id.
// The table is later used in get_node_by_cpu().
void os::Linux::rebuild_cpu_to_node_map() {
int cpu_num = os::active_processor_count();
const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
// in libnuma (possible values start at 16 and continue up through
// every other power of 2, but stay below the maximum number of CPUs
// supported by the kernel), and is subject to change (the requirements
// are more reasonable in libnuma version 2), we just hardcode the
// number the library uses.
const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
size_t cpu_num = os::active_processor_count();
size_t cpu_map_size = NCPUS / BitsPerCLong;
size_t cpu_map_valid_size =
MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
cpu_to_node()->clear();
cpu_to_node()->at_grow(cpu_num - 1);
int node_num = numa_get_groups_num();
int cpu_map_size = (cpu_num + BitsPerLong - 1) / BitsPerLong;
size_t node_num = numa_get_groups_num();
unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
for (int i = 0; i < node_num; i++) {
for (size_t i = 0; i < node_num; i++) {
if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
for (int j = 0; j < cpu_map_size; j++) {
for (size_t j = 0; j < cpu_map_valid_size; j++) {
if (cpu_map[j] != 0) {
for (int k = 0; k < BitsPerLong; k++) {
for (size_t k = 0; k < BitsPerCLong; k++) {
if (cpu_map[j] & (1UL << k)) {
cpu_to_node()->at_put(j * BitsPerLong + k, i);
cpu_to_node()->at_put(j * BitsPerCLong + k, i);
}
}
}
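
For reference, a standalone sketch of the bitmap walk rebuild_cpu_to_node_map() performs (std::vector stands in for the GrowableArray; all names here are illustrative): libnuma hands back, per node, a CPU mask packed into unsigned longs, and a set bit at position j*BITS+k means CPU j*BITS+k belongs to that node. With the hardcoded NCPUS of 32768 and 64-bit longs that is 32768 / 64 = 512 words, of which only the first ceil(active_cpus / 64) are trusted.

#include <climits>
#include <cstddef>
#include <vector>

static void decode_node_cpus(const unsigned long* cpu_map, std::size_t words,
                             int node, std::vector<int>& cpu_to_node) {
  const std::size_t bits = sizeof(unsigned long) * CHAR_BIT;
  for (std::size_t j = 0; j < words; j++) {
    if (cpu_map[j] == 0) continue;                 // no CPUs of this node in this word
    for (std::size_t k = 0; k < bits; k++) {
      if (cpu_map[j] & (1UL << k)) {
        std::size_t cpu = j * bits + k;
        if (cpu < cpu_to_node.size()) cpu_to_node[cpu] = node;
      }
    }
  }
}

int main() {
  std::vector<int> cpu_to_node(8, -1);
  unsigned long mask[1] = { 0x16UL };              // CPUs 1, 2 and 4 on node 0
  decode_node_cpus(mask, 1, 0, cpu_to_node);
  return (cpu_to_node[1] == 0 && cpu_to_node[4] == 0 && cpu_to_node[0] == -1) ? 0 : 1;
}
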
@ -2366,7 +2399,8 @@ os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
unsigned long* os::Linux::_numa_all_nodes;
bool os::uncommit_memory(char* addr, size_t size) {
return ::mmap(addr, size,
@ -2466,7 +2500,7 @@ bool os::guard_memory(char* addr, size_t size) {
}
bool os::unguard_memory(char* addr, size_t size) {
return linux_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
}
// Large page support
@ -3684,7 +3718,17 @@ jint os::init_2(void)
}
if (UseNUMA) {
Linux::libnuma_init();
if (!Linux::libnuma_init()) {
UseNUMA = false;
} else {
if ((Linux::numa_max_node() < 1)) {
// There's only one node (they start from 0); disable NUMA.
UseNUMA = false;
}
}
if (!UseNUMA && ForceNUMA) {
UseNUMA = true;
}
}
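
A hedged condensation of the UseNUMA handling above (function and parameter names are illustrative, not HotSpot API): the flag only survives when the support library loads and more than one node exists, and ForceNUMA puts it back on regardless of those checks. The Solaris path later in this commit applies the same policy with liblgrp and locality groups.

static bool decide_use_numa(bool use_numa_flag, bool force_numa,
                            bool lib_ok, bool more_than_one_node) {
  if (!use_numa_flag) return false;              // the whole block is guarded by UseNUMA
  bool use = lib_ok && more_than_one_node;       // libnuma_init()/liblgrp_init() + node count
  return use || force_numa;                      // ForceNUMA re-enables it
}

int main() { return decide_use_numa(true, false, true, false) ? 1 : 0; }  // single node -> off
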
if (MaxFDLimit) {

View File

@ -146,7 +146,7 @@ class Linux {
static bool is_floating_stack() { return _is_floating_stack; }
static void libpthread_init();
static void libnuma_init();
static bool libnuma_init();
// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
@ -240,20 +240,23 @@ private:
typedef int (*numa_max_node_func_t)(void);
typedef int (*numa_available_func_t)(void);
typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
static sched_getcpu_func_t _sched_getcpu;
static numa_node_to_cpus_func_t _numa_node_to_cpus;
static numa_max_node_func_t _numa_max_node;
static numa_available_func_t _numa_available;
static numa_tonode_memory_func_t _numa_tonode_memory;
static numa_interleave_memory_func_t _numa_interleave_memory;
static unsigned long* _numa_all_nodes;
static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
public:
static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
@ -264,6 +267,11 @@ public:
static int numa_tonode_memory(void *start, size_t size, int node) {
return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
}
static void numa_interleave_memory(void *start, size_t size) {
if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
_numa_interleave_memory(start, size, _numa_all_nodes);
}
}
static int get_node_by_cpu(int cpu_id);
};

View File

@ -1,5 +1,5 @@
/*
* Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1110,7 +1110,7 @@ static jstring getPlatformEncoding(JNIEnv *env) {
if (propname) {
jclass cls;
jmethodID mid;
NULL_CHECK0 (cls = (*env)->FindClass(env, "java/lang/System"));
NULL_CHECK0 (cls = FindBootStrapClass(env, "java/lang/System"));
NULL_CHECK0 (mid = (*env)->GetStaticMethodID(
env, cls,
"getProperty",
@ -1125,7 +1125,7 @@ static jstring getPlatformEncoding(JNIEnv *env) {
static jboolean isEncodingSupported(JNIEnv *env, jstring enc) {
jclass cls;
jmethodID mid;
NULL_CHECK0 (cls = (*env)->FindClass(env, "java/nio/charset/Charset"));
NULL_CHECK0 (cls = FindBootStrapClass(env, "java/nio/charset/Charset"));
NULL_CHECK0 (mid = (*env)->GetStaticMethodID(
env, cls,
"isSupported",
@ -1161,7 +1161,7 @@ NewPlatformString(JNIEnv *env, char *s)
#else
if (isEncodingSupported(env, enc) == JNI_TRUE) {
#endif
NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String"));
NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
"([BLjava/lang/String;)V"));
str = (*env)->NewObject(env, cls, mid, ary, enc);
@ -1172,7 +1172,7 @@ NewPlatformString(JNIEnv *env, char *s)
the encoding name, in which the StringCoding class will
pick up the iso-8859-1 as the fallback converter for us.
*/
NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String"));
NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
"([B)V"));
str = (*env)->NewObject(env, cls, mid, ary);
@ -1195,7 +1195,7 @@ NewPlatformStringArray(JNIEnv *env, char **strv, int strc)
jarray ary;
int i;
NULL_CHECK0(cls = (*env)->FindClass(env, "java/lang/String"));
NULL_CHECK0(cls = FindBootStrapClass(env, "java/lang/String"));
NULL_CHECK0(ary = (*env)->NewObjectArray(env, strc, cls, 0));
for (i = 0; i < strc; i++) {
jstring str = NewPlatformString(env, *strv++);
@ -1224,6 +1224,7 @@ LoadClass(JNIEnv *env, char *name)
c = *t++;
*s++ = (c == '.') ? '/' : c;
} while (c != '\0');
// use the application class loader for the main-class
cls = (*env)->FindClass(env, buf);
free(buf);
@ -1250,7 +1251,7 @@ GetMainClassName(JNIEnv *env, char *jarname)
jobject jar, man, attr;
jstring str, result = 0;
NULL_CHECK0(cls = (*env)->FindClass(env, "java/util/jar/JarFile"));
NULL_CHECK0(cls = FindBootStrapClass(env, "java/util/jar/JarFile"));
NULL_CHECK0(mid = (*env)->GetMethodID(env, cls, "<init>",
"(Ljava/lang/String;)V"));
NULL_CHECK0(str = NewPlatformString(env, jarname));
@ -1471,7 +1472,7 @@ PrintJavaVersion(JNIEnv *env)
jclass ver;
jmethodID print;
NULL_CHECK(ver = (*env)->FindClass(env, "sun/misc/Version"));
NULL_CHECK(ver = FindBootStrapClass(env, "sun/misc/Version"));
NULL_CHECK(print = (*env)->GetStaticMethodID(env, ver, "print", "()V"));
(*env)->CallStaticVoidMethod(env, ver, print);

View File

@ -1,5 +1,5 @@
/*
* Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -101,4 +101,15 @@ void* MemAlloc(size_t size);
*/
extern jboolean _launcher_debug;
/*
* This allows classes to be found from the VM's bootstrap class loader
* directly; FindClass uses the application class loader internally, which
* causes unnecessary searching of the classpath for the required classes.
*/
typedef jclass (JNICALL FindClassFromBootLoader_t(JNIEnv *env,
const char *name,
jboolean throwError));
jclass FindBootStrapClass(JNIEnv *env, const char *classname);
#endif /* _JAVA_H_ */

View File

@ -1,5 +1,5 @@
/*
* Copyright 1998-2005 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1826,3 +1826,24 @@ UnsetEnv(char *name)
{
return(borrowed_unsetenv(name));
}
/*
* The implementation for finding classes from the bootstrap
* class loader; refer to java.h.
*/
static FindClassFromBootLoader_t *findBootClass = NULL;
jclass
FindBootStrapClass(JNIEnv *env, const char* classname)
{
if (findBootClass == NULL) {
findBootClass = (FindClassFromBootLoader_t *)dlsym(RTLD_DEFAULT,
"JVM_FindClassFromBootLoader");
if (findBootClass == NULL) {
fprintf(stderr, "Error: could not load method JVM_FindClassFromBootLoader");
return NULL;
}
}
return findBootClass(env, classname, JNI_FALSE);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,5 +44,6 @@
// platforms, but they may have different default values on other platforms.
//
define_pd_global(bool, UseLargePages, true);
define_pd_global(bool, UseLargePagesIndividualAllocation, false);
define_pd_global(bool, UseOSErrorReporting, false);
define_pd_global(bool, UseThreadPriorities, false);

View File

@ -462,16 +462,14 @@ int os::active_processor_count() {
int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
pid_t pid = getpid();
psetid_t pset = PS_NONE;
// Are we running in a processor set?
// Are we running in a processor set or is there any processor set around?
if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
if (pset != PS_NONE) {
uint_t pset_cpus;
// Query number of cpus in processor set
if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
_processors_online = pset_cpus;
return pset_cpus;
}
uint_t pset_cpus;
// Query the number of cpus available to us.
if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
_processors_online = pset_cpus;
return pset_cpus;
}
}
// Otherwise return number of online cpus
@ -1640,16 +1638,24 @@ inline hrtime_t oldgetTimeNanos() {
// getTimeNanos is guaranteed to not move backward on Solaris
inline hrtime_t getTimeNanos() {
if (VM_Version::supports_cx8()) {
bool retry = false;
hrtime_t newtime = gethrtime();
hrtime_t oldmaxtime = max_hrtime;
hrtime_t retmaxtime = oldmaxtime;
while ((newtime > retmaxtime) && (retry == false || retmaxtime != oldmaxtime)) {
oldmaxtime = retmaxtime;
retmaxtime = Atomic::cmpxchg(newtime, (volatile jlong *)&max_hrtime, oldmaxtime);
retry = true;
}
return (newtime > retmaxtime) ? newtime : retmaxtime;
const hrtime_t now = gethrtime();
const hrtime_t prev = max_hrtime;
if (now <= prev) return prev; // same or retrograde time;
const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then
// we should return "obsv". If the CAS failed and now > obsv > prev then
// some other thread raced this thread and installed a new value, in which case
// we could either (a) retry the entire operation, (b) retry trying to install now
// or (c) just return obsv. We use (c). No loop is required although in some cases
// we might discard a higher "now" value in deference to a slightly lower but freshly
// installed obsv value. That's entirely benign -- it admits no new orderings compared
// to (a) or (b) -- and greatly reduces coherence traffic.
// We might also condition (c) on the magnitude of the delta between obs and now.
// Avoiding excessive CAS operations to hot RW locations is critical.
// See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
return (prev == obsv) ? now : obsv ;
} else {
return oldgetTimeNanos();
}
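
A minimal sketch of the lock-free monotonic clock described in the comment above, using std::atomic in place of Atomic::cmpxchg and a plain parameter in place of gethrtime() (those substitutions, and the names, are assumptions): one load, at most one CAS, and on a lost race the freshly observed value is returned instead of retrying.

#include <atomic>
#include <cstdint>

static std::atomic<int64_t> max_seen{0};

int64_t monotonic_nanos(int64_t raw_now /* e.g. a gethrtime() reading */) {
  int64_t prev = max_seen.load(std::memory_order_relaxed);
  if (raw_now <= prev) return prev;                       // same or retrograde reading
  // Single CAS attempt; on failure 'prev' now holds the value another thread
  // installed, and returning it keeps the result monotonic without a retry loop.
  if (max_seen.compare_exchange_strong(prev, raw_now)) return raw_now;
  return prev;
}

int main() {
  return (monotonic_nanos(100) == 100 && monotonic_nanos(90) == 100) ? 0 : 1;
}
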
@ -1691,6 +1697,40 @@ bool os::getTimesSecs(double* process_real_time,
}
}
bool os::supports_vtime() { return true; }
bool os::enable_vtime() {
int fd = open("/proc/self/ctl", O_WRONLY);
if (fd == -1)
return false;
long cmd[] = { PCSET, PR_MSACCT };
int res = write(fd, cmd, sizeof(long) * 2);
close(fd);
if (res != sizeof(long) * 2)
return false;
return true;
}
bool os::vtime_enabled() {
int fd = open("/proc/self/status", O_RDONLY);
if (fd == -1)
return false;
pstatus_t status;
int res = read(fd, (void*) &status, sizeof(pstatus_t));
close(fd);
if (res != sizeof(pstatus_t))
return false;
return status.pr_flags & PR_MSACCT;
}
double os::elapsedVTime() {
return (double)gethrvtime() / (double)hrtime_hz;
}
// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
@ -2688,7 +2728,7 @@ size_t os::numa_get_leaf_groups(int *ids, size_t size) {
return bottom;
}
// Detect the topology change. Typically happens during CPU pluggin-unplugging.
// Detect the topology change. Typically happens during CPU plugging-unplugging.
bool os::numa_topology_changed() {
int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
if (is_stale != -1 && is_stale) {
@ -2994,6 +3034,8 @@ static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
// Also, used for serialization page and for compressed oops null pointer
// checking.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
bool is_committed) {
unsigned int p = 0;
@ -3017,7 +3059,7 @@ bool os::guard_memory(char* addr, size_t bytes) {
}
bool os::unguard_memory(char* addr, size_t bytes) {
return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE|PROT_EXEC);
return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}
// Large page support
@ -3724,7 +3766,6 @@ int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
int maxClamped = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
iaInfo->ia_upri = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
iaInfo->ia_uprilim = IA_NOCHANGE;
iaInfo->ia_nice = IA_NOCHANGE;
iaInfo->ia_mode = IA_NOCHANGE;
if (ThreadPriorityVerbose) {
tty->print_cr ("IA: [%d...%d] %d->%d\n",
@ -4607,7 +4648,7 @@ void os::Solaris::synchronization_init() {
}
}
void os::Solaris::liblgrp_init() {
bool os::Solaris::liblgrp_init() {
void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
if (handle != NULL) {
os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
@ -4622,9 +4663,9 @@ void os::Solaris::liblgrp_init() {
lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
set_lgrp_cookie(c);
} else {
warning("your OS does not support NUMA");
return true;
}
return false;
}
void os::Solaris::misc_sym_init() {
@ -4793,9 +4834,25 @@ jint os::init_2(void) {
vm_page_size()));
Solaris::libthread_init();
if (UseNUMA) {
Solaris::liblgrp_init();
if (!Solaris::liblgrp_init()) {
UseNUMA = false;
} else {
size_t lgrp_limit = os::numa_get_groups_num();
int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
FREE_C_HEAP_ARRAY(int, lgrp_ids);
if (lgrp_num < 2) {
// There's only one locality group; disable NUMA.
UseNUMA = false;
}
}
if (!UseNUMA && ForceNUMA) {
UseNUMA = true;
}
}
Solaris::misc_sym_init();
Solaris::signal_sets_init();
Solaris::init_signal_mem();

View File

@ -176,7 +176,7 @@ class Solaris {
public:
static void libthread_init();
static void synchronization_init();
static void liblgrp_init();
static bool liblgrp_init();
// Load miscellaneous symbols.
static void misc_sym_init();
// This boolean allows users to forward their own non-matching signals

View File

@ -1,5 +1,5 @@
/*
* Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,5 +37,6 @@
// platforms, but they may have different default values on other platforms.
//
define_pd_global(bool, UseLargePages, false);
define_pd_global(bool, UseLargePagesIndividualAllocation, true);
define_pd_global(bool, UseOSErrorReporting, false); // for now.
define_pd_global(bool, UseThreadPriorities, true) ;

View File

@ -737,6 +737,17 @@ FILETIME java_to_windows_time(jlong l) {
return result;
}
// For now, we say that Windows does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).
bool os::supports_vtime() { return false; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }
double os::elapsedVTime() {
// better than nothing, but not much
return elapsedTime();
}
jlong os::javaTimeMillis() {
if (UseFakeTimers) {
return fake_time++;
@ -2009,10 +2020,11 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
(UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
// Unguard and retry
// Set memory to RWX and retry
address page_start =
(address) align_size_down((intptr_t) addr, (intptr_t) page_size);
bool res = os::unguard_memory((char*) page_start, page_size);
bool res = os::protect_memory((char*) page_start, page_size,
os::MEM_PROT_RWX);
if (PrintMiscellaneous && Verbose) {
char buf[256];
@ -2206,15 +2218,10 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// We only expect null pointers in the stubs (vtable)
// the rest are checked explicitly now.
//
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != NULL) {
if (VtableStubs::stub_containing(pc) != NULL) {
if (((uintptr_t)addr) < os::vm_page_size() ) {
// an access to the first page of VM--assume it is a null pointer
return Handle_Exception(exceptionInfo,
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL));
}
}
if (((uintptr_t)addr) < os::vm_page_size() ) {
// an access to the first page of VM--assume it is a null pointer
address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
}
}
} // in_java
@ -2230,9 +2237,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// Windows 98 reports faulting addresses incorrectly
if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
!os::win32::is_nt()) {
return Handle_Exception(exceptionInfo,
SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL));
address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
}
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
@ -2582,9 +2588,104 @@ bool os::can_execute_large_page_memory() {
}
char* os::reserve_memory_special(size_t bytes) {
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(NULL, bytes, flag, PAGE_EXECUTE_READWRITE);
return res;
if (UseLargePagesIndividualAllocation) {
if (TracePageSizes && Verbose) {
tty->print_cr("Reserving large pages individually.");
}
char * p_buf;
// First reserve enough address space in advance, since we want to be
// able to break a single contiguous virtual address range into multiple
// large-page commits, but WS2003 does not allow reserving large-page space;
// so we just use 4K pages for the reserve, which gives us a legal contiguous
// address space. Then we deallocate that reservation and re-allocate
// using large pages.
const size_t size_of_reserve = bytes + _large_page_size;
if (bytes > size_of_reserve) {
// Overflowed.
warning("Individually allocated large pages failed, "
"use -XX:-UseLargePagesIndividualAllocation to turn off");
return NULL;
}
p_buf = (char *) VirtualAlloc(NULL,
size_of_reserve, // size of Reserve
MEM_RESERVE,
PAGE_EXECUTE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
release_memory(p_buf, bytes + _large_page_size);
// round up to page boundary. If the size_of_reserve did not
// overflow and the reservation did not fail, this align up
// should not overflow.
p_buf = (char *) align_size_up((size_t)p_buf, _large_page_size);
// now go through and allocate one page at a time until all bytes are
// allocated
size_t bytes_remaining = align_size_up(bytes, _large_page_size);
// An overflow of align_size_up() would have been caught above
// in the calculation of size_of_reserve.
char * next_alloc_addr = p_buf;
#ifdef ASSERT
// Variable for the failure injection
long ran_num = os::random();
size_t fail_after = ran_num % bytes;
#endif
while (bytes_remaining) {
size_t bytes_to_rq = MIN2(bytes_remaining, _large_page_size);
// Note allocate and commit
char * p_new;
#ifdef ASSERT
bool inject_error = LargePagesIndividualAllocationInjectError &&
(bytes_remaining <= fail_after);
#else
const bool inject_error = false;
#endif
if (inject_error) {
p_new = NULL;
} else {
p_new = (char *) VirtualAlloc(next_alloc_addr,
bytes_to_rq,
MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
PAGE_EXECUTE_READWRITE);
}
if (p_new == NULL) {
// Free any allocated pages
if (next_alloc_addr > p_buf) {
// Some memory was committed so release it.
size_t bytes_to_release = bytes - bytes_remaining;
release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
if (UseLargePagesIndividualAllocation &&
LargePagesIndividualAllocationInjectError) {
if (TracePageSizes && Verbose) {
tty->print_cr("Reserving large pages individually failed.");
}
}
#endif
return NULL;
}
bytes_remaining -= bytes_to_rq;
next_alloc_addr += bytes_to_rq;
}
return p_buf;
} else {
// normal policy: just allocate it all at once
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(NULL,
bytes,
flag,
PAGE_EXECUTE_READWRITE);
return res;
}
}
bool os::release_memory_special(char* base, size_t bytes) {
@ -2655,12 +2756,12 @@ bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
bool os::guard_memory(char* addr, size_t bytes) {
DWORD old_status;
return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE | PAGE_GUARD, &old_status) != 0;
return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}
bool os::unguard_memory(char* addr, size_t bytes) {
DWORD old_status;
return VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &old_status) != 0;
return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
@ -2972,6 +3073,7 @@ size_t os::win32::_default_stack_size = 0;
volatile intx os::win32::_os_thread_count = 0;
bool os::win32::_is_nt = false;
bool os::win32::_is_windows_2003 = false;
void os::win32::initialize_system_info() {
@ -2994,7 +3096,15 @@ void os::win32::initialize_system_info() {
GetVersionEx(&oi);
switch(oi.dwPlatformId) {
case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
case VER_PLATFORM_WIN32_NT: _is_nt = true; break;
case VER_PLATFORM_WIN32_NT:
_is_nt = true;
{
int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
if (os_vers == 5002) {
_is_windows_2003 = true;
}
}
break;
default: fatal("Unknown platform");
}
@ -3092,9 +3202,13 @@ void os::init(void) {
NoYieldsInMicrolock = true;
}
#endif
// This may be overridden later when argument processing is done.
FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
os::win32::is_windows_2003());
// Initialize main_process and main_thread
main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle
if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
&main_thread, THREAD_ALL_ACCESS, false, 0)) {
fatal("DuplicateHandle failed\n");
}
@ -3234,6 +3348,10 @@ jint os::init_2(void) {
// initialize thread priority policy
prio_init();
if (UseNUMA && !ForceNUMA) {
UseNUMA = false; // Currently unsupported.
}
return JNI_OK;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,7 @@ class win32 {
static julong _physical_memory;
static size_t _default_stack_size;
static bool _is_nt;
static bool _is_windows_2003;
public:
// Windows-specific interface:
@ -60,6 +61,9 @@ class win32 {
// Tells whether the platform is NT or Windows 95
static bool is_nt() { return _is_nt; }
// Tells whether the platform is Windows 2003
static bool is_windows_2003() { return _is_windows_2003; }
// Returns the byte size of a virtual memory page
static int vm_page_size() { return _vm_page_size; }

View File

@ -1,5 +1,5 @@
//
// Copyright 1999-2006 Sun Microsystems, Inc. All Rights Reserved.
// Copyright 1999-2008 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -103,16 +103,16 @@ encode %{
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
instruct tlsLoadP(eAXRegP dst, eFlagsReg cr) %{
instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
match(Set dst (ThreadLocal));
effect(DEF dst, KILL cr);
format %{ "MOV EAX, Thread::current()" %}
format %{ "MOV $dst, Thread::current()" %}
ins_encode( linux_tlsencode(dst) );
ins_pipe( ialu_reg_fat );
%}
instruct TLS(eAXRegP dst) %{
instruct TLS(eRegP dst) %{
match(Set dst (ThreadLocal));
expand %{

View File

@ -422,10 +422,11 @@ JVM_handle_linux_signal(int sig,
if (addr != last_addr &&
(UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
// Unguard and retry
// Set memory to RWX and retry
address page_start =
(address) align_size_down((intptr_t) addr, (intptr_t) page_size);
bool res = os::unguard_memory((char*) page_start, page_size);
bool res = os::protect_memory((char*) page_start, page_size,
os::MEM_PROT_RWX);
if (PrintMiscellaneous && Verbose) {
char buf[256];

View File

@ -203,10 +203,10 @@ frame os::get_sender_for_C_frame(frame* fr) {
return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
extern "C" intptr_t *_get_previous_fp(); // in .il file.
extern "C" intptr_t *_get_current_fp(); // in .il file
frame os::current_frame() {
intptr_t* fp = _get_previous_fp();
intptr_t* fp = _get_current_fp(); // it's inlined so want current fp
frame myframe((intptr_t*)os::current_stack_pointer(),
(intptr_t*)fp,
CAST_FROM_FN_PTR(address, os::current_frame));
@ -576,10 +576,11 @@ int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_
if (addr != last_addr &&
(UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
// Unguard and retry
// Make memory rwx and retry
address page_start =
(address) align_size_down((intptr_t) addr, (intptr_t) page_size);
bool res = os::unguard_memory((char*) page_start, page_size);
bool res = os::protect_memory((char*) page_start, page_size,
os::MEM_PROT_RWX);
if (PrintMiscellaneous && Verbose) {
char buf[256];

View File

@ -110,16 +110,16 @@ encode %{
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
instruct tlsLoadP(eAXRegP dst, eFlagsReg cr) %{
instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
match(Set dst (ThreadLocal));
effect(DEF dst, KILL cr);
format %{ "MOV EAX, Thread::current()" %}
format %{ "MOV $dst, Thread::current()" %}
ins_encode( solaris_tlsencode(dst) );
ins_pipe( ialu_reg_fat );
%}
instruct TLS(eAXRegP dst) %{
instruct TLS(eRegP dst) %{
match(Set dst (ThreadLocal));
expand %{

View File

@ -37,10 +37,10 @@
movl %gs:0, %eax
.end
// Get callers fp
.inline _get_previous_fp,0
// Get current fp
.inline _get_current_fp,0
.volatile
movl %ebp, %eax
movl %eax, %eax
.end
// Support for jint Atomic::add(jint inc, volatile jint* dest)

View File

@ -30,10 +30,10 @@
movq %fs:0, %rax
.end
// Get the frame pointer from previous frame.
.inline _get_previous_fp,0
// Get the frame pointer from current frame.
.inline _get_current_fp,0
.volatile
movq %rbp, %rax
movq %rax, %rax
.end
// Support for jint Atomic::add(jint add_value, volatile jint* dest)

View File

@ -28,6 +28,7 @@ import com.sun.hotspot.igv.data.InputGraph;
import com.sun.hotspot.igv.data.services.InputGraphProvider;
import java.awt.BorderLayout;
import java.io.Serializable;
import javax.swing.SwingUtilities;
import org.openide.ErrorManager;
import org.openide.explorer.ExplorerManager;
import org.openide.explorer.ExplorerUtils;
@ -151,14 +152,18 @@ final class BytecodeViewTopComponent extends TopComponent implements ExplorerMan
}
public void resultChanged(LookupEvent lookupEvent) {
InputGraphProvider p = Lookup.getDefault().lookup(InputGraphProvider.class);
final InputGraphProvider p = Lookup.getDefault().lookup(InputGraphProvider.class);
if (p != null) {
SwingUtilities.invokeLater(new Runnable() {
public void run() {
InputGraph graph = p.getGraph();
if (graph != null) {
Group g = graph.getGroup();
rootNode.update(graph, g.getMethod());
}
}
});
}
}
final static class ResolvableHelper implements Serializable {
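The BytecodeViewTopComponent hunk above defers the graph lookup to the AWT event dispatch thread via SwingUtilities.invokeLater, which is why the provider local becomes final: an anonymous Runnable may only capture final locals. A minimal sketch of that pattern, with GraphProvider as an illustrative stand-in rather than the real IGV type:

    import javax.swing.SwingUtilities;

    public class EdtUpdateSketch {

        // Illustrative stand-in for the provider obtained from the Lookup.
        interface GraphProvider {
            String getGraph();
        }

        static void resultChanged(final GraphProvider p) {
            if (p != null) {
                // Defer the update to the AWT event dispatch thread; the anonymous
                // Runnable may only capture (effectively) final locals, hence 'final'.
                SwingUtilities.invokeLater(new Runnable() {
                    public void run() {
                        String graph = p.getGraph();
                        if (graph != null) {
                            System.out.println("updating view for " + graph);
                        }
                    }
                });
            }
        }

        public static void main(String[] args) {
            resultChanged(new GraphProvider() {
                public String getGraph() {
                    return "demo-graph";
                }
            });
        }
    }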

View File

@ -33,7 +33,7 @@ import java.awt.Point;
import java.awt.Rectangle;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.HashMap;
import java.util.Set;
import javax.swing.BorderFactory;
import org.netbeans.api.visual.action.ActionFactory;
@ -44,7 +44,6 @@ import org.netbeans.api.visual.action.SelectProvider;
import org.netbeans.api.visual.action.WidgetAction;
import org.netbeans.api.visual.anchor.AnchorFactory;
import org.netbeans.api.visual.anchor.AnchorShape;
import com.sun.hotspot.igv.controlflow.HierarchicalGraphLayout;
import org.netbeans.api.visual.layout.LayoutFactory;
import org.netbeans.api.visual.router.RouterFactory;
import org.netbeans.api.visual.widget.LayerWidget;
@ -61,8 +60,8 @@ import org.openide.util.Lookup;
*/
public class ControlFlowScene extends GraphScene<InputBlock, InputBlockEdge> implements SelectProvider, MoveProvider, RectangularSelectDecorator, RectangularSelectProvider {
private Set<BlockWidget> selection;
private Hashtable<InputBlock, BlockWidget> blockMap;
private HashSet<BlockWidget> selection;
private HashMap<InputBlock, BlockWidget> blockMap;
private InputGraph oldGraph;
private LayerWidget edgeLayer;
private LayerWidget mainLayer;
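The ControlFlowScene hunk swaps the legacy, synchronized Hashtable for HashMap and declares the selection as a HashSet. Assuming the scene is only touched from the event dispatch thread, the unsynchronized collections drop per-call locking. A small sketch of the substitution (BlockMapSketch and its String keys are placeholders, not IGV types); declaring the fields against the Map/Set interfaces, as below, is the more common idiom, whereas the hunk keeps the concrete types:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class BlockMapSketch {

        // Unsynchronized replacements for Hashtable and a synchronized set;
        // safe as long as every access happens on the same (EDT) thread.
        private final Map<String, Integer> blockMap = new HashMap<String, Integer>();
        private final Set<String> selection = new HashSet<String>();

        void select(String blockName, int widgetId) {
            blockMap.put(blockName, widgetId);   // HashMap, unlike Hashtable, also tolerates null values
            selection.add(blockName);
        }

        public static void main(String[] args) {
            BlockMapSketch scene = new BlockMapSketch();
            scene.select("B1", 42);
            System.out.println(scene.blockMap + " " + scene.selection);
        }
    }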

View File

@ -28,6 +28,7 @@ import com.sun.hotspot.igv.data.services.InputGraphProvider;
import java.awt.BorderLayout;
import java.io.Serializable;
import javax.swing.JScrollPane;
import javax.swing.SwingUtilities;
import org.openide.ErrorManager;
import org.openide.util.Lookup;
import org.openide.util.LookupEvent;
@ -143,13 +144,17 @@ final class ControlFlowTopComponent extends TopComponent implements LookupListen
public void resultChanged(LookupEvent lookupEvent) {
InputGraphProvider p = Lookup.getDefault().lookup(InputGraphProvider.class);
final InputGraphProvider p = Lookup.getDefault().lookup(InputGraphProvider.class);
if (p != null) {
SwingUtilities.invokeLater(new Runnable() {
public void run() {
InputGraph g = p.getGraph();
if (g != null) {
scene.setGraph(g);
}
}
});
}
}
@Override

View File

@ -24,6 +24,7 @@
package com.sun.hotspot.igv.coordinator;
import com.sun.hotspot.igv.coordinator.actions.RemoveCookie;
import com.sun.hotspot.igv.data.ChangedListener;
import com.sun.hotspot.igv.data.Group;
import com.sun.hotspot.igv.data.services.GroupOrganizer;
import com.sun.hotspot.igv.data.InputGraph;
@ -50,17 +51,24 @@ public class FolderNode extends AbstractNode {
private List<String> subFolders;
private FolderChildren children;
private static class FolderChildren extends Children.Keys {
private static class FolderChildren extends Children.Keys implements ChangedListener<Group> {
private FolderNode parent;
private List<Group> registeredGroups;
public void setParent(FolderNode parent) {
this.parent = parent;
this.registeredGroups = new ArrayList<Group>();
}
@Override
protected Node[] createNodes(Object arg0) {
for(Group g : registeredGroups) {
g.getChangedEvent().removeListener(this);
}
registeredGroups.clear();
Pair<String, List<Group>> p = (Pair<String, List<Group>>) arg0;
if (p.getLeft().length() == 0) {
@ -69,6 +77,8 @@ public class FolderNode extends AbstractNode {
for (InputGraph graph : g.getGraphs()) {
curNodes.add(new GraphNode(graph));
}
g.getChangedEvent().addListener(this);
registeredGroups.add(g);
}
Node[] result = new Node[curNodes.size()];
@ -85,7 +95,13 @@ public class FolderNode extends AbstractNode {
@Override
public void addNotify() {
this.setKeys(parent.structure);
}
public void changed(Group source) {
List<Pair<String, List<Group>>> newStructure = new ArrayList<Pair<String, List<Group>>>();
for(Pair<String, List<Group>> p : parent.structure) {
refreshKey(p);
}
}
}
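The FolderNode hunk turns the children container into a ChangedListener<Group>: each createNodes pass first detaches the listener from the groups registered last time, then re-registers on every group it renders, and changed() refreshes the displayed keys. A self-contained sketch of that detach/re-register/refresh cycle, with hypothetical Group and ChangedListener stand-ins rather than the real IGV classes:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ListenerRefreshSketch {

        // Hypothetical stand-ins for the IGV ChangedListener/Group pair.
        interface ChangedListener<T> {
            void changed(T source);
        }

        static class Group {
            private final List<ChangedListener<Group>> listeners =
                    new ArrayList<ChangedListener<Group>>();
            void addListener(ChangedListener<Group> l)    { listeners.add(l); }
            void removeListener(ChangedListener<Group> l) { listeners.remove(l); }
            void fireChanged() {
                for (ChangedListener<Group> l : new ArrayList<ChangedListener<Group>>(listeners)) {
                    l.changed(this);
                }
            }
        }

        static class Children implements ChangedListener<Group> {
            private final List<Group> registered = new ArrayList<Group>();

            // Re-run whenever the nodes are rebuilt: drop stale registrations
            // first, then listen on every group currently displayed.
            void createNodes(List<Group> groups) {
                for (Group g : registered) {
                    g.removeListener(this);
                }
                registered.clear();
                for (Group g : groups) {
                    g.addListener(this);
                    registered.add(g);
                }
            }

            public void changed(Group source) {
                System.out.println("group changed, refreshing keys");
            }
        }

        public static void main(String[] args) {
            Group g = new Group();
            Children c = new Children();
            c.createNodes(Arrays.asList(g));
            g.fireChanged();   // prints the refresh message
        }
    }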

View File

@ -31,7 +31,7 @@ import java.util.List;
*
* @author Thomas Wuerthinger
*/
public class GraphDocument extends Properties.Object implements ChangedEventProvider<GraphDocument> {
public class GraphDocument extends Properties.Entity implements ChangedEventProvider<GraphDocument> {
private List<Group> groups;
private ChangedEvent<GraphDocument> changedEvent;

View File

@ -37,7 +37,7 @@ import java.util.Set;
*
* @author Thomas Wuerthinger
*/
public class Group extends Properties.Object implements ChangedEventProvider<Group> {
public class Group extends Properties.Entity implements ChangedEventProvider<Group> {
private List<InputGraph> graphs;
private transient ChangedEvent<Group> changedEvent;

View File

@ -23,26 +23,25 @@
*/
package com.sun.hotspot.igv.data;
import com.sun.hotspot.igv.data.Properties;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
*
* @author Thomas Wuerthinger
*/
public class InputGraph extends Properties.Object {
public class InputGraph extends Properties.Entity {
private Map<Integer, InputNode> nodes;
private Set<InputEdge> edges;
private HashMap<Integer, InputNode> nodes;
private ArrayList<InputEdge> edges;
private Group parent;
private Map<String, InputBlock> blocks;
private Map<Integer, InputBlock> nodeToBlock;
private HashMap<String, InputBlock> blocks;
private HashMap<Integer, InputBlock> nodeToBlock;
private boolean isDifferenceGraph;
public InputGraph(Group parent) {
@ -61,10 +60,10 @@ public class InputGraph extends Properties.Object {
public InputGraph(Group parent, InputGraph last, String name) {
this.parent = parent;
setName(name);
nodes = new Hashtable<Integer, InputNode>();
edges = new HashSet<InputEdge>();
blocks = new Hashtable<String, InputBlock>();
nodeToBlock = new Hashtable<Integer, InputBlock>();
nodes = new HashMap<Integer, InputNode>();
edges = new ArrayList<InputEdge>();
blocks = new HashMap<String, InputBlock>();
nodeToBlock = new HashMap<Integer, InputBlock>();
if (last != null) {
for (InputNode n : last.getNodes()) {
@ -182,8 +181,8 @@ public class InputGraph extends Properties.Object {
return nodes.remove(index);
}
public Set<InputEdge> getEdges() {
return Collections.unmodifiableSet(edges);
public Collection<InputEdge> getEdges() {
return Collections.unmodifiableList(edges);
}
public void removeEdge(InputEdge c) {
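In InputGraph the Hashtable-backed maps become HashMaps and the edge set becomes an ArrayList, so getEdges() now hands out an unmodifiable Collection instead of a Set. The apparent trade-off: a list preserves insertion order and skips hashing every edge, but no longer de-duplicates, so callers are trusted not to add the same edge twice. A short sketch of the read-only-view idiom (EdgeListSketch and its String edges are illustrative only):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;

    public class EdgeListSketch {

        private final List<String> edges = new ArrayList<String>();

        void addEdge(String e) {
            edges.add(e);   // list: keeps insertion order, allows duplicates, no hashing
        }

        // Callers get a read-only view; mutation attempts fail fast.
        Collection<String> getEdges() {
            return Collections.unmodifiableList(edges);
        }

        public static void main(String[] args) {
            EdgeListSketch g = new EdgeListSketch();
            g.addEdge("0->1");
            g.addEdge("1->2");
            System.out.println(g.getEdges());
            try {
                g.getEdges().add("2->3");
            } catch (UnsupportedOperationException expected) {
                System.out.println("view is read-only");
            }
        }
    }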

View File

@ -32,7 +32,7 @@ import java.util.List;
*
* @author Thomas Wuerthinger
*/
public class InputMethod extends Properties.Object {
public class InputMethod extends Properties.Entity {
private String name;
private int bci;

View File

@ -27,7 +27,7 @@ package com.sun.hotspot.igv.data;
*
* @author Thomas Wuerthinger
*/
public class InputNode extends Properties.Object {
public class InputNode extends Properties.Entity {
private int id;

View File

@ -26,24 +26,22 @@ package com.sun.hotspot.igv.data;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
*
* @author Thomas Wuerthinger
*/
public class Properties implements Serializable {
public class Properties implements Serializable, Iterable<Property> {
public static final long serialVersionUID = 1L;
private Map<String, Property> map;
private String[] map = new String[4];
public Properties() {
map = new HashMap<String, Property>(5);
}
@Override
@ -54,10 +52,7 @@ public class Properties implements Serializable {
Properties p = (Properties) o;
if (getProperties().size() != p.getProperties().size()) {
return false;
}
for (Property prop : getProperties()) {
for (Property prop : this) {
String value = p.get(prop.getName());
if (value == null || !value.equals(prop.getValue())) {
return false;
@ -75,32 +70,33 @@ public class Properties implements Serializable {
public Properties(String name, String value) {
this();
this.add(new Property(name, value));
this.setProperty(name, value);
}
public Properties(String name, String value, String name1, String value1) {
this(name, value);
this.add(new Property(name1, value1));
this.setProperty(name1, value1);
}
public Properties(String name, String value, String name1, String value1, String name2, String value2) {
this(name, value, name1, value1);
this.add(new Property(name2, value2));
this.setProperty(name2, value2);
}
public Properties(Properties p) {
map = new HashMap<String, Property>(p.map);
map = new String[p.map.length];
System.arraycopy(map, 0, p.map, 0, p.map.length);
}
public static class Object implements Provider {
public static class Entity implements Provider {
private Properties properties;
public Object() {
public Entity() {
properties = new Properties();
}
public Object(Properties.Object object) {
public Entity(Properties.Entity object) {
properties = new Properties(object.getProperties());
}
@ -109,6 +105,14 @@ public class Properties implements Serializable {
}
}
private String getProperty(String key) {
for (int i = 0; i < map.length; i += 2)
if (map[i] != null && map[i].equals(key)) {
return map[i + 1];
}
return null;
}
public interface PropertyMatcher {
String getName();
@ -173,13 +177,15 @@ public class Properties implements Serializable {
}
public Property selectSingle(PropertyMatcher matcher) {
Property p = this.map.get(matcher.getName());
if (p == null) {
return null;
String value = null;
for (int i = 0; i < map.length; i += 2) {
if (map[i] != null && matcher.getName().equals(map[i])) {
value = map[i + 1];
break;
}
}
if (matcher.match(p.getValue())) {
return p;
if (value != null && matcher.match(value)) {
return new Property(matcher.getName(), value);
} else {
return null;
}
@ -194,8 +200,11 @@ public class Properties implements Serializable {
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("[");
for (Property p : map.values()) {
sb.append(p.toString());
for (int i = 0; i < map.length; i += 2) {
if (map[i + 1] != null) {
String p = map[i + 1];
sb.append(map[i] + " = " + map[i + 1] + "; ");
}
}
return sb.append("]").toString();
}
@ -241,41 +250,51 @@ public class Properties implements Serializable {
}
public String get(String key) {
Property p = map.get(key);
if (p == null) {
return null;
} else {
return p.getValue();
}
}
public String getProperty(String string) {
return get(string);
}
public Property setProperty(String name, String value) {
if (value == null) {
// remove this property
return map.remove(name);
} else {
Property p = map.get(name);
if (p == null) {
p = new Property(name, value);
map.put(name, p);
} else {
p.setValue(value);
for (int i = 0; i < map.length; i += 2) {
if (map[i] != null && map[i].equals(key)) {
return map[i + 1];
}
return p;
}
return null;
}
public Collection<Property> getProperties() {
return Collections.unmodifiableCollection(map.values());
public void setProperty(String name, String value) {
for (int i = 0; i < map.length; i += 2) {
if (map[i] != null && map[i].equals(name)) {
String p = map[i + 1];
if (value == null) {
// remove this property
map[i] = null;
map[i + 1] = null;
} else {
map[i + 1] = value;
}
return;
}
}
if (value == null) {
return;
}
for (int i = 0; i < map.length; i += 2) {
if (map[i] == null) {
map[i] = name;
map[i + 1] = value;
return;
}
}
String[] newMap = new String[map.length + 4];
System.arraycopy(map, 0, newMap, 0, map.length);
newMap[map.length] = name;
newMap[map.length + 1] = value;
map = newMap;
}
public Iterator<Property> getProperties() {
return iterator();
}
public void add(Properties properties) {
for (Property p : properties.getProperties()) {
for (Property p : properties) {
add(p);
}
}
@ -283,6 +302,35 @@ public class Properties implements Serializable {
public void add(Property property) {
assert property.getName() != null;
assert property.getValue() != null;
map.put(property.getName(), property);
setProperty(property.getName(), property.getValue());
}
class PropertiesIterator implements Iterator<Property>, Iterable<Property> {
public Iterator<Property> iterator() {
return this;
}
int index;
public boolean hasNext() {
while (index < map.length && map[index + 1] == null)
index += 2;
return index < map.length;
}
public Property next() {
if (index < map.length) {
index += 2;
return new Property(map[index - 2], map[index - 1]);
}
return null;
}
public void remove() {
throw new UnsupportedOperationException("Not supported yet.");
}
}
public Iterator<Property> iterator() {
return new PropertiesIterator();
}
}
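The rewritten Properties class above stores key/value pairs in one flat String[] — keys at even indexes, values at odd ones, a null key marking a free slot — growing four slots at a time, and exposes the pairs through the new PropertiesIterator instead of keeping a HashMap per object. For the handful of properties a graph node typically carries (an assumption on my part), a linear scan over a compact array is cheaper in both footprint and time than a dedicated HashMap. A stripped-down sketch of the same storage scheme:

    public class FlatPropertiesSketch {

        // Even index = key, odd index = value; a null key marks a free slot.
        private String[] map = new String[4];

        public String get(String key) {
            for (int i = 0; i < map.length; i += 2) {
                if (map[i] != null && map[i].equals(key)) {
                    return map[i + 1];
                }
            }
            return null;
        }

        public void setProperty(String name, String value) {
            // Overwrite (or clear, when value == null) an existing entry.
            for (int i = 0; i < map.length; i += 2) {
                if (map[i] != null && map[i].equals(name)) {
                    if (value == null) {
                        map[i] = null;
                        map[i + 1] = null;
                    } else {
                        map[i + 1] = value;
                    }
                    return;
                }
            }
            if (value == null) {
                return;
            }
            // Reuse a free slot if one exists.
            for (int i = 0; i < map.length; i += 2) {
                if (map[i] == null) {
                    map[i] = name;
                    map[i + 1] = value;
                    return;
                }
            }
            // Otherwise grow by two key/value pairs and append.
            String[] newMap = new String[map.length + 4];
            System.arraycopy(map, 0, newMap, 0, map.length);
            newMap[map.length] = name;
            newMap[map.length + 1] = value;
            map = newMap;
        }

        public static void main(String[] args) {
            FlatPropertiesSketch p = new FlatPropertiesSketch();
            p.setProperty("name", "Phi");
            p.setProperty("idx", "42");
            p.setProperty("idx", "43");        // overwrite
            System.out.println(p.get("name") + " " + p.get("idx"));   // Phi 43
            p.setProperty("name", null);       // remove
            System.out.println(p.get("name")); // null
        }
    }

One detail worth noting in the hunk itself: the new copy constructor appears to pass the freshly allocated empty array as the arraycopy source (System.arraycopy(map, 0, p.map, 0, p.map.length)), which would leave the copy blank; the conventional argument order would read System.arraycopy(p.map, 0, map, 0, p.map.length), as in the sketch's growth path.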

View File

@ -32,18 +32,19 @@ import java.io.Serializable;
public class Property implements Serializable {
public static final long serialVersionUID = 1L;
private String name;
private String value;
public Property() {
private Property() {
this(null, null);
}
public Property(Property p) {
private Property(Property p) {
this(p.getName(), p.getValue());
}
public Property(String name) {
private Property(String name) {
this(name, null);
}
@ -60,16 +61,19 @@ public class Property implements Serializable {
return value;
}
public void setName(String s) {
this.name = s;
}
public void setValue(String s) {
this.value = s;
}
@Override
public String toString() {
return name + " = " + value + "; ";
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Property)) return false;
Property p2 = (Property)o;
return name.equals(p2.name) && value.equals(p2.value);
}
@Override
public int hashCode() {
return name.hashCode() + value == null ? 0 : value.hashCode();
}
}
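The Property hunk above makes the no-arg, copy, and name-only constructors private, removes the setters (so a Property is effectively immutable once built), and adds equals/hashCode. As written, the hashCode expression name.hashCode() + value == null ? 0 : value.hashCode() parses as ((name.hashCode() + value) == null) ? 0 : value.hashCode(), because + binds tighter than == and the conditional: the int hash is concatenated with the value string, that string is never null, so the name's hash never contributes and a null value still throws. A small reconstruction of what I take the intent to be (my parenthesisation, not the committed code):

    public final class PropertySketch {

        private final String name;
        private final String value;

        public PropertySketch(String name, String value) {
            this.name = name;
            this.value = value;
        }

        @Override
        public boolean equals(Object o) {
            if (!(o instanceof PropertySketch)) return false;
            PropertySketch p = (PropertySketch) o;
            return name.equals(p.name)
                    && (value == null ? p.value == null : value.equals(p.value));
        }

        @Override
        public int hashCode() {
            // Parentheses keep the conditional from swallowing the addition.
            return name.hashCode() + (value == null ? 0 : value.hashCode());
        }

        public static void main(String[] args) {
            PropertySketch a = new PropertySketch("block", "B7");
            PropertySketch b = new PropertySketch("block", "B7");
            System.out.println(a.equals(b) && a.hashCode() == b.hashCode());   // true
        }
    }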

View File

@ -38,6 +38,7 @@ import com.sun.hotspot.igv.data.serialization.XMLParser.HandoverElementHandler;
import com.sun.hotspot.igv.data.serialization.XMLParser.ParseMonitor;
import com.sun.hotspot.igv.data.serialization.XMLParser.TopElementHandler;
import java.io.IOException;
import java.util.HashMap;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.XMLReader;
@ -88,6 +89,18 @@ public class Parser {
private TopElementHandler xmlDocument = new TopElementHandler();
private boolean difference;
private GroupCallback groupCallback;
private HashMap<String, Integer> idCache = new HashMap<String, Integer>();
private int maxId = 0;
private int lookupID(String i) {
Integer id = idCache.get(i);
if (id == null) {
id = maxId++;
idCache.put(i, id);
}
return id.intValue();
}
// <graphDocument>
private ElementHandler<GraphDocument, Object> topHandler = new ElementHandler<GraphDocument, Object>(TOP_ELEMENT) {
@ -187,13 +200,13 @@ public class Parser {
previous = null;
}
InputGraph curGraph = new InputGraph(getParentObject(), previous, name);
getParentObject().addGraph(curGraph);
this.graph = curGraph;
return curGraph;
}
@Override
protected void end(String text) throws SAXException {
getParentObject().addGraph(graph);
graph.resolveBlockLinks();
}
};
@ -207,7 +220,7 @@ public class Parser {
@Override
protected InputBlock start() throws SAXException {
InputGraph graph = getParentObject();
String name = readRequiredAttribute(BLOCK_NAME_PROPERTY);
String name = readRequiredAttribute(BLOCK_NAME_PROPERTY).intern();
InputBlock b = new InputBlock(getParentObject(), name);
graph.addBlock(b);
return b;
@ -224,7 +237,7 @@ public class Parser {
int id = 0;
try {
id = Integer.parseInt(s);
id = lookupID(s);
} catch (NumberFormatException e) {
throw new SAXException(e);
}
@ -252,7 +265,7 @@ public class Parser {
String s = readRequiredAttribute(NODE_ID_PROPERTY);
int id = 0;
try {
id = Integer.parseInt(s);
id = lookupID(s);
} catch (NumberFormatException e) {
throw new SAXException(e);
}
@ -269,7 +282,7 @@ public class Parser {
String s = readRequiredAttribute(NODE_ID_PROPERTY);
int id = 0;
try {
id = Integer.parseInt(s);
id = lookupID(s);
} catch (NumberFormatException e) {
throw new SAXException(e);
}
@ -280,7 +293,7 @@ public class Parser {
private HandoverElementHandler<InputGraph> edgesHandler = new HandoverElementHandler<InputGraph>(EDGES_ELEMENT);
// Local class for edge elements
private static class EdgeElementHandler extends ElementHandler<InputEdge, InputGraph> {
private class EdgeElementHandler extends ElementHandler<InputEdge, InputGraph> {
public EdgeElementHandler(String name) {
super(name);
@ -298,8 +311,8 @@ public class Parser {
toIndex = Integer.parseInt(toIndexString);
}
from = Integer.parseInt(readRequiredAttribute(FROM_PROPERTY));
to = Integer.parseInt(readRequiredAttribute(TO_PROPERTY));
from = lookupID(readRequiredAttribute(FROM_PROPERTY));
to = lookupID(readRequiredAttribute(TO_PROPERTY));
} catch (NumberFormatException e) {
throw new SAXException(e);
}
@ -344,18 +357,16 @@ public class Parser {
}
};
// <property>
private ElementHandler<Property, Properties.Provider> propertyHandler = new XMLParser.ElementHandler<Property, Properties.Provider>(PROPERTY_ELEMENT, true) {
private ElementHandler<String, Properties.Provider> propertyHandler = new XMLParser.ElementHandler<String, Properties.Provider>(PROPERTY_ELEMENT, true) {
@Override
public Property start() throws SAXException {
String value = "";
String name = readRequiredAttribute(PROPERTY_NAME_PROPERTY).intern();
return getParentObject().getProperties().setProperty(name, value);
public String start() throws SAXException {
return readRequiredAttribute(PROPERTY_NAME_PROPERTY).intern();
}
@Override
public void end(String text) {
getObject().setValue(text.trim().intern());
getParentObject().getProperties().setProperty(getObject(), text.trim().intern());
}
};
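The Parser changes above route every textual node id through the new lookupID helper, which caches the strings in a HashMap and hands out dense ints in order of first appearance (so documents with large or sparse id strings still get small ids), intern block and property names, and register the graph with its group in start() rather than end(). A self-contained sketch of the id-compaction idiom:

    import java.util.HashMap;

    public class IdCacheSketch {

        private final HashMap<String, Integer> idCache = new HashMap<String, Integer>();
        private int maxId = 0;

        // Map arbitrary textual ids to small, dense ints; a repeated id
        // returns the value assigned when it was first seen.
        int lookupID(String s) {
            Integer id = idCache.get(s);
            if (id == null) {
                id = maxId++;
                idCache.put(s, id);
            }
            return id.intValue();
        }

        public static void main(String[] args) {
            IdCacheSketch c = new IdCacheSketch();
            System.out.println(c.lookupID("1234"));   // 0
            System.out.println(c.lookupID("99"));     // 1
            System.out.println(c.lookupID("1234"));   // 0 again
        }
    }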

View File

@ -67,7 +67,7 @@ public class Printer {
private void export(XMLWriter writer, Group g) throws IOException {
Properties attributes = new Properties();
attributes.add(new Property("difference", Boolean.toString(true)));
attributes.setProperty("difference", Boolean.toString(true));
writer.startTag(Parser.GROUP_ELEMENT, attributes);
writer.writeProperties(g.getProperties());

Some files were not shown because too many files have changed in this diff.