Merge

commit e585259ea2

.hgtags:
@@ -225,3 +225,5 @@ ea73f01b9053e7165e7ba80f242bafecbc6af712 jdk8-b96
 edb01c460d4cab21ff0ff13512df7b746efaa0e7 jdk8-b101
 bbe43d712fe08e650808d774861b256ccb34e500 jdk8-b102
 30a1d677a20c6a95f98043d8f20ce570304e3818 jdk8-b103
+b5ed503c26ad38869c247c5e32debec217fd056b jdk8-b104
+589f4fdc584e373a47cde0162e9eceec9165c381 jdk8-b105
.hgtags:

@@ -226,3 +226,5 @@ d2dcb110e9dbaf9903c05b211df800e78e4b394e jdk8-b100
 5eb3c1dc348f72a7f84f7d9d07834e8bbe09a799 jdk8-b102
 b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103
 96c1b9b7524b52c3fcefc90ffad4c767396727c8 jdk8-b104
+5166118c59178b5d31001bc4058e92486ee07d9b jdk8-b105
+8e7b4d9fb00fdf1334376aeac050c9bca6d1b383 jdk8-b106
@@ -69,11 +69,11 @@ else
 # Run the makefile with an arbitraty SPEC using -p -q (quiet dry-run and dump rules) to find
 # available PHONY targets. Use this list as valid targets to pass on to the repeated calls.
 all_phony_targets=$(filter-out $(global_targets) bundles-only, $(strip $(shell \
-	$(MAKE) -p -q -f common/makefiles/Main.gmk SPEC=$(firstword $(SPEC)) | \
+	$(MAKE) -p -q -f common/makefiles/Main.gmk FRC SPEC=$(firstword $(SPEC)) | \
 	grep ^.PHONY: | head -n 1 | cut -d " " -f 2-)))

 $(all_phony_targets):
-	$(foreach spec,$(SPEC),($(MAKE) -f NewMakefile.gmk SPEC=$(spec) \
+	@$(foreach spec,$(SPEC),($(MAKE) -f NewMakefile.gmk SPEC=$(spec) \
 	$(VERBOSE) VERBOSE=$(VERBOSE) LOG_LEVEL=$(LOG_LEVEL) $@) &&) true

 .PHONY: $(all_phony_targets)
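The PHONY-discovery pipeline patched above can be tried in isolation. A minimal sketch against a hypothetical throwaway makefile (GNU make assumed; the pipeline itself is the one from the hunk):

    #!/bin/sh
    # Toy makefile declaring two phony targets (hypothetical example).
    cat > /tmp/toy.mk <<'EOF'
    .PHONY: all clean
    all:
    	@echo building
    clean:
    	@echo cleaning
    EOF
    # -p dumps make's internal database; -q keeps recipes from running.
    # The database contains a ".PHONY: ..." line naming the declared
    # targets, and cut strips the ".PHONY:" prefix, printing "all clean".
    make -p -q -f /tmp/toy.mk 2>/dev/null | grep '^.PHONY:' | head -n 1 | cut -d ' ' -f 2-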
@@ -154,7 +154,7 @@
 </code>
 </blockquote>
 Once you have all the repositories, keep in mind that each
-repository is it's own independent repository.
+repository is its own independent repository.
 You can also re-run <code>./get_source.sh</code> anytime to
 pull over all the latest changesets in all the repositories.
 This set of nested repositories has been given the term
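A usage sketch of the re-run described above (checkout location is assumed):

    #!/bin/sh
    # From the root of an existing forest checkout, pull the latest
    # changesets into every nested repository in one go.
    cd ~/src/jdk8        # hypothetical checkout location
    sh ./get_source.sh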
@@ -241,6 +241,14 @@
 source code for the OpenJDK Corba functionality
 </td>
 </tr>
+<tr>
+<td>
+nashorn
+</td>
+<td>
+source code for the OpenJDK JavaScript implementation
+</td>
+</tr>
 </tbody>
 </table>
 </blockquote>
@@ -386,7 +394,7 @@
 <code>--with-boot-jdk</code>.
 </li>
 <li>
-Insure that GNU make, the Bootstrap JDK,
+Ensure that GNU make, the Bootstrap JDK,
 and the compilers are all
 in your PATH environment variable
 </li>
@@ -1202,19 +1210,18 @@
 <blockquote>

 <p>
-<b>Q:</b> The <code>configure</code> file looks horrible!
+<b>Q:</b> The <code>generated-configure.sh</code> file looks horrible!
 How are you going to edit it?
 <br>
-<b>A:</b> The <code>configure</code> file is generated (think
+<b>A:</b> The <code>generated-configure.sh</code> file is generated (think
 "compiled") by the autoconf tools. The source code is
-in <code>configure.ac</code> various .m4 files in common/autoconf,
-which are
-much more readable.
+in <code>configure.ac</code> and various .m4 files in common/autoconf,
+which are much more readable.
 </p>

 <p>
 <b>Q:</b>
-Why is the <code>configure</code> file checked in,
+Why is the <code>generated-configure.sh</code> file checked in,
 if it is generated?
 <br>
 <b>A:</b>
@@ -1229,13 +1236,29 @@
 <p>
 <b>Q:</b>
 Do you require a specific version of autoconf for regenerating
-<code>configure</code>?
+<code>generated-configure.sh</code>?
 <br>
 <b>A:</b>
-Currently, no, but this will likely be the case when things have
-settled down a bit more. (The reason for this is to avoid
-large spurious changes in <code>configure</code>
-in commits that made small changes to <code>configure.ac</code>).
+Yes, version 2.69 is required and should be easy
+enough to aquire on all supported operating
+systems. The reason for this is to avoid
+large spurious changes in <code>generated-configure.sh</code>.
+</p>
+
+<p>
+<b>Q:</b>
+How do you regenerate <code>generated-configure.sh</code>
+after making changes to the input files?
+<br>
+<b>A:</b>
+Regnerating <code>generated-configure.sh</code>
+should always be done using the
+script <code>common/autoconf/autogen.sh</code> to
+ensure that the correct files get updated. This
+script should also be run after mercurial tries to
+merge <code>generated-configure.sh</code> as a
+merge of the generated file is not guaranteed to
+be correct.
 </p>

 <p>
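The workflow in the new FAQ entry reduces to one command; a minimal sketch, run from the top-level repository (script path taken from the answer above):

    #!/bin/sh
    # After editing configure.ac or any .m4 file under common/autoconf,
    # regenerate the checked-in script through the wrapper so that all
    # derived files are rebuilt consistently.
    sh common/autoconf/autogen.sh
    # The same step is recommended after mercurial has merged
    # generated-configure.sh, since a textual merge of generated
    # output is not guaranteed to be correct.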
@@ -1307,9 +1330,9 @@
 you will need to modify the makefiles. But for normal file
 additions or removals, no changes are needed. There are certan
 exceptions for some native libraries where the source files are spread
-over many directories which also contain courses for other
+over many directories which also contain sources for other
 libraries. In these cases it was simply easier to create include lists
-rather thane excludes.
+rather than excludes.
 </p>

 <p>
@@ -1327,14 +1350,14 @@
 <p>
 <b>Q:</b>
 <code>configure</code> provides OpenJDK-specific features such as
-<code>--enable-jigsaw</code> or <code>--with-builddeps-server</code>
-that are not described in this document. What about those?
+<code>--with-builddeps-server</code> that are not
+described in this document. What about those?
 <br>
 <b>A:</b>
 Try them out if you like! But be aware that most of these are
 experimental features.
 Many of them don't do anything at all at the moment; the option
-is just a placeholder. Other depends on
+is just a placeholder. Others depend on
 pieces of code or infrastructure that is currently
 not ready for prime time.
 </p>
@@ -1385,24 +1408,6 @@
 system and some will need to wait until after.
 </p>

-<p>
-<b>Q:</b> What is @GenerateNativeHeaders?
-<br>
-<b>A:</b>
-To speed up compilation, we added a flag to javac which makes it
-do the job of javah as well, as a by-product; that is, generating
-native .h header files. These files are only generated
-if a class contains native methods. However, sometimes
-a class contains no native method,
-but still contains constants that native code needs to use.
-The new GenerateNativeHeaders annotation tells javac to
-force generation of a
-header file in these cases. (We don't want to generate
-native headers for all classes that contains constants
-but no native methods, since
-that would slow down the compilation process needlessly.)
-</p>
-
 <p>
 <b>Q:</b>
 Is anything able to use the results of the new build's default make target?
@@ -1429,10 +1434,9 @@
 What should I do?
 <br>
 <b>A:</b>
-It might very well be that we have missed to add support for
+It might very well be that we have neglected to add support for
 an option that was actually used from outside the build system.
-Email us and we will
-add support for it!
+Email us and we will add support for it!
 </p>

 </blockquote>

@@ -44,10 +44,8 @@ fi
 custom_hook=$custom_script_dir/custom-hook.m4

 AUTOCONF="`which autoconf 2> /dev/null | grep -v '^no autoconf in'`"
-AUTOCONF_267="`which autoconf-2.67 2> /dev/null | grep -v '^no autoconf-2.67 in'`"

 echo "Autoconf found: ${AUTOCONF}"
-echo "Autoconf-2.67 found: ${AUTOCONF_267}"

 if test "x${AUTOCONF}" = x; then
 echo You need autoconf installed to be able to regenerate the configure script
@@ -55,10 +53,6 @@ if test "x${AUTOCONF}" = x; then
 exit 1
 fi

-if test "x${AUTOCONF_267}" != x; then
-AUTOCONF=${AUTOCONF_267};
-fi
-
 echo Generating generated-configure.sh with ${AUTOCONF}
 cat $script_dir/configure.ac | sed -e "s|@DATE_WHEN_GENERATED@|$TIMESTAMP|" | ${AUTOCONF} -W all -I$script_dir - > $script_dir/generated-configure.sh
 rm -rf autom4te.cache
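With the autoconf-2.67 fallback removed, autogen.sh simply uses the first autoconf on the PATH, and the AC_PREREQ bump below enforces the minimum. A quick sanity check, as a sketch:

    #!/bin/sh
    # generated-configure.sh can only be regenerated with autoconf >= 2.69;
    # AC_PREREQ([2.69]) makes older versions abort with an explicit error.
    autoconf --version | head -n 1    # expect: autoconf (GNU Autoconf) 2.69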
@@ -30,7 +30,7 @@
 ###############################################################################


-AC_PREREQ([2.61])
+AC_PREREQ([2.69])
 AC_INIT(OpenJDK, jdk8, build-dev@openjdk.java.net,,http://openjdk.java.net)

 AC_CONFIG_AUX_DIR([build-aux])
(File diff suppressed because it is too large.)
@@ -83,7 +83,7 @@ apt_help() {
 pulse)
 PKGHANDLER_COMMAND="sudo apt-get install libpulse-dev" ;;
 x11)
-PKGHANDLER_COMMAND="sudo apt-get install libX11-dev libxext-dev libxrender-dev libxtst-dev" ;;
+PKGHANDLER_COMMAND="sudo apt-get install libX11-dev libxext-dev libxrender-dev libxtst-dev libxt-dev" ;;
 ccache)
 PKGHANDLER_COMMAND="sudo apt-get install ccache" ;;
 * )
@@ -102,11 +102,11 @@ yum_help() {
 cups)
 PKGHANDLER_COMMAND="sudo yum install cups-devel" ;;
 freetype2)
-PKGHANDLER_COMMAND="sudo yum install freetype2-devel" ;;
+PKGHANDLER_COMMAND="sudo yum install freetype-devel" ;;
 pulse)
 PKGHANDLER_COMMAND="sudo yum install pulseaudio-libs-devel" ;;
 x11)
-PKGHANDLER_COMMAND="sudo yum install libXtst-devel" ;;
+PKGHANDLER_COMMAND="sudo yum install libXtst-devel libXt-devel libXrender-devel" ;;
 ccache)
 PKGHANDLER_COMMAND="sudo yum install ccache" ;;
 * )
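These case arms exist so that a failed dependency probe can print a ready-to-run command. A minimal sketch of the same dispatch outside configure (the package-manager detection is an assumption; the command strings are the ones from the hunks above):

    #!/bin/sh
    # Resolve the suggested install command for the "x11" dependency.
    if command -v apt-get >/dev/null 2>&1; then
      PKGHANDLER_COMMAND="sudo apt-get install libX11-dev libxext-dev libxrender-dev libxtst-dev libxt-dev"
    elif command -v yum >/dev/null 2>&1; then
      PKGHANDLER_COMMAND="sudo yum install libXtst-devel libXt-devel libXrender-devel"
    fi
    echo "You might be able to fix this by running: $PKGHANDLER_COMMAND"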

@@ -185,7 +185,7 @@ OLD_CFLAGS="$CFLAGS"
 CFLAGS="$CFLAGS $X_CFLAGS"

 # Need to include Xlib.h and Xutil.h to avoid "present but cannot be compiled" warnings on Solaris 10
-AC_CHECK_HEADERS([X11/extensions/shape.h X11/extensions/Xrender.h X11/extensions/XTest.h],
+AC_CHECK_HEADERS([X11/extensions/shape.h X11/extensions/Xrender.h X11/extensions/XTest.h X11/Intrinsic.h],
 [X11_A_OK=yes],
 [X11_A_OK=no; break],
 [ # include <X11/Xlib.h>
@@ -197,7 +197,7 @@ AC_LANG_POP(C)

 if test "x$X11_A_OK" = xno && test "x$X11_NOT_NEEDED" != xyes; then
 HELP_MSG_MISSING_DEPENDENCY([x11])
-AC_MSG_ERROR([Could not find all X11 headers (shape.h Xrender.h XTest.h). $HELP_MSG])
+AC_MSG_ERROR([Could not find all X11 headers (shape.h Xrender.h XTest.h Intrinsic.h). $HELP_MSG])
 fi

 AC_SUBST(X_CFLAGS)
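AC_CHECK_HEADERS compiles one small probe per header, using the fourth argument as a prelude, and the error path above fires when any probe fails. Roughly the same check by hand, as a sketch (the cc/-fsyntax-only invocation is an assumption):

    #!/bin/sh
    # Try to compile a unit that includes X11/Intrinsic.h together with
    # the Xlib/Xutil prerequisites noted in the comment above.
    cat > /tmp/probe.c <<'EOF'
    #include <X11/Xlib.h>
    #include <X11/Xutil.h>
    #include <X11/Intrinsic.h>
    int main(void) { return 0; }
    EOF
    if cc -fsyntax-only /tmp/probe.c 2>/dev/null; then
      echo "X11/Intrinsic.h: yes"
    else
      echo "X11/Intrinsic.h: no (see the x11 package suggestions above)"
    fi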

@@ -242,7 +242,7 @@ clean-test:

 .PHONY: langtools corba jaxp jaxws hotspot jdk nashorn images overlay-images install test docs
 .PHONY: langtools-only corba-only jaxp-only jaxws-only hotspot-only jdk-only nashorn-only images-only overlay-images-only install-only test-only docs-only
-.PHONY: all clean dist-clean bootcycle-images start-make
+.PHONY: default all clean dist-clean bootcycle-images start-make
 .PHONY: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jdk clean-nashorn clean-images clean-docs clean-test clean-overlay-images clean-bootcycle-build
 .PHONY: profiles profiles-only profiles-oscheck

.hgtags:

@@ -225,3 +225,5 @@ c8286839d0df04aba819ec4bef12b86babccf30e jdk8-b90
 a013024b07475782f1fa8e196e950b34b4077663 jdk8-b101
 528c7e76eaeee022817ee085668459bc97cf5665 jdk8-b102
 49c4a777fdfd648d4c3fffc940fdb97a23108ca8 jdk8-b103
+d411c60a8c2fe8fdc572af907775e90f7eefd513 jdk8-b104
+4e38de7c767e34104fa147b5b346d9fe6b731279 jdk8-b105

.hgtags:

@@ -369,3 +369,7 @@ c4697c1c448416108743b59118b4a2498b339d0c jdk8-b102
 7f55137d6aa81efc6eb0035813709f2cb6a26b8b hs25-b45
 6f9be7f87b9653e94fd8fb3070891a0cc91b15bf jdk8-b103
 580430d131ccd475e2f2ad4006531b8c4813d102 hs25-b46
+104743074675359cfbf7f4dcd9ab2a5974a16627 jdk8-b104
+c1604d5885a6f2adc0bcea2fa142a8f6bafad2f0 hs25-b47
+acac3bde66b2c22791c257a8d99611d6d08c6713 jdk8-b105
+18b4798adbc42c6fa16f5ecb7d5cd3ca130754bf hs25-b48
@@ -75,19 +75,19 @@ public class InstanceKlass extends Klass {
     javaFieldsCount = new CIntField(type.getCIntegerField("_java_fields_count"), 0);
     constants = new MetadataField(type.getAddressField("_constants"), 0);
     classLoaderData = type.getAddressField("_class_loader_data");
-    sourceFileName = type.getAddressField("_source_file_name");
     sourceDebugExtension = type.getAddressField("_source_debug_extension");
     innerClasses = type.getAddressField("_inner_classes");
+    sourceFileNameIndex = new CIntField(type.getCIntegerField("_source_file_name_index"), 0);
     nonstaticFieldSize = new CIntField(type.getCIntegerField("_nonstatic_field_size"), 0);
     staticFieldSize = new CIntField(type.getCIntegerField("_static_field_size"), 0);
     staticOopFieldCount = new CIntField(type.getCIntegerField("_static_oop_field_count"), 0);
     nonstaticOopMapSize = new CIntField(type.getCIntegerField("_nonstatic_oop_map_size"), 0);
     isMarkedDependent = new CIntField(type.getCIntegerField("_is_marked_dependent"), 0);
     initState = new CIntField(type.getCIntegerField("_init_state"), 0);
     vtableLen = new CIntField(type.getCIntegerField("_vtable_len"), 0);
     itableLen = new CIntField(type.getCIntegerField("_itable_len"), 0);
     breakpoints = type.getAddressField("_breakpoints");
-    genericSignature = type.getAddressField("_generic_signature");
+    genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"), 0);
     majorVersion = new CIntField(type.getCIntegerField("_major_version"), 0);
     minorVersion = new CIntField(type.getCIntegerField("_minor_version"), 0);
     headerSize = Oop.alignObjectOffset(type.getSize());
@@ -134,9 +134,9 @@ public class InstanceKlass extends Klass {
   private static CIntField javaFieldsCount;
   private static MetadataField constants;
   private static AddressField classLoaderData;
-  private static AddressField sourceFileName;
   private static AddressField sourceDebugExtension;
   private static AddressField innerClasses;
+  private static CIntField sourceFileNameIndex;
   private static CIntField nonstaticFieldSize;
   private static CIntField staticFieldSize;
   private static CIntField staticOopFieldCount;
@@ -146,7 +146,7 @@ public class InstanceKlass extends Klass {
   private static CIntField vtableLen;
   private static CIntField itableLen;
   private static AddressField breakpoints;
-  private static AddressField genericSignature;
+  private static CIntField genericSignatureIndex;
   private static CIntField majorVersion;
   private static CIntField minorVersion;

@@ -346,7 +346,7 @@ public class InstanceKlass extends Klass {
   public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); }
   public ClassLoaderData getClassLoaderData() { return ClassLoaderData.instantiateWrapperFor(classLoaderData.getValue(getAddress())); }
   public Oop getClassLoader() { return getClassLoaderData().getClassLoader(); }
-  public Symbol getSourceFileName() { return getSymbol(sourceFileName); }
+  public Symbol getSourceFileName() { return getConstants().getSymbolAt(sourceFileNameIndex.getValue(this)); }
   public String getSourceDebugExtension(){ return CStringUtilities.getString(sourceDebugExtension.getValue(getAddress())); }
   public long getNonstaticFieldSize() { return nonstaticFieldSize.getValue(this); }
   public long getStaticOopFieldCount() { return staticOopFieldCount.getValue(this); }
@@ -354,7 +354,7 @@ public class InstanceKlass extends Klass {
   public boolean getIsMarkedDependent() { return isMarkedDependent.getValue(this) != 0; }
   public long getVtableLen() { return vtableLen.getValue(this); }
   public long getItableLen() { return itableLen.getValue(this); }
-  public Symbol getGenericSignature() { return getSymbol(genericSignature); }
+  public Symbol getGenericSignature() { return getConstants().getSymbolAt(genericSignatureIndex.getValue(this)); }
   public long majorVersion() { return majorVersion.getValue(this); }
   public long minorVersion() { return minorVersion.getValue(this); }

@@ -92,8 +92,13 @@ public class ClassDump extends Tool {
                 System.err.println("Warning: Can not create class filter!");
             }
         }
-        String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", ".");
-        setOutputDirectory(outputDirectory);
+        // outputDirectory and jarStream are alternatives: setting one closes the other.
+        // If neither is set, use outputDirectory from the System property:
+        if (outputDirectory == null && jarStream == null) {
+            String dirName = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", ".");
+            setOutputDirectory(dirName);
+        }

         // walk through the system dictionary
         SystemDictionary dict = VM.getVM().getSystemDictionary();
@@ -35,8 +35,9 @@ sapkg.c1 = sapkg.hotspot.c1;
 sapkg.code = sapkg.hotspot.code;
 sapkg.compiler = sapkg.hotspot.compiler;

-// 'debugger' is a JavaScript keyword :-(
-// sapkg.debugger = sapkg.hotspot.debugger;
+// 'debugger' is a JavaScript keyword, but ES5 relaxes the
+// restriction of using keywords as property name
+sapkg.debugger = sapkg.hotspot.debugger;

 sapkg.interpreter = sapkg.hotspot.interpreter;
 sapkg.jdi = sapkg.hotspot.jdi;
@@ -116,27 +117,36 @@ function main(globals, jvmarg) {
     return args;
   }

+  // Handle __has__ specially to avoid metacircularity problems
+  // when called from __get__.
+  // Calling
+  //   this.__has__(name)
+  // will in turn call
+  //   this.__call__('__has__', name)
+  // which is not handled below
+  function __has__(name) {
+    if (typeof(name) == 'number') {
+      return so["has(int)"](name);
+    } else {
+      if (name == '__wrapped__') {
+        return true;
+      } else if (so["has(java.lang.String)"](name)) {
+        return true;
+      } else if (name.equals('toString')) {
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
   if (so instanceof sapkg.utilities.soql.ScriptObject) {
     return new JSAdapter() {
       __getIds__: function() {
         return so.getIds();
       },

-      __has__ : function(name) {
-        if (typeof(name) == 'number') {
-          return so["has(int)"](name);
-        } else {
-          if (name == '__wrapped__') {
-            return true;
-          } else if (so["has(java.lang.String)"](name)) {
-            return true;
-          } else if (name.equals('toString')) {
-            return true;
-          } else {
-            return false;
-          }
-        }
-      },
+      __has__ : __has__,

       __delete__ : function(name) {
         if (typeof(name) == 'number') {
@@ -147,7 +157,8 @@ function main(globals, jvmarg) {
       },

       __get__ : function(name) {
-        if (! this.__has__(name)) {
+        // don't call this.__has__(name); see comments above function __has__
+        if (! __has__.call(this, name)) {
           return undefined;
         }
         if (typeof(name) == 'number') {
@@ -162,7 +173,7 @@ function main(globals, jvmarg) {
           var args = prepareArgsArray(arguments);
           var r;
           try {
-            r = value.call(args);
+            r = value.call(Java.to(args, 'java.lang.Object[]'));
           } catch (e) {
             println("call to " + name + " failed!");
             throw e;
@@ -204,6 +215,18 @@ function main(globals, jvmarg) {
   }

   // define "writeln" and "write" if not defined
+  if (typeof(println) == 'undefined') {
+    println = function (str) {
+      java.lang.System.out.println(String(str));
+    }
+  }
+
+  if (typeof(print) == 'undefined') {
+    print = function (str) {
+      java.lang.System.out.print(String(str));
+    }
+  }
+
   if (typeof(writeln) == 'undefined') {
     writeln = println;
   }
@@ -235,7 +258,7 @@ function main(globals, jvmarg) {

   this.jclasses = function() {
     forEachKlass(function (clazz) {
-      writeln(clazz.getName().asString() + " @" + clazz.getHandle().toString());
+      writeln(clazz.getName().asString() + " @" + clazz.getAddress().toString());
     });
   }
   registerCommand("classes", "classes", "jclasses");
@@ -490,14 +513,14 @@ function systemLoader() {
 function forEachKlass(callback) {
   var VisitorClass = sapkg.memory.SystemDictionary.ClassVisitor;
   var visitor = new VisitorClass() { visit: callback };
-  sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassVisitor)"](visitor);
+  sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassVisitor)"](visitor);
 }

 // iterate system dictionary for each 'Klass' and initiating loader
 function forEachKlassAndLoader(callback) {
   var VisitorClass = sapkg.memory.SystemDictionary.ClassAndLoaderVisitor;
   var visitor = new VisitorClass() { visit: callback };
-  sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary$ClassAndLoaderVisitor)"](visitor);
+  sa.sysDict["classesDo(sun.jvm.hotspot.memory.SystemDictionary.ClassAndLoaderVisitor)"](visitor);
 }

 // iterate system dictionary for each primitive array klass
@@ -522,7 +545,12 @@ function obj2oop(obj) {

 // iterates Java heap for each Oop
 function forEachOop(callback) {
-  sa.objHeap.iterate(new sapkg.oops.HeapVisitor() { doObj: callback });
+  function empty() { }
+  sa.objHeap.iterate(new sapkg.oops.HeapVisitor() {
+    prologue: empty,
+    doObj: callback,
+    epilogue: empty
+  });
 }

 // iterates Java heap for each Oop of given 'klass'.
@@ -536,8 +564,14 @@ function forEachOopOfKlass(callback, klass, includeSubtypes) {
   if (includeSubtypes == undefined) {
     includeSubtypes = true;
   }

+  function empty() { }
   sa.objHeap.iterateObjectsOfKlass(
-    new sapkg.oops.HeapVisitor() { doObj: callback },
+    new sapkg.oops.HeapVisitor() {
+      prologue: empty,
+      doObj: callback,
+      epilogue: empty
+    },
     klass, includeSubtypes);
 }

@@ -746,9 +780,9 @@ while (tmp.itr.hasNext()) {
     // ignore;
     continue;
   } else {
-    // some type names have ':'. replace to make it as a
+    // some type names have ':', '<', '>', '*', ' '. replace to make it as a
     // JavaScript identifier
-    tmp.name = tmp.name.replace(':', '_').replace('<', '_').replace('>', '_').replace('*', '_').replace(' ', '_');
+    tmp.name = ("" + tmp.name).replace(/[:<>* ]/g, '_');
     eval("function read" + tmp.name + "(addr) {" +
          " return readVMType('" + tmp.name + "', addr);}");
     eval("function print" + tmp.name + "(addr) {" +
@@ -247,7 +247,7 @@ ifeq ($(USE_CLANG), true)
   # Not yet supported by clang in Xcode 4.6.2
   # WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
   WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
-  WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
+  WARNINGS_ARE_ERRORS += -Wno-empty-body
 endif

 WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013

 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=46
+HS_BUILD_NUMBER=48

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,9 +31,4 @@ CFLAGS += -DVM_LITTLE_ENDIAN

 CFLAGS += -D_LP64=1

-# The serviceability agent relies on frame pointer (%rbp) to walk thread stack
-ifndef USE_SUNCC
-  CFLAGS += -fno-omit-frame-pointer
-endif
-
 OPT_CFLAGS/compactingPermGenGen.o = -O1
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -398,3 +398,10 @@ endif
 ifdef MINIMIZE_RAM_USAGE
 CFLAGS += -DMINIMIZE_RAM_USAGE
 endif
+
+# Stack walking in the JVM relies on frame pointer (%rbp) to walk thread stack.
+# Explicitly specify -fno-omit-frame-pointer because it is off by default
+# starting with gcc 4.6.
+ifndef USE_SUNCC
+  CFLAGS += -fno-omit-frame-pointer
+endif
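The effect of the flag is easy to observe on x86-64, where -fno-omit-frame-pointer keeps the %rbp prologue that the JVM's stack walking depends on. A sketch (gcc assumed; the exact assembly varies by version and options):

    #!/bin/sh
    # With the flag, the prologue saves and establishes %rbp, so the
    # frame-pointer chain stays walkable; gcc >= 4.6 otherwise omits
    # it when optimizing and reuses %rbp as a scratch register.
    echo 'int f(int x) { return x + 1; }' > /tmp/f.c
    gcc -O2 -fno-omit-frame-pointer -S -o - /tmp/f.c | grep rbp
    gcc -O2 -fomit-frame-pointer -S -o - /tmp/f.c | grep -c rbp   # typically 0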
@@ -42,8 +42,6 @@ else
 MKS_HOME=`dirname "$SH"`
 fi

-echo "EXPORTS" > vm1.def
-
 AWK="$MKS_HOME/awk.exe"
 if [ ! -e $AWK ]; then
 AWK="$MKS_HOME/gawk.exe"
@@ -55,6 +53,22 @@ CAT="$MKS_HOME/cat.exe"
 RM="$MKS_HOME/rm.exe"
 DUMPBIN="link.exe /dump"

+if [ "$1" = "-nosa" ]; then
+  echo EXPORTS > vm.def
+  echo ""
+  echo "***"
+  echo "*** Not building SA: BUILD_WIN_SA != 1"
+  echo "*** C++ Vtables NOT included in vm.def"
+  echo "*** This jvm.dll will NOT work properly with SA."
+  echo "***"
+  echo "*** When in doubt, set BUILD_WIN_SA=1, clean and rebuild."
+  echo "***"
+  echo ""
+  exit
+fi
+
+echo "EXPORTS" > vm1.def
+
 # When called from IDE the first param should contain the link version, otherwise may be nill
 if [ "x$1" != "x" ]; then
 LD_VER="$1"
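The new early exit can be driven by hand; the flag and messages are exactly those added above. A sketch (the script path is assumed from the makefiles later in this change):

    #!/bin/sh
    # With BUILD_WIN_SA != 1 the build passes -nosa: the script emits a
    # stub vm.def without C++ vtable exports, prints the warning block,
    # and exits before the dumpbin/awk export-generation pipeline runs.
    sh make/windows/build_vm_def.sh -nosa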

@@ -49,9 +49,6 @@ HS_BUILD_ID=$(HS_BUILD_VER)-debug
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE

-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)

@@ -48,9 +48,6 @@ HS_BUILD_ID=$(HS_BUILD_VER)-fastdebug
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE

-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)

@@ -51,9 +51,6 @@ HS_BUILD_ID=$(HS_BUILD_VER)
 # Force resources to be rebuilt every time
 $(Res_Files): FORCE

-vm.def: $(Obj_Files)
-	sh $(WorkSpace)/make/windows/build_vm_def.sh
-
 $(AOUT): $(Res_Files) $(Obj_Files) vm.def
 	$(LD) @<<
   $(LD_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files)
@@ -92,6 +92,10 @@ ProjectCreatorIDEOptions = \
 -disablePch getThread_windows_$(Platform_arch).cpp \
 -disablePch_compiler2 opcodes.cpp

+!if "$(BUILD_WIN_SA)" != "1"
+BUILD_VM_DEF_FLAG=-nosa
+!endif
+
 # Common options for the IDE builds for c1, and c2
 ProjectCreatorIDEOptions=\
 $(ProjectCreatorIDEOptions) \
@@ -104,7 +108,7 @@ ProjectCreatorIDEOptions=\
 -jdkTargetRoot $(HOTSPOTJDKDIST) \
 -define ALIGN_STACK_FRAMES \
 -define VM_LITTLE_ENDIAN \
--prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LD_VER)" \
+-prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) set JAVA_HOME=$(HOTSPOTJDKDIST) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(BUILD_VM_DEF_FLAG) $(LD_VER)" \
 -ignoreFile jsig.c \
 -ignoreFile jvmtiEnvRecommended.cpp \
 -ignoreFile jvmtiEnvStub.cpp \
|
|||||||
_build_pch_file.obj:
|
_build_pch_file.obj:
|
||||||
@echo #include "precompiled.hpp" > ../generated/_build_pch_file.cpp
|
@echo #include "precompiled.hpp" > ../generated/_build_pch_file.cpp
|
||||||
$(CXX) $(CXX_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp
|
$(CXX) $(CXX_FLAGS) /Fp"vm.pch" /Yc"precompiled.hpp" /c ../generated/_build_pch_file.cpp
|
||||||
|
|
||||||
|
!if "$(BUILD_WIN_SA)" != "1"
|
||||||
|
BUILD_VM_DEF_FLAG=-nosa
|
||||||
|
!endif
|
||||||
|
|
||||||
|
vm.def: $(Obj_Files)
|
||||||
|
sh $(WorkSpace)/make/windows/build_vm_def.sh $(BUILD_VM_DEF_FLAG)
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -29,6 +29,7 @@
|
|||||||
#include "interpreter/interpreter.hpp"
|
#include "interpreter/interpreter.hpp"
|
||||||
#include "memory/cardTableModRefBS.hpp"
|
#include "memory/cardTableModRefBS.hpp"
|
||||||
#include "memory/resourceArea.hpp"
|
#include "memory/resourceArea.hpp"
|
||||||
|
#include "memory/universe.hpp"
|
||||||
#include "prims/methodHandles.hpp"
|
#include "prims/methodHandles.hpp"
|
||||||
#include "runtime/biasedLocking.hpp"
|
#include "runtime/biasedLocking.hpp"
|
||||||
#include "runtime/interfaceSupport.hpp"
|
#include "runtime/interfaceSupport.hpp"
|
||||||
@ -1145,7 +1146,7 @@ void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
|
|||||||
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
|
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
|
||||||
int klass_index = oop_recorder()->find_index(k);
|
int klass_index = oop_recorder()->find_index(k);
|
||||||
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
|
RelocationHolder rspec = metadata_Relocation::spec(klass_index);
|
||||||
narrowOop encoded_k = oopDesc::encode_klass(k);
|
narrowOop encoded_k = Klass::encode_klass(k);
|
||||||
|
|
||||||
assert_not_delayed();
|
assert_not_delayed();
|
||||||
// Relocation with special format (see relocInfo_sparc.hpp).
|
// Relocation with special format (see relocInfo_sparc.hpp).
|
||||||
@ -1419,7 +1420,6 @@ void MacroAssembler::verify_oop_subroutine() {
|
|||||||
load_klass(O0_obj, O0_obj);
|
load_klass(O0_obj, O0_obj);
|
||||||
// assert((klass != NULL)
|
// assert((klass != NULL)
|
||||||
br_null_short(O0_obj, pn, fail);
|
br_null_short(O0_obj, pn, fail);
|
||||||
// TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers
|
|
||||||
|
|
||||||
wrccr( O5_save_flags ); // Restore CCR's
|
wrccr( O5_save_flags ); // Restore CCR's
|
||||||
|
|
||||||
@ -4089,52 +4089,91 @@ void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void MacroAssembler::encode_klass_not_null(Register r) {
|
void MacroAssembler::encode_klass_not_null(Register r) {
|
||||||
assert(Metaspace::is_initialized(), "metaspace should be initialized");
|
|
||||||
assert (UseCompressedKlassPointers, "must be compressed");
|
assert (UseCompressedKlassPointers, "must be compressed");
|
||||||
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
|
||||||
if (Universe::narrow_klass_base() != NULL)
|
assert(r != G6_heapbase, "bad register choice");
|
||||||
sub(r, G6_heapbase, r);
|
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
|
||||||
srlx(r, LogKlassAlignmentInBytes, r);
|
sub(r, G6_heapbase, r);
|
||||||
|
if (Universe::narrow_klass_shift() != 0) {
|
||||||
|
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
||||||
|
srlx(r, LogKlassAlignmentInBytes, r);
|
||||||
|
}
|
||||||
|
reinit_heapbase();
|
||||||
}
|
}
|
||||||
|
|
||||||
void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
|
void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
|
||||||
assert(Metaspace::is_initialized(), "metaspace should be initialized");
|
if (src == dst) {
|
||||||
assert (UseCompressedKlassPointers, "must be compressed");
|
encode_klass_not_null(src);
|
||||||
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
|
||||||
if (Universe::narrow_klass_base() == NULL) {
|
|
||||||
srlx(src, LogKlassAlignmentInBytes, dst);
|
|
||||||
} else {
|
} else {
|
||||||
sub(src, G6_heapbase, dst);
|
assert (UseCompressedKlassPointers, "must be compressed");
|
||||||
srlx(dst, LogKlassAlignmentInBytes, dst);
|
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
|
||||||
|
set((intptr_t)Universe::narrow_klass_base(), dst);
|
||||||
|
sub(src, dst, dst);
|
||||||
|
if (Universe::narrow_klass_shift() != 0) {
|
||||||
|
srlx(dst, LogKlassAlignmentInBytes, dst);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Function instr_size_for_decode_klass_not_null() counts the instructions
|
||||||
|
// generated by decode_klass_not_null() and reinit_heapbase(). Hence, if
|
||||||
|
// the instructions they generate change, then this method needs to be updated.
|
||||||
|
int MacroAssembler::instr_size_for_decode_klass_not_null() {
|
||||||
|
assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
|
||||||
|
// set + add + set
|
||||||
|
int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
|
||||||
|
insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
|
||||||
|
if (Universe::narrow_klass_shift() == 0) {
|
||||||
|
return num_instrs * BytesPerInstWord;
|
||||||
|
} else { // sllx
|
||||||
|
return (num_instrs + 1) * BytesPerInstWord;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// !!! If the instructions that get generated here change then function
|
||||||
|
// instr_size_for_decode_klass_not_null() needs to get updated.
|
||||||
void MacroAssembler::decode_klass_not_null(Register r) {
|
void MacroAssembler::decode_klass_not_null(Register r) {
|
||||||
assert(Metaspace::is_initialized(), "metaspace should be initialized");
|
|
||||||
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
|
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
|
||||||
// pd_code_size_limit.
|
// pd_code_size_limit.
|
||||||
assert (UseCompressedKlassPointers, "must be compressed");
|
assert (UseCompressedKlassPointers, "must be compressed");
|
||||||
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
|
||||||
sllx(r, LogKlassAlignmentInBytes, r);
|
assert(r != G6_heapbase, "bad register choice");
|
||||||
if (Universe::narrow_klass_base() != NULL)
|
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
|
||||||
add(r, G6_heapbase, r);
|
if (Universe::narrow_klass_shift() != 0)
|
||||||
|
sllx(r, LogKlassAlignmentInBytes, r);
|
||||||
|
add(r, G6_heapbase, r);
|
||||||
|
reinit_heapbase();
|
||||||
}
|
}
|
||||||
|
|
||||||
void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
|
void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
|
||||||
assert(Metaspace::is_initialized(), "metaspace should be initialized");
|
if (src == dst) {
|
||||||
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
|
decode_klass_not_null(src);
|
||||||
// pd_code_size_limit.
|
} else {
|
||||||
assert (UseCompressedKlassPointers, "must be compressed");
|
// Do not add assert code to this unless you change vtableStubs_sparc.cpp
|
||||||
assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
|
// pd_code_size_limit.
|
||||||
sllx(src, LogKlassAlignmentInBytes, dst);
|
assert (UseCompressedKlassPointers, "must be compressed");
|
||||||
if (Universe::narrow_klass_base() != NULL)
|
assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
|
||||||
add(dst, G6_heapbase, dst);
|
if (Universe::narrow_klass_shift() != 0) {
|
||||||
|
assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
|
||||||
|
set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
|
||||||
|
sllx(src, LogKlassAlignmentInBytes, dst);
|
||||||
|
add(dst, G6_heapbase, dst);
|
||||||
|
reinit_heapbase();
|
||||||
|
} else {
|
||||||
|
set((intptr_t)Universe::narrow_klass_base(), dst);
|
||||||
|
add(src, dst, dst);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void MacroAssembler::reinit_heapbase() {
|
void MacroAssembler::reinit_heapbase() {
|
||||||
if (UseCompressedOops || UseCompressedKlassPointers) {
|
if (UseCompressedOops || UseCompressedKlassPointers) {
|
||||||
AddressLiteral base(Universe::narrow_ptrs_base_addr());
|
if (Universe::heap() != NULL) {
|
||||||
load_ptr_contents(base, G6_heapbase);
|
set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
|
||||||
|
} else {
|
||||||
|
AddressLiteral base(Universe::narrow_ptrs_base_addr());
|
||||||
|
load_ptr_contents(base, G6_heapbase);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1177,6 +1177,9 @@ public:
|
|||||||
void push_CPU_state();
|
void push_CPU_state();
|
||||||
void pop_CPU_state();
|
void pop_CPU_state();
|
||||||
|
|
||||||
|
// Returns the byte size of the instructions generated by decode_klass_not_null().
|
||||||
|
static int instr_size_for_decode_klass_not_null();
|
||||||
|
|
||||||
// if heap base register is used - reinit it with the correct value
|
// if heap base register is used - reinit it with the correct value
|
||||||
void reinit_heapbase();
|
void reinit_heapbase();
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -97,7 +97,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
|
|||||||
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
|
guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
|
||||||
if (format() != 0) {
|
if (format() != 0) {
|
||||||
assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
|
assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
|
||||||
jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : oopDesc::encode_klass((Klass*)x);
|
jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
|
||||||
inst &= ~Assembler::hi22(-1);
|
inst &= ~Assembler::hi22(-1);
|
||||||
inst |= Assembler::hi22((intptr_t)np);
|
inst |= Assembler::hi22((intptr_t)np);
|
||||||
if (verify_only) {
|
if (verify_only) {
|
||||||
|
@ -559,10 +559,7 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
|
|||||||
int klass_load_size;
|
int klass_load_size;
|
||||||
if (UseCompressedKlassPointers) {
|
if (UseCompressedKlassPointers) {
|
||||||
assert(Universe::heap() != NULL, "java heap should be initialized");
|
assert(Universe::heap() != NULL, "java heap should be initialized");
|
||||||
if (Universe::narrow_klass_base() == NULL)
|
klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
|
||||||
klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
|
|
||||||
else
|
|
||||||
klass_load_size = 3*BytesPerInstWord;
|
|
||||||
} else {
|
} else {
|
||||||
klass_load_size = 1*BytesPerInstWord;
|
klass_load_size = 1*BytesPerInstWord;
|
||||||
}
|
}
|
||||||
@ -1663,9 +1660,12 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
|
|||||||
if (UseCompressedKlassPointers) {
|
if (UseCompressedKlassPointers) {
|
||||||
assert(Universe::heap() != NULL, "java heap should be initialized");
|
assert(Universe::heap() != NULL, "java heap should be initialized");
|
||||||
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
|
st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
|
||||||
st->print_cr("\tSLL R_G5,3,R_G5");
|
st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
|
||||||
if (Universe::narrow_klass_base() != NULL)
|
if (Universe::narrow_klass_shift() != 0) {
|
||||||
st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
|
st->print_cr("\tSLL R_G5,3,R_G5");
|
||||||
|
}
|
||||||
|
st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
|
||||||
|
st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
|
||||||
} else {
|
} else {
|
||||||
st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
|
st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
|
||||||
}
|
}
|
||||||
@ -2563,10 +2563,7 @@ encode %{
|
|||||||
int klass_load_size;
|
int klass_load_size;
|
||||||
if (UseCompressedKlassPointers) {
|
if (UseCompressedKlassPointers) {
|
||||||
assert(Universe::heap() != NULL, "java heap should be initialized");
|
assert(Universe::heap() != NULL, "java heap should be initialized");
|
||||||
if (Universe::narrow_klass_base() == NULL)
|
klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
|
||||||
klass_load_size = 2*BytesPerInstWord;
|
|
||||||
else
|
|
||||||
klass_load_size = 3*BytesPerInstWord;
|
|
||||||
} else {
|
} else {
|
||||||
klass_load_size = 1*BytesPerInstWord;
|
klass_load_size = 1*BytesPerInstWord;
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@@ -219,13 +219,13 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
     const int basic = 5*BytesPerInstWord +
                       // shift;add for load_klass (only shift with zero heap based)
                       (UseCompressedKlassPointers ?
-                        ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
+                        MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
     return basic + slop;
   } else {
     const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                       // shift;add for load_klass (only shift with zero heap based)
                       (UseCompressedKlassPointers ?
-                        ((Universe::narrow_klass_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
+                        MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
     return (basic + slop);
   }
 }
@@ -30,6 +30,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -4810,23 +4811,8 @@ void MacroAssembler::load_klass(Register dst, Register src) {
 }

 void MacroAssembler::load_prototype_header(Register dst, Register src) {
-#ifdef _LP64
-  if (UseCompressedKlassPointers) {
-    assert (Universe::heap() != NULL, "java heap should be initialized");
-    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-    if (Universe::narrow_klass_shift() != 0) {
-      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
-      movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
-    } else {
-      movq(dst, Address(dst, Klass::prototype_header_offset()));
-    }
-  } else
-#endif
-  {
-    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-    movptr(dst, Address(dst, Klass::prototype_header_offset()));
-  }
+  load_klass(dst, src);
+  movptr(dst, Address(dst, Klass::prototype_header_offset()));
 }

 void MacroAssembler::store_klass(Register dst, Register src) {
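Worth noting in the hunk above: the _LP64 decode logic duplicated inside load_prototype_header() is dropped in favor of the one canonical load_klass() path. A reduced model of the refactor, with hypothetical stand-in types and offsets:

#include <cstdio>

struct Register { long value; };

static void load_klass(Register& dst, const Register& src) {
  dst.value = src.value & ~7L;   // stand-in for the real compressed-klass decode
}

static void load_prototype_header(Register& dst, const Register& src) {
  load_klass(dst, src);          // reuse the shared decode, as the new code does
  dst.value += 16;               // stand-in for the prototype_header field load
}

int main() {
  Register dst{0}, src{0x12345};
  load_prototype_header(dst, src);
  printf("%ld\n", dst.value);
  return 0;
}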
@@ -4914,7 +4900,7 @@ void MacroAssembler::store_klass_gap(Register dst, Register src) {

 #ifdef ASSERT
 void MacroAssembler::verify_heapbase(const char* msg) {
-  assert (UseCompressedOops || UseCompressedKlassPointers, "should be compressed");
+  assert (UseCompressedOops, "should be compressed");
   assert (Universe::heap() != NULL, "java heap should be initialized");
   if (CheckCompressedOops) {
     Label ok;
@@ -5058,69 +5044,80 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
 }

 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-#ifdef ASSERT
-  verify_heapbase("MacroAssembler::encode_klass_not_null: heap base corrupted?");
-#endif
-  if (Universe::narrow_klass_base() != NULL) {
-    subq(r, r12_heapbase);
-  }
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
+  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
+  assert(r != r12_heapbase, "Encoding a klass in r12");
+  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+  subq(r, r12_heapbase);
   if (Universe::narrow_klass_shift() != 0) {
     assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
     shrq(r, LogKlassAlignmentInBytes);
   }
+  reinit_heapbase();
 }

 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
-#ifdef ASSERT
-  verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?");
-#endif
-  if (dst != src) {
-    movq(dst, src);
-  }
-  if (Universe::narrow_klass_base() != NULL) {
-    subq(dst, r12_heapbase);
-  }
-  if (Universe::narrow_klass_shift() != 0) {
-    assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-    shrq(dst, LogKlassAlignmentInBytes);
+  if (dst == src) {
+    encode_klass_not_null(src);
+  } else {
+    mov64(dst, (int64_t)Universe::narrow_klass_base());
+    negq(dst);
+    addq(dst, src);
+    if (Universe::narrow_klass_shift() != 0) {
+      assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+      shrq(dst, LogKlassAlignmentInBytes);
+    }
   }
 }

+// Function instr_size_for_decode_klass_not_null() counts the instructions
+// generated by decode_klass_not_null(register r) and reinit_heapbase(),
+// when (Universe::heap() != NULL). Hence, if the instructions they
+// generate change, then this method needs to be updated.
+int MacroAssembler::instr_size_for_decode_klass_not_null() {
+  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  // mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
+  return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
+}
+
+// !!! If the instructions that get generated here change then function
+// instr_size_for_decode_klass_not_null() needs to get updated.
 void MacroAssembler::decode_klass_not_null(Register r) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   // Note: it will change flags
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert(r != r12_heapbase, "Decoding a klass in r12");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
   if (Universe::narrow_klass_shift() != 0) {
     assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
     shlq(r, LogKlassAlignmentInBytes);
-    if (Universe::narrow_klass_base() != NULL) {
-      addq(r, r12_heapbase);
-    }
-  } else {
-    assert (Universe::narrow_klass_base() == NULL, "sanity");
   }
+  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
+  mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
+  addq(r, r12_heapbase);
+  reinit_heapbase();
 }

 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
-  assert(Metaspace::is_initialized(), "metaspace should be initialized");
   // Note: it will change flags
+  assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   assert (UseCompressedKlassPointers, "should only be used for compressed headers");
-  // Cannot assert, unverified entry point counts instructions (see .ad file)
-  // vtableStubs also counts instructions in pd_code_size_limit.
-  // Also do not verify_oop as this is called by verify_oop.
-  if (Universe::narrow_klass_shift() != 0) {
-    assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
-    assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
-    leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
+  if (dst == src) {
+    decode_klass_not_null(dst);
   } else {
-    assert (Universe::narrow_klass_base() == NULL, "sanity");
-    if (dst != src) {
-      movq(dst, src);
-    }
+    // Cannot assert, unverified entry point counts instructions (see .ad file)
+    // vtableStubs also counts instructions in pd_code_size_limit.
+    // Also do not verify_oop as this is called by verify_oop.
+
+    mov64(dst, (int64_t)Universe::narrow_klass_base());
+    if (Universe::narrow_klass_shift() != 0) {
+      assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
+      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
+      leaq(dst, Address(dst, src, Address::times_8, 0));
+    } else {
+      addq(dst, src);
+    }
   }
 }
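The hunk above centralizes the narrow-klass arithmetic: encoding subtracts a (now always non-NULL) base and shifts right; decoding shifts left and adds the base back, borrowing r12 as a scratch register and restoring it with reinit_heapbase(). A round-trip model of that arithmetic, with an assumed base and shift:

#include <cstdint>
#include <cassert>

static const uintptr_t narrow_klass_base  = 0x800000000ULL; // assumed value
static const int       narrow_klass_shift = 3;              // LogKlassAlignmentInBytes

// subq + shrq, as in encode_klass_not_null() above.
static uint32_t encode_klass(uintptr_t klass) {
  return (uint32_t)((klass - narrow_klass_base) >> narrow_klass_shift);
}

// shlq + addq, as in decode_klass_not_null() above.
static uintptr_t decode_klass(uint32_t narrow) {
  return ((uintptr_t)narrow << narrow_klass_shift) + narrow_klass_base;
}

int main() {
  uintptr_t k = narrow_klass_base + 0x1000; // any 8-byte-aligned offset
  assert(decode_klass(encode_klass(k)) == k);
  return 0;
}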
@@ -5148,7 +5145,7 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }

 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {

@@ -5156,7 +5153,7 @@ void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  mov_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }

 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {

@@ -5182,7 +5179,7 @@ void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }

 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {

@@ -5190,14 +5187,23 @@ void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
-  Assembler::cmp_narrow_oop(dst, oopDesc::encode_klass(k), rspec);
+  Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
 }

 void MacroAssembler::reinit_heapbase() {
   if (UseCompressedOops || UseCompressedKlassPointers) {
-    movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+    if (Universe::heap() != NULL) {
+      if (Universe::narrow_oop_base() == NULL) {
+        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
+      } else {
+        mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
+      }
+    } else {
+      movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
+    }
   }
 }

 #endif // _LP64
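One detail in the reinit_heapbase() hunk: once the heap exists, the base is a known constant, so the code chooses between the short xor idiom (zero base) and materializing a 64-bit immediate, instead of always reloading through memory. A minimal model of that choice, with assumed values:

#include <cstdint>
#include <cstdio>

static uint64_t reinit_heapbase_model(bool heap_initialized,
                                      uint64_t narrow_oop_base,
                                      uint64_t narrow_ptrs_base) {
  if (!heap_initialized) {
    return narrow_ptrs_base;   // movptr from Universe::narrow_ptrs_base_addr()
  }
  if (narrow_oop_base == 0) {
    return 0;                  // xorptr(r12_heapbase, r12_heapbase): shortest form
  }
  return narrow_ptrs_base;     // mov64(r12_heapbase, constant)
}

int main() {
  printf("%llu\n", (unsigned long long)reinit_heapbase_model(true, 0, 0));
  return 0;
}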
@@ -371,6 +371,10 @@ class MacroAssembler: public Assembler {
   void cmp_narrow_klass(Register dst, Klass* k);
   void cmp_narrow_klass(Address dst, Klass* k);

+  // Returns the byte size of the instructions generated by decode_klass_not_null()
+  // when compressed klass pointers are being used.
+  static int instr_size_for_decode_klass_not_null();
+
   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,9 +55,9 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
       }
     } else {
       if (verify_only) {
-        assert(*(uint32_t*) disp == oopDesc::encode_klass((Klass*)x), "instructions must match");
+        assert(*(uint32_t*) disp == Klass::encode_klass((Klass*)x), "instructions must match");
       } else {
-        *(int32_t*) disp = oopDesc::encode_klass((Klass*)x);
+        *(int32_t*) disp = Klass::encode_klass((Klass*)x);
       }
     }
   } else {
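The recurring rename in these hunks moves the compression helpers from oopDesc to Klass, since a Klass* now points into metaspace rather than the Java heap. The verify_only branch checks that the 32-bit immediate baked into the instruction stream equals the encoded form of the target Klass. A simplified model, with assumed encode logic:

#include <cstdint>
#include <cassert>

static uint32_t encode_klass(uintptr_t k) { return (uint32_t)(k >> 3); } // assumed

static void set_or_verify(uint32_t* disp, uintptr_t klass, bool verify_only) {
  if (verify_only) {
    assert(*disp == encode_klass(klass) && "instructions must match");
  } else {
    *disp = encode_klass(klass);   // patch the immediate in place
  }
}

int main() {
  uint32_t slot = 0;
  set_or_verify(&slot, 0x1000, false);  // patch
  set_or_verify(&slot, 0x1000, true);   // then verify
  return 0;
}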
@@ -675,7 +675,6 @@ class StubGenerator: public StubCodeGenerator {
     __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
-    // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers

     // return if everything seems ok
     __ bind(exit);
@@ -1021,7 +1021,6 @@ class StubGenerator: public StubCodeGenerator {
     __ load_klass(rax, rax); // get klass
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
-    // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers

     // return if everything seems ok
     __ bind(exit);
@@ -849,9 +849,9 @@ address InterpreterGenerator::generate_CRC32_update_entry() {
   address entry = __ pc();

   // rbx,: Method*
-  // rsi: senderSP must preserved for slow path, set SP to it on fast path
-  // rdx: scratch
-  // rdi: scratch
+  // r13: senderSP must preserved for slow path, set SP to it on fast path
+  // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
+  // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)

   Label slow_path;
   // If we need a safepoint check, generate full interpreter entry.
@@ -865,8 +865,8 @@ address InterpreterGenerator::generate_CRC32_update_entry() {

     // Load parameters
     const Register crc = rax; // crc
-    const Register val = rdx; // source java byte value
-    const Register tbl = rdi; // scratch
+    const Register val = c_rarg0; // source java byte value
+    const Register tbl = c_rarg1; // scratch

     // Arguments are reversed on java expression stack
     __ movl(val, Address(rsp, wordSize)); // byte value
|
|||||||
|
|
||||||
// _areturn
|
// _areturn
|
||||||
__ pop(rdi); // get return address
|
__ pop(rdi); // get return address
|
||||||
__ mov(rsp, rsi); // set sp to sender sp
|
__ mov(rsp, r13); // set sp to sender sp
|
||||||
__ jmp(rdi);
|
__ jmp(rdi);
|
||||||
|
|
||||||
// generate a vanilla native entry as the slow path
|
// generate a vanilla native entry as the slow path
|
||||||
@ -919,20 +919,24 @@ address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpret
|
|||||||
const Register crc = c_rarg0; // crc
|
const Register crc = c_rarg0; // crc
|
||||||
const Register buf = c_rarg1; // source java byte array address
|
const Register buf = c_rarg1; // source java byte array address
|
||||||
const Register len = c_rarg2; // length
|
const Register len = c_rarg2; // length
|
||||||
|
const Register off = len; // offset (never overlaps with 'len')
|
||||||
|
|
||||||
// Arguments are reversed on java expression stack
|
// Arguments are reversed on java expression stack
|
||||||
__ movl(len, Address(rsp, wordSize)); // Length
|
|
||||||
// Calculate address of start element
|
// Calculate address of start element
|
||||||
if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
|
if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
|
||||||
__ movptr(buf, Address(rsp, 3*wordSize)); // long buf
|
__ movptr(buf, Address(rsp, 3*wordSize)); // long buf
|
||||||
__ addptr(buf, Address(rsp, 2*wordSize)); // + offset
|
__ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
|
||||||
|
__ addq(buf, off); // + offset
|
||||||
__ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
|
__ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
|
||||||
} else {
|
} else {
|
||||||
__ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
|
__ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
|
||||||
__ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
|
__ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
|
||||||
__ addptr(buf, Address(rsp, 2*wordSize)); // + offset
|
__ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
|
||||||
|
__ addq(buf, off); // + offset
|
||||||
__ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC
|
__ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC
|
||||||
}
|
}
|
||||||
|
// Can now load 'len' since we're finished with 'off'
|
||||||
|
__ movl(len, Address(rsp, wordSize)); // Length
|
||||||
|
|
||||||
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
|
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
|
||||||
// result in rax
|
// result in rax
|
||||||
|
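The subtlety in the CRC32 hunk: 'off' is deliberately the same register as 'len' (c_rarg2), so the length can only be loaded after the offset has been folded into 'buf'. A reduced model of that aliasing constraint, with made-up stack contents:

#include <cstdint>
#include <cstdio>

int main() {
  // Java expression stack, top at index 0: len, off, buf, unused, crc.
  uint64_t stack[] = { 5, 2, 0x1000, 0, 7 };

  uint64_t c_rarg2;            // one physical register...
  uint64_t& off = c_rarg2;     // ...backing both logical names
  uint64_t& len = c_rarg2;

  uint64_t buf = stack[2];
  off = stack[1];
  buf += off;                  // consume 'off' first
  len = stack[0];              // only now is it safe to load 'len'

  printf("buf=%#llx len=%llu\n",
         (unsigned long long)buf, (unsigned long long)len);
  return 0;
}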
@@ -211,11 +211,11 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
   if (is_vtable_stub) {
     // Vtable stub size
     return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ? 16 : 0); // 1 leaq can be 3 bytes + 1 long
+           (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   } else {
     // Itable stub size
     return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ? 32 : 0); // 2 leaqs
+           (UseCompressedKlassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
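The gist of this hunk: the vtable/itable stub byte budgets previously hard-coded the size of the decode sequence ("16" and "32"); they now ask the assembler, so a change to decode_klass_not_null() cannot silently overflow a stub. A reduced model with assumed numbers:

#include <cstdio>

static int instr_size_for_decode_klass_not_null() { return 24; } // assumed

static int pd_code_size_limit_sketch(bool is_vtable_stub, bool compressed) {
  int base = is_vtable_stub ? 24 : 74;   // fixed stub body, per the code above
  // The compressed-klass term comes from one helper, never a literal.
  return base + (compressed ? instr_size_for_decode_klass_not_null() : 0);
}

int main() {
  printf("%d\n", pd_code_size_limit_sketch(true, true));
  return 0;
}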
@@ -1393,9 +1393,7 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
   if (UseCompressedKlassPointers) {
     st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
-    if (Universe::narrow_klass_shift() != 0) {
-      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
-    }
+    st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
   } else {
     st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
@@ -4035,146 +4033,6 @@ operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 sca
   %}
 %}

-operand indirectNarrowKlass(rRegN reg)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(DecodeNKlass reg);
-
-  format %{ "[$reg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp(0x0);
-  %}
-%}
-
-operand indOffset8NarrowKlass(rRegN reg, immL8 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  format %{ "[$reg + $off (8-bit)]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indOffset32NarrowKlass(rRegN reg, immL32 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  format %{ "[$reg + $off (32-bit)]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index(0x4);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indIndexOffsetNarrowKlass(rRegN reg, rRegL lreg, immL32 off)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeNKlass reg) lreg) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $lreg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale(0x0);
-    disp($off);
-  %}
-%}
-
-operand indIndexNarrowKlass(rRegN reg, rRegL lreg)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) lreg);
-
-  op_cost(10);
-  format %{"[$reg + $lreg]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale(0x0);
-    disp(0x0);
-  %}
-%}
-
-operand indIndexScaleNarrowKlass(rRegN reg, rRegL lreg, immI2 scale)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) (LShiftL lreg scale));
-
-  op_cost(10);
-  format %{"[$reg + $lreg << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp(0x0);
-  %}
-%}
-
-operand indIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
-%{
-  predicate(Universe::narrow_klass_shift() == 0);
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (AddP (DecodeNKlass reg) (LShiftL lreg scale)) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $lreg << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($lreg);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
-operand indCompressedKlassOffset(rRegN reg, immL32 off) %{
-  predicate(UseCompressedKlassPointers && (Universe::narrow_klass_shift() == Address::times_8));
-  constraint(ALLOC_IN_RC(ptr_reg));
-  match(AddP (DecodeNKlass reg) off);
-
-  op_cost(10);
-  format %{"[R12 + $reg << 3 + $off] (compressed klass addressing)" %}
-  interface(MEMORY_INTER) %{
-    base(0xc); // R12
-    index($reg);
-    scale(0x3);
-    disp($off);
-  %}
-%}
-
-operand indPosIndexScaleOffsetNarrowKlass(rRegN reg, immL32 off, rRegI idx, immI2 scale)
-%{
-  constraint(ALLOC_IN_RC(ptr_reg));
-  predicate(Universe::narrow_klass_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
-  match(AddP (AddP (DecodeNKlass reg) (LShiftL (ConvI2L idx) scale)) off);
-
-  op_cost(10);
-  format %{"[$reg + $off + $idx << $scale]" %}
-  interface(MEMORY_INTER) %{
-    base($reg);
-    index($idx);
-    scale($scale);
-    disp($off);
-  %}
-%}
-
 //----------Special Memory Operands--------------------------------------------
 // Stack Slot Operand - This operand is used for loading and storing temporary
 // values on the stack where a match requires a value to

@@ -4345,11 +4203,7 @@ opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
               indCompressedOopOffset,
               indirectNarrow, indOffset8Narrow, indOffset32Narrow,
               indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
-              indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow,
-              indCompressedKlassOffset,
-              indirectNarrowKlass, indOffset8NarrowKlass, indOffset32NarrowKlass,
-              indIndexOffsetNarrowKlass, indIndexNarrowKlass, indIndexScaleNarrowKlass,
-              indIndexScaleOffsetNarrowKlass, indPosIndexScaleOffsetNarrowKlass);
+              indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);

 //----------PIPELINE-----------------------------------------------------------
 // Rules which define the behavior of the target architectures pipeline.
|
|||||||
instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
|
instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
|
||||||
match(Set dst (EncodePKlass src));
|
match(Set dst (EncodePKlass src));
|
||||||
effect(KILL cr);
|
effect(KILL cr);
|
||||||
format %{ "encode_heap_oop_not_null $dst,$src" %}
|
format %{ "encode_klass_not_null $dst,$src" %}
|
||||||
ins_encode %{
|
ins_encode %{
|
||||||
__ encode_klass_not_null($dst$$Register, $src$$Register);
|
__ encode_klass_not_null($dst$$Register, $src$$Register);
|
||||||
%}
|
%}
|
||||||
@ -6675,7 +6529,7 @@ instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
|
|||||||
instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
|
instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
|
||||||
match(Set dst (DecodeNKlass src));
|
match(Set dst (DecodeNKlass src));
|
||||||
effect(KILL cr);
|
effect(KILL cr);
|
||||||
format %{ "decode_heap_oop_not_null $dst,$src" %}
|
format %{ "decode_klass_not_null $dst,$src" %}
|
||||||
ins_encode %{
|
ins_encode %{
|
||||||
Register s = $src$$Register;
|
Register s = $src$$Register;
|
||||||
Register d = $dst$$Register;
|
Register d = $dst$$Register;
|
||||||
|
@@ -50,6 +50,7 @@ int AbstractAssembler::code_fill_byte() {
 #ifdef ASSERT
 bool AbstractAssembler::pd_check_instruction_mark() {
   ShouldNotCallThis();
+  return false;
 }
 #endif

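This and the following Zero hunks all follow one pattern: ShouldNotCallThis() aborts at runtime, but the compiler still sees a non-void function falling off the end and warns (or fails with -Werror=return-type), so each stub gains an unreachable dummy return. A reduced illustration:

#include <cstdlib>

// Stand-in for HotSpot's ShouldNotCallThis(), which reports and aborts.
#define ShouldNotCallThis() abort()

static const char* name() {
  ShouldNotCallThis();
  return NULL;   // never reached; exists only to satisfy -Wreturn-type
}

int main() {
  (void)name;    // not called: doing so would abort, as intended
  return 0;
}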
@@ -73,6 +74,7 @@ void MacroAssembler::advance(int bytes) {
 RegisterOrConstant MacroAssembler::delayed_value_impl(
   intptr_t* delayed_value_addr, Register tmpl, int offset) {
   ShouldNotCallThis();
+  return RegisterOrConstant();
 }

 void MacroAssembler::store_oop(jobject obj) {

@@ -1008,6 +1008,7 @@ void BytecodeInterpreter::layout_interpreterState(interpreterState istate,

 address CppInterpreter::return_entry(TosState state, int length) {
   ShouldNotCallThis();
+  return NULL;
 }

 address CppInterpreter::deopt_entry(TosState state, int length) {

@@ -116,6 +116,7 @@ void frame::patch_pc(Thread* thread, address pc) {

 bool frame::safe_for_sender(JavaThread *thread) {
   ShouldNotCallThis();
+  return false;
 }

 void frame::pd_gc_epilog() {

@@ -123,6 +124,7 @@ void frame::pd_gc_epilog() {

 bool frame::is_interpreted_frame_valid(JavaThread *thread) const {
   ShouldNotCallThis();
+  return false;
 }

 BasicType frame::interpreter_frame_result(oop* oop_result,

@@ -184,9 +186,8 @@ BasicType frame::interpreter_frame_result(oop* oop_result,
 int frame::frame_size(RegisterMap* map) const {
 #ifdef PRODUCT
   ShouldNotCallThis();
-#else
-  return 0; // make javaVFrame::print_value work
 #endif // PRODUCT
+  return 0; // make javaVFrame::print_value work
 }

 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {

@@ -36,7 +36,7 @@ inline frame::frame() {
   _deopt_state = unknown;
 }

-inline address frame::sender_pc() const { ShouldNotCallThis(); }
+inline address frame::sender_pc() const { ShouldNotCallThis(); return NULL; }

 inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   _zeroframe = zf;

@@ -89,6 +89,7 @@ inline intptr_t* frame::real_fp() const {

 inline intptr_t* frame::link() const {
   ShouldNotCallThis();
+  return NULL;
 }

 #ifdef CC_INTERP

@@ -151,14 +152,17 @@ inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {

 inline oop frame::saved_oop_result(RegisterMap* map) const {
   ShouldNotCallThis();
+  return NULL;
 }

 inline bool frame::is_older(intptr_t* id) const {
   ShouldNotCallThis();
+  return false;
 }

 inline intptr_t* frame::entry_frame_argument_at(int offset) const {
   ShouldNotCallThis();
+  return NULL;
 }

 inline intptr_t* frame::unextended_sp() const {

@@ -49,8 +49,10 @@ void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin,
 address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
   // NB ic_stub_code_size() must return the size of the code we generate
   ShouldNotCallThis();
+  return NULL;
 }

 void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
   ShouldNotCallThis();
+  return NULL;
 }

@@ -40,6 +40,7 @@ class InterpreterMacroAssembler : public MacroAssembler {
                                          Register tmp,
                                          int offset) {
     ShouldNotCallThis();
+    return RegisterOrConstant();
   }
 };

@@ -64,6 +64,7 @@ address InterpreterGenerator::generate_math_entry(
   return NULL;

   Unimplemented();
+  return NULL;
 }

 address InterpreterGenerator::generate_abstract_entry() {

@@ -51,15 +51,18 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  public:
   bool is_jump() {
     ShouldNotCallThis();
+    return false;
   }

   bool is_safepoint_poll() {
     ShouldNotCallThis();
+    return false;
   }
 };

 inline NativeInstruction* nativeInstruction_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }

 class NativeCall : public NativeInstruction {

@@ -70,18 +73,22 @@ class NativeCall : public NativeInstruction {

   address instruction_address() const {
     ShouldNotCallThis();
+    return NULL;
   }

   address next_instruction_address() const {
     ShouldNotCallThis();
+    return NULL;
   }

   address return_address() const {
     ShouldNotCallThis();
+    return NULL;
   }

   address destination() const {
     ShouldNotCallThis();
+    return NULL;
   }

   void set_destination_mt_safe(address dest) {

@@ -98,25 +105,30 @@ class NativeCall : public NativeInstruction {

   static bool is_call_before(address return_address) {
     ShouldNotCallThis();
+    return false;
   }
 };

 inline NativeCall* nativeCall_before(address return_address) {
   ShouldNotCallThis();
+  return NULL;
 }

 inline NativeCall* nativeCall_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }

 class NativeMovConstReg : public NativeInstruction {
  public:
   address next_instruction_address() const {
     ShouldNotCallThis();
+    return NULL;
   }

   intptr_t data() const {
     ShouldNotCallThis();
+    return 0;
   }

   void set_data(intptr_t x) {

@@ -126,12 +138,14 @@ class NativeMovConstReg : public NativeInstruction {

 inline NativeMovConstReg* nativeMovConstReg_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }

 class NativeMovRegMem : public NativeInstruction {
  public:
   int offset() const {
     ShouldNotCallThis();
+    return 0;
   }

   void set_offset(intptr_t x) {

@@ -145,6 +159,7 @@ class NativeMovRegMem : public NativeInstruction {

 inline NativeMovRegMem* nativeMovRegMem_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }

 class NativeJump : public NativeInstruction {

@@ -155,6 +170,7 @@ class NativeJump : public NativeInstruction {

   address jump_destination() const {
     ShouldNotCallThis();
+    return NULL;
   }

   void set_jump_destination(address dest) {

@@ -172,12 +188,14 @@ class NativeJump : public NativeInstruction {

 inline NativeJump* nativeJump_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }

 class NativeGeneralJump : public NativeInstruction {
  public:
   address jump_destination() const {
     ShouldNotCallThis();
+    return NULL;
   }

   static void insert_unconditional(address code_pos, address entry) {

@@ -191,6 +209,7 @@ class NativeGeneralJump : public NativeInstruction {

 inline NativeGeneralJump* nativeGeneralJump_at(address address) {
   ShouldNotCallThis();
+  return NULL;
 }

 #endif // CPU_ZERO_VM_NATIVEINST_ZERO_HPP

@@ -32,8 +32,10 @@ const int ConcreteRegisterImpl::max_fpr =

 const char* RegisterImpl::name() const {
   ShouldNotCallThis();
+  return NULL;
 }

 const char* FloatRegisterImpl::name() const {
   ShouldNotCallThis();
+  return NULL;
 }

@@ -37,6 +37,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {

 address Relocation::pd_call_destination(address orig_addr) {
   ShouldNotCallThis();
+  return NULL;
 }

 void Relocation::pd_set_call_destination(address x) {

@@ -45,6 +46,7 @@ void Relocation::pd_set_call_destination(address x) {

 address Relocation::pd_get_address_from_code() {
   ShouldNotCallThis();
+  return NULL;
 }

 address* Relocation::pd_address_in_code() {

@@ -89,6 +89,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                  ret_type);
 #else
   ShouldNotCallThis();
+  return NULL;
 #endif // SHARK
 }

@@ -99,6 +100,7 @@ int Deoptimization::last_frame_adjust(int callee_parameters,

 uint SharedRuntime::out_preserve_stack_slots() {
   ShouldNotCallThis();
+  return 0;
 }

 JRT_LEAF(void, zero_stub())

@@ -135,4 +137,5 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed) {
   ShouldNotCallThis();
+  return 0;
 }

@@ -39,16 +39,20 @@

 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   ShouldNotCallThis();
+  return NULL;
 }

 VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
   ShouldNotCallThis();
+  return NULL;
 }

 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
   ShouldNotCallThis();
+  return 0;
 }

 int VtableStub::pd_code_alignment() {
   ShouldNotCallThis();
+  return 0;
 }
@@ -642,13 +642,14 @@ objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction = NU
 #endif

 #ifdef __APPLE__
-static uint64_t locate_unique_thread_id() {
+static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) {
   // Additional thread_id used to correlate threads in SA
   thread_identifier_info_data_t m_ident_info;
   mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;

-  thread_info(::mach_thread_self(), THREAD_IDENTIFIER_INFO,
+  thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO,
               (thread_info_t) &m_ident_info, &count);

   return m_ident_info.thread_id;
 }
 #endif

@@ -679,9 +680,14 @@ static void *java_start(Thread *thread) {
   }

 #ifdef __APPLE__
-  // thread_id is mach thread on macos
-  osthread->set_thread_id(::mach_thread_self());
-  osthread->set_unique_thread_id(locate_unique_thread_id());
+  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
+  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(thread_id != 0, "thread id missing from pthreads");
+  osthread->set_thread_id(thread_id);
+
+  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  guarantee(unique_thread_id != 0, "unique thread id was not found");
+  osthread->set_unique_thread_id(unique_thread_id);
 #else
   // thread_id is pthread_id on BSD
   osthread->set_thread_id(::pthread_self());

@@ -843,8 +849,14 @@ bool os::create_attached_thread(JavaThread* thread) {

   // Store pthread info into the OSThread
 #ifdef __APPLE__
-  osthread->set_thread_id(::mach_thread_self());
-  osthread->set_unique_thread_id(locate_unique_thread_id());
+  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
+  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(thread_id != 0, "just checking");
+  osthread->set_thread_id(thread_id);
+
+  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  guarantee(unique_thread_id != 0, "just checking");
+  osthread->set_unique_thread_id(unique_thread_id);
 #else
   osthread->set_thread_id(::pthread_self());
 #endif

@@ -1115,7 +1127,7 @@ size_t os::lasterror(char *buf, size_t len) {

 intx os::current_thread_id() {
 #ifdef __APPLE__
-  return (intx)::mach_thread_self();
+  return (intx)::pthread_mach_thread_np(::pthread_self());
 #else
   return (intx)::pthread_self();
 #endif
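The theme of these hunks: every call to mach_thread_self() allocates a new mach port right that the caller must release, so using it repeatedly as a thread-id source leaks ports; pthread_mach_thread_np() returns the port that pthreads already caches for the thread, creating no new reference. A small macOS-only illustration of the difference:

#include <pthread.h>
#include <mach/mach.h>
#include <cstdio>

int main() {
  // No new port right: pthreads hands back its cached kernel port.
  mach_port_t cached = pthread_mach_thread_np(pthread_self());

  // Allocates a new right that we now own and must release.
  mach_port_t owned = mach_thread_self();
  printf("cached=%u owned=%u\n", cached, owned);
  mach_port_deallocate(mach_task_self(), owned);
  return 0;
}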
@@ -2313,7 +2325,9 @@ void os::large_page_init() {
 }


-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  fatal("This code is not used or maintained.");
+
   // "exec" is passed in but not used. Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");

@@ -3275,11 +3289,15 @@ void os::Bsd::install_signal_handlers() {
     // and if UserSignalHandler is installed all bets are off
     if (CheckJNICalls) {
       if (libjsig_is_loaded) {
-        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        if (PrintJNIResolving) {
+          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        }
         check_signals = false;
       }
       if (AllowUserSignalHandlers) {
-        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        if (PrintJNIResolving) {
+          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        }
         check_signals = false;
       }
     }

@@ -4736,3 +4754,8 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
   return n;
 }

+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
@@ -40,6 +40,9 @@
   product(bool, UseHugeTLBFS, false,                                    \
           "Use MAP_HUGETLB for large pages")                            \
                                                                         \
+  product(bool, UseTransparentHugePages, false,                         \
+          "Use MADV_HUGEPAGE for large pages")                          \
+                                                                        \
   product(bool, LoadExecStackDllInVMThread, true,                       \
           "Load DLLs with executable-stack attribute in the VM Thread") \
                                                                         \
@@ -2720,36 +2720,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,

 int os::Linux::commit_memory_impl(char* addr, size_t size,
                                   size_t alignment_hint, bool exec) {
-  int err;
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
-    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
-    uintptr_t res =
-      (uintptr_t) ::mmap(addr, size, prot,
-                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-                         -1, 0);
-    if (res != (uintptr_t) MAP_FAILED) {
-      if (UseNUMAInterleaving) {
-        numa_make_global(addr, size);
-      }
-      return 0;
-    }
-
-    err = errno; // save errno from mmap() call above
-
-    if (!recoverable_mmap_error(err)) {
-      // However, it is not clear that this loss of our reserved mapping
-      // happens with large pages on Linux or that we cannot recover
-      // from the loss. For now, we just issue a warning and we don't
-      // call vm_exit_out_of_memory(). This issue is being tracked by
-      // JBS-8007074.
-      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
-      // vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
-      //                       "committing reserved memory.");
-    }
-    // Fall through and try to use small pages
-  }
-
-  err = os::Linux::commit_memory_impl(addr, size, exec);
+  int err = os::Linux::commit_memory_impl(addr, size, exec);
   if (err == 0) {
     realign_memory(addr, size, alignment_hint);
   }
@@ -2774,7 +2745,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
 }

 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
+  if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
     ::madvise(addr, bytes, MADV_HUGEPAGE);

@@ -2787,7 +2758,7 @@ void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
   // small pages on top of the SHM segment. This method always works for small pages, so we
   // allow that in any case.
-  if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
+  if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
     commit_memory(addr, bytes, alignment_hint, !ExecMem);
   }
 }
|
|||||||
return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
|
return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
|
||||||
|
bool result = false;
|
||||||
|
void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
|
||||||
|
MAP_ANONYMOUS|MAP_PRIVATE,
|
||||||
|
-1, 0);
|
||||||
|
if (p != MAP_FAILED) {
|
||||||
|
void *aligned_p = align_ptr_up(p, page_size);
|
||||||
|
|
||||||
|
result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
|
||||||
|
|
||||||
|
munmap(p, page_size * 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (warn && !result) {
|
||||||
|
warning("TransparentHugePages is not supported by the operating system.");
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
|
bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
|
||||||
bool result = false;
|
bool result = false;
|
||||||
void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
|
void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
|
||||||
MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
|
MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
|
||||||
-1, 0);
|
-1, 0);
|
||||||
|
|
||||||
if (p != MAP_FAILED) {
|
if (p != MAP_FAILED) {
|
||||||
// We don't know if this really is a huge page or not.
|
// We don't know if this really is a huge page or not.
|
||||||
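Editor's note: the double-sized anonymous mapping in transparent_huge_pages_sanity_check exists so that a page_size-aligned address is guaranteed to fall inside the mapping; madvise(MADV_HUGEPAGE) is then attempted on that aligned page. A sketch of the align-up step (HotSpot uses align_ptr_up; the helper below is a hypothetical stand-in and assumes a power-of-two alignment):

#include <cstddef>
#include <cstdint>

// Round a pointer up to the next multiple of a power-of-two alignment.
static inline void* align_up_sketch(void* p, size_t alignment) {
  uintptr_t v = (uintptr_t)p;
  uintptr_t mask = (uintptr_t)alignment - 1;
  return (void*)((v + mask) & ~mask);
}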
@@ -3182,12 +3173,10 @@ bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
       }
       fclose(fp);
     }
-    munmap (p, page_size);
-    if (result)
-      return true;
+    munmap(p, page_size);
   }

-  if (warn) {
+  if (warn && !result) {
     warning("HugeTLBFS is not supported by the operating system.");
   }

@@ -3235,82 +3224,114 @@ static void set_coredump_filter(void) {

 static size_t _large_page_size = 0;

-void os::large_page_init() {
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
-    UseSHM = false;
-    return;
-  }
-
-  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // If UseLargePages is specified on the command line try both methods,
-    // if it's default, then try only HugeTLBFS.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseHugeTLBFS = true;
-    } else {
-      UseHugeTLBFS = UseSHM = true;
-    }
-  }
-
-  if (LargePageSizeInBytes) {
-    _large_page_size = LargePageSizeInBytes;
-  } else {
-    // large_page_size on Linux is used to round up heap size. x86 uses either
-    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
-    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
-    // page as large as 256M.
-    //
-    // Here we try to figure out page size by parsing /proc/meminfo and looking
-    // for a line with the following format:
-    //    Hugepagesize:     2048 kB
-    //
-    // If we can't determine the value (e.g. /proc is not mounted, or the text
-    // format has been changed), we'll use the largest page size supported by
-    // the processor.
+size_t os::Linux::find_large_page_size() {
+  size_t large_page_size = 0;
+
+  // large_page_size on Linux is used to round up heap size. x86 uses either
+  // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
+  // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
+  // page as large as 256M.
+  //
+  // Here we try to figure out page size by parsing /proc/meminfo and looking
+  // for a line with the following format:
+  //    Hugepagesize:     2048 kB
+  //
+  // If we can't determine the value (e.g. /proc is not mounted, or the text
+  // format has been changed), we'll use the largest page size supported by
+  // the processor.

 #ifndef ZERO
-    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
+  large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
                     ARM_ONLY(2 * M) PPC_ONLY(4 * M);
 #endif // ZERO

   FILE *fp = fopen("/proc/meminfo", "r");
   if (fp) {
     while (!feof(fp)) {
       int x = 0;
       char buf[16];
       if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
         if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-          _large_page_size = x * K;
+          large_page_size = x * K;
           break;
         }
       } else {
         // skip to next line
         for (;;) {
           int ch = fgetc(fp);
           if (ch == EOF || ch == (int)'\n') break;
-        }
         }
       }
-      fclose(fp);
     }
+    fclose(fp);
   }

-  // print a warning if any large page related flag is specified on command line
-  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+  if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
+    warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
+            SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
+            proper_unit_for_byte_size(large_page_size));
+  }
+
+  return large_page_size;
+}
+
+size_t os::Linux::setup_large_page_size() {
+  _large_page_size = Linux::find_large_page_size();
   const size_t default_page_size = (size_t)Linux::page_size();
   if (_large_page_size > default_page_size) {
     _page_sizes[0] = _large_page_size;
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
-  UseHugeTLBFS = UseHugeTLBFS &&
-                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);

-  if (UseHugeTLBFS)
+  return _large_page_size;
+}
+
+bool os::Linux::setup_large_page_type(size_t page_size) {
+  if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
+      FLAG_IS_DEFAULT(UseSHM) &&
+      FLAG_IS_DEFAULT(UseTransparentHugePages)) {
+    // If UseLargePages is specified on the command line try all methods,
+    // if it's default, then try only UseTransparentHugePages.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseTransparentHugePages = true;
+    } else {
+      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
+    }
+  }
+
+  if (UseTransparentHugePages) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
+    if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
+      UseHugeTLBFS = false;
+      UseSHM = false;
+      return true;
+    }
+    UseTransparentHugePages = false;
+  }
+
+  if (UseHugeTLBFS) {
+    bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
+    if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
+      UseSHM = false;
+      return true;
+    }
+    UseHugeTLBFS = false;
+  }
+
+  return UseSHM;
+}
+
+void os::large_page_init() {
+  if (!UseLargePages) {
+    UseHugeTLBFS = false;
+    UseTransparentHugePages = false;
     UseSHM = false;
+    return;
+  }

-  UseLargePages = UseHugeTLBFS || UseSHM;
+  size_t large_page_size = Linux::setup_large_page_size();
+  UseLargePages = Linux::setup_large_page_type(large_page_size);

   set_coredump_filter();
 }
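Editor's note: find_large_page_size above derives the default huge page size by scanning /proc/meminfo. A self-contained sketch of the same parse outside HotSpot (hypothetical helper; assumes the usual "Hugepagesize: 2048 kB" line format):

#include <cstddef>
#include <cstdio>

// Returns the default huge page size in bytes, or 0 if it cannot be read.
static size_t read_hugepagesize_sketch() {
  FILE* fp = fopen("/proc/meminfo", "r");
  if (fp == NULL) return 0;
  size_t result = 0;
  char line[128];
  while (fgets(line, sizeof(line), fp) != NULL) {
    long kb = 0;
    if (sscanf(line, "Hugepagesize: %ld kB", &kb) == 1) {
      result = (size_t)kb * 1024;
      break;
    }
  }
  fclose(fp);
  return result;
}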
@@ -3319,16 +3340,22 @@ void os::large_page_init() {
 #define SHM_HUGETLB 04000
 #endif

-char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
+char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   // "exec" is passed in but not used. Creating the shared image for
   // the code cache doesn't have an SHM_X executable permission to check.
   assert(UseLargePages && UseSHM, "only for SHM large pages");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }

   key_t key = IPC_PRIVATE;
   char *addr;

   bool warn_on_failure = UseLargePages &&
                         (!FLAG_IS_DEFAULT(UseLargePages) ||
+                         !FLAG_IS_DEFAULT(UseSHM) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                         );
   char msg[128];
@@ -3376,42 +3403,219 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
     return NULL;
   }

-  if ((addr != NULL) && UseNUMAInterleaving) {
-    numa_make_global(addr, bytes);
+  return addr;
+}
+
+static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
+  assert(error == ENOMEM, "Only expect to fail if no memory is available");
+
+  bool warn_on_failure = UseLargePages &&
+      (!FLAG_IS_DEFAULT(UseLargePages) ||
+       !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
+       !FLAG_IS_DEFAULT(LargePageSizeInBytes));
+
+  if (warn_on_failure) {
+    char msg[128];
+    jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
+        PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
+    warning(msg);
+  }
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
+  assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+  char* addr = (char*)::mmap(req_addr, bytes, prot,
+                             MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
+                             -1, 0);
+
+  if (addr == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    return NULL;
   }

-  // The memory is committed
-  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");

   return addr;
 }
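Editor's note: the reserve_memory_special_huge_tlbfs_mixed function added in the remainder of this hunk (below) reserves the whole range with small pages and then remaps only the large-page-aligned middle with MAP_HUGETLB. A sketch of the split arithmetic it relies on, assuming a power-of-two large page size (names here are hypothetical, not the changeset's):

#include <cstddef>
#include <cstdint>

struct RegionSplit { char* lp_start; char* lp_end; };

// Given a small-page reservation [start, start+bytes), compute the
// large-page-aligned middle [lp_start, lp_end) that can be remapped
// with huge pages; the head and tail stay on small pages.
static RegionSplit split_for_large_pages(char* start, size_t bytes, size_t lp_size) {
  uintptr_t s = (uintptr_t)start;
  uintptr_t e = s + bytes;
  uintptr_t mask = (uintptr_t)lp_size - 1;   // lp_size must be a power of two
  RegionSplit r;
  r.lp_start = (char*)((s + mask) & ~mask);  // align start up
  r.lp_end   = (char*)(e & ~mask);           // align end down
  return r;
}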

+char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  size_t large_page_size = os::large_page_size();
+
+  assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
+
+  // Allocate small pages.
+
+  char* start;
+  if (req_addr != NULL) {
+    assert(is_ptr_aligned(req_addr, alignment), "Must be");
+    assert(is_size_aligned(bytes, alignment), "Must be");
+    start = os::reserve_memory(bytes, req_addr);
+    assert(start == NULL || start == req_addr, "Must be");
+  } else {
+    start = os::reserve_memory_aligned(bytes, alignment);
+  }
+
+  if (start == NULL) {
+    return NULL;
+  }
+
+  assert(is_ptr_aligned(start, alignment), "Must be");
+
+  // os::reserve_memory_special will record this memory area.
+  // Need to release it here to prevent overlapping reservations.
+  MemTracker::record_virtual_memory_release((address)start, bytes);
+
+  char* end = start + bytes;
+
+  // Find the regions of the allocated chunk that can be promoted to large pages.
+  char* lp_start = (char*)align_ptr_up(start, large_page_size);
+  char* lp_end = (char*)align_ptr_down(end, large_page_size);
+
+  size_t lp_bytes = lp_end - lp_start;
+
+  assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
+
+  if (lp_bytes == 0) {
+    // The mapped region doesn't even span the start and the end of a large page.
+    // Fall back to allocate a non-special area.
+    ::munmap(start, end - start);
+    return NULL;
+  }
+
+  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+
+  void* result;
+
+  if (start != lp_start) {
+    result = ::mmap(start, lp_start - start, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(lp_start, end - lp_start);
+      return NULL;
+    }
+  }
+
+  result = ::mmap(lp_start, lp_bytes, prot,
+                  MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
+                  -1, 0);
+  if (result == MAP_FAILED) {
+    warn_on_large_pages_failure(req_addr, bytes, errno);
+    // If the mmap above fails, the large pages region will be unmapped and we
+    // have regions before and after with small pages. Release these regions.
+    //
+    // |  mapped  |  unmapped  |  mapped  |
+    // ^          ^            ^          ^
+    // start      lp_start     lp_end     end
+    //
+    ::munmap(start, lp_start - start);
+    ::munmap(lp_end, end - lp_end);
+    return NULL;
+  }
+
+  if (lp_end != end) {
+    result = ::mmap(lp_end, end - lp_end, prot,
+                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+                    -1, 0);
+    if (result == MAP_FAILED) {
+      ::munmap(start, lp_end - start);
+      return NULL;
+    }
+  }
+
+  return start;
+}
+
+char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
+  assert(is_ptr_aligned(req_addr, alignment), "Must be");
+  assert(is_power_of_2(alignment), "Must be");
+  assert(is_power_of_2(os::large_page_size()), "Must be");
+  assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
+
+  if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
+    return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
+  } else {
+    return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
+  }
+}
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  char* addr;
+  if (UseSHM) {
+    addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
+  }
+
+  if (addr != NULL) {
+    if (UseNUMAInterleaving) {
+      numa_make_global(addr, bytes);
+    }
+
+    // The memory is committed
+    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
+  }
+
+  return addr;
+}
+
+bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
+  // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
+  return shmdt(base) == 0;
+}
+
+bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
+  return pd_release_memory(base, bytes);
+}
+
 bool os::release_memory_special(char* base, size_t bytes) {
+  assert(UseLargePages, "only for large pages");
+
   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
-  // detaching the SHM segment will also delete it, see reserve_memory_special()
-  int rslt = shmdt(base);
-  if (rslt == 0) {
+  bool res;
+  if (UseSHM) {
+    res = os::Linux::release_memory_special_shm(base, bytes);
+  } else {
+    assert(UseHugeTLBFS, "must be");
+    res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
+  }
+
+  if (res) {
     tkr.record((address)base, bytes);
-    return true;
   } else {
     tkr.discard();
-    return false;
   }
+
+  return res;
 }

 size_t os::large_page_size() {
   return _large_page_size;
 }

-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
+// With SysV SHM the entire memory region must be allocated as shared
 // memory.
+// HugeTLBFS allows application to commit large page memory on demand.
+// However, when committing memory with HugeTLBFS fails, the region
+// that was supposed to be committed will lose the old reservation
+// and allow other threads to steal that memory region. Because of this
+// behavior we can't commit HugeTLBFS memory.
 bool os::can_commit_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages;
 }

 bool os::can_execute_large_page_memory() {
-  return UseHugeTLBFS;
+  return UseTransparentHugePages || UseHugeTLBFS;
 }

 // Reserve memory at an arbitrary address, only if that area is
@@ -4563,21 +4767,23 @@ jint os::init_2(void)
       UseNUMA = false;
     }
   }
-  // With SHM large pages we cannot uncommit a page, so there's not way
+  // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
   // we can make the adaptive lgrp chunk resizing work. If the user specified
-  // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
+  // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
   // disable adaptive resizing.
-  if (UseNUMA && UseLargePages && UseSHM) {
-    if (!FLAG_IS_DEFAULT(UseNUMA)) {
-      if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
+  if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+    if (FLAG_IS_DEFAULT(UseNUMA)) {
+      UseNUMA = false;
+    } else {
+      if (FLAG_IS_DEFAULT(UseLargePages) &&
+          FLAG_IS_DEFAULT(UseSHM) &&
+          FLAG_IS_DEFAULT(UseHugeTLBFS)) {
         UseLargePages = false;
       } else {
-        warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
+        warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
         UseAdaptiveSizePolicy = false;
         UseAdaptiveNUMAChunkSizing = false;
       }
-    } else {
-      UseNUMA = false;
     }
   }
   if (!UseNUMA && ForceNUMA) {
@@ -5848,3 +6054,149 @@ void MemNotifyThread::start() {
 }

 #endif // JAVASE_EMBEDDED
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+#define test_log(...) \
+  do {\
+    if (VerboseInternalVMTests) { \
+      tty->print_cr(__VA_ARGS__); \
+      tty->flush(); \
+    }\
+  } while (false)
+
+class TestReserveMemorySpecial : AllStatic {
+ public:
+  static void small_page_write(void* addr, size_t size) {
+    size_t page_size = os::vm_page_size();
+
+    char* end = (char*)addr + size;
+    for (char* p = (char*)addr; p < end; p += page_size) {
+      *p = 1;
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_only() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    size_t lp = os::large_page_size();
+
+    for (size_t size = lp; size <= lp * 10; size += lp) {
+      test_reserve_memory_special_huge_tlbfs_only(size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
+        size, alignment);
+
+    assert(size >= os::large_page_size(), "Incorrect input to test");
+
+    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_huge_tlbfs(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
+    }
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs_mixed() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
+    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
+  }
+
+  static void test_reserve_memory_special_huge_tlbfs() {
+    if (!UseHugeTLBFS) {
+      return;
+    }
+
+    test_reserve_memory_special_huge_tlbfs_only();
+    test_reserve_memory_special_huge_tlbfs_mixed();
+  }
+
+  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
+    if (!UseSHM) {
+      return;
+    }
+
+    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
+
+    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
+
+    if (addr != NULL) {
+      assert(is_ptr_aligned(addr, alignment), "Check");
+      assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
+
+      small_page_write(addr, size);
+
+      os::Linux::release_memory_special_shm(addr, size);
+    }
+  }
+
+  static void test_reserve_memory_special_shm() {
+    size_t lp = os::large_page_size();
+    size_t ag = os::vm_allocation_granularity();
+
+    for (size_t size = ag; size < lp * 3; size += ag) {
+      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
+        test_reserve_memory_special_shm(size, alignment);
+      }
+    }
+  }
+
+  static void test() {
+    test_reserve_memory_special_huge_tlbfs();
+    test_reserve_memory_special_shm();
+  }
+};
+
+void TestReserveMemorySpecial_test() {
+  TestReserveMemorySpecial::test();
+}
+
+#endif
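Editor's note: these in-VM unit tests are compiled only into non-product builds. In HotSpot builds of this vintage they are typically driven through the internal test runner of a debug build; an assumed invocation (the flag names are an assumption on my part, not stated in this changeset):

java -XX:+ExecuteInternalVMTests -XX:+VerboseInternalVMTests -version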
@@ -32,6 +32,7 @@ typedef int (*pthread_getattr_func_type) (pthread_t, pthread_attr_t *);

 class Linux {
   friend class os;
+  friend class TestReserveMemorySpecial;

   // For signal-chaining
 #define MAXSIGNUM 32
@@ -92,8 +93,21 @@ class Linux {
   static void rebuild_cpu_to_node_map();
   static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }

+  static size_t find_large_page_size();
+  static size_t setup_large_page_size();
+
+  static bool setup_large_page_type(size_t page_size);
+  static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
   static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
+
+  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
+  static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
+
+  static bool release_memory_special_shm(char* base, size_t bytes);
+  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
+
   static void print_full_memory_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -260,6 +260,55 @@ FILE* os::open(int fd, const char* mode) {
   return ::fdopen(fd, mode);
 }

+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
+
+// Builds a platform dependent Agent_OnLoad_<lib_name> function name
+// which is used to find statically linked in agents.
+// Parameters:
+//            sym_name: Symbol in library we are looking for
+//            lib_name: Name of library to look in, NULL for shared libs.
+//            is_absolute_path == true if lib_name is absolute path to agent
+//                                     such as "/a/b/libL.so"
+//            == false if only the base name of the library is passed in
+//                                     such as "L"
+char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
+                                    bool is_absolute_path) {
+  char *agent_entry_name;
+  size_t len;
+  size_t name_len;
+  size_t prefix_len = strlen(JNI_LIB_PREFIX);
+  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
+  const char *start;
+
+  if (lib_name != NULL) {
+    len = name_len = strlen(lib_name);
+    if (is_absolute_path) {
+      // Need to strip path, prefix and suffix
+      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
+        lib_name = ++start;
+      }
+      if (len <= (prefix_len + suffix_len)) {
+        return NULL;
+      }
+      lib_name += prefix_len;
+      name_len = strlen(lib_name) - suffix_len;
+    }
+  }
+  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
+  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
+  if (agent_entry_name == NULL) {
+    return NULL;
+  }
+  strcpy(agent_entry_name, sym_name);
+  if (lib_name != NULL) {
+    strcat(agent_entry_name, "_");
+    strncat(agent_entry_name, lib_name, name_len);
+  }
+  return agent_entry_name;
+}
+
 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
 }
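Editor's illustration (my example, not from the changeset): given sym_name "Agent_OnLoad" and an absolute lib_name of "/a/b/libL.so", the path, the "lib" prefix and the ".so" suffix are stripped, and the generated lookup symbol becomes:

Agent_OnLoad_L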
@@ -3385,7 +3385,7 @@ bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
   return true;
 }

-char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
@@ -6601,3 +6601,9 @@ int os::get_core_path(char* buffer, size_t bufferSize) {

   return strlen(buffer);
 }
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
@@ -3156,7 +3156,12 @@ bool os::can_execute_large_page_memory() {
   return true;
 }

-char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
+  assert(UseLargePages, "only for large pages");
+
+  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
+    return NULL; // Fallback to small pages.
+  }

   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
@@ -5394,6 +5399,75 @@ inline BOOL os::Advapi32Dll::AdvapiAvailable() {
   return true;
 }

+void* os::get_default_process_handle() {
+  return (void*)GetModuleHandle(NULL);
+}
+
+// Builds a platform dependent Agent_OnLoad_<lib_name> function name
+// which is used to find statically linked in agents.
+// Additionally for windows, takes into account __stdcall names.
+// Parameters:
+//            sym_name: Symbol in library we are looking for
+//            lib_name: Name of library to look in, NULL for shared libs.
+//            is_absolute_path == true if lib_name is absolute path to agent
+//                                     such as "C:/a/b/L.dll"
+//            == false if only the base name of the library is passed in
+//                                     such as "L"
+char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
+                                    bool is_absolute_path) {
+  char *agent_entry_name;
+  size_t len;
+  size_t name_len;
+  size_t prefix_len = strlen(JNI_LIB_PREFIX);
+  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
+  const char *start;
+
+  if (lib_name != NULL) {
+    len = name_len = strlen(lib_name);
+    if (is_absolute_path) {
+      // Need to strip path, prefix and suffix
+      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
+        lib_name = ++start;
+      } else {
+        // Need to check for C:
+        if ((start = strchr(lib_name, ':')) != NULL) {
+          lib_name = ++start;
+        }
+      }
+      if (len <= (prefix_len + suffix_len)) {
+        return NULL;
+      }
+      lib_name += prefix_len;
+      name_len = strlen(lib_name) - suffix_len;
+    }
+  }
+  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
+  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
+  if (agent_entry_name == NULL) {
+    return NULL;
+  }
+  if (lib_name != NULL) {
+    const char *p = strrchr(sym_name, '@');
+    if (p != NULL && p != sym_name) {
+      // sym_name == _Agent_OnLoad@XX
+      strncpy(agent_entry_name, sym_name, (p - sym_name));
+      agent_entry_name[(p-sym_name)] = '\0';
+      // agent_entry_name == _Agent_OnLoad
+      strcat(agent_entry_name, "_");
+      strncat(agent_entry_name, lib_name, name_len);
+      strcat(agent_entry_name, p);
+      // agent_entry_name == _Agent_OnLoad_lib_name@XX
+    } else {
+      strcpy(agent_entry_name, sym_name);
+      strcat(agent_entry_name, "_");
+      strncat(agent_entry_name, lib_name, name_len);
+    }
+  } else {
+    strcpy(agent_entry_name, sym_name);
+  }
+  return agent_entry_name;
+}
+
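Editor's illustration (my example, not from the changeset): a 32-bit __stdcall export arrives already decorated, e.g. sym_name "_Agent_OnLoad@8"; with lib_name "L" the decoration is preserved after the inserted library name:

_Agent_OnLoad_L@8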
 #else
 // Kernel32 API
 typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
@@ -5638,3 +5712,9 @@ BOOL os::Advapi32Dll::AdvapiAvailable() {
 }

 #endif
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
@@ -190,7 +190,7 @@ inline void OrderAccess::release_store_fence(volatile juint* p, juint v)
 inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store_fence((volatile jlong*)p, (jlong)v); }

 inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { *p = v; fence(); }
-inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jdouble_cast(v)); }
+inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }

 inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
 #ifdef AMD64
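Editor's note: the fix above matters because a release_store_fence on a jdouble must reinterpret the 64-bit pattern of the double as a jlong (what jlong_cast does), not convert in the opposite direction. A minimal sketch of the bit-preserving conversion (hypothetical helper name):

#include <cstdint>
#include <cstring>

// Reinterpret the 64 bits of a double as a signed 64-bit integer
// without changing the bit pattern.
static inline int64_t bits_of(double v) {
  int64_t bits;
  memcpy(&bits, &v, sizeof(bits));
  return bits;
}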
@@ -715,6 +715,7 @@ JVM_handle_bsd_signal(int sig,
   err.report_and_die();

   ShouldNotReachHere();
+  return false;
 }

 // From solaris_i486.s ported to bsd_i486.s
@@ -66,6 +66,7 @@ address os::current_stack_pointer() {

 frame os::get_sender_for_C_frame(frame* fr) {
   ShouldNotCallThis();
+  return frame();
 }

 frame os::current_frame() {
@@ -103,16 +104,19 @@ void os::initialize_thread(Thread* thr) {

 address os::Bsd::ucontext_get_pc(ucontext_t* uc) {
   ShouldNotCallThis();
+  return NULL;
 }

 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                         intptr_t** ret_sp,
                                         intptr_t** ret_fp) {
   ShouldNotCallThis();
+  return ExtendedPC();
 }

 frame os::fetch_frame_from_context(void* ucVoid) {
   ShouldNotCallThis();
+  return frame();
 }

 extern "C" JNIEXPORT int
@@ -240,6 +244,7 @@ JVM_handle_bsd_signal(int sig,

   sprintf(buf, fmt, sig, info->si_addr);
   fatal(buf);
+  return false;
 }

 void os::Bsd::init_thread_fpu_state(void) {
@@ -373,17 +378,7 @@ void os::print_register_info(outputStream *st, void *context) {

 extern "C" {
   int SpinPause() {
-  }
-
-  int SafeFetch32(int *adr, int errValue) {
-    int value = errValue;
-    value = *adr;
-    return value;
-  }
-
-  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
-    intptr_t value = errValue;
-    value = *adr;
-    return value;
+    return 1;
   }

   void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
@@ -110,6 +110,7 @@
                                          void* ucontext,
                                          bool isInJava) {
   ShouldNotCallThis();
+  return false;
 }

 // These routines are only used on cpu architectures that
@@ -915,16 +915,6 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
       // Return to the now deoptimized frame.
     }

-    // If we are patching in a non-perm oop, make sure the nmethod
-    // is on the right list.
-    if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
-      MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
-      guarantee(nm != NULL, "only nmethods can contain non-perm oops");
-      if (!nm->on_scavenge_root_list())
-        CodeCache::add_scavenge_root_nmethod(nm);
-    }
-
     // Now copy code back

     {
@@ -1125,6 +1115,21 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
       }
     }
   }
+
+  // If we are patching in a non-perm oop, make sure the nmethod
+  // is on the right list.
+  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
+    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
+    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
+    if (!nm->on_scavenge_root_list()) {
+      CodeCache::add_scavenge_root_nmethod(nm);
+    }
+
+    // Since we've patched some oops in the nmethod,
+    // (re)register it with the heap.
+    Universe::heap()->register_nmethod(nm);
+  }
 JRT_END

 //
@@ -2590,7 +2590,7 @@ void ClassFileParser::parse_classfile_sourcefile_attribute(TRAPS) {
     valid_symbol_at(sourcefile_index),
     "Invalid SourceFile attribute at constant pool index %u in class file %s",
     sourcefile_index, CHECK);
-  set_class_sourcefile(_cp->symbol_at(sourcefile_index));
+  set_class_sourcefile_index(sourcefile_index);
 }


@@ -2728,7 +2728,7 @@ void ClassFileParser::parse_classfile_signature_attribute(TRAPS) {
     valid_symbol_at(signature_index),
     "Invalid constant pool index %u in Signature attribute in class file %s",
     signature_index, CHECK);
-  set_class_generic_signature(_cp->symbol_at(signature_index));
+  set_class_generic_signature_index(signature_index);
 }

 void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_byte_length, TRAPS) {
@@ -2975,13 +2975,11 @@ void ClassFileParser::parse_classfile_attributes(ClassFileParser::ClassAnnotatio
 void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) {
   if (_synthetic_flag)
     k->set_is_synthetic();
-  if (_sourcefile != NULL) {
-    _sourcefile->increment_refcount();
-    k->set_source_file_name(_sourcefile);
+  if (_sourcefile_index != 0) {
+    k->set_source_file_name_index(_sourcefile_index);
   }
-  if (_generic_signature != NULL) {
-    _generic_signature->increment_refcount();
-    k->set_generic_signature(_generic_signature);
+  if (_generic_signature_index != 0) {
+    k->set_generic_signature_index(_generic_signature_index);
   }
   if (_sde_buffer != NULL) {
     k->set_source_debug_extension(_sde_buffer, _sde_length);
@@ -62,8 +62,8 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
   bool _synthetic_flag;
   int _sde_length;
   char* _sde_buffer;
-  Symbol* _sourcefile;
-  Symbol* _generic_signature;
+  u2 _sourcefile_index;
+  u2 _generic_signature_index;

   // Metadata created before the instance klass is created. Must be deallocated
   // if not transferred to the InstanceKlass upon successful class loading
@@ -81,16 +81,16 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
   Array<AnnotationArray*>* _fields_type_annotations;
   InstanceKlass* _klass;  // InstanceKlass once created.

   void set_class_synthetic_flag(bool x)        { _synthetic_flag = x; }
-  void set_class_sourcefile(Symbol* x)         { _sourcefile = x; }
-  void set_class_generic_signature(Symbol* x)  { _generic_signature = x; }
+  void set_class_sourcefile_index(u2 x)        { _sourcefile_index = x; }
+  void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
   void set_class_sde_buffer(char* x, int len)  { _sde_buffer = x; _sde_length = len; }

   void init_parsed_class_attributes(ClassLoaderData* loader_data) {
     _loader_data = loader_data;
     _synthetic_flag = false;
-    _sourcefile = NULL;
-    _generic_signature = NULL;
+    _sourcefile_index = 0;
+    _generic_signature_index = 0;
     _sde_buffer = NULL;
     _sde_length = 0;
     // initialize the other flags too:
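Editor's note: storing the u2 constant-pool index instead of a Symbol* removes the manual increment_refcount() calls; the name can be resolved lazily from the constant pool when it is actually needed. A hedged sketch of such lazy resolution (a hypothetical free function, not the changeset's exact accessor):

// Hypothetical accessor: resolve the SourceFile name on demand from the
// constant pool instead of holding a reference-counted Symbol*.
Symbol* source_file_name_sketch(ConstantPool* cp, u2 sourcefile_index) {
  return sourcefile_index == 0 ? NULL : cp->symbol_at(sourcefile_index);
}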
@@ -687,6 +687,7 @@ nmethod::nmethod(
     code_buffer->copy_values_to(this);
     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
       CodeCache::add_scavenge_root_nmethod(this);
+      Universe::heap()->register_nmethod(this);
     }
     debug_only(verify_scavenge_root_oops());
     CodeCache::commit(this);
@@ -881,6 +882,7 @@ nmethod::nmethod(
     dependencies->copy_to(this);
     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
       CodeCache::add_scavenge_root_nmethod(this);
+      Universe::heap()->register_nmethod(this);
     }
     debug_only(verify_scavenge_root_oops());

@@ -1300,6 +1302,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   methodHandle the_method(method());
   No_Safepoint_Verifier nsv;

+  // during patching, depending on the nmethod state we must notify the GC that
+  // code has been unloaded, unregistering it. We cannot do this right while
+  // holding the Patching_lock because we need to use the CodeCache_lock. This
+  // would be prone to deadlocks.
+  // This flag is used to remember whether we need to later lock and unregister.
+  bool nmethod_needs_unregister = false;
+
   {
     // invalidate osr nmethod before acquiring the patching lock since
     // they both acquire leaf locks and we don't want a deadlock.
@@ -1332,6 +1341,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
       inc_decompile_count();
     }

+    // If the state is becoming a zombie, signal to unregister the nmethod with
+    // the heap.
+    // This nmethod may have already been unloaded during a full GC.
+    if ((state == zombie) && !is_unloaded()) {
+      nmethod_needs_unregister = true;
+    }
+
     // Change state
     _state = state;

@@ -1367,6 +1383,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
     // safepoint can sneak in, otherwise the oops used by the
     // dependency logic could have become stale.
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    if (nmethod_needs_unregister) {
+      Universe::heap()->unregister_nmethod(this);
+    }
     flush_dependencies(NULL);
   }

@@ -1817,21 +1836,10 @@ void nmethod::metadata_do(void f(Metadata*)) {
   if (_method != NULL) f(_method);
 }

-// This method is called twice during GC -- once while
-// tracing the "active" nmethods on thread stacks during
-// the (strong) marking phase, and then again when walking
-// the code cache contents during the weak roots processing
-// phase. The two uses are distinguished by means of the
-// 'do_strong_roots_only' flag, which is true in the first
-// case. We want to walk the weak roots in the nmethod
-// only in the second case. The weak roots in the nmethod
-// are the oops in the ExceptionCache and the InlineCache
-// oops.
-void nmethod::oops_do(OopClosure* f, bool do_strong_roots_only) {
+void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
   // make sure the oops ready to receive visitors
-  assert(!is_zombie() && !is_unloaded(),
-         "should not call follow on zombie or unloaded nmethod");
+  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
+  assert(!is_unloaded(), "should not call follow on unloaded nmethod");

   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes. If an oop in the old code was there, that oop
@@ -566,7 +566,7 @@ public:
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
   void oops_do(OopClosure* f) { oops_do(f, false); }
-  void oops_do(OopClosure* f, bool do_strong_roots_only);
+  void oops_do(OopClosure* f, bool allow_zombie);
   bool detect_scavenge_root_oops();
   void verify_scavenge_root_oops() PRODUCT_RETURN;

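Editor's note: the nmethod_needs_unregister flag added above is a deferred-cleanup pattern: the decision is made while the Patching_lock is held, but the GC unregistration happens later under the CodeCache_lock, so the two locks are never nested in a deadlock-prone order. A generic sketch of the shape (illustrative only; the locks and helpers below are hypothetical):

#include <mutex>

std::mutex lock_a, lock_b;
bool compute_state_change();  // hypothetical: runs under lock_a
void do_cleanup();            // hypothetical: must run under lock_b

void state_transition_sketch() {
  bool needs_cleanup = false;
  {
    std::lock_guard<std::mutex> a(lock_a);  // e.g. Patching_lock
    needs_cleanup = compute_state_change();
  }
  // Act after releasing lock_a, so acquiring lock_b cannot invert the order.
  if (needs_cleanup) {
    std::lock_guard<std::mutex> b(lock_b);  // e.g. CodeCache_lock
    do_cleanup();
  }
}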
@ -3460,7 +3460,9 @@ void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
|
|||||||
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
|
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
|
||||||
assert_locked_or_safepoint(Heap_lock);
|
assert_locked_or_safepoint(Heap_lock);
|
||||||
size_t size = ReservedSpace::page_align_size_down(bytes);
|
size_t size = ReservedSpace::page_align_size_down(bytes);
|
||||||
if (size > 0) {
|
// Only shrink if a compaction was done so that all the free space
|
||||||
|
// in the generation is in a contiguous block at the end.
|
||||||
|
if (size > 0 && did_compact()) {
|
||||||
shrink_by(size);
|
shrink_by(size);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -5478,40 +5480,42 @@ CMSParMarkTask::do_young_space_rescan(uint worker_id,
   HandleMark hm;
 
   SequentialSubTasksDone* pst = space->par_seq_tasks();
-  assert(pst->valid(), "Uninitialized use?");
 
   uint nth_task = 0;
   uint n_tasks  = pst->n_tasks();
 
-  HeapWord *start, *end;
-  while (!pst->is_task_claimed(/* reference */ nth_task)) {
-    // We claimed task # nth_task; compute its boundaries.
-    if (chunk_top == 0) {  // no samples were taken
-      assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
-      start = space->bottom();
-      end   = space->top();
-    } else if (nth_task == 0) {
-      start = space->bottom();
-      end   = chunk_array[nth_task];
-    } else if (nth_task < (uint)chunk_top) {
-      assert(nth_task >= 1, "Control point invariant");
-      start = chunk_array[nth_task - 1];
-      end   = chunk_array[nth_task];
-    } else {
-      assert(nth_task == (uint)chunk_top, "Control point invariant");
-      start = chunk_array[chunk_top - 1];
-      end   = space->top();
-    }
-    MemRegion mr(start, end);
-    // Verify that mr is in space
-    assert(mr.is_empty() || space->used_region().contains(mr),
-           "Should be in space");
-    // Verify that "start" is an object boundary
-    assert(mr.is_empty() || oop(mr.start())->is_oop(),
-           "Should be an oop");
-    space->par_oop_iterate(mr, cl);
-  }
-  pst->all_tasks_completed();
+  if (n_tasks > 0) {
+    assert(pst->valid(), "Uninitialized use?");
+    HeapWord *start, *end;
+    while (!pst->is_task_claimed(/* reference */ nth_task)) {
+      // We claimed task # nth_task; compute its boundaries.
+      if (chunk_top == 0) {  // no samples were taken
+        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
+        start = space->bottom();
+        end   = space->top();
+      } else if (nth_task == 0) {
+        start = space->bottom();
+        end   = chunk_array[nth_task];
+      } else if (nth_task < (uint)chunk_top) {
+        assert(nth_task >= 1, "Control point invariant");
+        start = chunk_array[nth_task - 1];
+        end   = chunk_array[nth_task];
+      } else {
+        assert(nth_task == (uint)chunk_top, "Control point invariant");
+        start = chunk_array[chunk_top - 1];
+        end   = space->top();
+      }
+      MemRegion mr(start, end);
+      // Verify that mr is in space
+      assert(mr.is_empty() || space->used_region().contains(mr),
+             "Should be in space");
+      // Verify that "start" is an object boundary
+      assert(mr.is_empty() || oop(mr.start())->is_oop(),
+             "Should be an oop");
+      space->par_oop_iterate(mr, cl);
+    }
+    pst->all_tasks_completed();
+  }
 }
 
 void
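The rescan hunk above defers the pst->valid() assert until it is known that n_tasks > 0, so an empty eden (zero sampled chunks) no longer trips the assert or runs the completion handshake. A small self-contained sketch of the claim-loop-behind-a-guard idea, using a hypothetical SubTasks type in place of SequentialSubTasksDone:

    #include <atomic>
    #include <cstdio>

    // Minimal stand-in for SequentialSubTasksDone-style task claiming:
    // workers atomically claim task indices until all are taken.
    struct SubTasks {
      std::atomic<unsigned> next{0};
      unsigned n_tasks = 0;

      // Returns true once all tasks are claimed; otherwise stores a
      // freshly claimed index into 'task'.
      bool is_task_claimed(unsigned& task) {
        task = next.fetch_add(1);
        return task >= n_tasks;
      }
    };

    void do_rescan(SubTasks& pst) {
      unsigned nth_task = 0;
      // Guard: with zero tasks (e.g. an empty eden) there is nothing to
      // claim and no completion handshake to run.
      if (pst.n_tasks > 0) {
        while (!pst.is_task_claimed(nth_task)) {
          std::printf("worker scans chunk %u\n", nth_task);
        }
      }
    }

    int main() {
      SubTasks empty;              // n_tasks == 0: loop skipped entirely
      do_rescan(empty);
      SubTasks three;
      three.n_tasks = 3;
      do_rescan(three);            // claims chunks 0, 1, 2
    }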
@@ -5788,7 +5792,7 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
 
   // Eden space
-  {
+  if (!dng->eden()->is_empty()) {
     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
     assert(!pst->valid(), "Clobbering existing data?");
     // Each valid entry in [0, _eden_chunk_index) represents a task.
@@ -8694,9 +8698,10 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
   assert(_sp->used_region().contains(eob - 1),
-         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+         err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
+                 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
-                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+                 eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
   if (eob >= _limit) {
     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
     if (CMSTraceSweeper) {
@@ -4529,7 +4529,7 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
     _total_prev_live_bytes(0), _total_next_live_bytes(0),
     _hum_used_bytes(0), _hum_capacity_bytes(0),
     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
-    _total_remset_bytes(0) {
+    _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   MemRegion g1_committed = g1h->g1_committed();
   MemRegion g1_reserved = g1h->g1_reserved();
@@ -4553,9 +4553,11 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
                  G1PPRL_BYTE_H_FORMAT
                  G1PPRL_BYTE_H_FORMAT
                  G1PPRL_DOUBLE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
                  G1PPRL_BYTE_H_FORMAT,
                  "type", "address-range",
-                 "used", "prev-live", "next-live", "gc-eff", "remset");
+                 "used", "prev-live", "next-live", "gc-eff",
+                 "remset", "code-roots");
   _out->print_cr(G1PPRL_LINE_PREFIX
                  G1PPRL_TYPE_H_FORMAT
                  G1PPRL_ADDR_BASE_H_FORMAT
@@ -4563,9 +4565,11 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
                  G1PPRL_BYTE_H_FORMAT
                  G1PPRL_BYTE_H_FORMAT
                  G1PPRL_DOUBLE_H_FORMAT
+                 G1PPRL_BYTE_H_FORMAT
                  G1PPRL_BYTE_H_FORMAT,
                  "", "",
-                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", "(bytes)");
+                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
+                 "(bytes)", "(bytes)");
 }
 
 // It takes as a parameter a reference to one of the _hum_* fields, it
@@ -4608,6 +4612,8 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
   size_t next_live_bytes = r->next_live_bytes();
   double gc_eff          = r->gc_efficiency();
   size_t remset_bytes    = r->rem_set()->mem_size();
+  size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
+
   if (r->used() == 0) {
     type = "FREE";
   } else if (r->is_survivor()) {
@@ -4642,6 +4648,7 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
   _total_prev_live_bytes += prev_live_bytes;
   _total_next_live_bytes += next_live_bytes;
   _total_remset_bytes    += remset_bytes;
+  _total_strong_code_roots_bytes += strong_code_roots_bytes;
 
   // Print a line for this particular region.
   _out->print_cr(G1PPRL_LINE_PREFIX
@@ -4651,9 +4658,11 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
                  G1PPRL_BYTE_FORMAT
                  G1PPRL_BYTE_FORMAT
                  G1PPRL_DOUBLE_FORMAT
+                 G1PPRL_BYTE_FORMAT
                  G1PPRL_BYTE_FORMAT,
                  type, bottom, end,
-                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff , remset_bytes);
+                 used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
+                 remset_bytes, strong_code_roots_bytes);
 
   return false;
 }
@@ -4669,7 +4678,8 @@ G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
                  G1PPRL_SUM_MB_PERC_FORMAT("used")
                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
-                 G1PPRL_SUM_MB_FORMAT("remset"),
+                 G1PPRL_SUM_MB_FORMAT("remset")
+                 G1PPRL_SUM_MB_FORMAT("code-roots"),
                  bytes_to_mb(_total_capacity_bytes),
                  bytes_to_mb(_total_used_bytes),
                  perc(_total_used_bytes, _total_capacity_bytes),
@@ -4677,6 +4687,7 @@ G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
                  perc(_total_prev_live_bytes, _total_capacity_bytes),
                  bytes_to_mb(_total_next_live_bytes),
                  perc(_total_next_live_bytes, _total_capacity_bytes),
-                 bytes_to_mb(_total_remset_bytes));
+                 bytes_to_mb(_total_remset_bytes),
+                 bytes_to_mb(_total_strong_code_roots_bytes));
   _out->cr();
 }
@@ -1257,6 +1257,9 @@ private:
   // Accumulator for the remembered set size
   size_t _total_remset_bytes;
 
+  // Accumulator for strong code roots memory size
+  size_t _total_strong_code_roots_bytes;
+
   static double perc(size_t val, size_t total) {
     if (total == 0) {
       return 0.0;
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc_implementation/g1/bufferingOopClosure.hpp"
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
@@ -980,7 +981,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 
     if (should_try_gc) {
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+                                   GCCause::_g1_inc_collection_pause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -1105,7 +1107,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
       // enough space for the allocation to succeed after the pause.
 
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded,
+                                   GCCause::_g1_humongous_allocation);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
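Both allocation slow paths now pass an explicit GCCause into do_collection_pause() instead of the callee hard-coding _g1_inc_collection_pause, so a pause triggered by a humongous allocation is attributed correctly in GC logging. A toy illustration of threading the cause through a pause request (hypothetical Cause enum, not the HotSpot one):

    #include <cstddef>
    #include <cstdio>

    // Hypothetical miniature of passing a GC cause through a pause request.
    enum class Cause { inc_collection_pause, humongous_allocation };

    const char* name(Cause c) {
      return c == Cause::humongous_allocation ? "humongous allocation"
                                              : "incremental pause";
    }

    // The callee no longer assumes one fixed cause; each caller states its own.
    void do_collection_pause(size_t word_size, Cause cause) {
      std::printf("pause (%s), request = %zu words\n", name(cause), word_size);
    }

    int main() {
      do_collection_pause(128, Cause::inc_collection_pause);     // normal slow path
      do_collection_pause(1u << 20, Cause::humongous_allocation); // humongous path
    }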
@@ -1176,20 +1179,27 @@ class PostMCRemSetClearClosure: public HeapRegionClosure {
   ModRefBarrierSet* _mr_bs;
 public:
   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
-    _g1h(g1h), _mr_bs(mr_bs) { }
+    _g1h(g1h), _mr_bs(mr_bs) {}
+
   bool doHeapRegion(HeapRegion* r) {
+    HeapRegionRemSet* hrrs = r->rem_set();
+
     if (r->continuesHumongous()) {
+      // We'll assert that the strong code root list and RSet is empty
+      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
+      assert(hrrs->occupied() == 0, "RSet should be empty");
       return false;
     }
+
     _g1h->reset_gc_time_stamps(r);
-    HeapRegionRemSet* hrrs = r->rem_set();
-    if (hrrs != NULL) hrrs->clear();
+    hrrs->clear();
     // You might think here that we could clear just the cards
     // corresponding to the used region.  But no: if we leave a dirty card
     // in a region we might allocate into, then it would prevent that card
     // from being enqueued, and cause it to be missed.
     // Re: the performance cost: we shouldn't be doing full GC anyway!
     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
+
     return false;
   }
 };
@@ -1269,30 +1279,6 @@ void G1CollectedHeap::print_hrs_post_compaction() {
   heap_region_iterate(&cl);
 }
 
-double G1CollectedHeap::verify(bool guard, const char* msg) {
-  double verify_time_ms = 0.0;
-
-  if (guard && total_collections() >= VerifyGCStartAt) {
-    double verify_start = os::elapsedTime();
-    HandleMark hm;  // Discard invalid handles created during verification
-    prepare_for_verify();
-    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
-    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
-  }
-
-  return verify_time_ms;
-}
-
-void G1CollectedHeap::verify_before_gc() {
-  double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
-  g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
-}
-
-void G1CollectedHeap::verify_after_gc() {
-  double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
-  g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
-}
-
 bool G1CollectedHeap::do_collection(bool explicit_gc,
                                     bool clear_all_soft_refs,
                                     size_t word_size) {
@@ -1433,7 +1419,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
 
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
       MetaspaceAux::verify_metrics();
 
       // Note: since we've just done a full GC, concurrent
       // marking is no longer active. Therefore we need not
@@ -1504,6 +1490,9 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
       heap_region_iterate(&rebuild_rs);
     }
 
+    // Rebuild the strong code root lists for each region
+    rebuild_strong_code_roots();
+
     if (true) { // FIXME
       MetaspaceGC::compute_new_size();
     }
@@ -2019,10 +2008,12 @@ jint G1CollectedHeap::initialize() {
 
   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
   size_t max_byte_size = collector_policy()->max_heap_byte_size();
+  size_t heap_alignment = collector_policy()->max_alignment();
 
   // Ensure that the sizes are properly aligned.
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
+  Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
 
   _cg1r = new ConcurrentG1Refine(this);
 
@@ -2039,12 +2030,8 @@ jint G1CollectedHeap::initialize() {
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.
 
-  // Since max_byte_size is aligned to the size of a heap region (checked
-  // above).
-  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
-
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
-                                                 HeapRegion::GrainBytes);
+                                                 heap_alignment);
 
   // It is important to do this in a way such that concurrent readers can't
   // temporarily think something is in the heap. (I've actually seen this
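initialize() now reserves the heap using the collector policy's max_alignment() rather than the region size, and additionally checks max_byte_size against that alignment. A small worked example of why region alignment alone can be insufficient, assuming a 1 MB region size and a hypothetical 4 MB policy alignment:

    #include <cassert>
    #include <cstdio>

    // Align-up helper for power-of-two alignments, as used when checking
    // that a heap size is a multiple of the chosen heap alignment.
    static size_t align_up(size_t size, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "power of two expected");
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t region_size    = 1u << 20;  // e.g. 1 MB G1 region
      const size_t heap_alignment = 4u << 20;  // assumed max_alignment()
      size_t max_heap = 1023u << 20;           // a 1023 MB request

      // Aligned to regions is not enough if the policy alignment is larger:
      // 1023 MB is region-aligned but not 4 MB-aligned.
      size_t region_aligned = align_up(max_heap, region_size);
      size_t fully_aligned  = align_up(max_heap, heap_alignment);
      std::printf("region-aligned: %zu MB, policy-aligned: %zu MB\n",
                  region_aligned >> 20, fully_aligned >> 20);
    }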
@@ -3109,6 +3096,145 @@ const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
   return NULL; // keep some compilers happy
 }
 
+// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
+// pass it as the perm_blk to SharedHeap::process_strong_roots.
+// When process_strong_roots stop calling perm_blk->younger_refs_iterate
+// we can change this closure to extend the simpler OopClosure.
+class VerifyRootsClosure: public OopsInGenClosure {
+private:
+  G1CollectedHeap* _g1h;
+  VerifyOption     _vo;
+  bool             _failures;
+public:
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyRootsClosure(VerifyOption vo) :
+    _g1h(G1CollectedHeap::heap()),
+    _vo(vo),
+    _failures(false) { }
+
+  bool failures() { return _failures; }
+
+  template <class T> void do_oop_nv(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      if (_g1h->is_obj_dead_cond(obj, _vo)) {
+        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
+                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
+        if (_vo == VerifyOption_G1UseMarkWord) {
+          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
+        }
+        obj->print_on(gclog_or_tty);
+        _failures = true;
+      }
+    }
+  }
+
+  void do_oop(oop* p)       { do_oop_nv(p); }
+  void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
+class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
+  G1CollectedHeap* _g1h;
+  OopClosure* _root_cl;
+  nmethod* _nm;
+  VerifyOption _vo;
+  bool _failures;
+
+  template <class T> void do_oop_work(T* p) {
+    // First verify that this root is live
+    _root_cl->do_oop(p);
+
+    if (!G1VerifyHeapRegionCodeRoots) {
+      // We're not verifying the code roots attached to heap region.
+      return;
+    }
+
+    // Don't check the code roots during marking verification in a full GC
+    if (_vo == VerifyOption_G1UseMarkWord) {
+      return;
+    }
+
+    // Now verify that the current nmethod (which contains p) is
+    // in the code root list of the heap region containing the
+    // object referenced by p.
+
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+      // Now fetch the region containing the object
+      HeapRegion* hr = _g1h->heap_region_containing(obj);
+      HeapRegionRemSet* hrrs = hr->rem_set();
+      // Verify that the strong code root list for this region
+      // contains the nmethod
+      if (!hrrs->strong_code_roots_list_contains(_nm)) {
+        gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
+                               "from nmethod "PTR_FORMAT" not in strong "
+                               "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
+                               p, _nm, hr->bottom(), hr->end());
+        _failures = true;
+      }
+    }
+  }
+
+public:
+  G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
+    _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
+
+  void do_oop(oop* p) { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+
+  void set_nmethod(nmethod* nm) { _nm = nm; }
+  bool failures() { return _failures; }
+};
+
+class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
+  G1VerifyCodeRootOopClosure* _oop_cl;
+
+public:
+  G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
+    _oop_cl(oop_cl) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      _oop_cl->set_nmethod(nm);
+      nm->oops_do(_oop_cl);
+    }
+  }
+};
+
+class YoungRefCounterClosure : public OopClosure {
+  G1CollectedHeap* _g1h;
+  int              _count;
+ public:
+  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
+  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
+  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+
+  int count() { return _count; }
+  void reset_count() { _count = 0; };
+};
+
+class VerifyKlassClosure: public KlassClosure {
+  YoungRefCounterClosure _young_ref_counter_closure;
+  OopClosure *_oop_closure;
+ public:
+  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
+  void do_klass(Klass* k) {
+    k->oops_do(_oop_closure);
+
+    _young_ref_counter_closure.reset_count();
+    k->oops_do(&_young_ref_counter_closure);
+    if (_young_ref_counter_closure.count() > 0) {
+      guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
+    }
+  }
+};
+
 class VerifyLivenessOopClosure: public OopClosure {
   G1CollectedHeap* _g1h;
   VerifyOption _vo;
@@ -3242,75 +3368,7 @@ public:
   }
 };
 
-class YoungRefCounterClosure : public OopClosure {
-  G1CollectedHeap* _g1h;
-  int              _count;
- public:
-  YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
-  void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
-  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-
-  int count() { return _count; }
-  void reset_count() { _count = 0; };
-};
-
-class VerifyKlassClosure: public KlassClosure {
-  YoungRefCounterClosure _young_ref_counter_closure;
-  OopClosure *_oop_closure;
- public:
-  VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
-  void do_klass(Klass* k) {
-    k->oops_do(_oop_closure);
-
-    _young_ref_counter_closure.reset_count();
-    k->oops_do(&_young_ref_counter_closure);
-    if (_young_ref_counter_closure.count() > 0) {
-      guarantee(k->has_modified_oops(), err_msg("Klass %p, has young refs but is not dirty.", k));
-    }
-  }
-};
-
-// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
-// pass it as the perm_blk to SharedHeap::process_strong_roots.
-// When process_strong_roots stop calling perm_blk->younger_refs_iterate
-// we can change this closure to extend the simpler OopClosure.
-class VerifyRootsClosure: public OopsInGenClosure {
-private:
-  G1CollectedHeap* _g1h;
-  VerifyOption     _vo;
-  bool             _failures;
-public:
-  // _vo == UsePrevMarking -> use "prev" marking information,
-  // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
-  VerifyRootsClosure(VerifyOption vo) :
-    _g1h(G1CollectedHeap::heap()),
-    _vo(vo),
-    _failures(false) { }
-
-  bool failures() { return _failures; }
-
-  template <class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (_g1h->is_obj_dead_cond(obj, _vo)) {
-        gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
-                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
-        if (_vo == VerifyOption_G1UseMarkWord) {
-          gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
-        }
-        obj->print_on(gclog_or_tty);
-        _failures = true;
-      }
-    }
-  }
-
-  void do_oop(oop* p)       { do_oop_nv(p); }
-  void do_oop(narrowOop* p) { do_oop_nv(p); }
-};
-
-// This is the task used for parallel heap verification.
+// This is the task used for parallel verification of the heap regions
 
 class G1ParVerifyTask: public AbstractGangTask {
 private:
@@ -3344,20 +3402,15 @@ public:
   }
 };
 
-void G1CollectedHeap::verify(bool silent) {
-  verify(silent, VerifyOption_G1UsePrevMarking);
-}
-
-void G1CollectedHeap::verify(bool silent,
-                             VerifyOption vo) {
+void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
   if (SafepointSynchronize::is_at_safepoint()) {
-    if (!silent) { gclog_or_tty->print("Roots "); }
-    VerifyRootsClosure rootsCl(vo);
-
     assert(Thread::current()->is_VM_thread(),
            "Expected to be executed serially by the VM thread at this point");
 
-    CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
+    if (!silent) { gclog_or_tty->print("Roots "); }
+    VerifyRootsClosure rootsCl(vo);
+    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
+    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
     VerifyKlassClosure klassCl(this, &rootsCl);
 
     // We apply the relevant closures to all the oops in the
@@ -3376,7 +3429,7 @@ void G1CollectedHeap::verify(bool silent,
                          &klassCl
                          );
 
-    bool failures = rootsCl.failures();
+    bool failures = rootsCl.failures() || codeRootsCl.failures();
 
     if (vo != VerifyOption_G1UseMarkWord) {
       // If we're verifying during a full GC then the region sets
@@ -3445,6 +3498,34 @@ void G1CollectedHeap::verify(bool silent,
   }
 }
 
+void G1CollectedHeap::verify(bool silent) {
+  verify(silent, VerifyOption_G1UsePrevMarking);
+}
+
+double G1CollectedHeap::verify(bool guard, const char* msg) {
+  double verify_time_ms = 0.0;
+
+  if (guard && total_collections() >= VerifyGCStartAt) {
+    double verify_start = os::elapsedTime();
+    HandleMark hm;  // Discard invalid handles created during verification
+    prepare_for_verify();
+    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
+    verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
+  }
+
+  return verify_time_ms;
+}
+
+void G1CollectedHeap::verify_before_gc() {
+  double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
+  g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
+}
+
+void G1CollectedHeap::verify_after_gc() {
+  double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
+  g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
+}
+
 class PrintRegionClosure: public HeapRegionClosure {
   outputStream* _st;
 public:
@@ -3619,14 +3700,15 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                unsigned int gc_count_before,
-                                               bool* succeeded) {
+                                               bool* succeeded,
+                                               GCCause::Cause gc_cause) {
   assert_heap_not_locked_and_not_at_safepoint();
   g1_policy()->record_stop_world_start();
   VM_G1IncCollectionPause op(gc_count_before,
                              word_size,
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
-                             GCCause::_g1_inc_collection_pause);
+                             gc_cause);
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
@@ -3866,8 +3948,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     append_secondary_free_list_if_not_empty_with_lock();
   }
 
-  assert(check_young_list_well_formed(),
-         "young list should be well formed");
+  assert(check_young_list_well_formed(), "young list should be well formed");
+  assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
+         "sanity check");
 
   // Don't dynamically change the number of GC threads this early.  A value of
   // 0 is used to indicate serial work. When parallel work is done,
@@ -4987,7 +5070,11 @@ public:
 
       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
 
-      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+      // Don't scan the scavengable methods in the code cache as part
+      // of strong root scanning. The code roots that point into a
+      // region in the collection set are scanned when we scan the
+      // region's RSet.
+      int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings;
 
       pss.start_strong_roots();
       _g1h->g1_process_strong_roots(/* is scavenging */ true,
@@ -5029,67 +5116,6 @@ public:
 
 // *** Common G1 Evacuation Stuff
 
-// Closures that support the filtering of CodeBlobs scanned during
-// external root scanning.
-
-// Closure applied to reference fields in code blobs (specifically nmethods)
-// to determine whether an nmethod contains references that point into
-// the collection set. Used as a predicate when walking code roots so
-// that only nmethods that point into the collection set are added to the
-// 'marked' list.
-
-class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
-
-  class G1PointsIntoCSOopClosure : public OopClosure {
-    G1CollectedHeap* _g1;
-    bool _points_into_cs;
-  public:
-    G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
-      _g1(g1), _points_into_cs(false) { }
-
-    bool points_into_cs() const { return _points_into_cs; }
-
-    template <class T>
-    void do_oop_nv(T* p) {
-      if (!_points_into_cs) {
-        T heap_oop = oopDesc::load_heap_oop(p);
-        if (!oopDesc::is_null(heap_oop) &&
-            _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
-          _points_into_cs = true;
-        }
-      }
-    }
-
-    virtual void do_oop(oop* p)        { do_oop_nv(p); }
-    virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
-  };
-
-  G1CollectedHeap* _g1;
-
-public:
-  G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
-    CodeBlobToOopClosure(cl, true), _g1(g1) { }
-
-  virtual void do_code_blob(CodeBlob* cb) {
-    nmethod* nm = cb->as_nmethod_or_null();
-    if (nm != NULL && !(nm->test_oops_do_mark())) {
-      G1PointsIntoCSOopClosure predicate_cl(_g1);
-      nm->oops_do(&predicate_cl);
-
-      if (predicate_cl.points_into_cs()) {
-        // At least one of the reference fields or the oop relocations
-        // in the nmethod points into the collection set. We have to
-        // 'mark' this nmethod.
-        // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
-        // or MarkingCodeBlobClosure::do_code_blob() change.
-        if (!nm->test_set_oops_do_mark()) {
-          do_newly_marked_nmethod(nm);
-        }
-      }
-    }
-  }
-};
-
 // This method is run in a GC worker.
 
 void
@@ -5107,9 +5133,10 @@ g1_process_strong_roots(bool is_scavenging,
 
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
 
-  // Walk the code cache w/o buffering, because StarTask cannot handle
-  // unaligned oop locations.
-  G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
+  assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
+  // Walk the code cache/strong code roots w/o buffering, because StarTask
+  // cannot handle unaligned oop locations.
+  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
 
   process_strong_roots(false, // no scoping; this is parallel code
                        is_scavenging, so,
@@ -5154,9 +5181,22 @@ g1_process_strong_roots(bool is_scavenging,
   }
   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
 
+  // If this is an initial mark pause, and we're not scanning
+  // the entire code cache, we need to mark the oops in the
+  // strong code root lists for the regions that are not in
+  // the collection set.
+  // Note all threads participate in this set of root tasks.
+  double mark_strong_code_roots_ms = 0.0;
+  if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
+    double mark_strong_roots_start = os::elapsedTime();
+    mark_strong_code_roots(worker_i);
+    mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
+  }
+  g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
+
   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
-    g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
+    g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
   }
   _process_strong_tasks->all_tasks_completed();
 }
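With SO_CodeCache dropped from the evacuation-pause scan options, the new assert insists that code roots are still reached some other way — here, via the per-region strong code root lists scanned along with each region's RSet. A tiny sketch of the bit-flag scan-options test, with hypothetical SO_* values in the style of the SharedHeap constants:

    #include <cstdio>

    // Hypothetical bit-flag scan options, modeled on SharedHeap::SO_*.
    enum ScanOption {
      SO_AllClasses = 1 << 0,
      SO_Strings    = 1 << 1,
      SO_CodeCache  = 1 << 2,
    };

    int main() {
      // Evacuation pauses now omit SO_CodeCache: code roots reaching the
      // collection set are found via each region's strong code root list.
      int so = SO_AllClasses | SO_Strings;

      if ((so & SO_CodeCache) == 0) {
        std::printf("code cache not scanned wholesale; "
                    "per-region code root lists used instead\n");
      }
    }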
@@ -5774,9 +5814,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
   process_discovered_references(n_workers);
 
   // Weak root processing.
-  // Note: when JSR 292 is enabled and code blobs can contain
-  // non-perm oops then we will need to process the code blobs
-  // here too.
   {
     G1STWIsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
@@ -5792,6 +5829,17 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
   hot_card_cache->reset_hot_cache();
   hot_card_cache->set_use_cache(true);
 
+  // Migrate the strong code roots attached to each region in
+  // the collection set. Ideally we would like to do this
+  // after we have finished the scanning/evacuation of the
+  // strong code roots for a particular heap region.
+  migrate_strong_code_roots();
+
+  if (g1_policy()->during_initial_mark_pause()) {
+    // Reset the claim values set during marking the strong code roots
+    reset_heap_region_claim_values();
+  }
+
   finalize_for_evac_failure();
 
   if (evacuation_failed()) {
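The migration added above runs over exactly the collection-set regions via a HeapRegionClosure-style callback, whose doHeapRegion() return value can abort the walk early. A stripped-down model of that iteration protocol, with hypothetical Region/RegionClosure types in place of the HotSpot ones:

    #include <cstdio>
    #include <vector>

    // Stand-in for iterating the collection set with a HeapRegionClosure.
    struct Region { int id; };

    struct RegionClosure {
      // Returning true aborts the iteration, mirroring doHeapRegion().
      virtual bool do_region(Region& r) = 0;
      virtual ~RegionClosure() = default;
    };

    struct MigrateClosure : RegionClosure {
      bool do_region(Region& r) override {
        std::printf("migrate code roots out of region %d\n", r.id);
        return false;  // keep going over the whole collection set
      }
    };

    int main() {
      std::vector<Region> cset{{1}, {4}, {9}};
      MigrateClosure cl;
      for (Region& r : cset) {
        if (cl.do_region(r)) break;
      }
    }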
@@ -6588,3 +6636,208 @@ void G1CollectedHeap::verify_region_sets() {
   _humongous_set.verify_end();
   _free_list.verify_end();
 }
+
+// Optimized nmethod scanning
+
+class RegisterNMethodOopClosure: public OopClosure {
+  G1CollectedHeap* _g1h;
+  nmethod* _nm;
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      HeapRegion* hr = _g1h->heap_region_containing(obj);
+      assert(!hr->isHumongous(), "code root in humongous region?");
+
+      // HeapRegion::add_strong_code_root() avoids adding duplicate
+      // entries but having duplicates is OK since we "mark" nmethods
+      // as visited when we scan the strong code root lists during the GC.
+      hr->add_strong_code_root(_nm);
+      assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?");
+    }
+  }
+
+public:
+  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
+    _g1h(g1h), _nm(nm) {}
+
+  void do_oop(oop* p)       { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class UnregisterNMethodOopClosure: public OopClosure {
+  G1CollectedHeap* _g1h;
+  nmethod* _nm;
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      HeapRegion* hr = _g1h->heap_region_containing(obj);
+      assert(!hr->isHumongous(), "code root in humongous region?");
+      hr->remove_strong_code_root(_nm);
+      assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?");
+    }
+  }
+
+public:
+  UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
+    _g1h(g1h), _nm(nm) {}
+
+  void do_oop(oop* p)       { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+void G1CollectedHeap::register_nmethod(nmethod* nm) {
+  CollectedHeap::register_nmethod(nm);
+
+  guarantee(nm != NULL, "sanity");
+  RegisterNMethodOopClosure reg_cl(this, nm);
+  nm->oops_do(&reg_cl);
+}
+
+void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
+  CollectedHeap::unregister_nmethod(nm);
+
+  guarantee(nm != NULL, "sanity");
+  UnregisterNMethodOopClosure reg_cl(this, nm);
+  nm->oops_do(&reg_cl, true);
+}
+
+class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
+public:
+  bool doHeapRegion(HeapRegion *hr) {
+    assert(!hr->isHumongous(), "humongous region in collection set?");
+    hr->migrate_strong_code_roots();
+    return false;
+  }
+};
+
+void G1CollectedHeap::migrate_strong_code_roots() {
+  MigrateCodeRootsHeapRegionClosure cl;
+  double migrate_start = os::elapsedTime();
+  collection_set_iterate(&cl);
+  double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
+  g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
+}
+
+// Mark all the code roots that point into regions *not* in the
+// collection set.
+//
+// Note we do not want to use a "marking" CodeBlobToOopClosure while
+// walking the the code roots lists of regions not in the collection
+// set. Suppose we have an nmethod (M) that points to objects in two
+// separate regions - one in the collection set (R1) and one not (R2).
+// Using a "marking" CodeBlobToOopClosure here would result in "marking"
+// nmethod M when walking the code roots for R1. When we come to scan
+// the code roots for R2, we would see that M is already marked and it
+// would be skipped and the objects in R2 that are referenced from M
+// would not be evacuated.
+
+class MarkStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
+
+  class MarkStrongCodeRootOopClosure: public OopClosure {
+    ConcurrentMark* _cm;
+    HeapRegion* _hr;
+    uint _worker_id;
+
+    template <class T> void do_oop_work(T* p) {
+      T heap_oop = oopDesc::load_heap_oop(p);
+      if (!oopDesc::is_null(heap_oop)) {
+        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+        // Only mark objects in the region (which is assumed
+        // to be not in the collection set).
+        if (_hr->is_in(obj)) {
+          _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
+        }
+      }
+    }
+
+  public:
+    MarkStrongCodeRootOopClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id) :
+      _cm(cm), _hr(hr), _worker_id(worker_id) {
+      assert(!_hr->in_collection_set(), "sanity");
+    }
+
+    void do_oop(narrowOop* p) { do_oop_work(p); }
+    void do_oop(oop* p)       { do_oop_work(p); }
+  };
+
+  MarkStrongCodeRootOopClosure _oop_cl;
+
+public:
+  MarkStrongCodeRootCodeBlobClosure(ConcurrentMark* cm, HeapRegion* hr, uint worker_id):
+    _oop_cl(cm, hr, worker_id) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      nm->oops_do(&_oop_cl);
+    }
+  }
+};
+
+class MarkStrongCodeRootsHRClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
+  uint _worker_id;
+
+public:
+  MarkStrongCodeRootsHRClosure(G1CollectedHeap* g1h, uint worker_id) :
+    _g1h(g1h), _worker_id(worker_id) {}
+
+  bool doHeapRegion(HeapRegion *hr) {
+    HeapRegionRemSet* hrrs = hr->rem_set();
+    if (hr->isHumongous()) {
+      // Code roots should never be attached to a humongous region
+      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
+      return false;
+    }
+
+    if (hr->in_collection_set()) {
+      // Don't mark code roots into regions in the collection set here.
+      // They will be marked when we scan them.
+      return false;
+    }
+
+    MarkStrongCodeRootCodeBlobClosure cb_cl(_g1h->concurrent_mark(), hr, _worker_id);
+    hr->strong_code_roots_do(&cb_cl);
+    return false;
+  }
+};
+
+void G1CollectedHeap::mark_strong_code_roots(uint worker_id) {
+  MarkStrongCodeRootsHRClosure cl(this, worker_id);
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    heap_region_par_iterate_chunked(&cl,
+                                    worker_id,
+                                    workers()->active_workers(),
+                                    HeapRegion::ParMarkRootClaimValue);
+  } else {
+    heap_region_iterate(&cl);
+  }
+}
+
+class RebuildStrongCodeRootClosure: public CodeBlobClosure {
+  G1CollectedHeap* _g1h;
+
+public:
+  RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
+    _g1h(g1h) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
+    if (nm == NULL) {
+      return;
+    }
+
+    if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) {
+      _g1h->register_nmethod(nm);
+    }
+  }
+};
+
+void G1CollectedHeap::rebuild_strong_code_roots() {
+  RebuildStrongCodeRootClosure blob_cl(this);
+  CodeCache::blobs_do(&blob_cl);
+}
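The register/unregister pair above keeps each region's strong code root list in sync with the nmethods whose oops point into that region: registration walks the nmethod's oops and adds the nmethod to every region it references, and unregistration removes it again. A toy model of that bookkeeping on a single region, with a plain std::set standing in for the real per-region list:

    #include <cstdio>
    #include <set>

    // Toy model: each "region" keeps a set of code blobs ("nmethods")
    // that reference objects inside it.
    struct Region {
      std::set<const void*> strong_code_roots;

      void add_strong_code_root(const void* nm)    { strong_code_roots.insert(nm); }
      void remove_strong_code_root(const void* nm) { strong_code_roots.erase(nm); }
      bool contains(const void* nm) const {
        return strong_code_roots.count(nm) != 0;
      }
    };

    int main() {
      Region r;
      int fake_nmethod;  // placeholder for a compiled method

      r.add_strong_code_root(&fake_nmethod);    // register_nmethod path
      std::printf("registered: %d\n", r.contains(&fake_nmethod));

      r.remove_strong_code_root(&fake_nmethod); // unregister_nmethod path
      std::printf("registered: %d\n", r.contains(&fake_nmethod));
    }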
@@ -46,6 +46,7 @@
 // may combine concurrent marking with parallel, incremental compaction of
 // heap subsets that will yield large amounts of garbage.
 
+// Forward declarations
 class HeapRegion;
 class HRRSCleanupTask;
 class GenerationSpec;
@@ -69,6 +70,7 @@ class STWGCTimer;
 class G1NewTracer;
 class G1OldTracer;
 class EvacuationFailedInfo;
+class nmethod;
 
 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -163,18 +165,6 @@ public:
   : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 };
 
-// The G1 STW is alive closure.
-// An instance is embedded into the G1CH and used as the
-// (optional) _is_alive_non_header closure in the STW
-// reference processor. It is also extensively used during
-// reference processing during STW evacuation pauses.
-class G1STWIsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
-public:
-  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
-  bool do_object_b(oop p);
-};
-
 class SurvivorGCAllocRegion : public G1AllocRegion {
 protected:
   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
@@ -193,6 +183,18 @@ public:
   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 };
 
+// The G1 STW is alive closure.
+// An instance is embedded into the G1CH and used as the
+// (optional) _is_alive_non_header closure in the STW
+// reference processor. It is also extensively used during
+// reference processing during STW evacuation pauses.
+class G1STWIsAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  bool do_object_b(oop p);
+};
+
 class RefineCardTableEntryClosure;
 
 class G1CollectedHeap : public SharedHeap {
@@ -774,9 +776,10 @@ protected:
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
   HeapWord* do_collection_pause(size_t word_size,
                                 unsigned int gc_count_before,
-                                bool* succeeded);
+                                bool* succeeded,
+                                GCCause::Cause gc_cause);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
@@ -1549,42 +1552,6 @@ public:
 
   virtual jlong millis_since_last_gc();
 
-  // Perform any cleanup actions necessary before allowing a verification.
-  virtual void prepare_for_verify();
-
-  // Perform verification.
-
-  // vo == UsePrevMarking -> use "prev" marking information,
-  // vo == UseNextMarking -> use "next" marking information
-  // vo == UseMarkWord    -> use the mark word in the object header
-  //
-  // NOTE: Only the "prev" marking information is guaranteed to be
-  // consistent most of the time, so most calls to this should use
-  // vo == UsePrevMarking.
-  // Currently, there is only one case where this is called with
-  // vo == UseNextMarking, which is to verify the "next" marking
-  // information at the end of remark.
-  // Currently there is only one place where this is called with
-  // vo == UseMarkWord, which is to verify the marking during a
-  // full GC.
-  void verify(bool silent, VerifyOption vo);
-
-  // Override; it uses the "prev" marking information
-  virtual void verify(bool silent);
-
-  virtual void print_on(outputStream* st) const;
-  virtual void print_extended_on(outputStream* st) const;
-  virtual void print_on_error(outputStream* st) const;
-
-  virtual void print_gc_threads_on(outputStream* st) const;
-  virtual void gc_threads_do(ThreadClosure* tc) const;
-
-  // Override
-  void print_tracing_info() const;
-
-  // The following two methods are helpful for debugging RSet issues.
-  void print_cset_rsets() PRODUCT_RETURN;
-  void print_all_rsets() PRODUCT_RETURN;
-
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
@ -1661,13 +1628,86 @@ public:
|
|||||||
else return is_obj_ill(obj, hr);
|
else return is_obj_ill(obj, hr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
|
||||||
|
HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
|
||||||
|
bool is_marked(oop obj, VerifyOption vo);
|
||||||
|
const char* top_at_mark_start_str(VerifyOption vo);
|
||||||
|
|
||||||
|
ConcurrentMark* concurrent_mark() const { return _cm; }
|
||||||
|
|
||||||
|
// Refinement
|
||||||
|
|
||||||
|
ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
|
||||||
|
|
||||||
|
// The dirty cards region list is used to record a subset of regions
|
||||||
|
// whose cards need clearing. The list if populated during the
|
||||||
|
// remembered set scanning and drained during the card table
|
||||||
|
// cleanup. Although the methods are reentrant, population/draining
|
||||||
|
// phases must not overlap. For synchronization purposes the last
|
||||||
|
// element on the list points to itself.
|
||||||
|
HeapRegion* _dirty_cards_region_list;
|
||||||
|
void push_dirty_cards_region(HeapRegion* hr);
|
||||||
|
HeapRegion* pop_dirty_cards_region();
|
||||||
|
|
||||||
|
// Optimized nmethod scanning support routines
|
||||||
|
|
||||||
|
// Register the given nmethod with the G1 heap
|
||||||
|
virtual void register_nmethod(nmethod* nm);
|
||||||
|
|
||||||
|
// Unregister the given nmethod from the G1 heap
|
||||||
|
virtual void unregister_nmethod(nmethod* nm);
|
||||||
|
|
||||||
|
// Migrate the nmethods in the code root lists of the regions
|
||||||
|
// in the collection set to regions in to-space. In the event
|
||||||
|
// of an evacuation failure, nmethods that reference objects
|
||||||
|
// that were not successfullly evacuated are not migrated.
|
||||||
|
void migrate_strong_code_roots();
|
||||||
|
|
||||||
|
// During an initial mark pause, mark all the code roots that
|
||||||
|
// point into regions *not* in the collection set.
|
||||||
|
void mark_strong_code_roots(uint worker_id);
|
||||||
|
|
||||||
|
// Rebuild the stong code root lists for each region
|
||||||
|
// after a full GC
|
||||||
|
void rebuild_strong_code_roots();
|
||||||
|
|
||||||
|
// Verification
|
||||||
|
|
||||||
|
// The following is just to alert the verification code
|
||||||
|
// that a full collection has occurred and that the
|
||||||
|
// remembered sets are no longer up to date.
|
||||||
|
bool _full_collection;
|
||||||
|
void set_full_collection() { _full_collection = true;}
|
||||||
|
void clear_full_collection() {_full_collection = false;}
|
||||||
|
bool full_collection() {return _full_collection;}
|
||||||
|
|
||||||
|
// Perform any cleanup actions necessary before allowing a verification.
|
||||||
|
virtual void prepare_for_verify();
|
||||||
|
|
||||||
|
// Perform verification.
|
||||||
|
|
||||||
|
// vo == UsePrevMarking -> use "prev" marking information,
|
||||||
|
// vo == UseNextMarking -> use "next" marking information
|
||||||
|
// vo == UseMarkWord -> use the mark word in the object header
|
||||||
|
//
|
||||||
|
// NOTE: Only the "prev" marking information is guaranteed to be
|
||||||
|
// consistent most of the time, so most calls to this should use
|
||||||
|
// vo == UsePrevMarking.
|
||||||
|
// Currently, there is only one case where this is called with
|
||||||
|
// vo == UseNextMarking, which is to verify the "next" marking
|
||||||
|
// information at the end of remark.
|
||||||
|
// Currently there is only one place where this is called with
|
||||||
|
// vo == UseMarkWord, which is to verify the marking during a
|
||||||
|
// full GC.
|
||||||
|
void verify(bool silent, VerifyOption vo);
|
||||||
|
|
||||||
|
// Override; it uses the "prev" marking information
|
||||||
|
virtual void verify(bool silent);
|
||||||
|
|
||||||
// The methods below are here for convenience and dispatch the
|
// The methods below are here for convenience and dispatch the
|
||||||
// appropriate method depending on value of the given VerifyOption
|
// appropriate method depending on value of the given VerifyOption
|
||||||
// parameter. The options for that parameter are:
|
// parameter. The values for that parameter, and their meanings,
|
||||||
//
|
// are the same as those above.
|
||||||
// vo == UsePrevMarking -> use "prev" marking information,
|
|
||||||
// vo == UseNextMarking -> use "next" marking information,
|
|
||||||
// vo == UseMarkWord -> use mark word from object header
|
|
||||||
|
|
||||||
bool is_obj_dead_cond(const oop obj,
|
bool is_obj_dead_cond(const oop obj,
|
||||||
const HeapRegion* hr,
|
const HeapRegion* hr,
|
||||||
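
The comment block added above for _dirty_cards_region_list hinges on one convention: the tail of the intrusive list points to itself, so a NULL link can mean "not on the list" while a self-link marks the end. Below is a minimal, single-threaded sketch of that convention in standard C++; the Region type and all names are invented here, and the real HotSpot list is additionally manipulated with atomic operations, which this sketch deliberately leaves out.

    #include <cassert>
    #include <cstdio>

    // Illustrative stand-in for HeapRegion: only the intrusive list hook matters.
    struct Region {
      Region* next_dirty = nullptr;   // nullptr  => not on the dirty list
                                      // == this  => on the list, and is the tail
    };

    struct DirtyCardsRegionList {
      Region* head = nullptr;

      void push(Region* r) {
        if (r->next_dirty != nullptr) return;          // already queued
        r->next_dirty = (head != nullptr) ? head : r;  // tail points to itself
        head = r;
      }

      Region* pop() {
        Region* r = head;
        if (r == nullptr) return nullptr;
        head = (r->next_dirty == r) ? nullptr : r->next_dirty;  // self-link = tail
        r->next_dirty = nullptr;                       // off the list again
        return r;
      }
    };

    int main() {
      Region a, b;
      DirtyCardsRegionList list;
      list.push(&a);
      list.push(&a);                 // duplicate push is a no-op
      list.push(&b);
      assert(list.pop() == &b && list.pop() == &a && list.pop() == nullptr);
      std::printf("ok\n");
    }
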
@@ -1692,31 +1732,21 @@ public:
     return false; // keep some compilers happy
   }

-  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
-  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
-  bool is_marked(oop obj, VerifyOption vo);
-  const char* top_at_mark_start_str(VerifyOption vo);
+  // Printing

-  // The following is just to alert the verification code
-  // that a full collection has occurred and that the
-  // remembered sets are no longer up to date.
-  bool _full_collection;
-  void set_full_collection() { _full_collection = true;}
-  void clear_full_collection() {_full_collection = false;}
-  bool full_collection() {return _full_collection;}
+  virtual void print_on(outputStream* st) const;
+  virtual void print_extended_on(outputStream* st) const;
+  virtual void print_on_error(outputStream* st) const;

-  ConcurrentMark* concurrent_mark() const { return _cm; }
-  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
+  virtual void print_gc_threads_on(outputStream* st) const;
+  virtual void gc_threads_do(ThreadClosure* tc) const;

-  // The dirty cards region list is used to record a subset of regions
-  // whose cards need clearing. The list is populated during the
-  // remembered set scanning and drained during the card table
-  // cleanup. Although the methods are reentrant, population/draining
-  // phases must not overlap. For synchronization purposes the last
-  // element on the list points to itself.
-  HeapRegion* _dirty_cards_region_list;
-  void push_dirty_cards_region(HeapRegion* hr);
-  HeapRegion* pop_dirty_cards_region();
+  // Override
+  void print_tracing_info() const;
+
+  // The following two methods are helpful for debugging RSet issues.
+  void print_cset_rsets() PRODUCT_RETURN;
+  void print_all_rsets() PRODUCT_RETURN;

 public:
   void stop_conc_gc_threads();
@@ -313,7 +313,8 @@ G1CollectorPolicy::G1CollectorPolicy() :

 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
+  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
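
The initialize_flags change above turns the maximum-alignment computation from a two-way into a three-way maximum, so that the chosen heap alignment also covers the OS (large) page size. A compilable sketch of just that computation, with made-up stand-in values for the three inputs:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      // Stand-in values only; the real inputs come from GenRemSet, HeapRegion
      // and os:: queries inside HotSpot.
      size_t card_table_alignment = 1024;             // rem set constraint
      size_t min_alignment        = 1 * 1024 * 1024;  // HeapRegion::GrainBytes
      size_t page_size            = 2 * 1024 * 1024;  // e.g. a large page

      // Equivalent of set_max_alignment(MAX3(card_table_alignment,
      //                                      min_alignment(), page_size));
      size_t max_alignment =
          std::max({card_table_alignment, min_alignment, page_size});
      std::printf("max heap alignment = %zu\n", max_alignment);
    }
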
@@ -161,6 +161,8 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
   _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
   _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
+  _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
+  _last_strong_code_root_mark_times_ms(_max_gc_threads, "%.1lf"),
   _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
   _last_termination_times_ms(_max_gc_threads, "%.1lf"),
   _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),

@@ -182,6 +184,8 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
   _last_update_rs_times_ms.reset();
   _last_update_rs_processed_buffers.reset();
   _last_scan_rs_times_ms.reset();
+  _last_strong_code_root_scan_times_ms.reset();
+  _last_strong_code_root_mark_times_ms.reset();
   _last_obj_copy_times_ms.reset();
   _last_termination_times_ms.reset();
   _last_termination_attempts.reset();

@@ -197,6 +201,8 @@ void G1GCPhaseTimes::note_gc_end() {
   _last_update_rs_times_ms.verify();
   _last_update_rs_processed_buffers.verify();
   _last_scan_rs_times_ms.verify();
+  _last_strong_code_root_scan_times_ms.verify();
+  _last_strong_code_root_mark_times_ms.verify();
   _last_obj_copy_times_ms.verify();
   _last_termination_times_ms.verify();
   _last_termination_attempts.verify();

@@ -210,6 +216,8 @@ void G1GCPhaseTimes::note_gc_end() {
                                _last_satb_filtering_times_ms.get(i) +
                                _last_update_rs_times_ms.get(i) +
                                _last_scan_rs_times_ms.get(i) +
+                               _last_strong_code_root_scan_times_ms.get(i) +
+                               _last_strong_code_root_mark_times_ms.get(i) +
                                _last_obj_copy_times_ms.get(i) +
                                _last_termination_times_ms.get(i);

@@ -239,6 +247,9 @@ double G1GCPhaseTimes::accounted_time_ms() {
   // Now subtract the time taken to fix up roots in generated code
   misc_time_ms += _cur_collection_code_root_fixup_time_ms;

+  // Strong code root migration time
+  misc_time_ms += _cur_strong_code_root_migration_time_ms;
+
   // Subtract the time taken to clean the card table from the
   // current value of "other time"
   misc_time_ms += _cur_clear_ct_time_ms;
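
accounted_time_ms() exists so that the printer can later report "Other" time as the pause wall-clock time minus everything explicitly tracked; the hunk above simply folds the new migration phase into the accounted sum. A reduced, hypothetical mirror of that bookkeeping (field names invented, shapes simplified):

    #include <cstdio>

    // Simplified model of the G1GCPhaseTimes accounting: every tracked
    // sub-phase is summed, and "Other" is whatever remains of the pause.
    struct PhaseTimes {
      double par_time_ms = 0.0;
      double code_root_fixup_ms = 0.0;
      double code_root_migration_ms = 0.0;  // the field this hunk adds
      double clear_ct_ms = 0.0;

      double accounted_time_ms() const {
        return par_time_ms + code_root_fixup_ms +
               code_root_migration_ms + clear_ct_ms;
      }
    };

    int main() {
      PhaseTimes pt{41.0, 0.4, 0.7, 1.2};
      double pause_time_ms = 45.0;
      double other_ms = pause_time_ms - pt.accounted_time_ms();
      std::printf("Other: %.1f ms\n", other_ms);  // 1.7 ms unattributed
    }
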
@@ -257,9 +268,13 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
     if (_last_satb_filtering_times_ms.sum() > 0.0) {
       _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
     }
+    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
+      _last_strong_code_root_mark_times_ms.print(2, "Code Root Marking (ms)");
+    }
     _last_update_rs_times_ms.print(2, "Update RS (ms)");
     _last_update_rs_processed_buffers.print(3, "Processed Buffers");
     _last_scan_rs_times_ms.print(2, "Scan RS (ms)");
+    _last_strong_code_root_scan_times_ms.print(2, "Code Root Scanning (ms)");
     _last_obj_copy_times_ms.print(2, "Object Copy (ms)");
     _last_termination_times_ms.print(2, "Termination (ms)");
     if (G1Log::finest()) {

@@ -273,12 +288,17 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
     if (_last_satb_filtering_times_ms.sum() > 0.0) {
       _last_satb_filtering_times_ms.print(1, "SATB Filtering (ms)");
     }
+    if (_last_strong_code_root_mark_times_ms.sum() > 0.0) {
+      _last_strong_code_root_mark_times_ms.print(1, "Code Root Marking (ms)");
+    }
     _last_update_rs_times_ms.print(1, "Update RS (ms)");
     _last_update_rs_processed_buffers.print(2, "Processed Buffers");
     _last_scan_rs_times_ms.print(1, "Scan RS (ms)");
+    _last_strong_code_root_scan_times_ms.print(1, "Code Root Scanning (ms)");
     _last_obj_copy_times_ms.print(1, "Object Copy (ms)");
   }
   print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
+  print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
   print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
   double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
   print_stats(1, "Other", misc_time_ms);
@@ -119,6 +119,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   WorkerDataArray<double> _last_update_rs_times_ms;
   WorkerDataArray<int>    _last_update_rs_processed_buffers;
   WorkerDataArray<double> _last_scan_rs_times_ms;
+  WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
+  WorkerDataArray<double> _last_strong_code_root_mark_times_ms;
   WorkerDataArray<double> _last_obj_copy_times_ms;
   WorkerDataArray<double> _last_termination_times_ms;
   WorkerDataArray<size_t> _last_termination_attempts;

@@ -128,6 +130,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {

   double _cur_collection_par_time_ms;
   double _cur_collection_code_root_fixup_time_ms;
+  double _cur_strong_code_root_migration_time_ms;

   double _cur_clear_ct_time_ms;
   double _cur_ref_proc_time_ms;

@@ -179,6 +182,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     _last_scan_rs_times_ms.set(worker_i, ms);
   }

+  void record_strong_code_root_scan_time(uint worker_i, double ms) {
+    _last_strong_code_root_scan_times_ms.set(worker_i, ms);
+  }
+
+  void record_strong_code_root_mark_time(uint worker_i, double ms) {
+    _last_strong_code_root_mark_times_ms.set(worker_i, ms);
+  }
+
   void record_obj_copy_time(uint worker_i, double ms) {
     _last_obj_copy_times_ms.set(worker_i, ms);
   }

@@ -208,6 +219,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     _cur_collection_code_root_fixup_time_ms = ms;
   }

+  void record_strong_code_root_migration_time(double ms) {
+    _cur_strong_code_root_migration_time_ms = ms;
+  }
+
   void record_ref_proc_time(double ms) {
     _cur_ref_proc_time_ms = ms;
   }

@@ -294,6 +309,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     return _last_scan_rs_times_ms.average();
   }

+  double average_last_strong_code_root_scan_time(){
+    return _last_strong_code_root_scan_times_ms.average();
+  }
+
+  double average_last_strong_code_root_mark_time(){
+    return _last_strong_code_root_mark_times_ms.average();
+  }
+
   double average_last_obj_copy_time() {
     return _last_obj_copy_times_ms.average();
   }
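
All of the header additions above follow one fixed pattern: a per-worker slot array for the phase, a record_* setter indexed by worker id, and an average_* accessor used by the log printer. A standalone analogue of that pattern; the WorkerTimes class below is invented for illustration and is not HotSpot's WorkerDataArray:

    #include <cstdio>
    #include <vector>

    // Simplified analogue of WorkerDataArray<double>: one slot per GC worker.
    class WorkerTimes {
      std::vector<double> _ms;
    public:
      explicit WorkerTimes(unsigned max_workers) : _ms(max_workers, 0.0) {}
      void set(unsigned worker_i, double ms) { _ms[worker_i] = ms; }
      double average() const {
        double sum = 0.0;
        for (double v : _ms) sum += v;
        return _ms.empty() ? 0.0 : sum / _ms.size();
      }
    };

    int main() {
      WorkerTimes code_root_scan(4);  // ~ _last_strong_code_root_scan_times_ms
      for (unsigned i = 0; i < 4; i++) {
        code_root_scan.set(i, 0.5 + 0.1 * i);  // ~ record_strong_code_root_scan_time
      }
      std::printf("avg code root scan: %.2f ms\n", code_root_scan.average());
    }
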
@@ -262,6 +262,7 @@ void G1MonitoringSupport::update_sizes() {
     old_collection_counters()->update_all();
     young_collection_counters()->update_all();
     MetaspaceCounters::update_performance_counters();
+    CompressedClassSpaceCounters::update_performance_counters();
   }
 }

@@ -104,15 +104,25 @@ void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
 class ScanRSClosure : public HeapRegionClosure {
   size_t _cards_done, _cards;
   G1CollectedHeap* _g1h;
+
   OopsInHeapRegionClosure* _oc;
+  CodeBlobToOopClosure* _code_root_cl;
+
   G1BlockOffsetSharedArray* _bot_shared;
   CardTableModRefBS *_ct_bs;
-  int _worker_i;
-  int _block_size;
-  bool _try_claimed;
+
+  double _strong_code_root_scan_time_sec;
+  int _worker_i;
+  int _block_size;
+  bool _try_claimed;
+
 public:
-  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
+  ScanRSClosure(OopsInHeapRegionClosure* oc,
+                CodeBlobToOopClosure* code_root_cl,
+                int worker_i) :
     _oc(oc),
+    _code_root_cl(code_root_cl),
+    _strong_code_root_scan_time_sec(0.0),
     _cards(0),
     _cards_done(0),
     _worker_i(worker_i),

@@ -160,6 +170,12 @@ public:
                       card_start, card_start + G1BlockOffsetSharedArray::N_words);
   }

+  void scan_strong_code_roots(HeapRegion* r) {
+    double scan_start = os::elapsedTime();
+    r->strong_code_roots_do(_code_root_cl);
+    _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
+  }
+
   bool doHeapRegion(HeapRegion* r) {
     assert(r->in_collection_set(), "should only be called on elements of CS.");
     HeapRegionRemSet* hrrs = r->rem_set();

@@ -173,6 +189,7 @@ public:
     // _try_claimed || r->claim_iter()
     // is true: either we're supposed to work on claimed-but-not-complete
     // regions, or we successfully claimed the region.
+
     HeapRegionRemSetIterator iter(hrrs);
     size_t card_index;

@@ -205,30 +222,43 @@ public:
       }
     }
     if (!_try_claimed) {
+      // Scan the strong code root list attached to the current region
+      scan_strong_code_roots(r);
+
       hrrs->set_iter_complete();
     }
     return false;
   }

+  double strong_code_root_scan_time_sec() {
+    return _strong_code_root_scan_time_sec;
+  }
+
   size_t cards_done() { return _cards_done;}
   size_t cards_looked_up() { return _cards;}
 };

-void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
+void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
+                      CodeBlobToOopClosure* code_root_cl,
+                      int worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);

-  ScanRSClosure scanRScl(oc, worker_i);
+  ScanRSClosure scanRScl(oc, code_root_cl, worker_i);

   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
   _g1->collection_set_iterate_from(startRegion, &scanRScl);

-  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
+  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start)
+                            - scanRScl.strong_code_root_scan_time_sec();

-  assert( _cards_scanned != NULL, "invariant" );
+  assert(_cards_scanned != NULL, "invariant");
   _cards_scanned[worker_i] = scanRScl.cards_done();

   _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
+  _g1p->phase_times()->record_strong_code_root_scan_time(worker_i,
+                           scanRScl.strong_code_root_scan_time_sec() * 1000.0);
 }

 // Closure used for updating RSets and recording references that
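
The scanRS change above is careful not to double count: the strong code root scan is timed inside the closure and then subtracted from the enclosing "Scan RS" interval before both are recorded. A self-contained sketch of that measure-and-subtract idiom, using std::chrono in place of os::elapsedTime() and sleeps in place of real work:

    #include <chrono>
    #include <cstdio>
    #include <thread>

    static double now_sec() {
      using namespace std::chrono;
      return duration<double>(steady_clock::now().time_since_epoch()).count();
    }

    static double g_code_root_scan_sec = 0.0;

    static void scan_strong_code_roots() {
      double start = now_sec();
      std::this_thread::sleep_for(std::chrono::milliseconds(2));  // pretend work
      g_code_root_scan_sec += now_sec() - start;  // accumulate the sub-phase
    }

    int main() {
      double rs_start = now_sec();
      std::this_thread::sleep_for(std::chrono::milliseconds(5));  // RSet cards
      scan_strong_code_roots();                                   // nested phase
      // Report Scan RS without the nested code root scan, as scanRS now does.
      double scan_rs_sec = (now_sec() - rs_start) - g_code_root_scan_sec;
      std::printf("Scan RS: %.1f ms, Code Root Scanning: %.1f ms\n",
                  scan_rs_sec * 1000.0, g_code_root_scan_sec * 1000.0);
    }
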
@@ -288,7 +318,8 @@ void G1RemSet::cleanupHRRS() {
 }

 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                           int worker_i) {
+                                           CodeBlobToOopClosure* code_root_cl,
+                                           int worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
 #endif

@@ -328,7 +359,7 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
     _g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
   }
   if (G1UseParallelRSetScanning || (worker_i == 0)) {
-    scanRS(oc, worker_i);
+    scanRS(oc, code_root_cl, worker_i);
   } else {
     _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
   }
@@ -81,14 +81,23 @@ public:
   G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
   ~G1RemSet();

-  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
-  // outside the CS (having invoked "blk->set_region" to set the "from"
-  // region correctly beforehand.) The "worker_i" param is for the
-  // parallel case where the number of the worker thread calling this
-  // function can be helpful in partitioning the work to be done. It
-  // should be the same as the "i" passed to the calling thread's
-  // work(i) function. In the sequential case this param will be ingored.
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);
+  // Invoke "blk->do_oop" on all pointers into the collection set
+  // from objects in regions outside the collection set (having
+  // invoked "blk->set_region" to set the "from" region correctly
+  // beforehand.)
+  //
+  // Invoke code_root_cl->do_code_blob on the unmarked nmethods
+  // on the strong code roots list for each region in the
+  // collection set.
+  //
+  // The "worker_i" param is for the parallel case where the id
+  // of the worker thread calling this function can be helpful in
+  // partitioning the work to be done. It should be the same as
+  // the "i" passed to the calling thread's work(i) function.
+  // In the sequential case this param will be ignored.
+  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
+                                   CodeBlobToOopClosure* code_root_cl,
+                                   int worker_i);

   // Prepare for and cleanup after an oops_into_collection_set_do
   // call. Must call each of these once before and after (in sequential

@@ -98,7 +107,10 @@ public:
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();

-  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
+  void scanRS(OopsInHeapRegionClosure* oc,
+              CodeBlobToOopClosure* code_root_cl,
+              int worker_i);
+
   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);

   CardTableModRefBS* ct_bs() { return _ct_bs; }
@@ -127,32 +127,55 @@ void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {

 class HRRSStatsIter: public HeapRegionClosure {
   size_t _occupied;
-  size_t _total_mem_sz;
-  size_t _max_mem_sz;
-  HeapRegion* _max_mem_sz_region;
+
+  size_t _total_rs_mem_sz;
+  size_t _max_rs_mem_sz;
+  HeapRegion* _max_rs_mem_sz_region;
+
+  size_t _total_code_root_mem_sz;
+  size_t _max_code_root_mem_sz;
+  HeapRegion* _max_code_root_mem_sz_region;
 public:
   HRRSStatsIter() :
     _occupied(0),
-    _total_mem_sz(0),
-    _max_mem_sz(0),
-    _max_mem_sz_region(NULL)
+    _total_rs_mem_sz(0),
+    _max_rs_mem_sz(0),
+    _max_rs_mem_sz_region(NULL),
+    _total_code_root_mem_sz(0),
+    _max_code_root_mem_sz(0),
+    _max_code_root_mem_sz_region(NULL)
   {}

   bool doHeapRegion(HeapRegion* r) {
-    size_t mem_sz = r->rem_set()->mem_size();
-    if (mem_sz > _max_mem_sz) {
-      _max_mem_sz = mem_sz;
-      _max_mem_sz_region = r;
+    HeapRegionRemSet* hrrs = r->rem_set();
+
+    // HeapRegionRemSet::mem_size() includes the
+    // size of the strong code roots
+    size_t rs_mem_sz = hrrs->mem_size();
+    if (rs_mem_sz > _max_rs_mem_sz) {
+      _max_rs_mem_sz = rs_mem_sz;
+      _max_rs_mem_sz_region = r;
     }
-    _total_mem_sz += mem_sz;
-    size_t occ = r->rem_set()->occupied();
+    _total_rs_mem_sz += rs_mem_sz;
+
+    size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
+    if (code_root_mem_sz > _max_code_root_mem_sz) {
+      _max_code_root_mem_sz = code_root_mem_sz;
+      _max_code_root_mem_sz_region = r;
+    }
+    _total_code_root_mem_sz += code_root_mem_sz;
+
+    size_t occ = hrrs->occupied();
     _occupied += occ;
     return false;
   }
-  size_t total_mem_sz() { return _total_mem_sz; }
-  size_t max_mem_sz() { return _max_mem_sz; }
+  size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
+  size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
+  HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
+  size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
+  size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
+  HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
   size_t occupied() { return _occupied; }
-  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
 };

 double calc_percentage(size_t numerator, size_t denominator) {
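
HRRSStatsIter is a fold over all heap regions that keeps, per metric, a running total plus the maximum and the region that produced it; the rewrite above just tracks that pair twice, once for rem set memory and once for code-root memory. A reduced standalone version of the fold, where Region and StatsIter are flattened stand-ins rather than the HotSpot types:

    #include <cstddef>
    #include <cstdio>

    struct Region { size_t rs_mem_sz; size_t code_root_mem_sz; };

    struct StatsIter {  // mirrors the shape of HRRSStatsIter
      size_t total_rs = 0, max_rs = 0;
      const Region* max_rs_region = nullptr;
      size_t total_code_root = 0, max_code_root = 0;
      const Region* max_cr_region = nullptr;

      void do_region(const Region* r) {
        if (r->rs_mem_sz > max_rs) { max_rs = r->rs_mem_sz; max_rs_region = r; }
        total_rs += r->rs_mem_sz;
        if (r->code_root_mem_sz > max_code_root) {
          max_code_root = r->code_root_mem_sz; max_cr_region = r;
        }
        total_code_root += r->code_root_mem_sz;
      }
    };

    int main() {
      Region regions[] = {{4096, 128}, {16384, 512}, {8192, 2048}};
      StatsIter it;
      for (const Region& r : regions) it.do_region(&r);
      std::printf("rs: total=%zu max=%zu; code roots: total=%zu max=%zu\n",
                  it.total_rs, it.max_rs, it.total_code_root, it.max_code_root);
    }
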
@@ -184,22 +207,33 @@ void G1RemSetSummary::print_on(outputStream* out) {

   HRRSStatsIter blk;
   G1CollectedHeap::heap()->heap_region_iterate(&blk);
+  // RemSet stats
   out->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K."
                 " Max = "SIZE_FORMAT"K.",
-                blk.total_mem_sz()/K, blk.max_mem_sz()/K);
+                blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
   out->print_cr(" Static structures = "SIZE_FORMAT"K,"
                 " free_lists = "SIZE_FORMAT"K.",
                 HeapRegionRemSet::static_mem_size() / K,
                 HeapRegionRemSet::fl_mem_size() / K);
   out->print_cr(" "SIZE_FORMAT" occupied cards represented.",
                 blk.occupied());
-  HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
-  HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
+  HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
+  HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
   out->print_cr(" Max size region = "HR_FORMAT", "
                 "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
-                HR_FORMAT_PARAMS(max_mem_sz_region),
-                (rem_set->mem_size() + K - 1)/K,
-                (rem_set->occupied() + K - 1)/K);
+                HR_FORMAT_PARAMS(max_rs_mem_sz_region),
+                (max_rs_rem_set->mem_size() + K - 1)/K,
+                (max_rs_rem_set->occupied() + K - 1)/K);

   out->print_cr(" Did %d coarsenings.", num_coarsenings());
+  // Strong code root stats
+  out->print_cr(" Total heap region code-root set sizes = "SIZE_FORMAT"K."
+                " Max = "SIZE_FORMAT"K.",
+                blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
+  HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
+  HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
+  out->print_cr(" Max size region = "HR_FORMAT", "
+                "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
+                HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
+                (max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
+                (max_code_root_rem_set->strong_code_roots_list_length()));
 }

@@ -319,7 +319,10 @@
                                                                             \
   diagnostic(bool, G1VerifyRSetsDuringFullGC, false,                        \
           "If true, perform verification of each heap region's "           \
-          "remembered set when verifying the heap during a full GC.")
+          "remembered set when verifying the heap during a full GC.")      \
+                                                                            \
+  diagnostic(bool, G1VerifyHeapRegionCodeRoots, false,                      \
+          "Verify the code root lists attached to each heap region.")

 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)

@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "code/nmethod.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
@@ -50,144 +51,6 @@ FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                    OopClosure* oc) :
   _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

-class VerifyLiveClosure: public OopClosure {
-  ... (the roughly 138 lines of the class body are deleted here unchanged;
-  the identical class is re-added after HeapRegion::print_on() in the
-  @@ -761,10 +777,143 @@ hunk below, where it is shown in full) ...
-};

 template<class ClosureType>
 HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
                                HeapRegion* hr,

@@ -368,7 +231,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
   if (!par) {
     // If this is parallel, this will be done later.
     HeapRegionRemSet* hrrs = rem_set();
-    if (hrrs != NULL) hrrs->clear();
+    hrrs->clear();
     _claimed = InitialClaimValue;
   }
   zero_marked_bytes();

@@ -505,6 +368,7 @@ HeapRegion::HeapRegion(uint hrs_index,
     _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
     _predicted_bytes_to_copy(0)
 {
+  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
   _orig_end = mr.end();
   // Note that initialize() will set the start of the unmarked area of the
   // region.

@@ -512,8 +376,6 @@ HeapRegion::HeapRegion(uint hrs_index,
   set_top(bottom());
   set_saved_mark();

-  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
-
   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 }

@@ -733,6 +595,160 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
   return NULL;
 }

+// Code roots support
+
+void HeapRegion::add_strong_code_root(nmethod* nm) {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->add_strong_code_root(nm);
+}
+
+void HeapRegion::remove_strong_code_root(nmethod* nm) {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->remove_strong_code_root(nm);
+}
+
+void HeapRegion::migrate_strong_code_roots() {
+  assert(in_collection_set(), "only collection set regions");
+  assert(!isHumongous(), "not humongous regions");
+
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->migrate_strong_code_roots();
+}
+
+void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
+  HeapRegionRemSet* hrrs = rem_set();
+  hrrs->strong_code_roots_do(blk);
+}
+
+class VerifyStrongCodeRootOopClosure: public OopClosure {
+  const HeapRegion* _hr;
+  nmethod* _nm;
+  bool _failures;
+  bool _has_oops_in_region;
+
+  template <class T> void do_oop_work(T* p) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+      // Note: not all the oops embedded in the nmethod are in the
+      // current region. We only look at those which are.
+      if (_hr->is_in(obj)) {
+        // Object is in the region. Check that it's below top.
+        if (_hr->top() <= (HeapWord*)obj) {
+          // Object is above top
+          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
+                                 "top "PTR_FORMAT,
+                                 obj, _hr->bottom(), _hr->end(), _hr->top());
+          _failures = true;
+          return;
+        }
+        // Nmethod has at least one oop in the current region
+        _has_oops_in_region = true;
+      }
+    }
+  }
+
+public:
+  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
+    _hr(hr), _failures(false), _has_oops_in_region(false) {}
+
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+  void do_oop(oop* p)       { do_oop_work(p); }
+
+  bool failures()           { return _failures; }
+  bool has_oops_in_region() { return _has_oops_in_region; }
+};
+
+class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
+  const HeapRegion* _hr;
+  bool _failures;
+public:
+  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
+    _hr(hr), _failures(false) {}
+
+  void do_code_blob(CodeBlob* cb) {
+    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
+    if (nm != NULL) {
+      // Verify that the nmethod is live
+      if (!nm->is_alive()) {
+        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
+                               PTR_FORMAT" in its strong code roots",
+                               _hr->bottom(), _hr->end(), nm);
+        _failures = true;
+      } else {
+        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
+        nm->oops_do(&oop_cl);
+        if (!oop_cl.has_oops_in_region()) {
+          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
+                                 PTR_FORMAT" in its strong code roots "
+                                 "with no pointers into region",
+                                 _hr->bottom(), _hr->end(), nm);
+          _failures = true;
+        } else if (oop_cl.failures()) {
+          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
+                                 "failures for nmethod "PTR_FORMAT,
+                                 _hr->bottom(), _hr->end(), nm);
+          _failures = true;
+        }
+      }
+    }
+  }
+
+  bool failures() { return _failures; }
+};
+
+void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
+  if (!G1VerifyHeapRegionCodeRoots) {
+    // We're not verifying code roots.
+    return;
+  }
+  if (vo == VerifyOption_G1UseMarkWord) {
+    // Marking verification during a full GC is performed after class
+    // unloading, code cache unloading, etc so the strong code roots
+    // attached to each heap region are in an inconsistent state. They won't
+    // be consistent until the strong code roots are rebuilt after the
+    // actual GC. Skip verifying the strong code roots at this particular
+    // time.
+    assert(VerifyDuringGC, "only way to get here");
+    return;
+  }
+
+  HeapRegionRemSet* hrrs = rem_set();
+  int strong_code_roots_length = hrrs->strong_code_roots_list_length();
+
+  // if this region is empty then there should be no entries
+  // on its strong code root list
+  if (is_empty()) {
+    if (strong_code_roots_length > 0) {
+      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
+                             "but has "INT32_FORMAT" code root entries",
+                             bottom(), end(), strong_code_roots_length);
+      *failures = true;
+    }
+    return;
+  }
+
+  // An H-region should have an empty strong code root list
+  if (isHumongous()) {
+    if (strong_code_roots_length > 0) {
+      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
+                             "but has "INT32_FORMAT" code root entries",
+                             bottom(), end(), strong_code_roots_length);
+      *failures = true;
+    }
+    return;
+  }
+
+  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
+  strong_code_roots_do(&cb_cl);
+
+  if (cb_cl.failures()) {
+    *failures = true;
+  }
+}
+
 void HeapRegion::print() const { print_on(gclog_or_tty); }
 void HeapRegion::print_on(outputStream* st) const {
   if (isHumongous()) {
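
The two verification closures added above compose: the code-blob closure visits every nmethod on the region's list, and for each live one an oop closure walks the embedded oops, requiring that at least one lands inside the region and none above top(). A compact model of that nested check; all types below are invented, reduced stand-ins and not the HotSpot classes:

    #include <cstdio>
    #include <vector>

    // Reduced model: a "region" is an address interval with a top,
    // an "nmethod" just carries the addresses it embeds.
    struct Nm { bool alive; std::vector<const char*> oops; };
    struct Rgn {
      const char *bottom, *top, *end;
      std::vector<const Nm*> strong_code_roots;

      bool verify_strong_code_roots() const {
        bool failures = false;
        for (const Nm* nm : strong_code_roots) {
          if (!nm->alive) { failures = true; continue; }  // dead nmethod on list
          bool has_oop_in_region = false;
          for (const char* p : nm->oops) {
            if (p >= bottom && p < end) {                 // oop is in this region
              if (p >= top) { failures = true; }          // ...but above top
              else          { has_oop_in_region = true; }
            }
          }
          if (!has_oop_in_region) failures = true;  // entry with no pointer here
        }
        return failures;
      }
    };

    int main() {
      char heap[64];
      Nm ok{true, {heap + 4}};
      Rgn r{heap, heap + 32, heap + 64, {&ok}};
      std::printf("failures: %d\n", r.verify_strong_code_roots());  // 0
    }
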
@@ -761,10 +777,143 @@ void HeapRegion::print_on(outputStream* st) const {
   G1OffsetTableContigSpace::print_on(st);
 }

-void HeapRegion::verify() const {
-  bool dummy = false;
-  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
-}
+class VerifyLiveClosure: public OopClosure {
+private:
+  G1CollectedHeap* _g1h;
+  CardTableModRefBS* _bs;
+  oop _containing_obj;
+  bool _failures;
+  int _n_failures;
+  VerifyOption _vo;
+public:
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord -> use mark word from object header.
+  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
+    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
+    _failures(false), _n_failures(0), _vo(vo)
+  {
+    BarrierSet* bs = _g1h->barrier_set();
+    if (bs->is_a(BarrierSet::CardTableModRef))
+      _bs = (CardTableModRefBS*)bs;
+  }
+
+  void set_containing_obj(oop obj) {
+    _containing_obj = obj;
+  }
+
+  bool failures() { return _failures; }
+  int n_failures() { return _n_failures; }
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  void print_object(outputStream* out, oop obj) {
+#ifdef PRODUCT
+    Klass* k = obj->klass();
+    const char* class_name = InstanceKlass::cast(k)->external_name();
+    out->print_cr("class name %s", class_name);
+#else // PRODUCT
+    obj->print_on(out);
+#endif // PRODUCT
+  }
+
+  template <class T>
+  void do_oop_work(T* p) {
+    assert(_containing_obj != NULL, "Precondition");
+    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
+           "Precondition");
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+      bool failed = false;
+      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
+        MutexLockerEx x(ParGCRareEvent_lock,
+                        Mutex::_no_safepoint_check_flag);
+
+        if (!_failures) {
+          gclog_or_tty->print_cr("");
+          gclog_or_tty->print_cr("----------");
+        }
+        if (!_g1h->is_in_closed_subset(obj)) {
+          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+          gclog_or_tty->print_cr("Field "PTR_FORMAT
+                                 " of live obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 p, (void*) _containing_obj,
+                                 from->bottom(), from->end());
+          print_object(gclog_or_tty, _containing_obj);
+          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
+                                 (void*) obj);
+        } else {
+          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
+          gclog_or_tty->print_cr("Field "PTR_FORMAT
+                                 " of live obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 p, (void*) _containing_obj,
+                                 from->bottom(), from->end());
+          print_object(gclog_or_tty, _containing_obj);
+          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
+                                 "["PTR_FORMAT", "PTR_FORMAT")",
+                                 (void*) obj, to->bottom(), to->end());
+          print_object(gclog_or_tty, obj);
+        }
+        gclog_or_tty->print_cr("----------");
+        gclog_or_tty->flush();
+        _failures = true;
+        failed = true;
+        _n_failures++;
+      }
+
+      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
+        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+        HeapRegion* to   = _g1h->heap_region_containing(obj);
+        if (from != NULL && to != NULL &&
+            from != to &&
+            !to->isHumongous()) {
+          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
+          jbyte cv_field = *_bs->byte_for_const(p);
+          const jbyte dirty = CardTableModRefBS::dirty_card_val();
+
+          bool is_bad = !(from->is_young()
+                          || to->rem_set()->contains_reference(p)
+                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
+                              (_containing_obj->is_objArray() ?
+                                  cv_field == dirty
+                               : cv_obj == dirty || cv_field == dirty));
+          if (is_bad) {
+            MutexLockerEx x(ParGCRareEvent_lock,
+                            Mutex::_no_safepoint_check_flag);
+
+            if (!_failures) {
+              gclog_or_tty->print_cr("");
+              gclog_or_tty->print_cr("----------");
+            }
+            gclog_or_tty->print_cr("Missing rem set entry:");
+            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+                                   "of obj "PTR_FORMAT", "
+                                   "in region "HR_FORMAT,
+                                   p, (void*) _containing_obj,
+                                   HR_FORMAT_PARAMS(from));
+            _containing_obj->print_on(gclog_or_tty);
+            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+                                   "in region "HR_FORMAT,
+                                   (void*) obj,
+                                   HR_FORMAT_PARAMS(to));
+            obj->print_on(gclog_or_tty);
+            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
+                                   cv_obj, cv_field);
+            gclog_or_tty->print_cr("----------");
+            gclog_or_tty->flush();
+            _failures = true;
+            if (!failed) _n_failures++;
+          }
+        }
+      }
+    }
+  }
+};

 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
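
The core of the re-added VerifyLiveClosure is the is_bad predicate: a cross-region, non-young reference must either already be in the destination region's rem set, or still be covered by a dirty card in the case where log buffers were not flushed before verification. The predicate can be isolated into a small testable function with stand-in booleans (everything below is illustrative, not HotSpot code):

    #include <cstdio>

    // Stand-in flags for the quantities VerifyLiveClosure reads; the real
    // code gets them from the regions, the rem set and the card table.
    struct Ref {
      bool from_is_young;
      bool in_remembered_set;
      bool buffers_flushed;          // G1HRRSFlushLogBuffersOnVerify
      bool containing_is_obj_array;
      bool field_card_dirty;
      bool obj_card_dirty;
    };

    static bool is_bad(const Ref& r) {
      bool covered_by_dirty_card =
          !r.buffers_flushed &&      // buffers were not flushed
          (r.containing_is_obj_array ? r.field_card_dirty
                                     : r.obj_card_dirty || r.field_card_dirty);
      return !(r.from_is_young || r.in_remembered_set || covered_by_dirty_card);
    }

    int main() {
      // Missing entry, buffers flushed, clean cards: a real verification failure.
      Ref missing{false, false, true, false, false, false};
      // Missing entry, but the field's card is still dirty and buffers weren't
      // flushed: the entry will appear once buffers are processed, so not bad.
      Ref pending{false, false, false, false, true, false};
      std::printf("missing=%d pending=%d\n", is_bad(missing), is_bad(pending));
    }
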
@@ -904,6 +1053,13 @@ void HeapRegion::verify(VerifyOption vo,
     *failures = true;
     return;
   }
+
+  verify_strong_code_roots(vo, failures);
+}
+
+void HeapRegion::verify() const {
+  bool dummy = false;
+  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }

 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go

@@ -52,6 +52,7 @@ class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
 class HeapRegionSetBase;
+class nmethod;

 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
 #define HR_FORMAT_PARAMS(_hr_) \

@@ -371,7 +372,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
     RebuildRSClaimValue      = 5,
     ParEvacFailureClaimValue = 6,
     AggregateCountClaimValue = 7,
-    VerifyCountClaimValue    = 8
+    VerifyCountClaimValue    = 8,
+    ParMarkRootClaimValue    = 9
   };

   inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {

@@ -796,6 +798,25 @@ class HeapRegion: public G1OffsetTableContigSpace {

   virtual void reset_after_compaction();

+  // Routines for managing a list of code roots (attached to
+  // this region's RSet) that point into this heap region.
+  void add_strong_code_root(nmethod* nm);
+  void remove_strong_code_root(nmethod* nm);
+
+  // During a collection, migrate the successfully evacuated
+  // strong code roots that reference into this region to the
+  // new regions that they now point into. Unsuccessfully
+  // evacuated code roots are not migrated.
+  void migrate_strong_code_roots();
+
+  // Applies blk->do_code_blob() to each of the entries in
+  // the strong code roots list for this region
+  void strong_code_roots_do(CodeBlobClosure* blk) const;
+
+  // Verify that the entries on the strong code root list for this
+  // region are live and include at least one pointer into this region.
+  void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
+
   void print() const;
   void print_on(outputStream* st) const;
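
strong_code_roots_do in the declarations above is the usual HotSpot closure idiom: iteration is owned by the region, and the caller supplies a virtual visitor. A minimal standard-C++ model of the idiom, with invented CodeBlob and closure types that are not the HotSpot classes:

    #include <cstdio>
    #include <vector>

    struct CodeBlob { const char* name; };

    // The visitor interface the caller implements.
    struct CodeBlobClosure {
      virtual void do_code_blob(CodeBlob* cb) = 0;
      virtual ~CodeBlobClosure() = default;
    };

    // The region owns iteration and applies the closure to each entry.
    struct RegionCodeRoots {
      std::vector<CodeBlob*> roots;
      void strong_code_roots_do(CodeBlobClosure* blk) const {
        for (CodeBlob* cb : roots) blk->do_code_blob(cb);
      }
    };

    struct PrintingClosure : CodeBlobClosure {
      void do_code_blob(CodeBlob* cb) override { std::printf("%s\n", cb->name); }
    };

    int main() {
      CodeBlob a{"nmethod A"}, b{"nmethod B"};
      RegionCodeRoots r{{&a, &b}};
      PrintingClosure p;
      r.strong_code_roots_do(&p);
    }
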
@@ -33,6 +33,7 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/growableArray.hpp"

 class PerRegionTable: public CHeapObj<mtGC> {
   friend class OtherRegionsTable;

@@ -849,7 +850,7 @@ int HeapRegionRemSet::num_par_rem_sets() {

 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
-  : _bosa(bosa), _other_regions(hr) {
+  : _bosa(bosa), _strong_code_roots_list(NULL), _other_regions(hr) {
   reset_for_par_iteration();
 }

@@ -908,6 +909,12 @@ void HeapRegionRemSet::cleanup() {
 }

 void HeapRegionRemSet::clear() {
+  if (_strong_code_roots_list != NULL) {
+    delete _strong_code_roots_list;
+  }
+  _strong_code_roots_list = new (ResourceObj::C_HEAP, mtGC)
+                                GrowableArray<nmethod*>(10, 0, NULL, true);
+
   _other_regions.clear();
   assert(occupied() == 0, "Should be clear.");
   reset_for_par_iteration();

@@ -925,6 +932,121 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
   _other_regions.scrub(ctbs, region_bm, card_bm);
 }

+// Code roots support
+
+void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
+  assert(nm != NULL, "sanity");
+  // Search for the code blob from the RHS to avoid
+  // duplicate entries as much as possible
+  if (_strong_code_roots_list->find_from_end(nm) < 0) {
+    // Code blob isn't already in the list
+    _strong_code_roots_list->push(nm);
+  }
+}
+
+void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
+  assert(nm != NULL, "sanity");
+  int idx = _strong_code_roots_list->find(nm);
+  if (idx >= 0) {
+    _strong_code_roots_list->remove_at(idx);
+  }
+  // Check that there were no duplicates
+  guarantee(_strong_code_roots_list->find(nm) < 0, "duplicate entry found");
+}
|
|
||||||
|
class NMethodMigrationOopClosure : public OopClosure {
|
||||||
|
G1CollectedHeap* _g1h;
|
||||||
|
HeapRegion* _from;
|
||||||
|
nmethod* _nm;
|
||||||
|
|
||||||
|
uint _num_self_forwarded;
|
||||||
|
|
||||||
|
template <class T> void do_oop_work(T* p) {
|
||||||
|
T heap_oop = oopDesc::load_heap_oop(p);
|
||||||
|
if (!oopDesc::is_null(heap_oop)) {
|
||||||
|
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
|
||||||
|
if (_from->is_in(obj)) {
|
||||||
|
// Reference still points into the source region.
|
||||||
|
// Since roots are immediately evacuated this means that
|
||||||
|
// we must have self forwarded the object
|
||||||
|
assert(obj->is_forwarded(),
|
||||||
|
err_msg("code roots should be immediately evacuated. "
|
||||||
|
"Ref: "PTR_FORMAT", "
|
||||||
|
"Obj: "PTR_FORMAT", "
|
||||||
|
"Region: "HR_FORMAT,
|
||||||
|
p, (void*) obj, HR_FORMAT_PARAMS(_from)));
|
||||||
|
assert(obj->forwardee() == obj,
|
||||||
|
err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
|
||||||
|
|
||||||
|
// The object has been self forwarded.
|
||||||
|
// Note, if we're during an initial mark pause, there is
|
||||||
|
// no need to explicitly mark object. It will be marked
|
||||||
|
// during the regular evacuation failure handling code.
|
||||||
|
_num_self_forwarded++;
|
||||||
|
} else {
|
||||||
|
// The reference points into a promotion or to-space region
|
||||||
|
HeapRegion* to = _g1h->heap_region_containing(obj);
|
||||||
|
to->rem_set()->add_strong_code_root(_nm);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
|
||||||
|
_g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
|
||||||
|
|
||||||
|
void do_oop(narrowOop* p) { do_oop_work(p); }
|
||||||
|
void do_oop(oop* p) { do_oop_work(p); }
|
||||||
|
|
||||||
|
uint retain() { return _num_self_forwarded > 0; }
|
||||||
|
};
|
||||||
|
|
||||||
|
void HeapRegionRemSet::migrate_strong_code_roots() {
|
||||||
|
assert(hr()->in_collection_set(), "only collection set regions");
|
||||||
|
assert(!hr()->isHumongous(), "not humongous regions");
|
||||||
|
|
||||||
|
ResourceMark rm;
|
||||||
|
|
||||||
|
// List of code blobs to retain for this region
|
||||||
|
GrowableArray<nmethod*> to_be_retained(10);
|
||||||
|
G1CollectedHeap* g1h = G1CollectedHeap::heap();
|
||||||
|
|
||||||
|
while (_strong_code_roots_list->is_nonempty()) {
|
||||||
|
nmethod *nm = _strong_code_roots_list->pop();
|
||||||
|
if (nm != NULL) {
|
||||||
|
NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
|
||||||
|
nm->oops_do(&oop_cl);
|
||||||
|
if (oop_cl.retain()) {
|
||||||
|
to_be_retained.push(nm);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now push any code roots we need to retain
|
||||||
|
assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
|
||||||
|
"Retained nmethod list must be empty or "
|
||||||
|
"evacuation of this region failed");
|
||||||
|
|
||||||
|
while (to_be_retained.is_nonempty()) {
|
||||||
|
nmethod* nm = to_be_retained.pop();
|
||||||
|
assert(nm != NULL, "sanity");
|
||||||
|
add_strong_code_root(nm);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
|
||||||
|
for (int i = 0; i < _strong_code_roots_list->length(); i += 1) {
|
||||||
|
nmethod* nm = _strong_code_roots_list->at(i);
|
||||||
|
blk->do_code_blob(nm);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t HeapRegionRemSet::strong_code_roots_mem_size() {
|
||||||
|
return sizeof(GrowableArray<nmethod*>) +
|
||||||
|
_strong_code_roots_list->max_length() * sizeof(nmethod*);
|
||||||
|
}
|
||||||
|
|
||||||
//-------------------- Iteration --------------------
|
//-------------------- Iteration --------------------
|
||||||
|
|
||||||
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
|
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
|
||||||
|
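A note on the list discipline above: add_strong_code_root() keeps the GrowableArray duplicate-free by searching from the end, which is cheap in the common case where the same nmethod is re-registered repeatedly in a row. A minimal standalone sketch of that pattern, using the same GrowableArray calls the new code relies on (the int* elements are stand-ins for nmethod*, and a ResourceMark is assumed to be in scope for the resource-allocated array):

    GrowableArray<int*> list(10);
    int a, b;
    list.push(&a);
    list.push(&b);
    // Re-registering &b: find_from_end(&b) returns index 1 (>= 0),
    // so no duplicate is pushed -- mirroring add_strong_code_root().
    if (list.find_from_end(&b) < 0) {
      list.push(&b);
    }
    assert(list.length() == 2, "no duplicate expected");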
@@ -37,6 +37,7 @@ class HeapRegion;
 class HeapRegionRemSetIterator;
 class PerRegionTable;
 class SparsePRT;
+class nmethod;
 
 // Essentially a wrapper around SparsePRTCleanupTask. See
 // sparsePRT.hpp for more details.
@@ -191,6 +192,10 @@ private:
   G1BlockOffsetSharedArray* _bosa;
   G1BlockOffsetSharedArray* bosa() const { return _bosa; }
 
+  // A list of code blobs (nmethods) whose code contains pointers into
+  // the region that owns this RSet.
+  GrowableArray<nmethod*>* _strong_code_roots_list;
+
   OtherRegionsTable _other_regions;
 
   enum ParIterState { Unclaimed, Claimed, Complete };
@@ -282,11 +287,13 @@ public:
   }
 
   // The actual # of bytes this hr_remset takes up.
+  // Note also includes the strong code root set.
   size_t mem_size() {
     return _other_regions.mem_size()
       // This correction is necessary because the above includes the second
       // part.
-      + sizeof(this) - sizeof(OtherRegionsTable);
+      + (sizeof(this) - sizeof(OtherRegionsTable))
+      + strong_code_roots_mem_size();
   }
 
   // Returns the memory occupancy of all static data structures associated
@@ -304,6 +311,37 @@ public:
   bool contains_reference(OopOrNarrowOopStar from) const {
     return _other_regions.contains_reference(from);
   }
+
+  // Routines for managing the list of code roots that point into
+  // the heap region that owns this RSet.
+  void add_strong_code_root(nmethod* nm);
+  void remove_strong_code_root(nmethod* nm);
+
+  // During a collection, migrate the successfully evacuated strong
+  // code roots that referenced into the region that owns this RSet
+  // to the RSets of the new regions that they now point into.
+  // Unsuccessfully evacuated code roots are not migrated.
+  void migrate_strong_code_roots();
+
+  // Applies blk->do_code_blob() to each of the entries in
+  // the strong code roots list
+  void strong_code_roots_do(CodeBlobClosure* blk) const;
+
+  // Returns the number of elements in the strong code roots list
+  int strong_code_roots_list_length() {
+    return _strong_code_roots_list->length();
+  }
+
+  // Returns true if the strong code roots contains the given
+  // nmethod.
+  bool strong_code_roots_list_contains(nmethod* nm) {
+    return _strong_code_roots_list->contains(nm);
+  }
+
+  // Returns the amount of memory, in bytes, currently
+  // consumed by the strong code roots.
+  size_t strong_code_roots_mem_size();
+
   void print() const;
 
   // Called during a stop-world phase to perform any deferred cleanups.
@@ -70,9 +70,6 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
-  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
-            "we can only request an allocation if the GC cause is for "
-            "an incremental GC pause");
   _gc_cause = gc_cause;
 }
 
@@ -216,6 +216,7 @@ void ParallelScavengeHeap::update_counters() {
   young_gen()->update_counters();
   old_gen()->update_counters();
   MetaspaceCounters::update_performance_counters();
+  CompressedClassSpaceCounters::update_performance_counters();
 }
 
 size_t ParallelScavengeHeap::capacity() const {
@@ -118,6 +118,14 @@ void CollectedHeap::print_heap_after_gc() {
   }
 }
 
+void CollectedHeap::register_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+}
+
+void CollectedHeap::unregister_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+}
+
 void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
   const GCHeapSummary& heap_summary = create_heap_summary();
   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
@@ -49,6 +49,7 @@ class MetaspaceSummary;
 class Thread;
 class ThreadClosure;
 class VirtualSpaceSummary;
+class nmethod;
 
 class GCMessage : public FormatBuffer<1024> {
  public:
@@ -603,6 +604,11 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   void print_heap_before_gc();
   void print_heap_after_gc();
 
+  // Registering and unregistering an nmethod (compiled code) with the heap.
+  // Override with specific mechanism for each specialized heap type.
+  virtual void register_nmethod(nmethod* nm);
+  virtual void unregister_nmethod(nmethod* nm);
+
   void trace_heap_before_gc(GCTracer* gc_tracer);
   void trace_heap_after_gc(GCTracer* gc_tracer);
 
@@ -666,7 +666,7 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
   NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
 
 #define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
-  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
+  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)
 
 #define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
   (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
@@ -675,16 +675,16 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
   (type*) (AllocateHeap((size) * sizeof(type), memflags))
 
 #define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
-  NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
 
 #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
-  NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL)
+  NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
 
 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
-  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))
+  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
 
 #define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
-  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
+  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
 
 #define FREE_C_HEAP_ARRAY(type, old, memflags) \
   FreeHeap((char*)(old), memflags)
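The parenthesization added above matters because each macro multiplies its `size` argument by sizeof(type); without parentheses, a compound argument binds to the `*` incorrectly. A self-contained illustration of the precedence bug being guarded against (plain C++, independent of the HotSpot allocators; assumes a 4-byte int):

    #define BYTES_UNSAFE(type, size) (size * sizeof(type))
    #define BYTES_SAFE(type, size)   ((size) * sizeof(type))

    size_t n = 4;
    size_t wrong = BYTES_UNSAFE(int, n + 1);  // expands to n + 1*sizeof(int) == 8
    size_t right = BYTES_SAFE(int, n + 1);    // expands to (n + 1)*sizeof(int) == 20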
@@ -193,6 +193,8 @@ size_t GenCollectorPolicy::compute_max_alignment() {
     alignment = lcm(os::large_page_size(), alignment);
   }
 
+  assert(alignment >= min_alignment(), "Must be");
+
   return alignment;
 }
 
@@ -362,15 +362,12 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
 ReservedSpace FileMapInfo::reserve_shared_memory() {
   struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
   char* requested_addr = si->_base;
-  size_t alignment = os::vm_allocation_granularity();
 
-  size_t size = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
-                              SharedMiscDataSize + SharedMiscCodeSize,
-                              alignment);
+  size_t size = FileMapInfo::shared_spaces_size();
 
   // Reserve the space first, then map otherwise map will go right over some
   // other reserved memory (like the code cache).
-  ReservedSpace rs(size, alignment, false, requested_addr);
+  ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
   if (!rs.is_reserved()) {
     fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr));
     return rs;
@@ -559,3 +556,19 @@ void FileMapInfo::print_shared_spaces() {
            si->_base, si->_base + si->_used);
   }
 }
+
+// Unmap mapped regions of shared space.
+void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
+  FileMapInfo *map_info = FileMapInfo::current_info();
+  if (map_info) {
+    map_info->fail_continue(msg);
+    for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+      if (map_info->_header._space[i]._base != NULL) {
+        map_info->unmap_region(i);
+        map_info->_header._space[i]._base = NULL;
+      }
+    }
+  } else if (DumpSharedSpaces) {
+    fail_stop(msg, NULL);
+  }
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,15 @@ public:
   // Return true if given address is in the mapped shared space.
   bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
   void print_shared_spaces() NOT_CDS_RETURN;
+
+  static size_t shared_spaces_size() {
+    return align_size_up(SharedReadOnlySize + SharedReadWriteSize +
+                         SharedMiscDataSize + SharedMiscCodeSize,
+                         os::vm_allocation_granularity());
+  }
+
+  // Stop CDS sharing and unmap CDS regions.
+  static void stop_sharing_and_unmap(const char* msg);
 };
 
 #endif // SHARE_VM_MEMORY_FILEMAP_HPP
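shared_spaces_size() centralizes the computation that reserve_shared_memory() and the Metaspace code both need: the sum of the four CDS region sizes, rounded up to the allocation granularity. A worked example with illustrative numbers (not the build defaults):

    // SharedReadOnlySize = 10 MB, SharedReadWriteSize = 12 MB,
    // SharedMiscDataSize =  4 MB, SharedMiscCodeSize  =  2 MB,
    // os::vm_allocation_granularity() = 64 KB:
    //   10 + 12 + 4 + 2 = 28 MB, already a multiple of 64 KB,
    //   so align_size_up(28 MB, 64 KB) == 28 MB.
    // If the sum were 28 MB + 3 KB, it would round up to 28 MB + 64 KB.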
@@ -95,13 +95,13 @@ jint GenCollectedHeap::initialize() {
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
   // The heap must be at least as aligned as generations.
-  size_t alignment = Generation::GenGrain;
+  size_t gen_alignment = Generation::GenGrain;
 
   _gen_specs = gen_policy()->generations();
 
   // Make sure the sizes are all aligned.
   for (i = 0; i < _n_gens; i++) {
-    _gen_specs[i]->align(alignment);
+    _gen_specs[i]->align(gen_alignment);
   }
 
   // Allocate space for the heap.
@@ -109,9 +109,11 @@ jint GenCollectedHeap::initialize() {
   char* heap_address;
   size_t total_reserved = 0;
   int n_covered_regions = 0;
-  ReservedSpace heap_rs(0);
+  ReservedSpace heap_rs;
 
-  heap_address = allocate(alignment, &total_reserved,
+  size_t heap_alignment = collector_policy()->max_alignment();
+
+  heap_address = allocate(heap_alignment, &total_reserved,
                           &n_covered_regions, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
@@ -168,6 +170,8 @@ char* GenCollectedHeap::allocate(size_t alignment,
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
+  assert(alignment % pageSize == 0, "Must be");
+
   for (int i = 0; i < _n_gens; i++) {
     total_reserved += _gen_specs[i]->max_size();
     if (total_reserved < _gen_specs[i]->max_size()) {
@@ -175,24 +179,17 @@ char* GenCollectedHeap::allocate(size_t alignment,
     }
     n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
-  assert(total_reserved % pageSize == 0,
-         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
-                 SIZE_FORMAT, total_reserved, pageSize));
+  assert(total_reserved % alignment == 0,
+         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
+                 SIZE_FORMAT, total_reserved, alignment));
 
   // Needed until the cardtable is fixed to have the right number
   // of covered regions.
   n_covered_regions += 2;
 
-  if (UseLargePages) {
-    assert(total_reserved != 0, "total_reserved cannot be 0");
-    total_reserved = round_to(total_reserved, os::large_page_size());
-    if (total_reserved < os::large_page_size()) {
-      vm_exit_during_initialization(overflow_msg);
-    }
-  }
-
-  *_total_reserved = total_reserved;
-  *_n_covered_regions = n_covered_regions;
+  *_total_reserved = total_reserved;
+  *_n_covered_regions = n_covered_regions;
+
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
 }
@@ -1211,6 +1208,7 @@ void GenCollectedHeap::gc_epilogue(bool full) {
   }
 
   MetaspaceCounters::update_performance_counters();
+  CompressedClassSpaceCounters::update_performance_counters();
 
   always_do_update_barrier = UseConcMarkSweepGC;
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,9 +118,12 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
   _number_of_committed_segments = size_to_segments(_memory.committed_size());
   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
+  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
+  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
+  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
 
   // reserve space for _segmap
-  if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
+  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
     return false;
   }
 
@@ -64,7 +64,7 @@ void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
 }
 
 void CodeBlobToOopClosure::do_newly_marked_nmethod(nmethod* nm) {
-  nm->oops_do(_cl, /*do_strong_roots_only=*/ true);
+  nm->oops_do(_cl, /*allow_zombie=*/ false);
 }
 
 void CodeBlobToOopClosure::do_code_blob(CodeBlob* cb) {
@@ -35,6 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
@@ -54,6 +55,8 @@ size_t const allocation_from_dictionary_limit = 64 * K;
 
 MetaWord* last_allocated = 0;
 
+size_t Metaspace::_class_metaspace_size;
+
 // Used in declarations in SpaceManager and ChunkManager
 enum ChunkIndex {
   ZeroIndex = 0,
@@ -261,10 +264,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
   // count of chunks contained in this VirtualSpace
   uintx _container_count;
 
-  // Convenience functions for logical bottom and end
-  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
-  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
-
   // Convenience functions to access the _virtual_space
   char* low()  const { return virtual_space()->low(); }
   char* high() const { return virtual_space()->high(); }
@@ -284,6 +283,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
   VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
   ~VirtualSpaceNode();
 
+  // Convenience functions for logical bottom and end
+  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
+  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
+
   // address of next available space in _virtual_space;
   // Accessors
   VirtualSpaceNode* next() { return _next; }
@@ -342,7 +345,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
 };
 
 // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   // align up to vm allocation granularity
   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 
@@ -1313,7 +1316,8 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
 
   // Class virtual space should always be expanded.  Call GC for the other
   // metadata virtual space.
-  if (vsl == Metaspace::class_space_list()) return true;
+  if (Metaspace::using_class_space() &&
+      (vsl == Metaspace::class_space_list())) return true;
 
   // If this is part of an allocation after a GC, expand
   // unconditionally.
@@ -2257,7 +2261,7 @@ void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
   size_t raw_word_size = get_raw_word_size(word_size);
   size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
   assert(raw_word_size >= min_size,
-         err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
+         err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
   block_freelists()->return_block(p, raw_word_size);
 }
 
@@ -2374,7 +2378,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
   if (result == NULL) {
     result = grow_and_allocate(word_size);
   }
-  if (result > 0) {
+  if (result != 0) {
     inc_used_metrics(word_size);
     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
            "Head of the list is being allocated");
@@ -2476,15 +2480,13 @@ void SpaceManager::mangle_freed_chunks() {
 size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
 size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
 
+size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  return list == NULL ? 0 : list->free_bytes();
+}
+
 size_t MetaspaceAux::free_bytes() {
-  size_t result = 0;
-  if (Metaspace::class_space_list() != NULL) {
-    result = result + Metaspace::class_space_list()->free_bytes();
-  }
-  if (Metaspace::space_list() != NULL) {
-    result = result + Metaspace::space_list()->free_bytes();
-  }
-  return result;
+  return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
 }
 
 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
@@ -2549,6 +2551,9 @@ size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
 }
 
 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
+  if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
+    return 0;
+  }
   // Don't count the space in the freelists.  That space will be
   // added to the capacity calculation as needed.
   size_t capacity = 0;
@@ -2563,18 +2568,18 @@ size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
 }
 
 size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
-  size_t reserved = (mdtype == Metaspace::ClassType) ?
-                      Metaspace::class_space_list()->virtual_space_total() :
-                      Metaspace::space_list()->virtual_space_total();
-  return reserved * BytesPerWord;
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  return list == NULL ? 0 : list->virtual_space_total();
 }
 
 size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
 
 size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
-  ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
-                          Metaspace::class_space_list()->chunk_manager() :
-                          Metaspace::space_list()->chunk_manager();
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  if (list == NULL) {
+    return 0;
+  }
+  ChunkManager* chunk = list->chunk_manager();
   chunk->slow_verify();
   return chunk->free_chunks_total();
 }
@@ -2615,7 +2620,6 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
 
 // This is printed when PrintGCDetails
 void MetaspaceAux::print_on(outputStream* out) {
-  Metaspace::MetadataType ct = Metaspace::ClassType;
   Metaspace::MetadataType nct = Metaspace::NonClassType;
 
   out->print_cr(" Metaspace total "
@@ -2629,12 +2633,15 @@ void MetaspaceAux::print_on(outputStream* out) {
                 allocated_capacity_bytes(nct)/K,
                 allocated_used_bytes(nct)/K,
                 reserved_in_bytes(nct)/K);
-  out->print_cr("  class space "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes(ct)/K,
-                allocated_used_bytes(ct)/K,
-                reserved_in_bytes(ct)/K);
+  if (Metaspace::using_class_space()) {
+    Metaspace::MetadataType ct = Metaspace::ClassType;
+    out->print_cr("  class space "
+                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                  " reserved " SIZE_FORMAT "K",
+                  allocated_capacity_bytes(ct)/K,
+                  allocated_used_bytes(ct)/K,
+                  reserved_in_bytes(ct)/K);
+  }
 }
 
 // Print information for class space and data space separately.
@@ -2659,13 +2666,37 @@ void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
   assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
 }
 
-// Print total fragmentation for class and data metaspaces separately
-void MetaspaceAux::print_waste(outputStream* out) {
-  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
-  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
+// Print total fragmentation for class metaspaces
+void MetaspaceAux::print_class_waste(outputStream* out) {
+  assert(Metaspace::using_class_space(), "class metaspace not used");
   size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
   size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
+  ClassLoaderDataGraphMetaspaceIterator iter;
+  while (iter.repeat()) {
+    Metaspace* msp = iter.get_next();
+    if (msp != NULL) {
+      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
+      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
+      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
+      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
+      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
+      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
+      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
+    }
+  }
+  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
+                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
+                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
+                "large count " SIZE_FORMAT,
+                cls_specialized_count, cls_specialized_waste,
+                cls_small_count, cls_small_waste,
+                cls_medium_count, cls_medium_waste, cls_humongous_count);
+}
+
+// Print total fragmentation for data and class metaspaces separately
+void MetaspaceAux::print_waste(outputStream* out) {
+  size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
+  size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
+
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
@@ -2678,14 +2709,6 @@ void MetaspaceAux::print_waste(outputStream* out) {
       medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
       medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
       humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
-
-      cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
-      cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
-      cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
-      cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
-      cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
-      cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
-      cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
     }
   }
   out->print_cr("Total fragmentation waste (words) doesn't count free space");
@@ -2695,13 +2718,9 @@ void MetaspaceAux::print_waste(outputStream* out) {
                 "large count " SIZE_FORMAT,
                 specialized_count, specialized_waste, small_count,
                 small_waste, medium_count, medium_waste, humongous_count);
-  out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
-                SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
-                SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
-                "large count " SIZE_FORMAT,
-                cls_specialized_count, cls_specialized_waste,
-                cls_small_count, cls_small_waste,
-                cls_medium_count, cls_medium_waste, cls_humongous_count);
+  if (Metaspace::using_class_space()) {
+    print_class_waste(out);
+  }
 }
 
 // Dump global metaspace things from the end of ClassLoaderDataGraph
@@ -2714,7 +2733,9 @@ void MetaspaceAux::dump(outputStream* out) {
 
 void MetaspaceAux::verify_free_chunks() {
   Metaspace::space_list()->chunk_manager()->verify();
-  Metaspace::class_space_list()->chunk_manager()->verify();
+  if (Metaspace::using_class_space()) {
+    Metaspace::class_space_list()->chunk_manager()->verify();
+  }
 }
 
 void MetaspaceAux::verify_capacity() {
@@ -2776,7 +2797,9 @@ Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
 
 Metaspace::~Metaspace() {
   delete _vsm;
-  delete _class_vsm;
+  if (using_class_space()) {
+    delete _class_vsm;
+  }
 }
 
 VirtualSpaceList* Metaspace::_space_list = NULL;
@@ -2784,9 +2807,123 @@ VirtualSpaceList* Metaspace::_class_space_list = NULL;
 
 #define VIRTUALSPACEMULTIPLIER 2
 
+#ifdef _LP64
+void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
+  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
+  // narrow_klass_base is the lower of the metaspace base and the cds base
+  // (if cds is enabled).  The narrow_klass_shift depends on the distance
+  // between the lower base and higher address.
+  address lower_base;
+  address higher_address;
+  if (UseSharedSpaces) {
+    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+                          (address)(metaspace_base + class_metaspace_size()));
+    lower_base = MIN2(metaspace_base, cds_base);
+  } else {
+    higher_address = metaspace_base + class_metaspace_size();
+    lower_base = metaspace_base;
+  }
+  Universe::set_narrow_klass_base(lower_base);
+  if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
+    Universe::set_narrow_klass_shift(0);
+  } else {
+    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
+    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+  }
+}
+
+// Return TRUE if the specified metaspace_base and cds_base are close enough
+// to work with compressed klass pointers.
+bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
+  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
+  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  address lower_base = MIN2((address)metaspace_base, cds_base);
+  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
+                                (address)(metaspace_base + class_metaspace_size()));
+  return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
+}
+
+// Try to allocate the metaspace at the requested addr.
+void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
+  assert(using_class_space(), "called improperly");
+  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
+         "Metaspace size is too big");
+
+  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                             os::vm_allocation_granularity(),
+                                             false, requested_addr, 0);
+  if (!metaspace_rs.is_reserved()) {
+    if (UseSharedSpaces) {
+      // Keep trying to allocate the metaspace, increasing the requested_addr
+      // by 1GB each time, until we reach an address that will no longer allow
+      // use of CDS with compressed klass pointers.
+      char *addr = requested_addr;
+      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
+             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
+        addr = addr + 1*G;
+        metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                     os::vm_allocation_granularity(), false, addr, 0);
+      }
+    }
+
+    // If no successful allocation then try to allocate the space anywhere.  If
+    // that fails then OOM doom.  At this point we cannot try allocating the
+    // metaspace as if UseCompressedKlassPointers is off because too much
+    // initialization has happened that depends on UseCompressedKlassPointers.
+    // So, UseCompressedKlassPointers cannot be turned off at this point.
+    if (!metaspace_rs.is_reserved()) {
+      metaspace_rs = ReservedSpace(class_metaspace_size(),
+                                   os::vm_allocation_granularity(), false);
+      if (!metaspace_rs.is_reserved()) {
+        vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
+                                              class_metaspace_size()));
+      }
+    }
+  }
+
+  // If we got here then the metaspace got allocated.
+  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+
+  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
+  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
+    FileMapInfo::stop_sharing_and_unmap(
+        "Could not allocate metaspace at a compatible address");
+  }
+
+  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
+                                  UseSharedSpaces ? (address)cds_base : 0);
+
+  initialize_class_space(metaspace_rs);
+
+  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
+    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
+                           Universe::narrow_klass_base(), Universe::narrow_klass_shift());
+    gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
+                           class_metaspace_size(), metaspace_rs.base(), requested_addr);
+  }
+}
+
+// For UseCompressedKlassPointers the class space is reserved above the top of
+// the Java heap.  The argument passed in is at the base of the compressed space.
+void Metaspace::initialize_class_space(ReservedSpace rs) {
+  // The reserved space size may be bigger because of alignment, esp with UseLargePages
+  assert(rs.size() >= ClassMetaspaceSize,
+         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
+  assert(using_class_space(), "Must be using class space");
+  _class_space_list = new VirtualSpaceList(rs);
+}
+
+#endif
+
 void Metaspace::global_initialize() {
   // Initialize the alignment for shared spaces.
   int max_alignment = os::vm_page_size();
+  size_t cds_total = 0;
+
+  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
+                                         os::vm_allocation_granularity()));
+
   MetaspaceShared::set_max_alignment(max_alignment);
 
   if (DumpSharedSpaces) {
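To make the shift decision in set_narrow_klass_base_and_shift() above concrete, here is a worked example with illustrative addresses (not values the VM is guaranteed to choose):

    // No CDS: lower_base = metaspace base, say 0x00000007c0000000.
    // higher_address = lower_base + class_metaspace_size().
    // With class_metaspace_size() == 1 GB the span is 1 GB < 4 GB (max_juint),
    // so narrow_klass_shift = 0: a klass pointer is encoded as a plain
    // 32-bit byte offset from lower_base.
    // Only if the span reached 4 GB would the shift become
    // LogKlassAlignmentInBytes -- which the code forbids when CDS is in use
    // (hence the assert), since archived pointers are written with shift 0.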
@@ -2798,15 +2935,31 @@ void Metaspace::global_initialize() {
     // Initialize with the sum of the shared space sizes.  The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
-    size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
-                                 SharedMiscDataSize + SharedMiscCodeSize,
-                                 os::vm_allocation_granularity());
-    size_t word_size = total/wordSize;
-    _space_list = new VirtualSpaceList(word_size);
+    cds_total = FileMapInfo::shared_spaces_size();
+    _space_list = new VirtualSpaceList(cds_total/wordSize);
+
+#ifdef _LP64
+    // Set the compressed klass pointer base so that decoding of these pointers works
+    // properly when creating the shared archive.
+    assert(UseCompressedOops && UseCompressedKlassPointers,
+           "UseCompressedOops and UseCompressedKlassPointers must be set");
+    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
+    if (TraceMetavirtualspaceAllocation && Verbose) {
+      gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
+                             _space_list->current_virtual_space()->bottom());
+    }
+
+    // Set the shift to zero.
+    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
+           "CDS region is too large");
+    Universe::set_narrow_klass_shift(0);
+#endif
+
   } else {
     // If using shared space, open the file that contains the shared space
     // and map in the memory before initializing the rest of metaspace (so
     // the addresses don't conflict)
+    address cds_address = NULL;
     if (UseSharedSpaces) {
       FileMapInfo* mapinfo = new FileMapInfo();
       memset(mapinfo, 0, sizeof(FileMapInfo));
@@ -2821,8 +2974,22 @@ void Metaspace::global_initialize() {
         assert(!mapinfo->is_open() && !UseSharedSpaces,
                "archive file not closed or shared spaces not disabled.");
       }
+      cds_total = FileMapInfo::shared_spaces_size();
+      cds_address = (address)mapinfo->region_base(0);
     }
 
+#ifdef _LP64
+    // If UseCompressedKlassPointers is set then allocate the metaspace area
+    // above the heap and above the CDS area (if it exists).
+    if (using_class_space()) {
+      if (UseSharedSpaces) {
+        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
+      } else {
+        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
+      }
+    }
+#endif
+
     // Initialize these before initializing the VirtualSpaceList
     _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
     _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
@@ -2840,39 +3007,28 @@ void Metaspace::global_initialize() {
   }
 }
 
-// For UseCompressedKlassPointers the class space is reserved as a piece of the
-// Java heap because the compression algorithm is the same for each.  The
-// argument passed in is at the top of the compressed space
-void Metaspace::initialize_class_space(ReservedSpace rs) {
-  // The reserved space size may be bigger because of alignment, esp with UseLargePages
-  assert(rs.size() >= ClassMetaspaceSize,
-         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
-  _class_space_list = new VirtualSpaceList(rs);
-}
-
-void Metaspace::initialize(Mutex* lock,
-                           MetaspaceType type) {
-
+void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
   assert(space_list() != NULL,
          "Metadata VirtualSpaceList has not been initialized");
 
-  _vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
+  _vsm = new SpaceManager(NonClassType, lock, space_list());
   if (_vsm == NULL) {
     return;
   }
   size_t word_size;
   size_t class_word_size;
-  vsm()->get_initial_chunk_sizes(type,
-                                 &word_size,
-                                 &class_word_size);
+  vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
 
-  assert(class_space_list() != NULL,
-         "Class VirtualSpaceList has not been initialized");
+  if (using_class_space()) {
+    assert(class_space_list() != NULL,
+           "Class VirtualSpaceList has not been initialized");
 
-  // Allocate SpaceManager for classes.
-  _class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
-  if (_class_vsm == NULL) {
-    return;
+    // Allocate SpaceManager for classes.
+    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
+    if (_class_vsm == NULL) {
+      return;
+    }
   }
 
   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
@@ -2888,11 +3044,13 @@ void Metaspace::initialize(Mutex* lock,
   }
 
   // Allocate chunk for class metadata objects
-  Metachunk* class_chunk =
-     class_space_list()->get_initialization_chunk(class_word_size,
-                                                  class_vsm()->medium_chunk_bunch());
-  if (class_chunk != NULL) {
-    class_vsm()->add_chunk(class_chunk, true);
+  if (using_class_space()) {
+    Metachunk* class_chunk =
+       class_space_list()->get_initialization_chunk(class_word_size,
+                                                    class_vsm()->medium_chunk_bunch());
+    if (class_chunk != NULL) {
+      class_vsm()->add_chunk(class_chunk, true);
+    }
   }
 
   _alloc_record_head = NULL;
@@ -2906,7 +3064,8 @@ size_t Metaspace::align_word_size_up(size_t word_size) {
 
 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
   // DumpSharedSpaces doesn't use class metadata area (yet)
-  if (mdtype == ClassType && !DumpSharedSpaces) {
+  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
+  if (mdtype == ClassType && using_class_space()) {
     return class_vsm()->allocate(word_size);
   } else {
     return vsm()->allocate(word_size);
@@ -2937,14 +3096,19 @@ char* Metaspace::bottom() const {
 }
 
 size_t Metaspace::used_words_slow(MetadataType mdtype) const {
-  // return vsm()->allocated_used_words();
-  return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
-                               vsm()->sum_used_in_chunks_in_use();  // includes overhead!
+  if (mdtype == ClassType) {
+    return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
+  } else {
+    return vsm()->sum_used_in_chunks_in_use();  // includes overhead!
+  }
 }
 
 size_t Metaspace::free_words(MetadataType mdtype) const {
-  return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
-                               vsm()->sum_free_in_chunks_in_use();
+  if (mdtype == ClassType) {
+    return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
+  } else {
+    return vsm()->sum_free_in_chunks_in_use();
+  }
 }
 
 // Space capacity in the Metaspace.  It includes
@@ -2953,8 +3117,11 @@ size_t Metaspace::free_words(MetadataType mdtype) const {
 // in the space available in the dictionary which
 // is already counted in some chunk.
 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
-  return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
-                               vsm()->sum_capacity_in_chunks_in_use();
+  if (mdtype == ClassType) {
+    return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
+  } else {
+    return vsm()->sum_capacity_in_chunks_in_use();
+  }
 }

 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
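The three accessors above (used_words_slow, free_words, capacity_words_slow) are all reworked the same way: instead of a ternary that unconditionally dereferences class_vsm(), the ClassType bucket is defined to be zero whenever the VM runs without a compressed class space. A minimal standalone sketch of the pattern, using illustrative stub types rather than HotSpot's real SpaceManager:

    #include <cstddef>
    #include <cstdio>

    enum MetadataType { ClassType, NonClassType };

    // Illustrative stand-ins for the class and non-class SpaceManagers.
    struct SpaceManagerStub { size_t used_words; };

    static bool use_class_space = false;                // mirrors Metaspace::using_class_space()
    static SpaceManagerStub vsm_stub       = { 1024 };  // non-class metadata
    static SpaceManagerStub class_vsm_stub = {  256 };  // class metadata (may not exist)

    // Same shape as the reworked used_words_slow(): report zero for the class
    // bucket instead of touching a SpaceManager that was never allocated.
    size_t used_words_slow(MetadataType mdtype) {
      if (mdtype == ClassType) {
        return use_class_space ? class_vsm_stub.used_words : 0;
      } else {
        return vsm_stub.used_words;
      }
    }

    int main() {
      printf("class used:     %zu\n", used_words_slow(ClassType));     // 0 without class space
      printf("non-class used: %zu\n", used_words_slow(NonClassType));  // 1024
      return 0;
    }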
@@ -2977,8 +3144,8 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
 #endif
     return;
   }
-  if (is_class) {
+  if (is_class && using_class_space()) {
     class_vsm()->deallocate(ptr, word_size);
   } else {
     vsm()->deallocate(ptr, word_size);
   }
@@ -2992,7 +3159,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
 #endif
     return;
   }
-  if (is_class) {
+  if (is_class && using_class_space()) {
     class_vsm()->deallocate(ptr, word_size);
   } else {
     vsm()->deallocate(ptr, word_size);
@@ -3101,14 +3268,18 @@ void Metaspace::purge() {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
   space_list()->purge();
-  class_space_list()->purge();
+  if (using_class_space()) {
+    class_space_list()->purge();
+  }
 }

 void Metaspace::print_on(outputStream* out) const {
   // Print both class virtual space counts and metaspace.
   if (Verbose) {
     vsm()->print_on(out);
+    if (using_class_space()) {
       class_vsm()->print_on(out);
+    }
   }
 }
@@ -3122,17 +3293,21 @@ bool Metaspace::contains(const void * ptr) {
   // be needed. Note, locking this can cause inversion problems with the
   // caller in MetaspaceObj::is_metadata() function.
   return space_list()->contains(ptr) ||
-         class_space_list()->contains(ptr);
+         (using_class_space() && class_space_list()->contains(ptr));
 }

 void Metaspace::verify() {
   vsm()->verify();
-  class_vsm()->verify();
+  if (using_class_space()) {
+    class_vsm()->verify();
+  }
 }

 void Metaspace::dump(outputStream* const out) const {
   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
   vsm()->dump(out);
-  out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
-  class_vsm()->dump(out);
+  if (using_class_space()) {
+    out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
+    class_vsm()->dump(out);
+  }
 }
@@ -105,6 +105,16 @@ class Metaspace : public CHeapObj<mtClass> {
   // Align up the word size to the allocation word size
   static size_t align_word_size_up(size_t);

+  // Aligned size of the metaspace.
+  static size_t _class_metaspace_size;
+
+  static size_t class_metaspace_size() {
+    return _class_metaspace_size;
+  }
+  static void set_class_metaspace_size(size_t metaspace_size) {
+    _class_metaspace_size = metaspace_size;
+  }
+
   static size_t _first_chunk_word_size;
   static size_t _first_class_chunk_word_size;
@@ -126,11 +136,26 @@ class Metaspace : public CHeapObj<mtClass> {

   static VirtualSpaceList* space_list()       { return _space_list; }
   static VirtualSpaceList* class_space_list() { return _class_space_list; }
+  static VirtualSpaceList* get_space_list(MetadataType mdtype) {
+    assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
+    return mdtype == ClassType ? class_space_list() : space_list();
+  }

   // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   // maintain a single list for now.
   void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);

+#ifdef _LP64
+  static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
+
+  // Returns true if can use CDS with metaspace allocated as specified address.
+  static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);
+
+  static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
+
+  static void initialize_class_space(ReservedSpace rs);
+#endif
+
   class AllocRecord : public CHeapObj<mtClass> {
    public:
     AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
@@ -151,7 +176,6 @@ class Metaspace : public CHeapObj<mtClass> {

   // Initialize globals for Metaspace
   static void global_initialize();
-  static void initialize_class_space(ReservedSpace rs);

   static size_t first_chunk_word_size() { return _first_chunk_word_size; }
   static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
@@ -172,8 +196,6 @@ class Metaspace : public CHeapObj<mtClass> {
   MetaWord* expand_and_allocate(size_t size,
                                 MetadataType mdtype);

-  static bool is_initialized() { return _class_space_list != NULL; }
-
   static bool contains(const void *ptr);
   void dump(outputStream* const out) const;
@@ -190,11 +212,16 @@ class Metaspace : public CHeapObj<mtClass> {
   };

   void iterate(AllocRecordClosure *closure);

+  // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
+  static bool using_class_space() {
+    return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
+  }
+
 };

 class MetaspaceAux : AllStatic {
   static size_t free_chunks_total(Metaspace::MetadataType mdtype);
-  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);

  public:
   // Statistics for class space and data space in metaspace.
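using_class_space() relies on the NOT_LP64/LP64_ONLY macro pair from HotSpot's utilities/macros.hpp: exactly one of the two arguments survives preprocessing, so the predicate compiles to a constant false on 32-bit builds and to the flag test on 64-bit builds. A self-contained sketch of the same idiom (the macro definitions below are simplified stand-ins, not copies of macros.hpp):

    #include <cstdio>

    // Illustrative stand-ins for HotSpot's NOT_LP64/LP64_ONLY macros.
    #ifdef _LP64
      #define NOT_LP64(code)        // expands to nothing on 64-bit builds
      #define LP64_ONLY(code) code  // kept on 64-bit builds
    #else
      #define NOT_LP64(code)  code  // kept on 32-bit builds
      #define LP64_ONLY(code)       // expands to nothing on 32-bit builds
    #endif

    // Stand-ins for the VM flags referenced by Metaspace::using_class_space().
    static bool UseCompressedKlassPointers = true;
    static bool DumpSharedSpaces = false;

    static bool using_class_space() {
      // Exactly one macro argument survives preprocessing, so a 32-bit VM
      // never allocates a separate compressed class space.
      return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
    }

    int main() {
      printf("using_class_space: %s\n", using_class_space() ? "true" : "false");
      return 0;
    }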
@@ -238,13 +265,15 @@ class MetaspaceAux : AllStatic {
   // Used by MetaspaceCounters
   static size_t free_chunks_total();
   static size_t free_chunks_total_in_bytes();
+  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);

   static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
     return _allocated_capacity_words[mdtype];
   }
   static size_t allocated_capacity_words() {
-    return _allocated_capacity_words[Metaspace::ClassType] +
-           _allocated_capacity_words[Metaspace::NonClassType];
+    return _allocated_capacity_words[Metaspace::NonClassType] +
+           (Metaspace::using_class_space() ?
+            _allocated_capacity_words[Metaspace::ClassType] : 0);
   }
   static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
     return allocated_capacity_words(mdtype) * BytesPerWord;
@@ -257,8 +286,9 @@ class MetaspaceAux : AllStatic {
     return _allocated_used_words[mdtype];
   }
   static size_t allocated_used_words() {
-    return _allocated_used_words[Metaspace::ClassType] +
-           _allocated_used_words[Metaspace::NonClassType];
+    return _allocated_used_words[Metaspace::NonClassType] +
+           (Metaspace::using_class_space() ?
+            _allocated_used_words[Metaspace::ClassType] : 0);
   }
   static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
     return allocated_used_words(mdtype) * BytesPerWord;
@@ -268,6 +298,7 @@ class MetaspaceAux : AllStatic {
   }

   static size_t free_bytes();
+  static size_t free_bytes(Metaspace::MetadataType mdtype);

   // Total capacity in all Metaspaces
   static size_t capacity_bytes_slow() {
@@ -300,6 +331,7 @@ class MetaspaceAux : AllStatic {
   static void print_on(outputStream * out);
   static void print_on(outputStream * out, Metaspace::MetadataType mdtype);

+  static void print_class_waste(outputStream* out);
   static void print_waste(outputStream* out);
   static void dump(outputStream* out);
   static void verify_free_chunks();
@@ -25,11 +25,47 @@
 #include "precompiled.hpp"
 #include "memory/metaspaceCounters.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/perfData.hpp"
 #include "utilities/exceptions.hpp"

-MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;
-
-size_t MetaspaceCounters::calc_total_capacity() {
+class MetaspacePerfCounters: public CHeapObj<mtInternal> {
+  friend class VMStructs;
+  PerfVariable* _capacity;
+  PerfVariable* _used;
+  PerfVariable* _max_capacity;
+
+  PerfVariable* create_variable(const char *ns, const char *name, size_t value, TRAPS) {
+    const char *path = PerfDataManager::counter_name(ns, name);
+    return PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value, THREAD);
+  }
+
+  void create_constant(const char *ns, const char *name, size_t value, TRAPS) {
+    const char *path = PerfDataManager::counter_name(ns, name);
+    PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, THREAD);
+  }
+
+ public:
+  MetaspacePerfCounters(const char* ns, size_t min_capacity, size_t curr_capacity, size_t max_capacity, size_t used) {
+    EXCEPTION_MARK;
+    ResourceMark rm;
+
+    create_constant(ns, "minCapacity", min_capacity, THREAD);
+    _capacity = create_variable(ns, "capacity", curr_capacity, THREAD);
+    _max_capacity = create_variable(ns, "maxCapacity", max_capacity, THREAD);
+    _used = create_variable(ns, "used", used, THREAD);
+  }
+
+  void update(size_t capacity, size_t max_capacity, size_t used) {
+    _capacity->set_value(capacity);
+    _max_capacity->set_value(max_capacity);
+    _used->set_value(used);
+  }
+};
+
+MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
+
+size_t MetaspaceCounters::calculate_capacity() {
   // The total capacity is the sum of
   //   1) capacity of Metachunks in use by all Metaspaces
   //   2) unused space at the end of each Metachunk
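The helpers in the new MetaspacePerfCounters class compose hierarchical counter paths: PerfDataManager::counter_name(ns, name) joins the namespace and counter name with a dot, and publishing under SUN_GC prefixes the result with "sun.gc.". A sketch of the name composition only (the real PerfDataManager also backs each counter with shared perf-data memory):

    #include <cstdio>
    #include <string>

    // Illustrative sketch of how the counter paths above compose; the real
    // implementation lives in runtime/perfData.cpp.
    static std::string counter_name(const std::string& ns, const std::string& name) {
      return ns + "." + name;  // "metaspace" + "capacity" -> "metaspace.capacity"
    }

    int main() {
      // The SUN_GC namespace adds the "sun.gc." prefix, so the constructor
      // calls above publish counters with names such as:
      printf("sun.gc.%s\n", counter_name("metaspace", "minCapacity").c_str());
      printf("sun.gc.%s\n", counter_name("metaspace", "capacity").c_str());
      printf("sun.gc.%s\n", counter_name("compressedclassspace", "maxCapacity").c_str());
      return 0;
    }

On a live VM these counters can be listed with jcmd <pid> PerfCounter.print.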
@@ -39,95 +75,65 @@ size_t MetaspaceCounters::calc_total_capacity() {
   return total_capacity;
 }

-MetaspaceCounters::MetaspaceCounters() :
-    _capacity(NULL),
-    _used(NULL),
-    _max_capacity(NULL) {
-  if (UsePerfData) {
-    size_t min_capacity = MetaspaceAux::min_chunk_size();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t curr_capacity = calc_total_capacity();
-    size_t used = MetaspaceAux::allocated_used_bytes();
-
-    initialize(min_capacity, max_capacity, curr_capacity, used);
-  }
-}
-
-static PerfVariable* create_ms_variable(const char *ns,
-                                        const char *name,
-                                        size_t value,
-                                        TRAPS) {
-  const char *path = PerfDataManager::counter_name(ns, name);
-  PerfVariable *result =
-      PerfDataManager::create_variable(SUN_GC, path, PerfData::U_Bytes, value,
-                                       CHECK_NULL);
-  return result;
-}
-
-static void create_ms_constant(const char *ns,
-                               const char *name,
-                               size_t value,
-                               TRAPS) {
-  const char *path = PerfDataManager::counter_name(ns, name);
-  PerfDataManager::create_constant(SUN_GC, path, PerfData::U_Bytes, value, CHECK);
-}
-
-void MetaspaceCounters::initialize(size_t min_capacity,
-                                   size_t max_capacity,
-                                   size_t curr_capacity,
-                                   size_t used) {
-
-  if (UsePerfData) {
-    EXCEPTION_MARK;
-    ResourceMark rm;
-
-    const char *ms = "metaspace";
-
-    create_ms_constant(ms, "minCapacity", min_capacity, CHECK);
-    _max_capacity = create_ms_variable(ms, "maxCapacity", max_capacity, CHECK);
-    _capacity = create_ms_variable(ms, "capacity", curr_capacity, CHECK);
-    _used = create_ms_variable(ms, "used", used, CHECK);
-  }
-}
-
-void MetaspaceCounters::update_capacity() {
-  assert(UsePerfData, "Should not be called unless being used");
-  size_t total_capacity = calc_total_capacity();
-  _capacity->set_value(total_capacity);
-}
-
-void MetaspaceCounters::update_used() {
-  assert(UsePerfData, "Should not be called unless being used");
-  size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
-  _used->set_value(used_in_bytes);
-}
-
-void MetaspaceCounters::update_max_capacity() {
-  assert(UsePerfData, "Should not be called unless being used");
-  assert(_max_capacity != NULL, "Should be initialized");
-  size_t reserved_in_bytes = MetaspaceAux::reserved_in_bytes();
-  _max_capacity->set_value(reserved_in_bytes);
-}
-
-void MetaspaceCounters::update_all() {
-  if (UsePerfData) {
-    update_used();
-    update_capacity();
-    update_max_capacity();
-  }
-}
-
 void MetaspaceCounters::initialize_performance_counters() {
   if (UsePerfData) {
-    assert(_metaspace_counters == NULL, "Should only be initialized once");
-    _metaspace_counters = new MetaspaceCounters();
+    assert(_perf_counters == NULL, "Should only be initialized once");
+
+    size_t min_capacity = MetaspaceAux::min_chunk_size();
+    size_t capacity = calculate_capacity();
+    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
+    size_t used = MetaspaceAux::allocated_used_bytes();
+
+    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
   }
 }

 void MetaspaceCounters::update_performance_counters() {
   if (UsePerfData) {
-    assert(_metaspace_counters != NULL, "Should be initialized");
-    _metaspace_counters->update_all();
+    assert(_perf_counters != NULL, "Should be initialized");
+
+    size_t capacity = calculate_capacity();
+    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
+    size_t used = MetaspaceAux::allocated_used_bytes();
+
+    _perf_counters->update(capacity, max_capacity, used);
   }
 }
+
+MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
+
+size_t CompressedClassSpaceCounters::calculate_capacity() {
+  return MetaspaceAux::allocated_capacity_bytes(_class_type) +
+         MetaspaceAux::free_bytes(_class_type) +
+         MetaspaceAux::free_chunks_total_in_bytes(_class_type);
+}
+
+void CompressedClassSpaceCounters::update_performance_counters() {
+  if (UsePerfData && UseCompressedKlassPointers) {
+    assert(_perf_counters != NULL, "Should be initialized");
+
+    size_t capacity = calculate_capacity();
+    size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
+    size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
+
+    _perf_counters->update(capacity, max_capacity, used);
+  }
+}
+
+void CompressedClassSpaceCounters::initialize_performance_counters() {
+  if (UsePerfData) {
+    assert(_perf_counters == NULL, "Should only be initialized once");
+    const char* ns = "compressedclassspace";
+
+    if (UseCompressedKlassPointers) {
+      size_t min_capacity = MetaspaceAux::min_chunk_size();
+      size_t capacity = calculate_capacity();
+      size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
+      size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
+
+      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
+    } else {
+      _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
+    }
+  }
+}
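CompressedClassSpaceCounters::calculate_capacity() sums three per-type byte counts from MetaspaceAux. A toy computation with made-up numbers, only to show what the reported capacity is composed of:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Made-up values standing in for the three MetaspaceAux queries used by
      // CompressedClassSpaceCounters::calculate_capacity() for ClassType.
      size_t allocated_capacity_bytes = 8u * 1024 * 1024;  // capacity of chunks in use by class SpaceManagers
      size_t free_bytes               =      512 * 1024;   // free space tracked within those chunks
      size_t free_chunks_total_bytes  = 2u * 1024 * 1024;  // whole free chunks held by the chunk manager

      size_t capacity = allocated_capacity_bytes + free_bytes + free_chunks_total_bytes;
      printf("reported class-space capacity: %zu bytes\n", capacity);
      return 0;
    }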
@@ -25,31 +25,27 @@
 #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP

-#include "runtime/perfData.hpp"
+#include "memory/metaspace.hpp"

-class MetaspaceCounters: public CHeapObj<mtClass> {
-  friend class VMStructs;
-  PerfVariable*      _capacity;
-  PerfVariable*      _used;
-  PerfVariable*      _max_capacity;
-  static MetaspaceCounters* _metaspace_counters;
-  void initialize(size_t min_capacity,
-                  size_t max_capacity,
-                  size_t curr_capacity,
-                  size_t used);
-  size_t calc_total_capacity();
+class MetaspacePerfCounters;
+
+class MetaspaceCounters: public AllStatic {
+  static MetaspacePerfCounters* _perf_counters;
+  static size_t calculate_capacity();
+
  public:
-  MetaspaceCounters();
-  ~MetaspaceCounters();
-
-  void update_capacity();
-  void update_used();
-  void update_max_capacity();
-
-  void update_all();
-
   static void initialize_performance_counters();
   static void update_performance_counters();
 };

+class CompressedClassSpaceCounters: public AllStatic {
+  static MetaspacePerfCounters* _perf_counters;
+  static size_t calculate_capacity();
+  static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
+
+ public:
+  static void initialize_performance_counters();
+  static void update_performance_counters();
+};
+
 #endif // SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
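Both counter classes are now AllStatic, HotSpot's marker base class (memory/allocation.hpp) for types that hold only static members and are never instantiated; the old per-instance fields, constructor, and destructor disappear accordingly. A compact illustration of the idiom; the enforcement below (a private, never-defined constructor) is a simplification and not a copy of allocation.hpp:

    // Simplified stand-in for HotSpot's AllStatic: instantiating a subclass
    // fails because the constructor is declared but never defined.
    class AllStatic {
     private:
      AllStatic();  // never defined
    };

    // With only static members, the class is just a named scope, which is why
    // the refactored MetaspaceCounters needs no constructor or fields.
    class ExampleCounters : public AllStatic {
     public:
      static void initialize_performance_counters() { /* create counters once */ }
      static void update_performance_counters()     { /* refresh values */ }
    };

    int main() {
      ExampleCounters::initialize_performance_counters();
      ExampleCounters::update_performance_counters();
      return 0;
    }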
@@ -52,7 +52,6 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
   int tag = 0;
   soc->do_tag(--tag);

-  assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
   // Verify the sizes of various metadata in the system.
   soc->do_tag(sizeof(Method));
   soc->do_tag(sizeof(ConstMethod));
@@ -145,8 +145,6 @@ NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
 address Universe::_narrow_ptrs_base;

-size_t Universe::_class_metaspace_size;
-
 void Universe::basic_type_classes_do(void f(Klass*)) {
   f(boolArrayKlassObj());
   f(byteArrayKlassObj());
@@ -641,6 +639,8 @@ jint universe_init() {
     return status;
   }

+  Metaspace::global_initialize();
+
   // Create memory for metadata.  Must be after initializing heap for
   // DumpSharedSpaces.
   ClassLoaderData::init_null_class_loader_data();
@@ -681,25 +681,27 @@ static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 // 32Gb
 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;

-char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
+  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
+  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
+  assert(is_size_aligned(heap_size, alignment), "Must be");
+
+  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
+
   size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
-    const size_t total_size = heap_size + HeapBaseMinAddress;
+    const size_t total_size = heap_size + heap_base_min_address_aligned;
     // Return specified base for the first request.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      base = HeapBaseMinAddress;
+      base = heap_base_min_address_aligned;

-    // If the total size and the metaspace size are small enough to allow
-    // UnscaledNarrowOop then just use UnscaledNarrowOop.
-    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
-        (!UseCompressedKlassPointers ||
-          (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
-      // We don't need to check the metaspace size here because it is always smaller
-      // than total_size.
+    // If the total size is small enough to allow UnscaledNarrowOop then
+    // just use UnscaledNarrowOop.
+    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
       if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
           (Universe::narrow_oop_shift() == 0)) {
         // Use 32-bits oops without encoding and
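The new asserts and the heap_base_min_address_aligned computation above depend on align_size_up()/is_size_aligned(), HotSpot's usual power-of-two rounding helpers. A minimal sketch of that arithmetic:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Minimal versions of HotSpot's align_size_up()/is_size_aligned() for
    // power-of-two alignments (the VM asserts the same precondition).
    static inline uint64_t align_size_up(uint64_t size, uint64_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
      return (size + alignment - 1) & ~(alignment - 1);
    }

    static inline bool is_size_aligned(uint64_t size, uint64_t alignment) {
      return align_size_up(size, alignment) == size;
    }

    int main() {
      // An already-aligned address stays put; anything past the boundary is
      // bumped up to the next multiple of the alignment.
      printf("0x%llx\n", (unsigned long long)align_size_up(0x80000000ull, 0x40000000ull)); // 0x80000000
      printf("0x%llx\n", (unsigned long long)align_size_up(0x80000001ull, 0x40000000ull)); // 0xC0000000
      return 0;
    }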
@@ -716,13 +718,6 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
         base = (OopEncodingHeapMax - heap_size);
       }
     }

-    // See if ZeroBaseNarrowOop encoding will work for a heap based at
-    // (KlassEncodingMetaspaceMax - class_metaspace_size()).
-    } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
-      (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
-      (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
-      base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
     } else {
       // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
       // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
@@ -732,8 +727,7 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
     // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
     // used in ReservedHeapSpace() constructors.
     // The final values will be set in initialize_heap() below.
-    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
-        (!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
+    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
       // Use zero based compressed oops
       Universe::set_narrow_oop_base(NULL);
       // Don't need guard page for implicit checks in indexed
@@ -754,6 +748,8 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
     }
   }
 #endif

+  assert(is_ptr_aligned((char*)base, alignment), "Must be");
   return (char*)base; // also return NULL (don't care) for 32-bit VM
 }
@@ -816,9 +812,7 @@ jint Universe::initialize_heap() {
       tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                  Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
     }
-    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
-        (UseCompressedKlassPointers &&
-          ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
+    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
       // Can't reserve heap below 32Gb.
       // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@@ -849,20 +843,16 @@
       }
     }
   }

   if (verbose) {
     tty->cr();
     tty->cr();
   }
-  if (UseCompressedKlassPointers) {
-    Universe::set_narrow_klass_base(Universe::narrow_oop_base());
-    Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
-  }
   Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
 }
-// Universe::narrow_oop_base() is one page below the metaspace
-// base. The actual metaspace base depends on alignment constraints
-// so we don't know its exact location here.
-assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
+// Universe::narrow_oop_base() is one page below the heap.
+assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
+       os::vm_page_size()) ||
        Universe::narrow_oop_base() == NULL, "invalid value");
 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
        Universe::narrow_oop_shift() == 0, "invalid value");
@@ -882,35 +872,36 @@ jint Universe::initialize_heap() {

 // Reserve the Java heap, which is now the same for all GCs.
 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
-  // Add in the class metaspace area so the classes in the headers can
-  // be compressed the same as instances.
-  // Need to round class space size up because it's below the heap and
-  // the actual alignment depends on its size.
-  Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
-  size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
+  size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
-  ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
+
+  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
+  assert(!UseLargePages
+      || UseParallelOldGC
+      || use_large_pages, "Wrong alignment to use large pages");
+
+  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
+
+  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);

   if (UseCompressedOops) {
     if (addr != NULL && !total_rs.is_reserved()) {
       // Failed to reserve at specified address - the requested memory
       // region is taken already, for example, by 'java' launcher.
       // Try again to reserver heap higher.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
+      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);

       ReservedHeapSpace total_rs0(total_reserved, alignment,
-                                  UseLargePages, addr);
+                                  use_large_pages, addr);

       if (addr != NULL && !total_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
+        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");

         ReservedHeapSpace total_rs1(total_reserved, alignment,
-                                    UseLargePages, addr);
+                                    use_large_pages, addr);
         total_rs = total_rs1;
       } else {
         total_rs = total_rs0;
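reserve_heap() now only requests large pages when the chosen heap alignment is itself a multiple of the OS large page size; otherwise the reservation falls back to small pages. A sketch of the gating, with an illustrative 2 MB page size:

    #include <cstddef>
    #include <cstdio>

    // Simplified is_size_aligned(): true when size is a multiple of alignment.
    static bool is_size_aligned(size_t size, size_t alignment) {
      return (size % alignment) == 0;
    }

    int main() {
      const size_t large_page_size = 2u * 1024 * 1024;  // e.g. 2 MB on x86_64 Linux
      const bool UseLargePages = true;

      size_t aligned_ok  = 4u * 1024 * 1024;  // multiple of the large page size
      size_t aligned_bad = 1u * 1024 * 1024;  // too small: not a multiple

      printf("4M alignment -> large pages: %s\n",
             (UseLargePages && is_size_aligned(aligned_ok, large_page_size)) ? "yes" : "no");
      printf("1M alignment -> large pages: %s\n",
             (UseLargePages && is_size_aligned(aligned_bad, large_page_size)) ? "yes" : "no");
      return 0;
    }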
@@ -923,28 +914,17 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
     return total_rs;
   }

-  // Split the reserved space into main Java heap and a space for
-  // classes so that they can be compressed using the same algorithm
-  // as compressed oops. If compress oops and compress klass ptrs are
-  // used we need the meta space first: if the alignment used for
-  // compressed oops is greater than the one used for compressed klass
-  // ptrs, a metadata space on top of the heap could become
-  // unreachable.
-  ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
-  ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
-  Metaspace::initialize_class_space(class_rs);
-
   if (UseCompressedOops) {
     // Universe::initialize_heap() will reset this to NULL if unscaled
     // or zero-based narrow oops are actually used.
     address base = (address)(total_rs.base() - os::vm_page_size());
     Universe::set_narrow_oop_base(base);
   }
-  return heap_rs;
+  return total_rs;
 }

-// It's the caller's repsonsibility to ensure glitch-freedom
+// It's the caller's responsibility to ensure glitch-freedom
 // (if required).
 void Universe::update_heap_info_at_gc() {
   _heap_capacity_at_last_gc = heap()->capacity();
|
|||||||
|
|
||||||
// Initialize performance counters for metaspaces
|
// Initialize performance counters for metaspaces
|
||||||
MetaspaceCounters::initialize_performance_counters();
|
MetaspaceCounters::initialize_performance_counters();
|
||||||
|
CompressedClassSpaceCounters::initialize_performance_counters();
|
||||||
|
|
||||||
MemoryService::add_metaspace_memory_pools();
|
MemoryService::add_metaspace_memory_pools();
|
||||||
|
|
||||||
GC_locker::unlock(); // allow gc after bootstrapping
|
GC_locker::unlock(); // allow gc after bootstrapping
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user