Merge

commit de2e2b119a

Changed paths (from the commit's file tree):

.hgtags
doc
make
src/hotspot/cpu/aarch64: aarch64.ad, aarch64_ad.m4, assembler_aarch64.hpp, c1_LIRAssembler_aarch64.cpp, c1_LIRAssembler_aarch64.hpp, compiledIC_aarch64.cpp, frame_aarch64.cpp, macroAssembler_aarch64.cpp, macroAssembler_aarch64.hpp, nativeInst_aarch64.cpp
src/hotspot/cpu/aarch64/gc/shenandoah: shenandoahBarrierSetAssembler_aarch64.cpp, shenandoahBarrierSetAssembler_aarch64.hpp, shenandoahBarrierSetC1_aarch64.cpp, shenandoah_aarch64.ad
src/hotspot/cpu: arm, sparc, x86
src/hotspot/os, src/hotspot/os_cpu
src/hotspot/share/adlc
src/hotspot/share/classfile: javaClasses.cpp, javaClasses.hpp, protectionDomainCache.cpp, protectionDomainCache.hpp, systemDictionary.hpp, verifier.cpp, vmSymbols.hpp
src/hotspot/share/code, src/hotspot/share/compiler
src/hotspot/share/gc/g1: g1CollectedHeap.cpp, g1CollectedHeap.hpp, g1CollectionSet.cpp, g1CollectionSet.hpp, g1CollectionSetCandidates.cpp, g1CollectionSetCandidates.hpp, g1ConcurrentMark.cpp, g1ConcurrentMarkThread.cpp, g1EdenRegions.hpp, g1EvacFailure.cpp, g1GCPhaseTimes.cpp, g1GCPhaseTimes.hpp, g1HeapVerifier.cpp, g1MonitoringSupport.cpp, g1MonitoringSupport.hpp, g1OopStarChunkedList.cpp, g1OopStarChunkedList.hpp, g1OopStarChunkedList.inline.hpp, g1ParScanThreadState.cpp, g1Policy.cpp, g1Policy.hpp, g1RemSet.cpp, g1RemSet.hpp, g1SurvivorRegions.cpp, g1SurvivorRegions.hpp, g1YoungGenSizer.cpp, g1YoungGenSizer.hpp, g1YoungRemSetSamplingThread.cpp, heapRegion.cpp, heapRegion.hpp, vmStructs_g1.hpp
src/hotspot/share/gc/shared
src/hotspot/share/gc/shenandoah: c1, c2, heuristics

.hgtags
@@ -552,3 +552,4 @@ b67884871b5fff79c5ef3eb8ac74dd48d71ea9b1 jdk-12+33
 b67884871b5fff79c5ef3eb8ac74dd48d71ea9b1 jdk-12-ga
 83cace4142c8563b6a921787db02388e1bc48d01 jdk-13+13
 46cf212cdccaf4fb064d913b12004007d3322b67 jdk-13+14
+f855ec13aa2501ae184c8b3e0626a8cec9966116 jdk-13+15

doc/testing.html
@@ -1,19 +1,24 @@
 <!DOCTYPE html>
-<html>
+<html xmlns="http://www.w3.org/1999/xhtml" lang="" xml:lang="">
 <head>
-<meta charset="utf-8">
-<meta name="generator" content="pandoc">
-<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
+<meta charset="utf-8" />
+<meta name="generator" content="pandoc" />
+<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
 <title>Testing the JDK</title>
-<style type="text/css">code{white-space: pre;}</style>
-<link rel="stylesheet" href="../make/data/docs-resources/resources/jdk-default.css">
+<style type="text/css">
+code{white-space: pre-wrap;}
+span.smallcaps{font-variant: small-caps;}
+span.underline{text-decoration: underline;}
+div.column{display: inline-block; vertical-align: top; width: 50%;}
+</style>
+<link rel="stylesheet" href="../make/data/docs-resources/resources/jdk-default.css" />
 <!--[if lt IE 9]>
 <script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
 <![endif]-->
 <style type="text/css">pre, code, tt { color: #1d6ae5; }</style>
 </head>
 <body>
-<header>
+<header id="title-block-header">
 <h1 class="title">Testing the JDK</h1>
 </header>
 <nav id="TOC">
@@ -34,6 +39,9 @@
 <li><a href="#gtest-keywords">Gtest keywords</a></li>
+<li><a href="#microbenchmark-keywords">Microbenchmark keywords</a></li>
 </ul></li>
+<li><a href="#notes-for-specific-tests">Notes for Specific Tests</a><ul>
+<li><a href="#docker-tests">Docker Tests</a></li>
+</ul></li>
 </ul>
 </nav>
 <h2 id="using-make-test-the-run-test-framework">Using "make test" (the run-test framework)</h2>
@@ -180,5 +188,11 @@ TEST FAILURE</code></pre>
 <p>Additional VM arguments to provide to forked off VMs. Same as <code>-jvmArgs <args></code></p>
 <h4 id="options-2">OPTIONS</h4>
 <p>Additional arguments to send to JMH.</p>
+<h2 id="notes-for-specific-tests">Notes for Specific Tests</h2>
+<h3 id="docker-tests">Docker Tests</h3>
+<p>Docker tests with default parameters may fail on systems with glibc versions not compatible with the one used in the default docker image (e.g., Oracle Linux 7.6 for x86). For example, they pass on Ubuntu 16.04 but fail on Ubuntu 18.04 if run like this on x86:</p>
+<pre><code>$ make test TEST="jtreg:test/hotspot/jtreg/runtime/containers/docker"</code></pre>
+<p>To run these tests correctly, additional parameters for the correct docker image are required on Ubuntu 18.04 by using <code>JAVA_OPTIONS</code>.</p>
+<pre><code>$ make test TEST="jtreg:test/hotspot/jtreg/runtime/containers/docker" JTREG="JAVA_OPTIONS=-Djdk.test.docker.image.name=ubuntu -Djdk.test.docker.image.version=latest"</code></pre>
 </body>
 </html>

doc/testing.md
@@ -373,6 +373,21 @@ Additional VM arguments to provide to forked off VMs. Same as `-jvmArgs <args>`
 #### OPTIONS
 Additional arguments to send to JMH.
+
+## Notes for Specific Tests
+
+### Docker Tests
+
+Docker tests with default parameters may fail on systems with glibc versions not
+compatible with the one used in the default docker image (e.g., Oracle Linux 7.6 for x86).
+For example, they pass on Ubuntu 16.04 but fail on Ubuntu 18.04 if run like this on x86:
+
+    $ make test TEST="jtreg:test/hotspot/jtreg/runtime/containers/docker"
+
+To run these tests correctly, additional parameters for the correct docker image are
+required on Ubuntu 18.04 by using `JAVA_OPTIONS`.
+
+    $ make test TEST="jtreg:test/hotspot/jtreg/runtime/containers/docker" JTREG="JAVA_OPTIONS=-Djdk.test.docker.image.name=ubuntu -Djdk.test.docker.image.version=latest"
 
 ---
 # Override some definitions in the global css file that are not optimal for
 # this document.

make/Init.gmk
@@ -238,11 +238,13 @@ else # HAS_SPEC=true
 ifeq ($(LOG_NOFILE), true)
 # Disable build log if LOG=[level,]nofile was given
 override BUILD_LOG_PIPE :=
+override BUILD_LOG_PIPE_SIMPLE :=
 endif
 
 ifeq ($(filter dist-clean, $(SEQUENTIAL_TARGETS)), dist-clean)
 # We can't have a log file if we're about to remove it.
 override BUILD_LOG_PIPE :=
+override BUILD_LOG_PIPE_SIMPLE :=
 endif
 
 ifeq ($(OUTPUT_SYNC_SUPPORTED), true)
@@ -277,7 +279,7 @@ else # HAS_SPEC=true
 else
 $(ECHO) "Re-running configure using default settings"
 endif
-( cd $(OUTPUTDIR) && PATH="$(ORIGINAL_PATH)" AUTOCONF="$(AUTOCONF)" \
+( cd $(CONFIGURE_START_DIR) && PATH="$(ORIGINAL_PATH)" AUTOCONF="$(AUTOCONF)" \
 CUSTOM_ROOT="$(CUSTOM_ROOT)" \
 CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \
 $(BASH) $(TOPDIR)/configure $(CONFIGURE_COMMAND_LINE) )
@@ -303,7 +305,7 @@ else # HAS_SPEC=true
 main: $(INIT_TARGETS)
 ifneq ($(SEQUENTIAL_TARGETS)$(PARALLEL_TARGETS), )
 $(call RotateLogFiles)
-$(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE)
+$(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE_SIMPLE)
 ifneq ($(SEQUENTIAL_TARGETS), )
 # Don't touch build output dir since we might be cleaning. That
 # means no log pipe.
@@ -325,7 +327,7 @@ else # HAS_SPEC=true
 $(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE) $(BUILD_LOG_PIPE) || \
 ( exitcode=$$? && \
 $(PRINTF) "\nERROR: Build failed for $(TARGET_DESCRIPTION) (exit code $$exitcode) \n" \
-$(BUILD_LOG_PIPE) && \
+$(BUILD_LOG_PIPE_SIMPLE) && \
 cd $(TOPDIR) && $(MAKE) $(MAKE_ARGS) -j 1 -f make/Init.gmk \
 HAS_SPEC=true on-failure ; \
 exit $$exitcode ) )
@@ -336,7 +338,7 @@ else # HAS_SPEC=true
 if test -f $(MAKESUPPORT_OUTPUTDIR)/exit-with-error ; then \
 exit 1 ; \
 fi
-$(PRINTF) "Finished building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE)
+$(PRINTF) "Finished building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE_SIMPLE)
 $(call ReportProfileTimes)
 endif

make/InitSupport.gmk
@@ -296,6 +296,9 @@ else # $(HAS_SPEC)=true
 BUILD_PROFILE_LOG := $(OUTPUTDIR)/build-profile.log
 
 BUILD_LOG_PIPE := > >($(TEE) -a $(BUILD_LOG)) 2> >($(TEE) -a $(BUILD_LOG) >&2) && wait
+# Use this for simple echo/printf commands that are never expected to print
+# to stderr.
+BUILD_LOG_PIPE_SIMPLE := | $(TEE) -a $(BUILD_LOG)
 
 ifneq ($(CUSTOM_ROOT), )
 topdir=$(CUSTOM_ROOT)
@@ -514,7 +517,7 @@ else # $(HAS_SPEC)=true
 "`$(LS) $(BUILDTIMESDIR)/build_time_diff_* | $(GREP) -v _TOTAL | \
 $(XARGS) $(CAT) | $(SORT) -k 2`" \
 "`$(CAT) $(BUILDTIMESDIR)/build_time_diff_TOTAL`" \
-$(BUILD_LOG_PIPE)
+$(BUILD_LOG_PIPE_SIMPLE)
 endef
 
 define ReportProfileTimes
@@ -524,7 +527,7 @@ else # $(HAS_SPEC)=true
 $(CAT) $(BUILD_PROFILE_LOG) && \
 $(ECHO) End $(notdir $(BUILD_PROFILE_LOG)); \
 } \
-$(BUILD_LOG_PIPE)
+$(BUILD_LOG_PIPE_SIMPLE)
 )
 endef

make/Main.gmk
@@ -335,6 +335,7 @@ BOOTCYCLE_TARGET := product-images
 bootcycle-images:
 ifneq ($(COMPILE_TYPE), cross)
 $(call LogWarn, Boot cycle build step 2: Building a new JDK image using previously built image)
+$(call MakeDir, $(OUTPUTDIR)/bootcycle-build)
 +$(MAKE) $(MAKE_ARGS) -f $(TOPDIR)/make/Init.gmk PARALLEL_TARGETS=$(BOOTCYCLE_TARGET) \
 JOBS= SPEC=$(dir $(SPEC))bootcycle-spec.gmk main
 else
@@ -650,7 +651,6 @@ else
 
 # Declare dependencies between hotspot-<variant>* targets
 $(foreach v, $(JVM_VARIANTS), \
-$(eval hotspot-$v: hotspot-$v-gensrc hotspot-$v-libs) \
 $(eval hotspot-$v-gensrc: java.base-copy) \
 $(eval hotspot-$v-libs: hotspot-$v-gensrc java.base-copy) \
 )
@@ -943,6 +943,10 @@ JVM_TOOLS_TARGETS ?= buildtools-hotspot
 buildtools: buildtools-langtools interim-langtools interim-rmic \
 buildtools-jdk $(JVM_TOOLS_TARGETS)
 
+# Declare dependencies from hotspot-<variant> targets
+$(foreach v, $(JVM_VARIANTS), \
+$(eval hotspot-$v: hotspot-$v-gensrc hotspot-$v-libs) \
+)
 hotspot: $(HOTSPOT_VARIANT_TARGETS)
 
 # Create targets hotspot-libs and hotspot-gensrc.

make/autoconf/basics.m4
@@ -627,7 +627,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
 AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
 [
 # Save the current directory this script was started from
-CURDIR="$PWD"
+CONFIGURE_START_DIR="$PWD"
 
 # We might need to rewrite ORIGINAL_PATH, if it includes "#", to quote them
 # for make. We couldn't do this when we retrieved ORIGINAL_PATH, since SED
@@ -653,9 +653,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_PATHS],
 AC_MSG_CHECKING([for top-level directory])
 AC_MSG_RESULT([$TOPDIR])
 AC_SUBST(TOPDIR)
+AC_SUBST(CONFIGURE_START_DIR)
 
 # We can only call BASIC_FIXUP_PATH after BASIC_CHECK_PATHS_WINDOWS.
-BASIC_FIXUP_PATH(CURDIR)
+BASIC_FIXUP_PATH(CONFIGURE_START_DIR)
 BASIC_FIXUP_PATH(TOPDIR)
 
 # Locate the directory of this script.
@@ -868,9 +869,10 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
 
 # Test from where we are running configure, in or outside of src root.
 AC_MSG_CHECKING([where to store configuration])
-if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$CUSTOM_ROOT" \
-|| test "x$CURDIR" = "x$TOPDIR/make/autoconf" \
-|| test "x$CURDIR" = "x$TOPDIR/make" ; then
+if test "x$CONFIGURE_START_DIR" = "x$TOPDIR" \
+|| test "x$CONFIGURE_START_DIR" = "x$CUSTOM_ROOT" \
+|| test "x$CONFIGURE_START_DIR" = "x$TOPDIR/make/autoconf" \
+|| test "x$CONFIGURE_START_DIR" = "x$TOPDIR/make" ; then
 # We are running configure from the src root.
 # Create a default ./build/target-variant-debuglevel output root.
 if test "x${CONF_NAME}" = x; then
@@ -895,9 +897,9 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
 # If configuration is situated in normal build directory, just use the build
 # directory name as configuration name, otherwise use the complete path.
 if test "x${CONF_NAME}" = x; then
-CONF_NAME=`$ECHO $CURDIR | $SED -e "s!^${TOPDIR}/build/!!"`
+CONF_NAME=`$ECHO $CONFIGURE_START_DIR | $SED -e "s!^${TOPDIR}/build/!!"`
 fi
-OUTPUTDIR="$CURDIR"
+OUTPUTDIR="$CONFIGURE_START_DIR"
 AC_MSG_RESULT([in current directory])
 
 # WARNING: This might be a bad thing to do. You need to be sure you want to
@@ -917,14 +919,14 @@ AC_DEFUN_ONCE([BASIC_SETUP_OUTPUT_DIR],
 -e 's/ //g' \
 | $TR -d '\n'`
 if test "x$filtered_files" != x; then
-AC_MSG_NOTICE([Current directory is $CURDIR.])
+AC_MSG_NOTICE([Current directory is $CONFIGURE_START_DIR.])
 AC_MSG_NOTICE([Since this is not the source root, configure will output the configuration here])
 AC_MSG_NOTICE([(as opposed to creating a configuration in <src_root>/build/<conf-name>).])
 AC_MSG_NOTICE([However, this directory is not empty. This is not allowed, since it could])
 AC_MSG_NOTICE([seriously mess up just about everything.])
 AC_MSG_NOTICE([Try 'cd $TOPDIR' and restart configure])
 AC_MSG_NOTICE([(or create a new empty directory and cd to it).])
-AC_MSG_ERROR([Will not continue creating configuration in $CURDIR])
+AC_MSG_ERROR([Will not continue creating configuration in $CONFIGURE_START_DIR])
 fi
 fi
 fi

make/autoconf/basics_windows.m4
@@ -551,7 +551,7 @@ AC_DEFUN_ONCE([BASIC_COMPILE_FIXPATH],
 $MKDIR -p $FIXPATH_DIR $CONFIGURESUPPORT_OUTPUTDIR/bin
 cd $FIXPATH_DIR
 $CC $FIXPATH_SRC_W -Fe$FIXPATH_BIN_W > $FIXPATH_DIR/fixpath1.log 2>&1
-cd $CURDIR
+cd $CONFIGURE_START_DIR
 
 if test ! -x $FIXPATH_BIN; then
 AC_MSG_RESULT([no])
@@ -574,7 +574,7 @@ AC_DEFUN_ONCE([BASIC_COMPILE_FIXPATH],
 cd $FIXPATH_DIR
 $FIXPATH $CC $FIXPATH_SRC -Fe$FIXPATH_DIR/fixpath2.exe \
 > $FIXPATH_DIR/fixpath2.log 2>&1
-cd $CURDIR
+cd $CONFIGURE_START_DIR
 if test ! -x $FIXPATH_DIR/fixpath2.exe; then
 AC_MSG_RESULT([no])
 cat $FIXPATH_DIR/fixpath2.log

make/autoconf/flags-other.m4
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -81,10 +81,10 @@ AC_DEFUN([FLAGS_SETUP_RCFLAGS],
 RC_FLAGS="$RC_FLAGS \
 -D\"JDK_VERSION_STRING=\$(VERSION_STRING)\" \
 -D\"JDK_COMPANY=\$(COMPANY_NAME)\" \
--D\"JDK_COMPONENT=\$(PRODUCT_NAME) \$(JDK_RC_PLATFORM_NAME) binary\" \
+-D\"JDK_COMPONENT=\$(JDK_RC_NAME) binary\" \
 -D\"JDK_VER=\$(VERSION_NUMBER)\" \
 -D\"JDK_COPYRIGHT=Copyright \xA9 $COPYRIGHT_YEAR\" \
--D\"JDK_NAME=\$(PRODUCT_NAME) \$(JDK_RC_PLATFORM_NAME) \$(VERSION_FEATURE)\" \
+-D\"JDK_NAME=\$(JDK_RC_NAME) \$(VERSION_FEATURE)\" \
 -D\"JDK_FVER=\$(subst .,\$(COMMA),\$(VERSION_NUMBER_FOUR_POSITIONS))\""
 
 JVM_RCFLAGS="$JVM_RCFLAGS \

make/autoconf/jdk-version.m4
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -69,6 +69,23 @@ AC_DEFUN_ONCE([JDKVER_SETUP_JDK_VERSION_NUMBERS],
 AC_SUBST(MACOSX_BUNDLE_NAME_BASE)
 AC_SUBST(MACOSX_BUNDLE_ID_BASE)
 
+# Set the JDK RC name
+AC_ARG_WITH(jdk-rc-name, [AS_HELP_STRING([--with-jdk-rc-name],
+[Set JDK RC name. This is used for FileDescription and ProductName properties
+of MS Windows binaries. @<:@not specified@:>@])])
+if test "x$with_jdk_rc_name" = xyes; then
+AC_MSG_ERROR([--with-jdk-rc-name must have a value])
+elif [ ! [[ $with_jdk_rc_name =~ ^[[:print:]]*$ ]] ]; then
+AC_MSG_ERROR([--with-jdk-rc-name contains non-printing characters: $with_jdk_rc_name])
+elif test "x$with_jdk_rc_name" != x; then
+# Set JDK_RC_NAME to a custom value if '--with-jdk-rc-name' was used and is not empty.
+JDK_RC_NAME="$with_jdk_rc_name"
+else
+# Otherwise calculate from "version-numbers" included above.
+JDK_RC_NAME="$PRODUCT_NAME $JDK_RC_PLATFORM_NAME"
+fi
+AC_SUBST(JDK_RC_NAME)
+
 # The vendor name, if any
 AC_ARG_WITH(vendor-name, [AS_HELP_STRING([--with-vendor-name],
 [Set vendor name. Among others, used to set the 'java.vendor'

make/autoconf/spec.gmk.in
@@ -32,6 +32,8 @@
 
 # The command line given to configure.
 CONFIGURE_COMMAND_LINE:=@CONFIGURE_COMMAND_LINE@
+# The current directory when configure was run
+CONFIGURE_START_DIR:=@CONFIGURE_START_DIR@
 
 # A self-referential reference to this file.
 SPEC:=@SPEC@
@@ -139,7 +141,6 @@ SYSROOT_LDFLAGS := @SYSROOT_LDFLAGS@
 # The top-level directory of the source repository
 TOPDIR:=@TOPDIR@
 
-
 IMPORT_MODULES_CLASSES:=@IMPORT_MODULES_CLASSES@
 IMPORT_MODULES_CMDS:=@IMPORT_MODULES_CMDS@
 IMPORT_MODULES_LIBS:=@IMPORT_MODULES_LIBS@
@@ -156,6 +157,7 @@ LAUNCHER_NAME:=@LAUNCHER_NAME@
 PRODUCT_NAME:=@PRODUCT_NAME@
 PRODUCT_SUFFIX:=@PRODUCT_SUFFIX@
 JDK_RC_PLATFORM_NAME:=@JDK_RC_PLATFORM_NAME@
+JDK_RC_NAME:=@JDK_RC_NAME@
 COMPANY_NAME:=@COMPANY_NAME@
 HOTSPOT_VM_DISTRO:=@HOTSPOT_VM_DISTRO@
 MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@

make/autoconf/toolchain_windows.m4
@@ -472,7 +472,7 @@ AC_DEFUN([TOOLCHAIN_SETUP_VISUAL_STUDIO_ENV],
 # Change directory so we don't need to mess with Windows paths in redirects.
 cd $VS_ENV_TMP_DIR
 $CMD /c extract-vs-env.bat | $CAT
-cd $CURDIR
+cd $CONFIGURE_START_DIR
 
 if test ! -s $VS_ENV_TMP_DIR/set-vs-env.sh; then
 AC_MSG_NOTICE([Could not succesfully extract the environment variables needed for the VS setup.])

make/common/NativeCompilation.gmk
@@ -231,8 +231,8 @@ define SetupCompileNativeFileBody
 
 # Only continue if this object file hasn't been processed already. This lets
 # the first found source file override any other with the same name.
-ifeq ($$(findstring $$($1_OBJ), $$($$($1_BASE)_OBJS_SO_FAR)), )
-$$($1_BASE)_OBJS_SO_FAR += $$($1_OBJ)
+ifeq ($$($1_OBJ_PROCESSED), )
+$1_OBJ_PROCESSED := true
 # This is the definite source file to use for $1_FILENAME.
 $1_SRC_FILE := $$($1_FILE)
 
@@ -308,14 +308,18 @@ define SetupCompileNativeFileBody
 
 ifeq ($$(filter %.s %.S, $$($1_FILENAME)), )
 # And this is the dependency file for this obj file.
-$1_DEP := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ))
+$1_DEPS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ))
 # The dependency target file lists all dependencies as empty targets to
 # avoid make error "No rule to make target" for removed files
-$1_DEP_TARGETS := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ))
+$1_DEPS_TARGETS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ))
 
-# Include previously generated dependency information. (if it exists)
--include $$($1_DEP)
--include $$($1_DEP_TARGETS)
+# Only try to load individual dependency information files if the global
+# file hasn't been loaded (could happen if make was interrupted).
+ifneq ($$($$($1_BASE)_DEPS_FILE_LOADED), true)
+# Include previously generated dependency information. (if it exists)
+-include $$($1_DEPS_FILE)
+-include $$($1_DEPS_TARGETS_FILE)
+endif
 endif
 
 ifneq ($$(strip $$($1_CFLAGS) $$($1_CXXFLAGS) $$($1_OPTIMIZATION)), )
@@ -340,16 +344,16 @@ define SetupCompileNativeFileBody
 # object file in the generated deps files. Fixing it with sed. If
 # compiling assembly, don't try this.
 $$(call ExecuteWithLog, $$@, \
-$$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEP).tmp $$($1_COMPILE_OPTIONS))
-$(SED) 's|^$$(@F):|$$@:|' $$($1_DEP).tmp > $$($1_DEP)
+$$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEPS_FILE).tmp $$($1_COMPILE_OPTIONS))
+$(SED) 's|^$$(@F):|$$@:|' $$($1_DEPS_FILE).tmp > $$($1_DEPS_FILE)
 else
 $$(call ExecuteWithLog, $$@, \
-$$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEP) $$($1_COMPILE_OPTIONS))
+$$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEPS_FILE) $$($1_COMPILE_OPTIONS))
 endif
 # Create a dependency target file from the dependency file.
 # Solution suggested by http://make.mad-scientist.net/papers/advanced-auto-dependency-generation/
-ifneq ($$($1_DEP), )
-$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEP) > $$($1_DEP_TARGETS)
+ifneq ($$($1_DEPS_FILE), )
+$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
 endif
 else
 # The Visual Studio compiler lacks a feature for generating make
@@ -363,10 +367,10 @@ define SetupCompileNativeFileBody
 $$($1_COMPILER) -showIncludes $$($1_COMPILE_OPTIONS)) \
 | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
 -e "^$$($1_FILENAME)$$$$" || test "$$$$?" = "1" ; \
-$(ECHO) $$@: \\ > $$($1_DEP) ; \
+$(ECHO) $$@: \\ > $$($1_DEPS_FILE) ; \
 $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_OBJ).log \
-| $(SORT) -u >> $$($1_DEP) ; \
-$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEP) > $$($1_DEP_TARGETS)
+| $(SORT) -u >> $$($1_DEPS_FILE) ; \
+$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
 endif
 endif
 endef
@@ -486,6 +490,9 @@ define SetupNativeCompilationBody
 $1_NOSUFFIX := $$($1_PREFIX)$$($1_NAME)
 $1_SAFE_NAME := $$(strip $$(subst /,_, $1))
 
+# Need to make sure TARGET is first on list
+$1 := $$($1_TARGET)
+
 # Setup the toolchain to be used
 $$(call SetIfEmpty, $1_TOOLCHAIN, TOOLCHAIN_DEFAULT)
 $$(call SetIfEmpty, $1_CC, $$($$($1_TOOLCHAIN)_CC))
@@ -719,20 +726,21 @@ define SetupNativeCompilationBody
 $1_PCH_FILE := $$($1_OBJECT_DIR)/precompiled/$$(notdir $$($1_PRECOMPILED_HEADER)).pch
 $1_USE_PCH_FLAGS := -include-pch $$($1_PCH_FILE)
 endif
-$1_PCH_DEP := $$($1_PCH_FILE).d
-$1_PCH_DEP_TARGETS := $$($1_PCH_FILE).d.targets
+$1_PCH_DEPS_FILE := $$($1_PCH_FILE).d
+$1_PCH_DEPS_TARGETS_FILE := $$($1_PCH_FILE).d.targets
 
--include $$($1_PCH_DEP)
--include $$($1_PCH_DEP_TARGETS)
+-include $$($1_PCH_DEPS_FILE)
+-include $$($1_PCH_DEPS_TARGETS_FILE)
 
 $1_PCH_COMMAND := $$($1_CC) $$($1_CFLAGS) $$($1_EXTRA_CFLAGS) $$($1_SYSROOT_CFLAGS) \
-$$($1_OPT_CFLAGS) -x c++-header -c $(C_FLAG_DEPS) $$($1_PCH_DEP)
+$$($1_OPT_CFLAGS) -x c++-header -c $(C_FLAG_DEPS) $$($1_PCH_DEPS_FILE)
 
 $$($1_PCH_FILE): $$($1_PRECOMPILED_HEADER) $$($1_COMPILE_VARDEPS_FILE)
 $$(call LogInfo, Generating precompiled header)
 $$(call MakeDir, $$(@D))
 $$(call ExecuteWithLog, $$@, $$($1_PCH_COMMAND) $$< -o $$@)
-$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_PCH_DEP) > $$($1_PCH_DEP_TARGETS)
+$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_PCH_DEPS_FILE) \
+> $$($1_PCH_DEPS_TARGETS_FILE)
 
 $$($1_ALL_OBJS): $$($1_PCH_FILE)
@@ -748,6 +756,34 @@ define SetupNativeCompilationBody
 endif
 endif
 
+# Create a rule to collect all the individual make dependency files into a
+# single makefile.
+$1_DEPS_FILE := $$($1_OBJECT_DIR)/$1.d
+
+$$($1_DEPS_FILE): $$($1_ALL_OBJS)
+$(RM) $$@
+# CD into dir to reduce risk of hitting command length limits, which
+# could otherwise happen if TOPDIR is a very long path.
+$(CD) $$($1_OBJECT_DIR) && $(CAT) *.d > $$@.tmp
+$(CD) $$($1_OBJECT_DIR) && $(CAT) *.d.targets | $(SORT) -u >> $$@.tmp
+# After generating the file, which happens after all objects have been
+# compiled, copy it to .old extension. On the next make invocation, this
+# .old file will be included by make.
+$(CP) $$@.tmp $$@.old
+$(MV) $$@.tmp $$@
+
+$1 += $$($1_DEPS_FILE)
+
+# The include must be on the .old file, which represents the state from the
+# previous invocation of make. The file being included must not have a rule
+# defined for it as otherwise make will think it has to run the rule before
+# being able to include the file, which would be wrong since we specifically
+# need the file as it was generated by a previous make invocation.
+ifneq ($$(wildcard $$($1_DEPS_FILE).old), )
+$1_DEPS_FILE_LOADED := true
+-include $$($1_DEPS_FILE).old
+endif
+
 # Now call SetupCompileNativeFile for each source file we are going to compile.
 $$(foreach file, $$($1_SRCS), \
 $$(eval $$(call SetupCompileNativeFile, $1_$$(notdir $$(file)),\
@@ -774,10 +810,10 @@ define SetupNativeCompilationBody
 ifeq ($(call isTargetOs, windows), true)
 ifneq ($$($1_VERSIONINFO_RESOURCE), )
 $1_RES := $$($1_OBJECT_DIR)/$$($1_BASENAME).res
-$1_RES_DEP := $$($1_RES).d
-$1_RES_DEP_TARGETS := $$($1_RES).d.targets
--include $$($1_RES_DEP)
--include $$($1_RES_DEP_TARGETS)
+$1_RES_DEPS_FILE := $$($1_RES).d
+$1_RES_DEPS_TARGETS_FILE := $$($1_RES).d.targets
+-include $$($1_RES_DEPS_FILE)
+-include $$($1_RES_DEPS_TARGETS_FILE)
 
 $1_RES_VARDEPS := $$($1_RC) $$($1_RC_FLAGS)
 $1_RES_VARDEPS_FILE := $$(call DependOnVariable, $1_RES_VARDEPS, \
@@ -794,16 +830,18 @@ define SetupNativeCompilationBody
 # For some unknown reason, in this case CL actually outputs the show
 # includes to stderr so need to redirect it to hide the output from the
 # main log.
-$$(call ExecuteWithLog, $$($1_RES_DEP).obj, \
+$$(call ExecuteWithLog, $$($1_RES_DEPS_FILE).obj, \
 $$($1_CC) $$(filter-out -l%, $$($1_RC_FLAGS)) \
 $$($1_SYSROOT_CFLAGS) -showIncludes -nologo -TC \
-$(CC_OUT_OPTION)$$($1_RES_DEP).obj -P -Fi$$($1_RES_DEP).pp \
+$(CC_OUT_OPTION)$$($1_RES_DEPS_FILE).obj -P -Fi$$($1_RES_DEPS_FILE).pp \
 $$($1_VERSIONINFO_RESOURCE)) 2>&1 \
 | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
 -e "^$$(notdir $$($1_VERSIONINFO_RESOURCE))$$$$" || test "$$$$?" = "1" ; \
-$(ECHO) $$($1_RES): \\ > $$($1_RES_DEP) ; \
-$(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEP).obj.log >> $$($1_RES_DEP) ; \
-$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEP) > $$($1_RES_DEP_TARGETS)
+$(ECHO) $$($1_RES): \\ > $$($1_RES_DEPS_FILE) ; \
+$(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEPS_FILE).obj.log \
+>> $$($1_RES_DEPS_FILE) ; \
+$(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEPS_FILE) \
+> $$($1_RES_DEPS_TARGETS_FILE)
 endif
 endif
@@ -830,9 +868,6 @@ define SetupNativeCompilationBody
 $1_EXTRA_LDFLAGS += $(call SET_SHARED_LIBRARY_MAPFILE,$$($1_REAL_MAPFILE))
 endif
 
-# Need to make sure TARGET is first on list
-$1 := $$($1_TARGET)
-
 ifneq ($$($1_COPY_DEBUG_SYMBOLS), false)
 $1_COPY_DEBUG_SYMBOLS := $(COPY_DEBUG_SYMBOLS)
 endif

make/hotspot/lib/JvmFeatures.gmk
@@ -172,8 +172,6 @@ endif
 ifneq ($(call check-jvm-feature, shenandoahgc), true)
 JVM_CFLAGS_FEATURES += -DINCLUDE_SHENANDOAHGC=0
 JVM_EXCLUDE_PATTERNS += gc/shenandoah
-else
-JVM_CFLAGS_FEATURES += -DSUPPORT_BARRIER_ON_PRIMITIVES -DSUPPORT_NOT_TO_SPACE_INVARIANT
 endif
 
 ifneq ($(call check-jvm-feature, jfr), true)

|
||||
#
|
||||
# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -61,6 +61,7 @@ ifeq ($(call isTargetOs, windows), true)
|
||||
BUILD_JDK_JTREG_LIBRARIES_LIBS_libstringPlatformChars := $(WIN_LIB_JAVA)
|
||||
WIN_LIB_JLI := $(SUPPORT_OUTPUTDIR)/native/java.base/libjli/jli.lib
|
||||
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeJliLaunchTest := $(WIN_LIB_JLI)
|
||||
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeCallerAccessTest := jvm.lib
|
||||
else
|
||||
BUILD_JDK_JTREG_LIBRARIES_LIBS_libstringPlatformChars := -ljava
|
||||
BUILD_JDK_JTREG_LIBRARIES_LIBS_libDirectIO := -ljava
|
||||
@ -70,6 +71,7 @@ else
|
||||
BUILD_JDK_JTREG_LIBRARIES_LIBS_libInheritedChannel := -ljava -lsocket -lnsl
|
||||
endif
|
||||
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeJliLaunchTest := -ljli
|
||||
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeCallerAccessTest := -ljvm
|
||||
endif
|
||||
|
||||
ifeq ($(call isTargetOs, macosx), true)
|
||||
|
src/hotspot/cpu/aarch64/aarch64.ad
@@ -3932,7 +3932,8 @@ operand immL_4294967295()
 
 operand immL_bitmask()
 %{
-predicate(((n->get_long() & 0xc000000000000000l) == 0)
+predicate((n->get_long() != 0)
+&& ((n->get_long() & 0xc000000000000000l) == 0)
 && is_power_of_2(n->get_long() + 1));
 match(ConL);
 
@@ -3943,7 +3944,8 @@ operand immL_bitmask()
 
 operand immI_bitmask()
 %{
-predicate(((n->get_int() & 0xc0000000) == 0)
+predicate((n->get_int() != 0)
+&& ((n->get_int() & 0xc0000000) == 0)
 && is_power_of_2(n->get_int() + 1));
 match(ConI);
 
@@ -11432,11 +11434,13 @@ instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_co
 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
 match(Set dst (AndI (URShiftI src rshift) mask));
+// Make sure we are not going to exceed what ubfxw can do.
+predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 
 ins_cost(INSN_COST);
 format %{ "ubfxw $dst, $src, $rshift, $mask" %}
 ins_encode %{
-int rshift = $rshift$$constant;
+int rshift = $rshift$$constant & 31;
 long mask = $mask$$constant;
 int width = exact_log2(mask+1);
 __ ubfxw(as_Register($dst$$reg),
@@ -11447,13 +11451,15 @@ instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
 %{
 match(Set dst (AndL (URShiftL src rshift) mask));
+// Make sure we are not going to exceed what ubfx can do.
+predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));
 
 ins_cost(INSN_COST);
 format %{ "ubfx $dst, $src, $rshift, $mask" %}
 ins_encode %{
-int rshift = $rshift$$constant;
+int rshift = $rshift$$constant & 63;
 long mask = $mask$$constant;
-int width = exact_log2(mask+1);
+int width = exact_log2_long(mask+1);
 __ ubfx(as_Register($dst$$reg),
 as_Register($src$$reg), rshift, width);
 %}
@@ -11465,11 +11471,13 @@ instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
 match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
+// Make sure we are not going to exceed what ubfxw can do.
+predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 
 ins_cost(INSN_COST * 2);
 format %{ "ubfx $dst, $src, $rshift, $mask" %}
 ins_encode %{
-int rshift = $rshift$$constant;
+int rshift = $rshift$$constant & 31;
 long mask = $mask$$constant;
 int width = exact_log2(mask+1);
 __ ubfx(as_Register($dst$$reg),
@@ -11510,7 +11518,7 @@ instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
 ins_encode %{
 int lshift = $lshift$$constant;
 long mask = $mask$$constant;
-int width = exact_log2(mask+1);
+int width = exact_log2_long(mask+1);
 __ ubfiz(as_Register($dst$$reg),
 as_Register($src$$reg), lshift, width);
 %}

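Note: the following is an illustrative standalone sketch, not JDK code, of the arithmetic the new ubfx/ubfxw predicates above enforce. The mask must be a nonzero run of contiguous low-order bits (so mask+1 is a power of two), and the field width plus the masked right shift must fit in the register, otherwise the bitfield-extract instruction cannot encode the operation. All names below are invented for the sketch.

    #include <cassert>
    #include <cstdint>

    // Width of the extracted field: mask must be 2^w - 1, i.e. mask+1 a power of two.
    static int field_width(uint64_t mask) {
      assert(mask != 0 && ((mask + 1) & mask) == 0);  // nonzero, contiguous low bits
      int w = 0;
      while (mask >> w) w++;  // counts bits; plays the role of exact_log2(mask + 1)
      return w;
    }

    // Mirrors the 64-bit predicate: (exact_log2_long(mask+1) + (rshift & 63)) <= 64.
    static bool ubfx_encodable(uint64_t mask, int rshift) {
      return field_width(mask) + (rshift & 63) <= 64;
    }

The added `n->get_long() != 0` / `n->get_int() != 0` terms matter because a zero mask would otherwise pass the power-of-two test (0 + 1 = 1) while yielding an invalid zero-width field.
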
src/hotspot/cpu/aarch64/aarch64_ad.m4
@@ -181,31 +181,35 @@ define(`BFX_INSN',
 `instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
 %{
 match(Set dst (And$1 ($2$1 src rshift) mask));
+// Make sure we are not going to exceed what $3 can do.
+predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));
 
 ins_cost(INSN_COST);
 format %{ "$3 $dst, $src, $rshift, $mask" %}
 ins_encode %{
-int rshift = $rshift$$constant;
+int rshift = $rshift$$constant & $4;
 long mask = $mask$$constant;
-int width = exact_log2(mask+1);
+int width = exact_log2$6(mask+1);
 __ $3(as_Register($dst$$reg),
 as_Register($src$$reg), rshift, width);
 %}
 ins_pipe(ialu_reg_shift);
 %}')
-BFX_INSN(I,URShift,ubfxw)
-BFX_INSN(L,URShift,ubfx)
+BFX_INSN(I, URShift, ubfxw, 31, int)
+BFX_INSN(L, URShift, ubfx, 63, long, _long)
 
 // We can use ubfx when extending an And with a mask when we know mask
 // is positive. We know that because immI_bitmask guarantees it.
 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
 match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
+// Make sure we are not going to exceed what ubfxw can do.
+predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 
 ins_cost(INSN_COST * 2);
 format %{ "ubfx $dst, $src, $rshift, $mask" %}
 ins_encode %{
-int rshift = $rshift$$constant;
+int rshift = $rshift$$constant & 31;
 long mask = $mask$$constant;
 int width = exact_log2(mask+1);
 __ ubfx(as_Register($dst$$reg),
@@ -228,7 +232,7 @@ define(`UBFIZ_INSN',
 ins_encode %{
 int lshift = $lshift$$constant;
 long mask = $mask$$constant;
-int width = exact_log2(mask+1);
+int width = exact_log2$5(mask+1);
 __ $2(as_Register($dst$$reg),
 as_Register($src$$reg), lshift, width);
 %}

src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -1211,8 +1211,8 @@ public:
 /* The size bit is in bit 30, not 31 */
 sz = (operand_size)(sz == word ? 0b00:0b01);
 }
-f(sz, 31, 30), f(0b001000, 29, 24), f(1, 23), f(a, 22), f(1, 21);
-rf(Rs, 16), f(r, 15), f(0b11111, 14, 10), rf(Rn, 5), rf(Rt, 0);
+f(sz, 31, 30), f(0b001000, 29, 24), f(not_pair ? 1 : 0, 23), f(a, 22), f(1, 21);
+zrf(Rs, 16), f(r, 15), f(0b11111, 14, 10), srf(Rn, 5), zrf(Rt, 0);
 }
 
 // CAS

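Note: the `f(value, msb, lsb)` calls above pack fixed-width fields into a 32-bit AArch64 instruction word. A minimal sketch of that idea, with invented names (HotSpot's actual Assembler helpers also track which bits have been set and distinguish zero-register vs stack-pointer encodings, which this omits):

    #include <cassert>
    #include <cstdint>

    // Insert 'val' into bits [msb:lsb] of a 32-bit instruction word.
    static uint32_t set_field(uint32_t insn, uint32_t val, int msb, int lsb) {
      int width = msb - lsb + 1;
      assert(width > 0 && width < 32 && val < (1u << width));
      return insn | (val << lsb);
    }

    // Example shaped like the hunk above: size bits in [31:30], a flag in bit 23.
    // uint32_t insn = set_field(set_field(0, sz, 31, 30), not_pair ? 1 : 0, 23, 23);
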
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -34,6 +34,7 @@
 #include "c1/c1_ValueStack.hpp"
 #include "ci/ciArrayKlass.hpp"
 #include "ci/ciInstance.hpp"
+#include "code/compiledIC.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
@@ -2063,11 +2064,10 @@ void LIR_Assembler::emit_static_call_stub() {
 int start = __ offset();
 
 __ relocate(static_stub_Relocation::spec(call_pc));
-__ mov_metadata(rmethod, (Metadata*)NULL);
-__ movptr(rscratch1, 0);
-__ br(rscratch1);
+__ emit_static_call_stub();
 
-assert(__ offset() - start <= call_stub_size(), "stub too big");
+assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
+<= call_stub_size(), "stub too big");
 __ end_a_stub();
 }

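Note: a hedged arithmetic sketch (not JDK code) of the size accounting the reworked assert expresses: the reserved `_call_stub_size` slot must now cover both the to-interpreter stub and the trampoline stub. The trampoline size below is an assumed value for illustration only; the real constants live in the AArch64 port.

    const int instruction_size = 4;  // bytes per AArch64 instruction
    const int to_interp_stub_size = 8 * instruction_size;      // isb; mov_metadata; movptr; br
    const int to_trampoline_stub_size = 5 * instruction_size;  // assumed for the sketch
    const int call_stub_size = 13 * instruction_size;          // matches the new constant below

    static_assert(to_interp_stub_size + to_trampoline_stub_size <= call_stub_size,
                  "static call stub must fit in the reserved slot");
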
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -69,7 +69,9 @@ friend class ArrayCopyStub;
 void deoptimize_trap(CodeEmitInfo *info);
 
 enum {
-_call_stub_size = 12 * NativeInstruction::instruction_size,
+// call stub: CompiledStaticCall::to_interp_stub_size() +
+//            CompiledStaticCall::to_trampoline_stub_size()
+_call_stub_size = 13 * NativeInstruction::instruction_size,
 _call_aot_stub_size = 0,
 _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
 _deopt_handler_size = 7 * NativeInstruction::instruction_size

src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -61,14 +61,14 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
 // Don't create a Metadata reloc if we're generating immutable PIC.
 if (cbuf.immutable_PIC()) {
 __ movptr(rmethod, 0);
-} else {
-__ mov_metadata(rmethod, (Metadata*)NULL);
-}
-#else
-__ mov_metadata(rmethod, (Metadata*)NULL);
-#endif
-__ movptr(rscratch1, 0);
-__ br(rscratch1);
+__ movptr(rscratch1, 0);
+__ br(rscratch1);
+
+} else
+#endif
+{
+__ emit_static_call_stub();
+}
 
 assert((__ offset() - offset) <= (int)to_interp_stub_size(), "stub too big");
 __ end_a_stub();
@@ -77,7 +77,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
 #undef __
 
 int CompiledStaticCall::to_interp_stub_size() {
-return 7 * NativeInstruction::instruction_size;
+// isb; movk; movz; movz; movk; movz; movz; br
+return 8 * NativeInstruction::instruction_size;
 }
 
 int CompiledStaticCall::to_trampoline_stub_size() {
@@ -159,7 +160,8 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
 }
 
 // Creation also verifies the object.
-NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+NativeMovConstReg* method_holder
+= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
 #ifndef PRODUCT
 NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
@@ -184,7 +186,8 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
 assert(stub != NULL, "stub not found");
 assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
 // Creation also verifies the object.
-NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+NativeMovConstReg* method_holder
+= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
 method_holder->set_data(0);
 }
@@ -201,8 +204,9 @@ void CompiledDirectStaticCall::verify() {
 address stub = find_stub(false /* is_aot */);
 assert(stub != NULL, "no stub found for static call");
 // Creation also verifies the object.
-NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
-NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
+NativeMovConstReg* method_holder
+= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
+NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
 
 // Verify state.
 assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");

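Note: the repeated `stub + NativeInstruction::instruction_size` offsets above follow from the stub now beginning with an isb, so the metadata load sits one instruction in. A hedged sketch of the layout assumption (constant and helper names invented for illustration):

    #include <cstdint>

    const int kInstructionSize = 4;  // stands in for NativeInstruction::instruction_size

    // Stub layout assumed after this change:
    //   stub + 0 : isb
    //   stub + 4 : mov_metadata (the method-holder load the callers patch)
    //   ...      : movptr + br  (jump to the i2c entry)
    static uint8_t* method_holder_addr(uint8_t* stub) {
      return stub + kInstructionSize;  // skip the leading isb
    }
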
src/hotspot/cpu/aarch64/frame_aarch64.cpp
@@ -559,7 +559,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
 
 // validate constantPoolCache*
 ConstantPoolCache* cp = *interpreter_frame_cache_addr();
-if (cp == NULL || !cp->is_metaspace_object()) return false;
+if (MetaspaceObj::is_valid(cp) == false) return false;
 
 // validate locals

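Note: the rewrite folds the explicit NULL test into a single validity query. A minimal self-contained sketch of why that works, with invented bounds (not HotSpot's actual MetaspaceObj implementation): NULL is simply never inside a committed metaspace range, so one range check subsumes both conditions.

    #include <cstdint>

    // Hypothetical committed metaspace range for the sketch.
    static const uintptr_t kMetaspaceLo = 0x100000;
    static const uintptr_t kMetaspaceHi = 0x200000;

    // One query replaces "p == NULL || !p->is_metaspace_object()".
    static bool is_valid_metaspace_ptr(const void* p) {
      uintptr_t a = (uintptr_t)p;  // a == 0 for NULL, which fails the range test
      return a >= kMetaspaceLo && a < kMetaspaceHi;
    }
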
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
@@ -40,7 +40,7 @@
 
 #define __ masm->
 
-address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
+address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;
 
 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
 Register addr, Register count, RegSet saved_regs) {
@@ -87,6 +87,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, Dec
 void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
 Register start, Register count, Register scratch, RegSet saved_regs) {
 if (is_oop) {
+Label done;
+
+// Avoid calling runtime if count == 0
+__ cbz(count, done);
+
+// Is updating references?
+Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+__ ldrb(rscratch1, gc_state);
+__ tbz(rscratch1, ShenandoahHeap::UPDATEREFS_BITPOS, done);
+
 __ push(saved_regs, sp);
 assert_different_registers(start, count, scratch);
 assert_different_registers(c_rarg0, count);
@@ -94,6 +104,8 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
 __ mov(c_rarg1, count);
 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
 __ pop(saved_regs, sp);
+
+__ bind(done);
 }
 }
@@ -198,60 +210,31 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
 __ bind(done);
 }
 
-void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
-if (ShenandoahReadBarrier) {
-read_barrier_impl(masm, dst);
-}
-}
-
-void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
-assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) {
+assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
 Label is_null;
 __ cbz(dst, is_null);
-read_barrier_not_null_impl(masm, dst);
+resolve_forward_pointer_not_null(masm, dst);
 __ bind(is_null);
 }
 
-void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
-if (ShenandoahReadBarrier) {
-read_barrier_not_null_impl(masm, dst);
-}
-}
-
-
-void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
-assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
+// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2.
+void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) {
+assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
 __ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
 }
 
-void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
-if (ShenandoahWriteBarrier) {
-write_barrier_impl(masm, dst);
-}
-}
-
-void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
-assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
-assert(dst != rscratch1, "need rscratch1");
+void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp) {
+assert(ShenandoahLoadRefBarrier, "Should be enabled");
 assert(dst != rscratch2, "need rscratch2");
 
 Label done;
 
+__ enter();
 Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
-__ ldrb(rscratch1, gc_state);
+__ ldrb(rscratch2, gc_state);
 
 // Check for heap stability
-__ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
-__ tst(rscratch1, rscratch2);
-__ br(Assembler::EQ, done);
-
-// Heap is unstable, need to perform the read-barrier even if WB is inactive
-__ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
-
-// Check for evacuation-in-progress and jump to WB slow-path if needed
-__ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
-__ tst(rscratch1, rscratch2);
-__ br(Assembler::EQ, done);
+__ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
 
 RegSet to_save = RegSet::of(r0);
 if (dst != r0) {
@@ -259,7 +242,7 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
 __ mov(r0, dst);
 }
 
-__ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
+__ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));
 
 if (dst != r0) {
 __ mov(dst, r0);
@@ -267,14 +250,11 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
 }
 
 __ bind(done);
+__ leave();
 }
 
 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
 if (ShenandoahStoreValEnqueueBarrier) {
-Label is_null;
-__ cbz(dst, is_null);
-write_barrier_impl(masm, dst);
-__ bind(is_null);
 // Save possibly live regs.
 RegSet live_regs = RegSet::range(r0, r4) - dst;
 __ push(live_regs, sp);
@@ -286,44 +266,45 @@ void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Regis
 __ ldrd(v0, __ post(sp, 2 * wordSize));
 __ pop(live_regs, sp);
 }
-if (ShenandoahStoreValReadBarrier) {
-read_barrier_impl(masm, dst);
-}
+}
+
+void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp) {
+if (ShenandoahLoadRefBarrier) {
+Label is_null;
+__ cbz(dst, is_null);
+load_reference_barrier_not_null(masm, dst, tmp);
+__ bind(is_null);
+}
 }
 
 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 Register dst, Address src, Register tmp1, Register tmp_thread) {
 bool on_oop = type == T_OBJECT || type == T_ARRAY;
 bool in_heap = (decorators & IN_HEAP) != 0;
 bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
 bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
 bool on_reference = on_weak || on_phantom;
 
-if (in_heap) {
-read_barrier_not_null(masm, src.base());
-}
-
 BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
-if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
-__ enter();
-satb_write_barrier_pre(masm /* masm */,
-noreg /* obj */,
-dst /* pre_val */,
-rthread /* thread */,
-tmp1 /* tmp */,
-true /* tosca_live */,
-true /* expand_call */);
-__ leave();
+if (on_oop) {
+load_reference_barrier(masm, dst, tmp1);
+
+if (ShenandoahKeepAliveBarrier && on_reference) {
+__ enter();
+satb_write_barrier_pre(masm /* masm */,
+noreg /* obj */,
+dst /* pre_val */,
+rthread /* thread */,
+tmp1 /* tmp */,
+true /* tosca_live */,
+true /* expand_call */);
+__ leave();
+}
 }
 }
 
 void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 Address dst, Register val, Register tmp1, Register tmp2) {
 bool on_oop = type == T_OBJECT || type == T_ARRAY;
 bool in_heap = (decorators & IN_HEAP) != 0;
-if (in_heap) {
-write_barrier(masm, dst.base());
-}
 if (!on_oop) {
 BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
 return;
@@ -361,21 +342,6 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
 
 }
 
-void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
-__ cmp(op1, op2);
-if (ShenandoahAcmpBarrier) {
-Label done;
-__ br(Assembler::EQ, done);
-// The object may have been evacuated, but we won't see it without a
-// membar here.
-__ membar(Assembler::LoadStore| Assembler::LoadLoad);
-read_barrier(masm, op1);
-read_barrier(masm, op2);
-__ cmp(op1, op2);
-__ bind(done);
-}
-}
-
 void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
 Register var_size_in_bytes,
 int con_size_in_bytes,
@@ -410,27 +376,6 @@ void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register
 }
 }
 
-void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
-bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
-bool is_write = (decorators & ACCESS_WRITE) != 0;
-if (is_write) {
-if (oop_not_null) {
-write_barrier(masm, obj);
-} else {
-Label done;
-__ cbz(obj, done);
-write_barrier(masm, obj);
-__ bind(done);
-}
-} else {
-if (oop_not_null) {
-read_barrier_not_null(masm, obj);
-} else {
-read_barrier(masm, obj);
-}
-}
-}
-
 void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
 bool acquire, bool release, bool weak, bool is_cae,
 Register result) {
@@ -469,8 +414,8 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register a
 __ decode_heap_oop(tmp1, tmp1);
 __ decode_heap_oop(tmp2, tmp2);
 }
-read_barrier_impl(masm, tmp1);
-read_barrier_impl(masm, tmp2);
+resolve_forward_pointer(masm, tmp1);
+resolve_forward_pointer(masm, tmp2);
 __ cmp(tmp1, tmp2);
 // Retry with expected now being the value we just loaded from addr.
 __ br(Assembler::EQ, retry);
@@ -515,7 +460,7 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, Shen
 __ b(*stub->continuation());
 }
 
-void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
+void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
 
 Register obj = stub->obj()->as_register();
 Register res = stub->result()->as_register();
@@ -532,7 +477,7 @@ void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, Sh
 __ cbz(res, done);
 }
 
-write_barrier(ce->masm(), res);
+load_reference_barrier_not_null(ce->masm(), res, rscratch1);
 
 __ bind(done);
 __ b(*stub->continuation());
@@ -592,14 +537,14 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
 
 #endif // COMPILER1
 
-address ShenandoahBarrierSetAssembler::shenandoah_wb() {
-assert(_shenandoah_wb != NULL, "need write barrier stub");
-return _shenandoah_wb;
+address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
+assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
+return _shenandoah_lrb;
 }
 
 #define __ cgen->assembler()->
 
-// Shenandoah write barrier.
+// Shenandoah load reference barrier.
 //
 // Input:
 //   r0: OOP to evacuate. Not null.
@@ -608,13 +553,13 @@ address ShenandoahBarrierSetAssembler::shenandoah_wb() {
 //   r0: Pointer to evacuated OOP.
 //
 // Trash rscratch1, rscratch2. Preserve everything else.
-address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {
+address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {
 
 __ align(6);
-StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
+StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
 address start = __ pc();
 
-Label work;
+Label work, done;
 __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
 __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
 __ ldrb(rscratch2, Address(rscratch2, rscratch1));
@@ -622,19 +567,23 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
 __ ret(lr);
 __ bind(work);
 
-Register obj = r0;
+__ mov(rscratch2, r0);
+resolve_forward_pointer_not_null(cgen->assembler(), r0);
+__ cmp(rscratch2, r0);
+__ br(Assembler::NE, done);
 
 __ enter(); // required for proper stackwalking of RuntimeStub frame
 
 __ push_call_clobbered_registers();
 
-__ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT));
+__ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT));
 __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
-__ mov(rscratch1, obj);
+__ mov(rscratch1, r0);
 __ pop_call_clobbered_registers();
-__ mov(obj, rscratch1);
+__ mov(r0, rscratch1);
 
 __ leave(); // required for proper stackwalking of RuntimeStub frame
+__ bind(done);
 __ ret(lr);
 
 return start;
@@ -643,12 +592,12 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
 #undef __
 
 void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
-if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
+if (ShenandoahLoadRefBarrier) {
 int stub_code_size = 2048;
 ResourceMark rm;
 BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
 CodeBuffer buf(bb);
 StubCodeGenerator cgen(&buf);
-_shenandoah_wb = generate_shenandoah_wb(&cgen);
+_shenandoah_lrb = generate_shenandoah_lrb(&cgen);
 }
 }

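Note: a hedged C-style sketch (not the emitted assembly) of the decision logic the generated shenandoah_lrb stub implements above: index a byte map by heap region to test collection-set membership, then compare the object against its Brooks forwarding pointer, and only fall into the runtime when the object still needs evacuation. The map size, region shift, and struct layout are invented for the sketch.

    #include <cstdint>

    static uint8_t in_cset_map[1024];    // one byte per heap region (assumed size)
    static const int kRegionShift = 22;  // log2(region size); assumed

    typedef struct Oop { struct Oop* fwd; } Oop;  // fwd models the Brooks pointer

    static Oop* load_reference_barrier(Oop* obj) {
      // Fast path: object not in the collection set, return it unchanged.
      uintptr_t region = ((uintptr_t)obj) >> kRegionShift;
      if (in_cset_map[region % 1024] == 0) return obj;
      // Already forwarded? The stub compares r0 against its forwardee and
      // skips the runtime call when they differ.
      if (obj->fwd != obj) return obj->fwd;
      // Slow path: call into the runtime to evacuate (elided in the sketch).
      return obj;
    }
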
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
@@ -29,7 +29,7 @@
 #ifdef COMPILER1
 class LIR_Assembler;
 class ShenandoahPreBarrierStub;
-class ShenandoahWriteBarrierStub;
+class ShenandoahLoadReferenceBarrierStub;
 class StubAssembler;
 class StubCodeGenerator;
 #endif
@@ -37,7 +37,7 @@ class StubCodeGenerator;
 class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
 private:
 
-static address _shenandoah_wb;
+static address _shenandoah_lrb;
 
 void satb_write_barrier_pre(MacroAssembler* masm,
 Register obj,
@@ -54,24 +54,21 @@ private:
 bool tosca_live,
 bool expand_call);
 
-void read_barrier(MacroAssembler* masm, Register dst);
-void read_barrier_impl(MacroAssembler* masm, Register dst);
-void read_barrier_not_null(MacroAssembler* masm, Register dst);
-void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);
-void write_barrier(MacroAssembler* masm, Register dst);
-void write_barrier_impl(MacroAssembler* masm, Register dst);
-void asm_acmp_barrier(MacroAssembler* masm, Register op1, Register op2);
+void resolve_forward_pointer(MacroAssembler* masm, Register dst);
+void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst);
+void load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp);
+void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp);
 
-address generate_shenandoah_wb(StubCodeGenerator* cgen);
+address generate_shenandoah_lrb(StubCodeGenerator* cgen);
 
 public:
-static address shenandoah_wb();
+static address shenandoah_lrb();
 
 void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
 
 #ifdef COMPILER1
 void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
-void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
+void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
 void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
 #endif
@@ -83,8 +80,6 @@ public:
 Register dst, Address src, Register tmp1, Register tmp_thread);
 virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
 Address dst, Register val, Register tmp1, Register tmp2);
-virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
-virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);
 virtual void tlab_allocate(MacroAssembler* masm, Register obj,
 Register var_size_in_bytes,
 int con_size_in_bytes,

@ -99,6 +99,7 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt
|
||||
__ xchg(access.resolved_addr(), value_opr, result, tmp);
|
||||
|
||||
if (access.is_oop()) {
|
||||
result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true);
|
||||
if (ShenandoahSATBBarrier) {
|
||||
pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
|
||||
result /* pre_val */);
|
||||
|
@ -45,18 +45,6 @@ encode %{
|
||||
%}
|
||||
%}
|
||||
|
||||
instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{
|
||||
match(Set dst (ShenandoahReadBarrier src));
|
||||
format %{ "shenandoah_rb $dst,$src" %}
|
||||
ins_encode %{
|
||||
Register s = $src$$Register;
|
||||
Register d = $dst$$Register;
|
||||
__ ldr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
|
||||
%}
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
|
||||
instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
|
||||
|
||||
match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
|
||||
|
@ -812,6 +812,18 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
|
||||
return stub_start_addr;
|
||||
}
|
||||
|
||||
void MacroAssembler::emit_static_call_stub() {
|
||||
// CompiledDirectStaticCall::set_to_interpreted knows the
|
||||
// exact layout of this stub.
|
||||
|
||||
isb();
|
||||
mov_metadata(rmethod, (Metadata*)NULL);
|
||||
|
||||
// Jump to the entry point of the i2c stub.
|
||||
movptr(rscratch1, 0);
|
||||
br(rscratch1);
|
||||
}
|
||||
|
||||
void MacroAssembler::c2bool(Register x) {
|
||||
// implements x == 0 ? 0 : 1
|
||||
// note: must only look at least-significant byte of x
|
||||
|
@ -607,6 +607,7 @@ public:
|
||||
static int patch_narrow_klass(address insn_addr, narrowKlass n);
|
||||
|
||||
address emit_trampoline_stub(int insts_call_instruction_offset, address target);
|
||||
void emit_static_call_stub();
|
||||
|
||||
// The following 4 methods return the offset of the appropriate move instruction
|
||||
|
||||
|
@ -232,7 +232,11 @@ void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
void NativeMovConstReg::verify() {
|
||||
// make sure code pattern is actually mov reg64, imm64 instructions
|
||||
if (! (nativeInstruction_at(instruction_address())->is_movz() ||
|
||||
is_adrp_at(instruction_address()) ||
|
||||
is_ldr_literal_at(instruction_address())) ) {
|
||||
fatal("should be MOVZ or ADRP or LDR (literal)");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
@ -494,7 +494,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
|
||||
|
||||
// validate ConstantPoolCache*
|
||||
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
|
||||
if (cp == NULL || !cp->is_metaspace_object()) return false;
|
||||
if (MetaspaceObj::is_valid(cp) == false) return false;
|
||||
|
||||
// validate locals
|
||||
|
||||
|
@ -665,7 +665,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
|
||||
|
||||
// validate ConstantPoolCache*
|
||||
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
|
||||
if (cp == NULL || !cp->is_metaspace_object()) return false;
|
||||
if (MetaspaceObj::is_valid(cp) == false) return false;
|
||||
|
||||
// validate locals
|
||||
|
||||
|
@ -546,7 +546,7 @@ bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
|
||||
|
||||
// validate ConstantPoolCache*
|
||||
ConstantPoolCache* cp = *interpreter_frame_cache_addr();
|
||||
if (cp == NULL || !cp->is_metaspace_object()) return false;
|
||||
if (MetaspaceObj::is_valid(cp) == false) return false;
|
||||
|
||||
// validate locals
|
||||
|
||||
|
@ -41,7 +41,7 @@
|
||||
|
||||
#define __ masm->
|
||||
|
||||
address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
|
||||
address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;
|
||||
|
||||
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register src, Register dst, Register count) {
|
||||
@ -138,6 +138,22 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
|
||||
}
|
||||
#endif
|
||||
|
||||
Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
|
||||
#ifndef _LP64
|
||||
__ push(thread);
|
||||
__ get_thread(thread);
|
||||
#endif
|
||||
|
||||
// Short-circuit if count == 0.
|
||||
Label done;
|
||||
__ testptr(count, count);
|
||||
__ jcc(Assembler::zero, done);
|
||||
|
||||
// Skip runtime call if no forwarded objects.
|
||||
Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
|
||||
__ testb(gc_state, ShenandoahHeap::UPDATEREFS);
|
||||
__ jcc(Assembler::zero, done);
|
||||
|
||||
__ pusha(); // push registers (overkill)
|
||||
#ifdef _LP64
|
||||
if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
|
||||
@ -155,6 +171,9 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec
|
||||
dst, count);
|
||||
#endif
|
||||
__ popa();
|
||||
|
||||
__ bind(done);
|
||||
NOT_LP64(__ pop(thread);)
|
||||
}
|
||||
}
|
||||
|
||||
@ -293,41 +312,23 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
|
||||
if (ShenandoahReadBarrier) {
|
||||
read_barrier_impl(masm, dst);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
|
||||
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
|
||||
void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) {
|
||||
assert(ShenandoahCASBarrier, "should be enabled");
|
||||
Label is_null;
|
||||
__ testptr(dst, dst);
|
||||
__ jcc(Assembler::zero, is_null);
|
||||
read_barrier_not_null_impl(masm, dst);
|
||||
resolve_forward_pointer_not_null(masm, dst);
|
||||
__ bind(is_null);
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
|
||||
if (ShenandoahReadBarrier) {
|
||||
read_barrier_not_null_impl(masm, dst);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
|
||||
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
|
||||
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) {
|
||||
assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled");
|
||||
__ movptr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
|
||||
if (ShenandoahWriteBarrier) {
|
||||
write_barrier_impl(masm, dst);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
|
||||
assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
|
||||
void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst) {
|
||||
assert(ShenandoahLoadRefBarrier, "Should be enabled");
|
||||
#ifdef _LP64
|
||||
Label done;
|
||||
|
||||
@ -335,8 +336,8 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
|
||||
__ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
|
||||
__ jccb(Assembler::zero, done);
|
||||
|
||||
// Heap is unstable, need to perform the read-barrier even if WB is inactive
|
||||
read_barrier_not_null(masm, dst);
|
||||
// Heap is unstable, need to perform the resolve even if LRB is inactive
|
||||
resolve_forward_pointer_not_null(masm, dst);
|
||||
|
||||
__ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
|
||||
__ jccb(Assembler::zero, done);
|
||||
@ -345,7 +346,7 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
|
||||
__ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
|
||||
}
|
||||
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));
|
||||
|
||||
if (dst != rax) {
|
||||
__ xchgptr(rax, dst); // Swap back obj with rax.
|
||||
@ -358,24 +359,18 @@ void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Reg
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
|
||||
if (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier) {
|
||||
if (ShenandoahStoreValEnqueueBarrier) {
|
||||
storeval_barrier_impl(masm, dst, tmp);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) {
|
||||
assert(UseShenandoahGC && (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled");
|
||||
assert(ShenandoahStoreValEnqueueBarrier, "should be enabled");
|
||||
|
||||
if (dst == noreg) return;
|
||||
|
||||
#ifdef _LP64
|
||||
if (ShenandoahStoreValEnqueueBarrier) {
|
||||
Label is_null;
|
||||
__ testptr(dst, dst);
|
||||
__ jcc(Assembler::zero, is_null);
|
||||
write_barrier_impl(masm, dst);
|
||||
__ bind(is_null);
|
||||
|
||||
// The set of registers to be saved+restored is the same as in the write-barrier above.
|
||||
// Those are the commonly used registers in the interpreter.
|
||||
__ pusha();
|
||||
@ -389,50 +384,54 @@ void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm,
|
||||
//__ pop_callee_saved_registers();
|
||||
__ popa();
|
||||
}
|
||||
if (ShenandoahStoreValReadBarrier) {
|
||||
read_barrier_impl(masm, dst);
|
||||
}
|
||||
#else
|
||||
Unimplemented();
|
||||
#endif
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst) {
|
||||
if (ShenandoahLoadRefBarrier) {
|
||||
Label done;
|
||||
__ testptr(dst, dst);
|
||||
__ jcc(Assembler::zero, done);
|
||||
load_reference_barrier_not_null(masm, dst);
|
||||
__ bind(done);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register dst, Address src, Register tmp1, Register tmp_thread) {
|
||||
bool on_oop = type == T_OBJECT || type == T_ARRAY;
|
||||
bool in_heap = (decorators & IN_HEAP) != 0;
|
||||
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
|
||||
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
|
||||
bool on_reference = on_weak || on_phantom;
|
||||
if (in_heap) {
|
||||
read_barrier_not_null(masm, src.base());
|
||||
}
|
||||
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
|
||||
if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
|
||||
const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
|
||||
NOT_LP64(__ get_thread(thread));
|
||||
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
|
||||
if (on_oop) {
|
||||
load_reference_barrier(masm, dst);
|
||||
|
||||
// Generate the SATB pre-barrier code to log the value of
|
||||
// the referent field in an SATB buffer.
|
||||
shenandoah_write_barrier_pre(masm /* masm */,
|
||||
noreg /* obj */,
|
||||
dst /* pre_val */,
|
||||
thread /* thread */,
|
||||
tmp1 /* tmp */,
|
||||
true /* tosca_live */,
|
||||
true /* expand_call */);
|
||||
if (ShenandoahKeepAliveBarrier && on_reference) {
|
||||
const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
|
||||
NOT_LP64(__ get_thread(thread));
|
||||
// Generate the SATB pre-barrier code to log the value of
|
||||
// the referent field in an SATB buffer.
|
||||
shenandoah_write_barrier_pre(masm /* masm */,
|
||||
noreg /* obj */,
|
||||
dst /* pre_val */,
|
||||
thread /* thread */,
|
||||
tmp1 /* tmp */,
|
||||
true /* tosca_live */,
|
||||
true /* expand_call */);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Address dst, Register val, Register tmp1, Register tmp2) {
|
||||
|
||||
bool on_oop = type == T_OBJECT || type == T_ARRAY;
|
||||
bool in_heap = (decorators & IN_HEAP) != 0;
|
||||
bool as_normal = (decorators & AS_NORMAL) != 0;
|
||||
if (in_heap) {
|
||||
write_barrier(masm, dst.base());
|
||||
}
|
||||
if (type == T_OBJECT || type == T_ARRAY) {
|
||||
if (on_oop && in_heap) {
|
||||
bool needs_pre_barrier = as_normal;
|
||||
|
||||
Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi);
|
||||
@ -475,44 +474,6 @@ void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef _LP64
|
||||
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm,
|
||||
Address obj1, jobject obj2) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm,
|
||||
Register obj1, jobject obj2) {
|
||||
Unimplemented();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
|
||||
__ cmpptr(op1, op2);
|
||||
if (ShenandoahAcmpBarrier) {
|
||||
Label done;
|
||||
__ jccb(Assembler::equal, done);
|
||||
read_barrier(masm, op1);
|
||||
read_barrier(masm, op2);
|
||||
__ cmpptr(op1, op2);
|
||||
__ bind(done);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register src1, Address src2) {
|
||||
__ cmpptr(src1, src2);
|
||||
if (ShenandoahAcmpBarrier) {
|
||||
Label done;
|
||||
__ jccb(Assembler::equal, done);
|
||||
__ movptr(rscratch2, src2);
|
||||
read_barrier(masm, src1);
|
||||
read_barrier(masm, rscratch2);
|
||||
__ cmpptr(src1, rscratch2);
|
||||
__ bind(done);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
|
||||
Register thread, Register obj,
|
||||
Register var_size_in_bytes,
|
||||
@ -562,28 +523,6 @@ void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
|
||||
__ verify_tlab();
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
|
||||
bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
|
||||
bool is_write = (decorators & ACCESS_WRITE) != 0;
|
||||
if (is_write) {
|
||||
if (oop_not_null) {
|
||||
write_barrier(masm, obj);
|
||||
} else {
|
||||
Label done;
|
||||
__ testptr(obj, obj);
|
||||
__ jcc(Assembler::zero, done);
|
||||
write_barrier(masm, obj);
|
||||
__ bind(done);
|
||||
}
|
||||
} else {
|
||||
if (oop_not_null) {
|
||||
read_barrier_not_null(masm, obj);
|
||||
} else {
|
||||
read_barrier(masm, obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Special Shenandoah CAS implementation that handles false negatives
|
||||
// due to concurrent evacuation.
|
||||
#ifndef _LP64
|
||||
@ -622,14 +561,14 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
|
||||
// Step 2. CAS had failed. This may be a false negative.
|
||||
//
|
||||
// The trouble comes when we compare the to-space pointer with the from-space
|
||||
// pointer to the same object. To resolve this, it will suffice to read both
|
||||
// oldval and the value from memory through the read barriers -- this will give
|
||||
// both to-space pointers. If they mismatch, then it was a legitimate failure.
|
||||
// pointer to the same object. To resolve this, it will suffice to resolve both
|
||||
// oldval and the value from memory -- this will give both to-space pointers.
|
||||
// If they mismatch, then it was a legitimate failure.
|
||||
//
|
||||
if (UseCompressedOops) {
|
||||
__ decode_heap_oop(tmp1);
|
||||
}
|
||||
read_barrier_impl(masm, tmp1);
|
||||
resolve_forward_pointer(masm, tmp1);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
__ movl(tmp2, oldval);
|
||||
@ -637,7 +576,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
|
||||
} else {
|
||||
__ movptr(tmp2, oldval);
|
||||
}
|
||||
read_barrier_impl(masm, tmp2);
|
||||
resolve_forward_pointer(masm, tmp2);
|
||||
|
||||
__ cmpptr(tmp1, tmp2);
|
||||
__ jcc(Assembler::notEqual, done, true);
|
||||
@ -646,8 +585,8 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
|
||||
//
|
||||
// Corner case: it may happen that somebody stored the from-space pointer
|
||||
// to memory while we were preparing for retry. Therefore, we can fail again
|
||||
// on retry, and so need to do this in loop, always re-reading the failure
|
||||
// witness through the read barrier.
|
||||
// on retry, and so need to do this in loop, always resolving the failure
|
||||
// witness.
|
||||
__ bind(retry);
|
||||
if (os::is_MP()) __ lock();
|
||||
if (UseCompressedOops) {
|
||||
@ -663,7 +602,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
|
||||
} else {
|
||||
__ movptr(tmp2, oldval);
|
||||
}
|
||||
read_barrier_impl(masm, tmp2);
|
||||
resolve_forward_pointer(masm, tmp2);
|
||||
|
||||
__ cmpptr(tmp1, tmp2);
|
||||
__ jcc(Assembler::equal, retry, true);
|
||||
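Illustration (not part of this changeset): the false-negative protocol the cmpxchg_oop assembler above implements, as a minimal C++ sketch. The Obj type, resolve(), and the toy forwarding scheme are assumptions for the sketch, not the HotSpot representation; the control flow (fast CAS, resolve-and-compare on failure, retry loop that re-resolves the failure witness) mirrors the comments above.

    #include <atomic>
    #include <cstdio>

    // Toy object with a Brooks-style forwardee slot; an unforwarded object
    // points to itself, a forwarded one points to its to-space copy.
    struct Obj { Obj* forwardee; };
    static Obj* resolve(Obj* p) { return p == nullptr ? nullptr : p->forwardee; }

    // CAS that treats a from-space/to-space alias of the same object as success.
    bool cas_oop(std::atomic<Obj*>* addr, Obj* oldval, Obj* newval) {
      Obj* witness = oldval;
      if (addr->compare_exchange_strong(witness, newval)) {
        return true;                           // Step 1: fast path succeeded
      }
      // Step 2: CAS failed; compare resolved (to-space) pointers.
      if (resolve(witness) != resolve(oldval)) {
        return false;                          // legitimate failure
      }
      // Step 3: same object, different copies -- retry in a loop, always
      // re-resolving the failure witness, since a from-space pointer may be
      // stored again while we prepare the retry.
      do {
        if (addr->compare_exchange_strong(witness, newval)) {
          return true;
        }
      } while (resolve(witness) == resolve(oldval));
      return false;
    }

    int main() {
      Obj to{};   to.forwardee = &to;          // to-space copy
      Obj from{}; from.forwardee = &to;        // from-space copy, forwarded
      std::atomic<Obj*> slot{&from};
      // CAS with the to-space pointer while the slot still holds the alias:
      printf("%d\n", (int)cas_oop(&slot, &to, nullptr));   // prints 1
    }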
@@ -811,7 +750,7 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, Shen

}

void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
__ bind(*stub->entry());

Label done;
@@ -828,7 +767,7 @@ void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, Sh
__ jcc(Assembler::zero, done);
}

write_barrier(ce->masm(), res);
load_reference_barrier_not_null(ce->masm(), res);

__ bind(done);
__ jmp(*stub->continuation());
@@ -898,16 +837,16 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss

#endif // COMPILER1

address ShenandoahBarrierSetAssembler::shenandoah_wb() {
assert(_shenandoah_wb != NULL, "need write barrier stub");
return _shenandoah_wb;
address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
return _shenandoah_lrb;
}

#define __ cgen->assembler()->

address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {
address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {
__ align(CodeEntryAlignment);
StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
address start = __ pc();

#ifdef _LP64
@@ -955,7 +894,7 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
__ push(r15);
save_vector_registers(cgen->assembler());
__ movptr(rdi, rax);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), rdi);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), rdi);
restore_vector_registers(cgen->assembler());
__ pop(r15);
__ pop(r14);
@@ -982,12 +921,12 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator*
#undef __

void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
if (ShenandoahLoadRefBarrier) {
int stub_code_size = 4096;
ResourceMark rm;
BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
CodeBuffer buf(bb);
StubCodeGenerator cgen(&buf);
_shenandoah_wb = generate_shenandoah_wb(&cgen);
_shenandoah_lrb = generate_shenandoah_lrb(&cgen);
}
}

@@ -29,7 +29,7 @@
#ifdef COMPILER1
class LIR_Assembler;
class ShenandoahPreBarrierStub;
class ShenandoahWriteBarrierStub;
class ShenandoahLoadReferenceBarrierStub;
class StubAssembler;
class StubCodeGenerator;
#endif
@@ -37,7 +37,7 @@ class StubCodeGenerator;
class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:

static address _shenandoah_wb;
static address _shenandoah_lrb;

void satb_write_barrier_pre(MacroAssembler* masm,
Register obj,
@@ -55,32 +55,30 @@ private:
bool tosca_live,
bool expand_call);

void read_barrier(MacroAssembler* masm, Register dst);
void read_barrier_impl(MacroAssembler* masm, Register dst);
void resolve_forward_pointer(MacroAssembler* masm, Register dst);
void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst);

void read_barrier_not_null(MacroAssembler* masm, Register dst);
void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);

void write_barrier(MacroAssembler* masm, Register dst);
void write_barrier_impl(MacroAssembler* masm, Register dst);
void load_reference_barrier_not_null(MacroAssembler* masm, Register dst);

void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp);

address generate_shenandoah_wb(StubCodeGenerator* cgen);
address generate_shenandoah_lrb(StubCodeGenerator* cgen);

void save_vector_registers(MacroAssembler* masm);
void restore_vector_registers(MacroAssembler* masm);

public:
static address shenandoah_wb();
static address shenandoah_lrb();

void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
#ifdef COMPILER1
void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
#endif

void load_reference_barrier(MacroAssembler* masm, Register dst);

void cmpxchg_oop(MacroAssembler* masm,
Register res, Address addr, Register oldval, Register newval,
bool exchange, Register tmp1, Register tmp2);
@@ -93,16 +91,6 @@ public:
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);

#ifndef _LP64
virtual void obj_equals(MacroAssembler* masm,
Address obj1, jobject obj2);
virtual void obj_equals(MacroAssembler* masm,
Register obj1, jobject obj2);
#endif

virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
virtual void obj_equals(MacroAssembler* masm, Register src1, Address src2);

virtual void tlab_allocate(MacroAssembler* masm,
Register thread, Register obj,
Register var_size_in_bytes,
@@ -110,8 +98,6 @@ public:
Register t1, Register t2,
Label& slow_case);

virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);

virtual void barrier_stubs_init();

};

@@ -107,6 +107,7 @@ LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRIt
__ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);

if (access.is_oop()) {
result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true);
if (ShenandoahSATBBarrier) {
pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
result /* pre_val */);

@@ -23,47 +23,7 @@

source_hpp %{
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
%}

instruct shenandoahRB(rRegP dst, rRegP src, rFlagsReg cr) %{
match(Set dst (ShenandoahReadBarrier src));
effect(DEF dst, USE src);
ins_cost(125); // XXX
format %{ "shenandoah_rb $dst, $src" %}
ins_encode %{
Register d = $dst$$Register;
Register s = $src$$Register;
__ movptr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
%}
ins_pipe(ialu_reg_mem);
%}

instruct shenandoahRBNarrow(rRegP dst, rRegN src) %{
predicate(UseCompressedOops && (Universe::narrow_oop_shift() == 0));
match(Set dst (ShenandoahReadBarrier (DecodeN src)));
effect(DEF dst, USE src);
ins_cost(125); // XXX
format %{ "shenandoah_rb $dst, $src" %}
ins_encode %{
Register d = $dst$$Register;
Register s = $src$$Register;
__ movptr(d, Address(r12, s, Address::times_1, ShenandoahBrooksPointer::byte_offset()));
%}
ins_pipe(ialu_reg_mem);
%}

instruct shenandoahRBNarrowShift(rRegP dst, rRegN src) %{
predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
match(Set dst (ShenandoahReadBarrier (DecodeN src)));
effect(DEF dst, USE src);
ins_cost(125); // XXX
format %{ "shenandoah_rb $dst, $src" %}
ins_encode %{
Register d = $dst$$Register;
Register s = $src$$Register;
__ movptr(d, Address(r12, s, Address::times_8, ShenandoahBrooksPointer::byte_offset()));
%}
ins_pipe(ialu_reg_mem);
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
%}

instruct compareAndSwapP_shenandoah(rRegI res,

@@ -1309,7 +1309,7 @@ void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
return implementation( NULL, ra_, true, NULL );
return MachNode::size(ra_);
}

@@ -336,20 +336,8 @@ static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) {

fclose(fh);
if (n < expected_assign_count || logical_cpu != which_logical_cpu) {
#ifdef DEBUG_LINUX_PROC_STAT
vm_fprintf(stderr, "[stat] read failed");
#endif
return OS_ERR;
}

#ifdef DEBUG_LINUX_PROC_STAT
vm_fprintf(stderr, "[stat] read "
UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n",
userTicks, niceTicks, systemTicks, idleTicks,
iowTicks, irqTicks, sirqTicks);
#endif

pticks->used = userTicks + niceTicks;
pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
pticks->total = userTicks + niceTicks + systemTicks + idleTicks +

@@ -1598,6 +1598,8 @@ void os::get_summary_cpu_info(char* buf, size_t buflen) {
}

void os::print_memory_info(outputStream* st) {
xsw_usage swap_usage;
size_t size = sizeof(swap_usage);

st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10);
@@ -1606,6 +1608,16 @@ void os::print_memory_info(outputStream* st) {
os::physical_memory() >> 10);
st->print("(" UINT64_FORMAT "k free)",
os::available_memory() >> 10);

if((sysctlbyname("vm.swapusage", &swap_usage, &size, NULL, 0) == 0) || (errno == ENOMEM)) {
if (size >= offset_of(xsw_usage, xsu_used)) {
st->print(", swap " UINT64_FORMAT "k",
((julong) swap_usage.xsu_total) >> 10);
st->print("(" UINT64_FORMAT "k free)",
((julong) swap_usage.xsu_avail) >> 10);
}
}

st->cr();
}

@@ -227,6 +227,82 @@ julong os::physical_memory() {
return phys_mem;
}

static uint64_t initial_total_ticks = 0;
static uint64_t initial_steal_ticks = 0;
static bool has_initial_tick_info = false;

static void next_line(FILE *f) {
int c;
do {
c = fgetc(f);
} while (c != '\n' && c != EOF);
}

bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
FILE* fh;
uint64_t userTicks, niceTicks, systemTicks, idleTicks;
// since at least kernel 2.6 : iowait: time waiting for I/O to complete
// irq: time servicing interrupts; softirq: time servicing softirqs
uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0;
// steal (since kernel 2.6.11): time spent in other OS when running in a virtualized environment
uint64_t stealTicks = 0;
// guest (since kernel 2.6.24): time spent running a virtual CPU for guest OS under the
// control of the Linux kernel
uint64_t guestNiceTicks = 0;
int logical_cpu = -1;
const int required_tickinfo_count = (which_logical_cpu == -1) ? 4 : 5;
int n;

memset(pticks, 0, sizeof(CPUPerfTicks));

if ((fh = fopen("/proc/stat", "r")) == NULL) {
return false;
}

if (which_logical_cpu == -1) {
n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
UINT64_FORMAT " " UINT64_FORMAT " ",
&userTicks, &niceTicks, &systemTicks, &idleTicks,
&iowTicks, &irqTicks, &sirqTicks,
&stealTicks, &guestNiceTicks);
} else {
// Move to next line
next_line(fh);

// find the line for requested cpu faster to just iterate linefeeds?
for (int i = 0; i < which_logical_cpu; i++) {
next_line(fh);
}

n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
UINT64_FORMAT " " UINT64_FORMAT " ",
&logical_cpu, &userTicks, &niceTicks,
&systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks,
&stealTicks, &guestNiceTicks);
}

fclose(fh);
if (n < required_tickinfo_count || logical_cpu != which_logical_cpu) {
return false;
}
pticks->used = userTicks + niceTicks;
pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
pticks->total = userTicks + niceTicks + systemTicks + idleTicks +
iowTicks + irqTicks + sirqTicks + stealTicks + guestNiceTicks;

if (n > required_tickinfo_count + 3) {
pticks->steal = stealTicks;
pticks->has_steal_ticks = true;
} else {
pticks->steal = 0;
pticks->has_steal_ticks = false;
}

return true;
}
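Illustration (not part of this changeset): the delta arithmetic that get_cpu_load() and the new print_steal_info() build on top of get_tick_information(), as a standalone C++ sketch. read_ticks() is a simplified assumption standing in for the VM function (it only handles the aggregate "cpu" line and needs a post-2.6.11 kernel for the steal field).

    #include <cinttypes>
    #include <cstdio>

    struct Ticks { uint64_t used, usedKernel, total, steal; };

    // Parse the first line of /proc/stat, the same fields the VM code reads.
    static bool read_ticks(Ticks* t) {
      FILE* fh = fopen("/proc/stat", "r");
      if (fh == nullptr) return false;
      uint64_t user, nice, sys, idle, iow, irq, sirq, steal;
      int n = fscanf(fh,
          "cpu %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64
          " %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64,
          &user, &nice, &sys, &idle, &iow, &irq, &sirq, &steal);
      fclose(fh);
      if (n < 8) return false;                 // steal field not available
      t->used = user + nice;
      t->usedKernel = sys + irq + sirq;
      t->total = user + nice + sys + idle + iow + irq + sirq + steal;
      t->steal = steal;
      return true;
    }

    int main() {
      Ticks a, b;
      if (!read_ticks(&a)) return 1;
      // ... let some time pass, then sample again ...
      if (!read_ticks(&b)) return 1;
      // Steal percentage over the interval, as print_steal_info() computes it:
      double steal_pct = (b.total == a.total) ? 0.0
          : 100.0 * (double)(b.steal - a.steal) / (double)(b.total - a.total);
      printf("steal since first sample: %.3f%%\n", steal_pct);
      return 0;
    }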
// Return true if user is running as root.

bool os::have_special_privileges() {
@@ -1977,6 +2053,8 @@ void os::print_os_info(outputStream* st) {
os::Linux::print_container_info(st);

os::Linux::print_virtualization_info(st);

os::Linux::print_steal_info(st);
}

// Try to identify popular distros.
@@ -2265,6 +2343,24 @@ void os::Linux::print_virtualization_info(outputStream* st) {
#endif
}

void os::Linux::print_steal_info(outputStream* st) {
if (has_initial_tick_info) {
CPUPerfTicks pticks;
bool res = os::Linux::get_tick_information(&pticks, -1);

if (res && pticks.has_steal_ticks) {
uint64_t steal_ticks_difference = pticks.steal - initial_steal_ticks;
uint64_t total_ticks_difference = pticks.total - initial_total_ticks;
double steal_ticks_perc = 0.0;
if (total_ticks_difference != 0) {
steal_ticks_perc = (double) steal_ticks_difference / total_ticks_difference;
}
st->print_cr("Steal ticks since vm start: " UINT64_FORMAT, steal_ticks_difference);
st->print_cr("Steal ticks percentage since vm start:%7.3f", steal_ticks_perc);
}
}
}

void os::print_memory_info(outputStream* st) {

st->print("Memory:");
@@ -4989,6 +5085,15 @@ void os::init(void) {

Linux::initialize_os_info();

os::Linux::CPUPerfTicks pticks;
bool res = os::Linux::get_tick_information(&pticks, -1);

if (res && pticks.has_steal_ticks) {
has_initial_tick_info = true;
initial_total_ticks = pticks.total;
initial_steal_ticks = pticks.steal;
}

// _main_thread points to the thread that created/loaded the JVM.
Linux::_main_thread = pthread_self();

@@ -109,12 +109,23 @@ class Linux {
static void print_full_memory_info(outputStream* st);
static void print_container_info(outputStream* st);
static void print_virtualization_info(outputStream* st);
static void print_steal_info(outputStream* st);
static void print_distro_info(outputStream* st);
static void print_libversion_info(outputStream* st);
static void print_proc_sys_info(outputStream* st);
static void print_ld_preload_file(outputStream* st);

public:
struct CPUPerfTicks {
uint64_t used;
uint64_t usedKernel;
uint64_t total;
uint64_t steal;
bool has_steal_ticks;
};

// which_logical_cpu=-1 returns accumulated ticks for all cpus.
static bool get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu);
static bool _stack_is_executable;
static void *dlopen_helper(const char *name, char *ebuf, int ebuflen);
static void *dll_load_in_vmthread(const char *name, char *ebuf, int ebuflen);

@@ -206,13 +206,6 @@ format: %d %s %c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %l
# define _SCANFMT_
#endif

struct CPUPerfTicks {
uint64_t used;
uint64_t usedKernel;
uint64_t total;
};

typedef enum {
CPU_LOAD_VM_ONLY,
CPU_LOAD_GLOBAL,
@@ -227,8 +220,8 @@ enum {

struct CPUPerfCounters {
int nProcs;
CPUPerfTicks jvmTicks;
CPUPerfTicks* cpus;
os::Linux::CPUPerfTicks jvmTicks;
os::Linux::CPUPerfTicks* cpus;
};

static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target);
@@ -287,80 +280,6 @@ static FILE* open_statfile(void) {
return f;
}

static void
next_line(FILE *f) {
int c;
do {
c = fgetc(f);
} while (c != '\n' && c != EOF);
}

/**
* Return the total number of ticks since the system was booted.
* If the usedTicks parameter is not NULL, it will be filled with
* the number of ticks spent on actual processes (user, system or
* nice processes) since system boot. Note that this is the total number
* of "executed" ticks on _all_ CPU:s, that is on a n-way system it is
* n times the number of ticks that has passed in clock time.
*
* Returns a negative value if the reading of the ticks failed.
*/
static OSReturn get_total_ticks(int which_logical_cpu, CPUPerfTicks* pticks) {
FILE* fh;
uint64_t userTicks, niceTicks, systemTicks, idleTicks;
uint64_t iowTicks = 0, irqTicks = 0, sirqTicks= 0;
int logical_cpu = -1;
const int expected_assign_count = (-1 == which_logical_cpu) ? 4 : 5;
int n;

if ((fh = open_statfile()) == NULL) {
return OS_ERR;
}
if (-1 == which_logical_cpu) {
n = fscanf(fh, "cpu " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
&userTicks, &niceTicks, &systemTicks, &idleTicks,
&iowTicks, &irqTicks, &sirqTicks);
} else {
// Move to next line
next_line(fh);

// find the line for requested cpu faster to just iterate linefeeds?
for (int i = 0; i < which_logical_cpu; i++) {
next_line(fh);
}

n = fscanf(fh, "cpu%u " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT,
&logical_cpu, &userTicks, &niceTicks,
&systemTicks, &idleTicks, &iowTicks, &irqTicks, &sirqTicks);
}

fclose(fh);
if (n < expected_assign_count || logical_cpu != which_logical_cpu) {
#ifdef DEBUG_LINUX_PROC_STAT
vm_fprintf(stderr, "[stat] read failed");
#endif
return OS_ERR;
}

#ifdef DEBUG_LINUX_PROC_STAT
vm_fprintf(stderr, "[stat] read "
UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " "
UINT64_FORMAT " " UINT64_FORMAT " " UINT64_FORMAT " \n",
userTicks, niceTicks, systemTicks, idleTicks,
iowTicks, irqTicks, sirqTicks);
#endif

pticks->used = userTicks + niceTicks;
pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
pticks->total = userTicks + niceTicks + systemTicks + idleTicks +
iowTicks + irqTicks + sirqTicks;

return OS_OK;
}

static int get_systemtype(void) {
static int procEntriesType = UNDETECTED;
DIR *taskDir;
@@ -391,7 +310,7 @@ static int read_ticks(const char* procfile, uint64_t* userTicks, uint64_t* syste
* Return the number of ticks spent in any of the processes belonging
* to the JVM on any CPU.
*/
static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
static OSReturn get_jvm_ticks(os::Linux::CPUPerfTicks* pticks) {
uint64_t userTicks;
uint64_t systemTicks;

@@ -404,7 +323,7 @@ static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
}

// get the total
if (get_total_ticks(-1, pticks) != OS_OK) {
if (! os::Linux::get_tick_information(pticks, -1)) {
return OS_ERR;
}

@@ -423,8 +342,8 @@ static OSReturn get_jvm_ticks(CPUPerfTicks* pticks) {
*/
static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, double* pkernelLoad, CpuLoadTarget target) {
uint64_t udiff, kdiff, tdiff;
CPUPerfTicks* pticks;
CPUPerfTicks tmp;
os::Linux::CPUPerfTicks* pticks;
os::Linux::CPUPerfTicks tmp;
double user_load;

*pkernelLoad = 0.0;
@@ -443,7 +362,7 @@ static double get_cpu_load(int which_logical_cpu, CPUPerfCounters* counters, dou
if (get_jvm_ticks(pticks) != OS_OK) {
return -1.0;
}
} else if (get_total_ticks(which_logical_cpu, pticks) != OS_OK) {
} else if (! os::Linux::get_tick_information(pticks, which_logical_cpu)) {
return -1.0;
}

@@ -584,19 +503,19 @@ CPUPerformanceInterface::CPUPerformance::CPUPerformance() {
}

bool CPUPerformanceInterface::CPUPerformance::initialize() {
size_t tick_array_size = (_counters.nProcs +1) * sizeof(CPUPerfTicks);
_counters.cpus = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
size_t tick_array_size = (_counters.nProcs +1) * sizeof(os::Linux::CPUPerfTicks);
_counters.cpus = (os::Linux::CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
if (NULL == _counters.cpus) {
return false;
}
memset(_counters.cpus, 0, tick_array_size);

// For the CPU load total
get_total_ticks(-1, &_counters.cpus[_counters.nProcs]);
os::Linux::get_tick_information(&_counters.cpus[_counters.nProcs], -1);

// For each CPU
for (int i = 0; i < _counters.nProcs; i++) {
get_total_ticks(i, &_counters.cpus[i]);
os::Linux::get_tick_information(&_counters.cpus[i], i);
}
// For JVM load
get_jvm_ticks(&_counters.jvmTicks);

@@ -66,7 +66,7 @@ bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,

if (ret_frame.is_interpreted_frame()) {
frame::ijava_state* istate = ret_frame.get_ijava_state();
if (!((Method*)(istate->method))->is_metaspace_object()) {
if (MetaspaceObj::is_valid((Method*)(istate->method)) == false) {
return false;
}
uint64_t reg_bcp = uc->uc_mcontext.regs->gpr[14/*R14_bcp*/];

@@ -63,7 +63,8 @@ bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,

if (ret_frame.is_interpreted_frame()) {
frame::z_ijava_state* istate = ret_frame.ijava_state_unchecked();
if ((stack_base() >= (address)istate && (address)istate > stack_end()) || !((Method*)(istate->method))->is_metaspace_object()) {
if ((stack_base() >= (address)istate && (address)istate > stack_end()) ||
MetaspaceObj::is_valid((Method*)(istate->method)) == false) {
return false;
}
uint64_t reg_bcp = uc->uc_mcontext.gregs[13/*Z_BCP*/];

@@ -777,8 +777,7 @@ bool InstructForm::captures_bottom_type(FormDict &globals) const {
!strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
!strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
!strcmp(_matrule->_rChild->_opType,"ShenandoahReadBarrier"))) return true;
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN"))) return true;
else if ( is_ideal_load() == Form::idealP ) return true;
else if ( is_ideal_store() != Form::none ) return true;

@@ -3506,7 +3505,6 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
"ClearArray",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
"ShenandoahReadBarrier",
"LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
};
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);

@@ -218,7 +218,7 @@ public:

void java_lang_String::set_compact_strings(bool value) {
CompactStringsFixup fix(value);
InstanceKlass::cast(SystemDictionary::String_klass())->do_local_static_fields(&fix);
SystemDictionary::String_klass()->do_local_static_fields(&fix);
}

Handle java_lang_String::basic_create(int length, bool is_latin1, TRAPS) {
@@ -3991,6 +3991,48 @@ int java_lang_System::in_offset_in_bytes() { return static_in_offset; }
int java_lang_System::out_offset_in_bytes() { return static_out_offset; }
int java_lang_System::err_offset_in_bytes() { return static_err_offset; }

// Support for jdk_internal_misc_UnsafeConstants
//
class UnsafeConstantsFixup : public FieldClosure {
private:
int _address_size;
int _page_size;
bool _big_endian;
bool _use_unaligned_access;
public:
UnsafeConstantsFixup() {
// round up values for all static final fields
_address_size = sizeof(void*);
_page_size = os::vm_page_size();
_big_endian = LITTLE_ENDIAN_ONLY(false) BIG_ENDIAN_ONLY(true);
_use_unaligned_access = UseUnalignedAccesses;
}

void do_field(fieldDescriptor* fd) {
oop mirror = fd->field_holder()->java_mirror();
assert(mirror != NULL, "UnsafeConstants must have mirror already");
assert(fd->field_holder() == SystemDictionary::UnsafeConstants_klass(), "Should be UnsafeConstants");
assert(fd->is_final(), "fields of UnsafeConstants must be final");
assert(fd->is_static(), "fields of UnsafeConstants must be static");
if (fd->name() == vmSymbols::address_size_name()) {
mirror->int_field_put(fd->offset(), _address_size);
} else if (fd->name() == vmSymbols::page_size_name()) {
mirror->int_field_put(fd->offset(), _page_size);
} else if (fd->name() == vmSymbols::big_endian_name()) {
mirror->bool_field_put(fd->offset(), _big_endian);
} else if (fd->name() == vmSymbols::use_unaligned_access_name()) {
mirror->bool_field_put(fd->offset(), _use_unaligned_access);
} else {
assert(false, "unexpected UnsafeConstants field");
}
}
};

void jdk_internal_misc_UnsafeConstants::set_unsafe_constants() {
UnsafeConstantsFixup fixup;
SystemDictionary::UnsafeConstants_klass()->do_local_static_fields(&fixup);
}
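Illustration (not part of this changeset): the FieldClosure pattern that UnsafeConstantsFixup uses above, reduced to a standalone C++ sketch. A visitor is handed each static-field descriptor and patches a value slot by field name; Field, ConstantsFixup, and the hard-coded 4096 page size are simplified assumptions, not the VM types.

    #include <cstdio>
    #include <cstring>

    struct Field { const char* name; int* slot; };

    struct FieldClosure {
      virtual void do_field(Field* f) = 0;
      virtual ~FieldClosure() {}
    };

    struct ConstantsFixup : FieldClosure {
      void do_field(Field* f) override {
        // Dispatch on the field name, as the VM does via vmSymbols.
        if (strcmp(f->name, "ADDRESS_SIZE0") == 0) {
          *f->slot = (int)sizeof(void*);
        } else if (strcmp(f->name, "PAGE_SIZE") == 0) {
          *f->slot = 4096;   // stand-in for os::vm_page_size()
        }
      }
    };

    int main() {
      int addr_size = 0, page = 0;
      Field fields[] = { {"ADDRESS_SIZE0", &addr_size}, {"PAGE_SIZE", &page} };
      ConstantsFixup fix;
      for (Field& f : fields) fix.do_field(&f);   // "do_local_static_fields"
      printf("ADDRESS_SIZE0=%d PAGE_SIZE=%d\n", addr_size, page);
    }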
int java_lang_Class::_klass_offset;
int java_lang_Class::_array_klass_offset;
int java_lang_Class::_oop_size_offset;

@@ -81,6 +81,7 @@
f(java_lang_StackFrameInfo) \
f(java_lang_LiveStackFrameInfo) \
f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
f(jdk_internal_misc_UnsafeConstants) \
//end

#define BASIC_JAVA_CLASSES_DO(f) \
@@ -1483,6 +1484,15 @@ class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
};

// Interface to jdk.internal.misc.UnsafeConsants

class jdk_internal_misc_UnsafeConstants : AllStatic {
public:
static void set_unsafe_constants();
static void compute_offsets() { }
static void serialize_offsets(SerializeClosure* f) { }
};

// Use to declare fields that need to be injected into Java classes
// for the JVM to use. The name_index and signature_index are
// declared in vmSymbols. The may_be_java flag is used to declare

@@ -45,7 +45,7 @@ int ProtectionDomainCacheTable::index_for(Handle protection_domain) {
}

ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
: Hashtable<ClassLoaderWeakHandle, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
: Hashtable<WeakHandle<vm_class_loader_data>, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
{ _dead_entries = false;
_total_oops_removed = 0;
}
@@ -180,8 +180,8 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, uns
protection_domain->print_value_on(&ls);
ls.cr();
}
ClassLoaderWeakHandle w = ClassLoaderWeakHandle::create(protection_domain);
WeakHandle<vm_class_loader_data> w = WeakHandle<vm_class_loader_data>::create(protection_domain);
ProtectionDomainCacheEntry* p = new_entry(hash, w);
Hashtable<ClassLoaderWeakHandle, mtClass>::add_entry(index, p);
Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::add_entry(index, p);
return p;
}

@@ -35,18 +35,18 @@
// to dictionary.hpp pd_set for more information about how protection domain entries
// are used.
// This table is walked during GC, rather than the class loader data graph dictionaries.
class ProtectionDomainCacheEntry : public HashtableEntry<ClassLoaderWeakHandle, mtClass> {
class ProtectionDomainCacheEntry : public HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass> {
friend class VMStructs;
public:
oop object();
oop object_no_keepalive();

ProtectionDomainCacheEntry* next() {
return (ProtectionDomainCacheEntry*)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next();
return (ProtectionDomainCacheEntry*)HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass>::next();
}

ProtectionDomainCacheEntry** next_addr() {
return (ProtectionDomainCacheEntry**)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next_addr();
return (ProtectionDomainCacheEntry**)HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass>::next_addr();
}

void verify();
@@ -61,21 +61,21 @@ class ProtectionDomainCacheEntry : public HashtableEntry<ClassLoaderWeakHandle,
// we only need to iterate over this set.
// The amount of different protection domains used is typically magnitudes smaller
// than the number of system dictionary entries (loaded classes).
class ProtectionDomainCacheTable : public Hashtable<ClassLoaderWeakHandle, mtClass> {
class ProtectionDomainCacheTable : public Hashtable<WeakHandle<vm_class_loader_data>, mtClass> {
friend class VMStructs;
private:
ProtectionDomainCacheEntry* bucket(int i) const {
return (ProtectionDomainCacheEntry*) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket(i);
return (ProtectionDomainCacheEntry*) Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::bucket(i);
}

// The following method is not MT-safe and must be done under lock.
ProtectionDomainCacheEntry** bucket_addr(int i) {
return (ProtectionDomainCacheEntry**) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket_addr(i);
return (ProtectionDomainCacheEntry**) Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::bucket_addr(i);
}

ProtectionDomainCacheEntry* new_entry(unsigned int hash, ClassLoaderWeakHandle protection_domain) {
ProtectionDomainCacheEntry* new_entry(unsigned int hash, WeakHandle<vm_class_loader_data> protection_domain) {
ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*)
Hashtable<ClassLoaderWeakHandle, mtClass>::new_entry(hash, protection_domain);
Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::new_entry(hash, protection_domain);
return entry;
}

@@ -177,6 +177,7 @@ class OopStorage;
do_klass(AssertionStatusDirectives_klass, java_lang_AssertionStatusDirectives ) \
do_klass(StringBuffer_klass, java_lang_StringBuffer ) \
do_klass(StringBuilder_klass, java_lang_StringBuilder ) \
do_klass(UnsafeConstants_klass, jdk_internal_misc_UnsafeConstants ) \
do_klass(internal_Unsafe_klass, jdk_internal_misc_Unsafe ) \
do_klass(module_Modules_klass, jdk_internal_module_Modules ) \
\

@@ -2060,7 +2060,9 @@ void ClassVerifier::class_format_error(const char* msg, ...) {
ss.vprint(msg, va);
va_end(va);
if (!_method.is_null()) {
ss.print(" in method %s", _method->name_and_sig_as_C_string());
ss.print(" in method '");
_method->print_external_name(&ss);
ss.print("'");
}
_message = ss.as_string();
}

@@ -446,6 +446,10 @@
template(module_entry_name, "module_entry") \
template(resolved_references_name, "<resolved_references>") \
template(init_lock_name, "<init_lock>") \
template(address_size_name, "ADDRESS_SIZE0") \
template(page_size_name, "PAGE_SIZE") \
template(big_endian_name, "BIG_ENDIAN") \
template(use_unaligned_access_name, "UNALIGNED_ACCESS") \
\
/* name symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, template, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@@ -1070,6 +1074,9 @@
do_intrinsic(_updateByteBufferAdler32, java_util_zip_Adler32, updateByteBuffer_A_name, updateByteBuffer_signature, F_SN) \
do_name( updateByteBuffer_A_name, "updateByteBuffer") \
\
/* support for UnsafeConstants */ \
do_class(jdk_internal_misc_UnsafeConstants, "jdk/internal/misc/UnsafeConstants") \
\
/* support for Unsafe */ \
do_class(jdk_internal_misc_Unsafe, "jdk/internal/misc/Unsafe") \
\

@@ -780,13 +780,14 @@ void CodeCache::increment_unloading_cycle() {
CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
: _is_unloading_behaviour(is_alive)
{
_saved_behaviour = IsUnloadingBehaviour::current();
IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
increment_unloading_cycle();
DependencyContext::cleaning_start();
}

CodeCache::UnloadingScope::~UnloadingScope() {
IsUnloadingBehaviour::set_current(NULL);
IsUnloadingBehaviour::set_current(_saved_behaviour);
DependencyContext::cleaning_end();
}

@@ -170,6 +170,7 @@ class CodeCache : AllStatic {
// "unloading_occurred" controls whether metadata should be cleaned because of class unloading.
class UnloadingScope: StackObj {
ClosureIsUnloadingBehaviour _is_unloading_behaviour;
IsUnloadingBehaviour* _saved_behaviour;

public:
UnloadingScope(BoolObjectClosure* is_alive);
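Illustration (not part of this changeset): the UnloadingScope change above is a save/restore RAII fix; the destructor now restores the previously installed behaviour instead of resetting the global to NULL. A minimal C++ sketch of the pattern, with IsUnloadingBehaviour and g_current as simplified stand-ins for the VM's strategy interface and its static current() slot:

    #include <cassert>

    struct IsUnloadingBehaviour { /* strategy interface */ };
    static IsUnloadingBehaviour* g_current = nullptr;   // IsUnloadingBehaviour::current()

    struct ScopedBehaviour {
      IsUnloadingBehaviour* _saved;                     // plays the role of _saved_behaviour
      explicit ScopedBehaviour(IsUnloadingBehaviour* b) : _saved(g_current) { g_current = b; }
      ~ScopedBehaviour() { g_current = _saved; }        // restore, not "g_current = nullptr"
    };

    int main() {
      IsUnloadingBehaviour outer, inner;
      {
        ScopedBehaviour s1(&outer);
        {
          ScopedBehaviour s2(&inner);
          assert(g_current == &inner);
        }
        assert(g_current == &outer);                    // survives the nested scope
      }
      assert(g_current == nullptr);
    }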
@@ -135,11 +135,6 @@ CompileLog** CompileBroker::_compiler2_logs = NULL;
volatile jint CompileBroker::_compilation_id     = 0;
volatile jint CompileBroker::_osr_compilation_id = 0;

// Debugging information
int  CompileBroker::_last_compile_type  = no_compile;
int  CompileBroker::_last_compile_level = CompLevel_none;
char CompileBroker::_last_method_compiled[CompileBroker::name_buffer_length];

// Performance counters
PerfCounter* CompileBroker::_perf_total_compilation = NULL;
PerfCounter* CompileBroker::_perf_osr_compilation = NULL;

@@ -577,8 +572,6 @@ CompilerCounters::CompilerCounters() {
//
// Initialize the Compilation object
void CompileBroker::compilation_init_phase1(TRAPS) {
  _last_method_compiled[0] = '\0';

  // No need to initialize compilation system if we do not use it.
  if (!UseCompiler) {
    return;

@@ -2032,8 +2025,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
    // Look up matching directives
    directive = DirectivesStack::getMatchingDirective(method, comp);

    // Save information about this method in case of failure.
    set_last_compile(thread, method, is_osr, task_level);
    // Update compile information when using perfdata.
    if (UsePerfData) {
      update_compile_perf_data(thread, method, is_osr);
    }

    DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
  }

@@ -2264,58 +2259,49 @@ void CompileBroker::handle_full_code_cache(int code_blob_type) {
}

// ------------------------------------------------------------------
// CompileBroker::set_last_compile
// CompileBroker::update_compile_perf_data
//
// Record this compilation for debugging purposes.
void CompileBroker::set_last_compile(CompilerThread* thread, const methodHandle& method, bool is_osr, int comp_level) {
void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) {
  ResourceMark rm;
  char* method_name = method->name()->as_C_string();
  strncpy(_last_method_compiled, method_name, CompileBroker::name_buffer_length);
  _last_method_compiled[CompileBroker::name_buffer_length - 1] = '\0'; // ensure null terminated
  char current_method[CompilerCounters::cmname_buffer_length];
  size_t maxLen = CompilerCounters::cmname_buffer_length;

  if (UsePerfData) {
    const char* class_name = method->method_holder()->name()->as_C_string();
  const char* class_name = method->method_holder()->name()->as_C_string();

    size_t s1len = strlen(class_name);
    size_t s2len = strlen(method_name);
  size_t s1len = strlen(class_name);
  size_t s2len = strlen(method_name);

    // check if we need to truncate the string
    if (s1len + s2len + 2 > maxLen) {
  // check if we need to truncate the string
  if (s1len + s2len + 2 > maxLen) {

      // the strategy is to lop off the leading characters of the
      // class name and the trailing characters of the method name.
    // the strategy is to lop off the leading characters of the
    // class name and the trailing characters of the method name.

      if (s2len + 2 > maxLen) {
        // lop off the entire class name string, let snprintf handle
        // truncation of the method name.
        class_name += s1len; // null string
      }
      else {
        // lop off the extra characters from the front of the class name
        class_name += ((s1len + s2len + 2) - maxLen);
      }
    if (s2len + 2 > maxLen) {
      // lop off the entire class name string, let snprintf handle
      // truncation of the method name.
      class_name += s1len; // null string
    }
    else {
      // lop off the extra characters from the front of the class name
      class_name += ((s1len + s2len + 2) - maxLen);
    }

      jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);
    }

  jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);

  int last_compile_type = normal_compile;
  if (CICountOSR && is_osr) {
    _last_compile_type = osr_compile;
  } else {
    _last_compile_type = normal_compile;
    last_compile_type = osr_compile;
  }
  _last_compile_level = comp_level;

  if (UsePerfData) {
    CompilerCounters* counters = thread->counters();
    counters->set_current_method(current_method);
    counters->set_compile_type((jlong)_last_compile_type);
  }
  CompilerCounters* counters = thread->counters();
  counters->set_current_method(current_method);
  counters->set_compile_type((jlong) last_compile_type);
}
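For reference, the truncation strategy spelled out in the comments above can be exercised in isolation. A hedged, self-contained sketch using only the C library (format_current_method and the buffer size are illustrative, not part of the JDK sources):

#include <cstdio>
#include <cstring>

// Build "class method" into buf of size max_len, lopping off the front of the
// class name first and, if still too long, letting snprintf cut the method.
static void format_current_method(char* buf, size_t max_len,
                                  const char* class_name,
                                  const char* method_name) {
  size_t s1len = strlen(class_name);
  size_t s2len = strlen(method_name);
  if (s1len + s2len + 2 > max_len) {            // +2: separating space and NUL
    if (s2len + 2 > max_len) {
      class_name += s1len;                      // drop the class name entirely
    } else {
      class_name += (s1len + s2len + 2) - max_len; // drop the front of it
    }
  }
  snprintf(buf, max_len, "%s %s", class_name, method_name);
}

int main() {
  char buf[16];
  format_current_method(buf, sizeof(buf), "java/lang/String", "hashCode");
  printf("%s\n", buf); // prints "String hashCode": class name cut from the left
  return 0;
}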

// ------------------------------------------------------------------
// CompileBroker::push_jni_handle_block
//

@@ -2618,21 +2604,6 @@ void CompileBroker::print_times(bool per_compiler, bool aggregate) {
  tty->print_cr("    nmethod total size   : %8d bytes", nmethods_size);
}

// Debugging output for failure
void CompileBroker::print_last_compile() {
  if (_last_compile_level != CompLevel_none &&
      compiler(_last_compile_level) != NULL &&
      _last_compile_type != no_compile) {
    if (_last_compile_type == osr_compile) {
      tty->print_cr("Last parse: [osr]%d+++(%d) %s",
                    _osr_compilation_id, _last_compile_level, _last_method_compiled);
    } else {
      tty->print_cr("Last parse: %d+++(%d) %s",
                    _compilation_id, _last_compile_level, _last_method_compiled);
    }
  }
}

// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream *out) {
  if (out == NULL) out = tty;

@@ -173,10 +173,6 @@ class CompileBroker: AllStatic {
  static volatile jint _compilation_id;
  static volatile jint _osr_compilation_id;

  static int  _last_compile_type;
  static int  _last_compile_level;
  static char _last_method_compiled[name_buffer_length];

  static CompileQueue* _c2_compile_queue;
  static CompileQueue* _c1_compile_queue;

@@ -254,7 +250,8 @@ class CompileBroker: AllStatic {
  static void invoke_compiler_on_method(CompileTask* task);
  static void post_compile(CompilerThread* thread, CompileTask* task, bool success, ciEnv* ci_env,
                           int compilable, const char* failure_reason);
  static void set_last_compile(CompilerThread *thread, const methodHandle& method, bool is_osr, int comp_level);
  static void update_compile_perf_data(CompilerThread *thread, const methodHandle& method, bool is_osr);

  static void push_jni_handle_block();
  static void pop_jni_handle_block();
  static void collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task);

@@ -382,9 +379,6 @@ public:
  // Print a detailed accounting of compilation time
  static void print_times(bool per_compiler = true, bool aggregate = true);

  // Debugging output for failure
  static void print_last_compile();

  // compiler name for debugging
  static const char* compiler_name(int comp_level);

@@ -154,6 +154,11 @@ void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_region
  reset_from_card_cache(start_idx, num_regions);
}

Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
  Ticks start = Ticks::now();
  workers()->run_task(task, workers()->active_workers());
  return Ticks::now() - start;
}
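The new run_task() helper packages the measure-around-run pattern so callers below can record a gang task's wall time directly. A rough standalone analogue using std::chrono (illustrative only; HotSpot's Ticks/Tickspan are its own timing types, and the run_task name is simply reused for the sketch):

#include <chrono>
#include <cstdio>

// Run a callable and return its elapsed wall time, mirroring the shape of
// G1CollectedHeap::run_task() above.
template <typename Task>
static std::chrono::duration<double> run_task(Task&& task) {
  auto start = std::chrono::steady_clock::now();
  task();
  return std::chrono::steady_clock::now() - start;
}

int main() {
  auto elapsed = run_task([] {
    volatile long sum = 0;
    for (long i = 0; i < 1000000; i++) sum += i; // stand-in workload
  });
  printf("task took %.3f ms\n", elapsed.count() * 1000.0);
  return 0;
}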

HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {

@@ -2242,12 +2247,12 @@ void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
  _hrm->par_iterate(cl, hrclaimer, 0);
}

void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
  _collection_set.iterate(cl);
}

void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
  _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, uint worker_id) {
  _collection_set.iterate_incremental_part_from(cl, worker_id, workers()->active_workers());
}

HeapWord* G1CollectedHeap::block_start(const void* addr) const {

@@ -2484,7 +2489,7 @@ public:

void G1CollectedHeap::print_cset_rsets() {
  PrintRSetsClosure cl("Printing CSet RSets");
  collection_set_iterate(&cl);
  collection_set_iterate_all(&cl);
}

void G1CollectedHeap::print_all_rsets() {

@@ -2495,8 +2500,8 @@ void G1CollectedHeap::print_all_rsets() {

G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {

  size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
  size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
  size_t eden_used_bytes = _eden.used_bytes();
  size_t survivor_used_bytes = _survivor.used_bytes();
  size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();

  size_t eden_capacity_bytes =

@@ -2880,15 +2885,18 @@ void G1CollectedHeap::start_new_collection_set() {
  phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
}

void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms){
  policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
  evacuation_info.set_collectionset_regions(collection_set()->region_length());
void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {

  _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
  evacuation_info.set_collectionset_regions(collection_set()->region_length() +
                                            collection_set()->optional_region_length());

  _cm->verify_no_collection_set_oops();

  if (_hr_printer.is_active()) {
    G1PrintCollectionSetClosure cl(&_hr_printer);
    _collection_set.iterate(&cl);
    _collection_set.iterate_optional(&cl);
  }
}

@@ -3060,9 +3068,10 @@ bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_
        pre_evacuate_collection_set(evacuation_info);

        // Actually do the work...
        evacuate_collection_set(&per_thread_states);
        evacuate_optional_collection_set(&per_thread_states);

        evacuate_initial_collection_set(&per_thread_states);
        if (_collection_set.optional_region_length() != 0) {
          evacuate_optional_collection_set(&per_thread_states);
        }
        post_evacuate_collection_set(evacuation_info, &per_thread_states);

        start_new_collection_set();

@@ -3088,7 +3097,8 @@ bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_

        double sample_end_time_sec = os::elapsedTime();
        double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
        size_t total_cards_scanned = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards);
        size_t total_cards_scanned = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ScanRSScannedCards) +
                                     phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanRS, G1GCPhaseTimes::ScanRSScannedCards);
        policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
      }

@@ -3192,86 +3202,6 @@ void G1ParEvacuateFollowersClosure::do_void() {
  } while (!offer_termination());
}

class G1ParTask : public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadStateSet* _pss;
  RefToScanQueueSet* _queues;
  G1RootProcessor* _root_processor;
  TaskTerminator _terminator;
  uint _n_workers;

public:
  G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
    : AbstractGangTask("G1 collection"),
      _g1h(g1h),
      _pss(per_thread_states),
      _queues(task_queues),
      _root_processor(root_processor),
      _terminator(n_workers, _queues),
      _n_workers(n_workers)
  {}

  void work(uint worker_id) {
    if (worker_id >= _n_workers) return;  // no work needed this round

    double start_sec = os::elapsedTime();
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);

    {
      ResourceMark rm;
      HandleMark   hm;

      ReferenceProcessor* rp = _g1h->ref_processor_stw();

      G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
      pss->set_ref_discoverer(rp);

      double start_strong_roots_sec = os::elapsedTime();

      _root_processor->evacuate_roots(pss, worker_id);

      _g1h->rem_set()->oops_into_collection_set_do(pss, worker_id);

      double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;

      double term_sec = 0.0;
      size_t evac_term_attempts = 0;
      {
        double start = os::elapsedTime();
        G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, _terminator.terminator(), G1GCPhaseTimes::ObjCopy);
        evac.do_void();

        evac_term_attempts = evac.term_attempts();
        term_sec = evac.term_time();
        double elapsed_sec = os::elapsedTime() - start;

        G1GCPhaseTimes* p = _g1h->phase_times();
        p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);

        p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
                                          worker_id,
                                          pss->lab_waste_words() * HeapWordSize,
                                          G1GCPhaseTimes::ObjCopyLABWaste);
        p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
                                          worker_id,
                                          pss->lab_undo_waste_words() * HeapWordSize,
                                          G1GCPhaseTimes::ObjCopyLABUndoWaste);

        p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
        p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
      }

      assert(pss->queue_is_empty(), "should be empty");

      // Close the inner scope so that the ResourceMark and HandleMark
      // destructors are executed here and are included as part of the
      // "GC Worker Time".
    }
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
  }
};

void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
                                        bool class_unloading_occurred) {
  uint num_workers = workers()->active_workers();

@@ -3675,176 +3605,196 @@ void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_i
    double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
    phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
  }
}

void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  // Should G1EvacuationFailureALot be in effect for this GC?
  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)

  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");

  double start_par_time_sec = os::elapsedTime();
  double end_par_time_sec;

  {
    const uint n_workers = workers()->active_workers();
    G1RootProcessor root_processor(this, n_workers);
    G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);

    workers()->run_task(&g1_par_task);
    end_par_time_sec = os::elapsedTime();

    // Closing the inner scope will execute the destructor
    // for the G1RootProcessor object. We record the current
    // elapsed time before closing the scope so that time
    // taken for the destructor is NOT included in the
    // reported parallel time.
  }

  double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
  phase_times()->record_par_time(par_time_ms);

  double code_root_fixup_time_ms =
        (os::elapsedTime() - end_par_time_sec) * 1000.0;
  phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
}

class G1EvacuateOptionalRegionTask : public AbstractGangTask {
class G1EvacuateRegionsBaseTask : public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadStateSet* _per_thread_states;
  G1OptionalCSet* _optional;
  RefToScanQueueSet* _queues;
  ParallelTaskTerminator _terminator;
  RefToScanQueueSet* _task_queues;
  TaskTerminator _terminator;
  uint _num_workers;

  Tickspan trim_ticks(G1ParScanThreadState* pss) {
    Tickspan copy_time = pss->trim_ticks();
    pss->reset_trim_ticks();
    return copy_time;
  }

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuationRootClosures* root_cls = pss->closures();
    G1ScanObjsDuringScanRSClosure obj_cl(_g1h, pss);

    size_t scanned = 0;
    size_t claimed = 0;
    size_t skipped = 0;
    size_t used_memory = 0;

    Ticks    start = Ticks::now();
    Tickspan copy_time;

    for (uint i = _optional->current_index(); i < _optional->current_limit(); i++) {
      HeapRegion* hr = _optional->region_at(i);
      G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
      pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
      copy_time += trim_ticks(pss);

      G1ScanRSForRegionClosure scan_rs_cl(_g1h->rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
      scan_rs_cl.do_heap_region(hr);
      copy_time += trim_ticks(pss);
      scanned += scan_rs_cl.cards_scanned();
      claimed += scan_rs_cl.cards_claimed();
      skipped += scan_rs_cl.cards_skipped();

      // The chunk lists for this region are no longer needed.
      used_memory += pss->oops_into_optional_region(hr)->used_memory();
    }

    Tickspan scan_time = (Ticks::now() - start) - copy_time;
  void evacuate_live_objects(G1ParScanThreadState* pss,
                             uint worker_id,
                             G1GCPhaseTimes::GCParPhases objcopy_phase,
                             G1GCPhaseTimes::GCParPhases termination_phase) {
    G1GCPhaseTimes* p = _g1h->phase_times();
    p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
    p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());

    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, skipped, G1GCPhaseTimes::OptCSetSkippedCards);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    Ticks start = Ticks::now();
    G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
    G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, _terminator.terminator(), objcopy_phase);
    cl.do_void();

    assert(pss->queue_is_empty(), "should be empty");

    Tickspan evac_time = (Ticks::now() - start);
    G1GCPhaseTimes* p = _g1h->phase_times();
    p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
    assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
    p->record_or_add_time_secs(objcopy_phase, worker_id, evac_time.seconds() - cl.term_time());

    p->record_or_add_thread_work_item(objcopy_phase, worker_id, pss->lab_waste_words() * HeapWordSize, G1GCPhaseTimes::ObjCopyLABWaste);
    p->record_or_add_thread_work_item(objcopy_phase, worker_id, pss->lab_undo_waste_words() * HeapWordSize, G1GCPhaseTimes::ObjCopyLABUndoWaste);

    if (termination_phase == G1GCPhaseTimes::Termination) {
      p->record_time_secs(termination_phase, worker_id, cl.term_time());
      p->record_thread_work_item(termination_phase, worker_id, cl.term_attempts());
    } else {
      p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time());
      p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts());
    }
    assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation");
  }

public:
  G1EvacuateOptionalRegionTask(G1CollectedHeap* g1h,
                               G1ParScanThreadStateSet* per_thread_states,
                               G1OptionalCSet* cset,
                               RefToScanQueueSet* queues,
                               uint n_workers) :
    AbstractGangTask("G1 Evacuation Optional Region Task"),
    _g1h(g1h),
  virtual void start_work(uint worker_id) { }

  virtual void end_work(uint worker_id) { }

  virtual void scan_roots(G1ParScanThreadState* pss, uint worker_id) = 0;

  virtual void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) = 0;

public:
  G1EvacuateRegionsBaseTask(const char* name, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet* task_queues, uint num_workers) :
    AbstractGangTask(name),
    _g1h(G1CollectedHeap::heap()),
    _per_thread_states(per_thread_states),
    _optional(cset),
    _queues(queues),
    _terminator(n_workers, _queues) {
  }
    _task_queues(task_queues),
    _terminator(num_workers, _task_queues),
    _num_workers(num_workers)
  { }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark   hm;
    start_work(worker_id);

    G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
    pss->set_ref_discoverer(_g1h->ref_processor_stw());
    {
      ResourceMark rm;
      HandleMark   hm;

    scan_roots(pss, worker_id);
    evacuate_live_objects(pss, worker_id);
      G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
      pss->set_ref_discoverer(_g1h->ref_processor_stw());

      scan_roots(pss, worker_id);
      evacuate_live_objects(pss, worker_id);
    }

    end_work(worker_id);
  }
};

void G1CollectedHeap::evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset) {
  class G1MarkScope : public MarkScope {};
  G1MarkScope code_mark_scope;
class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask {
  G1RootProcessor* _root_processor;

  G1EvacuateOptionalRegionTask task(this, per_thread_states, ocset, _task_queues, workers()->active_workers());
  workers()->run_task(&task);
  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _root_processor->evacuate_roots(pss, worker_id);
    _g1h->rem_set()->update_rem_set(pss, worker_id);
    _g1h->rem_set()->scan_rem_set(pss, worker_id, G1GCPhaseTimes::ScanRS, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::CodeRoots);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination);
  }

  void start_work(uint worker_id) {
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, Ticks::now().seconds());
  }

  void end_work(uint worker_id) {
    _g1h->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, Ticks::now().seconds());
  }

public:
  G1EvacuateRegionsTask(G1CollectedHeap* g1h,
                        G1ParScanThreadStateSet* per_thread_states,
                        RefToScanQueueSet* task_queues,
                        G1RootProcessor* root_processor,
                        uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Regions", per_thread_states, task_queues, num_workers),
    _root_processor(root_processor)
  { }
};

void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  Tickspan task_time;
  const uint num_workers = workers()->active_workers();

  Ticks start_processing = Ticks::now();
  {
    G1RootProcessor root_processor(this, num_workers);
    G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
    task_time = run_task(&g1_par_task);
    // Closing the inner scope will execute the destructor for the G1RootProcessor object.
    // To extract its code root fixup time we measure total time of this scope and
    // subtract from the time the WorkGang task took.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  p->record_initial_evac_time(task_time.seconds() * 1000.0);
  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}

class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_rem_set(pss, worker_id, G1GCPhaseTimes::OptScanRS, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptCodeRoots);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                RefToScanQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};

void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
  class G1MarkScope : public MarkScope { };

  Tickspan task_time;

  Ticks start_processing = Ticks::now();
  {
    G1MarkScope code_mark_scope;
    G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
    task_time = run_task(&task);
    // See comment in evacuate_collection_set() for the reason of the scope.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}

void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  G1OptionalCSet optional_cset(&_collection_set, per_thread_states);
  if (optional_cset.is_empty()) {
    return;
  }

  if (evacuation_failed()) {
    return;
  }

  const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;

  double start_time_sec = os::elapsedTime();
  Ticks start = Ticks::now();

  while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {

  do {
    double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
    double time_left_ms = MaxGCPauseMillis - time_used_ms;

    if (time_left_ms < 0) {
      log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
    if (time_left_ms < 0 ||
        !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
      log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
                                _collection_set.optional_region_length(), time_left_ms);
      break;
    }

    optional_cset.prepare_evacuation(time_left_ms * _policy->optional_evacuation_fraction());
    if (optional_cset.prepare_failed()) {
      log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
      break;
    }
    evacuate_next_optional_regions(per_thread_states);
  }

    evacuate_optional_regions(per_thread_states, &optional_cset);
  _collection_set.abandon_optional_collection_set(per_thread_states);

    optional_cset.complete_evacuation();
    if (optional_cset.evacuation_failed()) {
      break;
    }
  } while (!optional_cset.is_empty());

  phase_times()->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
  phase_times()->record_or_add_optional_evac_time((Ticks::now() - start).seconds() * 1000.0);
}
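Both the removed do/while loop and the new while loop above follow the same shape: each round recomputes the remaining pause-time budget and asks the policy whether another batch of optional regions still fits, stopping once the budget is exhausted or nothing more can be selected. A standalone sketch of that control flow (all names and numbers are illustrative, not HotSpot code):

#include <chrono>
#include <cstdio>

int main() {
  const double budget_ms = 200.0;          // stand-in for MaxGCPauseMillis
  auto pause_start = std::chrono::steady_clock::now();
  int regions_left = 10;
  int rounds = 0;
  for (;;) {
    double used_ms = std::chrono::duration<double, std::milli>(
        std::chrono::steady_clock::now() - pause_start).count();
    double left_ms = budget_ms - used_ms;
    // Stop when out of time or when the "policy" hands out no more regions.
    if (left_ms < 0 || regions_left == 0) {
      break;
    }
    int selected = regions_left > 3 ? 3 : regions_left; // policy stand-in
    regions_left -= selected;
    rounds++;                              // one evacuation round per selection
  }
  printf("%d rounds\n", rounds);
  return 0;
}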

void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {

@@ -4259,15 +4209,14 @@ void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, G1Eva
  double free_cset_start_time = os::elapsedTime();

  {
    uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
    uint const num_regions = _collection_set.region_length();
    uint const num_chunks = MAX2(num_regions / G1FreeCollectionSetTask::chunk_size(), 1U);
    uint const num_workers = MIN2(workers()->active_workers(), num_chunks);

    G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);

    log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
                        cl.name(),
                        num_workers,
                        _collection_set.region_length());
                        cl.name(), num_workers, num_regions);
    workers()->run_task(&cl, num_workers);
  }
  phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);

@@ -4436,7 +4385,7 @@ public:

void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
  G1AbandonCollectionSetClosure cl;
  collection_set->iterate(&cl);
  collection_set_iterate_all(&cl);

  collection_set->clear();
  collection_set->stop_incremental_building();

@@ -4636,7 +4585,9 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,

  collection_set()->add_eden_region(alloc_region);
  increase_used(allocated_bytes);
  _eden.add_used_bytes(allocated_bytes);
  _hr_printer.retire(alloc_region);

  // We update the eden sizes here, when the region is retired,
  // instead of when it's allocated, since this is the point that its
  // used space has been recorded in _summary_bytes_used.

@@ -4693,6 +4644,9 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
  policy()->record_bytes_copied_during_gc(allocated_bytes);
  if (dest.is_old()) {
    old_set_add(alloc_region);
  } else {
    assert(dest.is_young(), "Retiring alloc region should be young(%d)", dest.value());
    _survivor.add_used_bytes(allocated_bytes);
  }

  bool const during_im = collector_state()->in_initial_mark_gc();

@@ -143,9 +143,8 @@ class G1CollectedHeap : public CollectedHeap {
  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParScanThreadStateSet;
  friend class G1ParTask;
  friend class G1EvacuateRegionsTask;
  friend class G1PLABAllocator;
  friend class G1PrepareCompactClosure;

  // Other related classes.
  friend class HeapRegionClaimer;

@@ -206,7 +205,7 @@ private:

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region(s).
  size_t _summary_bytes_used;
  volatile size_t _summary_bytes_used;

  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);

@@ -519,6 +518,10 @@ public:

  WorkGang* workers() const { return _workers; }

  // Runs the given AbstractGangTask with the current active workers, returning the
  // total time taken.
  Tickspan run_task(AbstractGangTask* task);

  G1Allocator* allocator() {
    return _allocator;
  }

@@ -738,11 +741,14 @@ private:

  void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);

  // Actually do the work of evacuating the collection set.
  void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
  // Actually do the work of evacuating the parts of the collection set.
  void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
  void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
  void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);
private:
  // Evacuate the next set of optional regions.
  void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);

public:
  void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info);
  void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);

@@ -1165,14 +1171,14 @@ public:
  void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                          HeapRegionClaimer* hrclaimer) const;

  // Iterate over the regions (if any) in the current collection set.
  void collection_set_iterate(HeapRegionClosure* blk);
  // Iterate over all regions currently in the current collection set.
  void collection_set_iterate_all(HeapRegionClosure* blk);

  // Iterate over the regions (if any) in the current collection set. Starts the
  // iteration over the entire collection set so that the start regions of a given
  // worker id over the set active_workers are evenly spread across the set of
  // collection set regions.
  void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
  // Iterate over the regions in the current increment of the collection set.
  // Starts the iteration so that the start regions of a given worker id over the
  // set active_workers are evenly spread across the set of collection set regions
  // to be iterated.
  void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id);

  // Returns the HeapRegion that contains addr. addr must not be NULL.
  template <class T>

@@ -1252,6 +1258,8 @@ public:

  uint eden_regions_count() const { return _eden.length(); }
  uint survivor_regions_count() const { return _survivor.length(); }
  size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
  size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
  uint young_regions_count() const { return _eden.length() + _survivor.length(); }
  uint old_regions_count() const { return _old_set.length(); }
  uint archive_regions_count() const { return _archive_set.length(); }

@@ -1420,7 +1428,7 @@ private:
  size_t _term_attempts;

  void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
  void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
  void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadState* _par_scan_state;

@@ -59,12 +59,11 @@ G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
  _collection_set_regions(NULL),
  _collection_set_cur_length(0),
  _collection_set_max_length(0),
  _optional_regions(NULL),
  _optional_region_length(0),
  _optional_region_max_length(0),
  _num_optional_regions(0),
  _bytes_used_before(0),
  _recorded_rs_lengths(0),
  _inc_build_state(Inactive),
  _inc_part_start(0),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_lengths(0),
  _inc_recorded_rs_lengths_diffs(0),

@@ -90,8 +89,8 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
  assert((size_t) young_region_length() == _collection_set_cur_length,
         "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);

  _old_region_length = 0;
  _optional_region_length = 0;
  _old_region_length = 0;
  free_optional_regions();
}

void G1CollectionSet::initialize(uint max_region_length) {

@@ -100,21 +99,8 @@ void G1CollectionSet::initialize(uint max_region_length) {
  _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
}

void G1CollectionSet::initialize_optional(uint max_length) {
  assert(_optional_regions == NULL, "Already initialized");
  assert(_optional_region_length == 0, "Already initialized");
  assert(_optional_region_max_length == 0, "Already initialized");
  _optional_region_max_length = max_length;
  _optional_regions = NEW_C_HEAP_ARRAY(HeapRegion*, _optional_region_max_length, mtGC);
}

void G1CollectionSet::free_optional_regions() {
  _optional_region_length = 0;
  _optional_region_max_length = 0;
  if (_optional_regions != NULL) {
    FREE_C_HEAP_ARRAY(HeapRegion*, _optional_regions);
    _optional_regions = NULL;
  }
  _num_optional_regions = 0;
}

void G1CollectionSet::clear_candidates() {

@@ -130,39 +116,32 @@ void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert_at_safepoint_on_vm_thread();

  assert(_inc_build_state == Active || hr->index_in_opt_cset() != G1OptionalCSet::InvalidCSetIndex,
  assert(_inc_build_state == Active,
         "Precondition, actively building cset or adding optional later on");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  assert(!hr->in_collection_set(), "should not already be in the collection set");
  _g1h->register_old_region_with_cset(hr);

  _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
  assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");

  _bytes_used_before += hr->used();
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_region_length += 1;
  _recorded_rs_lengths += hr->rem_set()->occupied();
  _old_region_length++;

  log_trace(gc, cset)("Added old region %d to collection set", hr->hrm_index());
  _g1h->old_set_remove(hr);
}

void G1CollectionSet::add_optional_region(HeapRegion* hr) {
  assert(!optional_is_full(), "Precondition, must have room left for this region");
  assert(hr->is_old(), "the region should be old");
  assert(!hr->in_collection_set(), "should not already be in the CSet");

  _g1h->register_optional_region_with_cset(hr);

  _optional_regions[_optional_region_length] = hr;
  uint index = _optional_region_length++;
  hr->set_index_in_opt_cset(index);

  log_trace(gc, cset)("Added region %d to optional collection set (%u)", hr->hrm_index(), _optional_region_length);
  hr->set_index_in_opt_cset(_num_optional_regions++);
}

// Initialize the per-collection-set information
void G1CollectionSet::start_incremental_building() {
  assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
  assert(_inc_build_state == Inactive, "Precondition");

@@ -173,7 +152,8 @@ void G1CollectionSet::start_incremental_building() {
  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms = 0.0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_build_state = Active;

  update_incremental_marker();
}

void G1CollectionSet::finalize_incremental_building() {

@@ -211,29 +191,48 @@ void G1CollectionSet::finalize_incremental_building() {
void G1CollectionSet::clear() {
  assert_at_safepoint_on_vm_thread();
  _collection_set_cur_length = 0;
  _optional_region_length = 0;
}

void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
  iterate_from(cl, 0, 1);
}

void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
  size_t len = _collection_set_cur_length;
  OrderAccess::loadload();
  if (len == 0) {
    return;
  }
  size_t start_pos = (worker_id * len) / total_workers;
  size_t cur_pos = start_pos;

  do {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos]);
  for (uint i = 0; i < len; i++) {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[i]);
    bool result = cl->do_heap_region(r);
    if (result) {
      cl->set_incomplete();
      return;
    }
  }
}

void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {
  assert_at_safepoint();

  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = _candidates->at(i);
    bool result = cl->do_heap_region(r);
    guarantee(!result, "Must not cancel iteration");
  }
}

void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
  assert_at_safepoint();

  size_t len = _collection_set_cur_length - _inc_part_start;
  if (len == 0) {
    return;
  }

  size_t start_pos = (worker_id * len) / total_workers;
  size_t cur_pos = start_pos;

  do {
    HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos + _inc_part_start]);
    bool result = cl->do_heap_region(r);
    guarantee(!result, "Must not cancel iteration");

    cur_pos++;
    if (cur_pos == len) {
      cur_pos = 0;
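The claiming scheme above gives each worker an evenly spread start offset into the current increment and wraps around, so workers begin on disjoint regions whenever there are at least as many regions as workers. A standalone sketch that prints the resulting visit order (illustrative names only, not HotSpot code):

#include <cstdio>

// Print the order in which one worker visits a length-`len` array slice,
// starting at an evenly spread offset and wrapping around, mirroring
// iterate_incremental_part_from() above.
static void visit_order(unsigned worker_id, unsigned total_workers, size_t len) {
  size_t start_pos = (worker_id * len) / total_workers;
  size_t cur_pos = start_pos;
  printf("worker %u:", worker_id);
  do {
    printf(" %zu", cur_pos);
    cur_pos++;
    if (cur_pos == len) {
      cur_pos = 0; // wrap around to the beginning of the increment
    }
  } while (cur_pos != start_pos);
  printf("\n");
}

int main() {
  for (unsigned w = 0; w < 3; w++) {
    visit_order(w, 3, 8); // three workers over eight regions
  }
  return 0;
}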
@@ -440,30 +439,6 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
  return time_remaining_ms;
}

void G1CollectionSet::add_as_old(HeapRegion* hr) {
  candidates()->pop_front(); // already have region via peek()
  _g1h->old_set_remove(hr);
  add_old_region(hr);
}

void G1CollectionSet::add_as_optional(HeapRegion* hr) {
  assert(_optional_regions != NULL, "Must not be called before array is allocated");
  candidates()->pop_front(); // already have region via peek()
  _g1h->old_set_remove(hr);
  add_optional_region(hr);
}

bool G1CollectionSet::optional_is_full() {
  assert(_optional_region_length <= _optional_region_max_length, "Invariant");
  return _optional_region_length == _optional_region_max_length;
}

void G1CollectionSet::clear_optional_region(const HeapRegion* hr) {
  assert(_optional_regions != NULL, "Must not be called before array is allocated");
  uint index = hr->index_in_opt_cset();
  _optional_regions[index] = NULL;
}

static int compare_region_idx(const uint a, const uint b) {
  if (a > b) {
    return 1;

@@ -476,87 +451,25 @@ static int compare_region_idx(const uint a, const uint b) {

void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();
  double predicted_old_time_ms = 0.0;
  double predicted_optional_time_ms = 0.0;
  double optional_threshold_ms = time_remaining_ms * _policy->optional_prediction_fraction();
  uint expensive_region_num = 0;

  if (collector_state()->in_mixed_phase()) {
    candidates()->verify();
    const uint min_old_cset_length = _policy->calc_min_old_cset_length();
    const uint max_old_cset_length = MAX2(min_old_cset_length, _policy->calc_max_old_cset_length());
    bool check_time_remaining = _policy->adaptive_young_list_length();

    initialize_optional(max_old_cset_length - min_old_cset_length);
    log_debug(gc, ergo, cset)("Start adding old regions for mixed gc. min %u regions, max %u regions, "
                              "time remaining %1.2fms, optional threshold %1.2fms",
                              min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);
    uint num_initial_old_regions;
    uint num_optional_old_regions;

    HeapRegion* hr = candidates()->peek_front();
    while (hr != NULL) {
      if (old_region_length() + optional_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). "
                                  "old %u regions, optional %u regions",
                                  old_region_length(), optional_region_length());
        break;
      }
    _policy->calculate_old_collection_set_regions(candidates(),
                                                  time_remaining_ms,
                                                  num_initial_old_regions,
                                                  num_optional_old_regions);

      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = candidates()->remaining_reclaimable_bytes();
      double reclaimable_percent = _policy->reclaimable_bytes_percent(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_percent <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
                                  "reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                  byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
                                  reclaimable_percent, G1HeapWastePercent);
        break;
      }
    // Prepare initial old regions.
    move_candidates_to_collection_set(num_initial_old_regions);

      double predicted_time_ms = predict_region_elapsed_time_ms(hr);
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      // Add regions to the old set until we reach the minimum amount
      if (old_region_length() < min_old_cset_length) {
        predicted_old_time_ms += predicted_time_ms;
        add_as_old(hr);
        // Record the number of regions added when no time remaining
        if (time_remaining_ms == 0.0) {
          expensive_region_num++;
        }
      } else {
        // In the non-auto-tuning case, we'll finish adding regions
        // to the CSet if we reach the minimum.
        if (!check_time_remaining) {
          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min).");
          break;
        }
        // Keep adding regions to the old set until we reach the optional threshold
        if (time_remaining_ms > optional_threshold_ms) {
          predicted_old_time_ms += predicted_time_ms;
          add_as_old(hr);
        } else if (time_remaining_ms > 0) {
          // Keep adding optional regions until time is up
          if (!optional_is_full()) {
            predicted_optional_time_ms += predicted_time_ms;
            add_as_optional(hr);
          } else {
            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (optional set full).");
            break;
          }
        } else {
          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high).");
          break;
        }
      }
      hr = candidates()->peek_front();
    }
    if (hr == NULL) {
      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
    // Prepare optional old regions for evacuation.
    uint candidate_idx = candidates()->cur_idx();
    for (uint i = 0; i < num_optional_old_regions; i++) {
      add_optional_region(candidates()->at(candidate_idx + i));
    }

    candidates()->verify();

@@ -564,99 +477,59 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {

  stop_incremental_building();

  log_debug(gc, ergo, cset)("Finish choosing CSet regions old: %u, optional: %u, "
                            "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
                            old_region_length(), optional_region_length(),
                            predicted_old_time_ms, predicted_optional_time_ms, time_remaining_ms);
  if (expensive_region_num > 0) {
    log_debug(gc, ergo, cset)("CSet contains %u old regions that were added although the predicted time was too high.",
                              expensive_region_num);
  }

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

  QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}

HeapRegion* G1OptionalCSet::region_at(uint index) {
  return _cset->optional_region_at(index);
}

void G1OptionalCSet::prepare_evacuation(double time_limit) {
  assert(_current_index == _current_limit, "Before prepare no regions should be ready for evac");

  uint prepared_regions = 0;
  double prediction_ms = 0;

  _prepare_failed = true;
  for (uint i = _current_index; i < _cset->optional_region_length(); i++) {
    HeapRegion* hr = region_at(i);
    prediction_ms += _cset->predict_region_elapsed_time_ms(hr);
    if (prediction_ms > time_limit) {
      log_debug(gc, cset)("Prepared %u regions for optional evacuation. Predicted time: %.3fms", prepared_regions, prediction_ms);
      return;
    }

    // This region will be included in the next optional evacuation.
    prepare_to_evacuate_optional_region(hr);
    prepared_regions++;
    _current_limit++;
    _prepare_failed = false;
void G1CollectionSet::move_candidates_to_collection_set(uint num_old_candidate_regions) {
  if (num_old_candidate_regions == 0) {
    return;
  }

  log_debug(gc, cset)("Prepared all %u regions for optional evacuation. Predicted time: %.3fms",
                      prepared_regions, prediction_ms);
}

bool G1OptionalCSet::prepare_failed() {
  return _prepare_failed;
}

void G1OptionalCSet::complete_evacuation() {
  _evacuation_failed = false;
  for (uint i = _current_index; i < _current_limit; i++) {
    HeapRegion* hr = region_at(i);
    _cset->clear_optional_region(hr);
    if (hr->evacuation_failed()) {
      _evacuation_failed = true;
    }
  uint candidate_idx = candidates()->cur_idx();
  for (uint i = 0; i < num_old_candidate_regions; i++) {
    HeapRegion* r = candidates()->at(candidate_idx + i);
    // This potentially optional candidate region is going to be an actual collection
    // set region. Clear the cset marker.
    _g1h->clear_in_cset(r);
    add_old_region(r);
  }
  _current_index = _current_limit;
  candidates()->remove(num_old_candidate_regions);

  candidates()->verify();
}

bool G1OptionalCSet::evacuation_failed() {
  return _evacuation_failed;
void G1CollectionSet::finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
  double time_remaining_ms = finalize_young_part(target_pause_time_ms, survivor);
  finalize_old_part(time_remaining_ms);
}

G1OptionalCSet::~G1OptionalCSet() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  while (!is_empty()) {
    // We want to return regions not evacuated to the collection set candidates
    // in reverse order to maintain the old order.
    HeapRegion* hr = _cset->remove_last_optional_region();
    assert(hr != NULL, "Should be valid region left");
    _pset->record_unused_optional_region(hr);
    g1h->old_set_add(hr);
    g1h->clear_in_cset(hr);
    hr->set_index_in_opt_cset(InvalidCSetIndex);
    _cset->candidates()->push_front(hr);
bool G1CollectionSet::finalize_optional_for_evacuation(double remaining_pause_time) {
  update_incremental_marker();

  uint num_selected_regions;
  _policy->calculate_optional_collection_set_regions(candidates(),
                                                     _num_optional_regions,
                                                     remaining_pause_time,
                                                     num_selected_regions);

  move_candidates_to_collection_set(num_selected_regions);

  _num_optional_regions -= num_selected_regions;

  stop_incremental_building();
  return num_selected_regions > 0;
}

void G1CollectionSet::abandon_optional_collection_set(G1ParScanThreadStateSet* pss) {
  for (uint i = 0; i < _num_optional_regions; i++) {
    HeapRegion* r = candidates()->at(candidates()->cur_idx() + i);
    pss->record_unused_optional_region(r);
    _g1h->clear_in_cset(r);
    r->clear_index_in_opt_cset();
  }
  _cset->free_optional_regions();
}

uint G1OptionalCSet::size() {
  return _cset->optional_region_length() - _current_index;
}

bool G1OptionalCSet::is_empty() {
  return size() == 0;
}

void G1OptionalCSet::prepare_to_evacuate_optional_region(HeapRegion* hr) {
  log_trace(gc, cset)("Adding region %u for optional evacuation", hr->hrm_index());
  G1CollectedHeap::heap()->clear_in_cset(hr);
  _cset->add_old_region(hr);
  free_optional_regions();
}

#ifdef ASSERT

@@ -38,11 +38,101 @@ class G1SurvivorRegions;
class HeapRegion;
class HeapRegionClosure;

// The collection set.
//
// The set of regions that are evacuated during an evacuation pause.
//
// At the end of a collection, before freeing the collection set, this set
// contains all regions that were evacuated during this collection:
//
// - survivor regions from the last collection (if any)
// - eden regions allocated by the mutator
// - old gen regions evacuated during mixed gc
//
// This set is built incrementally at mutator time as regions are retired, and
// if this had been a mixed gc, some additional (during gc) incrementally added
// old regions from the collection set candidates built during the concurrent
// cycle.
//
// A more detailed overview of how the collection set changes over time follows:
//
// 0) at the end of GC the survivor regions are added to this collection set.
// 1) the mutator incrementally adds eden regions as they retire
//
// ----- gc starts
//
// 2) prepare (finalize) young regions of the collection set for collection
//    - relabel the survivors as eden
//    - finish up the incremental building that happened at mutator time
//
// iff this is a young-only collection:
//
// a3) evacuate the current collection set in one "initial evacuation" phase
//
// iff this is a mixed collection:
//
// b3) calculate the set of old gen regions we may be able to collect in this
//     collection from the list of collection set candidates.
//     - one part is added to the current collection set
//     - the remainder regions are labeled as optional, and NOT yet added to the
//       collection set.
// b4) evacuate the current collection set in the "initial evacuation" phase
// b5) evacuate the optional regions in the "optional evacuation" phase. This is
//     done in increments (or rounds).
//     b5-1) add a few of the optional regions to the current collection set
//     b5-2) evacuate only these newly added optional regions. For this mechanism we
//           reuse the incremental collection set building infrastructure (used also at
//           mutator time).
//     b5-3) repeat from b5-1 until the policy determines we are done
//
// all collections
//
// 6) free the collection set (contains all regions now; empties collection set
//    afterwards)
// 7) add survivors to this collection set
//
// ----- gc ends
//
// goto 1)
//
// Examples of how the collection set might look over time:
//
// Legend:
// S = survivor, E = eden, O = old.
// |xxxx| = increment (with increment markers), containing four regions
//
// |SSSS|                  ... after step 0), with four survivor regions
// |SSSSEE|                ... at step 1), after retiring two eden regions
// |SSSSEEEE|              ... after step 1), after retiring four eden regions
// |EEEEEEEE|              ... after step 2)
//
// iff this is a young-only collection
//
// EEEEEEEE||              ... after step a3), after the initial evacuation phase
// ||                      ... after step 6)
// |SS|                    ... after step 7), with two survivor regions
//
// iff this is a mixed collection
//
// |EEEEEEEEOOOO|          ... after step b3), added four regions to be
//                             evacuated in the "initial evacuation" phase
// EEEEEEEEOOOO||          ... after step b4), the incremental part is empty
//                             after evacuation
// EEEEEEEEOOOO|OO|        ... after step b5-1), added two regions to be
//                             evacuated in the first round of the
//                             "optional evacuation" phase
// EEEEEEEEOOOOOO|O|       ... after step b5-1), added one region to be
//                             evacuated in the second round of the
//                             "optional evacuation" phase
// EEEEEEEEOOOOOOO||       ... after step b5), the complete collection set.
// ||                      ... after step 6)
// |SSS|                   ... after step 7), with three survivor regions
//
class G1CollectionSet {
  G1CollectedHeap* _g1h;
  G1Policy* _policy;

  // All old gen collection set candidate regions for the current mixed gc phase.
  // All old gen collection set candidate regions for the current mixed phase.
  G1CollectionSetCandidates* _candidates;

  uint _eden_region_length;

@@ -51,7 +141,7 @@ class G1CollectionSet {

  // The actual collection set as a set of region indices.
  // All entries in _collection_set_regions below _collection_set_cur_length are
  // assumed to be valid entries.
  // assumed to be part of the collection set.
  // We assume that at any time there is at most only one writer and (one or more)
  // concurrent readers. This means we are good with using storestore and loadload
  // barriers on the writer and reader respectively only.
@ -59,31 +149,33 @@ class G1CollectionSet {
|
||||
volatile size_t _collection_set_cur_length;
|
||||
size_t _collection_set_max_length;
|
||||
|
||||
// When doing mixed collections we can add old regions to the collection, which
|
||||
// can be collected if there is enough time. We call these optional regions and
|
||||
// the pointer to these regions are stored in the array below.
|
||||
HeapRegion** _optional_regions;
|
||||
uint _optional_region_length;
|
||||
uint _optional_region_max_length;
|
||||
// When doing mixed collections we can add old regions to the collection set, which
|
||||
// will be collected only if there is enough time. We call these optional regions.
|
||||
// This member records the current number of regions that are of that type that
|
||||
// correspond to the first x entries in the collection set candidates.
|
||||
uint _num_optional_regions;
|
||||
|
||||
// The number of bytes in the collection set before the pause. Set from
|
||||
// the incrementally built collection set at the start of an evacuation
|
||||
// pause, and incremented in finalize_old_part() when adding old regions
|
||||
// (if any) to the collection set.
|
||||
// pause, and updated as more regions are added to the collection set.
|
||||
size_t _bytes_used_before;
|
||||
|
||||
// The number of cards in the remembered set in the collection set. Set from
|
||||
// the incrementally built collection set at the start of an evacuation
|
||||
// pause, and updated as more regions are added to the collection set.
|
||||
size_t _recorded_rs_lengths;
|
||||
|
||||
// The associated information that is maintained while the incremental
|
||||
// collection set is being built with young regions. Used to populate
|
||||
// the recorded info for the evacuation pause.
|
||||
|
||||
enum CSetBuildType {
|
||||
Active, // We are actively building the collection set
|
||||
Inactive // We are not actively building the collection set
|
||||
};
|
||||
|
||||
CSetBuildType _inc_build_state;
|
||||
size_t _inc_part_start;
|
||||
|
||||
// The associated information that is maintained while the incremental
|
||||
// collection set is being built with *young* regions. Used to populate
|
||||
// the recorded info for the evacuation pause.
|
||||
|
||||
// The number of bytes in the incrementally built collection set.
|
||||
// Used to set _collection_set_bytes_used_before at the start of
|
||||
@ -113,22 +205,44 @@ class G1CollectionSet {
|
||||
// See the comment for _inc_recorded_rs_lengths_diffs.
|
||||
double _inc_predicted_elapsed_time_ms_diffs;
|
||||
|
||||
void set_recorded_rs_lengths(size_t rs_lengths);
|
||||
|
||||
G1CollectorState* collector_state();
|
||||
G1GCPhaseTimes* phase_times();
|
||||
|
||||
void verify_young_cset_indices() const NOT_DEBUG_RETURN;
|
||||
void add_as_optional(HeapRegion* hr);
|
||||
void add_as_old(HeapRegion* hr);
|
||||
bool optional_is_full();
|
||||
|
||||
double predict_region_elapsed_time_ms(HeapRegion* hr);
|
||||
|
||||
// Update the incremental collection set information when adding a region.
|
||||
void add_young_region_common(HeapRegion* hr);
|
||||
|
||||
// Add old region "hr" to the collection set.
|
||||
void add_old_region(HeapRegion* hr);
|
||||
void free_optional_regions();
|
||||
|
||||
// Add old region "hr" to optional collection set.
|
||||
void add_optional_region(HeapRegion* hr);
|
||||
|
||||
void move_candidates_to_collection_set(uint num_regions);
|
||||
|
||||
// Finalize the young part of the initial collection set. Relabel survivor regions
|
||||
// as Eden and calculate a prediction on how long the evacuation of all young regions
|
||||
// will take.
|
||||
double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
|
||||
// Perform any final calculations on the incremental collection set fields before we
|
||||
// can use them.
|
||||
void finalize_incremental_building();
|
||||
|
||||
// Select the old regions of the initial collection set and determine how many optional
|
||||
// regions we might be able to evacuate in this pause.
|
||||
void finalize_old_part(double time_remaining_ms);
|
||||
public:
|
||||
G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
|
||||
~G1CollectionSet();
|
||||
|
||||
// Initializes the collection set giving the maximum possible length of the collection set.
|
||||
void initialize(uint max_region_length);
|
||||
void initialize_optional(uint max_length);
|
||||
void free_optional_regions();
|
||||
|
||||
void clear_candidates();
|
||||
|
||||
@ -141,8 +255,6 @@ public:
|
||||
void init_region_lengths(uint eden_cset_region_length,
|
||||
uint survivor_cset_region_length);
|
||||
|
||||
void set_recorded_rs_lengths(size_t rs_lengths);
|
||||
|
||||
uint region_length() const { return young_region_length() +
|
||||
old_region_length(); }
|
||||
uint young_region_length() const { return eden_region_length() +
|
||||
@ -151,32 +263,29 @@ public:
|
||||
uint eden_region_length() const { return _eden_region_length; }
|
||||
uint survivor_region_length() const { return _survivor_region_length; }
|
||||
uint old_region_length() const { return _old_region_length; }
|
||||
uint optional_region_length() const { return _optional_region_length; }
|
||||
uint optional_region_length() const { return _num_optional_regions; }
|
||||
|
||||
// Reset the contents of the collection set.
|
||||
void clear();
|
||||
|
||||
// Incremental collection set support
|
||||
|
||||
// Initialize incremental collection set info.
|
||||
void start_incremental_building();
|
||||
// Start a new collection set increment.
|
||||
void update_incremental_marker() { _inc_build_state = Active; _inc_part_start = _collection_set_cur_length; }
|
||||
// Stop adding regions to the current collection set increment.
|
||||
void stop_incremental_building() { _inc_build_state = Inactive; }
|
||||
|
||||
// Perform any final calculations on the incremental collection set fields
|
||||
// before we can use them.
|
||||
void finalize_incremental_building();
|
||||
// Iterate over the current collection set increment applying the given HeapRegionClosure
|
||||
// from a starting position determined by the given worker id.
|
||||
void iterate_incremental_part_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
|
||||
|
||||
// Reset the contents of the collection set.
|
||||
void clear();
|
||||
|
||||
// Iterate over the collection set, applying the given HeapRegionClosure on all of them.
|
||||
// If may_be_aborted is true, iteration may be aborted using the return value of the
|
||||
// called closure method.
|
||||
// Iterate over the entire collection set (all increments calculated so far), applying
|
||||
// the given HeapRegionClosure on all of them.
|
||||
void iterate(HeapRegionClosure* cl) const;
|
||||
|
||||
// Iterate over the collection set, applying the given HeapRegionClosure on all of them,
|
||||
// trying to optimally spread out starting position of total_workers workers given the
|
||||
// caller's worker_id.
|
||||
void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
|
||||
|
||||
// Stop adding regions to the incremental collection set.
|
||||
void stop_incremental_building() { _inc_build_state = Inactive; }
|
||||
void iterate_optional(HeapRegionClosure* cl) const;
|
||||
|
||||
size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
|
||||
|
||||
@ -188,16 +297,14 @@ public:
|
||||
_bytes_used_before = 0;
|
||||
}
|
||||
|
||||
// Choose a new collection set. Marks the chosen regions as being
|
||||
// "in_collection_set".
|
||||
double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
|
||||
void finalize_old_part(double time_remaining_ms);
|
||||
|
||||
// Add old region "hr" to the collection set.
|
||||
void add_old_region(HeapRegion* hr);
|
||||
|
||||
// Add old region "hr" to optional collection set.
|
||||
void add_optional_region(HeapRegion* hr);
|
||||
// Finalize the initial collection set consisting of all young regions potentially a
|
||||
// few old gen regions.
|
||||
void finalize_initial_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
|
||||
// Finalize the next collection set from the set of available optional old gen regions.
|
||||
bool finalize_optional_for_evacuation(double remaining_pause_time);
|
||||
// Abandon (clean up) optional collection set regions that were not evacuated in this
|
||||
// pause.
|
||||
void abandon_optional_collection_set(G1ParScanThreadStateSet* pss);
|
||||
|
||||
// Update information about hr in the aggregated information for
|
||||
// the incrementally built collection set.
|
||||
@ -214,73 +321,6 @@ public:
|
||||
|
||||
void print(outputStream* st);
|
||||
#endif // !PRODUCT
|
||||
|
||||
double predict_region_elapsed_time_ms(HeapRegion* hr);
|
||||
|
||||
void clear_optional_region(const HeapRegion* hr);
|
||||
|
||||
HeapRegion* optional_region_at(uint i) const {
|
||||
assert(_optional_regions != NULL, "Not yet initialized");
|
||||
assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
|
||||
return _optional_regions[i];
|
||||
}
|
||||
|
||||
HeapRegion* remove_last_optional_region() {
|
||||
assert(_optional_regions != NULL, "Not yet initialized");
|
||||
assert(_optional_region_length != 0, "No region to remove");
|
||||
_optional_region_length--;
|
||||
HeapRegion* removed = _optional_regions[_optional_region_length];
|
||||
_optional_regions[_optional_region_length] = NULL;
|
||||
return removed;
|
||||
}
|
||||
|
||||
private:
|
||||
// Update the incremental collection set information when adding a region.
|
||||
void add_young_region_common(HeapRegion* hr);
|
||||
};
|
||||
|
||||
// Helper class to manage the optional regions in a Mixed collection.
|
||||
class G1OptionalCSet : public StackObj {
|
||||
private:
|
||||
G1CollectionSet* _cset;
|
||||
G1ParScanThreadStateSet* _pset;
|
||||
uint _current_index;
|
||||
uint _current_limit;
|
||||
bool _prepare_failed;
|
||||
bool _evacuation_failed;
|
||||
|
||||
void prepare_to_evacuate_optional_region(HeapRegion* hr);
|
||||
|
||||
public:
|
||||
static const uint InvalidCSetIndex = UINT_MAX;
|
||||
|
||||
G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
|
||||
_cset(cset),
|
||||
_pset(pset),
|
||||
_current_index(0),
|
||||
_current_limit(0),
|
||||
_prepare_failed(false),
|
||||
_evacuation_failed(false) { }
|
||||
// The destructor returns regions to the collection set candidates set and
|
||||
// frees the optional structure in the collection set.
|
||||
~G1OptionalCSet();
|
||||
|
||||
uint current_index() { return _current_index; }
|
||||
uint current_limit() { return _current_limit; }
|
||||
|
||||
uint size();
|
||||
bool is_empty();
|
||||
|
||||
HeapRegion* region_at(uint index);
|
||||
|
||||
// Prepare a set of regions for optional evacuation.
|
||||
void prepare_evacuation(double time_left_ms);
|
||||
bool prepare_failed();
|
||||
|
||||
// Complete the evacuation of the previously prepared
|
||||
// regions by updating their state and check for failures.
|
||||
void complete_evacuation();
|
||||
bool evacuation_failed();
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_G1_G1COLLECTIONSET_HPP
|
||||
|
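
With G1OptionalCSet removed, the rounds of step b5) are driven directly through finalize_optional_for_evacuation(). A minimal standalone model of that control loop (not JDK code; the per-region cost of 2 ms and the budget halving are invented):

#include <cstdio>

// Hypothetical stand-in for the collection set interface declared above.
struct CSetModel {
  int optional_left = 5;  // optional regions not yet evacuated
  bool finalize_optional_for_evacuation(double remaining_ms) {
    // Pretend each optional region costs 2 ms; add as many as still fit.
    int fit = static_cast<int>(remaining_ms / 2.0);
    if (fit <= 0 || optional_left == 0) return false;  // policy says stop
    if (fit > optional_left) fit = optional_left;
    optional_left -= fit;
    std::printf("round evacuates %d optional regions\n", fit);
    return true;
  }
};

int main() {
  CSetModel cset;
  double remaining_ms = 7.0;  // time left after the initial evacuation
  // b5) optional evacuation proceeds in rounds until the policy says stop.
  while (cset.finalize_optional_for_evacuation(remaining_ms)) {
    remaining_ms *= 0.5;      // pretend half the budget was consumed per round
  }
  return 0;
}
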
@@ -27,26 +27,12 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/heapRegion.inline.hpp"

HeapRegion* G1CollectionSetCandidates::pop_front() {
assert(_front_idx < _num_regions, "pre-condition");
HeapRegion* hr = _regions[_front_idx];
assert(hr != NULL, "pre-condition");
_regions[_front_idx] = NULL;
assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
"Remaining reclaimable bytes inconsistent "
"from region: " SIZE_FORMAT " remaining: " SIZE_FORMAT,
hr->reclaimable_bytes(), _remaining_reclaimable_bytes);
_remaining_reclaimable_bytes -= hr->reclaimable_bytes();
_front_idx++;
return hr;
}

void G1CollectionSetCandidates::push_front(HeapRegion* hr) {
assert(hr != NULL, "Can't put back a NULL region");
assert(_front_idx >= 1, "Too many regions have been put back.");
_front_idx--;
_regions[_front_idx] = hr;
_remaining_reclaimable_bytes += hr->reclaimable_bytes();
void G1CollectionSetCandidates::remove(uint num_regions) {
assert(num_regions <= num_remaining(), "Trying to remove more regions (%u) than available (%u)", num_regions, num_remaining());
for (uint i = 0; i < num_regions; i++) {
_remaining_reclaimable_bytes -= at(_front_idx)->reclaimable_bytes();
_front_idx++;
}
}

void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
@@ -62,13 +48,8 @@ void G1CollectionSetCandidates::iterate(HeapRegionClosure* cl) {
#ifndef PRODUCT
void G1CollectionSetCandidates::verify() const {
guarantee(_front_idx <= _num_regions, "Index: %u Num_regions: %u", _front_idx, _num_regions);
uint idx = 0;
uint idx = _front_idx;
size_t sum_of_reclaimable_bytes = 0;
while (idx < _front_idx) {
guarantee(_regions[idx] == NULL, "All entries before _front_idx %u should be NULL, but %u is not",
_front_idx, idx);
idx++;
}
HeapRegion *prev = NULL;
for (; idx < _num_regions; idx++) {
HeapRegion *cur = _regions[idx];

@@ -63,22 +63,18 @@ public:
// Returns the total number of collection set candidate old regions added.
uint num_regions() { return _num_regions; }

// Return the candidate region at the cursor position to be considered for collection without
// removing it.
HeapRegion* peek_front() {
uint cur_idx() const { return _front_idx; }

HeapRegion* at(uint idx) const {
HeapRegion* res = NULL;
if (_front_idx < _num_regions) {
res = _regions[_front_idx];
assert(res != NULL, "Unexpected NULL HeapRegion at index %u", _front_idx);
if (idx < _num_regions) {
res = _regions[idx];
assert(res != NULL, "Unexpected NULL HeapRegion at index %u", idx);
}
return res;
}

// Remove the given region from the candidates set and move the cursor to the next one.
HeapRegion* pop_front();

// Add the given HeapRegion to the front of the collection set candidate set again.
void push_front(HeapRegion* hr);
void remove(uint num_regions);

// Iterate over all remaining collection set candidate regions.
void iterate(HeapRegionClosure* cl);
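
Replacing pop_front()/push_front()/peek_front() with cur_idx()/at()/remove() turns the candidate list into a read-only array with a forward-only cursor. A standalone sketch of that shape (not JDK code; RegionStub is a stand-in for HeapRegion):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct RegionStub { uint32_t idx; size_t reclaimable_bytes; };

class CandidatesModel {
  std::vector<RegionStub> _regions;  // sorted once, never reordered
  uint32_t _front_idx = 0;           // entries below this are consumed
  size_t _remaining_reclaimable_bytes = 0;

public:
  explicit CandidatesModel(std::vector<RegionStub> rs) : _regions(std::move(rs)) {
    for (const RegionStub& r : _regions) _remaining_reclaimable_bytes += r.reclaimable_bytes;
  }
  uint32_t cur_idx() const { return _front_idx; }
  uint32_t num_remaining() const { return (uint32_t)_regions.size() - _front_idx; }
  const RegionStub* at(uint32_t idx) const {
    return idx < _regions.size() ? &_regions[idx] : nullptr;
  }
  // Consume the first num_regions candidates; unlike pop_front/push_front
  // there is no way to put a candidate back, the cursor only moves forward.
  void remove(uint32_t num_regions) {
    assert(num_regions <= num_remaining());
    for (uint32_t i = 0; i < num_regions; i++) {
      _remaining_reclaimable_bytes -= _regions[_front_idx++].reclaimable_bytes;
    }
  }
};
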
@@ -1940,9 +1940,10 @@ public:
guarantee(oopDesc::is_oop(task_entry.obj()),
"Non-oop " PTR_FORMAT ", phase: %s, info: %d",
p2i(task_entry.obj()), _phase, _info);
guarantee(!_g1h->is_in_cset(task_entry.obj()),
"obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
p2i(task_entry.obj()), _phase, _info);
HeapRegion* r = _g1h->heap_region_containing(task_entry.obj());
guarantee(!(r->in_collection_set() || r->has_index_in_opt_cset()),
"obj " PTR_FORMAT " from %s (%d) in region %u in (optional) collection set",
p2i(task_entry.obj()), _phase, _info, r->hrm_index());
}
};

@@ -1979,11 +1980,11 @@ void G1ConcurrentMark::verify_no_collection_set_oops() {
HeapWord* task_finger = task->finger();
if (task_finger != NULL && task_finger < _heap.end()) {
// See above note on the global finger verification.
HeapRegion* task_hr = _g1h->heap_region_containing(task_finger);
guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
!task_hr->in_collection_set(),
HeapRegion* r = _g1h->heap_region_containing(task_finger);
guarantee(r == NULL || task_finger == r->bottom() ||
!r->in_collection_set() || !r->has_index_in_opt_cset(),
"task finger: " PTR_FORMAT " region: " HR_FORMAT,
p2i(task_finger), HR_FORMAT_PARAMS(task_hr));
p2i(task_finger), HR_FORMAT_PARAMS(r));
}
}
}

@@ -127,7 +127,7 @@ double G1ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark)
}

void G1ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
if (g1_policy->adaptive_young_list_length()) {
if (g1_policy->use_adaptive_young_list_length()) {
jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
if (!_cm->has_aborted() && sleep_time_ms > 0) {
os::sleep(this, sleep_time_ms, false);

@@ -31,19 +31,28 @@

class G1EdenRegions {
private:
int _length;
int _length;
// Sum of used bytes from all retired eden regions.
// I.e. updated when mutator regions are retired.
volatile size_t _used_bytes;

public:
G1EdenRegions() : _length(0) {}
G1EdenRegions() : _length(0), _used_bytes(0) { }

void add(HeapRegion* hr) {
assert(!hr->is_eden(), "should not already be set");
_length++;
}

void clear() { _length = 0; }
void clear() { _length = 0; _used_bytes = 0; }

uint length() const { return _length; }

size_t used_bytes() const { return _used_bytes; }

void add_used_bytes(size_t used_bytes) {
_used_bytes += used_bytes;
}
};

#endif // SHARE_GC_G1_G1EDENREGIONS_HPP
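
The new _used_bytes counter lets eden usage be reported from retired-region totals instead of being derived from region counts. A sketch of the accounting (not JDK code; std::atomic stands in for the volatile counter and whatever memory ordering the VM actually relies on):

#include <atomic>
#include <cstddef>

class EdenRegionsModel {
  int _length = 0;
  std::atomic<size_t> _used_bytes{0};  // bumped as mutator regions retire

public:
  void add_region() { _length++; }                             // region becomes eden
  void add_used_bytes(size_t bytes) { _used_bytes += bytes; }  // at retirement
  size_t used_bytes() const { return _used_bytes.load(); }
  void clear() { _length = 0; _used_bytes = 0; }               // after a pause
};
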
@@ -228,6 +228,8 @@ public:

if (_hrclaimer->claim_region(hr->hrm_index())) {
if (hr->evacuation_failed()) {
hr->clear_index_in_opt_cset();

bool during_initial_mark = _g1h->collector_state()->in_initial_mark_gc();
bool during_conc_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();

@@ -257,5 +259,5 @@ G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask() :
void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);

_g1h->collection_set_iterate_from(&rsfp_cl, worker_id);
_g1h->collection_set_iterate_increment_from(&rsfp_cl, worker_id);
}

@@ -76,10 +76,12 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
}
_gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
_gc_par_phases[OptScanRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan RS (ms):");
_gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
_gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scan (ms):");
_gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Optional Code Root Scan (ms):");
_gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
_gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
_gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
_gc_par_phases[OptTermination] = new WorkerDataArray<double>(max_gc_threads, "Optional Termination (ms):");
_gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
_gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
_gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");
@@ -91,14 +93,16 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_scan_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
_gc_par_phases[ScanRS]->link_thread_work_items(_scan_rs_skipped_cards, ScanRSSkippedCards);

_opt_cset_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_scanned_cards, OptCSetScannedCards);
_opt_cset_claimed_cards = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Cards:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_claimed_cards, OptCSetClaimedCards);
_opt_cset_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_skipped_cards, OptCSetSkippedCards);
_opt_cset_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_cset_used_memory, OptCSetUsedMemory);
_opt_scan_rs_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_scanned_cards, ScanRSScannedCards);
_opt_scan_rs_claimed_cards = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Cards:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_claimed_cards, ScanRSClaimedCards);
_opt_scan_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_skipped_cards, ScanRSSkippedCards);
_opt_scan_rs_scanned_opt_refs = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Refs:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_scanned_opt_refs, ScanRSScannedOptRefs);
_opt_scan_rs_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
_gc_par_phases[OptScanRS]->link_thread_work_items(_opt_scan_rs_used_memory, ScanRSUsedMemory);

_update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:");
_gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers, UpdateRSProcessedBuffers);
@@ -112,9 +116,17 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_obj_copy_lab_undo_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
_gc_par_phases[ObjCopy]->link_thread_work_items(_obj_copy_lab_undo_waste, ObjCopyLABUndoWaste);

_opt_obj_copy_lab_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Waste");
_gc_par_phases[OptObjCopy]->link_thread_work_items(_obj_copy_lab_waste, ObjCopyLABWaste);
_opt_obj_copy_lab_undo_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
_gc_par_phases[OptObjCopy]->link_thread_work_items(_obj_copy_lab_undo_waste, ObjCopyLABUndoWaste);

_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
_gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);

_opt_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Optional Termination Attempts:");
_gc_par_phases[OptTermination]->link_thread_work_items(_opt_termination_attempts);

if (UseStringDeduplication) {
_gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
_gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
@@ -134,7 +146,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
}

void G1GCPhaseTimes::reset() {
_cur_collection_par_time_ms = 0.0;
_cur_collection_initial_evac_time_ms = 0.0;
_cur_optional_evac_ms = 0.0;
_cur_collection_code_root_fixup_time_ms = 0.0;
_cur_strong_code_root_purge_time_ms = 0.0;
@@ -251,6 +263,10 @@ void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_i, d
}
}

double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_i) {
return _gc_par_phases[phase]->get(worker_i);
}

void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index) {
_gc_par_phases[phase]->set_thread_work_item(worker_i, count, index);
}
@@ -259,6 +275,10 @@ void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint work
_gc_par_phases[phase]->set_or_add_thread_work_item(worker_i, count, index);
}

size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i, uint index) {
return _gc_par_phases[phase]->get_thread_work_item(worker_i, index);
}

// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
return _gc_par_phases[phase]->average() * 1000.0;
@@ -374,12 +394,14 @@ double G1GCPhaseTimes::print_evacuate_optional_collection_set() const {
info_time("Evacuate Optional Collection Set", sum_ms);
debug_phase(_gc_par_phases[OptScanRS]);
debug_phase(_gc_par_phases[OptObjCopy]);
debug_phase(_gc_par_phases[OptCodeRoots]);
debug_phase(_gc_par_phases[OptTermination]);
}
return sum_ms;
}

double G1GCPhaseTimes::print_evacuate_collection_set() const {
const double sum_ms = _cur_collection_par_time_ms;
const double sum_ms = _cur_collection_initial_evac_time_ms;

info_time("Evacuate Collection Set", sum_ms);

@@ -517,9 +539,11 @@ const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
"ScanRS",
"OptScanRS",
"CodeRoots",
"OptCodeRoots",
"ObjCopy",
"OptObjCopy",
"Termination",
"OptTermination",
"Other",
"GCWorkerTotal",
"GCWorkerEnd",

@@ -67,9 +67,11 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
ScanRS,
OptScanRS,
CodeRoots,
OptCodeRoots,
ObjCopy,
OptObjCopy,
Termination,
OptTermination,
Other,
GCWorkerTotal,
GCWorkerEnd,
@@ -87,7 +89,9 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
enum GCScanRSWorkItems {
ScanRSScannedCards,
ScanRSClaimedCards,
ScanRSSkippedCards
ScanRSSkippedCards,
ScanRSScannedOptRefs,
ScanRSUsedMemory
};

enum GCUpdateRSWorkItems {
@@ -101,13 +105,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
ObjCopyLABUndoWaste
};

enum GCOptCSetWorkItems {
OptCSetScannedCards,
OptCSetClaimedCards,
OptCSetSkippedCards,
OptCSetUsedMemory
};

private:
// Markers for grouping the phases in the GCPhases enum above
static const int GCMainParPhasesLast = GCWorkerEnd;
@@ -122,19 +119,25 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
WorkerDataArray<size_t>* _scan_rs_claimed_cards;
WorkerDataArray<size_t>* _scan_rs_skipped_cards;

WorkerDataArray<size_t>* _opt_scan_rs_scanned_cards;
WorkerDataArray<size_t>* _opt_scan_rs_claimed_cards;
WorkerDataArray<size_t>* _opt_scan_rs_skipped_cards;
WorkerDataArray<size_t>* _opt_scan_rs_scanned_opt_refs;
WorkerDataArray<size_t>* _opt_scan_rs_used_memory;

WorkerDataArray<size_t>* _obj_copy_lab_waste;
WorkerDataArray<size_t>* _obj_copy_lab_undo_waste;

WorkerDataArray<size_t>* _opt_cset_scanned_cards;
WorkerDataArray<size_t>* _opt_cset_claimed_cards;
WorkerDataArray<size_t>* _opt_cset_skipped_cards;
WorkerDataArray<size_t>* _opt_cset_used_memory;
WorkerDataArray<size_t>* _opt_obj_copy_lab_waste;
WorkerDataArray<size_t>* _opt_obj_copy_lab_undo_waste;

WorkerDataArray<size_t>* _termination_attempts;

WorkerDataArray<size_t>* _opt_termination_attempts;

WorkerDataArray<size_t>* _redirtied_cards;

double _cur_collection_par_time_ms;
double _cur_collection_initial_evac_time_ms;
double _cur_optional_evac_ms;
double _cur_collection_code_root_fixup_time_ms;
double _cur_strong_code_root_purge_time_ms;
@@ -225,10 +228,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {

void record_or_add_time_secs(GCParPhases phase, uint worker_i, double secs);

double get_time_secs(GCParPhases phase, uint worker_i);

void record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);

void record_or_add_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);

size_t get_thread_work_item(GCParPhases phase, uint worker_i, uint index = 0);

// return the average time for a phase in milliseconds
double average_time_ms(GCParPhases phase);

@@ -256,16 +263,16 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_cur_expand_heap_time_ms = ms;
}

void record_par_time(double ms) {
_cur_collection_par_time_ms = ms;
void record_initial_evac_time(double ms) {
_cur_collection_initial_evac_time_ms = ms;
}

void record_optional_evacuation(double ms) {
_cur_optional_evac_ms = ms;
void record_or_add_optional_evac_time(double ms) {
_cur_optional_evac_ms += ms;
}

void record_code_root_fixup_time(double ms) {
_cur_collection_code_root_fixup_time_ms = ms;
void record_or_add_code_root_fixup_time(double ms) {
_cur_collection_code_root_fixup_time_ms += ms;
}

void record_strong_code_root_purge_time(double ms) {
@@ -360,7 +367,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
}

double cur_collection_par_time_ms() {
return _cur_collection_par_time_ms;
return _cur_collection_initial_evac_time_ms;
}

double cur_clear_ct_time_ms() {
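
The record_* versus record_or_add_* split exists because the optional evacuation runs in several rounds within one pause, so its times must accumulate rather than overwrite. A simplified standalone model of that distinction (not JDK code; one double per phase instead of per-worker arrays):

#include <map>
#include <string>

class PhaseTimesModel {
  std::map<std::string, double> _ms;

public:
  // One-shot phases (e.g. the initial evacuation) record once per pause.
  void record_initial_evac_time(double ms) { _ms["InitialEvac"] = ms; }
  // Multi-round phases (optional evacuation) add each round's contribution.
  void record_or_add_optional_evac_time(double ms) { _ms["OptionalEvac"] += ms; }
  double get(const std::string& phase) const {
    auto it = _ms.find(phase);
    return it == _ms.end() ? 0.0 : it->second;
  }
};
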
@@ -371,6 +371,7 @@ public:
}

bool do_heap_region(HeapRegion* r) {
guarantee(!r->has_index_in_opt_cset(), "Region %u still has opt collection set index %u", r->hrm_index(), r->index_in_opt_cset());
guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
// Humongous and old regions might be of any state, so can't check here.
guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());

@@ -228,23 +228,25 @@ void G1MonitoringSupport::recalculate_sizes() {
MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
// Recalculate all the sizes from scratch.

uint young_list_length = _g1h->young_regions_count();
// This never includes used bytes of current allocating heap region.
_overall_used = _g1h->used_unlocked();
_eden_space_used = _g1h->eden_regions_used_bytes();
_survivor_space_used = _g1h->survivor_regions_used_bytes();

// _overall_used and _eden_space_used are obtained concurrently so
// may be inconsistent with each other. To prevent _old_gen_used going negative,
// use the smaller value to subtract.
_old_gen_used = _overall_used - MIN2(_overall_used, _eden_space_used + _survivor_space_used);

uint survivor_list_length = _g1h->survivor_regions_count();
assert(young_list_length >= survivor_list_length, "invariant");
uint eden_list_length = young_list_length - survivor_list_length;
// Max length includes any potential extensions to the young gen
// we'll do when the GC locker is active.
uint young_list_max_length = _g1h->policy()->young_list_max_length();
assert(young_list_max_length >= survivor_list_length, "invariant");
uint eden_list_max_length = young_list_max_length - survivor_list_length;

_overall_used = _g1h->used_unlocked();
_eden_space_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
_survivor_space_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
_old_gen_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used);

// First calculate the committed sizes that can be calculated independently.
_survivor_space_committed = _survivor_space_used;
_survivor_space_committed = survivor_list_length * HeapRegion::GrainBytes;
_old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);

// Next, start with the overall committed size.
@@ -274,11 +276,15 @@ void G1MonitoringSupport::recalculate_sizes() {
// Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size.
_eden_space_used = MIN2(_eden_space_used, _eden_space_committed);
// _survivor_committed and _old_committed are calculated in terms of
// the corresponding _*_used value, so the next two conditions
// should hold.
assert(_survivor_space_used <= _survivor_space_committed, "post-condition");
assert(_old_gen_used <= _old_gen_committed, "post-condition");
// _survivor_space_used is calculated during a safepoint and _survivor_space_committed
// is calculated from survivor region count * heap region size.
assert(_survivor_space_used <= _survivor_space_committed, "Survivor used bytes(" SIZE_FORMAT
") should be less than or equal to survivor committed(" SIZE_FORMAT ")",
_survivor_space_used, _survivor_space_committed);
// _old_gen_committed is calculated in terms of _old_gen_used value.
assert(_old_gen_used <= _old_gen_committed, "Old gen used bytes(" SIZE_FORMAT
") should be less than or equal to old gen committed(" SIZE_FORMAT ")",
_old_gen_used, _old_gen_committed);
}

void G1MonitoringSupport::update_sizes() {

@@ -174,21 +174,6 @@ class G1MonitoringSupport : public CHeapObj<mtGC> {

size_t _old_gen_used;

// It returns x - y if x > y, 0 otherwise.
// As described in the comment above, some of the inputs to the
// calculations we have to do are obtained concurrently and hence
// may be inconsistent with each other. So, this provides a
// defensive way of performing the subtraction and avoids the value
// going negative (which would mean a very large result, given that
// the parameters are size_t).
static size_t subtract_up_to_zero(size_t x, size_t y) {
if (x > y) {
return x - y;
} else {
return 0;
}
}

// Recalculate all the sizes.
void recalculate_sizes();

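
The removed subtract_up_to_zero(x, y) and the new `x - MIN2(x, y)` expression compute the same clamped difference; a quick standalone check (not JDK code):

#include <algorithm>
#include <cassert>
#include <cstddef>

static size_t subtract_up_to_zero(size_t x, size_t y) { return x > y ? x - y : 0; }

int main() {
  const size_t samples[] = {0, 5, 100, 4096};
  for (size_t x : samples) {
    for (size_t y : samples) {
      // x - min(x, y) never wraps around: the subtrahend is capped at x.
      assert(x - std::min(x, y) == subtract_up_to_zero(x, y));
    }
  }
  return 0;
}
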
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,11 @@ G1OopStarChunkedList::~G1OopStarChunkedList() {
delete_list(_coops);
}

void G1OopStarChunkedList::oops_do(OopClosure* obj_cl, OopClosure* root_cl) {
chunks_do(_roots, root_cl);
chunks_do(_croots, root_cl);
chunks_do(_oops, obj_cl);
chunks_do(_coops, obj_cl);
size_t G1OopStarChunkedList::oops_do(OopClosure* obj_cl, OopClosure* root_cl) {
size_t result = 0;
result += chunks_do(_roots, root_cl);
result += chunks_do(_croots, root_cl);
result += chunks_do(_oops, obj_cl);
result += chunks_do(_coops, obj_cl);
return result;
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,8 +41,8 @@ class G1OopStarChunkedList : public CHeapObj<mtGC> {
template <typename T> void delete_list(ChunkedList<T*, mtGC>* c);

template <typename T>
void chunks_do(ChunkedList<T*, mtGC>* head,
OopClosure* cl);
size_t chunks_do(ChunkedList<T*, mtGC>* head,
OopClosure* cl);

template <typename T>
inline void push(ChunkedList<T*, mtGC>** field, T* p);
@@ -53,7 +53,7 @@ class G1OopStarChunkedList : public CHeapObj<mtGC> {

size_t used_memory() { return _used_memory; }

void oops_do(OopClosure* obj_cl, OopClosure* root_cl);
size_t oops_do(OopClosure* obj_cl, OopClosure* root_cl);

inline void push_oop(oop* p);
inline void push_oop(narrowOop* p);

@@ -72,13 +72,16 @@ void G1OopStarChunkedList::delete_list(ChunkedList<T*, mtGC>* c) {
}

template <typename T>
void G1OopStarChunkedList::chunks_do(ChunkedList<T*, mtGC>* head, OopClosure* cl) {
size_t G1OopStarChunkedList::chunks_do(ChunkedList<T*, mtGC>* head, OopClosure* cl) {
size_t result = 0;
for (ChunkedList<T*, mtGC>* c = head; c != NULL; c = c->next_used()) {
result += c->size();
for (size_t i = 0; i < c->size(); i++) {
T* p = c->at(i);
cl->do_oop(p);
}
}
return result;
}

#endif // SHARE_GC_G1_G1OOPSTARCHUNKEDLIST_INLINE_HPP
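
Returning the visit count from chunks_do() is what lets the caller report how many optional references a worker scanned. A freestanding sketch of the pattern (not JDK code; Chunk is a simplified stand-in for ChunkedList):

#include <cstddef>
#include <functional>
#include <vector>

template <typename T>
struct Chunk {
  std::vector<T*> slots;
  Chunk* next = nullptr;
};

template <typename T>
size_t chunks_do(Chunk<T>* head, const std::function<void(T*)>& cl) {
  size_t visited = 0;
  for (Chunk<T>* c = head; c != nullptr; c = c->next) {
    visited += c->slots.size();
    for (T* p : c->slots) {
      cl(p);  // apply the closure to each recorded reference
    }
  }
  return visited;  // lets the caller report "refs scanned" per phase
}
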
@@ -372,7 +372,7 @@ void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
}

size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
_g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
_g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::ScanRSUsedMemory);
}
}

@@ -105,7 +105,7 @@ void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {

assert(Heap_lock->owned_by_self(), "Locking discipline.");

if (!adaptive_young_list_length()) {
if (!use_adaptive_young_list_length()) {
_young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
}
_young_gen_sizer->adjust_max_new_size(_g1h->max_expandable_regions());
@@ -195,7 +195,7 @@ void G1Policy::record_new_heap_size(uint new_number_of_regions) {

uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
uint desired_min_length = 0;
if (adaptive_young_list_length()) {
if (use_adaptive_young_list_length()) {
if (_analytics->num_alloc_rate_ms() > 3) {
double now_sec = os::elapsedTime();
double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
@@ -252,7 +252,7 @@ G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengt
uint desired_max_length = calculate_young_list_desired_max_length();

uint young_list_target_length = 0;
if (adaptive_young_list_length()) {
if (use_adaptive_young_list_length()) {
if (collector_state()->in_young_only_phase()) {
young_list_target_length =
calculate_young_list_target_length(rs_lengths,
@@ -304,7 +304,7 @@ G1Policy::calculate_young_list_target_length(size_t rs_lengths,
uint base_min_length,
uint desired_min_length,
uint desired_max_length) const {
assert(adaptive_young_list_length(), "pre-condition");
assert(use_adaptive_young_list_length(), "pre-condition");
assert(collector_state()->in_young_only_phase(), "only call this for young GCs");

// In case some edge-condition makes the desired max length too small...
@@ -414,7 +414,7 @@ double G1Policy::predict_survivor_regions_evac_time() const {
}

void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
guarantee( adaptive_young_list_length(), "should not call this otherwise" );
guarantee(use_adaptive_young_list_length(), "should not call this otherwise" );

if (rs_lengths > _rs_lengths_prediction) {
// add 10% to avoid having to recalculate often
@@ -430,7 +430,7 @@ void G1Policy::update_rs_lengths_prediction() {
}

void G1Policy::update_rs_lengths_prediction(size_t prediction) {
if (collector_state()->in_young_only_phase() && adaptive_young_list_length()) {
if (collector_state()->in_young_only_phase() && use_adaptive_young_list_length()) {
_rs_lengths_prediction = prediction;
}
}
@@ -659,7 +659,11 @@ void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_sc

double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
double avg_time_scan_rs = average_time_ms(G1GCPhaseTimes::ScanRS);
if (this_pause_was_young_only) {
avg_time_scan_rs += average_time_ms(G1GCPhaseTimes::OptScanRS);
}
cost_per_entry_ms = avg_time_scan_rs / cards_scanned;
_analytics->report_cost_per_entry_ms(cost_per_entry_ms, this_pause_was_young_only);
}

@@ -694,7 +698,7 @@ void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_sc
double cost_per_byte_ms = 0.0;

if (copied_bytes > 0) {
cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / (double) copied_bytes;
_analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
}

@@ -906,8 +910,8 @@ bool G1Policy::can_expand_young_list() const {
return young_list_length < young_list_max_length;
}

bool G1Policy::adaptive_young_list_length() const {
return _young_gen_sizer->adaptive_young_list_length();
bool G1Policy::use_adaptive_young_list_length() const {
return _young_gen_sizer->use_adaptive_young_list_length();
}

size_t G1Policy::desired_survivor_size(uint max_regions) const {
@@ -1188,11 +1192,135 @@ uint G1Policy::calc_max_old_cset_length() const {
return (uint) result;
}

uint G1Policy::finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms, survivor);
_collection_set->finalize_old_part(time_remaining_ms);
void G1Policy::calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
double time_remaining_ms,
uint& num_initial_regions,
uint& num_optional_regions) {
assert(candidates != NULL, "Must be");

return _collection_set->region_length();
num_initial_regions = 0;
num_optional_regions = 0;
uint num_expensive_regions = 0;

double predicted_old_time_ms = 0.0;
double predicted_initial_time_ms = 0.0;
double predicted_optional_time_ms = 0.0;

double optional_threshold_ms = time_remaining_ms * optional_prediction_fraction();

const uint min_old_cset_length = calc_min_old_cset_length();
const uint max_old_cset_length = MAX2(min_old_cset_length, calc_max_old_cset_length());
const uint max_optional_regions = max_old_cset_length - min_old_cset_length;
bool check_time_remaining = use_adaptive_young_list_length();

uint candidate_idx = candidates->cur_idx();

log_debug(gc, ergo, cset)("Start adding old regions to collection set. Min %u regions, max %u regions, "
"time remaining %1.2fms, optional threshold %1.2fms",
min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);

HeapRegion* hr = candidates->at(candidate_idx);
while (hr != NULL) {
if (num_initial_regions + num_optional_regions >= max_old_cset_length) {
// Added maximum number of old regions to the CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Maximum number of regions). "
"Initial %u regions, optional %u regions",
num_initial_regions, num_optional_regions);
break;
}

// Stop adding regions if the remaining reclaimable space is
// not above G1HeapWastePercent.
size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
double threshold = (double) G1HeapWastePercent;
if (reclaimable_percent <= threshold) {
// We've added enough old regions that the amount of uncollected
// reclaimable space is at or below the waste threshold. Stop
// adding old regions to the CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Reclaimable percentage below threshold). "
"Reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
reclaimable_percent, G1HeapWastePercent);
break;
}

double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
// Add regions to old set until we reach the minimum amount
if (num_initial_regions < min_old_cset_length) {
predicted_old_time_ms += predicted_time_ms;
num_initial_regions++;
// Record the number of regions added with no time remaining
if (time_remaining_ms == 0.0) {
num_expensive_regions++;
}
} else if (!check_time_remaining) {
// In the non-auto-tuning case, we'll finish adding regions
// to the CSet if we reach the minimum.
log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Region amount reached min).");
break;
} else {
// Keep adding regions to old set until we reach the optional threshold
if (time_remaining_ms > optional_threshold_ms) {
predicted_old_time_ms += predicted_time_ms;
num_initial_regions++;
} else if (time_remaining_ms > 0) {
// Keep adding optional regions until time is up.
assert(num_optional_regions < max_optional_regions, "Should not be possible.");
predicted_optional_time_ms += predicted_time_ms;
num_optional_regions++;
} else {
log_debug(gc, ergo, cset)("Finish adding old regions to collection set (Predicted time too high).");
break;
}
}
hr = candidates->at(++candidate_idx);
}
if (hr == NULL) {
log_debug(gc, ergo, cset)("Old candidate collection set empty.");
}

if (num_expensive_regions > 0) {
log_debug(gc, ergo, cset)("Added %u initial old regions to collection set although the predicted time was too high.",
num_expensive_regions);
}

log_debug(gc, ergo, cset)("Finish choosing collection set old regions. Initial: %u, optional: %u, "
"predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
num_initial_regions, num_optional_regions,
predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms);
}

void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
uint const max_optional_regions,
double time_remaining_ms,
uint& num_optional_regions) {
assert(_g1h->collector_state()->in_mixed_phase(), "Should only be called in mixed phase");

num_optional_regions = 0;
double prediction_ms = 0;
uint candidate_idx = candidates->cur_idx();

HeapRegion* r = candidates->at(candidate_idx);
while (num_optional_regions < max_optional_regions) {
assert(r != NULL, "Region must exist");
prediction_ms += predict_region_elapsed_time_ms(r, false);

if (prediction_ms > time_remaining_ms) {
log_debug(gc, ergo, cset)("Prediction %.3fms for region %u does not fit remaining time: %.3fms.",
prediction_ms, r->hrm_index(), time_remaining_ms);
break;
}
// This region will be included in the next optional evacuation.

time_remaining_ms -= prediction_ms;
num_optional_regions++;
r = candidates->at(++candidate_idx);
}

log_debug(gc, ergo, cset)("Prepared %u regions out of %u for optional evacuation. Predicted time: %.3fms",
num_optional_regions, max_optional_regions, prediction_ms);
}

void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
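
The selection loop in calculate_old_collection_set_regions() above boils down to three bands: a mandatory minimum, more initial regions while the remaining time stays above the optional threshold, and optional regions while any time is left. A condensed standalone model (not JDK code; all costs and limits are invented inputs, and the waste-percent cutoff is omitted):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<double> predicted_ms = {2.0, 2.0, 2.0, 2.0, 2.0, 2.0};  // per candidate
  const unsigned min_old = 2, max_old = 6;
  double time_remaining_ms = 8.0;
  const double optional_threshold_ms = time_remaining_ms * 0.2;  // optional_prediction_fraction()

  unsigned num_initial = 0, num_optional = 0;
  for (double cost : predicted_ms) {
    if (num_initial + num_optional >= max_old) break;
    time_remaining_ms = std::max(time_remaining_ms - cost, 0.0);
    if (num_initial < min_old) {
      num_initial++;                                    // mandatory part
    } else if (time_remaining_ms > optional_threshold_ms) {
      num_initial++;                                    // still comfortably in budget
    } else if (time_remaining_ms > 0) {
      num_optional++;                                   // might fit, decide during the pause
    } else {
      break;                                            // out of time
    }
  }
  std::printf("initial: %u optional: %u\n", num_initial, num_optional);
  return 0;
}
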
@@ -44,6 +44,7 @@

class HeapRegion;
class G1CollectionSet;
class G1CollectionSetCandidates;
class G1CollectionSetChooser;
class G1IHOPControl;
class G1Analytics;
@@ -344,7 +345,21 @@ public:
bool next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str) const;

uint finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
// Calculate and return the number of initial and optional old gen regions from
// the given collection set candidates and the remaining time.
void calculate_old_collection_set_regions(G1CollectionSetCandidates* candidates,
double time_remaining_ms,
uint& num_initial_regions,
uint& num_optional_regions);

// Calculate the number of optional regions from the given collection set candidates,
// the remaining time and the maximum number of these regions and return the number
// of actually selected regions in num_optional_regions.
void calculate_optional_collection_set_regions(G1CollectionSetCandidates* candidates,
uint const max_optional_regions,
double time_remaining_ms,
uint& num_optional_regions);

private:
// Set the state to start a concurrent marking cycle and clear
// _initiate_conc_mark_if_possible because it has now been
@@ -384,7 +399,7 @@ public:
return _young_list_max_length;
}

bool adaptive_young_list_length() const;
bool use_adaptive_young_list_length() const;

void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);

@@ -403,11 +418,13 @@ private:
AgeTable _survivors_age_table;

size_t desired_survivor_size(uint max_regions) const;
public:

// Fraction used when predicting how many optional regions to include in
// the CSet. This fraction of the available time is used for optional regions,
// the rest is used to add old regions to the normal CSet.
double optional_prediction_fraction() { return 0.2; }

public:
// Fraction used when evacuating the optional regions. This fraction of the
// remaining time is used to choose what regions to include in the evacuation.
double optional_evacuation_fraction() { return 0.75; }
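
How the two fractions above might interact, with invented numbers: optional_prediction_fraction() reserves a slice of the post-young budget for optional selection, while optional_evacuation_fraction() scales each optional round's planning budget. Not JDK code; the split below is a simplifying assumption, not the exact policy arithmetic:

#include <cstdio>

int main() {
  const double time_remaining_ms = 50.0;              // after the young part
  const double optional_prediction_fraction = 0.2;
  const double optional_evacuation_fraction = 0.75;

  double optional_budget_ms = time_remaining_ms * optional_prediction_fraction;  // 10 ms
  double initial_budget_ms  = time_remaining_ms - optional_budget_ms;            // 40 ms

  double left_after_initial_ms = 12.0;  // hypothetical measurement during the pause
  double first_round_budget_ms = left_after_initial_ms * optional_evacuation_fraction;  // 9 ms

  std::printf("initial: %.1f ms, optional planning: %.1f ms, first round: %.1f ms\n",
              initial_budget_ms, optional_budget_ms, first_round_budget_ms);
  return 0;
}
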
@@ -316,6 +316,8 @@ G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state
_scan_state(scan_state),
_phase(phase),
_worker_i(worker_i),
_opt_refs_scanned(0),
_opt_refs_memory_used(0),
_cards_scanned(0),
_cards_claimed(0),
_cards_skipped(0),
@@ -338,6 +340,19 @@ void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card)
_cards_scanned++;
}

void G1ScanRSForRegionClosure::scan_opt_rem_set_roots(HeapRegion* r) {
EventGCPhaseParallel event;

G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);

G1ScanObjsDuringScanRSClosure scan_cl(_g1h, _pss);
G1ScanRSForOptionalClosure cl(&scan_cl);
_opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->raw_strong_oops());
_opt_refs_memory_used += opt_rem_set_list->used_memory();

event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(_phase));
}

void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
EventGCPhaseParallel event;
uint const region_idx = r->hrm_index();
@@ -414,11 +429,16 @@ void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
}

bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
assert(r->in_collection_set(),
"Should only be called on elements of the collection set but region %u is not.",
r->hrm_index());
assert(r->in_collection_set(), "Region %u is not in the collection set.", r->hrm_index());
uint const region_idx = r->hrm_index();

// The individual references for the optional remembered set are per-worker, so we
// always need to scan them.
if (r->has_index_in_opt_cset()) {
G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
scan_opt_rem_set_roots(r);
}

// Do an early out if we know we are complete.
if (_scan_state->iter_is_complete(region_idx)) {
return false;
@@ -437,22 +457,33 @@ bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
return false;
}

void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
uint worker_i,
G1GCPhaseTimes::GCParPhases scan_phase,
G1GCPhaseTimes::GCParPhases objcopy_phase,
G1GCPhaseTimes::GCParPhases coderoots_phase) {
assert(pss->trim_ticks().value() == 0, "Queues must have been trimmed before entering.");

G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, G1GCPhaseTimes::ScanRS, worker_i);
_g1h->collection_set_iterate_from(&cl, worker_i);
G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, scan_phase, worker_i);
_g1h->collection_set_iterate_increment_from(&cl, worker_i);

G1GCPhaseTimes* p = _g1p->phase_times();

p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, cl.rem_set_root_scan_time().seconds());
p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.rem_set_trim_partially_time().seconds());
p->record_or_add_time_secs(objcopy_phase, worker_i, cl.rem_set_trim_partially_time().seconds());

p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
p->record_or_add_time_secs(scan_phase, worker_i, cl.rem_set_root_scan_time().seconds());
p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
p->record_or_add_thread_work_item(scan_phase, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
// At this time we only record some metrics for the optional remembered set.
if (scan_phase == G1GCPhaseTimes::OptScanRS) {
p->record_or_add_thread_work_item(scan_phase, worker_i, cl.opt_refs_scanned(), G1GCPhaseTimes::ScanRSScannedOptRefs);
p->record_or_add_thread_work_item(scan_phase, worker_i, cl.opt_refs_memory_used(), G1GCPhaseTimes::ScanRSUsedMemory);
}

p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time().seconds());
p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, cl.strong_code_root_trim_partially_time().seconds());
p->record_or_add_time_secs(coderoots_phase, worker_i, cl.strong_code_root_scan_time().seconds());
p->add_time_secs(objcopy_phase, worker_i, cl.strong_code_root_trim_partially_time().seconds());
}

// Closure used for updating rem sets. Only called during an evacuation pause.
@@ -514,11 +545,6 @@ void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
}
}

void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
update_rem_set(pss, worker_i);
scan_rem_set(pss, worker_i);;
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
G1BarrierSet::dirty_card_queue_set().concatenate_logs();
_scan_state->reset();
@@ -60,14 +60,6 @@ private:

G1RemSetSummary _prev_period_summary;

// Scan all remembered sets of the collection set for references into the collection
// set.
void scan_rem_set(G1ParScanThreadState* pss, uint worker_i);

// Flush remaining refinement buffers for cross-region references to either evacuate references
// into the collection set or update the remembered set.
void update_rem_set(G1ParScanThreadState* pss, uint worker_i);

G1CollectedHeap* _g1h;
size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.

@@ -93,12 +85,19 @@ public:
G1HotCardCache* hot_card_cache);
~G1RemSet();

// Process all oops in the collection set from the cards in the refinement buffers and
// remembered sets using pss.
//
// Scan all remembered sets of the collection set for references into the collection
// set.
// Further applies heap_region_codeblobs on the oops of the unmarked nmethods on the strong code
// roots list for each region in the collection set.
void oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i);
void scan_rem_set(G1ParScanThreadState* pss,
uint worker_i,
G1GCPhaseTimes::GCParPhases scan_phase,
G1GCPhaseTimes::GCParPhases objcopy_phase,
G1GCPhaseTimes::GCParPhases coderoots_phase);

// Flush remaining refinement buffers for cross-region references to either evacuate references
// into the collection set or update the remembered set.
void update_rem_set(G1ParScanThreadState* pss, uint worker_i);

// Prepare for and cleanup after an oops_into_collection_set_do
// call. Must call each of these once before and after (in sequential
@@ -144,6 +143,9 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {

uint _worker_i;

size_t _opt_refs_scanned;
size_t _opt_refs_memory_used;

size_t _cards_scanned;
size_t _cards_claimed;
size_t _cards_skipped;
@@ -157,6 +159,7 @@ class G1ScanRSForRegionClosure : public HeapRegionClosure {
void claim_card(size_t card_index, const uint region_idx_for_card);
void scan_card(MemRegion mr, uint region_idx_for_card);

void scan_opt_rem_set_roots(HeapRegion* r);
void scan_rem_set_roots(HeapRegion* r);
void scan_strong_code_roots(HeapRegion* r);
public:
@@ -177,6 +180,9 @@ public:
size_t cards_scanned() const { return _cards_scanned; }
size_t cards_claimed() const { return _cards_claimed; }
size_t cards_skipped() const { return _cards_skipped; }

size_t opt_refs_scanned() const { return _opt_refs_scanned; }
size_t opt_refs_memory_used() const { return _opt_refs_memory_used; }
};

#endif // SHARE_GC_G1_G1REMSET_HPP
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,9 @@
#include "utilities/growableArray.hpp"
#include "utilities/debug.hpp"

G1SurvivorRegions::G1SurvivorRegions() : _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)) {}
G1SurvivorRegions::G1SurvivorRegions() :
_regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)),
_used_bytes(0) {}

void G1SurvivorRegions::add(HeapRegion* hr) {
assert(hr->is_survivor(), "should be flagged as survivor region");
@@ -51,5 +53,9 @@ void G1SurvivorRegions::convert_to_eden() {

void G1SurvivorRegions::clear() {
_regions->clear();
_used_bytes = 0;
}

void G1SurvivorRegions::add_used_bytes(size_t used_bytes) {
_used_bytes += used_bytes;
}
@@ -34,6 +34,7 @@ class HeapRegion;
class G1SurvivorRegions {
private:
GrowableArray<HeapRegion*>* _regions;
volatile size_t _used_bytes;

public:
G1SurvivorRegions();
@@ -49,6 +50,11 @@ public:
const GrowableArray<HeapRegion*>* regions() const {
return _regions;
}

// Used bytes of all survivor regions.
size_t used_bytes() const { return _used_bytes; }

void add_used_bytes(size_t used_bytes);
};

#endif // SHARE_GC_G1_G1SURVIVORREGIONS_HPP
@@ -30,14 +30,14 @@
#include "logging/log.hpp"

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
_adaptive_size(true), _min_desired_young_length(0), _max_desired_young_length(0) {
_use_adaptive_sizing(true), _min_desired_young_length(0), _max_desired_young_length(0) {

if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
_sizer_kind = SizerNewRatio;
_adaptive_size = false;
_use_adaptive_sizing = false;
return;
}
}
@@ -59,7 +59,7 @@ G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
1U);
_sizer_kind = SizerMaxAndNewSize;
_adaptive_size = _min_desired_young_length != _max_desired_young_length;
_use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
} else {
_sizer_kind = SizerNewSizeOnly;
}
@@ -77,7 +77,7 @@ private:

// False when using a fixed young generation size due to command-line options,
// true otherwise.
bool _adaptive_size;
bool _use_adaptive_sizing;

uint calculate_default_min_length(uint new_number_of_heap_regions);
uint calculate_default_max_length(uint new_number_of_heap_regions);
@@ -104,8 +104,8 @@ public:
return _max_desired_young_length;
}

bool adaptive_young_list_length() const {
return _adaptive_size;
bool use_adaptive_young_list_length() const {
return _use_adaptive_sizing;
}

static G1YoungGenSizer* create_gen_sizer(G1CollectorPolicy* policy);
@@ -165,7 +165,7 @@ void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1Policy* policy = g1h->policy();

if (policy->adaptive_young_list_length()) {
if (policy->use_adaptive_young_list_length()) {
G1YoungRemSetSamplingClosure cl(&sts);

G1CollectionSet* g1cs = g1h->collection_set();
@@ -117,6 +117,7 @@ void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
"Should not clear heap region %u in the collection set", hrm_index());

set_young_index_in_cset(-1);
clear_index_in_opt_cset();
uninstall_surv_rate_group();
set_free();
reset_pre_dummy_top();
@@ -241,7 +242,7 @@ HeapRegion::HeapRegion(uint hrm_index,
_containing_set(NULL),
#endif
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_index_in_opt_cset(G1OptionalCSet::InvalidCSetIndex), _young_index_in_cset(-1),
_index_in_opt_cset(InvalidCSetIndex), _young_index_in_cset(-1),
_surv_rate_group(NULL), _age_index(-1),
_prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
_recorded_rs_length(0), _predicted_elapsed_time_ms(0)
@@ -250,6 +250,8 @@ class HeapRegion: public G1ContiguousSpace {
// The calculated GC efficiency of the region.
double _gc_efficiency;

static const uint InvalidCSetIndex = UINT_MAX;

// The index in the optional regions array, if this region
// is considered optional during a mixed collections.
uint _index_in_opt_cset;
@@ -549,8 +551,13 @@ class HeapRegion: public G1ContiguousSpace {
void calc_gc_efficiency(void);
double gc_efficiency() const { return _gc_efficiency;}

uint index_in_opt_cset() const { return _index_in_opt_cset; }
uint index_in_opt_cset() const {
assert(has_index_in_opt_cset(), "Opt cset index not set.");
return _index_in_opt_cset;
}
bool has_index_in_opt_cset() const { return _index_in_opt_cset != InvalidCSetIndex; }
void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }

int young_index_in_cset() const { return _young_index_in_cset; }
void set_young_index_in_cset(int index) {
@@ -52,8 +52,8 @@
nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionManager, _num_committed, uint) \
\
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager*) \
volatile_nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager*) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _archive_set, HeapRegionSetBase) \
@@ -34,7 +34,7 @@ template <class T>
class WorkerDataArray : public CHeapObj<mtGC> {
friend class WDAPrinter;
public:
static const uint MaxThreadWorkItems = 4;
static const uint MaxThreadWorkItems = 5;
private:
T* _data;
uint _length;
@@ -50,6 +50,7 @@ private:
void set_thread_work_item(uint worker_i, size_t value, uint index = 0);
void add_thread_work_item(uint worker_i, size_t value, uint index = 0);
void set_or_add_thread_work_item(uint worker_i, size_t value, uint index = 0);
size_t get_thread_work_item(uint worker_i, uint index = 0);

WorkerDataArray<size_t>* thread_work_items(uint index = 0) const {
assert(index < MaxThreadWorkItems, "Tried to access thread work item %u max %u", index, MaxThreadWorkItems);
@@ -91,6 +91,13 @@ void WorkerDataArray<T>::set_or_add_thread_work_item(uint worker_i, size_t value
}
}

template <typename T>
size_t WorkerDataArray<T>::get_thread_work_item(uint worker_i, uint index) {
assert(index < MaxThreadWorkItems, "Tried to access thread work item %u (max %u)", index, MaxThreadWorkItems);
assert(_thread_work_items[index] != NULL, "No sub count");
return _thread_work_items[index]->get(worker_i);
}

template <typename T>
void WorkerDataArray<T>::add(uint worker_i, T value) {
assert(worker_i < _length, "Worker %d is greater than max: %d", worker_i, _length);
@@ -46,9 +46,9 @@ void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
bs->gen_pre_barrier_stub(ce, this);
}

void ShenandoahWriteBarrierStub::emit_code(LIR_Assembler* ce) {
void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
bs->gen_write_barrier_stub(ce, this);
bs->gen_load_reference_barrier_stub(ce, this);
}

void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
@@ -105,40 +105,16 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info,
__ branch_destination(slow->continuation());
}

LIR_Opr ShenandoahBarrierSetC1::read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
if (UseShenandoahGC && ShenandoahReadBarrier) {
return read_barrier_impl(gen, obj, info, need_null_check);
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
if (ShenandoahLoadRefBarrier) {
return load_reference_barrier_impl(gen, obj, info, need_null_check);
} else {
return obj;
}
}

LIR_Opr ShenandoahBarrierSetC1::read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "Should be enabled");
LabelObj* done = new LabelObj();
LIR_Opr result = gen->new_register(T_OBJECT);
__ move(obj, result);
if (need_null_check) {
__ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
__ branch(lir_cond_equal, T_LONG, done->label());
}
LIR_Address* brooks_ptr_address = gen->generate_address(result, ShenandoahBrooksPointer::byte_offset(), T_ADDRESS);
__ load(brooks_ptr_address, result, info ? new CodeEmitInfo(info) : NULL, lir_patch_none);

__ branch_destination(done->label());
return result;
}

LIR_Opr ShenandoahBarrierSetC1::write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
if (UseShenandoahGC && ShenandoahWriteBarrier) {
return write_barrier_impl(gen, obj, info, need_null_check);
} else {
return obj;
}
}

LIR_Opr ShenandoahBarrierSetC1::write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) {
assert(ShenandoahLoadRefBarrier, "Should be enabled");

obj = ensure_in_register(gen, obj);
assert(obj->is_register(), "must be a register at this point");
@@ -168,7 +144,7 @@ LIR_Opr ShenandoahBarrierSetC1::write_barrier_impl(LIRGenerator* gen, LIR_Opr ob
}
__ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

CodeStub* slow = new ShenandoahWriteBarrierStub(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check);
CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check);
__ branch(lir_cond_notEqual, T_INT, slow);
__ branch_destination(slow->continuation());

@@ -189,58 +165,13 @@ LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr ob
}

LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
bool need_null_check = (decorators & IS_NOT_NULL) == 0;
if (ShenandoahStoreValEnqueueBarrier) {
obj = write_barrier_impl(gen, obj, info, need_null_check);
obj = ensure_in_register(gen, obj);
pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
}
if (ShenandoahStoreValReadBarrier) {
obj = read_barrier_impl(gen, obj, info, true /*need_null_check*/);
}
return obj;
}

LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
DecoratorSet decorators = access.decorators();
bool is_array = (decorators & IS_ARRAY) != 0;
bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;

bool is_write = (decorators & ACCESS_WRITE) != 0;
bool needs_null_check = (decorators & IS_NOT_NULL) == 0;

LIR_Opr base = access.base().item().result();
LIR_Opr offset = access.offset().opr();
LIRGenerator* gen = access.gen();

if (is_write) {
base = write_barrier(gen, base, access.access_emit_info(), needs_null_check);
} else {
base = read_barrier(gen, base, access.access_emit_info(), needs_null_check);
}

LIR_Opr addr_opr;
if (is_array) {
addr_opr = LIR_OprFact::address(gen->emit_array_address(base, offset, access.type()));
} else if (needs_patching) {
// we need to patch the offset in the instruction so don't allow
// generate_address to try to be smart about emitting the -1.
// Otherwise the patching code won't know how to find the
// instruction to patch.
addr_opr = LIR_OprFact::address(new LIR_Address(base, PATCHED_ADDR, access.type()));
} else {
addr_opr = LIR_OprFact::address(gen->generate_address(base, offset, 0, 0, access.type()));
}

if (resolve_in_register) {
LIR_Opr resolved_addr = gen->new_pointer_register();
__ leal(addr_opr, resolved_addr);
resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
return resolved_addr;
} else {
return addr_opr;
}
}

void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
if (access.is_oop()) {
if (ShenandoahSATBBarrier) {
@@ -252,15 +183,28 @@ void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value)
}

void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
BarrierSetC1::load_at_resolved(access, result);
if (!access.is_oop()) {
BarrierSetC1::load_at_resolved(access, result);
return;
}

LIRGenerator *gen = access.gen();

if (ShenandoahLoadRefBarrier) {
LIR_Opr tmp = gen->new_register(T_OBJECT);
BarrierSetC1::load_at_resolved(access, tmp);
tmp = load_reference_barrier(access.gen(), tmp, access.access_emit_info(), true);
__ move(tmp, result);
} else {
BarrierSetC1::load_at_resolved(access, result);
}

if (ShenandoahKeepAliveBarrier) {
DecoratorSet decorators = access.decorators();
bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
LIRGenerator *gen = access.gen();
if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
if (is_weak || is_phantom || is_anonymous) {
// Register the value in the referent field with the pre-barrier
LabelObj *Lcont_anonymous;
if (is_anonymous) {
@@ -276,19 +220,6 @@ void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result)
}
}

LIR_Opr ShenandoahBarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
return BarrierSetC1::atomic_add_at_resolved(access, value);
}

LIR_Opr ShenandoahBarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
bool is_write = decorators & ACCESS_WRITE;
if (is_write) {
return write_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0);
} else {
return read_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0);
}
}

class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
@@ -85,7 +85,7 @@ public:
#endif // PRODUCT
};

class ShenandoahWriteBarrierStub: public CodeStub {
class ShenandoahLoadReferenceBarrierStub: public CodeStub {
friend class ShenandoahBarrierSetC1;
private:
LIR_Opr _obj;
@@ -94,7 +94,7 @@ private:
bool _needs_null_check;

public:
ShenandoahWriteBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) :
ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) :
_obj(obj), _result(result), _info(info), _needs_null_check(needs_null_check)
{
assert(_obj->is_register(), "should be register");
@@ -113,7 +113,7 @@ public:
visitor->do_temp(_result);
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("ShenandoahWritePreBarrierStub"); }
virtual void print_name(outputStream* out) const { out->print("ShenandoahLoadReferenceBarrierStub"); }
#endif // PRODUCT
};

@@ -181,12 +181,10 @@ private:

void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val);

LIR_Opr read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
LIR_Opr write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators);

LIR_Opr read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
LIR_Opr write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);
LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check);

LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj);

@@ -194,7 +192,6 @@ public:
CodeBlob* pre_barrier_c1_runtime_code_blob() { return _pre_barrier_c1_runtime_code_blob; }

protected:
virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register);

virtual void store_at_resolved(LIRAccess& access, LIR_Opr value);
virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
@@ -202,10 +199,8 @@ protected:
virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);

virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
virtual LIR_Opr atomic_add_at_resolved(LIRAccess& access, LIRItem& value);

public:
virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj);

virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob);
};
@@ -43,121 +43,56 @@ ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
: _shenandoah_barriers(new (comp_arena) GrowableArray<ShenandoahWriteBarrierNode*>(comp_arena, 8, 0, NULL)) {
: _enqueue_barriers(new (comp_arena) GrowableArray<ShenandoahEnqueueBarrierNode*>(comp_arena, 8, 0, NULL)),
_load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
}

int ShenandoahBarrierSetC2State::shenandoah_barriers_count() const {
return _shenandoah_barriers->length();
int ShenandoahBarrierSetC2State::enqueue_barriers_count() const {
return _enqueue_barriers->length();
}

ShenandoahWriteBarrierNode* ShenandoahBarrierSetC2State::shenandoah_barrier(int idx) const {
return _shenandoah_barriers->at(idx);
ShenandoahEnqueueBarrierNode* ShenandoahBarrierSetC2State::enqueue_barrier(int idx) const {
return _enqueue_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_shenandoah_barrier(ShenandoahWriteBarrierNode * n) {
assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list");
_shenandoah_barriers->append(n);
void ShenandoahBarrierSetC2State::add_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) {
assert(!_enqueue_barriers->contains(n), "duplicate entry in barrier list");
_enqueue_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n) {
if (_shenandoah_barriers->contains(n)) {
_shenandoah_barriers->remove(n);
void ShenandoahBarrierSetC2State::remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) {
if (_enqueue_barriers->contains(n)) {
_enqueue_barriers->remove(n);
}
}

#define __ kit->
int ShenandoahBarrierSetC2State::load_reference_barriers_count() const {
return _load_reference_barriers->length();
}

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier(GraphKit* kit, Node* obj) const {
if (ShenandoahReadBarrier) {
obj = shenandoah_read_barrier_impl(kit, obj, false, true, true);
ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const {
return _load_reference_barriers->at(idx);
}

void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) {
assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list");
_load_reference_barriers->append(n);
}

void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) {
if (_load_reference_barriers->contains(n)) {
_load_reference_barriers->remove(n);
}
return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const {
if (ShenandoahStoreValEnqueueBarrier) {
obj = shenandoah_write_barrier(kit, obj);
obj = shenandoah_enqueue_barrier(kit, obj);
}
if (ShenandoahStoreValReadBarrier) {
obj = shenandoah_read_barrier_impl(kit, obj, true, false, false);
}
return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const {
const Type* obj_type = obj->bottom_type();
if (obj_type->higher_equal(TypePtr::NULL_PTR)) {
return obj;
}
const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
Node* mem = use_mem ? __ memory(adr_type) : __ immutable_memory();

if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, mem, allow_fromspace)) {
// We know it is null, no barrier needed.
return obj;
}

if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) {

// We don't know if it's null or not. Need null-check.
enum { _not_null_path = 1, _null_path, PATH_LIMIT };
RegionNode* region = new RegionNode(PATH_LIMIT);
Node* phi = new PhiNode(region, obj_type);
Node* null_ctrl = __ top();
Node* not_null_obj = __ null_check_oop(obj, &null_ctrl);

region->init_req(_null_path, null_ctrl);
phi ->init_req(_null_path, __ zerocon(T_OBJECT));

Node* ctrl = use_ctrl ? __ control() : NULL;
ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj, allow_fromspace);
Node* n = __ gvn().transform(rb);

region->init_req(_not_null_path, __ control());
phi ->init_req(_not_null_path, n);

__ set_control(__ gvn().transform(region));
__ record_for_igvn(region);
return __ gvn().transform(phi);

} else {
// We know it is not null. Simple barrier is sufficient.
Node* ctrl = use_ctrl ? __ control() : NULL;
ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj, allow_fromspace);
Node* n = __ gvn().transform(rb);
__ record_for_igvn(n);
return n;
}
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const {
ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(kit->C, kit->control(), kit->memory(adr_type), obj);
Node* n = __ gvn().transform(wb);
if (n == wb) { // New barrier needs memory projection.
Node* proj = __ gvn().transform(new ShenandoahWBMemProjNode(n));
__ set_memory(proj, adr_type);
}
return n;
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier(GraphKit* kit, Node* obj) const {
if (ShenandoahWriteBarrier) {
obj = shenandoah_write_barrier_impl(kit, obj);
}
return obj;
}

Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const {
if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, NULL, true)) {
return obj;
}
const Type* obj_type = obj->bottom_type();
const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type);
Node* n = shenandoah_write_barrier_helper(kit, obj, adr_type);
__ record_for_igvn(n);
return n;
}
#define __ kit->

bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr,
BasicType bt, uint adr_idx) const {
@@ -304,7 +239,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset())));
Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw);
marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING));
assert(ShenandoahWriteBarrierNode::is_gc_state_load(ld), "Should match the shape");
assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape");

// if (!marking)
__ if_then(marking, BoolTest::ne, zero, unlikely); {
@@ -361,7 +296,7 @@ bool ShenandoahBarrierSetC2::is_shenandoah_wb_pre_call(Node* call) {

bool ShenandoahBarrierSetC2::is_shenandoah_wb_call(Node* call) {
return call->is_CallLeaf() &&
call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT);
call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT);
}

bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) {
@@ -549,88 +484,6 @@ const TypeFunc* ShenandoahBarrierSetC2::shenandoah_write_barrier_Type() {
return TypeFunc::make(domain, range);
}

void ShenandoahBarrierSetC2::resolve_address(C2Access& access) const {
const TypePtr* adr_type = access.addr().type();

if ((access.decorators() & IN_NATIVE) == 0 && (adr_type->isa_instptr() || adr_type->isa_aryptr())) {
int off = adr_type->is_ptr()->offset();
int base_off = adr_type->isa_instptr() ? instanceOopDesc::base_offset_in_bytes() :
arrayOopDesc::base_offset_in_bytes(adr_type->is_aryptr()->elem()->array_element_basic_type());
assert(off != Type::OffsetTop, "unexpected offset");
if (off == Type::OffsetBot || off >= base_off) {
DecoratorSet decorators = access.decorators();
bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
GraphKit* kit = NULL;
if (access.is_parse_access()) {
C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
kit = parse_access.kit();
}
Node* adr = access.addr().node();
assert(adr->is_AddP(), "unexpected address shape");
Node* base = adr->in(AddPNode::Base);

if (is_write) {
if (kit != NULL) {
base = shenandoah_write_barrier(kit, base);
} else {
assert(access.is_opt_access(), "either parse or opt access");
assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for clone");
}
} else {
if (adr_type->isa_instptr()) {
Compile* C = access.gvn().C;
ciField* field = C->alias_type(adr_type)->field();

// Insert read barrier for Shenandoah.
if (field != NULL &&
((ShenandoahOptimizeStaticFinals && field->is_static() && field->is_final()) ||
(ShenandoahOptimizeInstanceFinals && !field->is_static() && field->is_final()) ||
(ShenandoahOptimizeStableFinals && field->is_stable()))) {
// Skip the barrier for special fields
} else {
if (kit != NULL) {
base = shenandoah_read_barrier(kit, base);
} else {
assert(access.is_opt_access(), "either parse or opt access");
assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
}
}
} else {
if (kit != NULL) {
base = shenandoah_read_barrier(kit, base);
} else {
assert(access.is_opt_access(), "either parse or opt access");
assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy");
}
}
}
if (base != adr->in(AddPNode::Base)) {
assert(kit != NULL, "no barrier should have been added");

Node* address = adr->in(AddPNode::Address);

if (address->is_AddP()) {
assert(address->in(AddPNode::Base) == adr->in(AddPNode::Base), "unexpected address shape");
assert(!address->in(AddPNode::Address)->is_AddP(), "unexpected address shape");
assert(address->in(AddPNode::Address) == adr->in(AddPNode::Base), "unexpected address shape");
address = address->clone();
address->set_req(AddPNode::Base, base);
address->set_req(AddPNode::Address, base);
address = kit->gvn().transform(address);
} else {
assert(address == adr->in(AddPNode::Base), "unexpected address shape");
address = base;
}
adr = adr->clone();
adr->set_req(AddPNode::Base, base);
adr->set_req(AddPNode::Address, address);
adr = kit->gvn().transform(adr);
access.addr().set_node(adr);
}
}
}
}

Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
DecoratorSet decorators = access.decorators();

@@ -662,44 +515,8 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue&
PhaseGVN& gvn = opt_access.gvn();
MergeMemNode* mm = opt_access.mem();

if (ShenandoahStoreValReadBarrier) {
RegionNode* region = new RegionNode(3);
const Type* v_t = gvn.type(val.node());
Node* phi = new PhiNode(region, v_t->isa_oopptr() ? v_t->is_oopptr()->cast_to_nonconst() : v_t);
Node* cmp = gvn.transform(new CmpPNode(val.node(), gvn.zerocon(T_OBJECT)));
Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::ne));
IfNode* iff = new IfNode(opt_access.ctl(), bol, PROB_LIKELY_MAG(3), COUNT_UNKNOWN);

gvn.transform(iff);
if (gvn.is_IterGVN()) {
gvn.is_IterGVN()->_worklist.push(iff);
} else {
gvn.record_for_igvn(iff);
}

Node* null_true = gvn.transform(new IfFalseNode(iff));
Node* null_false = gvn.transform(new IfTrueNode(iff));
region->init_req(1, null_true);
region->init_req(2, null_false);
phi->init_req(1, gvn.zerocon(T_OBJECT));
Node* cast = new CastPPNode(val.node(), gvn.type(val.node())->join_speculative(TypePtr::NOTNULL));
cast->set_req(0, null_false);
cast = gvn.transform(cast);
Node* rb = gvn.transform(new ShenandoahReadBarrierNode(null_false, gvn.C->immutable_memory(), cast, false));
phi->init_req(2, rb);
opt_access.set_ctl(gvn.transform(region));
val.set_node(gvn.transform(phi));
}
if (ShenandoahStoreValEnqueueBarrier) {
const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(gvn.type(val.node()));
int alias = gvn.C->get_alias_index(adr_type);
Node* wb = new ShenandoahWriteBarrierNode(gvn.C, opt_access.ctl(), mm->memory_at(alias), val.node());
Node* wb_transformed = gvn.transform(wb);
Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(wb_transformed));
if (wb_transformed == wb) {
Node* proj = gvn.transform(new ShenandoahWBMemProjNode(wb));
mm->set_memory_at(alias, proj);
}
Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node()));
val.set_node(enqueue);
}
}
@@ -724,6 +541,17 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val
Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
Node* load = BarrierSetC2::load_at_resolved(access, val_type);

if (access.is_oop()) {
if (ShenandoahLoadRefBarrier) {
load = new ShenandoahLoadReferenceBarrierNode(NULL, load);
if (access.is_parse_access()) {
load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
} else {
load = static_cast<C2OptAccess &>(access).gvn().transform(load);
}
}
}

// If we are reading the value of the referent field of a Reference
// object (either by using Unsafe directly or through reflection)
// then, if SATB is enabled, we need to record the referent in an
@@ -797,9 +625,10 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess

#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
}
#endif
load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store));
return load_store;
}
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
@@ -867,6 +696,7 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces
}
Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
if (access.is_oop()) {
result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result));
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
result /* pre_val */, T_OBJECT);
@@ -876,19 +706,9 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces

void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
assert(!src->is_AddP(), "unexpected input");
src = shenandoah_read_barrier(kit, src);
BarrierSetC2::clone(kit, src, dst, size, is_array);
}

Node* ShenandoahBarrierSetC2::resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const {
bool is_write = decorators & ACCESS_WRITE;
if (is_write) {
return shenandoah_write_barrier(kit, n);
} else {
return shenandoah_read_barrier(kit, n);
}
}

Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
Node*& i_o, Node*& needgc_ctrl,
Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
@@ -915,6 +735,7 @@ Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl,

// Support for GC barriers emitted during parsing
bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true;
if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) {
return false;
}
@@ -929,26 +750,30 @@ bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
return ShenandoahBarrierNode::skip_through_barrier(c);
if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
}
if (c->Opcode() == Op_ShenandoahEnqueueBarrier) {
c = c->in(1);
}
return c;
}

bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
return !ShenandoahWriteBarrierNode::expand(C, igvn);
return !ShenandoahBarrierC2Support::expand(C, igvn);
}

bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const {
if (mode == LoopOptsShenandoahExpand) {
assert(UseShenandoahGC, "only for shenandoah");
ShenandoahWriteBarrierNode::pin_and_expand(phase);
ShenandoahBarrierC2Support::pin_and_expand(phase);
return true;
} else if (mode == LoopOptsShenandoahPostExpand) {
assert(UseShenandoahGC, "only for shenandoah");
visited.Clear();
ShenandoahWriteBarrierNode::optimize_after_expansion(visited, nstack, worklist, phase);
ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase);
return true;
}
GrowableArray<MemoryGraphFixer*> memory_graph_fixers;
ShenandoahWriteBarrierNode::optimize_before_expansion(phase, memory_graph_fixers, false);
return false;
}

@@ -957,7 +782,6 @@ bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_couple
if (!is_oop) {
return false;
}

if (tightly_coupled_alloc) {
if (phase == Optimization) {
return false;
@@ -985,7 +809,7 @@ bool ShenandoahBarrierSetC2::clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIte
}
} else {
return true;
}
}
} else if (src_type->isa_aryptr()) {
BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type();
if (src_elem == T_OBJECT || src_elem == T_ARRAY) {
@@ -1038,14 +862,20 @@ void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node*

// Support for macro expanded GC barriers
void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const {
if (node->Opcode() == Op_ShenandoahWriteBarrier) {
state()->add_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
state()->add_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
}
if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
}
}

void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const {
if (node->Opcode() == Op_ShenandoahWriteBarrier) {
state()->remove_shenandoah_barrier((ShenandoahWriteBarrierNode*) node);
if (node->Opcode() == Op_ShenandoahEnqueueBarrier) {
state()->remove_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node);
}
if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node);
}
}

@@ -1091,19 +921,18 @@ void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &use
}
}
}
for (int i = state()->shenandoah_barriers_count()-1; i >= 0; i--) {
ShenandoahWriteBarrierNode* n = state()->shenandoah_barrier(i);
for (int i = state()->enqueue_barriers_count() - 1; i >= 0; i--) {
ShenandoahEnqueueBarrierNode* n = state()->enqueue_barrier(i);
if (!useful.member(n)) {
state()->remove_shenandoah_barrier(n);
state()->remove_enqueue_barrier(n);
}
}
for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) {
ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i);
if (!useful.member(n)) {
state()->remove_load_reference_barrier(n);
}
}

}

bool ShenandoahBarrierSetC2::has_special_unique_user(const Node* node) const {
assert(node->outcnt() == 1, "match only for unique out");
Node* n = node->unique_out();
return node->Opcode() == Op_ShenandoahWriteBarrier && n->Opcode() == Op_ShenandoahWBMemProj;
}

void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}
@@ -1123,7 +952,7 @@ bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
#ifdef ASSERT
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) {
ShenandoahBarrierNode::verify(Compile::current()->root());
ShenandoahBarrierC2Support::verify(Compile::current()->root());
} else if (phase == BarrierSetC2::BeforeCodeGen) {
// Verify G1 pre-barriers
const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset());
@@ -1229,7 +1058,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
}
} else if (can_reshape &&
n->Opcode() == Op_If &&
ShenandoahWriteBarrierNode::is_heap_stable_test(n) &&
ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
n->in(0) != NULL) {
Node* dom = n->in(0);
Node* prev_dom = n;
@@ -1237,7 +1066,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
int dist = 16;
// Search up the dominator tree for another heap stable test
while (dom->Opcode() != op || // Not same opcode?
!ShenandoahWriteBarrierNode::is_heap_stable_test(dom) || // Not same input 1?
!ShenandoahBarrierC2Support::is_heap_stable_test(dom) || // Not same input 1?
prev_dom->in(0) != dom) { // One path of test does not dominate?
if (dist < 0) return NULL;

@@ -1258,46 +1087,6 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
return NULL;
}

Node* ShenandoahBarrierSetC2::identity_node(PhaseGVN* phase, Node* n) const {
if (n->is_Load()) {
Node *mem = n->in(MemNode::Memory);
Node *value = n->as_Load()->can_see_stored_value(mem, phase);
if (value) {
PhaseIterGVN *igvn = phase->is_IterGVN();
if (igvn != NULL &&
value->is_Phi() &&
value->req() > 2 &&
value->in(1) != NULL &&
value->in(1)->is_ShenandoahBarrier()) {
if (igvn->_worklist.member(value) ||
igvn->_worklist.member(value->in(0)) ||
(value->in(0)->in(1) != NULL &&
value->in(0)->in(1)->is_IfProj() &&
(igvn->_worklist.member(value->in(0)->in(1)) ||
(value->in(0)->in(1)->in(0) != NULL &&
igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) {
igvn->_worklist.push(n);
return n;
}
}
// (This works even when value is a Con, but LoadNode::Value
// usually runs first, producing the singleton type of the Con.)
Node *value_no_barrier = step_over_gc_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value);
if (value->Opcode() == Op_EncodeP) {
if (value_no_barrier != value->in(1)) {
Node *encode = value->clone();
encode->set_req(1, value_no_barrier);
encode = phase->transform(encode);
return encode;
}
} else {
return value_no_barrier;
}
}
}
return n;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* u = n->fast_out(i);
@@ -1308,20 +1097,6 @@ bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
return n->outcnt() > 0;
}

bool ShenandoahBarrierSetC2::flatten_gc_alias_type(const TypePtr*& adr_type) const {
int offset = adr_type->offset();
if (offset == ShenandoahBrooksPointer::byte_offset()) {
if (adr_type->isa_aryptr()) {
adr_type = TypeAryPtr::make(adr_type->ptr(), adr_type->isa_aryptr()->ary(), adr_type->isa_aryptr()->klass(), false, offset);
} else if (adr_type->isa_instptr()) {
adr_type = TypeInstPtr::make(adr_type->ptr(), ciEnv::current()->Object_klass(), false, NULL, offset);
}
return true;
} else {
return false;
}
}

bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
switch (opcode) {
case Op_CallLeaf:
@@ -1356,9 +1131,7 @@ bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, ui
}
#endif
return true;
case Op_ShenandoahReadBarrier:
return true;
case Op_ShenandoahWriteBarrier:
case Op_ShenandoahLoadReferenceBarrier:
assert(false, "should have been expanded already");
return true;
default:
@@ -1366,17 +1139,6 @@ bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, ui
}
}

#ifdef ASSERT
bool ShenandoahBarrierSetC2::verify_gc_alias_type(const TypePtr* adr_type, int offset) const {
if (offset == ShenandoahBrooksPointer::byte_offset() &&
(adr_type->base() == Type::AryPtr || adr_type->base() == Type::OopPtr)) {
return true;
} else {
return false;
}
}
#endif

bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
switch (opcode) {
case Op_ShenandoahCompareAndExchangeP:
@@ -1412,15 +1174,12 @@ bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph
}
return false;
}
case Op_ShenandoahReadBarrier:
case Op_ShenandoahWriteBarrier:
// Barriers 'pass through' its arguments. I.e. what goes in, comes out.
// It doesn't escape.
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), delayed_worklist);
break;
case Op_ShenandoahEnqueueBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
break;
case Op_ShenandoahLoadReferenceBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist);
return true;
default:
// Nothing
break;
@@ -1441,15 +1200,12 @@ bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph,
case Op_ShenandoahWeakCompareAndSwapP:
case Op_ShenandoahWeakCompareAndSwapN:
return conn_graph->add_final_edges_unsafe_access(n, opcode);
case Op_ShenandoahReadBarrier:
case Op_ShenandoahWriteBarrier:
// Barriers 'pass through' its arguments. I.e. what goes in, comes out.
// It doesn't escape.
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), NULL);
return true;
case Op_ShenandoahEnqueueBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
return true;
case Op_ShenandoahLoadReferenceBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
return true;
default:
// Nothing
break;
@@ -1464,21 +1220,7 @@ bool ShenandoahBarrierSetC2::escape_has_out_with_unsafe_object(Node* n) const {
}

bool ShenandoahBarrierSetC2::escape_is_barrier_node(Node* n) const {
return n->is_ShenandoahBarrier();
}

bool ShenandoahBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
switch (opcode) {
case Op_ShenandoahReadBarrier:
if (n->in(ShenandoahBarrierNode::ValueIn)->is_DecodeNarrowPtr()) {
matcher->set_shared(n->in(ShenandoahBarrierNode::ValueIn)->in(1));
}
matcher->set_shared(n);
return true;
default:
break;
}
return false;
return n->Opcode() == Op_ShenandoahLoadReferenceBarrier;
}

bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const {
@@ -1510,62 +1252,3 @@ bool ShenandoahBarrierSetC2::matcher_is_store_load_barrier(Node* x, uint xop) co
xop == Op_ShenandoahCompareAndSwapN ||
xop == Op_ShenandoahCompareAndSwapP;
}

void ShenandoahBarrierSetC2::igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const {
if (use->is_ShenandoahBarrier()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
Node* cmp = use->find_out_with(Op_CmpP);
if (u->Opcode() == Op_CmpP) {
igvn->_worklist.push(cmp);
}
}
}
}

void ShenandoahBarrierSetC2::ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {
if (use->is_ShenandoahBarrier()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* p = use->fast_out(i2);
if (p->Opcode() == Op_AddP) {
for (DUIterator_Fast i3max, i3 = p->fast_outs(i3max); i3 < i3max; i3++) {
Node* q = p->fast_out(i3);
if (q->is_Load()) {
if(q->bottom_type() != ccp->type(q)) {
worklist.push(q);
}
}
}
}
}
}
}

Node* ShenandoahBarrierSetC2::split_if_pre(PhaseIdealLoop* phase, Node* n) const {
if (n->Opcode() == Op_ShenandoahReadBarrier) {
((ShenandoahReadBarrierNode*)n)->try_move(phase);
} else if (n->Opcode() == Op_ShenandoahWriteBarrier) {
return ((ShenandoahWriteBarrierNode*)n)->try_split_thru_phi(phase);
}

return NULL;
}

bool ShenandoahBarrierSetC2::build_loop_late_post(PhaseIdealLoop* phase, Node* n) const {
return ShenandoahBarrierNode::build_loop_late_post(phase, n);
}

bool ShenandoahBarrierSetC2::sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const {
if (n->is_ShenandoahBarrier()) {
return x->as_ShenandoahBarrier()->sink_node(phase, x_ctrl, n_ctrl);
}
if (n->is_MergeMem()) {
// PhaseIdealLoop::split_if_with_blocks_post() would:
// _igvn._worklist.yank(x);
// which sometimes causes chains of MergeMem which some of
// shenandoah specific code doesn't support
phase->register_new_node(x, x_ctrl);
return true;
}
return false;
}
@@ -30,14 +30,21 @@

 class ShenandoahBarrierSetC2State : public ResourceObj {
 private:
-  GrowableArray<ShenandoahWriteBarrierNode*>* _shenandoah_barriers;
+  GrowableArray<ShenandoahEnqueueBarrierNode*>* _enqueue_barriers;
+  GrowableArray<ShenandoahLoadReferenceBarrierNode*>* _load_reference_barriers;

 public:
   ShenandoahBarrierSetC2State(Arena* comp_arena);
-  int shenandoah_barriers_count() const;
-  ShenandoahWriteBarrierNode* shenandoah_barrier(int idx) const;
-  void add_shenandoah_barrier(ShenandoahWriteBarrierNode * n);
-  void remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n);
+
+  int enqueue_barriers_count() const;
+  ShenandoahEnqueueBarrierNode* enqueue_barrier(int idx) const;
+  void add_enqueue_barrier(ShenandoahEnqueueBarrierNode* n);
+  void remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n);
+
+  int load_reference_barriers_count() const;
+  ShenandoahLoadReferenceBarrierNode* load_reference_barrier(int idx) const;
+  void add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n);
+  void remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n);
 };

 class ShenandoahBarrierSetC2 : public BarrierSetC2 {
@@ -66,12 +73,7 @@ private:
                                BasicType bt) const;

   Node* shenandoah_enqueue_barrier(GraphKit* kit, Node* val) const;
-  Node* shenandoah_read_barrier(GraphKit* kit, Node* obj) const;
   Node* shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const;
-  Node* shenandoah_write_barrier(GraphKit* kit, Node* obj) const;
-  Node* shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const;
-  Node* shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const;
-  Node* shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const;

   void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                           Node* pre_val, bool need_mem_bar) const;
@@ -79,7 +81,6 @@ private:
   static bool clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIterGVN& igvn);

 protected:
   virtual void resolve_address(C2Access& access) const;
   virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
   virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
   virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
@@ -102,12 +103,11 @@ public:
   static const TypeFunc* write_ref_field_pre_entry_Type();
   static const TypeFunc* shenandoah_clone_barrier_Type();
-  static const TypeFunc* shenandoah_write_barrier_Type();
+  virtual bool has_load_barriers() const { return true; }

   // This is the entry-point for the backend to perform accesses through the Access API.
   virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;

   virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const;

   virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
                              Node*& i_o, Node*& needgc_ctrl,
                              Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
@@ -144,13 +144,7 @@ public:
   virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
 #endif
-
-  virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const;
-#ifdef ASSERT
-  virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const;
-#endif
-
   virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const;
   virtual Node* identity_node(PhaseGVN* phase, Node* n) const;
   virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const;

   virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
@@ -158,17 +152,8 @@ public:
   virtual bool escape_has_out_with_unsafe_object(Node* n) const;
   virtual bool escape_is_barrier_node(Node* n) const;
-
-  virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const;
   virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const;
   virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const;
-
-  virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const;
-  virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const;
-
   virtual bool has_special_unique_user(const Node* node) const;
-  virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const;
-  virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const;
-  virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const;
 };

 #endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
File diff suppressed because it is too large
@@ -36,10 +36,8 @@
 class PhaseGVN;
 class MemoryGraphFixer;

-class ShenandoahBarrierNode : public TypeNode {
+class ShenandoahBarrierC2Support : public AllStatic {
 private:
-  bool _allow_fromspace;
-
 #ifdef ASSERT
   enum verify_type {
     ShenandoahLoad,
@@ -50,204 +48,49 @@ private:
  };

  static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used);
#endif

public:
  enum { Control,
         Memory,
         ValueIn
  };

  ShenandoahBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace)
    : TypeNode(obj->bottom_type()->isa_oopptr() ? obj->bottom_type()->is_oopptr()->cast_to_nonconst() : obj->bottom_type(), 3),
      _allow_fromspace(allow_fromspace) {

    init_req(Control, ctrl);
    init_req(Memory, mem);
    init_req(ValueIn, obj);

    init_class_id(Class_ShenandoahBarrier);
  }

  static Node* skip_through_barrier(Node* n);

  static const TypeOopPtr* brooks_pointer_type(const Type* t) {
    return t->is_oopptr()->cast_to_nonconst()->add_offset(ShenandoahBrooksPointer::byte_offset())->is_oopptr();
  }

  virtual const TypePtr* adr_type() const {
    if (bottom_type() == Type::TOP) {
      return NULL;
    }
    //const TypePtr* adr_type = in(MemNode::Address)->bottom_type()->is_ptr();
    const TypePtr* adr_type = brooks_pointer_type(bottom_type());
    assert(adr_type->offset() == ShenandoahBrooksPointer::byte_offset(), "sane offset");
    assert(Compile::current()->alias_type(adr_type)->is_rewritable(), "brooks ptr must be rewritable");
    return adr_type;
  }

  virtual uint ideal_reg() const { return Op_RegP; }
  virtual uint match_edge(uint idx) const {
    return idx >= ValueIn;
  }

  Node* Identity_impl(PhaseGVN* phase);

  virtual const Type* Value(PhaseGVN* phase) const;
  virtual bool depends_only_on_test() const {
    return true;
  };

  static bool needs_barrier(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace);

#ifdef ASSERT
  static void report_verify_failure(const char* msg, Node* n1 = NULL, Node* n2 = NULL);
  static void verify(RootNode* root);
  static void verify_raw_mem(RootNode* root);
#endif
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif

  // protected:
  static Node* dom_mem(Node* mem, Node*& mem_ctrl, Node* n, Node* rep_ctrl, int alias, PhaseIdealLoop* phase);
  static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase);
  static bool is_dominator(Node *d_c, Node *n_c, Node* d, Node* n, PhaseIdealLoop* phase);
  static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase);
  static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase);
  static bool build_loop_late_post(PhaseIdealLoop* phase, Node* n);
  bool sink_node(PhaseIdealLoop* phase, Node* ctrl, Node* n_ctrl);

protected:
  uint hash() const;
  bool cmp(const Node& n) const;
  uint size_of() const;

private:
  static bool needs_barrier_impl(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace, Unique_Node_List &visited);

  static bool dominates_memory(PhaseGVN* phase, Node* b1, Node* b2, bool linear);
  static bool dominates_memory_impl(PhaseGVN* phase, Node* b1, Node* b2, Node* current, bool linear);
};

class ShenandoahReadBarrierNode : public ShenandoahBarrierNode {
public:
  ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj)
    : ShenandoahBarrierNode(ctrl, mem, obj, true) {
    assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier ||
                               ShenandoahWriteBarrier || ShenandoahAcmpBarrier),
           "should be enabled");
  }
  ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace)
    : ShenandoahBarrierNode(ctrl, mem, obj, allow_fromspace) {
    assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier ||
                               ShenandoahWriteBarrier || ShenandoahAcmpBarrier),
           "should be enabled");
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase);
  virtual int Opcode() const;

  bool is_independent(Node* mem);

  void try_move(PhaseIdealLoop* phase);

private:
  static bool is_independent(const Type* in_type, const Type* this_type);
  static bool dominates_memory_rb(PhaseGVN* phase, Node* b1, Node* b2, bool linear);
  static bool dominates_memory_rb_impl(PhaseGVN* phase, Node* b1, Node* b2, Node* current, bool linear);
};

class ShenandoahWriteBarrierNode : public ShenandoahBarrierNode {
public:
  ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj);

  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node* Identity(PhaseGVN* phase);
  virtual bool depends_only_on_test() const { return false; }

  static bool expand(Compile* C, PhaseIterGVN& igvn);
  static bool is_gc_state_load(Node *n);
  static bool is_heap_state_test(Node* iff, int mask);
  static bool is_heap_stable_test(Node* iff);
  static bool try_common_gc_state_load(Node *n, PhaseIdealLoop *phase);
  static bool has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase);

  static LoopNode* try_move_before_pre_loop(Node* c, Node* val_ctrl, PhaseIdealLoop* phase);
  static Node* move_above_predicates(LoopNode* cl, Node* val_ctrl, PhaseIdealLoop* phase);
#ifdef ASSERT
  static bool memory_dominates_all_paths(Node* mem, Node* rep_ctrl, int alias, PhaseIdealLoop* phase);
  static void memory_dominates_all_paths_helper(Node* c, Node* rep_ctrl, Unique_Node_List& controls, PhaseIdealLoop* phase);
#endif
  void try_move_before_loop(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses);
  void try_move_before_loop_helper(LoopNode* cl, Node* val_ctrl, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses);
  static void pin_and_expand(PhaseIdealLoop* phase);
  CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn);
  void pin_and_expand_move_barrier(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, Unique_Node_List& uses);
  void pin_and_expand_helper(PhaseIdealLoop* phase);
  static Node* find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase);
  static void follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase);
  static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase);

  static void test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
                               PhaseIdealLoop* phase);
  static void call_wb_stub(Node*& ctrl, Node*& val, Node*& result_mem,
                           Node* raw_mem, Node* wb_mem, int alias,
                           PhaseIdealLoop* phase);
  static void call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase);
  static Node* clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase);
  static void fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses,
                             PhaseIdealLoop* phase);
  static void in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
  static void move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase);

  static void optimize_after_expansion(VectorSet &visited, Node_Stack &nstack, Node_List &old_new, PhaseIdealLoop* phase);
  static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase);
  static bool identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase);
  static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);

  static void optimize_before_expansion(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*> memory_graph_fixers, bool include_lsm);
  Node* would_subsume(ShenandoahBarrierNode* other, PhaseIdealLoop* phase);
  static IfNode* find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase);

  Node* try_split_thru_phi(PhaseIdealLoop* phase);
};

class ShenandoahWBMemProjNode : public Node {
public:
  enum { Control,
         WriteBarrier };
  static bool is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase);
  static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase);

  ShenandoahWBMemProjNode(Node *src) : Node(NULL, src) {
    assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled");
    assert(src->Opcode() == Op_ShenandoahWriteBarrier || src->is_Mach(), "expect wb");
  }
  virtual Node* Identity(PhaseGVN* phase);
  static bool is_gc_state_load(Node* n);
  static bool is_heap_stable_test(Node* iff);

  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const {
    Node* wb = in(WriteBarrier);
    if (wb == NULL || wb->is_top()) return NULL; // node is dead
    assert(wb->Opcode() == Op_ShenandoahWriteBarrier || (wb->is_Mach() && wb->as_Mach()->ideal_Opcode() == Op_ShenandoahWriteBarrier) || wb->is_Phi(), "expect wb");
    return ShenandoahBarrierNode::brooks_pointer_type(wb->bottom_type());
  }
  static bool expand(Compile* C, PhaseIterGVN& igvn);
  static void pin_and_expand(PhaseIdealLoop* phase);
  static void optimize_after_expansion(VectorSet& visited, Node_Stack& nstack, Node_List& old_new, PhaseIdealLoop* phase);

  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value(PhaseGVN* phase ) const {
    return bottom_type();
  }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#ifdef ASSERT
  static void verify(RootNode* root);
#endif
};

class ShenandoahEnqueueBarrierNode : public Node {
public:
  ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
  }
  ShenandoahEnqueueBarrierNode(Node* val);

  const Type *bottom_type() const;
  const Type* Value(PhaseGVN* phase) const;
@@ -289,7 +132,6 @@ public:
   Node* find_mem(Node* ctrl, Node* n) const;
   void fix_mem(Node* ctrl, Node* region, Node* mem, Node* mem_for_ctrl, Node* mem_phi, Unique_Node_List& uses);
   int alias() const { return _alias; }
-  void remove(Node* n);
 };

 class ShenandoahCompareAndSwapPNode : public CompareAndSwapPNode {
@@ -382,4 +224,41 @@ public:
   virtual int Opcode() const;
 };

+class ShenandoahLoadReferenceBarrierNode : public Node {
+public:
+  enum {
+    Control,
+    ValueIn
+  };
+
+  enum Strength {
+    NONE, WEAK, STRONG, NA
+  };
+
+  ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* val);
+
+  virtual int Opcode() const;
+  virtual const Type* bottom_type() const;
+  virtual const Type* Value(PhaseGVN* phase) const;
+  virtual const class TypePtr *adr_type() const { return TypeOopPtr::BOTTOM; }
+  virtual uint match_edge(uint idx) const {
+    return idx >= ValueIn;
+  }
+  virtual uint ideal_reg() const { return Op_RegP; }
+
+  virtual Node* Identity(PhaseGVN* phase);
+
+  uint size_of() const {
+    return sizeof(*this);
+  }
+
+  Strength get_barrier_strength();
+  CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn);
+
+private:
+  bool needs_barrier(PhaseGVN* phase, Node* n);
+  bool needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited);
+};
+
+
 #endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP
@@ -41,13 +41,10 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() :
   SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);

   // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
 }

@@ -75,7 +72,7 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
   // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
   // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_garbage is hit.

-  size_t capacity = ShenandoahHeap::heap()->capacity();
+  size_t capacity = ShenandoahHeap::heap()->max_capacity();
   size_t free_target = ShenandoahMinFreeThreshold * capacity / 100;
   size_t min_garbage = free_target > actual_free ? (free_target - actual_free) : 0;
   size_t max_cset = (size_t)(1.0 * ShenandoahEvacReserve * capacity / 100 / ShenandoahEvacWaste);
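The comments in this hunk describe the cset sizing scheme: free_target is the hard free-space goal, min_garbage is how much garbage must be reclaimed to reach it, and max_cset caps how much live data one cycle may evacuate; until min_garbage worth of regions has been collected, ShenandoahGarbageThreshold acts only as a soft per-region filter. A standalone sketch of that arithmetic follows, with illustrative heap numbers and flag percentages standing in for the real -XX values (they are assumptions, not taken from any particular JVM configuration):

```cpp
#include <cstdio>

// Hypothetical stand-ins for ShenandoahMinFreeThreshold, ShenandoahEvacReserve
// and ShenandoahEvacWaste; the real values come from -XX flags and the live heap.
int main() {
  const unsigned long long M = 1024 * 1024;
  unsigned long long capacity    = 4096 * M; // heap->max_capacity(), assumed 4G heap
  unsigned long long actual_free = 300 * M;  // free space seen when the cycle starts
  unsigned long long min_free_pct     = 10;  // % of heap we want free afterwards
  unsigned long long evac_reserve_pct = 5;   // % of heap reserved for evacuation
  double             evac_waste       = 1.2; // copy overhead factor

  // Hard goal: end the cycle with at least free_target bytes free.
  unsigned long long free_target = min_free_pct * capacity / 100;
  // Garbage we *must* reclaim to get there (zero if already above the goal).
  unsigned long long min_garbage = free_target > actual_free ? free_target - actual_free : 0;
  // Cap on live bytes a single cycle can afford to evacuate.
  unsigned long long max_cset =
      (unsigned long long)(1.0 * evac_reserve_pct * capacity / 100 / evac_waste);

  printf("free_target=%lluM min_garbage=%lluM max_cset=%lluM\n",
         free_target / M, min_garbage / M, max_cset / M);
  // Prints: free_target=409M min_garbage=109M max_cset=170M
}
```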
@@ -126,12 +123,12 @@ void ShenandoahAdaptiveHeuristics::record_phase_time(ShenandoahPhaseTimings::Pha

 bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-  size_t capacity = heap->capacity();
+  size_t capacity = heap->max_capacity();
   size_t available = heap->free_set()->available();

   // Check if we are falling below the worst limit, time to trigger the GC, regardless of
   // anything else.
-  size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+  size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;
   if (available < min_threshold) {
     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
                  available / M, min_threshold / M);
@@ -141,7 +138,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
   // Check if we need to learn a bit about the application
   const size_t max_learn = ShenandoahLearningSteps;
   if (_gc_times_learned < max_learn) {
-    size_t init_threshold = ShenandoahInitFreeThreshold * heap->capacity() / 100;
+    size_t init_threshold = ShenandoahInitFreeThreshold * heap->max_capacity() / 100;
     if (available < init_threshold) {
       log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
                    _gc_times_learned + 1, max_learn, available / M, init_threshold / M);
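Two fixed triggers can be read off this pair of hunks: a hard floor at ShenandoahMinFreeThreshold percent of max_capacity that fires regardless of anything else, and a higher floor at ShenandoahInitFreeThreshold percent that applies only during the first ShenandoahLearningSteps cycles, while the heuristic calibrates. A minimal sketch of that decision logic, with assumed percentages and step count standing in for the real flag values (not authoritative JVM defaults):

```cpp
#include <cstdio>

// Assumed stand-ins for ShenandoahMinFreeThreshold, ShenandoahInitFreeThreshold
// and ShenandoahLearningSteps.
static const unsigned long long kMinFreePct  = 10;
static const unsigned long long kInitFreePct = 70;
static const unsigned           kLearnSteps  = 5;

bool should_start_normal_gc(unsigned long long max_capacity,
                            unsigned long long available,
                            unsigned gc_times_learned) {
  const unsigned long long M = 1024 * 1024;
  // Hard floor: trigger whenever free space drops below the minimum threshold.
  unsigned long long min_threshold = kMinFreePct * max_capacity / 100;
  if (available < min_threshold) {
    printf("Trigger: Free (%lluM) is below minimum threshold (%lluM)\n",
           available / M, min_threshold / M);
    return true;
  }
  // Learning phase: trigger early for the first few cycles.
  if (gc_times_learned < kLearnSteps) {
    unsigned long long init_threshold = kInitFreePct * max_capacity / 100;
    if (available < init_threshold) {
      printf("Trigger: Learning %u of %u. Free (%lluM) is below initial threshold (%lluM)\n",
             gc_times_learned + 1, kLearnSteps, available / M, init_threshold / M);
      return true;
    }
  }
  return false; // the allocation-rate trigger of the full heuristic is omitted here
}

int main() {
  const unsigned long long M = 1024 * 1024;
  // 4G heap, 2560M free, first cycle: the learning trigger fires (2560M < 2867M).
  return should_start_normal_gc(4096 * M, 2560 * M, 0) ? 0 : 1;
}
```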
@@ -47,13 +47,10 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics() : ShenandoahHeu
   }

   // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
 }

@@ -42,13 +42,10 @@ ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristic
   SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold, 10);

   // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
 }

@@ -56,8 +53,8 @@ bool ShenandoahCompactHeuristics::should_start_normal_gc() const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();

   size_t available = heap->free_set()->available();
-  size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
-  size_t min_threshold = ShenandoahMinFreeThreshold * heap->capacity() / 100;
+  size_t threshold_bytes_allocated = heap->max_capacity() * ShenandoahAllocationThreshold / 100;
+  size_t min_threshold = ShenandoahMinFreeThreshold * heap->max_capacity() / 100;

   if (available < min_threshold) {
     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
@@ -43,14 +43,11 @@ ShenandoahPassiveHeuristics::ShenandoahPassiveHeuristics() : ShenandoahHeuristic
   }

   // Disable known barriers by default.
+  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahLoadRefBarrier);
   SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
   SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
-  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier);
-  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier);
   SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
-  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier);
   SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
-  SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier);
   SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);

   // Final configuration checks
@@ -84,7 +81,7 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando

   // Do not select too large CSet that would overflow the available free space.
   // Take at least the entire evacuation reserve, and be free to overflow to free space.
-  size_t capacity = ShenandoahHeap::heap()->capacity();
+  size_t capacity = ShenandoahHeap::heap()->max_capacity();
   size_t available = MAX2(ShenandoahEvacReserve * capacity / 100, actual_free);
   size_t max_cset = (size_t)(available / ShenandoahEvacWaste);

@@ -40,13 +40,10 @@ ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics(
   SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);

   // Final configuration checks
+  SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahReadBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahWriteBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValReadBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
-  SHENANDOAH_CHECK_FLAG_SET(ShenandoahAcmpBarrier);
   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
 }

@@ -55,7 +52,7 @@ ShenandoahStaticHeuristics::~ShenandoahStaticHeuristics() {}
 bool ShenandoahStaticHeuristics::should_start_normal_gc() const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();

-  size_t capacity = heap->capacity();
+  size_t capacity = heap->max_capacity();
   size_t available = heap->free_set()->available();
   size_t threshold_available = (capacity * ShenandoahFreeThreshold) / 100;

Some files were not shown because too many files have changed in this diff