Merge

commit b3eb4c3ce8

.hgtags
@@ -525,3 +525,4 @@ dc1f9dec2018a37fedba47d8a2aedef99faaec64 jdk-12+19
 40098289d5804c3b5e7074bc75501a81e70d9b0d jdk-12+20
 f8fb0c86f2b3d24294d39c5685a628e1beb14ba7 jdk-12+21
 732bec44c89e8b93a38296bf690f97b7230c5b6d jdk-12+22
+eef755718cb24813031a842bbfc716a6cea18e9a jdk-12+23

@@ -396,9 +396,9 @@ CC: Sun C++ 5.13 SunOS_i386 151846-10 2015/10/30</code></pre>
 <h3 id="x11">X11</h3>
 <p>Certain <a href="http://www.x.org/">X11</a> libraries and include files are required on Linux and Solaris.</p>
 <ul>
-<li>To install on an apt-based Linux, try running <code>sudo apt-get install libx11-dev libxext-dev libxrender-dev libxtst-dev libxt-dev</code>.</li>
-<li>To install on an rpm-based Linux, try running <code>sudo yum install libXtst-devel libXt-devel libXrender-devel libXi-devel</code>.</li>
-<li>To install on Solaris, try running <code>pkg install x11/header/x11-protocols x11/library/libice x11/library/libpthread-stubs x11/library/libsm x11/library/libx11 x11/library/libxau x11/library/libxcb x11/library/libxdmcp x11/library/libxevie x11/library/libxext x11/library/libxrender x11/library/libxscrnsaver x11/library/libxtst x11/library/toolkit/libxt</code>.</li>
+<li>To install on an apt-based Linux, try running <code>sudo apt-get install libx11-dev libxext-dev libxrender-dev libxrandr-dev libxtst-dev libxt-dev</code>.</li>
+<li>To install on an rpm-based Linux, try running <code>sudo yum install libXtst-devel libXt-devel libXrender-devel libXrandr-devel libXi-devel</code>.</li>
+<li>To install on Solaris, try running <code>pkg install x11/header/x11-protocols x11/library/libice x11/library/libpthread-stubs x11/library/libsm x11/library/libx11 x11/library/libxau x11/library/libxcb x11/library/libxdmcp x11/library/libxevie x11/library/libxext x11/library/libxrender x11/library/libxrandr x11/library/libxscrnsaver x11/library/libxtst x11/library/toolkit/libxt</code>.</li>
 </ul>
 <p>Use <code>--with-x=<path></code> if <code>configure</code> does not properly locate your X11 files.</p>
 <h3 id="alsa">ALSA</h3>

@@ -662,6 +662,7 @@ dpkg-deb -x /tmp/libasound2-dev_1.0.25-4_armhf.deb .</code></pre></li>
 <li>libice-dev</li>
 <li>libxrender</li>
 <li>libxrender-dev</li>
+<li>libxrandr-dev</li>
 <li>libsm-dev</li>
 <li>libxt-dev</li>
 <li>libx11</li>

@@ -693,7 +694,7 @@ cp: cannot stat `arm-linux-gnueabihf/libXt.so': No such file or directory</c
 <pre><code>apt install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu</code></pre></li>
 <li><p>Create chroot on the <em>build</em> system, configuring it for <em>target</em> system:</p>
 <pre><code>sudo qemu-debootstrap --arch=arm64 --verbose \
---include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
+--include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
 --resolve-deps jessie /chroots/arm64 http://httpredir.debian.org/debian/</code></pre></li>
 <li><p>Configure and build with newly created chroot as sysroot/toolchain-path:</p>
 <pre><code>CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure --openjdk-target=aarch64-linux-gnu --with-sysroot=/chroots/arm64/ --with-toolchain-path=/chroots/arm64/

@@ -488,15 +488,15 @@ Certain [X11](http://www.x.org/) libraries and include files are required on
 Linux and Solaris.
 
 * To install on an apt-based Linux, try running `sudo apt-get install
-  libx11-dev libxext-dev libxrender-dev libxtst-dev libxt-dev`.
+  libx11-dev libxext-dev libxrender-dev libxrandr-dev libxtst-dev libxt-dev`.
 * To install on an rpm-based Linux, try running `sudo yum install
-  libXtst-devel libXt-devel libXrender-devel libXi-devel`.
+  libXtst-devel libXt-devel libXrender-devel libXrandr-devel libXi-devel`.
 * To install on Solaris, try running `pkg install x11/header/x11-protocols
   x11/library/libice x11/library/libpthread-stubs x11/library/libsm
   x11/library/libx11 x11/library/libxau x11/library/libxcb
   x11/library/libxdmcp x11/library/libxevie x11/library/libxext
-  x11/library/libxrender x11/library/libxscrnsaver x11/library/libxtst
-  x11/library/toolkit/libxt`.
+  x11/library/libxrender x11/library/libxrandr x11/library/libxscrnsaver
+  x11/library/libxtst x11/library/toolkit/libxt`.
 
 Use `--with-x=<path>` if `configure` does not properly locate your X11 files.
 

@@ -1062,6 +1062,7 @@ Note that X11 is needed even if you only want to build a headless JDK.
 * libice-dev
 * libxrender
 * libxrender-dev
+* libxrandr-dev
 * libsm-dev
 * libxt-dev
 * libx11

@@ -1112,7 +1113,7 @@ apt install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
 * Create chroot on the *build* system, configuring it for *target* system:
 ```
 sudo qemu-debootstrap --arch=arm64 --verbose \
---include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
+--include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
 --resolve-deps jessie /chroots/arm64 http://httpredir.debian.org/debian/
 ```

@@ -481,7 +481,6 @@ jdk.internal.vm.compiler_EXCLUDES += \
org.graalvm.compiler.processor \
org.graalvm.compiler.replacements.jdk12.test \
org.graalvm.compiler.replacements.jdk9.test \
org.graalvm.compiler.replacements.jdk9_11.test \
org.graalvm.compiler.replacements.processor \
org.graalvm.compiler.replacements.test \
org.graalvm.compiler.serviceprovider.processor \

@@ -25,7 +25,7 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc zgc nmt cds \
+graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
 static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)

@@ -325,6 +325,15 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
 fi
 fi
 
+# Only enable Shenandoah on supported arches
+AC_MSG_CHECKING([if shenandoah can be built])
+if test "x$OPENJDK_TARGET_CPU_ARCH" = "xx86" || test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
+AC_MSG_RESULT([yes])
+else
+DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES shenandoahgc"
+AC_MSG_RESULT([no, platform not supported])
+fi
+
 # Only enable ZGC on supported platforms
 AC_MSG_CHECKING([if zgc can be built])
 if test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
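This check only gates the default: on x86 and aarch64 the feature stays buildable, elsewhere it is appended to DISABLED_JVM_FEATURES. As a usage sketch using the standard OpenJDK configure syntax (a leading `-` disables a feature, which is exactly how the jib profile change further down opts out):

```
# Sketch: toggling the new feature explicitly at configure time.
bash configure --with-jvm-features=shenandoahgc     # opt in explicitly
bash configure --with-jvm-features=-shenandoahgc    # force-disable
```
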
@@ -336,7 +345,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
 
 # Disable unsupported GCs for Zero
 if HOTSPOT_CHECK_JVM_VARIANT(zero); then
-DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES epsilongc g1gc zgc"
+DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES epsilongc g1gc zgc shenandoahgc"
 fi
 
 # Turn on additional features based on other parts of configure

@@ -470,7 +479,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
 fi
 
 # All variants but minimal (and custom) get these features
-NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc jni-check jvmti management nmt services vm-structs zgc"
+NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
 
 # Disable CDS on AIX.
 if test "x$OPENJDK_TARGET_OS" = "xaix"; then

@@ -248,6 +248,7 @@ var getJibProfilesCommon = function (input, data) {
 configure_args: concat(["--enable-jtreg-failure-handler"],
 "--with-exclude-translations=de,es,fr,it,ko,pt_BR,sv,ca,tr,cs,sk,ja_JP_A,ja_JP_HA,ja_JP_HI,ja_JP_I,zh_TW,zh_HK",
 "--disable-manpages",
+"--with-jvm-features=-shenandoahgc",
 versionArgs(input, common))
 };
 
 // Extra settings for debug profiles

@@ -425,7 +426,7 @@ var getJibProfilesProfiles = function (input, common, data) {
 "windows-x64": {
 target_os: "windows",
 target_cpu: "x64",
-dependencies: ["devkit", "graalunit_lib"],
+dependencies: ["devkit", "pandoc", "graalunit_lib"],
 configure_args: concat(common.configure_args_64bit),
 },
 

@@ -1554,10 +1554,11 @@ charset x-IBM970 IBM970
 
 charset x-IBM964 IBM964
 package sun.nio.cs.ext
-type source
+type template
 alias cp964 # JDK historical
 alias ibm964
 alias ibm-964
+alias ibm-euctw
 alias 964
 
 charset x-IBM33722 IBM33722

@@ -1861,6 +1862,11 @@ charset x-JIS0212_MS5022X JIS_X_0212_MS5022X
 minmax 0x21 0x7e 0x21 0x7e
 internal true # "internal implementation
 
+charset SimpleEUCEncoder SimpleEUCEncoder
+package sun.nio.cs.ext
+type template
+internal true
+
 ########################################################
 #
 # platform specific charsets, to be registered into spi

@@ -16,6 +16,7 @@ IBM942C
 IBM943
 IBM943C
 IBM950
+IBM964
 IBM970
 IBM1046
 IBM1124

@@ -25,3 +26,4 @@ ISO_8859_6
 ISO_8859_8
 MS1252
 TIS_620
+SimpleEUCEncoder
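With IBM964 switched to a generated template and listed among the standard charsets, the new `ibm-euctw` alias should resolve like any other. A minimal sketch of checking that from user code, assuming a full JDK build that includes the jdk.charsets module:

```java
import java.nio.charset.Charset;

public class CharsetAliasCheck {
    public static void main(String[] args) {
        // Both names come from the charset entries above and
        // should resolve to the same charset.
        Charset byName  = Charset.forName("x-IBM964");
        Charset byAlias = Charset.forName("ibm-euctw");
        System.out.println(byName + " / " + byAlias + " : " + byName.equals(byAlias));
    }
}
```
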
@@ -41,6 +41,9 @@ if [[ $TARGET_PLATFORM == linux_x64 ]] ; then
 elif [[ $TARGET_PLATFORM == macosx_x64 ]] ; then
 PANDOC_PLATFORM=macOS
 PANDOC_SUFFIX=zip
+elif [[ $TARGET_PLATFORM == windows_x64 ]] ; then
+PANDOC_PLATFORM=windows-x86_64
+PANDOC_SUFFIX=zip
 else
 echo "Unknown platform"
 exit 1

@@ -59,7 +62,12 @@ fi
 cd ..
 
 mkdir pandoc
-cp tmp/pandoc-$PANDOC_VERSION/bin/pandoc pandoc
+if [[ $TARGET_PLATFORM == windows_x64 ]] ; then
+cp tmp/pandoc-$PANDOC_VERSION-$PANDOC_PLATFORM/pandoc.exe pandoc
+chmod +x pandoc/pandoc.exe
+else
+cp tmp/pandoc-$PANDOC_VERSION/bin/pandoc pandoc
+fi
 
 tar -cvzf ../$BUNDLE_NAME pandoc
 cp ../$BUNDLE_NAME "$ORIG_DIR"

@@ -136,6 +136,12 @@ ifeq ($(call check-jvm-feature, compiler2), true)
 $d/os_cpu/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH).ad \
 )))
 
+ifeq ($(call check-jvm-feature, shenandoahgc), true)
+AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
+$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/shenandoah/shenandoah_$(HOTSPOT_TARGET_CPU).ad \
+)))
+endif
+
 SINGLE_AD_SRCFILE := $(ADLC_SUPPORT_DIR)/all-ad-src.ad
 
 INSERT_FILENAME_AWK_SCRIPT := \

@@ -60,6 +60,7 @@ ifeq ($(call check-jvm-feature, dtrace), true)
 ciEnv.o \
 classLoadingService.o \
 compileBroker.o \
+gcVMOperations.o \
 hashtable.o \
 instanceKlass.o \
 java.o \

@@ -74,18 +75,17 @@ ifeq ($(call check-jvm-feature, dtrace), true)
 thread.o \
 unsafe.o \
 vmThread.o \
-vmGCOperations.o \
 )
 
 ifeq ($(call check-jvm-feature, cmsgc), true)
 DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
-vmCMSOperations.o \
+cmsVMOperations.o \
 )
 endif
 
 ifeq ($(call check-jvm-feature, parallelgc), true)
 DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
-vmPSOperations.o \
+psVMOperations.o \
 )
 endif
 

@@ -166,6 +166,13 @@ ifneq ($(call check-jvm-feature, zgc), true)
 JVM_EXCLUDE_PATTERNS += gc/z
 endif
 
+ifneq ($(call check-jvm-feature, shenandoahgc), true)
+JVM_CFLAGS_FEATURES += -DINCLUDE_SHENANDOAHGC=0
+JVM_EXCLUDE_PATTERNS += gc/shenandoah
+else
+JVM_CFLAGS_FEATURES += -DSUPPORT_BARRIER_ON_PRIMITIVES -DSUPPORT_NOT_TO_SPACE_INVARIANT
+endif
+
 ifneq ($(call check-jvm-feature, jfr), true)
 JVM_CFLAGS_FEATURES += -DINCLUDE_JFR=0
 JVM_EXCLUDE_PATTERNS += jfr
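The `-DINCLUDE_SHENANDOAHGC=0` define follows HotSpot's existing `INCLUDE_*` feature-macro convention, under which shared code fences off feature-specific paths. A hypothetical illustration of the pattern (the gated statement is invented for the example, not taken from this patch):

```cpp
// Illustration of HotSpot's INCLUDE_* gating convention; the body is a
// placeholder. With -DINCLUDE_SHENANDOAHGC=0 this compiles to nothing and,
// per the makefile above, gc/shenandoah sources are excluded entirely.
#if INCLUDE_SHENANDOAHGC
  do_shenandoah_specific_setup();  // hypothetical call
#endif
```
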
@@ -36,6 +36,11 @@ ifeq ($(TOOLCHAIN_TYPE), gcc)
 BUILD_LIBJVM_assembler_x86.cpp_CXXFLAGS := -Wno-maybe-uninitialized
 BUILD_LIBJVM_cardTableBarrierSetAssembler_x86.cpp_CXXFLAGS := -Wno-maybe-uninitialized
 BUILD_LIBJVM_interp_masm_x86.cpp_CXXFLAGS := -Wno-uninitialized
+ifeq ($(DEBUG_LEVEL), release)
+# Need extra inlining to collapse all marking code into the hot marking loop
+BUILD_LIBJVM_shenandoahConcurrentMark.cpp_CXXFLAGS := --param inline-unit-growth=1000
+BUILD_LIBJVM_shenandoahTraversalGC.cpp_CXXFLAGS := --param inline-unit-growth=1000
+endif
 endif
 
 LIBJVM_FDLIBM_COPY_OPT_FLAG := $(CXX_O_FLAG_NONE)

@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -151,6 +151,19 @@ abstract class AbstractLDMLHandler<V> extends DefaultHandler {
 }
 }
 
+void pushStringListEntry(String qName, Attributes attributes, String key) {
+if (!pushIfIgnored(qName, attributes)) {
+currentContainer = new StringListEntry(qName, currentContainer, key);
+}
+}
+
+void pushStringListElement(String qName, Attributes attributes, int index) {
+if (!pushIfIgnored(qName, attributes)) {
+currentContainer = new StringListElement(qName, currentContainer, index);
+}
+}
+
 private boolean pushIfIgnored(String qName, Attributes attributes) {
 if (isIgnored(attributes) || currentContainer instanceof IgnoredContainer) {
 pushIgnoredContainer(qName);

@@ -53,6 +53,10 @@ class Bundle {
 "NumberPatterns/percent"
 };
 
+private final static String[] COMPACT_NUMBER_PATTERN_KEYS = {
+"short.CompactNumberPatterns",
+"long.CompactNumberPatterns"};
+
 private final static String[] NUMBER_ELEMENT_KEYS = {
 "NumberElements/decimal",
 "NumberElements/group",

@@ -228,6 +232,16 @@ class Bundle {
 }
 }
 
+for (String k : COMPACT_NUMBER_PATTERN_KEYS) {
+List<String> patterns = (List<String>) myMap.remove(k);
+if (patterns != null) {
+// Replace any null entry with empty strings.
+String[] arrPatterns = patterns.stream()
+.map(s -> s == null ? "" : s).toArray(String[]::new);
+myMap.put(k, arrPatterns);
+}
+}
+
 // if myMap has any of NUMBER_ELEMENT_KEYS, create a complete NumberElements.
 String defaultScript = (String) myMap.get("DefaultNumberingSystem");
 @SuppressWarnings("unchecked")

@@ -888,6 +888,8 @@ public class CLDRConverter {
 copyIfPresent(map, "NumberElements", formatData);
 }
 copyIfPresent(map, "NumberPatterns", formatData);
+copyIfPresent(map, "short.CompactNumberPatterns", formatData);
+copyIfPresent(map, "long.CompactNumberPatterns", formatData);
 
 // put extra number elements for available scripts into formatData, if it is "root"
 if (id.equals("root")) {

@@ -52,6 +52,8 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
 private final String id;
 private String currentContext = ""; // "format"/"stand-alone"
 private String currentWidth = ""; // "wide"/"narrow"/"abbreviated"
+private String currentStyle = ""; // short, long for decimalFormat
+private String compactCount = ""; // one or other for decimalFormat
 
 LDMLParseHandler(String id) {
 this.id = id;

@@ -503,13 +505,85 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
// Number format information
//
case "decimalFormatLength":
if (attributes.getValue("type") == null) {
// skipping type="short" data
// for FormatData
// copy string for later assembly into NumberPatterns
String type = attributes.getValue("type");
if (null == type) {
// format data for decimal number format
pushStringEntry(qName, attributes, "NumberPatterns/decimal");
currentStyle = type;
} else {
pushIgnoredContainer(qName);
switch (type) {
case "short":
case "long":
// considering "short" and long for
// compact number formatting patterns
pushKeyContainer(qName, attributes, type);
currentStyle = type;
break;
default:
pushIgnoredContainer(qName);
break;
}
}
break;
case "decimalFormat":
if(currentStyle == null) {
pushContainer(qName, attributes);
} else {
switch (currentStyle) {
case "short":
pushStringListEntry(qName, attributes,
currentStyle+".CompactNumberPatterns");
break;
case "long":
pushStringListEntry(qName, attributes,
currentStyle+".CompactNumberPatterns");
break;
default:
pushIgnoredContainer(qName);
break;
}
}
break;
case "pattern":
String containerName = currentContainer.getqName();
if (containerName.equals("decimalFormat")) {
if (currentStyle == null) {
pushContainer(qName, attributes);
} else {
// The compact number patterns parsing assumes that the order
// of patterns are always in the increasing order of their
// type attribute i.e. type = 1000...
// Between the inflectional forms for a type (e.g.
// count = "one" and count = "other" for type = 1000), it is
// assumed that the count = "one" always appears before
// count = "other"
switch (currentStyle) {
case "short":
case "long":
String count = attributes.getValue("count");
// first pattern of count = "one" or count = "other"
if ((count.equals("one") || count.equals("other"))
&& compactCount.equals("")) {
compactCount = count;
pushStringListElement(qName, attributes,
(int) Math.log10(Double.parseDouble(attributes.getValue("type"))));
} else if ((count.equals("one") || count.equals("other"))
&& compactCount.equals(count)) {
// extract patterns with similar "count"
// attribute value
pushStringListElement(qName, attributes,
(int) Math.log10(Double.parseDouble(attributes.getValue("type"))));
} else {
pushIgnoredContainer(qName);
}
break;
default:
pushIgnoredContainer(qName);
break;
}
}
} else {
pushContainer(qName, attributes);
}
break;
case "currencyFormatLength":
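These short/long pattern lists feed the compact number formatting support added in JDK 12. A brief usage sketch of the consuming API (`NumberFormat.getCompactNumberInstance`, present since JDK 12; the exact output depends on the CLDR data the build generates):

```java
import java.text.NumberFormat;
import java.util.Locale;

public class CompactNumberDemo {
    public static void main(String[] args) {
        // SHORT corresponds to the "short.CompactNumberPatterns" key above.
        NumberFormat fmt = NumberFormat.getCompactNumberInstance(
                Locale.US, NumberFormat.Style.SHORT);
        System.out.println(fmt.format(10_000)); // e.g. "10K" with US CLDR data
    }
}
```
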
@@ -676,10 +750,9 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
 // "alias" for root
 case "alias":
 {
-if (id.equals("root") &&
-!isIgnored(attributes) &&
-currentCalendarType != null &&
-!currentCalendarType.lname().startsWith("islamic-")) { // ignore Islamic variants
+if (id.equals("root") && !isIgnored(attributes)
+&& ((currentContainer.getqName().equals("decimalFormatLength"))
+|| (currentCalendarType != null && !currentCalendarType.lname().startsWith("islamic-")))) { // ignore islamic variants
 pushAliasEntry(qName, attributes, attributes.getValue("path"));
 } else {
 pushIgnoredContainer(qName);

@@ -831,6 +904,9 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
 case "dayPeriods":
 case "eras":
 break;
+case "decimalFormatLength": // used for compact number formatting patterns
+keyName = type + ".CompactNumberPatterns";
+break;
 default:
 keyName = "";
 break;

@@ -869,6 +945,14 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
 width = path.substring(start+typeKey.length(), path.indexOf("']", start));
 }
 
+// used for compact number formatting patterns aliases
+typeKey = "decimalFormatLength[@type='";
+start = path.indexOf(typeKey);
+if (start != -1) {
+String style = path.substring(start + typeKey.length(), path.indexOf("']", start));
+return toJDKKey(qName, "", style);
+}
+
 return calType + "." + toJDKKey(qName, context, width);
 }
 

@@ -926,7 +1010,11 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
 currentContext = "";
 putIfEntry();
 break;
 
+case "decimalFormatLength":
+currentStyle = "";
+compactCount = "";
+putIfEntry();
+break;
 default:
 putIfEntry();
 }

@@ -937,22 +1025,28 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
 if (currentContainer instanceof AliasEntry) {
 Entry<?> entry = (Entry<?>) currentContainer;
 String containerqName = entry.getParent().getqName();
-Set<String> keyNames = populateAliasKeys(containerqName, currentContext, currentWidth);
-if (!keyNames.isEmpty()) {
-for (String keyName : keyNames) {
-String[] tmp = keyName.split(",", 3);
-String calType = currentCalendarType.lname();
-String src = calType+"."+tmp[0];
-String target = getTarget(
-entry.getKey(),
-calType,
-tmp[1].length()>0 ? tmp[1] : currentContext,
-tmp[2].length()>0 ? tmp[2] : currentWidth);
-if (target.substring(target.lastIndexOf('.')+1).equals(containerqName)) {
-target = target.substring(0, target.indexOf('.'))+"."+tmp[0];
+if (containerqName.equals("decimalFormatLength")) {
+String srcKey = toJDKKey(containerqName, "", currentStyle);
+String targetKey = getTarget(entry.getKey(), "", "", "");
+CLDRConverter.aliases.put(srcKey, targetKey);
+} else {
+Set<String> keyNames = populateAliasKeys(containerqName, currentContext, currentWidth);
+if (!keyNames.isEmpty()) {
+for (String keyName : keyNames) {
+String[] tmp = keyName.split(",", 3);
+String calType = currentCalendarType.lname();
+String src = calType+"."+tmp[0];
+String target = getTarget(
+entry.getKey(),
+calType,
+tmp[1].length()>0 ? tmp[1] : currentContext,
+tmp[2].length()>0 ? tmp[2] : currentWidth);
+if (target.substring(target.lastIndexOf('.')+1).equals(containerqName)) {
+target = target.substring(0, target.indexOf('.'))+"."+tmp[0];
+}
+CLDRConverter.aliases.put(src.replaceFirst("^gregorian.", ""),
+target.replaceFirst("^gregorian.", ""));
+}
 }
-CLDRConverter.aliases.put(src.replaceFirst("^gregorian.", ""),
-target.replaceFirst("^gregorian.", ""));
-}
 }
 } else if (currentContainer instanceof Entry) {

@@ -0,0 +1,47 @@
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package build.tools.cldrconverter;

class StringListElement extends Container {

    StringListEntry list;
    int index;

    StringListElement(String qName, Container parent, int index) {
        super(qName, parent);
        while (!(parent instanceof StringListEntry)) {
            parent = parent.getParent();
        }
        list = (StringListEntry) parent;
        this.index = index;
    }

    @Override
    void addCharacters(char[] characters, int start, int length) {
        list.addCharacters(index, characters, start, length);
    }

}
@@ -0,0 +1,61 @@
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package build.tools.cldrconverter;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.IntStream;

class StringListEntry extends Entry<List<String>> {

    private List<String> value;

    StringListEntry(String qName, Container parent, String key) {
        super(qName, parent, key);
        value = new ArrayList<>();
    }

    void addCharacters(int index, char[] characters, int start, int length) {
        // fill with empty strings when the patterns start from index > 0
        if (value.size() < index) {
            IntStream.range(0, index).forEach(i -> value.add(i, ""));
            value.add(index, new String(characters, start, length));
        } else {
            value.add(index, new String(characters, start, length));
        }
    }

    @Override
    List<String> getValue() {
        for (String element : value) {
            if (element != null) {
                return value;
            }
        }
        return null;
    }

}
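A worked example of the index arithmetic that ties StringListElement to StringListEntry, using a hypothetical CLDR entry:

```java
// Hypothetical CLDR pattern: <pattern type="10000" count="other">00K</pattern>
// LDMLParseHandler (above) derives the list index from the type attribute:
int index = (int) Math.log10(Double.parseDouble("10000")); // == 4
// addCharacters(4, ...) on an empty StringListEntry first back-fills
// indices 0..3 with "", then inserts "00K" at index 4.
```
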
@@ -4,7 +4,9 @@
 #
 # This code is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
 #
 # This code is distributed in the hope that it will be useful, but WITHOUT
 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or

@@ -21,4 +23,9 @@
# questions.
#

exclusiveAccess.dirs=.
include LauncherCommon.gmk

$(eval $(call SetupBuildLauncher, jfr, \
MAIN_CLASS := jdk.jfr.internal.tool.Main, \
CFLAGS := -DEXPAND_CLASSPATH_WILDCARDS, \
))

@@ -80,15 +80,18 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.asm.amd64.test/src \
$(SRC_DIR)/org.graalvm.compiler.asm.sparc.test/src \
$(SRC_DIR)/org.graalvm.compiler.asm.test/src \
$(SRC_DIR)/org.graalvm.compiler.core.aarch64.test/src \
$(SRC_DIR)/org.graalvm.compiler.core.amd64.test/src \
$(SRC_DIR)/org.graalvm.compiler.core.test/src \
$(SRC_DIR)/org.graalvm.compiler.core.jdk9.test/src \
$(SRC_DIR)/org.graalvm.compiler.core.test/src \
$(SRC_DIR)/org.graalvm.compiler.debug.test/src \
$(SRC_DIR)/org.graalvm.compiler.graph.test/src \
$(SRC_DIR)/org.graalvm.compiler.hotspot.amd64.test/src \
$(SRC_DIR)/org.graalvm.compiler.hotspot.lir.test/src \
$(SRC_DIR)/org.graalvm.compiler.hotspot.sparc.test/src \
$(SRC_DIR)/org.graalvm.compiler.hotspot.test/src \
$(SRC_DIR)/org.graalvm.compiler.jtt/src \
$(SRC_DIR)/org.graalvm.compiler.lir.jtt/src \
$(SRC_DIR)/org.graalvm.compiler.lir.test/src \
$(SRC_DIR)/org.graalvm.compiler.loop.test/src \
$(SRC_DIR)/org.graalvm.compiler.nodes.test/src \

@@ -96,12 +99,9 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.phases.common.test/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.jdk12.test/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.jdk9.test/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.jdk9_11.test/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.test/src \
$(SRC_DIR)/org.graalvm.compiler.test/src \
$(SRC_DIR)/org.graalvm.util.test/src \
$(SRC_DIR)/org.graalvm.compiler.jtt/src \
$(SRC_DIR)/org.graalvm.compiler.lir.jtt/src \
, \
EXCLUDE_FILES := org/graalvm/compiler/core/test/VerifyDebugUsageTest.java, \
BIN := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \

@@ -1,6 +1,6 @@
 //
 // Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2014, Red Hat Inc. All rights reserved.
+// Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it

@@ -1272,6 +1272,8 @@ source %{
 case Op_CompareAndSwapL:
 case Op_CompareAndSwapP:
 case Op_CompareAndSwapN:
+case Op_ShenandoahCompareAndSwapP:
+case Op_ShenandoahCompareAndSwapN:
 case Op_CompareAndSwapB:
 case Op_CompareAndSwapS:
 case Op_GetAndSetI:

@@ -1293,6 +1295,10 @@ source %{
 case Op_WeakCompareAndSwapL:
 case Op_WeakCompareAndSwapP:
 case Op_WeakCompareAndSwapN:
+case Op_ShenandoahWeakCompareAndSwapP:
+case Op_ShenandoahWeakCompareAndSwapN:
+case Op_ShenandoahCompareAndExchangeP:
+case Op_ShenandoahCompareAndExchangeN:
 return maybe_volatile;
 default:
 return false;

@@ -1118,7 +1118,7 @@ public:
 Register Rn, enum operand_size sz, int op, bool ordered) {
 starti;
 f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21);
-rf(Rs, 16), f(ordered, 15), rf(Rt2, 10), rf(Rn, 5), rf(Rt1, 0);
+rf(Rs, 16), f(ordered, 15), rf(Rt2, 10), srf(Rn, 5), rf(Rt1, 0);
 }
 
 void load_exclusive(Register dst, Register addr,

@@ -1247,7 +1247,7 @@ public:
 enum operand_size sz, int op1, int op2, bool a, bool r) {
 starti;
 f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21);
-rf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), rf(Rn, 5), zrf(Rt, 0);
+rf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), srf(Rn, 5), zrf(Rt, 0);
 }
 
 #define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2) \

@ -0,0 +1,671 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
|
||||
#include "gc/shenandoah/shenandoahHeap.hpp"
|
||||
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
|
||||
#include "gc/shenandoah/shenandoahHeuristics.hpp"
|
||||
#include "gc/shenandoah/shenandoahRuntime.hpp"
|
||||
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "interpreter/interp_masm.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
#ifdef COMPILER1
|
||||
#include "c1/c1_LIRAssembler.hpp"
|
||||
#include "c1/c1_MacroAssembler.hpp"
|
||||
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
|
||||
#endif
|
||||
|
||||
#define __ masm->
|
||||
|
||||
address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
|
||||
|
||||
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
|
||||
Register addr, Register count, RegSet saved_regs) {
|
||||
if (is_oop) {
|
||||
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
|
||||
if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
|
||||
__ push(saved_regs, sp);
|
||||
if (count == c_rarg0) {
|
||||
if (addr == c_rarg1) {
|
||||
// exactly backwards!!
|
||||
__ mov(rscratch1, c_rarg0);
|
||||
__ mov(c_rarg0, c_rarg1);
|
||||
__ mov(c_rarg1, rscratch1);
|
||||
} else {
|
||||
__ mov(c_rarg1, count);
|
||||
__ mov(c_rarg0, addr);
|
||||
}
|
||||
} else {
|
||||
__ mov(c_rarg0, addr);
|
||||
__ mov(c_rarg1, count);
|
||||
}
|
||||
if (UseCompressedOops) {
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
|
||||
} else {
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
|
||||
}
|
||||
__ pop(saved_regs, sp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
|
||||
Register start, Register end, Register scratch, RegSet saved_regs) {
|
||||
if (is_oop) {
|
||||
__ push(saved_regs, sp);
|
||||
// must compute element count unless barrier set interface is changed (other platforms supply count)
|
||||
assert_different_registers(start, end, scratch);
|
||||
__ lea(scratch, Address(end, BytesPerHeapOop));
|
||||
__ sub(scratch, scratch, start); // subtract start to get #bytes
|
||||
__ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count
|
||||
__ mov(c_rarg0, start);
|
||||
__ mov(c_rarg1, scratch);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
|
||||
__ pop(saved_regs, sp);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
Register tmp,
|
||||
bool tosca_live,
|
||||
bool expand_call) {
|
||||
if (ShenandoahSATBBarrier) {
|
||||
satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
|
||||
Register obj,
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
Register tmp,
|
||||
bool tosca_live,
|
||||
bool expand_call) {
|
||||
// If expand_call is true then we expand the call_VM_leaf macro
|
||||
// directly to skip generating the check by
|
||||
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
|
||||
|
||||
assert(thread == rthread, "must be");
|
||||
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
assert_different_registers(obj, pre_val, tmp, rscratch1);
|
||||
assert(pre_val != noreg && tmp != noreg, "expecting a register");
|
||||
|
||||
Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
|
||||
Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
|
||||
Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
|
||||
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ ldrw(tmp, in_progress);
|
||||
} else {
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ ldrb(tmp, in_progress);
|
||||
}
|
||||
__ cbzw(tmp, done);
|
||||
|
||||
// Do we need to load the previous value?
|
||||
if (obj != noreg) {
|
||||
__ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
|
||||
}
|
||||
|
||||
// Is the previous value null?
|
||||
__ cbz(pre_val, done);
|
||||
|
||||
// Can we store original value in the thread's buffer?
|
||||
// Is index == 0?
|
||||
// (The index field is typed as size_t.)
|
||||
|
||||
__ ldr(tmp, index); // tmp := *index_adr
|
||||
__ cbz(tmp, runtime); // tmp == 0?
|
||||
// If yes, goto runtime
|
||||
|
||||
__ sub(tmp, tmp, wordSize); // tmp := tmp - wordSize
|
||||
__ str(tmp, index); // *index_adr := tmp
|
||||
__ ldr(rscratch1, buffer);
|
||||
__ add(tmp, tmp, rscratch1); // tmp := tmp + *buffer_adr
|
||||
|
||||
// Record the previous value
|
||||
__ str(pre_val, Address(tmp, 0));
|
||||
__ b(done);
|
||||
|
||||
__ bind(runtime);
|
||||
// save the live input values
|
||||
RegSet saved = RegSet::of(pre_val);
|
||||
if (tosca_live) saved += RegSet::of(r0);
|
||||
if (obj != noreg) saved += RegSet::of(obj);
|
||||
|
||||
__ push(saved, sp);
|
||||
|
||||
// Calling the runtime using the regular call_VM_leaf mechanism generates
|
||||
// code (generated by InterpreterMacroAssember::call_VM_leaf_base)
|
||||
// that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
|
||||
//
|
||||
// If we care generating the pre-barrier without a frame (e.g. in the
|
||||
// intrinsified Reference.get() routine) then ebp might be pointing to
|
||||
// the caller frame and so this check will most likely fail at runtime.
|
||||
//
|
||||
// Expanding the call directly bypasses the generation of the check.
|
||||
// So when we do not have have a full interpreter frame on the stack
|
||||
// expand_call should be passed true.
|
||||
|
||||
if (expand_call) {
|
||||
assert(pre_val != c_rarg1, "smashed arg");
|
||||
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
|
||||
} else {
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
|
||||
}
|
||||
|
||||
__ pop(saved, sp);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
|
||||
if (ShenandoahReadBarrier) {
|
||||
read_barrier_impl(masm, dst);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
|
||||
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
|
||||
Label is_null;
|
||||
__ cbz(dst, is_null);
|
||||
read_barrier_not_null_impl(masm, dst);
|
||||
__ bind(is_null);
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
|
||||
if (ShenandoahReadBarrier) {
|
||||
read_barrier_not_null_impl(masm, dst);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
|
||||
assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
|
||||
__ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
|
||||
if (ShenandoahWriteBarrier) {
|
||||
write_barrier_impl(masm, dst);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
|
||||
assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
|
||||
assert(dst != rscratch1, "need rscratch1");
|
||||
assert(dst != rscratch2, "need rscratch2");
|
||||
|
||||
Label done;
|
||||
|
||||
Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
|
||||
__ ldrb(rscratch1, gc_state);
|
||||
|
||||
// Check for heap stability
|
||||
__ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
|
||||
__ tst(rscratch1, rscratch2);
|
||||
__ br(Assembler::EQ, done);
|
||||
|
||||
// Heap is unstable, need to perform the read-barrier even if WB is inactive
|
||||
__ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
|
||||
|
||||
// Check for evacuation-in-progress and jump to WB slow-path if needed
|
||||
__ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
|
||||
__ tst(rscratch1, rscratch2);
|
||||
__ br(Assembler::EQ, done);
|
||||
|
||||
RegSet to_save = RegSet::of(r0);
|
||||
if (dst != r0) {
|
||||
__ push(to_save, sp);
|
||||
__ mov(r0, dst);
|
||||
}
|
||||
|
||||
__ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));
|
||||
|
||||
if (dst != r0) {
|
||||
__ mov(dst, r0);
|
||||
__ pop(to_save, sp);
|
||||
}
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
|
||||
if (ShenandoahStoreValEnqueueBarrier) {
|
||||
Label is_null;
|
||||
__ cbz(dst, is_null);
|
||||
write_barrier_impl(masm, dst);
|
||||
__ bind(is_null);
|
||||
// Save possibly live regs.
|
||||
RegSet live_regs = RegSet::range(r0, r4) - dst;
|
||||
__ push(live_regs, sp);
|
||||
__ strd(v0, __ pre(sp, 2 * -wordSize));
|
||||
|
||||
satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, true, false);
|
||||
|
||||
// Restore possibly live regs.
|
||||
__ ldrd(v0, __ post(sp, 2 * wordSize));
|
||||
__ pop(live_regs, sp);
|
||||
}
|
||||
if (ShenandoahStoreValReadBarrier) {
|
||||
read_barrier_impl(masm, dst);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register dst, Address src, Register tmp1, Register tmp_thread) {
|
||||
bool on_oop = type == T_OBJECT || type == T_ARRAY;
|
||||
bool in_heap = (decorators & IN_HEAP) != 0;
|
||||
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
|
||||
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
|
||||
bool on_reference = on_weak || on_phantom;
|
||||
|
||||
if (in_heap) {
|
||||
read_barrier_not_null(masm, src.base());
|
||||
}
|
||||
|
||||
BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
|
||||
if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
|
||||
__ enter();
|
||||
satb_write_barrier_pre(masm /* masm */,
|
||||
noreg /* obj */,
|
||||
dst /* pre_val */,
|
||||
rthread /* thread */,
|
||||
tmp1 /* tmp */,
|
||||
true /* tosca_live */,
|
||||
true /* expand_call */);
|
||||
__ leave();
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Address dst, Register val, Register tmp1, Register tmp2) {
|
||||
bool on_oop = type == T_OBJECT || type == T_ARRAY;
|
||||
bool in_heap = (decorators & IN_HEAP) != 0;
|
||||
if (in_heap) {
|
||||
write_barrier(masm, dst.base());
|
||||
}
|
||||
if (!on_oop) {
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
|
||||
return;
|
||||
}
|
||||
|
||||
// flatten object address if needed
|
||||
if (dst.index() == noreg && dst.offset() == 0) {
|
||||
if (dst.base() != r3) {
|
||||
__ mov(r3, dst.base());
|
||||
}
|
||||
} else {
|
||||
__ lea(r3, dst);
|
||||
}
|
||||
|
||||
shenandoah_write_barrier_pre(masm,
|
||||
r3 /* obj */,
|
||||
tmp2 /* pre_val */,
|
||||
rthread /* thread */,
|
||||
tmp1 /* tmp */,
|
||||
val != noreg /* tosca_live */,
|
||||
false /* expand_call */);
|
||||
|
||||
if (val == noreg) {
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
|
||||
} else {
|
||||
storeval_barrier(masm, val, tmp1);
|
||||
// G1 barrier needs uncompressed oop for region cross check.
|
||||
Register new_val = val;
|
||||
if (UseCompressedOops) {
|
||||
new_val = rscratch2;
|
||||
__ mov(new_val, val);
|
||||
}
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
|
||||
__ cmp(op1, op2);
|
||||
if (ShenandoahAcmpBarrier) {
|
||||
Label done;
|
||||
__ br(Assembler::EQ, done);
|
||||
// The object may have been evacuated, but we won't see it without a
|
||||
// membar here.
|
||||
__ membar(Assembler::LoadStore| Assembler::LoadLoad);
|
||||
read_barrier(masm, op1);
|
||||
read_barrier(masm, op2);
|
||||
__ cmp(op1, op2);
|
||||
__ bind(done);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
|
||||
Register var_size_in_bytes,
|
||||
int con_size_in_bytes,
|
||||
Register t1,
|
||||
Register t2,
|
||||
Label& slow_case) {
|
||||
|
||||
assert_different_registers(obj, t2);
|
||||
assert_different_registers(obj, var_size_in_bytes);
|
||||
Register end = t2;
|
||||
|
||||
__ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
|
||||
if (var_size_in_bytes == noreg) {
|
||||
__ lea(end, Address(obj, (int) (con_size_in_bytes + ShenandoahBrooksPointer::byte_size())));
|
||||
} else {
|
||||
__ add(var_size_in_bytes, var_size_in_bytes, ShenandoahBrooksPointer::byte_size());
|
||||
__ lea(end, Address(obj, var_size_in_bytes));
|
||||
}
|
||||
__ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
|
||||
__ cmp(end, rscratch1);
|
||||
__ br(Assembler::HI, slow_case);
|
||||
|
||||
// update the tlab top pointer
|
||||
__ str(end, Address(rthread, JavaThread::tlab_top_offset()));
|
||||
|
||||
__ add(obj, obj, ShenandoahBrooksPointer::byte_size());
|
||||
__ str(obj, Address(obj, ShenandoahBrooksPointer::byte_offset()));
|
||||
|
||||
// recover var_size_in_bytes if necessary
|
||||
if (var_size_in_bytes == end) {
|
||||
__ sub(var_size_in_bytes, var_size_in_bytes, obj);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
|
||||
bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
|
||||
bool is_write = (decorators & ACCESS_WRITE) != 0;
|
||||
if (is_write) {
|
||||
if (oop_not_null) {
|
||||
write_barrier(masm, obj);
|
||||
} else {
|
||||
Label done;
|
||||
__ cbz(obj, done);
|
||||
write_barrier(masm, obj);
|
||||
__ bind(done);
|
||||
}
|
||||
} else {
|
||||
if (oop_not_null) {
|
||||
read_barrier_not_null(masm, obj);
|
||||
} else {
|
||||
read_barrier(masm, obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
|
||||
bool acquire, bool release, bool weak, bool encode,
|
||||
Register tmp1, Register tmp2, Register tmp3,
|
||||
Register result) {
|
||||
|
||||
if (!ShenandoahCASBarrier) {
|
||||
if (UseCompressedOops) {
|
||||
if (encode) {
|
||||
__ encode_heap_oop(tmp1, expected);
|
||||
expected = tmp1;
|
||||
__ encode_heap_oop(tmp3, new_val);
|
||||
new_val = tmp3;
|
||||
}
|
||||
__ cmpxchg(addr, expected, new_val, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
|
||||
__ membar(__ AnyAny);
|
||||
} else {
|
||||
__ cmpxchg(addr, expected, new_val, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
|
||||
__ membar(__ AnyAny);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (encode) {
|
||||
storeval_barrier(masm, new_val, tmp3);
|
||||
}
|
||||
|
||||
if (UseCompressedOops) {
|
||||
if (encode) {
|
||||
__ encode_heap_oop(tmp1, expected);
|
||||
expected = tmp1;
|
||||
__ encode_heap_oop(tmp2, new_val);
|
||||
new_val = tmp2;
|
||||
}
|
||||
}
|
||||
bool is_cae = (result != noreg);
|
||||
bool is_narrow = UseCompressedOops;
|
||||
Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
|
||||
if (! is_cae) result = rscratch1;
|
||||
|
||||
assert_different_registers(addr, expected, new_val, result, tmp3);
|
||||
|
||||
Label retry, done, fail;
|
||||
|
||||
// CAS, using LL/SC pair.
|
||||
__ bind(retry);
|
||||
__ load_exclusive(result, addr, size, acquire);
|
||||
if (is_narrow) {
|
||||
__ cmpw(result, expected);
|
||||
} else {
|
||||
__ cmp(result, expected);
|
||||
}
|
||||
__ br(Assembler::NE, fail);
|
||||
__ store_exclusive(tmp3, new_val, addr, size, release);
|
||||
if (weak) {
|
||||
__ cmpw(tmp3, 0u); // If the store fails, return NE to our caller
|
||||
} else {
|
||||
__ cbnzw(tmp3, retry);
|
||||
}
|
||||
__ b(done);
|
||||
|
||||
__ bind(fail);
|
||||
// Check if rb(expected)==rb(result)
|
||||
// Shuffle registers so that we have memory value ready for next expected.
|
||||
__ mov(tmp3, expected);
|
||||
__ mov(expected, result);
|
||||
if (is_narrow) {
|
||||
__ decode_heap_oop(result, result);
|
||||
__ decode_heap_oop(tmp3, tmp3);
|
||||
}
|
||||
read_barrier_impl(masm, result);
|
||||
read_barrier_impl(masm, tmp3);
|
||||
__ cmp(result, tmp3);
|
||||
// Retry with expected now being the value we just loaded from addr.
|
||||
__ br(Assembler::EQ, retry);
|
||||
if (is_narrow && is_cae) {
|
||||
// For cmp-and-exchange and narrow oops, we need to restore
|
||||
// the compressed old-value. We moved it to 'expected' a few lines up.
|
||||
__ mov(result, expected);
|
||||
}
|
||||
__ bind(done);
|
||||
|
||||
}
|
||||
|
||||
#ifdef COMPILER1
|
||||
|
||||
#undef __
|
||||
#define __ ce->masm()->
|
||||
|
||||
void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
|
||||
ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
|
||||
// At this point we know that marking is in progress.
|
||||
// If do_load() is true then we have to emit the
|
||||
// load of the previous value; otherwise it has already
|
||||
// been loaded into _pre_val.
|
||||
|
||||
__ bind(*stub->entry());
|
||||
|
||||
assert(stub->pre_val()->is_register(), "Precondition.");
|
||||
|
||||
Register pre_val_reg = stub->pre_val()->as_register();
|
||||
|
||||
if (stub->do_load()) {
|
||||
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
|
||||
}
|
||||
__ cbz(pre_val_reg, *stub->continuation());
|
||||
ce->store_parameter(stub->pre_val()->as_register(), 0);
|
||||
__ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
|
||||
__ b(*stub->continuation());
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {
|
||||
|
  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();

  Label done;

  __ bind(*stub->entry());

  if (res != obj) {
    __ mov(res, obj);
  }
  // Check for null.
  if (stub->needs_null_check()) {
    __ cbz(res, done);
  }

  write_barrier(ce->masm(), res);

  __ bind(done);
  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ mov(rscratch2, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ tst(tmp, rscratch2);
  __ br(Assembler::EQ, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

#undef __

#endif // COMPILER1

address ShenandoahBarrierSetAssembler::shenandoah_wb() {
  assert(_shenandoah_wb != NULL, "need write barrier stub");
  return _shenandoah_wb;
}

#define __ cgen->assembler()->

// Shenandoah write barrier.
//
// Input:
//   r0: OOP to evacuate. Not null.
//
// Output:
//   r0: Pointer to evacuated OOP.
//
// Trash rscratch1, rscratch2. Preserve everything else.
address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {

  __ align(6);
  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
  address start = __ pc();

  Label work;
  __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ ldrb(rscratch2, Address(rscratch2, rscratch1));
  __ tbnz(rscratch2, 0, work);
  __ ret(lr);
  __ bind(work);

  Register obj = r0;

  __ enter(); // required for proper stackwalking of RuntimeStub frame

  __ push_call_clobbered_registers();

  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT));
  __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
  __ mov(rscratch1, obj);
  __ pop_call_clobbered_registers();
  __ mov(obj, rscratch1);

  __ leave(); // required for proper stackwalking of RuntimeStub frame
  __ ret(lr);

  return start;
}

#undef __

void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
  if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
    int stub_code_size = 2048;
    ResourceMark rm;
    BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
    CodeBuffer buf(bb);
    StubCodeGenerator cgen(&buf);
    _shenandoah_wb = generate_shenandoah_wb(&cgen);
  }
}
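For orientation, the fast path emitted by generate_shenandoah_wb above amounts to a byte-map lookup keyed by heap-region number. A minimal C++ sketch of that logic follows; in_cset_fast_test, region_size_bytes_shift and evacuate_slow are hypothetical stand-ins for the VM state the stub actually reads, so treat this as an illustration rather than HotSpot code.

#include <cstdint>

// Hypothetical stand-ins for the VM state the stub reads.
extern uint8_t* in_cset_fast_test;          // one byte per heap region
extern unsigned region_size_bytes_shift;    // log2 of the region size
extern void* evacuate_slow(void* obj);      // slow path, cf. ShenandoahRuntime::write_barrier_JRT

// Sketch of the write-barrier fast path: only objects whose region is in
// the collection set (low bit of the byte set) pay for the runtime call.
static inline void* shenandoah_wb_sketch(void* obj) {
  uintptr_t region = (uintptr_t)obj >> region_size_bytes_shift;
  if (in_cset_fast_test[region] & 1) {   // mirrors: tbnz rscratch2, 0, work
    return evacuate_slow(obj);           // evacuates and returns the new copy
  }
  return obj;                            // mirrors: __ ret(lr) on the fast path
}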
@ -0,0 +1,102 @@
/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP

#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#ifdef COMPILER1
class LIR_Assembler;
class ShenandoahPreBarrierStub;
class ShenandoahWriteBarrierStub;
class StubAssembler;
class StubCodeGenerator;
#endif

class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:

  static address _shenandoah_wb;

  void satb_write_barrier_pre(MacroAssembler* masm,
                              Register obj,
                              Register pre_val,
                              Register thread,
                              Register tmp,
                              bool tosca_live,
                              bool expand_call);
  void shenandoah_write_barrier_pre(MacroAssembler* masm,
                                    Register obj,
                                    Register pre_val,
                                    Register thread,
                                    Register tmp,
                                    bool tosca_live,
                                    bool expand_call);

  void read_barrier(MacroAssembler* masm, Register dst);
  void read_barrier_impl(MacroAssembler* masm, Register dst);
  void read_barrier_not_null(MacroAssembler* masm, Register dst);
  void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);
  void write_barrier(MacroAssembler* masm, Register dst);
  void write_barrier_impl(MacroAssembler* masm, Register dst);
  void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
  void asm_acmp_barrier(MacroAssembler* masm, Register op1, Register op2);

  address generate_shenandoah_wb(StubCodeGenerator* cgen);

public:
  static address shenandoah_wb();

#ifdef COMPILER1
  void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
  void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
#endif

  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                  Register addr, Register count, RegSet saved_regs);
  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                  Register start, Register end, Register tmp, RegSet saved_regs);
  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                       Register dst, Address src, Register tmp1, Register tmp_thread);
  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Address dst, Register val, Register tmp1, Register tmp2);
  virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
  virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);
  virtual void tlab_allocate(MacroAssembler* masm, Register obj,
                             Register var_size_in_bytes,
                             int con_size_in_bytes,
                             Register t1,
                             Register t2,
                             Label& slow_case);

  void cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
                   bool acquire, bool release, bool weak, bool encode,
                   Register tmp1, Register tmp2, Register tmp3 = rscratch2,
                   Register result = noreg);

  virtual void barrier_stubs_init();
};

#endif // CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP
@ -0,0 +1,98 @@
/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
  Register addr = _addr->as_register_lo();
  Register newval = _new_value->as_register();
  Register cmpval = _cmp_value->as_register();
  Register tmp1 = _tmp1->as_register();
  Register tmp2 = _tmp2->as_register();
  ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, true, tmp1, tmp2);
}

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  BasicType bt = access.type();
  if (access.is_oop()) {
    LIRGenerator *gen = access.gen();
    if (ShenandoahSATBBarrier) {
      pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
                  LIR_OprFact::illegalOpr /* pre_val */);
    }
    if (ShenandoahCASBarrier) {
      cmp_value.load_item();
      new_value.load_item();

      LIR_Opr t1 = gen->new_register(T_OBJECT);
      LIR_Opr t2 = gen->new_register(T_OBJECT);
      LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();

      __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2,
                                                   LIR_OprFact::illegalOpr));

      LIR_Opr result = gen->new_register(T_INT);
      __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
               result, T_INT);
      return result;
    }
  }
  return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}

LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator* gen = access.gen();
  BasicType type = access.type();

  LIR_Opr result = gen->new_register(type);
  value.load_item();
  LIR_Opr value_opr = value.result();

  if (access.is_oop()) {
    value_opr = storeval_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators());
  }

  assert(type == T_INT || type == T_OBJECT || type == T_ARRAY LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Opr tmp = gen->new_register(T_INT);
  __ xchg(access.resolved_addr(), value_opr, result, tmp);

  if (access.is_oop()) {
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
                  result /* pre_val */);
    }
  }

  return result;
}
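The pre_barrier(..., result /* pre_val */) call after the xchg in atomic_xchg_at_resolved above enforces the snapshot-at-the-beginning invariant: the reference displaced by the swap must be reported to the concurrent marker or a live object could be missed. A rough sketch of that invariant, assuming a hypothetical satb_enqueue hook in place of the real pre-barrier slow path:

#include <atomic>

// Hypothetical SATB queue hook; in HotSpot this is the pre-barrier slow path.
void satb_enqueue(void* previous_value);

// Sketch: an atomic swap on a reference field is a hidden overwrite, so the
// displaced value is handed to the snapshot-at-the-beginning marker, just as
// the LIR above passes the xchg result as pre_val.
void* oop_xchg_with_satb(std::atomic<void*>& field, void* new_value) {
  void* previous = field.exchange(new_value);  // the xchg itself
  if (previous != nullptr) {
    satb_enqueue(previous);                    // pre-barrier on the old value
  }
  return previous;
}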
281
src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad
Normal file
@ -0,0 +1,281 @@
//
// Copyright (c) 2018, Red Hat, Inc. All rights reserved.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

source_hpp %{
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
%}

encode %{
  enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
  %}

  enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
    MacroAssembler _masm(&cbuf);
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
  %}
%}

instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{
  match(Set dst (ShenandoahReadBarrier src));
  format %{ "shenandoah_rb $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ ldr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
  %}
  ins_pipe(pipe_class_memory);
%}


instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

  match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_oop_shenandoah(mem, oldval, newval, tmp),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{

  match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchgw_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
    __ cset($res$$Register, Assembler::EQ);
  %}

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchg_acq_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq_oop_shenandoah(mem, oldval, newval, tmp),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}

instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(TEMP tmp, KILL cr);

  format %{
    "cmpxchgw_acq_shenandoah_narrow_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg);
    __ cset($res$$Register, Assembler::EQ);
  %}

  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchg_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ false, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahCompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahCompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, TEMP tmp, KILL cr);
  format %{
    "cmpxchg_acq_shenandoah $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ false, /*encode*/ false, noreg, noreg, rscratch2, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ false, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchg_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ false, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahWeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchgw_acq_shenandoah $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (ShenandoahWeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP tmp, KILL cr);
  format %{
    "cmpxchg_acq_shenandoah $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    Register tmp = $tmp$$Register;
    __ mov(tmp, $oldval$$Register); // Must not clobber oldval.
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
                                                   /*acquire*/ true, /*release*/ true, /*weak*/ true, /*encode*/ false, noreg, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
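Each encoding above copies $oldval into a temp before calling cmpxchg_oop ("Must not clobber oldval"), because the barriered CAS may rewrite its expected-value register while resolving forwarding pointers: under Shenandoah the field can legitimately hold a from-space copy of the expected object. The following C++ sketch illustrates only the retry idea; resolve_forwarding is a hypothetical stand-in for the Brooks-pointer read, and the real code works at the assembly level.

#include <atomic>

// Hypothetical forwarding-pointer resolution (reads the Brooks pointer).
void* resolve_forwarding(void* obj);

// Sketch: a plain CAS can fail spuriously under Shenandoah when the field
// holds a stale copy of the expected object. Resolve both sides; if they
// denote the same object, retry against the current raw field value.
bool shenandoah_cas_sketch(std::atomic<void*>& field, void* expected, void* new_val) {
  void* exp = expected;
  while (!field.compare_exchange_strong(exp, new_val)) {
    // exp now holds the current field contents.
    if (resolve_forwarding(exp) != resolve_forwarding(expected)) {
      return false;  // genuinely a different object: real failure
    }
    // Same logical object, different copy: loop and compare again.
  }
  return true;
}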
@ -5740,9 +5740,10 @@ class StubGenerator: public StubCodeGenerator {
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C();
    }

    if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
      StubRoutines::_dlog = generate_dlog();
    }
    // Disabled until JDK-8210858 is fixed
    // if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
    //   StubRoutines::_dlog = generate_dlog();
    // }

    if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
      StubRoutines::_dsin = generate_dsin_dcos(/* isCos = */ false);

@ -1580,9 +1580,9 @@ void SharedRuntime::generate_deopt_blob() {
  __ mov(R0, Rthread);
  __ mov(R1, Rkind);

  pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
  pc_offset = __ set_last_Java_frame(SP, FP, true, Rtemp);
  assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
  if (pc_offset == -1) {
    pc_offset = __ offset();
  }

@ -1747,8 +1747,8 @@ void SharedRuntime::generate_uncommon_trap_blob() {
  // Call unpack_frames with proper arguments
  __ mov(R0, Rthread);
  __ mov(R1, Deoptimization::Unpack_uncommon_trap);
  __ set_last_Java_frame(SP, FP, false, Rtemp);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
  __ set_last_Java_frame(SP, FP, true, Rtemp);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
  // oop_maps->add_gc_map(__ pc() - start, new OopMap(frame_size_in_words, 0));
  __ reset_last_Java_frame(Rtemp);


@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -521,15 +521,3 @@ void VM_Version::allow_all() {
void VM_Version::revert() {
  _features = saved_features;
}

/* Determine a suitable number of threads on this particular machine.
 *
 * FIXME: Simply checking the processor family is insufficient.
 */
unsigned int VM_Version::calc_parallel_worker_threads() {
  const int num = 5;
  const int den = is_post_niagara() ? 16 : 8;
  const int threshold = 8;

  return nof_parallel_worker_threads(num, den, threshold);
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -365,8 +365,12 @@ public:
  // this properly in order to enable complete page size support.
  static uint page_size_count() { return 2; }

  // Calculates the number of parallel threads
  static unsigned int calc_parallel_worker_threads();
  // Override default denominator for ParallelGCThreads.
  //
  // FIXME: Simply checking the processor family is insufficient.
  static uint parallel_worker_threads_denominator() {
    return is_post_niagara() ? 16 : 8;
  }
};

#endif // CPU_SPARC_VM_VM_VERSION_SPARC_HPP

File diff suppressed because it is too large
@ -0,0 +1,119 @@
/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP
#define CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP

#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#ifdef COMPILER1
class LIR_Assembler;
class ShenandoahPreBarrierStub;
class ShenandoahWriteBarrierStub;
class StubAssembler;
class StubCodeGenerator;
#endif

class ShenandoahBarrierSetAssembler: public BarrierSetAssembler {
private:

  static address _shenandoah_wb;

  void satb_write_barrier_pre(MacroAssembler* masm,
                              Register obj,
                              Register pre_val,
                              Register thread,
                              Register tmp,
                              bool tosca_live,
                              bool expand_call);

  void shenandoah_write_barrier_pre(MacroAssembler* masm,
                                    Register obj,
                                    Register pre_val,
                                    Register thread,
                                    Register tmp,
                                    bool tosca_live,
                                    bool expand_call);

  void read_barrier(MacroAssembler* masm, Register dst);
  void read_barrier_impl(MacroAssembler* masm, Register dst);

  void read_barrier_not_null(MacroAssembler* masm, Register dst);
  void read_barrier_not_null_impl(MacroAssembler* masm, Register dst);

  void write_barrier(MacroAssembler* masm, Register dst);
  void write_barrier_impl(MacroAssembler* masm, Register dst);

  void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp);
  void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp);

  address generate_shenandoah_wb(StubCodeGenerator* cgen);

  void save_vector_registers(MacroAssembler* masm);
  void restore_vector_registers(MacroAssembler* masm);

public:
  static address shenandoah_wb();

#ifdef COMPILER1
  void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
  void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub);
  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
#endif

  void cmpxchg_oop(MacroAssembler* masm,
                   Register res, Address addr, Register oldval, Register newval,
                   bool exchange, bool encode, Register tmp1, Register tmp2);
  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register src, Register dst, Register count);
  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register src, Register dst, Register count);
  virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                       Register dst, Address src, Register tmp1, Register tmp_thread);
  virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Address dst, Register val, Register tmp1, Register tmp2);

#ifndef _LP64
  virtual void obj_equals(MacroAssembler* masm,
                          Address obj1, jobject obj2);
  virtual void obj_equals(MacroAssembler* masm,
                          Register obj1, jobject obj2);
#endif

  virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2);
  virtual void obj_equals(MacroAssembler* masm, Register src1, Address src2);

  virtual void tlab_allocate(MacroAssembler* masm,
                             Register thread, Register obj,
                             Register var_size_in_bytes,
                             int con_size_in_bytes,
                             Register t1, Register t2,
                             Label& slow_case);

  virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj);

  virtual void barrier_stubs_init();

};

#endif // CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP
104
src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp
Normal file
@ -0,0 +1,104 @@
/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
  Register addr = _addr->as_register_lo();
  Register newval = _new_value->as_register();
  Register cmpval = _cmp_value->as_register();
  Register tmp1 = _tmp1->as_register();
  Register tmp2 = _tmp2->as_register();
  assert(cmpval == rax, "wrong register");
  assert(newval != NULL, "new val must be register");
  assert(cmpval != newval, "cmp and new values must be in different registers");
  assert(cmpval != addr, "cmp and addr must be in different registers");
  assert(newval != addr, "new value and addr must be in different registers");
  ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), NULL, Address(addr, 0), cmpval, newval, true, true, tmp1, tmp2);
}

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

LIR_Opr ShenandoahBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {

  if (access.is_oop()) {
    LIRGenerator* gen = access.gen();
    if (ShenandoahSATBBarrier) {
      pre_barrier(gen, access.access_emit_info(), access.decorators(), access.resolved_addr(),
                  LIR_OprFact::illegalOpr /* pre_val */);
    }
    if (ShenandoahCASBarrier) {
      cmp_value.load_item_force(FrameMap::rax_oop_opr);
      new_value.load_item();

      LIR_Opr t1 = gen->new_register(T_OBJECT);
      LIR_Opr t2 = gen->new_register(T_OBJECT);
      LIR_Opr addr = access.resolved_addr()->as_address_ptr()->base();

      __ append(new LIR_OpShenandoahCompareAndSwap(addr, cmp_value.result(), new_value.result(), t1, t2, LIR_OprFact::illegalOpr));

      LIR_Opr result = gen->new_register(T_INT);
      __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
               result, T_INT);
      return result;
    }
  }
  return BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}

LIR_Opr ShenandoahBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator* gen = access.gen();
  BasicType type = access.type();

  LIR_Opr result = gen->new_register(type);
  value.load_item();
  LIR_Opr value_opr = value.result();

  if (access.is_oop()) {
    value_opr = storeval_barrier(access.gen(), value_opr, access.access_emit_info(), access.decorators());
  }

  // Because we want a 2-arg form of xchg and xadd
  __ move(value_opr, result);

  assert(type == T_INT || type == T_OBJECT || type == T_ARRAY LP64_ONLY( || type == T_LONG ), "unexpected type");
  __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);

  if (access.is_oop()) {
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
                  result /* pre_val */);
    }
  }

  return result;
}
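The comment "Because we want a 2-arg form of xchg and xadd" in atomic_xchg_at_resolved above refers to x86's XCHG, which swaps a register with memory in place: the register supplying the new value is the same one that receives the old value, so C1 first copies the value into the result register and then swaps that register itself. A small illustration in plain C++, not HotSpot code:

#include <atomic>

// x86 XCHG is "swap register with memory in place": the register that
// supplies the new value is the one that receives the old value. C1
// therefore copies value into result first, then swaps result itself.
template <typename T>
T two_arg_xchg(std::atomic<T>& memory, T value) {
  T result = value;                  // mirrors: __ move(value_opr, result)
  result = memory.exchange(result);  // mirrors: __ xchg(addr, result, result, ...)
  return result;                     // old memory contents
}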
152
src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad
Normal file
@ -0,0 +1,152 @@
//
// Copyright (c) 2018, Red Hat, Inc. All rights reserved.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

source_hpp %{
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
%}

instruct shenandoahRB(rRegP dst, rRegP src, rFlagsReg cr) %{
  match(Set dst (ShenandoahReadBarrier src));
  effect(DEF dst, USE src);
  ins_cost(125); // XXX
  format %{ "shenandoah_rb $dst, $src" %}
  ins_encode %{
    Register d = $dst$$Register;
    Register s = $src$$Register;
    __ movptr(d, Address(s, ShenandoahBrooksPointer::byte_offset()));
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct shenandoahRBNarrow(rRegP dst, rRegN src) %{
  predicate(UseCompressedOops && (Universe::narrow_oop_shift() == 0));
  match(Set dst (ShenandoahReadBarrier (DecodeN src)));
  effect(DEF dst, USE src);
  ins_cost(125); // XXX
  format %{ "shenandoah_rb $dst, $src" %}
  ins_encode %{
    Register d = $dst$$Register;
    Register s = $src$$Register;
    __ movptr(d, Address(r12, s, Address::times_1, ShenandoahBrooksPointer::byte_offset()));
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct shenandoahRBNarrowShift(rRegP dst, rRegN src) %{
  predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
  match(Set dst (ShenandoahReadBarrier (DecodeN src)));
  effect(DEF dst, USE src);
  ins_cost(125); // XXX
  format %{ "shenandoah_rb $dst, $src" %}
  ins_encode %{
    Register d = $dst$$Register;
    Register s = $src$$Register;
    __ movptr(d, Address(r12, s, Address::times_8, ShenandoahBrooksPointer::byte_offset()));
  %}
  ins_pipe(ialu_reg_mem);
%}

instruct compareAndSwapP_shenandoah(rRegI res,
                                    memory mem_ptr,
                                    rRegP tmp1, rRegP tmp2,
                                    rax_RegP oldval, rRegP newval,
                                    rFlagsReg cr)
%{
  predicate(VM_Version::supports_cx8());
  match(Set res (ShenandoahCompareAndSwapP mem_ptr (Binary oldval newval)));
  match(Set res (ShenandoahWeakCompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval);

  format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}

  ins_encode %{
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
                                                   $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
                                                   false, // swap
                                                   false, $tmp1$$Register, $tmp2$$Register
                                                   );
  %}
  ins_pipe( pipe_cmpxchg );
%}

instruct compareAndSwapN_shenandoah(rRegI res,
                                    memory mem_ptr,
                                    rRegP tmp1, rRegP tmp2,
                                    rax_RegN oldval, rRegN newval,
                                    rFlagsReg cr) %{
  match(Set res (ShenandoahCompareAndSwapN mem_ptr (Binary oldval newval)));
  match(Set res (ShenandoahWeakCompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval);

  format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}

  ins_encode %{
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
                                                   $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
                                                   false, // swap
                                                   false, $tmp1$$Register, $tmp2$$Register
                                                   );
  %}
  ins_pipe( pipe_cmpxchg );
%}

instruct compareAndExchangeN_shenandoah(memory mem_ptr,
                                        rax_RegN oldval, rRegN newval,
                                        rRegP tmp1, rRegP tmp2,
                                        rFlagsReg cr) %{
  match(Set oldval (ShenandoahCompareAndExchangeN mem_ptr (Binary oldval newval)));
  effect(TEMP tmp1, TEMP tmp2, KILL cr);

  format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}

  ins_encode %{
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
                                                   NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
                                                   true, // exchange
                                                   false, $tmp1$$Register, $tmp2$$Register
                                                   );
  %}
  ins_pipe( pipe_cmpxchg );
%}

instruct compareAndExchangeP_shenandoah(memory mem_ptr,
                                        rax_RegP oldval, rRegP newval,
                                        rRegP tmp1, rRegP tmp2,
                                        rFlagsReg cr)
%{
  predicate(VM_Version::supports_cx8());
  match(Set oldval (ShenandoahCompareAndExchangeP mem_ptr (Binary oldval newval)));
  effect(KILL cr, TEMP tmp1, TEMP tmp2);
  ins_cost(1000);

  format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}

  ins_encode %{
    ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
                                                   NULL, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
                                                   true, // exchange
                                                   false, $tmp1$$Register, $tmp2$$Register
                                                   );
  %}
  ins_pipe( pipe_cmpxchg );
%}
@ -4359,7 +4359,7 @@ bool os::start_debugging(char *buf, int buflen) {
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
  return st.st_mtime;
}

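This and the following four hunks make the same one-line change in each platform's get_mtime helper: the assert message now formats errno with os::strerror instead of libc strerror, whose historical implementations may return a pointer into a shared static buffer and so are not guaranteed reentrant. A simplified sketch of the pattern, with safe_strerror standing in for the HotSpot wrapper:

#include <cerrno>
#include <ctime>
#include <sys/stat.h>

// Simplified stand-in for os::strerror: returns a pointer to an immutable
// string, so concurrent callers never race on a shared static buffer.
const char* safe_strerror(int e) {
  switch (e) {
    case ENOENT: return "No such file or directory";
    case EACCES: return "Permission denied";
    default:     return "Unknown error";
  }
}

time_t get_mtime_sketch(const char* filename) {
  struct stat st;
  if (::stat(filename, &st) != 0) {
    const char* msg = safe_strerror(errno);  // reentrant-safe lookup
    (void)msg;                               // a real caller would log/assert
    return (time_t)-1;
  }
  return st.st_mtime;
}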
@ -3388,7 +3388,7 @@ bool os::message_box(const char* title, const char* message) {
static inline struct timespec get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
#ifdef __APPLE__
  return st.st_mtimespec;
#else

@ -5946,7 +5946,7 @@ size_t os::current_stack_size() {
static inline struct timespec get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
  return st.st_mtim;
}


@ -1652,7 +1652,7 @@ void* os::get_default_process_handle() {
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
  return st.st_mtime;
}


@ -1563,7 +1563,7 @@ int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
  return st.st_mtime;
}

@ -26,6 +26,9 @@
#define OS_WINDOWS_VM_OS_WINDOWS_HPP
// Win32_OS defines the interface to windows operating systems

// strtok_s is the Windows thread-safe equivalent of POSIX strtok_r
#define strtok_r strtok_s

// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }


@ -775,7 +775,10 @@ bool InstructForm::captures_bottom_type(FormDict &globals) const {
        !strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
        !strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
        !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") ||
        !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN"))) return true;
        !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN") ||
        !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
        !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") ||
        !strcmp(_matrule->_rChild->_opType,"ShenandoahReadBarrier"))) return true;
  else if ( is_ideal_load() == Form::idealP )  return true;
  else if ( is_ideal_store() != Form::none )   return true;

@ -3498,10 +3501,12 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
    "CompareAndSwapB", "CompareAndSwapS", "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
    "WeakCompareAndSwapB", "WeakCompareAndSwapS", "WeakCompareAndSwapI", "WeakCompareAndSwapL", "WeakCompareAndSwapP", "WeakCompareAndSwapN",
    "CompareAndExchangeB", "CompareAndExchangeS", "CompareAndExchangeI", "CompareAndExchangeL", "CompareAndExchangeP", "CompareAndExchangeN",
    "ShenandoahCompareAndSwapN", "ShenandoahCompareAndSwapP", "ShenandoahWeakCompareAndSwapP", "ShenandoahWeakCompareAndSwapN", "ShenandoahCompareAndExchangeP", "ShenandoahCompareAndExchangeN",
    "StoreCM",
    "ClearArray",
    "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
    "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
    "ShenandoahReadBarrier",
    "LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
  };
  int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);

@ -40,7 +40,7 @@
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vmOperations.hpp"

bool AOTLib::_narrow_oop_shift_initialized = false;
int  AOTLib::_narrow_oop_shift = 0;

@ -155,6 +155,10 @@ Metadata* AOTCompiledMethod::metadata_at(int index) const {
  ShouldNotReachHere(); return NULL;
}

void AOTCompiledMethod::do_unloading(bool unloading_occurred) {
  unload_nmethod_caches(unloading_occurred);
}

bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
  // Make sure the method is not flushed in case of a safepoint in code below.
  methodHandle the_method(method());

@ -265,6 +265,7 @@ private:
#endif
  }

  virtual void do_unloading(bool unloading_occurred);

protected:
  // AOT compiled methods are not flushed

@ -72,7 +72,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
  // by the GC but need to be strong roots if reachable from a current compilation.
  // InstanceKlass are created for both weak and strong metadata. Ensuring this metadata
  // alive covers the cases where there are weak roots without performance cost.
  oop holder = ik->holder_phantom();
  oop holder = ik->klass_holder();
  if (ik->is_unsafe_anonymous()) {
    // Though ciInstanceKlass records class loader oop, it's not enough to keep
    // VM unsafe anonymous classes alive (loader == NULL). Klass holder should
@ -742,3 +742,27 @@ void ciInstanceKlass::dump_replay_data(outputStream* out) {
    ik->do_local_static_fields(&sffp);
  }
}

#ifdef ASSERT
bool ciInstanceKlass::debug_final_field_at(int offset) {
  GUARDED_VM_ENTRY(
    InstanceKlass* ik = get_instanceKlass();
    fieldDescriptor fd;
    if (ik->find_field_from_offset(offset, false, &fd)) {
      return fd.is_final();
    }
  );
  return false;
}

bool ciInstanceKlass::debug_stable_field_at(int offset) {
  GUARDED_VM_ENTRY(
    InstanceKlass* ik = get_instanceKlass();
    fieldDescriptor fd;
    if (ik->find_field_from_offset(offset, false, &fd)) {
      return fd.is_stable();
    }
  );
  return false;
}
#endif

@ -274,6 +274,11 @@ public:

  // Dump the current state of this klass for compilation replay.
  virtual void dump_replay_data(outputStream* out);

#ifdef ASSERT
  bool debug_final_field_at(int offset);
  bool debug_stable_field_at(int offset);
#endif
};

#endif // SHARE_VM_CI_CIINSTANCEKLASS_HPP

@ -403,7 +403,7 @@ MethodLivenessResult ciMethod::raw_liveness_at_bci(int bci) {
// information.
MethodLivenessResult ciMethod::liveness_at_bci(int bci) {
  MethodLivenessResult result = raw_liveness_at_bci(bci);
  if (CURRENT_ENV->should_retain_local_variables() || DeoptimizeALot || CompileTheWorld) {
  if (CURRENT_ENV->should_retain_local_variables() || DeoptimizeALot) {
    // Keep all locals live for the user's edification and amusement.
    result.at_put_range(0, result.size(), true);
  }
@ -1210,7 +1210,7 @@ bool ciMethod::is_klass_loaded(int refinfo_index, bool must_be_resolved) const {
// ciMethod::check_call
bool ciMethod::check_call(int refinfo_index, bool is_static) const {
  // This method is used only in C2 from InlineTree::ok_to_inline,
  // and is only used under -Xcomp or -XX:CompileTheWorld.
  // and is only used under -Xcomp.
  // It appears to fail when applied to an invokeinterface call site.
  // FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points.
  VM_ENTRY_MARK;

@ -453,37 +453,6 @@ JImageLocationRef ClassLoader::jimage_find_resource(JImageFile* jf,
  return ((*JImageFindResource)(jf, module_name, get_jimage_version_string(), file_name, &size));
}

#ifndef PRODUCT
bool ctw_visitor(JImageFile* jimage,
                 const char* module_name, const char* version, const char* package,
                 const char* name, const char* extension, void* arg) {
  if (strcmp(extension, "class") == 0) {
    Thread* THREAD = Thread::current();
    ResourceMark rm(THREAD);
    char* path = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, JIMAGE_MAX_PATH);
    jio_snprintf(path, JIMAGE_MAX_PATH - 1, "%s/%s.class", package, name);
    ClassLoader::compile_the_world_in(path, *(Handle*)arg, THREAD);
    return !HAS_PENDING_EXCEPTION;
  }
  return true;
}

void ClassPathImageEntry::compile_the_world(Handle loader, TRAPS) {
  tty->print_cr("CompileTheWorld : Compiling all classes in %s", name());
  tty->cr();
  (*JImageResourceIterator)(_jimage, (JImageResourceVisitor_t)ctw_visitor, (void *)&loader);
  if (HAS_PENDING_EXCEPTION) {
    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
      CLEAR_PENDING_EXCEPTION;
      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
      tty->print_cr("Increase class metadata storage if a limit was set");
    } else {
      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
    }
  }
}
#endif

bool ClassPathImageEntry::is_modules_image() const {
  return ClassLoader::is_modules_image(name());
}
@ -1224,13 +1193,14 @@ const char* ClassLoader::file_name_for_class_name(const char* class_name,
  assert((int)strlen(class_name) == class_name_len, "invariant");

  static const char class_suffix[] = ".class";
  size_t class_suffix_len = sizeof(class_suffix);

  char* const file_name = NEW_RESOURCE_ARRAY(char,
                                             class_name_len +
                                             sizeof(class_suffix)); // includes term NULL
                                             class_suffix_len); // includes term NULL

  strncpy(file_name, class_name, class_name_len);
  strncpy(&file_name[class_name_len], class_suffix, sizeof(class_suffix));
  strncpy(&file_name[class_name_len], class_suffix, class_suffix_len);

  return file_name;
}
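The hunk above names the suffix length once instead of repeating sizeof(class_suffix); since sizeof on a char-array literal counts the trailing NUL, class_name_len + class_suffix_len is exactly the space needed for name, ".class" and terminator. A standalone illustration of the same arithmetic, using a hypothetical helper rather than the HotSpot function:

#include <cstring>

// Illustration of the sizing in file_name_for_class_name:
// sizeof on a char-array literal includes the trailing '\0', so the
// buffer holds name + ".class" + terminator exactly.
char* class_file_name(const char* class_name) {
  static const char class_suffix[] = ".class";
  size_t class_name_len   = strlen(class_name);
  size_t class_suffix_len = sizeof(class_suffix);  // 7: six chars + NUL

  char* file_name = new char[class_name_len + class_suffix_len];
  memcpy(file_name, class_name, class_name_len);
  memcpy(&file_name[class_name_len], class_suffix, class_suffix_len);
  return file_name;  // caller frees; HotSpot allocates in a ResourceArea instead
}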
@ -1750,247 +1720,6 @@ void ClassLoader::create_javabase() {
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
// CompileTheWorld
|
||||
//
|
||||
// Iterates over all class path entries and forces compilation of all methods
|
||||
// in all classes found. Currently, only zip/jar archives are searched.
|
||||
//
|
||||
// The classes are loaded by the Java level bootstrap class loader, and the
|
||||
// initializer is called. If DelayCompilationDuringStartup is true (default),
|
||||
// the interpreter will run the initialization code. Note that forcing
|
||||
// initialization in this way could potentially lead to initialization order
|
||||
// problems, in which case we could just force the initialization bit to be set.
|
||||
|
||||
|
||||
// We need to iterate over the contents of a zip/jar file, so we replicate the
|
||||
// jzcell and jzfile definitions from zip_util.h but rename jzfile to real_jzfile,
|
||||
// since jzfile already has a void* definition.
|
||||
//
|
||||
// Note that this is only used in debug mode.
|
||||
//
|
||||
// HotSpot integration note:
|
||||
// Matches zip_util.h 1.14 99/06/01 from jdk1.3 beta H build
|
||||
|
||||
|
||||
// JDK 1.3 version
|
||||
typedef struct real_jzentry { /* Zip file entry */
|
||||
char *name; /* entry name */
|
||||
jint time; /* modification time */
|
||||
jint size; /* size of uncompressed data */
|
||||
jint csize; /* size of compressed data (zero if uncompressed) */
|
||||
jint crc; /* crc of uncompressed data */
|
||||
char *comment; /* optional zip file comment */
|
||||
jbyte *extra; /* optional extra data */
|
||||
jint pos; /* position of LOC header (if negative) or data */
|
||||
} real_jzentry;
|
||||
|
||||
typedef struct real_jzfile { /* Zip file */
|
||||
char *name; /* zip file name */
|
||||
jint refs; /* number of active references */
|
||||
jint fd; /* open file descriptor */
|
||||
void *lock; /* read lock */
|
||||
char *comment; /* zip file comment */
|
||||
char *msg; /* zip error message */
|
||||
void *entries; /* array of hash cells */
|
||||
jint total; /* total number of entries */
|
||||
unsigned short *table; /* Hash chain heads: indexes into entries */
|
||||
jint tablelen; /* number of hash eads */
|
||||
real_jzfile *next; /* next zip file in search list */
|
||||
jzentry *cache; /* we cache the most recently freed jzentry */
|
||||
/* Information on metadata names in META-INF directory */
|
||||
char **metanames; /* array of meta names (may have null names) */
|
||||
jint metacount; /* number of slots in metanames array */
|
||||
/* If there are any per-entry comments, they are in the comments array */
|
||||
char **comments;
|
||||
} real_jzfile;

void ClassPathDirEntry::compile_the_world(Handle loader, TRAPS) {
  // For now we only compile all methods in all classes in zip/jar files
  tty->print_cr("CompileTheWorld : Skipped classes in %s", _dir);
  tty->cr();
}

void ClassPathZipEntry::compile_the_world(Handle loader, TRAPS) {
  real_jzfile* zip = (real_jzfile*) _zip;
  tty->print_cr("CompileTheWorld : Compiling all classes in %s", zip->name);
  tty->cr();
  // Iterate over all entries in zip file
  for (int n = 0; ; n++) {
    real_jzentry * ze = (real_jzentry *)((*GetNextEntry)(_zip, n));
    if (ze == NULL) break;
    ClassLoader::compile_the_world_in(ze->name, loader, CHECK);
  }
  if (HAS_PENDING_EXCEPTION) {
    if (PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
      CLEAR_PENDING_EXCEPTION;
      tty->print_cr("\nCompileTheWorld : Ran out of memory\n");
      tty->print_cr("Increase class metadata storage if a limit was set");
    } else {
      tty->print_cr("\nCompileTheWorld : Unexpected exception occurred\n");
    }
  }
}

void ClassLoader::compile_the_world() {
  EXCEPTION_MARK;
  HandleMark hm(THREAD);
  ResourceMark rm(THREAD);

  assert(has_jrt_entry(), "Compile The World not supported with exploded module build");

  // Find bootstrap loader
  Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
  jlong start = os::javaTimeMillis();

  // Compile the world for the modular java runtime image
  _jrt_entry->compile_the_world(system_class_loader, CATCH);

  // Iterate over all bootstrap class path appended entries
  ClassPathEntry* e = _first_append_entry;
  while (e != NULL) {
    assert(!e->is_modules_image(), "A modular java runtime image is present on the list of appended entries");
    e->compile_the_world(system_class_loader, CATCH);
    e = e->next();
  }
  jlong end = os::javaTimeMillis();
  tty->print_cr("CompileTheWorld : Done (%d classes, %d methods, " JLONG_FORMAT " ms)",
                _compile_the_world_class_counter, _compile_the_world_method_counter, (end - start));
  {
    // Print statistics as if before normal exit:
    extern void print_statistics();
    print_statistics();
  }
  vm_exit(0);
}
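// ---------------------------------------------------------------------------
// [Editor's note] For context: the driver above is gated by the develop flags
// referenced in this file (CompileTheWorld, CompileTheWorldStartAt,
// CompileTheWorldStopAt, CompileTheWorldPreloadClasses and
// CompileTheWorldSafepointInterval), so in a debug build it is typically
// invoked along the lines of
//
//   java -XX:+CompileTheWorld -XX:CompileTheWorldStartAt=1 -Xbootclasspath/a:lib.jar
//
// The exact command line is an illustrative assumption; only the flag names
// above appear in this code.
// ---------------------------------------------------------------------------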

int ClassLoader::_compile_the_world_class_counter = 0;
int ClassLoader::_compile_the_world_method_counter = 0;
static int _codecache_sweep_counter = 0;

// Filter out all exceptions except OOMs
static void clear_pending_exception_if_not_oom(TRAPS) {
  if (HAS_PENDING_EXCEPTION &&
      !PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
    CLEAR_PENDING_EXCEPTION;
  }
  // The CHECK at the caller will propagate the exception out
}
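// ---------------------------------------------------------------------------
// [Editor's sketch] The TRAPS/CHECK convention used above and below boils
// down to "test a thread-local pending exception and early-return so it
// propagates to the caller". A standalone analogue (simplified; this is not
// the JDK's actual macro definition):
struct SketchThread { const char* pending; };
#define SKETCH_CHECK(t) t); if ((t)->pending != NULL) return; ((void)0
static void sketch_may_throw(int x, SketchThread* t) { if (x < 0) t->pending = "error"; }
static void sketch_caller(SketchThread* t) {
  // Expands to: sketch_may_throw(-1, t); if ((t)->pending != NULL) return; ((void)0);
  sketch_may_throw(-1, SKETCH_CHECK(t));
}
// ---------------------------------------------------------------------------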

/**
 * Returns whether the given method should be compiled when doing compile-the-world.
 *
 * TODO: This should be a private method in a CompileTheWorld class.
 */
static bool can_be_compiled(const methodHandle& m, int comp_level) {
  assert(CompileTheWorld, "must be");

  // It's not valid to compile a native wrapper for MethodHandle methods
  // that take a MemberName appendix since the bytecode signature is not
  // correct.
  vmIntrinsics::ID iid = m->intrinsic_id();
  if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) {
    return false;
  }

  return CompilationPolicy::can_be_compiled(m, comp_level);
}

void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
  if (string_ends_with(name, ".class")) {
    // We have a .class file
    int len = (int)strlen(name);
    char buffer[2048];
    strncpy(buffer, name, len - 6);
    buffer[len-6] = 0;
    // If the file has a period after removing .class, it's not really a
    // valid class file. The class loader will check everything else.
    if (strchr(buffer, '.') == NULL) {
      _compile_the_world_class_counter++;
      if (_compile_the_world_class_counter > CompileTheWorldStopAt) return;

      // Construct name without extension
      TempNewSymbol sym = SymbolTable::new_symbol(buffer, CHECK);
      // Use loader to load and initialize class
      Klass* k = SystemDictionary::resolve_or_null(sym, loader, Handle(), THREAD);
      if (k != NULL && !HAS_PENDING_EXCEPTION) {
        k->initialize(THREAD);
      }
      bool exception_occurred = HAS_PENDING_EXCEPTION;
      clear_pending_exception_if_not_oom(CHECK);
      if (CompileTheWorldPreloadClasses && k != NULL) {
        InstanceKlass* ik = InstanceKlass::cast(k);
        ConstantPool::preload_and_initialize_all_classes(ik->constants(), THREAD);
        if (HAS_PENDING_EXCEPTION) {
          // If something went wrong in preloading we just ignore it
          clear_pending_exception_if_not_oom(CHECK);
          tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_class_counter, buffer);
        }
      }

      if (_compile_the_world_class_counter >= CompileTheWorldStartAt) {
        if (k == NULL || exception_occurred) {
          // If something went wrong (e.g. ExceptionInInitializerError) we skip this class
          tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_class_counter, buffer);
        } else {
          tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer);
          // Preload all classes to get around uncommon traps
          // Iterate over all methods in class
          int comp_level = CompilationPolicy::policy()->initial_compile_level();
          InstanceKlass* ik = InstanceKlass::cast(k);
          for (int n = 0; n < ik->methods()->length(); n++) {
            methodHandle m (THREAD, ik->methods()->at(n));
            if (can_be_compiled(m, comp_level)) {
              if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
                // Give sweeper a chance to keep up with CTW
                VM_CTWThreshold op;
                VMThread::execute(&op);
                _codecache_sweep_counter = 0;
              }
              // Force compilation
              CompileBroker::compile_method(m, InvocationEntryBci, comp_level,
                                            methodHandle(), 0, CompileTask::Reason_CTW, THREAD);
              if (HAS_PENDING_EXCEPTION) {
                clear_pending_exception_if_not_oom(CHECK);
                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
              } else {
                _compile_the_world_method_counter++;
              }
              if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) {
                // Clobber the first compile and force second tier compilation
                CompiledMethod* nm = m->code();
                if (nm != NULL && !m->is_method_handle_intrinsic()) {
                  // Throw out the code so that the code cache doesn't fill up
                  nm->make_not_entrant();
                }
                CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization,
                                              methodHandle(), 0, CompileTask::Reason_CTW, THREAD);
                if (HAS_PENDING_EXCEPTION) {
                  clear_pending_exception_if_not_oom(CHECK);
                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
                } else {
                  _compile_the_world_method_counter++;
                }
              }
            } else {
              tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
            }

            CompiledMethod* nm = m->code();
            if (nm != NULL && !m->is_method_handle_intrinsic()) {
              // Throw out the code so that the code cache doesn't fill up
              nm->make_not_entrant();
            }
          }
        }
      }
    }
  }
}

#endif //PRODUCT

// Please keep the following two functions at the end of this file. With them placed at the top or in the middle of the file,
// they could get inlined by an aggressive compiler, an unknown trick, see bug 6966589.
void PerfClassTraceTime::initialize() {
@@ -60,8 +60,6 @@ public:
  // Attempt to locate file_name through this class path entry.
  // Returns a class file parsing stream if successful.
  virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0;
  // Debugging
  NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
};

class ClassPathDirEntry: public ClassPathEntry {
@@ -75,8 +73,6 @@ class ClassPathDirEntry: public ClassPathEntry {
  ClassPathDirEntry(const char* dir);
  virtual ~ClassPathDirEntry() {}
  ClassFileStream* open_stream(const char* name, TRAPS);
  // Debugging
  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
};


@@ -107,8 +103,6 @@ class ClassPathZipEntry: public ClassPathEntry {
  u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
  ClassFileStream* open_stream(const char* name, TRAPS);
  void contents_do(void f(const char* name, void* context), void* context);
  // Debugging
  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
};


@@ -126,9 +120,6 @@ public:
  ClassPathImageEntry(JImageFile* jimage, const char* name);
  virtual ~ClassPathImageEntry();
  ClassFileStream* open_stream(const char* name, TRAPS);

  // Debugging
  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
};

// ModuleClassPathList contains a linked list of ClassPathEntry's
@@ -447,17 +438,6 @@ class ClassLoader: AllStatic {

  // Debugging
  static void verify() PRODUCT_RETURN;

  // Force compilation of all methods in all classes in bootstrap class path (stress test)
#ifndef PRODUCT
 protected:
  static int _compile_the_world_class_counter;
  static int _compile_the_world_method_counter;
 public:
  static void compile_the_world();
  static void compile_the_world_in(char* name, Handle loader, TRAPS);
  static int compile_the_world_counter() { return _compile_the_world_class_counter; }
#endif //PRODUCT
};

// PerfClassTraceTime is used to measure time for class loading related events.
@@ -192,7 +192,7 @@ oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
    OrderAccess::release_store(&_head, next);
  }
  oop* handle = &_head->_data[_head->_size];
  *handle = o;
  NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
  OrderAccess::release_store(&_head->_size, _head->_size + 1);
  return handle;
}
@@ -234,7 +234,7 @@ class VerifyContainsOopClosure : public OopClosure {
  VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}

  void do_oop(oop* p) {
    if (p != NULL && oopDesc::equals(RawAccess<>::oop_load(p), _target)) {
    if (p != NULL && oopDesc::equals(NativeAccess<AS_NO_KEEPALIVE>::oop_load(p), _target)) {
      _found = true;
    }
  }

@@ -176,12 +176,12 @@ class ClassLoaderData : public CHeapObj<mtClass> {
  void clear_accumulated_modified_oops() { _accumulated_modified_oops = false; }
  bool has_accumulated_modified_oops() { return _accumulated_modified_oops; }
  oop holder_no_keepalive() const;
  oop holder_phantom() const;

 private:
  void unload();
  bool keep_alive() const { return _keep_alive > 0; }

  oop holder_phantom() const;
  void classes_do(void f(Klass* const));
  void loaded_classes_do(KlassClosure* klass_closure);
  void classes_do(void f(InstanceKlass*));

@@ -583,7 +583,6 @@ void ClassLoaderDataGraph::clean_module_and_package_info() {
}

void ClassLoaderDataGraph::purge() {
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
  ClassLoaderData* list = _unloading;
  _unloading = NULL;
  ClassLoaderData* next = list;

@@ -30,7 +30,7 @@
#include "oops/klass.hpp"
#include "oops/oop.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vmOperations.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/resourceHash.hpp"


@@ -235,7 +235,7 @@ void Dictionary::clean_cached_protection_domains(DictionaryEntry* probe) {

void Dictionary::do_unloading() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  assert_locked_or_safepoint(SystemDictionary_lock);

  // The NULL class loader doesn't initiate loading classes from other class loaders
  if (loader_data() == ClassLoaderData::the_null_class_loader_data()) {

@@ -173,7 +173,7 @@ class DictionaryEntry : public HashtableEntry<InstanceKlass*, mtClass> {
    for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
         current != NULL;
         current = current->_next) {
      oopDesc::verify(current->_pd_cache->object_no_keepalive());
      guarantee(oopDesc::is_oop(current->_pd_cache->object_no_keepalive()), "Invalid oop");
    }
  }


@@ -54,13 +54,13 @@
#include "utilities/macros.hpp"

// We prefer short chains of avg 2
#define PREF_AVG_LIST_LEN 2
const double PREF_AVG_LIST_LEN = 2.0;
// 2^24 is max size
#define END_SIZE 24
const size_t END_SIZE = 24;
// If a chain gets to 32 something might be wrong
#define REHASH_LEN 32
const size_t REHASH_LEN = 32;
// If we have as many dead items as 50% of the number of buckets
#define CLEAN_DEAD_HIGH_WATER_MARK 0.5
const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;
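// ---------------------------------------------------------------------------
// [Editor's note] On the #define -> typed constant conversions above: a macro
// is untyped textual substitution, while a const participates in type
// checking and conversions. Standalone illustration (names are illustrative):
//
//   #define LEN 2                 // untyped; takes whatever type the context gives
//   const double LEN_C = 2.0;     // always a double
//   double a = items / LEN;       // integer division, then converted
//   double b = items / LEN_C;     // floating-point division, as intended here
// ---------------------------------------------------------------------------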

#if INCLUDE_CDS_JAVA_HEAP
inline oop read_string_from_compact_hashtable(address base_address, u4 offset) {
@@ -216,7 +216,7 @@ size_t StringTable::item_added() {
  return Atomic::add((size_t)1, &(the_table()->_items_count));
}

size_t StringTable::add_items_count_to_clean(size_t ndead) {
size_t StringTable::add_items_to_clean(size_t ndead) {
  size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items_count));
  log_trace(stringtable)(
    "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
@@ -228,11 +228,11 @@ void StringTable::item_removed() {
  Atomic::add((size_t)-1, &(the_table()->_items_count));
}

double StringTable::get_load_factor() {
double StringTable::get_load_factor() const {
  return (double)_items_count/_current_size;
}

double StringTable::get_dead_factor() {
double StringTable::get_dead_factor() const {
  return (double)_uncleaned_items_count/_current_size;
}

@@ -432,7 +432,7 @@ void StringTable::possibly_parallel_unlink(
  _par_state_string->weak_oops_do(&stiac, &dnc);

  // Accumulate the dead strings.
  the_table()->add_items_count_to_clean(stiac._count);
  the_table()->add_items_to_clean(stiac._count);

  *processed = stiac._count_total;
  *removed = stiac._count;

@@ -72,15 +72,15 @@ private:
  volatile size_t _uncleaned_items_count;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));

  double get_load_factor();
  double get_dead_factor();
  double get_load_factor() const;
  double get_dead_factor() const;

  void check_concurrent_work();
  void trigger_concurrent_work();

  static size_t item_added();
  static void item_removed();
  size_t add_items_count_to_clean(size_t ndead);
  size_t add_items_to_clean(size_t ndead);

  StringTable();

@@ -125,7 +125,7 @@ private:
  // If GC uses ParState directly it should add the number of cleared
  // strings to this method.
  static void inc_dead_counter(size_t ndead) {
    the_table()->add_items_count_to_clean(ndead);
    the_table()->add_items_to_clean(ndead);
  }

  // Delete pointers to otherwise-unreachable objects.

@@ -42,19 +42,19 @@
// We used to not resize at all, so let's be conservative
// and not set it too short before we decide to resize,
// to match previous startup behavior
#define PREF_AVG_LIST_LEN 8
const double PREF_AVG_LIST_LEN = 8.0;
// 2^17 (131,072) is max size, which is about 6.5 times as large
// as the previous table size (used to be 20,011),
// which never resized
#define END_SIZE 17
const size_t END_SIZE = 17;
// If a chain gets to 100 something might be wrong
#define REHASH_LEN 100
const size_t REHASH_LEN = 100;
// We only get a chance to check whether we need
// to clean infrequently (on class unloading),
// so if we have even one dead entry then mark table for cleaning
#define CLEAN_DEAD_HIGH_WATER_MARK 0.0
const double CLEAN_DEAD_HIGH_WATER_MARK = 0.0;

#define ON_STACK_BUFFER_LENGTH 128
const size_t ON_STACK_BUFFER_LENGTH = 128;

// --------------------------------------------------------------------------

@@ -171,8 +171,9 @@ void SymbolTable::set_item_clean_count(size_t ncl) {
  log_trace(symboltable)("Set uncleaned items:" SIZE_FORMAT, SymbolTable::the_table()->_uncleaned_items_count);
}

// Mark one item as needing to be cleaned, but only if no other items are marked yet
void SymbolTable::mark_item_clean_count() {
  if (Atomic::cmpxchg((size_t)1, &(SymbolTable::the_table()->_uncleaned_items_count), (size_t)0) == 0) { // only mark if unset
  if (Atomic::cmpxchg((size_t)1, &(SymbolTable::the_table()->_uncleaned_items_count), (size_t)0) == 0) {
    log_trace(symboltable)("Marked uncleaned items:" SIZE_FORMAT, SymbolTable::the_table()->_uncleaned_items_count);
  }
}
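// ---------------------------------------------------------------------------
// [Editor's sketch] The cmpxchg above is the classic "first marker wins"
// idiom: transition the counter from 0 to 1 exactly once. Standalone
// equivalent using std::atomic as a stand-in for HotSpot's Atomic class:
#include <atomic>
static std::atomic<size_t> sketch_uncleaned{0};
static bool sketch_mark_once() {
  size_t expected = 0;                                          // only transition 0 -> 1
  return sketch_uncleaned.compare_exchange_strong(expected, 1); // true for the first caller only
}
// ---------------------------------------------------------------------------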
@@ -182,11 +183,11 @@ void SymbolTable::item_removed() {
  Atomic::dec(&(SymbolTable::the_table()->_items_count));
}

double SymbolTable::get_load_factor() {
double SymbolTable::get_load_factor() const {
  return (double)_items_count/_current_size;
}

double SymbolTable::get_dead_factor() {
double SymbolTable::get_dead_factor() const {
  return (double)_uncleaned_items_count/_current_size;
}

@@ -386,7 +387,7 @@ public:
    assert(*value != NULL, "value should point to a symbol");
    _return = *value;
  }
  Symbol* get_res_sym() {
  Symbol* get_res_sym() const {
    return _return;
  }
};
@@ -694,7 +695,7 @@ void SymbolTable::grow(JavaThread* jt) {
}

struct SymbolTableDoDelete : StackObj {
  int _deleted;
  size_t _deleted;
  SymbolTableDoDelete() : _deleted(0) {}
  void operator()(Symbol** value) {
    assert(value != NULL, "expected valid value");
@@ -706,7 +707,7 @@ struct SymbolTableDoDelete : StackObj {
};

struct SymbolTableDeleteCheck : StackObj {
  int _processed;
  size_t _processed;
  SymbolTableDeleteCheck() : _processed(0) {}
  bool operator()(Symbol** value) {
    assert(value != NULL, "expected valid value");
@@ -738,9 +739,9 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {
    bdt.done(jt);
  }

  Atomic::add((size_t)stdc._processed, &_symbols_counted);
  Atomic::add(stdc._processed, &_symbols_counted);

  log_debug(symboltable)("Cleaned " INT32_FORMAT " of " INT32_FORMAT,
  log_debug(symboltable)("Cleaned " SIZE_FORMAT " of " SIZE_FORMAT,
                         stdd._deleted, stdc._processed);
}

@@ -775,7 +776,7 @@ void SymbolTable::concurrent_work(JavaThread* jt) {
}

class CountDead : StackObj {
  int _count;
  size_t _count;
public:
  CountDead() : _count(0) {}
  bool operator()(Symbol** value) {
@@ -787,7 +788,7 @@ public:
    }
    return true;
  };
  int get_dead_count() {
  size_t get_dead_count() const {
    return _count;
  }
};

@@ -123,8 +123,8 @@ private:
  volatile size_t _items_count;
  volatile size_t _uncleaned_items_count;

  double get_load_factor();
  double get_dead_factor();
  double get_load_factor() const;
  double get_dead_factor() const;

  void check_concurrent_work();
  void trigger_concurrent_work();

@@ -36,7 +36,6 @@
class Verifier : AllStatic {
 public:
  enum {
    STRICTER_ACCESS_CTRL_CHECK_VERSION = 49,
    STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50,
    INVOKEDYNAMIC_MAJOR_VERSION = 51,
    NO_RELAX_ACCESS_CTRL_CHECK_VERSION = 52,

@@ -473,15 +473,16 @@ bool vmIntrinsics::is_intrinsic_disabled(vmIntrinsics::ID id) {
  // Note, DirectiveSet may not be created at this point yet since this code
  // is called from initial stub generation code.
  char* local_list = (char*)DirectiveSet::canonicalize_disableintrinsic(DisableIntrinsic);

  char* save_ptr;
  bool found = false;
  char* token = strtok(local_list, ",");

  char* token = strtok_r(local_list, ",", &save_ptr);
  while (token != NULL) {
    if (strcmp(token, vmIntrinsics::name_at(id)) == 0) {
      found = true;
      break;
    } else {
      token = strtok(NULL, ",");
      token = strtok_r(NULL, ",", &save_ptr);
    }
  }
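// ---------------------------------------------------------------------------
// [Editor's sketch] Why strtok_r over strtok: strtok keeps hidden static
// state, so interleaved or concurrent tokenizations corrupt each other,
// while strtok_r threads that state through a caller-owned save pointer.
// Standalone illustration:
#include <string.h>
static void sketch_two_scans() {
  char a[] = "x,y", b[] = "1,2";
  char *sa, *sb;
  char* ta = strtok_r(a, ",", &sa);
  char* tb = strtok_r(b, ",", &sb);  // independent state; the scan of 'a' is unharmed
  ta = strtok_r(NULL, ",", &sa);     // correctly continues "x,y" at "y"
  (void)ta; (void)tb;
}
// ---------------------------------------------------------------------------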

@@ -30,10 +30,10 @@
CompiledICProtectionBehaviour* CompiledICProtectionBehaviour::_current = NULL;

bool DefaultICProtectionBehaviour::lock(CompiledMethod* method) {
  if (CompiledIC_lock->owned_by_self()) {
  if (is_safe(method)) {
    return false;
  }
  CompiledIC_lock->lock();
  CompiledIC_lock->lock_without_safepoint_check();
  return true;
}


@@ -289,7 +289,7 @@ void CodeCache::initialize_heaps() {

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size = align_down(profiled_size, alignment);

@@ -314,10 +314,14 @@ void CodeCache::initialize_heaps() {
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned) {
size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  if (os::can_execute_large_page_memory()) {
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
      // Make sure that the page size allows for an incremental commit of the reserved space
      min_pages = MAX2(min_pages, (size_t)8);
    }
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  } else {
    return os::vm_page_size();
  }
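// ---------------------------------------------------------------------------
// [Editor's sketch] The min_pages parameter above implements "pick the
// largest page size that still splits the reserved region into at least
// min_pages pages", so the space can be committed incrementally. Standalone
// equivalent (function and parameter names are illustrative):
static size_t sketch_page_size_for_region(size_t region, size_t min_pages,
                                          const size_t* supported, int n /* ascending */) {
  size_t best = supported[0];  // smallest supported page as the fallback
  for (int i = 0; i < n; i++) {
    if (region / supported[i] >= min_pages) best = supported[i];
  }
  return best;
}
// ---------------------------------------------------------------------------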
@@ -1196,7 +1200,6 @@ bool CodeCache::is_far_target(address target) {
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;
@@ -1230,8 +1233,6 @@ int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
@@ -1293,8 +1294,8 @@ void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
// Flushes compiled methods dependent on dependee when the dependee is redefined
// via RedefineClasses
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
@@ -1322,8 +1323,6 @@ void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {

@@ -75,6 +75,7 @@
class ExceptionCache;
class KlassDepChange;
class OopClosure;
class ShenandoahParallelCodeHeapIterator;

class CodeCache : AllStatic {
  friend class VMStructs;
@@ -82,6 +83,7 @@ class CodeCache : AllStatic {
  template <class T, class Filter> friend class CodeBlobIterator;
  friend class WhiteBox;
  friend class CodeCacheLoader;
  friend class ShenandoahParallelCodeHeapIterator;
 private:
  // CodeHeaps of the cache
  static GrowableArray<CodeHeap*>* _heaps;
@@ -111,7 +113,6 @@ class CodeCache : AllStatic {
  static CodeHeap* get_code_heap(int code_blob_type);         // Returns the CodeHeap for the given CodeBlobType
  // Returns the name of the VM option to set the size of the corresponding CodeHeap
  static const char* get_code_heap_flag_name(int code_blob_type);
  static size_t page_size(bool aligned = true);               // Returns the page size used by the CodeCache
  static ReservedCodeSpace reserve_heap_memory(size_t size);  // Reserves one continuous chunk of memory for the CodeHeaps

  // Iteration
@@ -133,6 +134,7 @@ class CodeCache : AllStatic {
 public:
  // Initialization
  static void initialize();
  static size_t page_size(bool aligned = true, size_t min_pages = 1);  // Returns the page size used by the CodeCache

  static int code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs);

@@ -286,9 +288,7 @@ class CodeCache : AllStatic {
  // Deoptimization
 private:
  static int mark_for_deoptimization(KlassDepChange& changes);
#ifdef HOTSWAP
  static int mark_for_evol_deoptimization(InstanceKlass* dependee);
#endif // HOTSWAP

 public:
  static void mark_all_nmethods_for_deoptimization();
@@ -297,10 +297,8 @@ class CodeCache : AllStatic {

  // Flushing and deoptimization
  static void flush_dependents_on(InstanceKlass* dependee);
#ifdef HOTSWAP
  // Flushing and deoptimization in case of evolution
  static void flush_evol_dependents_on(InstanceKlass* dependee);
#endif // HOTSWAP
  // Support for fullspeed debugging
  static void flush_dependents_on_method(const methodHandle& dependee);

@@ -126,7 +126,6 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub

  {
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    MutexLockerEx pl(CompiledICLocker::is_safe(cb->as_compiled_method()) ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
    _call->set_destination_mt_safe(entry_point);
  }
@@ -237,7 +236,13 @@ CompiledIC::CompiledIC(RelocIterator* iter)
  initialize_from_iter(iter);
}

bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
// This function may fail for two reasons: either due to running out of vtable
// stubs, or due to running out of IC stubs in an attempted transition to a
// transitional state. The needs_ic_stub_refill value will be set if the failure
// was due to running out of IC stubs, in which case the caller will refill IC
// stubs and retry.
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
                                    bool& needs_ic_stub_refill, TRAPS) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
@@ -259,7 +264,11 @@ bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass(), false);
    holder->claim();
    InlineCacheBuffer::create_transition_stub(this, holder, entry);
    if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
      delete holder;
      needs_ic_stub_refill = true;
      return false;
    }
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different than selected_method->vtable_index(), due to package-private etc.
@@ -269,7 +278,10 @@ bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
      needs_ic_stub_refill = true;
      return false;
    }
  }

  if (TraceICs) {
@@ -350,7 +362,7 @@ bool CompiledIC::is_call_to_interpreted() const {
  return is_call_to_interpreted;
}

void CompiledIC::set_to_clean(bool in_use) {
bool CompiledIC::set_to_clean(bool in_use) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
@@ -361,7 +373,7 @@ void CompiledIC::set_to_clean(bool in_use) {

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || CompiledICLocker::is_safe(_method);
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
@@ -373,7 +385,9 @@ void CompiledIC::set_to_clean(bool in_use) {
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
    if (!InlineCacheBuffer::create_transition_stub(this, NULL, entry)) {
      return false;
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
@@ -382,6 +396,7 @@ void CompiledIC::set_to_clean(bool in_use) {
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
  return true;
}

bool CompiledIC::is_clean() const {
@@ -393,7 +408,7 @@ bool CompiledIC::is_clean() const {
  return is_clean;
}

void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
@@ -411,8 +426,7 @@ void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  if (info.to_interpreter() || info.to_aot()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      assert(is_clean(), "unsafe IC path");
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
@@ -430,7 +444,11 @@ void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      CompiledICHolder* holder = info.claim_cached_icholder();
      if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
        delete holder;
        return false;
      }
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
@@ -450,7 +468,9 @@ void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
      (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
      if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
        return false;
      }
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
@@ -475,6 +495,7 @@ void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
  return true;
}

@@ -575,16 +596,16 @@ void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const Com

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean(bool in_use) {
bool CompiledStaticCall::set_to_clean(bool in_use) {
  // in_use is unused but needed to match template function in CompiledMethod
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
  return true;
}

bool CompiledStaticCall::is_clean() const {
@@ -624,7 +645,6 @@ void CompiledStaticCall::set_to_compiled(address entry) {

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for

@@ -28,6 +28,7 @@
#include "code/nativeInst.hpp"
#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointVerifiers.hpp"

//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
@@ -67,6 +68,7 @@ class CompiledICLocker: public StackObj {
  CompiledMethod* _method;
  CompiledICProtectionBehaviour* _behaviour;
  bool _locked;
  NoSafepointVerifier _nsv;

 public:
  CompiledICLocker(CompiledMethod* method);
@@ -272,13 +274,13 @@ class CompiledIC: public ResourceObj {
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean(bool in_use = true);
  void set_to_monomorphic(CompiledICInfo& info);
  bool set_to_clean(bool in_use = true);
  bool set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
  // allocation in the code cache fails, or ic stub refill is required.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, bool& needs_ic_stub_refill, TRAPS);

  static void compute_monomorphic_entry(const methodHandle& method, Klass* receiver_klass,
                                        bool is_optimized, bool static_bound, bool caller_is_nmethod,
@@ -372,7 +374,7 @@ public:
  virtual address destination() const = 0;

  // Clean static call (will force resolving on next use)
  void set_to_clean(bool in_use = true);
  bool set_to_clean(bool in_use = true);

  // Set state. The entry must be the same, as computed by compute_entry.
  // Computation and setting is split up, since the actions are separate during
@@ -27,6 +27,7 @@
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
@@ -430,27 +431,30 @@ static void check_class(Metadata* md) {
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metdata = ic->cached_icholder();

    if (cichk_metdata->is_loader_alive()) {
      return;
      return true;
    }
  } else {
    Metadata* ic_metdata = ic->cached_metadata();
    if (ic_metdata != NULL) {
      if (ic_metdata->is_klass()) {
        if (((Klass*)ic_metdata)->is_loader_alive()) {
          return;
          return true;
        }
      } else if (ic_metdata->is_method()) {
        Method* method = (Method*)ic_metdata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
          return true;
        }
      } else {
        ShouldNotReachHere();
@@ -458,7 +462,7 @@ void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
    }
  }

  ic->set_to_clean();
  return ic->set_to_clean();
}

// static_stub_Relocations may have dangling references to
@@ -496,7 +500,7 @@ void CompiledMethod::clean_ic_stubs() {

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // Ok, to lookup references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
@@ -504,20 +508,23 @@ static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address add
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      if (!ic->set_to_clean(from->is_alive())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static void clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
@@ -527,7 +534,7 @@ static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod
// nmethods are unloaded. Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // Exception cache only needs to be called if unloading occurred
@@ -535,18 +542,33 @@ void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
    clean_exception_cache();
  }

  cleanup_inline_caches_impl(unloading_occurred, false);
  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));
  return true;
}

void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    InlineCacheBuffer::refill_ic_stubs();
  }
}
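// ---------------------------------------------------------------------------
// [Editor's sketch] The loop above is a general "attempt under a lock,
// replenish outside it, retry" shape. Standalone equivalent with standard
// primitives (names are illustrative):
#include <mutex>
static void sketch_retry_with_refill(bool (*attempt)(), void (*refill)(), std::mutex& m) {
  for (;;) {
    {
      std::lock_guard<std::mutex> g(m);  // plays the role of CompiledICLocker
      if (attempt()) return;             // success; failure means "out of IC stubs"
    }
    refill();                            // must run outside the locked scope
  }
}
// ---------------------------------------------------------------------------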

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

@@ -561,30 +583,34 @@ void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool cl
        if (unloading_occurred) {
          // If class unloading occurred we first clear ICs where the cached metadata
          // is referring to an unloaded klass or method.
          clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
          if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
            return false;
          }
        }

        clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
        if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
          return false;
        }
        break;

      case relocInfo::opt_virtual_call_type:
        clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
        if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
          return false;
        }
        break;

      case relocInfo::static_call_type:
        clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all);
        if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
          return false;
        }
        break;

      case relocInfo::oop_type:
        break;

      case relocInfo::metadata_type:
        break; // nothing to do.

      default:
        break;
    }
  }

  return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun) was found

@@ -352,12 +352,11 @@ public:

  // Inline cache support for class unloading and nmethod unloading
 private:
  void cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
  bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

 public:
  void cleanup_inline_caches(bool clean_all) {
    // Serial version used by sweeper and whitebox test
    cleanup_inline_caches_impl(false, clean_all);
  }
  // Serial version used by sweeper and whitebox test
  void cleanup_inline_caches(bool clean_all);

  virtual void clear_inline_caches();
  void clear_ic_stubs();
@@ -390,7 +389,7 @@ public:
  address oops_reloc_begin() const;

 private:
  void static clean_ic_if_metadata_is_dead(CompiledIC *ic);
  bool static clean_ic_if_metadata_is_dead(CompiledIC *ic);

  void clean_ic_stubs();

@@ -400,8 +399,8 @@ public:

  virtual bool is_unloading() = 0;

  void unload_nmethod_caches(bool class_unloading_occurred);
  virtual void do_unloading(bool unloading_occurred) { }
  bool unload_nmethod_caches(bool class_unloading_occurred);
  virtual void do_unloading(bool unloading_occurred) = 0;

 private:
  PcDesc* find_pc_desc(address pc, bool approximate) {

@@ -38,15 +38,49 @@
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"

DEF_STUB_INTERFACE(ICStub);

StubQueue* InlineCacheBuffer::_buffer = NULL;
ICStub* InlineCacheBuffer::_next_stub = NULL;

CompiledICHolder* InlineCacheBuffer::_pending_released = NULL;
int InlineCacheBuffer::_pending_count = 0;

#ifdef ASSERT
ICRefillVerifier::ICRefillVerifier()
  : _refill_requested(false),
    _refill_remembered(false)
{
  Thread* thread = Thread::current();
  assert(thread->missed_ic_stub_refill_mark() == NULL, "nesting not supported");
  thread->set_missed_ic_stub_refill_mark(this);
}

ICRefillVerifier::~ICRefillVerifier() {
  assert(!_refill_requested || _refill_remembered,
         "Forgot to refill IC stubs after failed IC transition");
  Thread::current()->set_missed_ic_stub_refill_mark(NULL);
}

ICRefillVerifierMark::ICRefillVerifierMark(ICRefillVerifier* verifier) {
  Thread* thread = Thread::current();
  assert(thread->missed_ic_stub_refill_mark() == NULL, "nesting not supported");
  thread->set_missed_ic_stub_refill_mark(this);
}

ICRefillVerifierMark::~ICRefillVerifierMark() {
  Thread::current()->set_missed_ic_stub_refill_mark(NULL);
}

static ICRefillVerifier* current_ic_refill_verifier() {
  Thread* current = Thread::current();
  ICRefillVerifier* verifier = reinterpret_cast<ICRefillVerifier*>(current->missed_ic_stub_refill_mark());
  assert(verifier != NULL, "need a verifier for safety");
  return verifier;
}
#endif

void ICStub::finalize() {
  if (!is_empty()) {
    ResourceMark rm;
@@ -103,52 +137,46 @@ void ICStub::print() {
//-----------------------------------------------------------------------------------------------
// Implementation of InlineCacheBuffer

void InlineCacheBuffer::init_next_stub() {
  ICStub* ic_stub = (ICStub*)buffer()->request_committed (ic_stub_code_size());
  assert (ic_stub != NULL, "no room for a single stub");
  set_next_stub(ic_stub);
}

void InlineCacheBuffer::initialize() {
  if (_buffer != NULL) return; // already initialized
  _buffer = new StubQueue(new ICStubInterface, 10*K, InlineCacheBuffer_lock, "InlineCacheBuffer");
  assert (_buffer != NULL, "cannot allocate InlineCacheBuffer");
  init_next_stub();
}


ICStub* InlineCacheBuffer::new_ic_stub() {
  while (true) {
    ICStub* ic_stub = (ICStub*)buffer()->request_committed(ic_stub_code_size());
    if (ic_stub != NULL) {
      return ic_stub;
    }
    // we ran out of inline cache buffer space; must enter safepoint.
    // We do this by forcing a safepoint
    EXCEPTION_MARK;
  return (ICStub*)buffer()->request_committed(ic_stub_code_size());
}

    VM_ICBufferFull ibf;
    VMThread::execute(&ibf);
    // We could potentially get an async exception at this point.
    // In that case we will rethrow it to ourselves.
    if (HAS_PENDING_EXCEPTION) {
      oop exception = PENDING_EXCEPTION;
      CLEAR_PENDING_EXCEPTION;
      Thread::send_async_exception(JavaThread::current()->threadObj(), exception);
    }

void InlineCacheBuffer::refill_ic_stubs() {
#ifdef ASSERT
  ICRefillVerifier* verifier = current_ic_refill_verifier();
  verifier->request_remembered();
#endif
  // we ran out of inline cache buffer space; must enter safepoint.
  // We do this by forcing a safepoint
  EXCEPTION_MARK;

  VM_ICBufferFull ibf;
  VMThread::execute(&ibf);
  // We could potentially get an async exception at this point.
  // In that case we will rethrow it to ourselves.
  if (HAS_PENDING_EXCEPTION) {
    oop exception = PENDING_EXCEPTION;
    CLEAR_PENDING_EXCEPTION;
    Thread::send_async_exception(JavaThread::current()->threadObj(), exception);
  }
  ShouldNotReachHere();
  return NULL;
}


void InlineCacheBuffer::update_inline_caches() {
  if (buffer()->number_of_stubs() > 1) {
  if (buffer()->number_of_stubs() > 0) {
    if (TraceICBuffer) {
      tty->print_cr("[updating inline caches with %d stubs]", buffer()->number_of_stubs());
    }
    buffer()->remove_all();
    init_next_stub();
  }
  release_pending_icholders();
}
@@ -160,7 +188,7 @@ bool InlineCacheBuffer::contains(address instruction_address) {


bool InlineCacheBuffer::is_empty() {
  return buffer()->number_of_stubs() == 1; // always has sentinel
  return buffer()->number_of_stubs() == 0;
}


@@ -168,9 +196,7 @@ void InlineCacheBuffer_init() {
  InlineCacheBuffer::initialize();
}


void InlineCacheBuffer::create_transition_stub(CompiledIC *ic, void* cached_value, address entry) {
  MutexLockerEx ml(CompiledIC_lock->owned_by_self() ? NULL : CompiledIC_lock);
bool InlineCacheBuffer::create_transition_stub(CompiledIC *ic, void* cached_value, address entry) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be called during a safepoint");
  assert(CompiledICLocker::is_safe(ic->instruction_address()), "mt unsafe call");
  if (TraceICBuffer) {
@@ -178,20 +204,27 @@ void InlineCacheBuffer::create_transition_stub(CompiledIC *ic, void* cached_valu
              p2i(ic->instruction_address()), p2i(entry), p2i(cached_value));
  }

  // allocate and initialize new "out-of-line" inline-cache
  ICStub* ic_stub = new_ic_stub();
  if (ic_stub == NULL) {
#ifdef ASSERT
    ICRefillVerifier* verifier = current_ic_refill_verifier();
    verifier->request_refill();
#endif
    return false;
  }

  // If a transition stub is already associated with the inline cache, then we remove the association.
  if (ic->is_in_transition_state()) {
    ICStub* old_stub = ICStub_from_destination_address(ic->stub_address());
    old_stub->clear();
  }

  // allocate and initialize new "out-of-line" inline-cache
  ICStub* ic_stub = get_next_stub();
  ic_stub->set_stub(ic, cached_value, entry);

  // Update inline cache in nmethod to point to new "out-of-line" allocated inline cache
  ic->set_ic_destination(ic_stub);

  set_next_stub(new_ic_stub()); // can cause safepoint synchronization
  return true;
}


@@ -225,9 +258,7 @@ void InlineCacheBuffer::release_pending_icholders() {
// not safe to free them until then since they might be visible to
// another thread.
void InlineCacheBuffer::queue_for_release(CompiledICHolder* icholder) {
  MutexLockerEx mex1((CompiledIC_lock->owned_by_self() ||
                      SafepointSynchronize::is_at_safepoint()) ? NULL : CompiledIC_lock);
  MutexLockerEx mex2(InlineCacheBuffer_lock);
  MutexLockerEx mex(InlineCacheBuffer_lock, Mutex::_no_safepoint_check_flag);
  icholder->set_next(_pending_released);
  _pending_released = icholder;
  _pending_count++;
@ -1,5 +1,5 @@
|
||||
/*
|
||||
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,10 @@
#include "code/stubs.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

//
// For CompiledIC's:
@ -92,6 +95,43 @@ inline ICStub* ICStub_from_destination_address(address destination_address) {
  return stub;
}

#ifdef ASSERT
// The ICRefillVerifier class is a stack allocated RAII object used to
// detect if a failed IC transition that required IC stub refilling has
// been accidentally missed. It is then up to the caller to refill IC stubs.
class ICRefillVerifier: StackObj {
  bool _refill_requested;
  bool _refill_remembered;

 public:
  ICRefillVerifier();
  ~ICRefillVerifier();

  void request_refill() { _refill_requested = true; }
  void request_remembered() { _refill_remembered = true; }
};

// The ICRefillVerifierMark is used to set the thread's current
// ICRefillVerifier to a provided one. This is useful in particular
// when transitioning IC stubs in parallel and refilling from the
// master thread invoking the IC stub transitioning code.
class ICRefillVerifierMark: StackObj {
 public:
  ICRefillVerifierMark(ICRefillVerifier* verifier);
  ~ICRefillVerifierMark();
};
#else
class ICRefillVerifier: StackObj {
 public:
  ICRefillVerifier() {}
};
class ICRefillVerifierMark: StackObj {
 public:
  ICRefillVerifierMark(ICRefillVerifier* verifier) {}
};
#endif

class InlineCacheBuffer: public AllStatic {
 private:
  // friends
@ -100,20 +140,14 @@ class InlineCacheBuffer: public AllStatic {
  static int ic_stub_code_size();

  static StubQueue* _buffer;
  static ICStub*    _next_stub;

  static CompiledICHolder* _pending_released;
  static int _pending_count;

  static StubQueue* buffer() { return _buffer; }
  static void set_next_stub(ICStub* next_stub) { _next_stub = next_stub; }
  static ICStub* get_next_stub() { return _next_stub; }

  static void init_next_stub();

  static ICStub* new_ic_stub();

  // Machine-dependent implementation of ICBuffer
  static void assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point);
  static address ic_buffer_entry_point (address code_begin);
@ -129,6 +163,7 @@ class InlineCacheBuffer: public AllStatic {

  // removes the ICStubs after backpatching
  static void update_inline_caches();
  static void refill_ic_stubs();

  // for debugging
  static bool is_empty();
@ -138,7 +173,7 @@ class InlineCacheBuffer: public AllStatic {
  static int pending_icholder_count() { return _pending_count; }

  // New interface
  static void create_transition_stub(CompiledIC *ic, void* cached_value, address entry);
  static bool create_transition_stub(CompiledIC *ic, void* cached_value, address entry);
  static address ic_destination_for(CompiledIC *ic);
  static void*   cached_value_for(CompiledIC *ic);
};
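The interface change above turns create_transition_stub from void to bool: a false return now means no ICStub could be allocated and the caller must refill the buffer and retry. A minimal sketch of the implied retry protocol, assuming a hypothetical driver function (this is not a call site from this change, and the real allocation path requests the refill through the verifier itself):

// Illustrative only: retry an IC transition after refilling the stub buffer.
void transition_or_refill(CompiledIC* ic, void* cached_value, address entry) {
  while (true) {
    ICRefillVerifier verifier;  // RAII check (debug builds) that a failed
                                // transition is followed by a refill
    if (InlineCacheBuffer::create_transition_stub(ic, cached_value, entry)) {
      return;  // got a transition stub; transition succeeded
    }
    // ICStub allocation failed: replenish the buffer, then retry.
    InlineCacheBuffer::refill_ic_stubs();
  }
}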

@ -1575,14 +1575,44 @@ bool nmethod::is_unloading() {
  if (state_is_unloading) {
    return true;
  }
  if (state_unloading_cycle == CodeCache::unloading_cycle()) {
  uint8_t current_cycle = CodeCache::unloading_cycle();
  if (state_unloading_cycle == current_cycle) {
    return false;
  }

  // The IsUnloadingBehaviour is responsible for checking if there are any dead
  // oops in the CompiledMethod, by calling oops_do on it.
  state_unloading_cycle = CodeCache::unloading_cycle();
  state_is_unloading = IsUnloadingBehaviour::current()->is_unloading(this);
  state_unloading_cycle = current_cycle;

  if (is_zombie()) {
    // Zombies without calculated unloading epoch are never unloading due to GC.

    // There are no races where a previously observed is_unloading() nmethod
    // suddenly becomes not is_unloading() due to here being observed as zombie.

    // With STW unloading, all is_alive() && is_unloading() nmethods are unlinked
    // and unloaded in the safepoint. That makes races where an nmethod is first
    // observed as is_alive() && is_unloading() and subsequently observed as
    // is_zombie() impossible.

    // With concurrent unloading, all references to is_unloading() nmethods are
    // first unlinked (e.g. IC caches and dependency contexts). Then a global
    // handshake operation is performed with all JavaThreads before finally
    // unloading the nmethods. The sweeper never converts is_alive() && is_unloading()
    // nmethods to zombies; it waits for them to become is_unloaded(). So before
    // the global handshake, it is impossible for is_unloading() nmethods to
    // racingly become is_zombie(). And is_unloading() is calculated for all is_alive()
    // nmethods before taking that global handshake, meaning that it will never
    // be recalculated after the handshake.

    // After that global handshake, is_unloading() nmethods are only observable
    // to the iterators, and they will never trigger recomputation of the cached
    // is_unloading_state, and hence may not suffer from such races.

    state_is_unloading = false;
  } else {
    state_is_unloading = IsUnloadingBehaviour::current()->is_unloading(this);
  }

  state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
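The hunk above is an epoch-cached predicate: the answer is computed at most once per global unloading cycle, and a true answer is sticky. An illustrative reduction of the scheme, with made-up names (the real state is a single packed byte handled with atomics):

#include <cstdint>

struct UnloadingState {
  bool    is_unloading;  // sticky: once true it stays true
  uint8_t cycle;         // unloading cycle the flag was computed in
};

bool cached_is_unloading(UnloadingState& s,
                         uint8_t current_cycle,
                         bool is_zombie,
                         bool (*compute)()) {
  if (s.is_unloading) return true;             // never recomputed once set
  if (s.cycle == current_cycle) return false;  // already answered this cycle
  s.cycle = current_cycle;                     // recompute once per cycle
  s.is_unloading = is_zombie ? false           // zombies never unload via GC
                             : compute();
  return s.is_unloading;
}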

@ -1620,7 +1650,8 @@ void nmethod::do_unloading(bool unloading_occurred) {
    }
#endif

    unload_nmethod_caches(unloading_occurred);
    guarantee(unload_nmethod_caches(unloading_occurred),
              "Should not need transition stubs");
  }
}

@ -644,12 +644,12 @@ Method* virtual_call_Relocation::method_value() {
  return (Method*)m;
}

void virtual_call_Relocation::clear_inline_cache() {
bool virtual_call_Relocation::clear_inline_cache() {
  // No stubs for ICs
  // Clean IC
  ResourceMark rm;
  CompiledIC* icache = CompiledIC_at(this);
  icache->set_to_clean();
  return icache->set_to_clean();
}

@ -672,15 +672,20 @@ Method* opt_virtual_call_Relocation::method_value() {
  return (Method*)m;
}

void opt_virtual_call_Relocation::clear_inline_cache() {
template<typename CompiledICorStaticCall>
static bool set_to_clean_no_ic_refill(CompiledICorStaticCall* ic) {
  guarantee(ic->set_to_clean(), "Should not need transition stubs");
  return true;
}

bool opt_virtual_call_Relocation::clear_inline_cache() {
  // No stubs for ICs
  // Clean IC
  ResourceMark rm;
  CompiledIC* icache = CompiledIC_at(this);
  icache->set_to_clean();
  return set_to_clean_no_ic_refill(icache);
}

address opt_virtual_call_Relocation::static_stub(bool is_aot) {
  // search for the static stub that points back to this static call
  address static_call_addr = addr();
@ -715,10 +720,10 @@ void static_call_Relocation::unpack_data() {
  _method_index = unpack_1_int();
}

void static_call_Relocation::clear_inline_cache() {
bool static_call_Relocation::clear_inline_cache() {
  // Safe call site info
  CompiledStaticCall* handler = this->code()->compiledStaticCall_at(this);
  handler->set_to_clean();
  return set_to_clean_no_ic_refill(handler);
}

@ -757,10 +762,11 @@ address trampoline_stub_Relocation::get_trampoline_for(address call, nmethod* co
  return NULL;
}

void static_stub_Relocation::clear_inline_cache() {
bool static_stub_Relocation::clear_inline_cache() {
  // Call stub is only used when calling the interpreted code.
  // It does not really need to be cleared, except that we want to clean out the methodoop.
  CompiledDirectStaticCall::set_stub_to_clean(this);
  return true;
}

@ -814,7 +814,7 @@ class Relocation {
  // all relocations are able to reassert their values
  virtual void set_value(address x);

  virtual void clear_inline_cache() { }
  virtual bool clear_inline_cache() { return true; }

  // This method assumes that all virtual/static (inline) caches are cleared (since for static_call_type and
  // ic_call_type is not always position dependent (depending on the state of the cache)). However, this is
@ -1052,7 +1052,7 @@ class virtual_call_Relocation : public CallRelocation {
  void pack_data_to(CodeSection* dest);
  void unpack_data();

  void clear_inline_cache();
  bool clear_inline_cache();
};

@ -1083,7 +1083,7 @@ class opt_virtual_call_Relocation : public CallRelocation {
  void pack_data_to(CodeSection* dest);
  void unpack_data();

  void clear_inline_cache();
  bool clear_inline_cache();

  // find the matching static_stub
  address static_stub(bool is_aot);
@ -1117,7 +1117,7 @@ class static_call_Relocation : public CallRelocation {
  void pack_data_to(CodeSection* dest);
  void unpack_data();

  void clear_inline_cache();
  bool clear_inline_cache();

  // find the matching static_stub
  address static_stub(bool is_aot);
@ -1146,7 +1146,7 @@ class static_stub_Relocation : public Relocation {
  static_stub_Relocation() { }

 public:
  void clear_inline_cache();
  bool clear_inline_cache();

  address static_call() { return _static_call; }
  bool is_aot() { return _is_aot; }

@ -117,7 +117,7 @@ Stub* StubQueue::request_committed(int code_size) {

Stub* StubQueue::request(int requested_code_size) {
  assert(requested_code_size > 0, "requested_code_size must be > 0");
  if (_mutex != NULL) _mutex->lock();
  if (_mutex != NULL) _mutex->lock_without_safepoint_check();
  Stub* s = current_stub();
  int requested_size = align_up(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
  if (requested_size <= available_space()) {
@ -207,7 +207,7 @@ void StubQueue::remove_all(){
void StubQueue::verify() {
  // verify only if initialized
  if (_stub_buffer == NULL) return;
  MutexLockerEx lock(_mutex);
  MutexLockerEx lock(_mutex, Mutex::_no_safepoint_check_flag);
  // verify index boundaries
  guarantee(0 <= _buffer_size, "buffer size must be positive");
  guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
@ -234,9 +234,8 @@ void StubQueue::verify() {

void StubQueue::print() {
  MutexLockerEx lock(_mutex);
  MutexLockerEx lock(_mutex, Mutex::_no_safepoint_check_flag);
  for (Stub* s = first(); s != NULL; s = next(s)) {
    stub_print(s);
  }
}
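All of the stub-queue lock acquisitions above now opt out of the safepoint check, presumably because these paths can now run while inline caches are processed concurrently, where blocking at a safepoint mid-operation could deadlock. The idiom, as used in these hunks (note that a Mutex acquired this way should use the flag consistently at every acquisition):

// Acquire without a safepoint check; never hold such a lock across a safepoint.
MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);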

@ -124,7 +124,7 @@ int VtableStubs::_itab_stub_size = 0;
void VtableStubs::initialize() {
  VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
  {
    MutexLocker ml(VtableStubs_lock);
    MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
    assert(is_power_of_2(N), "N must be a power of 2");
    for (int i = 0; i < N; i++) {
@ -247,7 +247,7 @@ inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index){

VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {
  MutexLocker ml(VtableStubs_lock);
  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index);
  VtableStub* s = _table[hash];
  while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next();
@ -256,7 +256,7 @@ VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) {

void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
  MutexLocker ml(VtableStubs_lock);
  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index);
  // enter s at the beginning of the corresponding list
@ -266,7 +266,7 @@ void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
}

VtableStub* VtableStubs::entry_point(address pc) {
  MutexLocker ml(VtableStubs_lock);
  MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
  VtableStub* s;

@ -1306,7 +1306,7 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
    CompilationPolicy::policy()->delay_compilation(method());
    return NULL;
  }
  bool is_blocking = !directive->BackgroundCompilationOption || CompileTheWorld || ReplayCompiles;
  bool is_blocking = !directive->BackgroundCompilationOption || ReplayCompiles;
  compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, compile_reason, is_blocking, THREAD);
}

@ -2247,11 +2247,11 @@ void CompileBroker::handle_full_code_cache(int code_blob_type) {
  }

#ifndef PRODUCT
  if (CompileTheWorld || ExitOnFullCodeCache) {
  if (ExitOnFullCodeCache) {
    codecache_print(/* detailed= */ true);
    before_exit(JavaThread::current());
    exit_globals(); // will delete tty
    vm_direct_exit(CompileTheWorld ? 0 : 1);
    vm_direct_exit(1);
  }
#endif
  if (UseCodeCacheFlushing) {

@ -23,6 +23,7 @@
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "compiler/compilerDefinitions.hpp"
@ -200,8 +201,10 @@ void CompilerConfig::set_tiered_flags() {
    FLAG_SET_ERGO(uintx, ReservedCodeCacheSize,
                  MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
  }
  // Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
  if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
  // Enable SegmentedCodeCache if TieredCompilation is enabled, ReservedCodeCacheSize >= 240M
  // and the code cache contains at least 8 pages (segmentation disables advantage of huge pages).
  if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
      8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
    FLAG_SET_ERGO(bool, SegmentedCodeCache, true);
  }
  if (!UseInterpreter) { // -Xcomp
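Worked numbers for the new page-count guard above: with ordinary 4K pages, 8 pages is only 32K, so any code cache past the existing 240M threshold trivially qualifies. The clause matters with large pages: with 2M pages the code cache must span at least 16M, and with 1G pages (an illustrative case) at least 8G, before segmentation is enabled by default; otherwise splitting the cache into segments would forfeit the benefit of the huge pages.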

@ -311,9 +314,9 @@ bool CompilerConfig::check_args_consistency(bool status) {
  }
#endif

  if (BackgroundCompilation && (CompileTheWorld || ReplayCompiles)) {
  if (BackgroundCompilation && ReplayCompiles) {
    if (!FLAG_IS_DEFAULT(BackgroundCompilation)) {
      warning("BackgroundCompilation disabled due to CompileTheWorld or ReplayCompiles options.");
      warning("BackgroundCompilation disabled due to ReplayCompiles option.");
    }
    FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
  }

@ -398,13 +398,14 @@ bool DirectiveSet::is_intrinsic_disabled(const methodHandle& method) {
  size_t length = strlen(DisableIntrinsicOption);
  char* local_list = NEW_RESOURCE_ARRAY(char, length + 1);
  strncpy(local_list, DisableIntrinsicOption, length + 1);
  char* save_ptr;

  char* token = strtok(local_list, ",");
  char* token = strtok_r(local_list, ",", &save_ptr);
  while (token != NULL) {
    if (strcmp(token, vmIntrinsics::name_at(id)) == 0) {
      return true;
    } else {
      token = strtok(NULL, ",");
      token = strtok_r(NULL, ",", &save_ptr);
    }
  }
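The switch from strtok to strtok_r (here and again in G1Arguments below) removes reliance on strtok's hidden static cursor, which is unsafe when two threads, or a nested loop, tokenize at the same time. A minimal standalone illustration of the reentrant form on a POSIX toolchain (not code from this change):

#include <cstdio>
#include <cstring>

int main() {
  char list[] = "a,b,c";
  char* save_ptr;
  // strtok_r keeps its position in the caller-provided save_ptr instead of
  // a hidden static, so concurrent or nested tokenizations cannot clobber
  // each other the way plain strtok can.
  for (char* tok = strtok_r(list, ",", &save_ptr);
       tok != NULL;
       tok = strtok_r(NULL, ",", &save_ptr)) {
    printf("%s\n", tok);
  }
  return 0;
}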

@ -30,10 +30,10 @@
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/defaultStream.hpp"

size_t CMSArguments::conservative_max_heap_alignment() {
@ -46,7 +46,7 @@ void CMSArguments::set_parnew_gc_flags() {
  assert(UseConcMarkSweepGC, "CMS is expected to be on here");

  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
    FLAG_SET_DEFAULT(ParallelGCThreads, VM_Version::parallel_worker_threads());
    FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
    assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
  } else if (ParallelGCThreads == 0) {
    jio_fprintf(defaultStream::error_stream(),

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -30,9 +30,9 @@
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"

@ -24,12 +24,12 @@

#include "precompiled.hpp"
#include "gc/cms/cmsCardTable.hpp"
#include "gc/cms/cmsVMOperations.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genMemoryPools.hpp"
#include "gc/shared/genOopClosures.inline.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -24,9 +24,9 @@

#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsVMOperations.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,14 +22,14 @@
 *
 */

#ifndef SHARE_VM_GC_CMS_VMCMSOPERATIONS_HPP
#define SHARE_VM_GC_CMS_VMCMSOPERATIONS_HPP
#ifndef SHARE_VM_GC_CMS_CMSVMOPERATIONS_HPP
#define SHARE_VM_GC_CMS_CMSVMOPERATIONS_HPP

#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "runtime/vm_operations.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "runtime/vmOperations.hpp"

// The VM_CMS_Operation is slightly different from
// a VM_GC_Operation -- and would not have subclassed easily
@ -143,4 +143,4 @@ class VM_GenCollectFullConcurrent: public VM_GC_Operation {
  virtual bool evaluate_at_safepoint() const;
};

#endif // SHARE_VM_GC_CMS_VMCMSOPERATIONS_HPP
#endif // SHARE_VM_GC_CMS_CMSVMOPERATIONS_HPP

@ -32,12 +32,12 @@
#include "gc/cms/cmsGCStats.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/cmsVMOperations.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/promotionInfo.inline.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
@ -55,12 +55,14 @@
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/owstTaskTerminator.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
@ -2981,7 +2983,7 @@ bool CMSCollector::markFromRootsWork() {
// Forward decl
class CMSConcMarkingTask;

class CMSConcMarkingTerminator: public ParallelTaskTerminator {
class CMSConcMarkingParallelTerminator: public ParallelTaskTerminator {
  CMSCollector* _collector;
  CMSConcMarkingTask* _task;
 public:
@ -2991,7 +2993,7 @@ class CMSConcMarkingTerminator: public ParallelTaskTerminator {
  // "queue_set" is a set of work queues of other threads.
  // "collector" is the CMS collector associated with this task terminator.
  // "yield" indicates whether we need the gang as a whole to yield.
  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
  CMSConcMarkingParallelTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
    ParallelTaskTerminator(n_threads, queue_set),
    _collector(collector) { }

@ -3000,6 +3002,45 @@ class CMSConcMarkingTerminator: public ParallelTaskTerminator {
  }
};

class CMSConcMarkingOWSTTerminator: public OWSTTaskTerminator {
  CMSCollector* _collector;
  CMSConcMarkingTask* _task;
 public:
  virtual void yield();

  // "n_threads" is the number of threads to be terminated.
  // "queue_set" is a set of work queues of other threads.
  // "collector" is the CMS collector associated with this task terminator.
  // "yield" indicates whether we need the gang as a whole to yield.
  CMSConcMarkingOWSTTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
    OWSTTaskTerminator(n_threads, queue_set),
    _collector(collector) { }

  void set_task(CMSConcMarkingTask* task) {
    _task = task;
  }
};

class CMSConcMarkingTaskTerminator {
 private:
  ParallelTaskTerminator* _term;
 public:
  CMSConcMarkingTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) {
    if (UseOWSTTaskTerminator) {
      _term = new CMSConcMarkingOWSTTerminator(n_threads, queue_set, collector);
    } else {
      _term = new CMSConcMarkingParallelTerminator(n_threads, queue_set, collector);
    }
  }
  ~CMSConcMarkingTaskTerminator() {
    assert(_term != NULL, "Must not be NULL");
    delete _term;
  }

  void set_task(CMSConcMarkingTask* task);
  ParallelTaskTerminator* terminator() const { return _term; }
};
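CMSConcMarkingTaskTerminator (like the shared TaskTerminator that appears in the later hunks) is a small owner-plus-factory wrapper: it picks the OWST or the classic termination protocol once, at construction, based on UseOWSTTaskTerminator, and hands out a plain ParallelTaskTerminator* so the call sites stay agnostic. A hedged sketch of the resulting call-site pattern, with illustrative variable names (this is not a hunk from this change):

// Construct the wrapper once per parallel task; it owns whichever
// terminator implementation the UseOWSTTaskTerminator flag selects.
CMSConcMarkingTaskTerminator term(n_threads, task_queues, collector);
term.set_task(&marking_task);            // forwarded to the concrete type
// Workers keep using the base-class interface only:
ParallelTaskTerminator* pt = term.terminator();
pt->reset_for_reuse(active_workers);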

class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
  CMSConcMarkingTask* _task;
 public:
@ -3027,7 +3068,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask {
  OopTaskQueueSet* _task_queues;

  // Termination (and yielding) support
  CMSConcMarkingTerminator _term;
  CMSConcMarkingTaskTerminator _term;
  CMSConcMarkingTerminatorTerminator _term_term;

 public:
@ -3057,7 +3098,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask {

  HeapWord* volatile* global_finger_addr() { return &_global_finger; }

  CMSConcMarkingTerminator* terminator() { return &_term; }
  ParallelTaskTerminator* terminator() { return _term.terminator(); }

  virtual void set_for_termination(uint active_workers) {
    terminator()->reset_for_reuse(active_workers);
@ -3075,7 +3116,7 @@ class CMSConcMarkingTask: public YieldingFlexibleGangTask {
  void reset(HeapWord* ra) {
    assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
    _restart_addr = _global_finger = ra;
    _term.reset_for_reuse();
    _term.terminator()->reset_for_reuse();
  }

  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
@ -3096,7 +3137,7 @@ bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
  // thread has yielded.
}

void CMSConcMarkingTerminator::yield() {
void CMSConcMarkingParallelTerminator::yield() {
  if (_task->should_yield()) {
    _task->yield();
  } else {
@ -3104,6 +3145,22 @@ void CMSConcMarkingTerminator::yield() {
  }
}

void CMSConcMarkingOWSTTerminator::yield() {
  if (_task->should_yield()) {
    _task->yield();
  } else {
    OWSTTaskTerminator::yield();
  }
}

void CMSConcMarkingTaskTerminator::set_task(CMSConcMarkingTask* task) {
  if (UseOWSTTaskTerminator) {
    ((CMSConcMarkingOWSTTerminator*)_term)->set_task(task);
  } else {
    ((CMSConcMarkingParallelTerminator*)_term)->set_task(task);
  }
}

////////////////////////////////////////////////////////////////
// Concurrent Marking Algorithm Sketch
////////////////////////////////////////////////////////////////
@ -3488,9 +3545,9 @@ void CMSConcMarkingTask::coordinator_yield() {

bool CMSCollector::do_marking_mt() {
  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
                                                                  conc_workers()->active_workers(),
                                                                  Threads::number_of_non_daemon_threads());
  uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(),
                                                            conc_workers()->active_workers(),
                                                            Threads::number_of_non_daemon_threads());
  num_workers = conc_workers()->update_active_workers(num_workers);
  log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());

@ -4292,7 +4349,7 @@ class CMSParRemarkTask: public CMSParMarkTask {

  // The per-thread work queues, available here for stealing.
  OopTaskQueueSet* _task_queues;
  ParallelTaskTerminator _term;
  TaskTerminator _term;
  StrongRootsScope* _strong_roots_scope;

 public:
@ -4314,7 +4371,7 @@ class CMSParRemarkTask: public CMSParMarkTask {

  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }

  ParallelTaskTerminator* terminator() { return &_term; }
  ParallelTaskTerminator* terminator() { return _term.terminator(); }
  uint n_workers() { return _n_workers; }

  void work(uint worker_id);
@ -5002,11 +5059,11 @@ void CMSCollector::do_remark_non_parallel() {
////////////////////////////////////////////////////////
class AbstractGangTaskWOopQueues : public AbstractGangTask {
  OopTaskQueueSet* _queues;
  ParallelTaskTerminator _terminator;
  TaskTerminator _terminator;
 public:
  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
    AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
  ParallelTaskTerminator* terminator() { return &_terminator; }
  ParallelTaskTerminator* terminator() { return _terminator.terminator(); }
  OopTaskQueueSet* queues() { return _queues; }
};

@ -49,6 +49,7 @@
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
@ -74,7 +75,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       PreservedMarks* preserved_marks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
                                       TaskTerminator& term_) :
  _work_queue(work_queue_set_->queue(thread_num_)),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _preserved_marks(preserved_marks_),
@ -86,7 +87,7 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
  _old_gen_root_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
                      work_queue_set_, term_.terminator()),
  _is_alive_closure(young_gen_),
  _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
@ -305,7 +306,7 @@ public:
                        Stack<oop, mtGC>* overflow_stacks_,
                        PreservedMarksSet& preserved_marks_set,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);
                        TaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

@ -326,14 +327,14 @@ public:
#endif // TASKQUEUE_STATS

 private:
  ParallelTaskTerminator& _term;
  TaskTerminator& _term;
  ParNewGeneration& _young_gen;
  Generation& _old_gen;
  ParScanThreadState* _per_thread_states;
  const int _num_threads;
 public:
  bool is_valid(int id) const { return id < _num_threads; }
  ParallelTaskTerminator* terminator() { return &_term; }
  ParallelTaskTerminator* terminator() { return _term.terminator(); }
};

ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
@ -344,7 +345,7 @@ ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             PreservedMarksSet& preserved_marks_set,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
                                             TaskTerminator& term)
  : _term(term),
    _young_gen(young_gen),
    _old_gen(old_gen),
@ -378,7 +379,7 @@ void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_trace
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
  _term.reset_for_reuse(active_threads);
  _term.terminator()->reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < _num_threads; ++i) {
      thread_state(i).print_promotion_failure_size();
@ -866,9 +867,9 @@ void ParNewGeneration::collect(bool full,
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  uint active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                              workers->active_workers(),
                                              Threads::number_of_non_daemon_threads());
      WorkerPolicy::calc_active_workers(workers->total_workers(),
                                        workers->active_workers(),
                                        Threads::number_of_non_daemon_threads());
  active_workers = workers->update_active_workers(active_workers);
  log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());

@ -903,7 +904,7 @@ void ParNewGeneration::collect(bool full,

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(active_workers, task_queues());
  TaskTerminator _term(active_workers, task_queues());
  ParScanThreadStateSet thread_state_set(active_workers,
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, _preserved_marks_set,

@ -133,7 +133,7 @@ class ParScanThreadState {
                     Stack<oop, mtGC>* overflow_stacks_,
                     PreservedMarks* preserved_marks_,
                     size_t desired_plab_sz_,
                     ParallelTaskTerminator& term_);
                     TaskTerminator& term_);

 public:
  AgeTable* age_table() {return &_ageTable;}

@ -84,8 +84,6 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
  // we allocate to in the region sets. We'll re-add it later, when
  // it's retired again.
  _g1h->old_set_remove(retained_region);
  bool during_im = _g1h->collector_state()->in_initial_mark_gc();
  retained_region->note_start_of_copying(during_im);
  old->set(retained_region);
  _g1h->hr_printer()->reuse(retained_region);
  evacuation_info.set_alloc_regions_used_before(retained_region->used());

@ -30,9 +30,9 @@
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"

size_t G1Arguments::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
@ -44,10 +44,12 @@ void G1Arguments::initialize_verification_types() {
    size_t length = strlen(VerifyGCType);
    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
    strncpy(type_list, VerifyGCType, length + 1);
    char* token = strtok(type_list, delimiter);
    char* save_ptr;

    char* token = strtok_r(type_list, delimiter, &save_ptr);
    while (token != NULL) {
      parse_verification_type(token);
      token = strtok(NULL, delimiter);
      token = strtok_r(NULL, delimiter, &save_ptr);
    }
    FREE_C_HEAP_ARRAY(char, type_list);
  }
@ -75,7 +77,7 @@ void G1Arguments::parse_verification_type(const char* type) {
void G1Arguments::initialize() {
  GCArguments::initialize();
  assert(UseG1GC, "Error");
  FLAG_SET_DEFAULT(ParallelGCThreads, VM_Version::parallel_worker_threads());
  FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);

@ -57,11 +57,10 @@
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
#include "gc/g1/g1VMOperations.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
@ -78,6 +77,7 @@
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
@ -1150,6 +1150,8 @@ void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
}

void G1CollectedHeap::resize_heap_if_necessary() {
  assert_at_safepoint_on_vm_thread();

  // Capacity, free and used after the GC counted as full regions to
  // include the waste in the following calculations.
  const size_t capacity_after_gc = capacity();
@ -1991,6 +1993,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
    case GCCause::_g1_humongous_allocation: return true;
    case GCCause::_g1_periodic_collection:  return G1PeriodicGCInvokesConcurrent;
    default:                                return is_user_requested_concurrent_full_gc(cause);
  }
}
@ -2912,9 +2915,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
    }
    GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);

    uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                                  workers()->active_workers(),
                                                                  Threads::number_of_non_daemon_threads());
    uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
                                                            workers()->active_workers(),
                                                            Threads::number_of_non_daemon_threads());
    active_workers = workers()->update_active_workers(active_workers);
    log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());

@ -3002,11 +3005,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
        // Initialize the GC alloc regions.
        _allocator->init_gc_alloc_regions(evacuation_info);

        G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
        G1ParScanThreadStateSet per_thread_states(this,
                                                  workers()->active_workers(),
                                                  collection_set()->young_region_length(),
                                                  collection_set()->optional_region_length());
        pre_evacuate_collection_set();

        // Actually do the work...
        evacuate_collection_set(&per_thread_states);
        evacuate_optional_collection_set(&per_thread_states);

        post_evacuate_collection_set(evacuation_info, &per_thread_states);

@ -3197,11 +3204,11 @@ void G1ParEvacuateFollowersClosure::do_void() {
  EventGCPhaseParallel event;
  G1ParScanThreadState* const pss = par_scan_state();
  pss->trim_queue();
  event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ObjCopy));
  event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
  do {
    EventGCPhaseParallel event;
    pss->steal_and_trim_queue(queues());
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(G1GCPhaseTimes::ObjCopy));
    event.commit(GCId::current(), pss->worker_id(), G1GCPhaseTimes::phase_name(_phase));
  } while (!offer_termination());
}

@ -3211,7 +3218,7 @@ protected:
  G1ParScanThreadStateSet* _pss;
  RefToScanQueueSet* _queues;
  G1RootProcessor* _root_processor;
  ParallelTaskTerminator _terminator;
  TaskTerminator _terminator;
  uint _n_workers;

 public:
@ -3256,7 +3263,7 @@ public:
    size_t evac_term_attempts = 0;
    {
      double start = os::elapsedTime();
      G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
      G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, _terminator.terminator(), G1GCPhaseTimes::ObjCopy);
      evac.do_void();

      evac_term_attempts = evac.term_attempts();
@ -3547,7 +3554,7 @@ public:
    G1CopyingKeepAliveClosure keep_alive(_g1h, pss);

    // Complete GC closure
    G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
    G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator, G1GCPhaseTimes::ObjCopy);

    // Call the reference processing task's work routine.
    _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
@ -3568,8 +3575,8 @@ void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers
  assert(_workers->active_workers() >= ergo_workers,
         "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
         ergo_workers, _workers->active_workers());
  ParallelTaskTerminator terminator(ergo_workers, _queues);
  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
  TaskTerminator terminator(ergo_workers, _queues);
  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, terminator.terminator());

  _workers->run_task(&proc_task_proxy, ergo_workers);
}
@ -3719,6 +3726,145 @@ void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_threa
  phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
}

class G1EvacuateOptionalRegionTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  G1ParScanThreadStateSet* _per_thread_states;
  G1OptionalCSet* _optional;
  RefToScanQueueSet* _queues;
  ParallelTaskTerminator _terminator;

  Tickspan trim_ticks(G1ParScanThreadState* pss) {
    Tickspan copy_time = pss->trim_ticks();
    pss->reset_trim_ticks();
    return copy_time;
  }

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuationRootClosures* root_cls = pss->closures();
    G1ScanObjsDuringScanRSClosure obj_cl(_g1h, pss);

    size_t scanned = 0;
    size_t claimed = 0;
    size_t skipped = 0;
    size_t used_memory = 0;

    Ticks    start = Ticks::now();
    Tickspan copy_time;

    for (uint i = _optional->current_index(); i < _optional->current_limit(); i++) {
      HeapRegion* hr = _optional->region_at(i);
      G1ScanRSForOptionalClosure scan_opt_cl(&obj_cl);
      pss->oops_into_optional_region(hr)->oops_do(&scan_opt_cl, root_cls->raw_strong_oops());
      copy_time += trim_ticks(pss);

      G1ScanRSForRegionClosure scan_rs_cl(_g1h->g1_rem_set()->scan_state(), &obj_cl, pss, G1GCPhaseTimes::OptScanRS, worker_id);
      scan_rs_cl.do_heap_region(hr);
      copy_time += trim_ticks(pss);
      scanned += scan_rs_cl.cards_scanned();
      claimed += scan_rs_cl.cards_claimed();
      skipped += scan_rs_cl.cards_skipped();

      // Chunk lists for this region are no longer needed.
      used_memory += pss->oops_into_optional_region(hr)->used_memory();
    }

    Tickspan scan_time = (Ticks::now() - start) - copy_time;
    G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
    p->record_or_add_time_secs(G1GCPhaseTimes::OptScanRS, worker_id, scan_time.seconds());
    p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, copy_time.seconds());

    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, scanned, G1GCPhaseTimes::OptCSetScannedCards);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, claimed, G1GCPhaseTimes::OptCSetClaimedCards);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, skipped, G1GCPhaseTimes::OptCSetSkippedCards);
    p->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_id, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    Ticks start = Ticks::now();
    G1ParEvacuateFollowersClosure cl(_g1h, pss, _queues, &_terminator, G1GCPhaseTimes::OptObjCopy);
    cl.do_void();

    Tickspan evac_time = (Ticks::now() - start);
    G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
    p->record_or_add_time_secs(G1GCPhaseTimes::OptObjCopy, worker_id, evac_time.seconds());
    assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming done during optional evacuation");
  }

 public:
  G1EvacuateOptionalRegionTask(G1CollectedHeap* g1h,
                               G1ParScanThreadStateSet* per_thread_states,
                               G1OptionalCSet* cset,
                               RefToScanQueueSet* queues,
                               uint n_workers) :
    AbstractGangTask("G1 Evacuation Optional Region Task"),
    _g1h(g1h),
    _per_thread_states(per_thread_states),
    _optional(cset),
    _queues(queues),
    _terminator(n_workers, _queues) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;

    G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id);
    pss->set_ref_discoverer(_g1h->ref_processor_stw());

    scan_roots(pss, worker_id);
    evacuate_live_objects(pss, worker_id);
  }
};

void G1CollectedHeap::evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset) {
  class G1MarkScope : public MarkScope {};
  G1MarkScope code_mark_scope;

  G1EvacuateOptionalRegionTask task(this, per_thread_states, ocset, _task_queues, workers()->active_workers());
  workers()->run_task(&task);
}

void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  G1OptionalCSet optional_cset(&_collection_set, per_thread_states);
  if (optional_cset.is_empty()) {
    return;
  }

  if (evacuation_failed()) {
    return;
  }

  G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
  const double gc_start_time_ms = phase_times->cur_collection_start_sec() * 1000.0;

  double start_time_sec = os::elapsedTime();

  do {
    double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
    double time_left_ms = MaxGCPauseMillis - time_used_ms;

    if (time_left_ms < 0) {
      log_trace(gc, ergo, cset)("Skipping %u optional regions, pause time exceeded %.3fms", optional_cset.size(), time_used_ms);
      break;
    }

    optional_cset.prepare_evacuation(time_left_ms * _g1_policy->optional_evacuation_fraction());
    if (optional_cset.prepare_failed()) {
      log_trace(gc, ergo, cset)("Skipping %u optional regions, no regions can be evacuated in %.3fms", optional_cset.size(), time_left_ms);
      break;
    }

    evacuate_optional_regions(per_thread_states, &optional_cset);

    optional_cset.complete_evacuation();
    if (optional_cset.evacuation_failed()) {
      break;
    }
  } while (!optional_cset.is_empty());

  phase_times->record_optional_evacuation((os::elapsedTime() - start_time_sec) * 1000.0);
}
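Worked numbers for the budget loop above, using assumed values: at -XX:MaxGCPauseMillis=200, if 150 ms have already been spent when an iteration starts, time_left_ms is 50 ms; with an optional_evacuation_fraction of, say, 0.2 (an illustrative value, not one taken from this change), prepare_evacuation is handed a 10 ms budget for that batch. The loop then re-measures elapsed time and repeats until the optional set is drained, preparation or evacuation fails, or the pause budget is exhausted.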
|
||||
|
||||
void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
|
||||
// Also cleans the card table from temporary duplicate detection information used
|
||||
// during UpdateRS/ScanRS.
|
||||
@ -4533,8 +4679,6 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d
|
||||
}
|
||||
_g1_policy->remset_tracker()->update_at_allocate(new_alloc_region);
|
||||
_hr_printer.alloc(new_alloc_region);
|
||||
bool during_im = collector_state()->in_initial_mark_gc();
|
||||
new_alloc_region->note_start_of_copying(during_im);
|
||||
return new_alloc_region;
|
||||
}
|
||||
return NULL;
|
||||
@ -4543,12 +4687,15 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState d
|
||||
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
|
||||
size_t allocated_bytes,
|
||||
InCSetState dest) {
|
||||
bool during_im = collector_state()->in_initial_mark_gc();
|
||||
alloc_region->note_end_of_copying(during_im);
|
||||
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
|
||||
if (dest.is_old()) {
|
||||
old_set_add(alloc_region);
|
||||
}
|
||||
|
||||
bool const during_im = collector_state()->in_initial_mark_gc();
|
||||
if (during_im && allocated_bytes > 0) {
|
||||
_cm->root_regions()->add(alloc_region);
|
||||
}
|
||||
_hr_printer.retire(alloc_region);
|
||||
}
|
||||
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include "gc/g1/g1EdenRegions.hpp"
|
||||
#include "gc/g1/g1EvacFailure.hpp"
|
||||
#include "gc/g1/g1EvacStats.hpp"
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc/g1/g1HeapTransition.hpp"
|
||||
#include "gc/g1/g1HeapVerifier.hpp"
|
||||
#include "gc/g1/g1HRPrinter.hpp"
|
||||
@ -567,6 +568,9 @@ public:
|
||||
void register_old_region_with_cset(HeapRegion* r) {
|
||||
_in_cset_fast_test.set_in_old(r->hrm_index());
|
||||
}
|
||||
void register_optional_region_with_cset(HeapRegion* r) {
|
||||
_in_cset_fast_test.set_optional(r->hrm_index());
|
||||
}
|
||||
void clear_in_cset(const HeapRegion* hr) {
|
||||
_in_cset_fast_test.clear(hr);
|
||||
}
|
||||
@ -723,6 +727,8 @@ private:
|
||||
|
||||
// Actually do the work of evacuating the collection set.
|
||||
void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
|
||||
void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
|
||||
void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);
|
||||
|
||||
void pre_evacuate_collection_set();
|
||||
void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
|
||||
@ -1405,6 +1411,7 @@ protected:
|
||||
G1ParScanThreadState* _par_scan_state;
|
||||
RefToScanQueueSet* _queues;
|
||||
ParallelTaskTerminator* _terminator;
|
||||
G1GCPhaseTimes::GCParPhases _phase;
|
||||
|
||||
G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
|
||||
RefToScanQueueSet* queues() { return _queues; }
|
||||
@ -1414,10 +1421,11 @@ public:
|
||||
G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
|
||||
G1ParScanThreadState* par_scan_state,
|
||||
RefToScanQueueSet* queues,
|
||||
ParallelTaskTerminator* terminator)
|
||||
ParallelTaskTerminator* terminator,
|
||||
G1GCPhaseTimes::GCParPhases phase)
|
||||
: _start_term(0.0), _term_time(0.0), _term_attempts(0),
|
||||
_g1h(g1h), _par_scan_state(par_scan_state),
|
||||
_queues(queues), _terminator(terminator) {}
|
||||
_queues(queues), _terminator(terminator), _phase(phase) {}
|
||||
|
||||
void do_void();
|
||||
|
||||
|
@ -23,15 +23,17 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1CollectionSet.hpp"
|
||||
#include "gc/g1/g1CollectorState.hpp"
|
||||
#include "gc/g1/g1ParScanThreadState.hpp"
|
||||
#include "gc/g1/g1Policy.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "gc/g1/heapRegionRemSet.hpp"
|
||||
#include "gc/g1/heapRegionSet.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/quickSort.hpp"
|
||||
|
||||
G1CollectorState* G1CollectionSet::collector_state() {
|
||||
@ -60,6 +62,9 @@ G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
|
||||
_collection_set_regions(NULL),
|
||||
_collection_set_cur_length(0),
|
||||
_collection_set_max_length(0),
|
||||
_optional_regions(NULL),
|
||||
_optional_region_length(0),
|
||||
_optional_region_max_length(0),
|
||||
_bytes_used_before(0),
|
||||
_recorded_rs_lengths(0),
|
||||
_inc_build_state(Inactive),
|
||||
@ -74,6 +79,7 @@ G1CollectionSet::~G1CollectionSet() {
|
||||
if (_collection_set_regions != NULL) {
|
||||
FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
|
||||
}
|
||||
free_optional_regions();
|
||||
delete _cset_chooser;
|
||||
}
|
||||
|
||||
@ -88,6 +94,7 @@ void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
|
||||
"Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
|
||||
|
||||
_old_region_length = 0;
|
||||
_optional_region_length = 0;
|
||||
}
|
||||
|
||||
void G1CollectionSet::initialize(uint max_region_length) {
|
||||
@ -96,6 +103,23 @@ void G1CollectionSet::initialize(uint max_region_length) {
|
||||
_collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
|
||||
}
|
||||
|
||||
void G1CollectionSet::initialize_optional(uint max_length) {
|
||||
assert(_optional_regions == NULL, "Already initialized");
|
||||
assert(_optional_region_length == 0, "Already initialized");
|
||||
assert(_optional_region_max_length == 0, "Already initialized");
|
||||
_optional_region_max_length = max_length;
|
||||
_optional_regions = NEW_C_HEAP_ARRAY(HeapRegion*, _optional_region_max_length, mtGC);
|
||||
}
|
||||
|
||||
void G1CollectionSet::free_optional_regions() {
|
||||
_optional_region_length = 0;
|
||||
_optional_region_max_length = 0;
|
||||
if (_optional_regions != NULL) {
|
||||
FREE_C_HEAP_ARRAY(HeapRegion*, _optional_regions);
|
||||
_optional_regions = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
|
||||
_recorded_rs_lengths = rs_lengths;
|
||||
}
|
||||
@ -104,7 +128,8 @@ void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
|
||||
void G1CollectionSet::add_old_region(HeapRegion* hr) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
|
||||
assert(_inc_build_state == Active, "Precondition");
|
||||
assert(_inc_build_state == Active || hr->index_in_opt_cset() != G1OptionalCSet::InvalidCSetIndex,
|
||||
"Precondition, actively building cset or adding optional later on");
|
||||
assert(hr->is_old(), "the region should be old");
|
||||
|
||||
assert(!hr->in_collection_set(), "should not already be in the CSet");
|
||||
@ -117,6 +142,22 @@ void G1CollectionSet::add_old_region(HeapRegion* hr) {
|
||||
size_t rs_length = hr->rem_set()->occupied();
|
||||
_recorded_rs_lengths += rs_length;
|
||||
_old_region_length += 1;
|
||||
|
||||
log_trace(gc, cset)("Added old region %d to collection set", hr->hrm_index());
|
||||
}
|
||||
|
||||
void G1CollectionSet::add_optional_region(HeapRegion* hr) {
|
||||
assert(!optional_is_full(), "Precondition, must have room left for this region");
|
||||
assert(hr->is_old(), "the region should be old");
|
||||
assert(!hr->in_collection_set(), "should not already be in the CSet");
|
||||
|
||||
_g1h->register_optional_region_with_cset(hr);
|
||||
|
||||
_optional_regions[_optional_region_length] = hr;
|
||||
uint index = _optional_region_length++;
|
||||
hr->set_index_in_opt_cset(index);
|
||||
|
||||
log_trace(gc, cset)("Added region %d to optional collection set (%u)", hr->hrm_index(), _optional_region_length);
|
||||
}
|
||||
|
||||
// Initialize the per-collection-set information
@ -168,6 +209,7 @@ void G1CollectionSet::finalize_incremental_building() {
void G1CollectionSet::clear() {
  assert_at_safepoint_on_vm_thread();
  _collection_set_cur_length = 0;
  _optional_region_length = 0;
}

void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
@ -396,6 +438,30 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
  return time_remaining_ms;
}

void G1CollectionSet::add_as_old(HeapRegion* hr) {
  cset_chooser()->pop(); // already have region via peek()
  _g1h->old_set_remove(hr);
  add_old_region(hr);
}

void G1CollectionSet::add_as_optional(HeapRegion* hr) {
  assert(_optional_regions != NULL, "Must not be called before array is allocated");
  cset_chooser()->pop(); // already have region via peek()
  _g1h->old_set_remove(hr);
  add_optional_region(hr);
}

bool G1CollectionSet::optional_is_full() {
  assert(_optional_region_length <= _optional_region_max_length, "Invariant");
  return _optional_region_length == _optional_region_max_length;
}

void G1CollectionSet::clear_optional_region(const HeapRegion* hr) {
  assert(_optional_regions != NULL, "Must not be called before array is allocated");
  uint index = hr->index_in_opt_cset();
  _optional_regions[index] = NULL;
}

static int compare_region_idx(const uint a, const uint b) {
  if (a > b) {
    return 1;
@ -409,21 +475,28 @@ static int compare_region_idx(const uint a, const uint b) {
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();
  double predicted_old_time_ms = 0.0;
  double predicted_optional_time_ms = 0.0;
  double optional_threshold_ms = time_remaining_ms * _policy->optional_prediction_fraction();
  uint expensive_region_num = 0;

  if (collector_state()->in_mixed_phase()) {
    cset_chooser()->verify();
    const uint min_old_cset_length = _policy->calc_min_old_cset_length();
    const uint max_old_cset_length = _policy->calc_max_old_cset_length();

    uint expensive_region_num = 0;
    const uint max_old_cset_length = MAX2(min_old_cset_length, _policy->calc_max_old_cset_length());
    bool check_time_remaining = _policy->adaptive_young_list_length();

    initialize_optional(max_old_cset_length - min_old_cset_length);
    log_debug(gc, ergo, cset)("Start adding old regions for mixed gc. min %u regions, max %u regions, "
                              "time remaining %1.2fms, optional threshold %1.2fms",
                              min_old_cset_length, max_old_cset_length, time_remaining_ms, optional_threshold_ms);

    HeapRegion* hr = cset_chooser()->peek();
    while (hr != NULL) {
      if (old_region_length() >= max_old_cset_length) {
      if (old_region_length() + optional_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
                                  old_region_length(), max_old_cset_length);
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). "
                                  "old %u regions, optional %u regions",
                                  old_region_length(), optional_region_length());
        break;
      }

@ -437,69 +510,66 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
                                  "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                  old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
                                  "reclaimable: " SIZE_FORMAT "%s (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                  byte_size_in_proper_unit(reclaimable_bytes), proper_unit_for_byte_size(reclaimable_bytes),
                                  reclaimable_percent, G1HeapWastePercent);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr);
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
                                      "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
                                      predicted_time_ms, time_remaining_ms, old_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      // Add regions to old set until we reach minimum amount
      if (old_region_length() < min_old_cset_length) {
        predicted_old_time_ms += predicted_time_ms;
        add_as_old(hr);
        // Record the number of regions added when no time remaining
        if (time_remaining_ms == 0.0) {
          expensive_region_num++;
        }
      } else {
        if (old_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.

          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
                                    old_region_length(), min_old_cset_length);
        // In the non-auto-tuning case, we'll finish adding regions
        // to the CSet if we reach the minimum.
        if (!check_time_remaining) {
          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min).");
          break;
        }
        // Keep adding regions to old set until we reach optional threshold
        if (time_remaining_ms > optional_threshold_ms) {
          predicted_old_time_ms += predicted_time_ms;
          add_as_old(hr);
        } else if (time_remaining_ms > 0) {
          // Keep adding optional regions until time is up
          if (!optional_is_full()) {
            predicted_optional_time_ms += predicted_time_ms;
            add_as_optional(hr);
          } else {
            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (optional set full).");
            break;
          }
        } else {
          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high).");
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_old_time_ms += predicted_time_ms;
      cset_chooser()->pop(); // already have region via peek()
      _g1h->old_set_remove(hr);
      add_old_region(hr);

      hr = cset_chooser()->peek();
    }
    if (hr == NULL) {
      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
                                "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
                                old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
    }

    cset_chooser()->verify();
  }

  stop_incremental_building();

  log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
                            old_region_length(), predicted_old_time_ms, time_remaining_ms);
  log_debug(gc, ergo, cset)("Finish choosing CSet regions old: %u, optional: %u, "
                            "predicted old time: %1.2fms, predicted optional time: %1.2fms, time remaining: %1.2f",
                            old_region_length(), optional_region_length(),
                            predicted_old_time_ms, predicted_optional_time_ms, time_remaining_ms);
  if (expensive_region_num > 0) {
    log_debug(gc, ergo, cset)("CSet contains %u old regions that were added although the predicted time was too high.",
                              expensive_region_num);
  }

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
@ -507,6 +577,86 @@ void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  QuickSort::sort(_collection_set_regions, _collection_set_cur_length, compare_region_idx, true);
}
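The rewritten finalize_old_part() above replaces the old add-until-time-runs-out policy with three buckets: candidates below the minimum old-CSet length are always taken, further candidates are taken as regular old regions while the remaining budget stays above the optional threshold, and the rest are diverted to the optional set while any budget is left. A minimal standalone sketch of that per-region decision (hypothetical names and simplified state, not HotSpot code):

#include <algorithm>
#include <cstdio>
#include <initializer_list>

enum class Placement { Mandatory, Old, Optional, Stop };

struct SelectionState {
  unsigned old_len;              // regions already taken as old
  unsigned min_old_len;          // like calc_min_old_cset_length()
  double time_remaining_ms;      // running pause-time budget
  double optional_threshold_ms;  // budget * optional_prediction_fraction()
  bool adaptive;                 // like adaptive_young_list_length()
  bool optional_full;            // optional array at capacity
};

// Applies the checks in the same order as the rewritten loop.
static Placement place_region(SelectionState& s, double predicted_ms) {
  s.time_remaining_ms = std::max(s.time_remaining_ms - predicted_ms, 0.0);
  if (s.old_len < s.min_old_len) {
    s.old_len++;                  // below the minimum: taken unconditionally;
    return Placement::Mandatory;  // counted as "expensive" if the budget hit zero
  }
  if (!s.adaptive) {
    return Placement::Stop;       // non-auto-tuning: stop once the minimum is reached
  }
  if (s.time_remaining_ms > s.optional_threshold_ms) {
    s.old_len++;
    return Placement::Old;        // comfortable budget: regular old region
  }
  if (s.time_remaining_ms > 0.0 && !s.optional_full) {
    return Placement::Optional;   // low budget: defer to the optional set
  }
  return Placement::Stop;         // out of time, or the optional set is full
}

int main() {
  SelectionState s = {0, 2, 30.0, 6.0, true, false};
  for (double cost : {10.0, 10.0, 10.0}) {
    printf("placement=%d remaining=%.1fms\n",
           static_cast<int>(place_region(s, cost)), s.time_remaining_ms);
  }
  return 0;
}

With these assumed numbers the first two 10ms regions are taken as mandatory and the third stops selection, which is exactly the shape of log output the new code produces per phase rather than per region.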

HeapRegion* G1OptionalCSet::region_at(uint index) {
  return _cset->optional_region_at(index);
}

void G1OptionalCSet::prepare_evacuation(double time_limit) {
  assert(_current_index == _current_limit, "Before prepare no regions should be ready for evac");

  uint prepared_regions = 0;
  double prediction_ms = 0;

  _prepare_failed = true;
  for (uint i = _current_index; i < _cset->optional_region_length(); i++) {
    HeapRegion* hr = region_at(i);
    prediction_ms += _cset->predict_region_elapsed_time_ms(hr);
    if (prediction_ms > time_limit) {
      log_debug(gc, cset)("Prepared %u regions for optional evacuation. Predicted time: %.3fms", prepared_regions, prediction_ms);
      return;
    }

    // This region will be included in the next optional evacuation.
    prepare_to_evacuate_optional_region(hr);
    prepared_regions++;
    _current_limit++;
    _prepare_failed = false;
  }

  log_debug(gc, cset)("Prepared all %u regions for optional evacuation. Predicted time: %.3fms",
                      prepared_regions, prediction_ms);
}
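prepare_evacuation() selects the longest prefix of the not-yet-evacuated optional regions whose summed prediction fits the time limit, and complete_evacuation() then advances the window past the evacuated prefix. A self-contained toy model of that windowing protocol (hypothetical, not the HotSpot class):

#include <cstdio>
#include <utility>
#include <vector>

class OptionalSetModel {
  std::vector<double> _cost;   // predicted evacuation time per optional region
  size_t _index = 0;           // first region not yet evacuated
  size_t _limit = 0;           // end of the currently prepared window
  bool _prepare_failed = false;

public:
  explicit OptionalSetModel(std::vector<double> cost) : _cost(std::move(cost)) {}

  bool is_empty() const { return _index == _cost.size(); }

  // Grow the window while the accumulated prediction fits the budget;
  // _prepare_failed stays true if not even one more region fits.
  void prepare_evacuation(double time_limit_ms) {
    double predicted_ms = 0.0;
    _prepare_failed = true;
    for (size_t i = _index; i < _cost.size(); i++) {
      predicted_ms += _cost[i];
      if (predicted_ms > time_limit_ms) {
        return;
      }
      _limit = i + 1;
      _prepare_failed = false;
    }
  }

  bool prepare_failed() const { return _prepare_failed; }

  // Stand-in for the real evacuation work: just advance the window.
  void complete_evacuation() {
    printf("evacuated optional regions [%zu, %zu)\n", _index, _limit);
    _index = _limit;
  }
};

int main() {
  OptionalSetModel set({3.0, 3.0, 3.0, 3.0});
  const double budget_ms = 7.0;  // pretend 7ms remain for each increment
  while (!set.is_empty()) {
    set.prepare_evacuation(budget_ms);
    if (set.prepare_failed()) {
      break;  // nothing more fits; the destructor path would return regions
    }
    set.complete_evacuation();
  }
  return 0;
}

Under the assumed 7ms budget this evacuates the four regions in two increments, [0, 2) and [2, 4), mirroring how the real class is driven once per optional evacuation round.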

bool G1OptionalCSet::prepare_failed() {
  return _prepare_failed;
}

void G1OptionalCSet::complete_evacuation() {
  _evacuation_failed = false;
  for (uint i = _current_index; i < _current_limit; i++) {
    HeapRegion* hr = region_at(i);
    _cset->clear_optional_region(hr);
    if (hr->evacuation_failed()) {
      _evacuation_failed = true;
    }
  }
  _current_index = _current_limit;
}

bool G1OptionalCSet::evacuation_failed() {
  return _evacuation_failed;
}

G1OptionalCSet::~G1OptionalCSet() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  while (!is_empty()) {
    // We want to return regions not evacuated to the
    // chooser in reverse order to maintain the old order.
    HeapRegion* hr = _cset->remove_last_optional_region();
    assert(hr != NULL, "Should be valid region left");
    _pset->record_unused_optional_region(hr);
    g1h->old_set_add(hr);
    g1h->clear_in_cset(hr);
    hr->set_index_in_opt_cset(InvalidCSetIndex);
    _cset->cset_chooser()->push(hr);
  }
  _cset->free_optional_regions();
}

uint G1OptionalCSet::size() {
  return _cset->optional_region_length() - _current_index;
}

bool G1OptionalCSet::is_empty() {
  return size() == 0;
}

void G1OptionalCSet::prepare_to_evacuate_optional_region(HeapRegion* hr) {
  log_trace(gc, cset)("Adding region %u for optional evacuation", hr->hrm_index());
  G1CollectedHeap::heap()->clear_in_cset(hr);
  _cset->add_old_region(hr);
}

#ifdef ASSERT
class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
private:

@ -32,6 +32,7 @@
class G1CollectedHeap;
class G1CollectorState;
class G1GCPhaseTimes;
class G1ParScanThreadStateSet;
class G1Policy;
class G1SurvivorRegions;
class HeapRegion;
@ -56,6 +57,13 @@ class G1CollectionSet {
  volatile size_t _collection_set_cur_length;
  size_t _collection_set_max_length;

  // When doing mixed collections we can add old regions to the collection, which
  // can be collected if there is enough time. We call these optional regions and
  // the pointer to these regions are stored in the array below.
  HeapRegion** _optional_regions;
  uint _optional_region_length;
  uint _optional_region_max_length;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_part() when adding old regions
@ -106,15 +114,19 @@ class G1CollectionSet {
  G1CollectorState* collector_state();
  G1GCPhaseTimes* phase_times();

  double predict_region_elapsed_time_ms(HeapRegion* hr);

  void verify_young_cset_indices() const NOT_DEBUG_RETURN;
  void add_as_optional(HeapRegion* hr);
  void add_as_old(HeapRegion* hr);
  bool optional_is_full();

public:
  G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
  ~G1CollectionSet();

  // Initializes the collection set giving the maximum possible length of the collection set.
  void initialize(uint max_region_length);
  void initialize_optional(uint max_length);
  void free_optional_regions();

  CollectionSetChooser* cset_chooser();

@ -131,6 +143,7 @@ public:
  uint eden_region_length() const { return _eden_region_length; }
  uint survivor_region_length() const { return _survivor_region_length; }
  uint old_region_length() const { return _old_region_length; }
  uint optional_region_length() const { return _optional_region_length; }

  // Incremental collection set support

@ -175,6 +188,9 @@ public:
  // Add old region "hr" to the collection set.
  void add_old_region(HeapRegion* hr);

  // Add old region "hr" to optional collection set.
  void add_optional_region(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
@ -191,10 +207,73 @@ public:
  void print(outputStream* st);
#endif // !PRODUCT

  double predict_region_elapsed_time_ms(HeapRegion* hr);

  void clear_optional_region(const HeapRegion* hr);

  HeapRegion* optional_region_at(uint i) const {
    assert(_optional_regions != NULL, "Not yet initialized");
    assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
    return _optional_regions[i];
  }

  HeapRegion* remove_last_optional_region() {
    assert(_optional_regions != NULL, "Not yet initialized");
    assert(_optional_region_length != 0, "No region to remove");
    _optional_region_length--;
    HeapRegion* removed = _optional_regions[_optional_region_length];
    _optional_regions[_optional_region_length] = NULL;
    return removed;
  }

private:
  // Update the incremental collection set information when adding a region.
  void add_young_region_common(HeapRegion* hr);
};

// Helper class to manage the optional regions in a Mixed collection.
class G1OptionalCSet : public StackObj {
private:
  G1CollectionSet* _cset;
  G1ParScanThreadStateSet* _pset;
  uint _current_index;
  uint _current_limit;
  bool _prepare_failed;
  bool _evacuation_failed;

  void prepare_to_evacuate_optional_region(HeapRegion* hr);

public:
  static const uint InvalidCSetIndex = UINT_MAX;

  G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
    _cset(cset),
    _pset(pset),
    _current_index(0),
    _current_limit(0),
    _prepare_failed(false),
    _evacuation_failed(false) { }
  // The destructor returns regions to the cset-chooser and
  // frees the optional structure in the cset.
  ~G1OptionalCSet();

  uint current_index() { return _current_index; }
  uint current_limit() { return _current_limit; }

  uint size();
  bool is_empty();

  HeapRegion* region_at(uint index);

  // Prepare a set of regions for optional evacuation.
  void prepare_evacuation(double time_left_ms);
  bool prepare_failed();

  // Complete the evacuation of the previously prepared
  // regions by updating their state and check for failures.
  void complete_evacuation();
  bool evacuation_failed();
};

#endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP

@ -39,18 +39,18 @@
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
@ -255,21 +255,35 @@ void G1CMMarkStack::set_empty() {
  _free_list = NULL;
}

G1CMRootRegions::G1CMRootRegions() :
  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
  _should_abort(false), _claimed_survivor_index(0) { }
G1CMRootRegions::G1CMRootRegions(uint const max_regions) :
  _root_regions(NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC)),
  _max_regions(max_regions),
  _num_root_regions(0),
  _claimed_root_regions(0),
  _scan_in_progress(false),
  _should_abort(false) { }

void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
  _survivors = survivors;
  _cm = cm;
G1CMRootRegions::~G1CMRootRegions() {
  FREE_C_HEAP_ARRAY(HeapRegion*, _max_regions);
}

void G1CMRootRegions::reset() {
  _num_root_regions = 0;
}

void G1CMRootRegions::add(HeapRegion* hr) {
  assert_at_safepoint();
  size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
  assert(idx < _max_regions, "Trying to add more root regions than there is space " SIZE_FORMAT, _max_regions);
  _root_regions[idx] = hr;
}

void G1CMRootRegions::prepare_for_scan() {
  assert(!scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  _claimed_survivor_index = 0;
  _scan_in_progress = _survivors->regions()->is_nonempty();
  _scan_in_progress = _num_root_regions > 0;

  _claimed_root_regions = 0;
  _should_abort = false;
}

@ -280,18 +294,19 @@ HeapRegion* G1CMRootRegions::claim_next() {
    return NULL;
  }

  // Currently, only survivors can be root regions.
  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
  if (_claimed_root_regions >= _num_root_regions) {
    return NULL;
  }

  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
  if (claimed_index < survivor_regions->length()) {
    return survivor_regions->at(claimed_index);
  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
  if (claimed_index < _num_root_regions) {
    return _root_regions[claimed_index];
  }
  return NULL;
}
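claim_next() hands out root regions wait-free: each worker atomically increments a shared claim counter and takes the slot it drew, so no per-region locking is needed. The same idiom in portable C++ (a simplified stand-in using std::atomic rather than HotSpot's Atomic::add):

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static const size_t kNumRegions = 8;
static int regions[kNumRegions];               // stand-ins for HeapRegion*
static std::atomic<size_t> claimed_regions(0); // like _claimed_root_regions

// Returns the next unclaimed region, or nullptr when all are claimed.
// fetch_add returns the previous value, matching Atomic::add(1, ...) - 1.
static int* claim_next() {
  size_t idx = claimed_regions.fetch_add(1);
  return idx < kNumRegions ? &regions[idx] : nullptr;
}

int main() {
  std::vector<std::thread> workers;
  for (int w = 0; w < 4; w++) {
    workers.emplace_back([w] {
      while (int* r = claim_next()) {
        printf("worker %d claimed region %ld\n", w, static_cast<long>(r - regions));
      }
    });
  }
  for (std::thread& t : workers) {
    t.join();
  }
  return 0;
}

Each index is returned exactly once across all workers; a counter that overshoots past the array bound is harmless because out-of-range claims simply return nullptr.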

uint G1CMRootRegions::num_root_regions() const {
  return (uint)_survivors->regions()->length();
  return (uint)_num_root_regions;
}

void G1CMRootRegions::notify_scan_done() {
@ -307,12 +322,10 @@ void G1CMRootRegions::cancel_scan() {
void G1CMRootRegions::scan_finished() {
  assert(scan_in_progress(), "pre-condition");

  // Currently, only survivors can be root regions.
  if (!_should_abort) {
    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
    assert((uint)_claimed_survivor_index >= _survivors->length(),
           "we should have claimed all survivors, claimed index = %u, length = %u",
           (uint)_claimed_survivor_index, _survivors->length());
    assert(_claimed_root_regions >= num_root_regions(),
           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
           _claimed_root_regions, num_root_regions());
  }

  notify_scan_done();
@ -353,7 +366,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,

  _heap(_g1h->reserved_region()),

  _root_regions(),
  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

@ -365,7 +378,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator(ParallelTaskTerminator((int) _max_num_tasks, _task_queues)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),
@ -406,8 +419,6 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,

  assert(CGC_lock != NULL, "CGC_lock must be initialized");

  _root_regions.init(_g1h->survivor(), this);

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
@ -577,7 +588,7 @@ void G1ConcurrentMark::set_concurrency(uint active_tasks) {
  _num_active_tasks = active_tasks;
  // Need to update the three data structures below according to the
  // number of active threads for this phase.
  _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
  _terminator = TaskTerminator((int) active_tasks, _task_queues);
  _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
  _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
}
@ -728,6 +739,8 @@ void G1ConcurrentMark::pre_initial_mark() {
  // For each region note start of marking.
  NoteStartOfMarkHRClosure startcl;
  _g1h->heap_region_iterate(&startcl);

  _root_regions.reset();
}

@ -845,10 +858,10 @@ uint G1ConcurrentMark::calc_active_marking_workers() {
    result = _max_concurrent_workers;
  } else {
    result =
      AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
                                                      1, /* Minimum workers */
                                                      _num_concurrent_workers,
                                                      Threads::number_of_non_daemon_threads());
      WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
                                                1, /* Minimum workers */
                                                _num_concurrent_workers,
                                                Threads::number_of_non_daemon_threads());
    // Don't scale the result down by scale_concurrent_workers() because
    // that scaling has already gone into "_max_concurrent_workers".
  }
@ -859,12 +872,12 @@ uint G1ConcurrentMark::calc_active_marking_workers() {
}

void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
  // Currently, only survivors can be root regions.
  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
  assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
         "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
  G1RootRegionScanClosure cl(_g1h, this, worker_id);

  const uintx interval = PrefetchScanIntervalInBytes;
  HeapWord* curr = hr->bottom();
  HeapWord* curr = hr->next_top_at_mark_start();
  const HeapWord* end = hr->top();
  while (curr < end) {
    Prefetch::read(curr, interval);
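The change above moves the scan start from bottom() to next_top_at_mark_start(): for survivor regions the two coincide, but root regions may now also be old regions whose nTAMS sits above bottom, and only [nTAMS, top) is scanned. A toy illustration of the new bounds (indices stand in for heap addresses; not HotSpot code):

#include <cstdio>
#include <vector>

struct RegionModel {
  std::vector<int> objects;  // one entry per "object" in the region
  size_t ntams;              // next_top_at_mark_start() as an index
};

// Only [nTAMS, top) is root-scanned; in this simplified model,
// objects below nTAMS are assumed to be covered by the marking bitmap.
static void scan_root_region(const RegionModel& r) {
  for (size_t i = r.ntams; i < r.objects.size(); i++) {
    printf("marking through object %d\n", r.objects[i]);
  }
}

int main() {
  RegionModel survivor = {{10, 11, 12}, 0};  // survivor: nTAMS == bottom
  RegionModel old_gen  = {{20, 21, 22}, 2};  // old region: nTAMS above bottom
  scan_root_region(survivor);
  scan_root_region(old_gen);
  return 0;
}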
@ -224,34 +224,37 @@ private:
  template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
};

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
// Root Regions are regions that contain objects from nTAMS to top. These are roots
// for marking, i.e. their referenced objects must be kept alive to maintain the
// SATB invariant.
// We could scan and mark them through during the initial-mark pause, but for
// pause time reasons we move this work to the concurrent phase.
// We need to complete this procedure before the next GC because it might determine
// that some of these "root objects" are dead, potentially dropping some required
// references.
// Root regions comprise of the complete contents of survivor regions, and any
// objects copied into old gen during GC.
class G1CMRootRegions {
private:
  const G1SurvivorRegions* _survivors;
  G1ConcurrentMark* _cm;
  HeapRegion** _root_regions;
  size_t const _max_regions;

  volatile bool _scan_in_progress;
  volatile bool _should_abort;
  volatile int _claimed_survivor_index;
  volatile size_t _num_root_regions; // Actual number of root regions.

  volatile size_t _claimed_root_regions; // Number of root regions currently claimed.

  volatile bool _scan_in_progress;
  volatile bool _should_abort;

  void notify_scan_done();

public:
  G1CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm);
  G1CMRootRegions(uint const max_regions);
  ~G1CMRootRegions();

  // Reset the data structure to allow addition of new root regions.
  void reset();

  void add(HeapRegion* hr);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();
@ -322,8 +325,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
  uint _num_active_tasks; // Number of tasks currently active
  G1CMTask** _tasks; // Task queue array (max_worker_id length)

  G1CMTaskQueueSet* _task_queues; // Task queue set
  ParallelTaskTerminator _terminator; // For termination
  G1CMTaskQueueSet* _task_queues; // Task queue set
  TaskTerminator _terminator; // For termination

  // Two sync barriers that are used to synchronize tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
@ -409,10 +412,10 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
  // Prints all gathered CM-related statistics
  void print_stats();

  HeapWord* finger() { return _finger; }
  bool concurrent() { return _concurrent; }
  uint active_tasks() { return _num_active_tasks; }
  ParallelTaskTerminator* terminator() { return &_terminator; }
  HeapWord* finger() { return _finger; }
  bool concurrent() { return _concurrent; }
  uint active_tasks() { return _num_active_tasks; }
  ParallelTaskTerminator* terminator() const { return _terminator.terminator(); }

  // Claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
@ -553,7 +556,7 @@ public:
  // them.
  void scan_root_regions();

  // Scan a single root region and mark everything reachable from it.
  // Scan a single root region from nTAMS to top and mark everything reachable from it.
  void scan_root_region(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
@ -593,10 +596,8 @@ public:
  void print_on_error(outputStream* st) const;

  // Mark the given object on the next bitmap if it is below nTAMS.
  // If the passed obj_size is zero, it is recalculated from the given object if
  // needed. This is to be as lazy as possible with accessing the object's size.
  inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size = 0);
  inline bool mark_in_next_bitmap(uint worker_id, oop const obj, size_t const obj_size = 0);
  inline bool mark_in_next_bitmap(uint worker_id, HeapRegion* const hr, oop const obj);
  inline bool mark_in_next_bitmap(uint worker_id, oop const obj);

  inline bool is_marked_in_next_bitmap(oop p) const;

@ -55,12 +55,12 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
  return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive();
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) {
inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj) {
  HeapRegion* const hr = _g1h->heap_region_containing(obj);
  return mark_in_next_bitmap(worker_id, hr, obj, obj_size);
  return mark_in_next_bitmap(worker_id, hr, obj);
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj, size_t const obj_size) {
inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegion* const hr, oop const obj) {
  assert(hr != NULL, "just checking");
  assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());

@ -76,7 +76,7 @@ inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, HeapRegi

  bool success = _next_mark_bitmap->par_mark(obj_addr);
  if (success) {
    add_to_liveness(worker_id, obj, obj_size == 0 ? obj->size() : obj_size);
    add_to_liveness(worker_id, obj, obj->size());
  }
  return success;
}
@ -31,7 +31,7 @@
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/g1/g1VMOperations.hpp"
#include "gc/shared/concurrentGCPhaseManager.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTrace.hpp"
@ -339,7 +339,7 @@ void G1ConcurrentMarkThread::run_service() {
                      TimeHelper::counter_to_millis(mark_end - mark_start));
  mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
  CMRemark cl(_cm);
  VM_CGC_Operation op(&cl, "Pause Remark");
  VM_G1Concurrent op(&cl, "Pause Remark");
  VMThread::execute(&op);
  if (_cm->has_aborted()) {
    break;
@ -370,7 +370,7 @@ void G1ConcurrentMarkThread::run_service() {

  if (!_cm->has_aborted()) {
    CMCleanup cl_cl(_cm);
    VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
    VM_G1Concurrent op(&cl_cl, "Pause Cleanup");
    VMThread::execute(&op);
  }

@ -126,7 +126,7 @@ public:
    // explicitly and all objects in the CSet are considered
    // (implicitly) live. So, we won't mark them explicitly and
    // we'll leave them over NTAMS.
    _cm->mark_in_next_bitmap(_worker_id, obj);
    _cm->mark_in_next_bitmap(_worker_id, _hr, obj);
  }
  size_t obj_size = obj->size();

@ -37,11 +37,11 @@
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
@ -88,15 +88,15 @@ uint G1FullCollector::calc_active_workers() {
  uint waste_worker_count = MAX2((max_wasted_regions_allowed * 2) , 1u);
  uint heap_waste_worker_limit = MIN2(waste_worker_count, max_worker_count);

  // Also consider HeapSizePerGCThread by calling AdaptiveSizePolicy to calculate
  // Also consider HeapSizePerGCThread by calling WorkerPolicy to calculate
  // the number of workers.
  uint current_active_workers = heap->workers()->active_workers();
  uint adaptive_worker_limit = AdaptiveSizePolicy::calc_active_workers(max_worker_count, current_active_workers, 0);
  uint active_worker_limit = WorkerPolicy::calc_active_workers(max_worker_count, current_active_workers, 0);

  // Update active workers to the lower of the limits.
  uint worker_count = MIN2(heap_waste_worker_limit, adaptive_worker_limit);
  uint worker_count = MIN2(heap_waste_worker_limit, active_worker_limit);
  log_debug(gc, task)("Requesting %u active workers for full compaction (waste limited workers: %u, adaptive workers: %u)",
                      worker_count, heap_waste_worker_limit, adaptive_worker_limit);
                      worker_count, heap_waste_worker_limit, active_worker_limit);
  worker_count = heap->workers()->update_active_workers(worker_count);
  log_info(gc, task)("Using %u workers of %u for full compaction", worker_count, max_worker_count);

@ -61,7 +61,7 @@ void G1FullGCMarkTask::work(uint worker_id) {
  }

  // Mark stack is populated, now process and drain it.
  marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), &_terminator);
  marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), _terminator.terminator());

  // This is the point where the entire marking should have completed.
  assert(marker->oop_stack()->is_empty(), "Marking should have completed");
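A recurring mechanical change in this diff is that call sites stop holding a ParallelTaskTerminator directly and instead hold a TaskTerminator, reaching the underlying terminator through terminator(). A minimal sketch of that owning-wrapper pattern (hypothetical model types, not the HotSpot classes):

#include <cstdio>

// Stand-in for the raw terminator that callers ultimately need.
class ParallelTaskTerminatorModel {
  int _num_workers;
public:
  explicit ParallelTaskTerminatorModel(int num_workers) : _num_workers(num_workers) {}
  void offer_termination() { printf("terminating among %d workers\n", _num_workers); }
};

// Owning wrapper: call sites store this by value and use terminator()
// where they previously passed the raw object's address.
class TaskTerminatorModel {
  ParallelTaskTerminatorModel _terminator;
public:
  explicit TaskTerminatorModel(int num_workers) : _terminator(num_workers) {}
  ParallelTaskTerminatorModel* terminator() { return &_terminator; }
};

int main() {
  TaskTerminatorModel t(4);
  t.terminator()->offer_termination();  // like complete_marking(..., _terminator.terminator())
  return 0;
}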