Phil Race 2019-05-02 10:48:56 -07:00
commit 501a6f3423
968 changed files with 30359 additions and 14876 deletions

@ -555,3 +555,5 @@ b67884871b5fff79c5ef3eb8ac74dd48d71ea9b1 jdk-12-ga
f855ec13aa2501ae184c8b3e0626a8cec9966116 jdk-13+15
9d0ae9508d5337b0dc7cc4684be42888c4023755 jdk-13+16
93b702d2a0cb9e32160208f6700aede1f8492773 jdk-13+17
bebb82ef3434a25f8142edafec20165f07ac562d jdk-13+18
a43d6467317d8f1e160f67aadec37919c9d64443 jdk-13+19

@ -369,7 +369,7 @@ cc: Sun C 5.13 SunOS_i386 2014/10/20
$ CC -V
CC: Sun C++ 5.13 SunOS_i386 151846-10 2015/10/30</code></pre>
<h3 id="microsoft-visual-studio">Microsoft Visual Studio</h3>
<p>The minimum accepted version of Visual Studio is 2010. Older versions will not be accepted by <code>configure</code>. The maximum accepted version of Visual Studio is 2017. Versions older than 2017 are unlikely to continue working for long.</p>
<p>The minimum accepted version of Visual Studio is 2010. Older versions will not be accepted by <code>configure</code>. The maximum accepted version of Visual Studio is 2019. Versions older than 2017 are unlikely to continue working for long.</p>
<p>If you have multiple versions of Visual Studio installed, <code>configure</code> will by default pick the latest. You can request a specific version to be used by setting <code>--with-toolchain-version</code>, e.g. <code>--with-toolchain-version=2015</code>.</p>
<p>If you get <code>LINK: fatal error LNK1123: failure during conversion to COFF: file invalid</code> when building using Visual Studio 2010, you have encountered <a href="http://support.microsoft.com/kb/2757355">KB2757355</a>, a bug triggered by a specific installation order. However, the solution suggested by the KB article does not always resolve the problem. See <a href="https://stackoverflow.com/questions/10888391">this stackoverflow discussion</a> for other suggestions.</p>
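As an illustrative sketch only (not part of this change), pinning the toolchain version from the command line uses the option described above; the version value here is just an example:

```bash
# Pick a specific Visual Studio release instead of the newest one found.
bash configure --with-toolchain-version=2019

# Default behavior: configure picks the latest installed Visual Studio.
bash configure
```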
<h3 id="ibm-xl-cc">IBM XL C/C++</h3>

@ -404,7 +404,7 @@ CC: Sun C++ 5.13 SunOS_i386 151846-10 2015/10/30
The minimum accepted version of Visual Studio is 2010. Older versions will not
be accepted by `configure`. The maximum accepted version of Visual Studio is
2017. Versions older than 2017 are unlikely to continue working for long.
2019. Versions older than 2017 are unlikely to continue working for long.
If you have multiple versions of Visual Studio installed, `configure` will by
default pick the latest. You can request a specific version to be used by

@ -460,6 +460,7 @@ jdk.internal.vm.compiler_EXCLUDES += \
org.graalvm.compiler.core.test \
org.graalvm.compiler.debug.test \
org.graalvm.compiler.graph.test \
org.graalvm.compiler.hotspot.aarch64.test \
org.graalvm.compiler.hotspot.amd64.test \
org.graalvm.compiler.hotspot.jdk9.test \
org.graalvm.compiler.hotspot.lir.test \

@ -536,6 +536,12 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -mno-omit-leaf-frame-pointer -mstack-alignment=16"
if test "x$OPENJDK_TARGET_OS" = xlinux; then
if test "x$DEBUG_LEVEL" = xrelease; then
# Clang does not inline as much as GCC does for functions with "inline" keyword by default.
# This causes noticeable slowdown in pause time for G1, and possibly in other areas.
# Increasing the inline hint threshold avoids the slowdown for Clang-built JVM.
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -mllvm -inlinehint-threshold=100000"
fi
TOOLCHAIN_CFLAGS_JDK="-pipe"
TOOLCHAIN_CFLAGS_JDK_CONLY="-fno-strict-aliasing" # technically NOT for CXX
fi
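For context, a hedged sketch of a Linux configure invocation under which the release-only Clang flag added above would take effect (both options are standard configure switches; this is illustrative, not part of the change):

```bash
# Build with Clang at the release debug level on Linux; per the logic above,
# flags-cflags.m4 then appends "-mllvm -inlinehint-threshold=100000" to the JVM CFLAGS.
bash configure --with-toolchain-type=clang --with-debug-level=release
```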

@ -392,9 +392,8 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
JVM_FEATURES_jvmci=""
INCLUDE_JVMCI="false"
else
# Only enable jvmci on x86_64, sparcv9 and aarch64
# Only enable jvmci on x86_64 and aarch64
if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \
test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
AC_MSG_RESULT([yes])
JVM_FEATURES_jvmci="jvmci"

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
################################################################################
# The order of these defines the priority by which we try to find them.
VALID_VS_VERSIONS="2017 2013 2015 2012 2010"
VALID_VS_VERSIONS="2017 2019 2013 2015 2012 2010"
VS_DESCRIPTION_2010="Microsoft Visual Studio 2010"
VS_VERSION_INTERNAL_2010=100
@ -89,6 +89,20 @@ VS_SDK_PLATFORM_NAME_2017=
VS_SUPPORTED_2017=true
VS_TOOLSET_SUPPORTED_2017=true
VS_DESCRIPTION_2019="Microsoft Visual Studio 2019"
VS_VERSION_INTERNAL_2019=141
VS_MSVCR_2019=vcruntime140.dll
VS_MSVCP_2019=msvcp140.dll
VS_ENVVAR_2019="VS160COMNTOOLS"
VS_USE_UCRT_2019="true"
VS_VS_INSTALLDIR_2019="Microsoft Visual Studio/2019"
VS_EDITIONS_2019="BuildTools Community Professional Enterprise"
VS_SDK_INSTALLDIR_2019=
VS_VS_PLATFORM_NAME_2019="v142"
VS_SDK_PLATFORM_NAME_2019=
VS_SUPPORTED_2019=false
VS_TOOLSET_SUPPORTED_2019=false
################################################################################
AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
@ -99,7 +113,7 @@ AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
METHOD="$3"
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(VS_BASE)
# In VS 2017, the default installation is in a subdir named after the edition.
# In VS 2017 and VS 2019, the default installation is in a subdir named after the edition.
# Find the first one present and use that.
if test "x$VS_EDITIONS" != x; then
for edition in $VS_EDITIONS; do
@ -662,7 +676,7 @@ AC_DEFUN([TOOLCHAIN_SETUP_MSVC_DLL],
else
CYGWIN_VC_TOOLS_REDIST_DIR="$VCToolsRedistDir"
BASIC_FIXUP_PATH(CYGWIN_VC_TOOLS_REDIST_DIR)
# Probe: Using well-known location from VS 2017
# Probe: Using well-known location from VS 2017 and VS 2019
if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
POSSIBLE_MSVC_DLL="`ls $CYGWIN_VC_TOOLS_REDIST_DIR/x64/Microsoft.VC${VS_VERSION_INTERNAL}.CRT/$DLL_NAME`"
else

@ -1283,7 +1283,10 @@ var getVersion = function (feature, interim, update, patch) {
var version = (feature != null ? feature : version_numbers.get("DEFAULT_VERSION_FEATURE"))
+ "." + (interim != null ? interim : version_numbers.get("DEFAULT_VERSION_INTERIM"))
+ "." + (update != null ? update : version_numbers.get("DEFAULT_VERSION_UPDATE"))
+ "." + (patch != null ? patch : version_numbers.get("DEFAULT_VERSION_PATCH"));
+ "." + (patch != null ? patch : version_numbers.get("DEFAULT_VERSION_PATCH"))
+ "." + version_numbers.get("DEFAULT_VERSION_EXTRA1")
+ "." + version_numbers.get("DEFAULT_VERSION_EXTRA2")
+ "." + version_numbers.get("DEFAULT_VERSION_EXTRA3");
while (version.match(".*\\.0$")) {
version = version.substring(0, version.length - 2);
}
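To illustrate the trimming behavior with the new fields, here is a hedged shell sketch of the same trailing-".0" removal; the sample version string is hypothetical:

```bash
# Hypothetical value: feature.interim.update.patch.extra1.extra2.extra3
version="13.0.0.0.0.0.0"
# Mirror the loop above: while the string ends in ".0", drop the last two characters.
while [[ "$version" == *.0 ]]; do
  version="${version%??}"
done
echo "$version"   # prints "13"
```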

@ -1,4 +1,4 @@
File-Date: 2018-11-30
File-Date: 2019-04-03
%%
Type: language
Subtag: aa
@ -43008,6 +43008,7 @@ Subtag: lsg
Description: Lyons Sign Language
Added: 2009-07-29
Deprecated: 2018-03-08
Preferred-Value: lsg
Prefix: sgn
%%
Type: extlang
@ -43406,6 +43407,7 @@ Subtag: rsi
Description: Rennellese Sign Language
Added: 2009-07-29
Deprecated: 2017-02-23
Preferred-Value: rsi
Prefix: sgn
%%
Type: extlang
@ -43760,6 +43762,7 @@ Subtag: yds
Description: Yiddish Sign Language
Added: 2009-07-29
Deprecated: 2015-02-12
Preferred-Value: yds
Prefix: sgn
%%
Type: extlang
@ -46537,16 +46540,16 @@ Subtag: arevela
Description: Eastern Armenian
Added: 2006-09-18
Deprecated: 2018-03-24
Preferred-Value: hy
Prefix: hy
Comments: Preferred tag is hy
%%
Type: variant
Subtag: arevmda
Description: Western Armenian
Added: 2006-09-18
Deprecated: 2018-03-24
Preferred-Value: hyw
Prefix: hy
Comments: Preferred tag is hyw
%%
Type: variant
Subtag: asante
@ -46642,6 +46645,12 @@ Prefix: en
Comments: Jargon embedded in American English
%%
Type: variant
Subtag: bornholm
Description: Bornholmsk
Added: 2019-03-27
Prefix: da
%%
Type: variant
Subtag: cisaup
Description: Cisalpine
Added: 2018-04-22

@ -0,0 +1,209 @@
#!/bin/bash
#
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# This script copies parts of a Visual Studio installation into a devkit
# suitable for building OpenJDK and OracleJDK. Needs to run in Cygwin or WSL.
# erik.joelsson@oracle.com
VS_VERSION="2019"
VS_VERSION_NUM_NODOT="160"
VS_DLL_VERSION="140"
SDK_VERSION="10"
SDK_FULL_VERSION="10.0.17763.0"
MSVC_DIR="Microsoft.VC141.CRT"
MSVC_FULL_VERSION="14.12.27508"
REDIST_FULL_VERSION="14.20.27508"
SCRIPT_DIR="$(cd "$(dirname $0)" > /dev/null && pwd)"
BUILD_DIR="${SCRIPT_DIR}/../../build/devkit"
################################################################################
# Prepare settings
UNAME_SYSTEM=`uname -s`
UNAME_RELEASE=`uname -r`
# Detect cygwin or WSL
IS_CYGWIN=`echo $UNAME_SYSTEM | grep -i CYGWIN`
IS_WSL=`echo $UNAME_RELEASE | grep Microsoft`
if test "x$IS_CYGWIN" != "x"; then
BUILD_ENV="cygwin"
elif test "x$IS_WSL" != "x"; then
BUILD_ENV="wsl"
else
echo "Unknown environment; only Cygwin and WSL are supported."
exit 1
fi
if test "x$BUILD_ENV" = "xcygwin"; then
WINDOWS_PATH_TO_UNIX_PATH="cygpath -u"
elif test "x$BUILD_ENV" = "xwsl"; then
WINDOWS_PATH_TO_UNIX_PATH="wslpath -u"
fi
# Work around the insanely named ProgramFiles(x86) env variable
PROGRAMFILES_X86="$($WINDOWS_PATH_TO_UNIX_PATH "$(cmd.exe /c set | sed -n 's/^ProgramFiles(x86)=//p' | tr -d '\r')")"
# Find Visual Studio installation dir
VSNNNCOMNTOOLS=`cmd.exe /c echo %VS${VS_VERSION_NUM_NODOT}COMNTOOLS% | tr -d '\r'`
if [ -d "$VSNNNCOMNTOOLS" ]; then
VS_INSTALL_DIR="$($WINDOWS_PATH_TO_UNIX_PATH "$VSNNNCOMNTOOLS/../..")"
else
VS_INSTALL_DIR="${PROGRAMFILES_X86}/Microsoft Visual Studio/2019"
VS_INSTALL_DIR="$(ls -d "${VS_INSTALL_DIR}/"{Community,Professional,Enterprise} 2>/dev/null | head -n1)"
fi
echo "VS_INSTALL_DIR: $VS_INSTALL_DIR"
# Extract semantic version
POTENTIAL_INI_FILES="Common7/IDE/wdexpress.isolation.ini Common7/IDE/devenv.isolation.ini"
for f in $POTENTIAL_INI_FILES; do
if [ -f "$VS_INSTALL_DIR/$f" ]; then
VS_VERSION_SP="$(grep ^SemanticVersion= "$VS_INSTALL_DIR/$f")"
# Remove SemanticVersion=
VS_VERSION_SP="${VS_VERSION_SP#*=}"
# Remove suffix of too detailed numbering starting with +
VS_VERSION_SP="${VS_VERSION_SP%+*}"
break
fi
done
if [ -z "$VS_VERSION_SP" ]; then
echo "Failed to find SP version"
exit 1
fi
echo "Found Version SP: $VS_VERSION_SP"
# Setup output dirs
DEVKIT_ROOT="${BUILD_DIR}/VS${VS_VERSION}-${VS_VERSION_SP}-devkit"
DEVKIT_BUNDLE="${DEVKIT_ROOT}.tar.gz"
echo "Creating devkit in $DEVKIT_ROOT"
MSVCR_DLL=${MSVC_DIR}/vcruntime${VS_DLL_VERSION}.dll
MSVCP_DLL=${MSVC_DIR}/msvcp${VS_DLL_VERSION}.dll
################################################################################
# Copy Visual Studio files
TOOLS_VERSION="$(ls "$VS_INSTALL_DIR/VC/Tools/MSVC" | sort -r -n | head -n1)"
echo "Found Tools version: $TOOLS_VERSION"
VC_SUBDIR="VC/Tools/MSVC/$TOOLS_VERSION"
REDIST_VERSION="$(ls "$VS_INSTALL_DIR/VC/Redist/MSVC" | sort -r -n | head -n1)"
echo "Found Redist version: $REDIST_VERSION"
REDIST_SUBDIR="VC/Redist/MSVC/$REDIST_VERSION"
echo "Copying VC..."
rm -rf $DEVKIT_ROOT/VC
mkdir -p $DEVKIT_ROOT/VC/bin
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx64/x64" $DEVKIT_ROOT/VC/bin/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx86/x86" $DEVKIT_ROOT/VC/bin/
mkdir -p $DEVKIT_ROOT/VC/lib
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/lib/x64" $DEVKIT_ROOT/VC/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/lib/x86" $DEVKIT_ROOT/VC/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/include" $DEVKIT_ROOT/VC/
mkdir -p $DEVKIT_ROOT/VC/atlmfc/lib
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/lib/x64" $DEVKIT_ROOT/VC/atlmfc/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/lib/x86" $DEVKIT_ROOT/VC/atlmfc/lib/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/atlmfc/include" $DEVKIT_ROOT/VC/atlmfc/
mkdir -p $DEVKIT_ROOT/VC/Auxiliary
cp -r "$VS_INSTALL_DIR/VC/Auxiliary/Build" $DEVKIT_ROOT/VC/Auxiliary/
mkdir -p $DEVKIT_ROOT/VC/redist
cp -r "$VS_INSTALL_DIR/$REDIST_SUBDIR/x64" $DEVKIT_ROOT/VC/redist/
cp -r "$VS_INSTALL_DIR/$REDIST_SUBDIR/x86" $DEVKIT_ROOT/VC/redist/
# The redist runtime libs are needed to run the compiler but may not be
# installed on the machine where the devkit will be used.
cp $DEVKIT_ROOT/VC/redist/x86/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/x86
cp $DEVKIT_ROOT/VC/redist/x86/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x86
cp $DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL $DEVKIT_ROOT/VC/bin/x64
cp $DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL $DEVKIT_ROOT/VC/bin/x64
################################################################################
# Copy SDK files
SDK_INSTALL_DIR="$PROGRAMFILES_X86/Windows Kits/$SDK_VERSION"
echo "SDK_INSTALL_DIR: $SDK_INSTALL_DIR"
SDK_FULL_VERSION="$(ls "$SDK_INSTALL_DIR/bin" | sort -r -n | head -n1)"
echo "Found SDK version: $SDK_FULL_VERSION"
UCRT_VERSION="$(ls "$SDK_INSTALL_DIR/Redist" | grep $SDK_VERSION | sort -r -n | head -n1)"
echo "Found UCRT version: $UCRT_VERSION"
echo "Copying SDK..."
rm -rf $DEVKIT_ROOT/$SDK_VERSION
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/bin
cp -r "$SDK_INSTALL_DIR/bin/$SDK_FULL_VERSION/x64" $DEVKIT_ROOT/$SDK_VERSION/bin/
cp -r "$SDK_INSTALL_DIR/bin/$SDK_FULL_VERSION/x86" $DEVKIT_ROOT/$SDK_VERSION/bin/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/lib
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/x64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/um/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x64" $DEVKIT_ROOT/$SDK_VERSION/lib/
cp -r "$SDK_INSTALL_DIR/lib/$SDK_FULL_VERSION/ucrt/x86" $DEVKIT_ROOT/$SDK_VERSION/lib/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/Redist
cp -r "$SDK_INSTALL_DIR/Redist/$UCRT_VERSION/ucrt" $DEVKIT_ROOT/$SDK_VERSION/Redist/
mkdir -p $DEVKIT_ROOT/$SDK_VERSION/include
cp -r "$SDK_INSTALL_DIR/include/$SDK_FULL_VERSION/"* $DEVKIT_ROOT/$SDK_VERSION/include/
################################################################################
# Generate devkit.info
echo-info() {
echo "$1" >> $DEVKIT_ROOT/devkit.info
}
echo "Generating devkit.info..."
rm -f $DEVKIT_ROOT/devkit.info
echo-info "# This file describes to configure how to interpret the contents of this devkit"
echo-info "DEVKIT_NAME=\"Microsoft Visual Studio $VS_VERSION $VS_VERSION_SP (devkit)\""
echo-info "DEVKIT_VS_VERSION=\"$VS_VERSION\""
echo-info ""
echo-info "DEVKIT_TOOLCHAIN_PATH_x86=\"\$DEVKIT_ROOT/VC/bin/x86:\$DEVKIT_ROOT/$SDK_VERSION/bin/x86\""
echo-info "DEVKIT_VS_INCLUDE_x86=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atlmfc/include;\$DEVKIT_ROOT/$SDK_VERSION/include/shared;\$DEVKIT_ROOT/$SDK_VERSION/include/ucrt;\$DEVKIT_ROOT/$SDK_VERSION/include/um;\$DEVKIT_ROOT/$SDK_VERSION/include/winrt\""
echo-info "DEVKIT_VS_LIB_x86=\"\$DEVKIT_ROOT/VC/lib/x86;\$DEVKIT_ROOT/VC/atlmfc/lib/x86;\$DEVKIT_ROOT/$SDK_VERSION/lib/x86\""
echo-info "DEVKIT_MSVCR_DLL_x86=\"\$DEVKIT_ROOT/VC/redist/x86/$MSVCR_DLL\""
echo-info "DEVKIT_MSVCP_DLL_x86=\"\$DEVKIT_ROOT/VC/redist/x86/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_x86=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x86\""
echo-info ""
echo-info "DEVKIT_TOOLCHAIN_PATH_x86_64=\"\$DEVKIT_ROOT/VC/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x64:\$DEVKIT_ROOT/$SDK_VERSION/bin/x86\""
echo-info "DEVKIT_VS_INCLUDE_x86_64=\"\$DEVKIT_ROOT/VC/include;\$DEVKIT_ROOT/VC/atlmfc/include;\$DEVKIT_ROOT/$SDK_VERSION/include/shared;\$DEVKIT_ROOT/$SDK_VERSION/include/ucrt;\$DEVKIT_ROOT/$SDK_VERSION/include/um;\$DEVKIT_ROOT/$SDK_VERSION/include/winrt\""
echo-info "DEVKIT_VS_LIB_x86_64=\"\$DEVKIT_ROOT/VC/lib/x64;\$DEVKIT_ROOT/VC/atlmfc/lib/x64;\$DEVKIT_ROOT/$SDK_VERSION/lib/x64\""
echo-info "DEVKIT_MSVCR_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCR_DLL\""
echo-info "DEVKIT_MSVCP_DLL_x86_64=\"\$DEVKIT_ROOT/VC/redist/x64/$MSVCP_DLL\""
echo-info "DEVKIT_UCRT_DLL_DIR_x86_64=\"\$DEVKIT_ROOT/10/Redist/ucrt/DLLs/x64\""
echo-info ""
echo-info "DEVKIT_TOOLS_VERSION=\"$TOOLS_VERSION\""
echo-info "DEVKIT_REDIST_VERSION=\"$REDIST_VERSION\""
echo-info "DEVKIT_SDK_VERSION=\"$SDK_FULL_VERSION\""
echo-info "DEVKIT_UCRT_VERSION=\"$UCRT_VERSION\""
################################################################################
# Copy this script
echo "Copying this script..."
cp $0 $DEVKIT_ROOT/
################################################################################
# Create bundle
echo "Creating bundle: $DEVKIT_BUNDLE"
(cd "$DEVKIT_ROOT" && tar zcf "$DEVKIT_BUNDLE" .)

@ -72,6 +72,8 @@ public class HelloClasslist {
String SC = String.valueOf(args.length) + "string";
String SCS = String.valueOf(args.length) + "string" + String.valueOf(args.length);
String CSS = "string" + String.valueOf(args.length) + String.valueOf(args.length);
String CSC = "string" + String.valueOf(args.length) + "string";
String SSC = String.valueOf(args.length) + String.valueOf(args.length) + "string";
String CSCS = "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length);
String SCSC = String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string";
String CSCSC = "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string";

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -66,6 +66,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJDWP, \
LIBS_solaris := $(LIBDL), \
LIBS_macosx := -liconv, \
LIBS_aix := -liconv, \
LIBS_windows := $(WIN_JAVA_LIB), \
))
$(BUILD_LIBJDWP): $(call FindLib, java.base, java)

@ -91,6 +91,7 @@ ifeq ($(INCLUDE_GRAAL), true)
$(SRC_DIR)/org.graalvm.compiler.core.test/src \
$(SRC_DIR)/org.graalvm.compiler.debug.test/src \
$(SRC_DIR)/org.graalvm.compiler.graph.test/src \
$(SRC_DIR)/org.graalvm.compiler.hotspot.aarch64.test/src \
$(SRC_DIR)/org.graalvm.compiler.hotspot.amd64.test/src \
$(SRC_DIR)/org.graalvm.compiler.hotspot.jdk9.test/src \
$(SRC_DIR)/org.graalvm.compiler.hotspot.lir.test/src \

@ -1,5 +1,5 @@
#
# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -736,6 +736,7 @@ BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libgetclsig006 := $(NSK_JVMTI_AGENT_INCLUDE
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libdisposeenv002 := $(NSK_JVMTI_AGENT_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libdisposeenv001 := $(NSK_JVMTI_AGENT_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libexception001 := $(NSK_JVMTI_AGENT_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libexceptionjni001 := $(NSK_JVMTI_AGENT_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libresexhausted := $(NSK_JVMTI_AGENT_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libgcstart001 := $(NSK_JVMTI_AGENT_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libgcstart002 := $(NSK_JVMTI_AGENT_INCLUDES)
@ -1375,6 +1376,7 @@ else
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_librawmonenter004 += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_librawmonenter001 += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libdealloc001 += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libexceptionjni001 += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libisfldsin003 += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libisfldsin002 += -lpthread
BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libthrgrpinfo001 += -lpthread

@ -56,6 +56,9 @@ suite = {
"jdk.vm.ci.common" : {
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"dependencies" : [
"jdk.vm.ci.services",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9+",
"workingSets" : "API,JVMCI",
@ -258,9 +261,11 @@ suite = {
"subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
"dependencies" : [
"jdk.vm.ci.runtime.test",
"jdk.vm.ci.hotspot.test",
],
"distDependencies" : [
"JVMCI_API",
"JVMCI_HOTSPOT",
],
"exclude" : ["mx:JUNIT"],
},

@ -11343,14 +11343,11 @@ instruct SubL_reg_LShift_reg(iRegLNoSp dst,
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
// Make sure we are not going to exceed what sbfm can do.
predicate((unsigned int)n->in(2)->get_int() <= 63
&& (unsigned int)n->in(1)->in(2)->get_int() <= 63);
ins_cost(INSN_COST * 2);
format %{ "sbfm $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
int lshift = $lshift_count$$constant & 63;
int rshift = $rshift_count$$constant & 63;
int s = 63 - lshift;
int r = (rshift - lshift) & 63;
__ sbfm(as_Register($dst$$reg),
@ -11366,14 +11363,11 @@ instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
// Make sure we are not going to exceed what sbfmw can do.
predicate((unsigned int)n->in(2)->get_int() <= 31
&& (unsigned int)n->in(1)->in(2)->get_int() <= 31);
ins_cost(INSN_COST * 2);
format %{ "sbfmw $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
int lshift = $lshift_count$$constant & 31;
int rshift = $rshift_count$$constant & 31;
int s = 31 - lshift;
int r = (rshift - lshift) & 31;
__ sbfmw(as_Register($dst$$reg),
@ -11389,14 +11383,11 @@ instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_co
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
// Make sure we are not going to exceed what ubfm can do.
predicate((unsigned int)n->in(2)->get_int() <= 63
&& (unsigned int)n->in(1)->in(2)->get_int() <= 63);
ins_cost(INSN_COST * 2);
format %{ "ubfm $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
int lshift = $lshift_count$$constant & 63;
int rshift = $rshift_count$$constant & 63;
int s = 63 - lshift;
int r = (rshift - lshift) & 63;
__ ubfm(as_Register($dst$$reg),
@ -11412,14 +11403,11 @@ instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
// Make sure we are not going to exceed what ubfmw can do.
predicate((unsigned int)n->in(2)->get_int() <= 31
&& (unsigned int)n->in(1)->in(2)->get_int() <= 31);
ins_cost(INSN_COST * 2);
format %{ "ubfmw $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
int lshift = $lshift_count$$constant & 31;
int rshift = $rshift_count$$constant & 31;
int s = 31 - lshift;
int r = (rshift - lshift) & 31;
__ ubfmw(as_Register($dst$$reg),
@ -11491,13 +11479,12 @@ instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask m
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (LShiftI (AndI src mask) lshift));
predicate((unsigned int)n->in(2)->get_int() <= 31 &&
(exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));
predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));
ins_cost(INSN_COST);
format %{ "ubfizw $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
int lshift = $lshift$$constant & 31;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfizw(as_Register($dst$$reg),
@ -11510,13 +11497,12 @@ instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
match(Set dst (LShiftL (AndL src mask) lshift));
predicate((unsigned int)n->in(2)->get_int() <= 63 &&
(exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));
predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
int lshift = $lshift$$constant & 63;
long mask = $mask$$constant;
int width = exact_log2_long(mask+1);
__ ubfiz(as_Register($dst$$reg),
@ -11528,14 +11514,13 @@ instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
predicate((unsigned int)n->in(2)->get_int() <= 31 &&
(exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
int lshift = $lshift$$constant & 63;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfiz(as_Register($dst$$reg),
@ -11549,7 +11534,7 @@ instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
@ -11564,7 +11549,7 @@ instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
@ -11579,7 +11564,7 @@ instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, i
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
@ -11594,7 +11579,7 @@ instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshif
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
@ -14061,55 +14046,63 @@ instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg
// ============================================================================
// Max and Min
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
match(Set dst (MinI src1 src2));
effect( DEF dst, USE src1, USE src2, USE cr );
effect(DEF dst, USE src1, USE src2, KILL cr);
size(8);
ins_cost(INSN_COST * 3);
format %{
"cmpw $src1 $src2\t signed int\n\t"
"cselw $dst, $src1, $src2 lt\t"
%}
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src1, $src2 lt\t" %}
ins_encode %{
__ cmpw(as_Register($src1$$reg),
as_Register($src2$$reg));
__ cselw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LT);
%}
ins_pipe(ialu_reg_reg);
ins_pipe(icond_reg_reg);
%}
instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
match(Set dst (MinI src1 src2));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_reg(cr, src1, src2);
cmovI_reg_reg_lt(dst, src1, src2, cr);
%}
%}
// FROM HERE
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
match(Set dst (MaxI src1 src2));
effect( DEF dst, USE src1, USE src2, USE cr );
effect(DEF dst, USE src1, USE src2, KILL cr);
size(8);
ins_cost(INSN_COST * 3);
format %{
"cmpw $src1 $src2\t signed int\n\t"
"cselw $dst, $src1, $src2 gt\t"
%}
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src1, $src2 gt\t" %}
ins_encode %{
__ cmpw(as_Register($src1$$reg),
as_Register($src2$$reg));
__ cselw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::GT);
%}
ins_pipe(ialu_reg_reg);
ins_pipe(icond_reg_reg);
%}
instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
%{
match(Set dst (MaxI src1 src2));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_reg(cr, src1, src2);
cmovI_reg_reg_gt(dst, src1, src2, cr);
%}
%}
// ============================================================================

@ -154,14 +154,11 @@ define(`BFM_INSN',`
instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
%{
match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
// Make sure we are not going to exceed what $4 can do.
predicate((unsigned int)n->in(2)->get_int() <= $2
&& (unsigned int)n->in(1)->in(2)->get_int() <= $2);
ins_cost(INSN_COST * 2);
format %{ "$4 $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
int lshift = $lshift_count$$constant & $2;
int rshift = $rshift_count$$constant & $2;
int s = $2 - lshift;
int r = (rshift - lshift) & $2;
__ $4(as_Register($dst$$reg),
@ -224,13 +221,12 @@ define(`UBFIZ_INSN',
`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
%{
match(Set dst (LShift$1 (And$1 src mask) lshift));
predicate((unsigned int)n->in(2)->get_int() <= $3 &&
(exact_log2$5(n->in(1)->in(2)->get_$4()+1) + (unsigned int)n->in(2)->get_int()) <= ($3+1));
predicate((exact_log2$5(n->in(1)->in(2)->get_$4() + 1) + (n->in(2)->get_int() & $3)) <= ($3 + 1));
ins_cost(INSN_COST);
format %{ "$2 $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
int lshift = $lshift$$constant & $3;
long mask = $mask$$constant;
int width = exact_log2$5(mask+1);
__ $2(as_Register($dst$$reg),
@ -239,19 +235,18 @@ define(`UBFIZ_INSN',
ins_pipe(ialu_reg_shift);
%}')
UBFIZ_INSN(I, ubfizw, 31, int)
UBFIZ_INSN(L, ubfiz, 63, long, _long)
UBFIZ_INSN(L, ubfiz, 63, long, _long)
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
predicate((unsigned int)n->in(2)->get_int() <= 31 &&
(exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
int lshift = $lshift$$constant & 63;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfiz(as_Register($dst$$reg),
@ -266,7 +261,7 @@ define(`EXTRACT_INSN',
`instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & $2));
predicate(0 == (((n->in(1)->in(2)->get_int() & $2) + (n->in(2)->in(2)->get_int() & $2)) & $2));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}

@ -33,7 +33,6 @@
// (see globals.hpp)
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, JVMCIObject method, JVMCI_TRAPS) {
if (inst->is_call() || inst->is_jump() || inst->is_blr()) {
return pc_offset + NativeCall::instruction_size;
} else if (inst->is_general_jump()) {
@ -43,12 +43,12 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
}
}
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
void CodeInstaller::pd_patch_OopConstant(int pc_offset, JVMCIObject constant, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
#ifdef ASSERT
{
NativeInstruction *insn = nativeInstruction_at(pc);
if (HotSpotObjectConstantImpl::compressed(constant)) {
if (jvmci_env()->get_HotSpotObjectConstantImpl_compressed(constant)) {
// Mov narrow constant: movz n << 16, movk
assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
nativeInstruction_at(pc+4)->is_movk(), "wrong insn in patch");
@ -59,7 +59,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
}
}
#endif // ASSERT
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
Handle obj = jvmci_env()->asConstant(constant, JVMCI_CHECK);
jobject value = JNIHandles::make_local(obj());
MacroAssembler::patch_oop(pc, (address)obj());
int oop_index = _oop_recorder->find_index(value);
@ -67,21 +67,21 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
_instructions->relocate(pc, rspec);
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, JVMCIObject constant, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
if (jvmci_env()->get_HotSpotMetaspaceConstantImpl_compressed(constant)) {
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, JVMCI_CHECK);
MacroAssembler::patch_narrow_klass(pc, narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);
void* reference = record_metadata_reference(_instructions, pc, constant, JVMCI_CHECK);
move->set_data((intptr_t) reference);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(reference));
}
}
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
NativeInstruction* inst = nativeInstruction_at(pc);
if (inst->is_adr_aligned() || inst->is_ldr_literal()
@ -94,7 +94,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
}
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, JVMCI_TRAPS) {
address pc = (address) inst;
if (inst->is_call()) {
NativeCall* call = nativeCall_at(pc);
@ -118,12 +118,12 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle hotspot_method, jint pc_offset, TRAPS) {
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, JVMCIObject hotspot_method, jint pc_offset, JVMCI_TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check, this might also be an unresolved method
if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
method = getMethodFromHotSpotMethod(hotspot_method());
if (JVMCIENV->isa_HotSpotResolvedJavaMethodImpl(hotspot_method)) {
method = JVMCIENV->asMethod(hotspot_method);
}
#endif
switch (_next_call_type) {
@ -157,7 +157,7 @@ void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle hotspot_meth
}
}
void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
void CodeInstaller::pd_relocate_poll(address pc, jint mark, JVMCI_TRAPS) {
switch (mark) {
case POLL_NEAR:
JVMCI_ERROR("unimplemented");
@ -178,7 +178,7 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
}
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, JVMCI_TRAPS) {
if (jvmci_reg < RegisterImpl::number_of_registers) {
return as_Register(jvmci_reg)->as_VMReg();
} else {

@ -40,8 +40,6 @@ define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered co
define_pd_global(intx, CodeEntryAlignment, 16);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
#define DEFAULT_STACK_YELLOW_PAGES (2)
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_SHADOW_PAGES (5 DEBUG_ONLY(+1))

@ -33,8 +33,6 @@
// (see globals.hpp)
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, NeedsDeoptSuspend, false); // Only register window machines need this.
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);

@ -35,7 +35,6 @@
// z/Architecture remembers branch targets, so don't share vtables.
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, NeedsDeoptSuspend, false); // Only register window machines need this.
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks.
define_pd_global(bool, TrapBasedNullChecks, true);

@ -38,7 +38,6 @@
// according to the prior table. So, we let the thread continue and let it block by itself.
define_pd_global(bool, DontYieldALot, true); // yield no more than 100 times per second
define_pd_global(bool, ShareVtableStubs, false); // improves performance markedly for mtrt and compress
define_pd_global(bool, NeedsDeoptSuspend, true); // register window machines need this
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on sparc.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, JVMCIObject method, JVMCI_TRAPS) {
if (inst->is_call() || inst->is_jump()) {
return pc_offset + NativeCall::instruction_size;
} else if (inst->is_call_reg()) {
@ -44,11 +44,11 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
}
}
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
void CodeInstaller::pd_patch_OopConstant(int pc_offset, JVMCIObject constant, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
Handle obj = jvmci_env()->asConstant(constant, JVMCI_CHECK);
jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) {
if (jvmci_env()->get_HotSpotObjectConstantImpl_compressed(constant)) {
int oop_index = _oop_recorder->find_index(value);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
_instructions->relocate(pc, rspec, 1);
@ -64,22 +64,22 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
}
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, JVMCIObject constant, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
if (jvmci_env()->get_HotSpotMetaspaceConstantImpl_compressed(constant)) {
NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, JVMCI_CHECK);
move->set_data((intptr_t)narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);
void* reference = record_metadata_reference(_instructions, pc, constant, JVMCI_CHECK);
move->set_data((intptr_t)reference);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(reference));
}
}
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
NativeInstruction* inst = nativeInstruction_at(pc);
NativeInstruction* inst1 = nativeInstruction_at(pc + 4);
@ -100,7 +100,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
}
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, JVMCI_TRAPS) {
address pc = (address) inst;
if (inst->is_call()) {
NativeCall* call = nativeCall_at(pc);
@ -116,12 +116,12 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method, jint pc_offset, TRAPS) {
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, JVMCIObject hotspot_method, jint pc_offset, JVMCI_TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check, this might also be an unresolved method
if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
method = getMethodFromHotSpotMethod(hotspot_method());
if (JVMCIENV->isa_HotSpotResolvedJavaMethodImpl(hotspot_method)) {
method = JVMCIENV->asMethod(hotspot_method);
}
#endif
switch (_next_call_type) {
@ -155,7 +155,7 @@ void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method,
}
}
void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
void CodeInstaller::pd_relocate_poll(address pc, jint mark, JVMCI_TRAPS) {
switch (mark) {
case POLL_NEAR:
JVMCI_ERROR("unimplemented");
@ -176,7 +176,7 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
}
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, JVMCI_TRAPS) {
// JVMCI Registers are numbered as follows:
// 0..31: Thirty-two General Purpose registers (CPU Registers)
// 32..63: Thirty-two single precision float registers

@ -32,7 +32,6 @@
// (see globals.hpp)
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@
#include "code/vmreg.hpp"
#include "vmreg_x86.inline.hpp"
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, JVMCIObject method, JVMCI_TRAPS) {
if (inst->is_call() || inst->is_jump()) {
assert(NativeCall::instruction_size == (int)NativeJump::instruction_size, "unexpected size");
return (pc_offset + NativeCall::instruction_size);
@ -54,7 +54,7 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
return (offset);
} else if (inst->is_call_reg()) {
// the inlined vtable stub contains a "call register" instruction
assert(method.not_null(), "only valid for virtual calls");
assert(method.is_non_null(), "only valid for virtual calls");
return (pc_offset + ((NativeCallReg *) inst)->next_instruction_offset());
} else if (inst->is_cond_jump()) {
address pc = (address) (inst);
@ -64,11 +64,12 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Hand
}
}
void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
void CodeInstaller::pd_patch_OopConstant(int pc_offset, JVMCIObject constant, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
Handle obj = jvmci_env()->asConstant(constant, JVMCI_CHECK);
Thread* THREAD = Thread::current();
jobject value = JNIHandles::make_local(obj());
if (HotSpotObjectConstantImpl::compressed(constant)) {
if (jvmci_env()->get_HotSpotObjectConstantImpl_compressed(constant)) {
#ifdef _LP64
address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
int oop_index = _oop_recorder->find_index(value);
@ -85,24 +86,24 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS)
}
}
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, JVMCIObject constant, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
if (jvmci_env()->get_HotSpotMetaspaceConstantImpl_compressed(constant)) {
#ifdef _LP64
address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
*((narrowKlass*) operand) = record_narrow_metadata_reference(_instructions, operand, constant, CHECK);
*((narrowKlass*) operand) = record_narrow_metadata_reference(_instructions, operand, constant, JVMCI_CHECK);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
#else
JVMCI_ERROR("compressed Klass* on 32bit");
#endif
} else {
address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
*((void**) operand) = record_metadata_reference(_instructions, operand, constant, CHECK);
*((void**) operand) = record_metadata_reference(_instructions, operand, constant, JVMCI_CHECK);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
}
}
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, JVMCI_TRAPS) {
address pc = _instructions->start() + pc_offset;
address operand = Assembler::locate_operand(pc, Assembler::disp32_operand);
@ -117,7 +118,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
TRACE_jvmci_3("relocating at " PTR_FORMAT "/" PTR_FORMAT " with destination at " PTR_FORMAT " (%d)", p2i(pc), p2i(operand), p2i(dest), data_offset);
}
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, JVMCI_TRAPS) {
address pc = (address) inst;
if (inst->is_call()) {
// NOTE: for call without a mov, the offset must fit a 32-bit immediate
@ -145,12 +146,12 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method, jint pc_offset, TRAPS) {
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, JVMCIObject hotspot_method, jint pc_offset, JVMCI_TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check, this might also be an unresolved method
if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
method = getMethodFromHotSpotMethod(hotspot_method());
if (JVMCIENV->isa_HotSpotResolvedJavaMethodImpl(hotspot_method)) {
method = JVMCIENV->asMethod(hotspot_method);
}
#endif
switch (_next_call_type) {
@ -199,7 +200,7 @@ static void relocate_poll_near(address pc) {
}
void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
void CodeInstaller::pd_relocate_poll(address pc, jint mark, JVMCI_TRAPS) {
switch (mark) {
case POLL_NEAR: {
relocate_poll_near(pc);
@ -229,7 +230,7 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
}
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, JVMCI_TRAPS) {
if (jvmci_reg < RegisterImpl::number_of_registers) {
return as_Register(jvmci_reg)->as_VMReg();
} else {

@ -33,7 +33,6 @@
// runtime system. See globals.hpp for details of what they do.
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, NeedsDeoptSuspend, false);
define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, TrapBasedNullChecks, false);

@ -678,7 +678,7 @@ static void *thread_native_entry(Thread *thread) {
// handshaking with parent thread
{
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);
// notify parent thread
osthread->set_state(INITIALIZED);
@ -686,7 +686,7 @@ static void *thread_native_entry(Thread *thread) {
// wait until os::start_thread()
while (osthread->get_state() == INITIALIZED) {
sync->wait(Mutex::_no_safepoint_check_flag);
sync->wait_without_safepoint_check();
}
}
@ -766,9 +766,9 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
// Wait until child thread is either initialized or aborted
{
Monitor* sync_with_child = osthread->startThread_lock();
MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
while ((state = osthread->get_state()) == ALLOCATED) {
sync_with_child->wait(Mutex::_no_safepoint_check_flag);
sync_with_child->wait_without_safepoint_check();
}
}
@ -840,7 +840,7 @@ void os::pd_start_thread(Thread* thread) {
OSThread * osthread = thread->osthread();
assert(osthread->get_state() != INITIALIZED, "just checking");
Monitor* sync_with_child = osthread->startThread_lock();
MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
sync_with_child->notify();
}

@ -773,7 +773,7 @@ static void *thread_native_entry(Thread *thread) {
// handshaking with parent thread
{
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);
// notify parent thread
osthread->set_state(INITIALIZED);
@ -781,7 +781,7 @@ static void *thread_native_entry(Thread *thread) {
// wait until os::start_thread()
while (osthread->get_state() == INITIALIZED) {
sync->wait(Mutex::_no_safepoint_check_flag);
sync->wait_without_safepoint_check();
}
}
@ -881,9 +881,9 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
// Wait until child thread is either initialized or aborted
{
Monitor* sync_with_child = osthread->startThread_lock();
MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
while ((state = osthread->get_state()) == ALLOCATED) {
sync_with_child->wait(Mutex::_no_safepoint_check_flag);
sync_with_child->wait_without_safepoint_check();
}
}
}
@ -975,7 +975,7 @@ void os::pd_start_thread(Thread* thread) {
OSThread * osthread = thread->osthread();
assert(osthread->get_state() != INITIALIZED, "just checking");
Monitor* sync_with_child = osthread->startThread_lock();
MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
sync_with_child->notify();
}
@ -5158,13 +5158,16 @@ jint os::init_2(void) {
return JNI_ERR;
}
#if defined(IA32)
// Need to ensure we've determined the process's initial stack to
// perform the workaround
Linux::capture_initial_stack(JavaThread::stack_size_at_create());
workaround_expand_exec_shield_cs_limit();
#else
suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
if (!suppress_primordial_thread_resolution) {
Linux::capture_initial_stack(JavaThread::stack_size_at_create());
}
#if defined(IA32)
workaround_expand_exec_shield_cs_limit();
#endif
Linux::libpthread_init();

@ -3558,41 +3558,19 @@ void os::naked_short_sleep(jlong ms) {
Sleep(ms);
}
// Windows does not provide sleep functionality with nanosecond resolution, so we
// try to approximate this with spinning combined with yielding if another thread
// is ready to run on the current processor.
void os::naked_short_nanosleep(jlong ns) {
assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
LARGE_INTEGER hundreds_nanos = { 0 };
HANDLE wait_timer = ::CreateWaitableTimer(NULL /* attributes*/,
true /* manual reset */,
NULL /* name */ );
if (wait_timer == NULL) {
log_warning(os)("Failed to CreateWaitableTimer: %u", GetLastError());
return;
}
// We need a minimum of one hundred nanos.
ns = ns > 100 ? ns : 100;
// Round ns to the nearest hundred nanos.
// Negative values indicate relative time.
hundreds_nanos.QuadPart = -((ns + 50) / 100);
if (::SetWaitableTimer(wait_timer /* handle */,
&hundreds_nanos /* due time */,
0 /* period */,
NULL /* comp func */,
NULL /* comp func args */,
FALSE /* resume */)) {
DWORD res = ::WaitForSingleObject(wait_timer /* handle */, INFINITE /* timeout */);
if (res != WAIT_OBJECT_0) {
if (res == WAIT_FAILED) {
log_warning(os)("Failed to WaitForSingleObject: %u", GetLastError());
} else {
log_warning(os)("Unexpected return from WaitForSingleObject: %s",
res == WAIT_ABANDONED ? "WAIT_ABANDONED" : "WAIT_TIMEOUT");
}
int64_t start = os::javaTimeNanos();
do {
if (SwitchToThread() == 0) {
// Nothing else is ready to run on this cpu, spin a little
SpinPause();
}
}
::CloseHandle(wait_timer /* handle */);
} while (os::javaTimeNanos() - start < ns);
}
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
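The replacement body of os::naked_short_nanosleep above drops the waitable-timer path and simply spins until the deadline, yielding whenever another thread is ready to run. A small standalone illustration of the same spin-plus-yield idea with standard C++ timing; SwitchToThread and SpinPause are Windows/HotSpot specifics, and the unconditional yield below is a simplification of the hunk's "spin only when nothing else is ready" behavior:

#include <chrono>
#include <thread>

// Busy-wait for roughly `ns` nanoseconds, yielding to other runnable threads.
// A stand-in for the SwitchToThread()/SpinPause() loop in the hunk above.
void naked_short_nanosleep_sketch(long long ns) {
  using clock = std::chrono::steady_clock;
  const auto deadline = clock::now() + std::chrono::nanoseconds(ns);
  do {
    std::this_thread::yield();   // give another ready thread a chance to run
  } while (clock::now() < deadline);
}

int main() {
  naked_short_nanosleep_sketch(100000);  // ~100 microseconds
  return 0;
}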

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,11 +23,151 @@
#include "precompiled.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
uintptr_t ZAddressReservedStart() {
return ZAddressMetadataMarked0;
//
// The heap can have three different layouts, depending on the max heap size.
//
// Address Space & Pointer Layout 1
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000014000000000 (20TB)
// | Remapped View |
// +--------------------------------+ 0x0000010000000000 (16TB)
// | (Reserved, but unused) |
// +--------------------------------+ 0x00000c0000000000 (12TB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// | Marked0 View |
// +--------------------------------+ 0x0000040000000000 (4TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 6 5 2 1 0
// +--------------------+----+-----------------------------------------------+
// |00000000 00000000 00|1111|11 11111111 11111111 11111111 11111111 11111111|
// +--------------------+----+-----------------------------------------------+
// | | |
// | | * 41-0 Object Offset (42-bits, 4TB address space)
// | |
// | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
// | 0010 = Marked1 (Address view 8-12TB)
// | 0100 = Remapped (Address view 16-20TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-46 Fixed (18-bits, always zero)
//
//
// Address Space & Pointer Layout 2
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000280000000000 (40TB)
// | Remapped View |
// +--------------------------------+ 0x0000200000000000 (32TB)
// | (Reserved, but unused) |
// +--------------------------------+ 0x0000180000000000 (24TB)
// | Marked1 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// | Marked0 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 7 6 3 2 0
// +------------------+-----+------------------------------------------------+
// |00000000 00000000 0|1111|111 11111111 11111111 11111111 11111111 11111111|
// +-------------------+----+------------------------------------------------+
// | | |
// | | * 42-0 Object Offset (43-bits, 8TB address space)
// | |
// | * 46-43 Metadata Bits (4-bits) 0001 = Marked0 (Address view 8-16TB)
// | 0010 = Marked1 (Address view 16-24TB)
// | 0100 = Remapped (Address view 32-40TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-47 Fixed (17-bits, always zero)
//
//
// Address Space & Pointer Layout 3
// --------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000500000000000 (80TB)
// | Remapped View |
// +--------------------------------+ 0x0000400000000000 (64TB)
// | (Reserved, but unused) |
// +--------------------------------+ 0x0000300000000000 (48TB)
// | Marked1 View |
// +--------------------------------+ 0x0000200000000000 (32TB)
// | Marked0 View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
// 6 4 4 4 4
// 3 8 7 4 3 0
// +------------------+----+-------------------------------------------------+
// |00000000 00000000 |1111|1111 11111111 11111111 11111111 11111111 11111111|
// +------------------+----+-------------------------------------------------+
// | | |
// | | * 43-0 Object Offset (44-bits, 16TB address space)
// | |
// | * 47-44 Metadata Bits (4-bits) 0001 = Marked0 (Address view 16-32TB)
// | 0010 = Marked1 (Address view 32-48TB)
// | 0100 = Remapped (Address view 64-80TB)
// | 1000 = Finalizable (Address view N/A)
// |
// * 63-48 Fixed (16-bits, always zero)
//
uintptr_t ZPlatformAddressSpaceStart() {
const uintptr_t first_heap_view_address = (uintptr_t)1 << (ZPlatformAddressMetadataShift() + 0);
const size_t min_address_offset = 0;
return first_heap_view_address + min_address_offset;
}
uintptr_t ZAddressReservedEnd() {
return ZAddressMetadataRemapped + ZAddressOffsetMax;
uintptr_t ZPlatformAddressSpaceEnd() {
const uintptr_t last_heap_view_address = (uintptr_t)1 << (ZPlatformAddressMetadataShift() + 2);
const size_t max_address_offset = (size_t)1 << ZPlatformAddressOffsetBits();
return last_heap_view_address + max_address_offset;
}
uintptr_t ZPlatformAddressReservedStart() {
return ZPlatformAddressSpaceStart();
}
uintptr_t ZPlatformAddressReservedEnd() {
return ZPlatformAddressSpaceEnd();
}
uintptr_t ZPlatformAddressBase() {
return 0;
}
size_t ZPlatformAddressOffsetBits() {
const size_t min_address_offset_bits = 42; // 4TB
const size_t max_address_offset_bits = 44; // 16TB
const size_t virtual_to_physical_ratio = 7; // 7:1
const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * virtual_to_physical_ratio);
const size_t address_offset_bits = log2_intptr(address_offset);
return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
}
size_t ZPlatformAddressMetadataShift() {
return ZPlatformAddressOffsetBits();
}
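ZPlatformAddressOffsetBits() above sizes the heap's offset field from the maximum heap size: it reserves a 7:1 virtual-to-physical ratio, rounds up to a power of two, takes the log2, and clamps the result to the 42..44 bit range that selects one of the three layouts documented above. A standalone sketch of that arithmetic (MaxHeapSize, ZUtils::round_up_power_of_2 and log2_intptr are HotSpot names; C++20 <bit> equivalents are used here):

#include <algorithm>
#include <bit>
#include <cstdint>
#include <cstdio>

// Mirror of the sizing logic in ZPlatformAddressOffsetBits(), using std::bit_ceil
// and std::bit_width in place of HotSpot's round_up_power_of_2/log2_intptr.
static size_t address_offset_bits(uint64_t max_heap_size) {
  const size_t   min_bits = 42;                 // 4TB  -> layout 1
  const size_t   max_bits = 44;                 // 16TB -> layout 3
  const uint64_t ratio    = 7;                  // 7:1 virtual-to-physical
  const uint64_t offset   = std::bit_ceil(max_heap_size * ratio);
  const size_t   bits     = std::bit_width(offset) - 1;   // log2 of a power of two
  return std::min(std::max(bits, min_bits), max_bits);
}

int main() {
  // e.g. a 1TB heap: 1TB * 7 = 7TB, rounded up to 8TB = 2^43 -> 43 offset bits (layout 2)
  std::printf("%zu\n", address_offset_bits(1ULL << 40));
  return 0;
}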

@ -35,56 +35,17 @@
// Large X*M > 4M 2M
// ------------------------------------------------------------------
//
//
// Address Space & Pointer Layout
// ------------------------------
//
// +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
// . .
// . .
// . .
// +--------------------------------+ 0x0000140000000000 (20TB)
// | Remapped View |
// +--------------------------------+ 0x0000100000000000 (16TB)
// | (Reserved, but unused) |
// +--------------------------------+ 0x00000c0000000000 (12TB)
// | Marked1 View |
// +--------------------------------+ 0x0000080000000000 (8TB)
// | Marked0 View |
// +--------------------------------+ 0x0000040000000000 (4TB)
// . .
// +--------------------------------+ 0x0000000000000000
//
//
// 6 4 4 4 4 4 0
// 3 7 6 5 2 1 0
// +-------------------+-+----+-----------------------------------------------+
// |00000000 00000000 0|0|1111|11 11111111 11111111 11111111 11111111 11111111|
// +-------------------+-+----+-----------------------------------------------+
// | | | |
// | | | * 41-0 Object Offset (42-bits, 4TB address space)
// | | |
// | | * 45-42 Metadata Bits (4-bits) 0001 = Marked0 (Address view 4-8TB)
// | | 0010 = Marked1 (Address view 8-12TB)
// | | 0100 = Remapped (Address view 16-20TB)
// | | 1000 = Finalizable (Address view N/A)
// | |
// | * 46-46 Unused (1-bit, always zero)
// |
// * 63-47 Fixed (17-bits, always zero)
//
const size_t ZPlatformGranuleSizeShift = 21; // 2MB
const size_t ZPlatformMaxHeapSizeShift = 46; // 16TB
const size_t ZPlatformNMethodDisarmedOffset = 4;
const size_t ZPlatformCacheLineSize = 64;
const size_t ZPlatformGranuleSizeShift = 21; // 2M
const size_t ZPlatformAddressOffsetBits = 42; // 4TB
const uintptr_t ZPlatformAddressMetadataShift = ZPlatformAddressOffsetBits;
const uintptr_t ZPlatformAddressSpaceStart = (uintptr_t)1 << ZPlatformAddressOffsetBits;
const uintptr_t ZPlatformAddressSpaceSize = ((uintptr_t)1 << ZPlatformAddressOffsetBits) * 4;
const size_t ZPlatformNMethodDisarmedOffset = 4;
const size_t ZPlatformCacheLineSize = 64;
uintptr_t ZPlatformAddressSpaceStart();
uintptr_t ZPlatformAddressSpaceEnd();
uintptr_t ZPlatformAddressReservedStart();
uintptr_t ZPlatformAddressReservedEnd();
uintptr_t ZPlatformAddressBase();
size_t ZPlatformAddressOffsetBits();
size_t ZPlatformAddressMetadataShift();
#endif // OS_CPU_LINUX_X86_GC_Z_ZGLOBALS_LINUX_X86_HPP
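The header above replaces the fixed layout-1 constants with functions, but the bit positions in the removed comment (and in layout 1 above) still describe how a colored pointer splits: bits 41-0 carry the object offset and bits 45-42 hold the Marked0/Marked1/Remapped/Finalizable metadata. A minimal standalone decode of those fields for the 4TB case; the constants below are read off the layout-1 diagram and this is an illustration, not ZGC code:

#include <cstdint>
#include <cstdio>

// Layout 1 (4TB heap views): 42 offset bits, 4 metadata bits at 45..42.
constexpr uint64_t kOffsetBits       = 42;
constexpr uint64_t kOffsetMask       = (uint64_t(1) << kOffsetBits) - 1;
constexpr uint64_t kMetadataMarked0  = uint64_t(1) << (kOffsetBits + 0);  // 0001, view 4-8TB
constexpr uint64_t kMetadataMarked1  = uint64_t(1) << (kOffsetBits + 1);  // 0010, view 8-12TB
constexpr uint64_t kMetadataRemapped = uint64_t(1) << (kOffsetBits + 2);  // 0100, view 16-20TB

int main() {
  // A pointer into the Remapped view at offset 0x1234 from the heap base.
  const uint64_t colored = kMetadataRemapped | 0x1234;
  std::printf("offset   = 0x%llx\n", (unsigned long long)(colored & kOffsetMask));
  std::printf("remapped = %d\n", (colored & kMetadataRemapped) != 0);
  return 0;
}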

@ -827,6 +827,7 @@ void os::verify_stack_alignment() {
*/
void os::workaround_expand_exec_shield_cs_limit() {
#if defined(IA32)
assert(Linux::initial_thread_stack_bottom() != NULL, "sanity");
size_t page_size = os::vm_page_size();
/*

@ -32,8 +32,6 @@
#include "compiler/compilerOracle.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jvmci/compilerRuntime.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "oops/method.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
@ -167,7 +165,7 @@ bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
{
// Enter critical section. Does not block for safepoint.
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
if (*_state_adr == new_state) {
// another thread already performed this transition so nothing
@ -218,7 +216,7 @@ bool AOTCompiledMethod::make_entrant() {
{
// Enter critical section. Does not block for safepoint.
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
if (*_state_adr == in_use) {
// another thread already performed this transition so nothing
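Both make_not_entrant_helper and make_entrant above follow the same shape: take Patching_lock with no safepoint check, then re-read the state and bail out if another thread already performed the transition. A tiny standalone sketch of that check-again-under-the-lock idiom; std::mutex stands in for Patching_lock and the state values are illustrative:

#include <mutex>

enum State { in_use, not_entrant };

std::mutex patching_lock;     // stands in for Patching_lock
State      state = in_use;

// Returns true only for the thread that actually performed the transition.
bool make_not_entrant_sketch() {
  std::lock_guard<std::mutex> pl(patching_lock);
  if (state == not_entrant) {
    return false;             // another thread already performed this transition
  }
  state = not_entrant;        // ... patch entry points, etc., while holding the lock ...
  return true;
}

int main() { return make_not_entrant_sketch() ? 0 : 1; }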

@ -26,8 +26,8 @@
#include "aot/aotCodeHeap.hpp"
#include "aot/aotLoader.inline.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.inline.hpp"
@ -200,7 +200,7 @@ void AOTLoader::universe_init() {
if ((*lib)->is_valid()) {
AOTCodeHeap* heap = new AOTCodeHeap(*lib);
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
add_heap(heap);
CodeCache::add_heap(heap);
}

@ -1046,7 +1046,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// Now copy code back
{
MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
//
// Deoptimization may have happened while we waited for the lock.
// In that case we don't bother to do any patching we just return
@ -1265,7 +1265,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// If we are patching in a non-perm oop, make sure the nmethod
// is on the right list.
{
MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
guarantee(nm != NULL, "only nmethods can contain non-perm oops");

@ -126,7 +126,8 @@
#define JAVA_13_VERSION 57
void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
assert((bad_constant == 19 || bad_constant == 20) && _major_version >= JAVA_9_VERSION,
assert((bad_constant == JVM_CONSTANT_Module ||
bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION,
"Unexpected bad constant pool entry");
if (_bad_constant_seen == 0) _bad_constant_seen = bad_constant;
}
@ -343,8 +344,8 @@ void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const s
}
break;
}
case 19:
case 20: {
case JVM_CONSTANT_Module:
case JVM_CONSTANT_Package: {
// Record that an error occurred in these two cases but keep parsing so
// that ACC_Module can be checked for in the access_flags. Need to
// throw NoClassDefFoundError in that case.

@ -449,7 +449,7 @@ void ClassLoaderData::record_dependency(const Klass* k) {
void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
{
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
Klass* old_value = _klasses;
k->set_next_link(old_value);
// Link the new item into the list, making sure the linked class is stable
@ -549,7 +549,7 @@ ModuleEntryTable* ClassLoaderData::modules() {
modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
{
MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
// Ensure _modules is stable, since it is examined without a lock
OrderAccess::release_store(&_modules, modules);
}
@ -743,7 +743,7 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
// Lock-free access requires load_acquire.
ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
if (metaspace == NULL) {
MutexLockerEx ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
// Check if _metaspace got allocated while we were waiting for this lock.
if ((metaspace = _metaspace) == NULL) {
if (this == the_null_class_loader_data()) {
@ -764,7 +764,7 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
}
OopHandle ClassLoaderData::add_handle(Handle h) {
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
record_modified_oops();
return OopHandle(_handles.add(h()));
}
@ -779,7 +779,7 @@ void ClassLoaderData::remove_handle(OopHandle h) {
}
void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
if (dest.resolve() != NULL) {
return;
} else {
@ -792,7 +792,7 @@ void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
// Metadata in shared region isn't deleted.
if (!m->is_shared()) {
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
if (_deallocate_list == NULL) {
_deallocate_list = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(100, true);
}
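The modules() and metaspace_non_null() hunks above pair a lock-free load_acquire on the fast path with a release_store performed under the metaspace lock, and re-check the field after taking the lock so that only one thread publishes the object. A standalone sketch of that publication pattern with std::atomic; OrderAccess and ClassLoaderMetaspace are HotSpot names, and the types here are placeholders:

#include <atomic>
#include <mutex>

struct Metaspace { int dummy = 0; };     // placeholder for ClassLoaderMetaspace

std::atomic<Metaspace*> metaspace{nullptr};
std::mutex              metaspace_lock;  // stands in for the CLD metaspace_lock

Metaspace* metaspace_non_null_sketch() {
  // Lock-free fast path: needs an acquire load to pair with the release store below.
  Metaspace* ms = metaspace.load(std::memory_order_acquire);
  if (ms == nullptr) {
    std::lock_guard<std::mutex> ml(metaspace_lock);
    // Check if it got allocated while we were waiting for this lock.
    ms = metaspace.load(std::memory_order_relaxed);
    if (ms == nullptr) {
      ms = new Metaspace();
      // Publish only after the object is fully constructed.
      metaspace.store(ms, std::memory_order_release);
    }
  }
  return ms;
}

int main() { return metaspace_non_null_sketch()->dummy; }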

@ -151,7 +151,7 @@ bool Dictionary::resize_if_needed() {
bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
// Lock the pd_set list. This lock cannot safepoint since the caller holds
// a Dictionary entry, which can be moved if the Dictionary is resized.
MutexLockerEx ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
// Ensure this doesn't show up in the pd_set (invariant)
@ -191,7 +191,7 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, Handle protection_
ProtectionDomainCacheEntry* entry = SystemDictionary::cache_get(protection_domain);
// The pd_set in the dictionary entry is protected by a low level lock.
// With concurrent PD table cleanup, these links could be broken.
MutexLockerEx ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
ProtectionDomainEntry* new_head =
new ProtectionDomainEntry(entry, pd_set());
set_pd_set(new_head);
@ -369,7 +369,7 @@ void Dictionary::clean_cached_protection_domains() {
probe = probe->next()) {
Klass* e = probe->instance_klass();
MutexLockerEx ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
ProtectionDomainEntry* current = probe->pd_set();
ProtectionDomainEntry* prev = NULL;
while (current != NULL) {
@ -460,7 +460,7 @@ void SymbolPropertyTable::methods_do(void f(Method*)) {
}
void DictionaryEntry::verify_protection_domain_set() {
MutexLockerEx ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
current != NULL;
current = current->_next) {
@ -469,7 +469,7 @@ void DictionaryEntry::verify_protection_domain_set() {
}
void DictionaryEntry::print_count(outputStream *st) {
MutexLockerEx ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag);
int count = 0;
for (ProtectionDomainEntry* current = pd_set(); // accessed inside SD lock
current != NULL;

@ -1613,7 +1613,6 @@ int java_lang_Thread::_stackSize_offset = 0;
int java_lang_Thread::_tid_offset = 0;
int java_lang_Thread::_thread_status_offset = 0;
int java_lang_Thread::_park_blocker_offset = 0;
int java_lang_Thread::_park_event_offset = 0 ;
#define THREAD_FIELDS_DO(macro) \
macro(_name_offset, k, vmSymbols::name_name(), string_signature, false); \
@ -1627,8 +1626,7 @@ int java_lang_Thread::_park_event_offset = 0 ;
macro(_stackSize_offset, k, "stackSize", long_signature, false); \
macro(_tid_offset, k, "tid", long_signature, false); \
macro(_thread_status_offset, k, "threadStatus", int_signature, false); \
macro(_park_blocker_offset, k, "parkBlocker", object_signature, false); \
macro(_park_event_offset, k, "nativeParkEventPointer", long_signature, false)
macro(_park_blocker_offset, k, "parkBlocker", object_signature, false)
void java_lang_Thread::compute_offsets() {
assert(_group_offset == 0, "offsets should be initialized only once");
@ -1745,15 +1743,6 @@ oop java_lang_Thread::park_blocker(oop java_thread) {
return java_thread->obj_field(_park_blocker_offset);
}
jlong java_lang_Thread::park_event(oop java_thread) {
return java_thread->long_field(_park_event_offset);
}
bool java_lang_Thread::set_park_event(oop java_thread, jlong ptr) {
java_thread->long_field_put(_park_event_offset, ptr);
return true;
}
const char* java_lang_Thread::thread_status_name(oop java_thread) {
ThreadStatus status = (java_lang_Thread::ThreadStatus)java_thread->int_field(_thread_status_offset);
switch (status) {
@ -2618,6 +2607,45 @@ void java_lang_StackTraceElement::fill_in(Handle element,
}
}
#if INCLUDE_JVMCI
void java_lang_StackTraceElement::decode(Handle mirror, methodHandle method, int bci, Symbol*& methodname, Symbol*& filename, int& line_number) {
int method_id = method->orig_method_idnum();
int cpref = method->name_index();
decode(mirror, method_id, method->constants()->version(), bci, cpref, methodname, filename, line_number);
}
void java_lang_StackTraceElement::decode(Handle mirror, int method_id, int version, int bci, int cpref, Symbol*& methodname, Symbol*& filename, int& line_number) {
// Fill in class name
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
Method* method = holder->method_with_orig_idnum(method_id, version);
// The method can be NULL if the requested class version is gone
Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
// Fill in method name
methodname = sym;
if (!version_matches(method, version)) {
// If the method was redefined, accurate line number information isn't available
filename = NULL;
line_number = -1;
} else {
// Fill in source file name and line number.
// Use a specific ik version as a holder since the mirror might
// refer to a version that is now obsolete and no longer accessible
// via the previous versions list.
holder = holder->get_klass_version(version);
assert(holder != NULL, "sanity check");
Symbol* source = holder->source_file_name();
if (ShowHiddenFrames && source == NULL) {
source = vmSymbols::unknown_class_name();
}
filename = source;
line_number = Backtrace::get_line_number(method, bci);
}
}
#endif // INCLUDE_JVMCI
Method* java_lang_StackFrameInfo::get_method(Handle stackFrame, InstanceKlass* holder, TRAPS) {
HandleMark hm(THREAD);
Handle mname(THREAD, stackFrame->obj_field(_memberName_offset));

@ -371,7 +371,6 @@ class java_lang_Thread : AllStatic {
static int _tid_offset;
static int _thread_status_offset;
static int _park_blocker_offset;
static int _park_event_offset ;
static void compute_offsets();
@ -413,12 +412,6 @@ class java_lang_Thread : AllStatic {
// Blocker object responsible for thread parking
static oop park_blocker(oop java_thread);
// Pointer to type-stable park handler, encoded as jlong.
// Should be set when apparently null
// For details, see unsafe.cpp Unsafe_Unpark
static jlong park_event(oop java_thread);
static bool set_park_event(oop java_thread, jlong ptr);
// Java Thread Status for JVMTI and M&M use.
// This thread status info is saved in threadStatus field of
// java.lang.Thread java class.
@ -1375,6 +1368,11 @@ class java_lang_StackTraceElement: AllStatic {
static void compute_offsets();
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
#if INCLUDE_JVMCI
static void decode(Handle mirror, int method, int version, int bci, int cpref, Symbol*& methodName, Symbol*& fileName, int& lineNumber);
static void decode(Handle mirror, methodHandle method, int bci, Symbol*& methodName, Symbol*& fileName, int& lineNumber);
#endif
// Debugging
friend class JavaClasses;
};

@ -33,7 +33,7 @@
#include "services/threadService.hpp"
#include "utilities/chunkedList.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmci.hpp"
#endif
MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
@ -73,7 +73,7 @@ MetadataOnStackMark::MetadataOnStackMark(bool walk_all_metadata, bool redefiniti
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
ThreadService::metadata_do(Metadata::mark_on_stack);
#if INCLUDE_JVMCI
JVMCIRuntime::metadata_do(Metadata::mark_on_stack);
JVMCI::metadata_do(Metadata::mark_on_stack);
#endif
}
}

@ -245,7 +245,7 @@ PackageEntry* PackageEntryTable::lookup(Symbol* name, ModuleEntry* module) {
PackageEntry* PackageEntryTable::lookup_only(Symbol* name) {
assert(!Module_lock->owned_by_self(), "should not have the Module_lock - use locked_lookup_only");
MutexLockerEx ml(Module_lock);
MutexLocker ml(Module_lock);
return locked_lookup_only(name);
}

@ -51,7 +51,7 @@ ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
}
void ProtectionDomainCacheTable::trigger_cleanup() {
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
_dead_entries = true;
Service_lock->notify_all();
}

@ -242,7 +242,7 @@ size_t StringTable::table_size() {
}
void StringTable::trigger_concurrent_work() {
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
the_table()->_has_work = true;
Service_lock->notify_all();
}

@ -150,7 +150,7 @@ SymbolTable::SymbolTable() :
void SymbolTable::delete_symbol(Symbol* sym) {
if (sym->refcount() == PERM_REFCOUNT) {
MutexLockerEx ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
// Deleting permanent symbol should not occur very often (insert race condition),
// so log it.
log_trace_symboltable_helper(sym, "Freeing permanent symbol");
@ -190,7 +190,7 @@ size_t SymbolTable::table_size() {
}
void SymbolTable::trigger_cleanup() {
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
SymbolTable::the_table()->_has_work = true;
Service_lock->notify_all();
}
@ -208,7 +208,7 @@ Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap, TRA
assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted");
} else {
// Allocate to global arena
MutexLockerEx ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
sym = new (len, arena(), THREAD) Symbol((const u1*)name, len, PERM_REFCOUNT);
}
return sym;
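The ProtectionDomainCacheTable, StringTable and SymbolTable hunks above all wake the service thread the same way: take Service_lock with no safepoint check, set a has-work flag, and notify_all. A standalone sketch of that hand-off with a condition variable; the worker loop here is illustrative, since in HotSpot it lives in the service thread:

#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex              service_lock;   // stands in for Service_lock
std::condition_variable service_cv;
bool                    has_work = false;

// Equivalent of trigger_cleanup()/trigger_concurrent_work(): flag the work and wake the worker.
void trigger_cleanup_sketch() {
  std::lock_guard<std::mutex> ml(service_lock);
  has_work = true;
  service_cv.notify_all();
}

int main() {
  std::thread service([] {
    std::unique_lock<std::mutex> ml(service_lock);
    service_cv.wait(ml, [] { return has_work; });
    has_work = false;                    // ... perform the concurrent cleanup ...
  });
  trigger_cleanup_sketch();
  service.join();
  return 0;
}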

@ -86,9 +86,6 @@
#if INCLUDE_CDS
#include "classfile/systemDictionaryShared.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif
@ -1824,10 +1821,10 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
unloading_occurred = ClassLoaderDataGraph::do_unloading();
if (unloading_occurred) {
MutexLockerEx ml2(is_concurrent ? Module_lock : NULL);
MutexLocker ml2(is_concurrent ? Module_lock : NULL);
JFR_ONLY(Jfr::on_unloading_classes();)
MutexLockerEx ml1(is_concurrent ? SystemDictionary_lock : NULL);
MutexLocker ml1(is_concurrent ? SystemDictionary_lock : NULL);
ClassLoaderDataGraph::clean_module_and_package_info();
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
@ -1922,13 +1919,6 @@ bool SystemDictionary::resolve_wk_klass(WKID id, TRAPS) {
Symbol* symbol = vmSymbols::symbol_at((vmSymbols::SID)sid);
InstanceKlass** klassp = &_well_known_klasses[id];
#if INCLUDE_JVMCI
if (id >= FIRST_JVMCI_WKID) {
assert(EnableJVMCI, "resolve JVMCI classes only when EnableJVMCI is true");
}
#endif
if ((*klassp) == NULL) {
Klass* k = resolve_or_fail(symbol, true, CHECK_0);
(*klassp) = InstanceKlass::cast(k);
@ -2017,7 +2007,7 @@ void SystemDictionary::resolve_well_known_classes(TRAPS) {
WKID jsr292_group_end = WK_KLASS_ENUM_NAME(VolatileCallSite_klass);
resolve_wk_klasses_until(jsr292_group_start, scan, CHECK);
resolve_wk_klasses_through(jsr292_group_end, scan, CHECK);
WKID last = NOT_JVMCI(WKID_LIMIT) JVMCI_ONLY(FIRST_JVMCI_WKID);
WKID last = WKID_LIMIT;
resolve_wk_klasses_until(last, scan, CHECK);
_box_klasses[T_BOOLEAN] = WK_KLASS(Boolean_klass);
@ -2750,106 +2740,61 @@ Handle SystemDictionary::link_method_handle_constant(Klass* caller,
return Handle(THREAD, (oop) result.get_jobject());
}
// Ask Java to compute a constant by invoking a BSM given a Dynamic_info CP entry
Handle SystemDictionary::link_dynamic_constant(Klass* caller,
int condy_index,
Handle bootstrap_specifier,
Symbol* name,
Symbol* type,
TRAPS) {
Handle empty;
Handle bsm, info;
if (java_lang_invoke_MethodHandle::is_instance(bootstrap_specifier())) {
bsm = bootstrap_specifier;
} else {
assert(bootstrap_specifier->is_objArray(), "");
objArrayOop args = (objArrayOop) bootstrap_specifier();
assert(args->length() == 2, "");
bsm = Handle(THREAD, args->obj_at(0));
info = Handle(THREAD, args->obj_at(1));
}
guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()),
"caller must supply a valid BSM");
// Ask Java to run a bootstrap method, in order to create a dynamic call site
// while linking an invokedynamic op, or compute a constant for Dynamic_info CP entry
// with linkage results being stored back into the bootstrap specifier.
void SystemDictionary::invoke_bootstrap_method(BootstrapInfo& bootstrap_specifier, TRAPS) {
// Resolve the bootstrap specifier, its name, type, and static arguments
bootstrap_specifier.resolve_bsm(CHECK);
// This should not happen. JDK code should take care of that.
if (caller == NULL) {
THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad dynamic constant", empty);
if (bootstrap_specifier.caller() == NULL || bootstrap_specifier.type_arg().is_null()) {
THROW_MSG(vmSymbols::java_lang_InternalError(), "Invalid bootstrap method invocation with no caller or type argument");
}
Handle constant_name = java_lang_String::create_from_symbol(name, CHECK_(empty));
bool is_indy = bootstrap_specifier.is_method_call();
objArrayHandle appendix_box;
if (is_indy) {
// Some method calls may require an appendix argument. Arrange to receive it.
appendix_box = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), 1, CHECK);
assert(appendix_box->obj_at(0) == NULL, "");
}
// Resolve the constant type in the context of the caller class
Handle type_mirror = find_java_mirror_for_type(type, caller, SignatureStream::NCDFError,
CHECK_(empty));
// call java.lang.invoke.MethodHandleNatives::linkConstantDyanmic(caller, condy_index, bsm, type, info)
// call condy: java.lang.invoke.MethodHandleNatives::linkDynamicConstant(caller, condy_index, bsm, type, info)
// indy: java.lang.invoke.MethodHandleNatives::linkCallSite(caller, indy_index, bsm, name, mtype, info, &appendix)
JavaCallArguments args;
args.push_oop(Handle(THREAD, caller->java_mirror()));
args.push_int(condy_index);
args.push_oop(bsm);
args.push_oop(constant_name);
args.push_oop(type_mirror);
args.push_oop(info);
args.push_oop(Handle(THREAD, bootstrap_specifier.caller_mirror()));
args.push_int(bootstrap_specifier.bss_index());
args.push_oop(bootstrap_specifier.bsm());
args.push_oop(bootstrap_specifier.name_arg());
args.push_oop(bootstrap_specifier.type_arg());
args.push_oop(bootstrap_specifier.arg_values());
if (is_indy) {
args.push_oop(appendix_box);
}
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::linkDynamicConstant_name(),
vmSymbols::linkDynamicConstant_signature(),
&args, CHECK_(empty));
is_indy ? vmSymbols::linkCallSite_name() : vmSymbols::linkDynamicConstant_name(),
is_indy ? vmSymbols::linkCallSite_signature() : vmSymbols::linkDynamicConstant_signature(),
&args, CHECK);
return Handle(THREAD, (oop) result.get_jobject());
}
// Ask Java code to find or construct a java.lang.invoke.CallSite for the given
// name and signature, as interpreted relative to the given class loader.
methodHandle SystemDictionary::find_dynamic_call_site_invoker(Klass* caller,
int indy_index,
Handle bootstrap_specifier,
Symbol* name,
Symbol* type,
Handle *appendix_result,
TRAPS) {
methodHandle empty;
Handle bsm, info;
if (java_lang_invoke_MethodHandle::is_instance(bootstrap_specifier())) {
bsm = bootstrap_specifier;
Handle value(THREAD, (oop) result.get_jobject());
if (is_indy) {
Handle appendix;
methodHandle method = unpack_method_and_appendix(value,
bootstrap_specifier.caller(),
appendix_box,
&appendix, CHECK);
bootstrap_specifier.set_resolved_method(method, appendix);
} else {
objArrayOop args = (objArrayOop) bootstrap_specifier();
assert(args->length() == 2, "");
bsm = Handle(THREAD, args->obj_at(0));
info = Handle(THREAD, args->obj_at(1));
}
guarantee(java_lang_invoke_MethodHandle::is_instance(bsm()),
"caller must supply a valid BSM");
Handle method_name = java_lang_String::create_from_symbol(name, CHECK_(empty));
Handle method_type = find_method_handle_type(type, caller, CHECK_(empty));
// This should not happen. JDK code should take care of that.
if (caller == NULL || method_type.is_null()) {
THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokedynamic", empty);
bootstrap_specifier.set_resolved_value(value);
}
objArrayHandle appendix_box = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), 1, CHECK_(empty));
assert(appendix_box->obj_at(0) == NULL, "");
// call java.lang.invoke.MethodHandleNatives::linkCallSite(caller, indy_index, bsm, name, mtype, info, &appendix)
JavaCallArguments args;
args.push_oop(Handle(THREAD, caller->java_mirror()));
args.push_int(indy_index);
args.push_oop(bsm);
args.push_oop(method_name);
args.push_oop(method_type);
args.push_oop(info);
args.push_oop(appendix_box);
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::MethodHandleNatives_klass(),
vmSymbols::linkCallSite_name(),
vmSymbols::linkCallSite_signature(),
&args, CHECK_(empty));
Handle mname(THREAD, (oop) result.get_jobject());
return unpack_method_and_appendix(mname, caller, appendix_box, appendix_result, THREAD);
// sanity check
assert(bootstrap_specifier.is_resolved() ||
(bootstrap_specifier.is_method_call() &&
bootstrap_specifier.resolved_method().not_null()), "bootstrap method call failed");
}
// Protection domain cache table handling

@ -26,7 +26,6 @@
#define SHARE_CLASSFILE_SYSTEMDICTIONARY_HPP
#include "classfile/classLoaderData.hpp"
#include "jvmci/systemDictionary_jvmci.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/symbol.hpp"
#include "runtime/java.hpp"
@ -74,6 +73,7 @@
// of placeholders must hold the SystemDictionary_lock.
//
class BootstrapInfo;
class ClassFileStream;
class Dictionary;
class PlaceholderTable;
@ -214,13 +214,11 @@ class OopStorage;
do_klass(Integer_klass, java_lang_Integer ) \
do_klass(Long_klass, java_lang_Long ) \
\
/* JVMCI classes. These are loaded on-demand. */ \
JVMCI_WK_KLASSES_DO(do_klass) \
\
/*end*/
class SystemDictionary : AllStatic {
friend class BootstrapInfo;
friend class VMStructs;
friend class SystemDictionaryHandles;
@ -234,11 +232,6 @@ class SystemDictionary : AllStatic {
WKID_LIMIT,
#if INCLUDE_JVMCI
FIRST_JVMCI_WKID = WK_KLASS_ENUM_NAME(JVMCI_klass),
LAST_JVMCI_WKID = WK_KLASS_ENUM_NAME(Value_klass),
#endif
FIRST_WKID = NO_WKID + 1
};
@ -531,21 +524,7 @@ public:
TRAPS);
// ask Java to compute a constant by invoking a BSM given a Dynamic_info CP entry
static Handle link_dynamic_constant(Klass* caller,
int condy_index,
Handle bootstrap_specifier,
Symbol* name,
Symbol* type,
TRAPS);
// ask Java to create a dynamic call site, while linking an invokedynamic op
static methodHandle find_dynamic_call_site_invoker(Klass* caller,
int indy_index,
Handle bootstrap_method,
Symbol* name,
Symbol* type,
Handle *appendix_result,
TRAPS);
static void invoke_bootstrap_method(BootstrapInfo& bootstrap_specifier, TRAPS);
// Record the error when the first attempt to resolve a reference from a constant
// pool entry to a class fails.

@ -358,8 +358,7 @@
template(destroyed_name, "destroyed") \
template(nthreads_name, "nthreads") \
template(ngroups_name, "ngroups") \
template(shutdown_method_name, "shutdown") \
template(bootstrapFinished_method_name, "bootstrapFinished") \
template(shutdown_name, "shutdown") \
template(finalize_method_name, "finalize") \
template(reference_lock_name, "lock") \
template(reference_discovered_name, "discovered") \

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -227,7 +227,7 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
size += align_up(buffer_size, oopSize);
assert(name != NULL, "must provide a name");
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) BufferBlob(name, size);
}
// Track memory usage statistic after releasing CodeCache_lock
@ -248,7 +248,7 @@ BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
assert(name != NULL, "must provide a name");
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) BufferBlob(name, size, cb);
}
// Track memory usage statistic after releasing CodeCache_lock
@ -265,7 +265,7 @@ void BufferBlob::free(BufferBlob *blob) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
blob->flush();
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free((RuntimeBlob*)blob);
}
// Track memory usage statistic after releasing CodeCache_lock
@ -287,7 +287,7 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
AdapterBlob* blob = NULL;
unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) AdapterBlob(size, cb);
}
// Track memory usage statistic after releasing CodeCache_lock
@ -310,7 +310,7 @@ VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
size += align_up(buffer_size, oopSize);
assert(name != NULL, "must provide a name");
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) VtableBlob(name, size);
}
// Track memory usage statistic after releasing CodeCache_lock
@ -331,7 +331,7 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
size = CodeBlob::align_code_offset(size);
size += align_up(buffer_size, oopSize);
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) MethodHandlesAdapterBlob(size);
if (blob == NULL) {
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
@ -369,7 +369,7 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
RuntimeStub* stub = NULL;
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
}
@ -428,7 +428,7 @@ DeoptimizationBlob* DeoptimizationBlob::create(
DeoptimizationBlob* blob = NULL;
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
blob = new (size) DeoptimizationBlob(cb,
size,
@ -467,7 +467,7 @@ UncommonTrapBlob* UncommonTrapBlob::create(
UncommonTrapBlob* blob = NULL;
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
}
@ -503,7 +503,7 @@ ExceptionBlob* ExceptionBlob::create(
ExceptionBlob* blob = NULL;
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
}
@ -538,7 +538,7 @@ SafepointBlob* SafepointBlob::create(
SafepointBlob* blob = NULL;
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
}

@ -531,7 +531,7 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_t
return allocate(size, type, orig_code_blob_type);
}
}
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompileBroker::handle_full_code_cache(orig_code_blob_type);
return NULL;
}
@ -792,7 +792,7 @@ CodeCache::UnloadingScope::~UnloadingScope() {
}
void CodeCache::verify_oops() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
VerifyOopClosure voc;
NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
while(iter.next()) {
@ -989,7 +989,7 @@ void CodeCache::cleanup_inline_caches() {
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int number_of_marked_CodeBlobs = 0;
// search the hierarchy looking for nmethods which are affected by the loading of this class
@ -1154,7 +1154,7 @@ void CodeCache::flush_evol_dependents() {
// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
while(iter.next()) {
CompiledMethod* nm = iter.method();
@ -1165,7 +1165,7 @@ void CodeCache::mark_all_nmethods_for_deoptimization() {
}
int CodeCache::mark_for_deoptimization(Method* dependee) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int number_of_marked_CodeBlobs = 0;
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
@ -1289,7 +1289,7 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {
stringStream s;
// Dump code cache into a buffer before locking the tty.
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
print_summary(&s);
}
{
@ -1557,7 +1557,7 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
}
void CodeCache::print_codelist(outputStream* st) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
while (iter.next()) {
@ -1572,7 +1572,7 @@ void CodeCache::print_codelist(outputStream* st) {
}
void CodeCache::print_layout(outputStream* st) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
ResourceMark rm;
print_summary(st, true);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -258,7 +258,7 @@ void InlineCacheBuffer::release_pending_icholders() {
// not safe to free them until then since they might be visible to
// another thread.
void InlineCacheBuffer::queue_for_release(CompiledICHolder* icholder) {
MutexLockerEx mex(InlineCacheBuffer_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mex(InlineCacheBuffer_lock, Mutex::_no_safepoint_check_flag);
icholder->set_next(_pending_released);
_pending_released = icholder;
_pending_count++;

@ -64,7 +64,7 @@
#include "utilities/resourceHash.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#include "jvmci/jvmciRuntime.hpp"
#endif
#ifdef DTRACE_ENABLED
@ -112,6 +112,10 @@ struct java_nmethod_stats_struct {
int dependencies_size;
int handler_table_size;
int nul_chk_table_size;
#if INCLUDE_JVMCI
int speculations_size;
int jvmci_data_size;
#endif
int oops_size;
int metadata_size;
@ -129,6 +133,10 @@ struct java_nmethod_stats_struct {
dependencies_size += nm->dependencies_size();
handler_table_size += nm->handler_table_size();
nul_chk_table_size += nm->nul_chk_table_size();
#if INCLUDE_JVMCI
speculations_size += nm->speculations_size();
jvmci_data_size += nm->jvmci_data_size();
#endif
}
void print_nmethod_stats(const char* name) {
if (nmethod_count == 0) return;
@ -146,6 +154,10 @@ struct java_nmethod_stats_struct {
if (dependencies_size != 0) tty->print_cr(" dependencies = %d", dependencies_size);
if (handler_table_size != 0) tty->print_cr(" handler table = %d", handler_table_size);
if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %d", nul_chk_table_size);
#if INCLUDE_JVMCI
if (speculations_size != 0) tty->print_cr(" speculations = %d", speculations_size);
if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %d", jvmci_data_size);
#endif
}
};
@ -426,11 +438,6 @@ void nmethod::init_defaults() {
#if INCLUDE_RTM_OPT
_rtm_state = NoRTM;
#endif
#if INCLUDE_JVMCI
_jvmci_installed_code = NULL;
_speculation_log = NULL;
_jvmci_installed_code_triggers_invalidation = false;
#endif
}
nmethod* nmethod::new_native_nmethod(const methodHandle& method,
@ -446,7 +453,7 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method,
// create nmethod
nmethod* nm = NULL;
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
CodeOffsets offsets;
offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
@ -483,8 +490,11 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
AbstractCompiler* compiler,
int comp_level
#if INCLUDE_JVMCI
, jweak installed_code,
jweak speculationLog
, char* speculations,
int speculations_len,
int nmethod_mirror_index,
const char* nmethod_mirror_name,
FailedSpeculation** failed_speculations
#endif
)
{
@ -492,13 +502,20 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
code_buffer->finalize_oop_references(method);
// create nmethod
nmethod* nm = NULL;
{ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
{ MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
#if INCLUDE_JVMCI
int jvmci_data_size = !compiler->is_jvmci() ? 0 : JVMCINMethodData::compute_size(nmethod_mirror_name);
#endif
int nmethod_size =
CodeBlob::allocation_size(code_buffer, sizeof(nmethod))
+ adjust_pcs_size(debug_info->pcs_size())
+ align_up((int)dependencies->size_in_bytes(), oopSize)
+ align_up(handler_table->size_in_bytes() , oopSize)
+ align_up(nul_chk_table->size_in_bytes() , oopSize)
#if INCLUDE_JVMCI
+ align_up(speculations_len , oopSize)
+ align_up(jvmci_data_size , oopSize)
#endif
+ align_up(debug_info->data_size() , oopSize);
nm = new (nmethod_size, comp_level)
@ -510,12 +527,19 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
compiler,
comp_level
#if INCLUDE_JVMCI
, installed_code,
speculationLog
, speculations,
speculations_len,
jvmci_data_size
#endif
);
if (nm != NULL) {
#if INCLUDE_JVMCI
if (compiler->is_jvmci()) {
// Initialize the JVMCINMethodData object inlined into nm
nm->jvmci_nmethod_data()->initialize(nmethod_mirror_index, nmethod_mirror_name, failed_speculations);
}
#endif
// To make dependency checking during class loading fast, record
// the nmethod dependencies in the classes it is dependent on.
// This allows the dependency checking code to simply walk the
@ -591,7 +615,13 @@ nmethod::nmethod(
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
_nul_chk_table_offset = _handler_table_offset;
#if INCLUDE_JVMCI
_speculations_offset = _nul_chk_table_offset;
_jvmci_data_offset = _speculations_offset;
_nmethod_end_offset = _jvmci_data_offset;
#else
_nmethod_end_offset = _nul_chk_table_offset;
#endif
_compile_id = compile_id;
_comp_level = CompLevel_none;
_entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
@ -667,8 +697,9 @@ nmethod::nmethod(
AbstractCompiler* compiler,
int comp_level
#if INCLUDE_JVMCI
, jweak installed_code,
jweak speculation_log
, char* speculations,
int speculations_len,
int jvmci_data_size
#endif
)
: CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
@ -697,15 +728,6 @@ nmethod::nmethod(
set_ctable_begin(header_begin() + _consts_offset);
#if INCLUDE_JVMCI
_jvmci_installed_code = installed_code;
_speculation_log = speculation_log;
oop obj = JNIHandles::resolve(installed_code);
if (obj == NULL || (obj->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(obj))) {
_jvmci_installed_code_triggers_invalidation = false;
} else {
_jvmci_installed_code_triggers_invalidation = true;
}
if (compiler->is_jvmci()) {
// JVMCI might not produce any stub sections
if (offsets->value(CodeOffsets::Exceptions) != -1) {
@ -735,10 +757,10 @@ nmethod::nmethod(
_deopt_mh_handler_begin = (address) this + _stub_offset + offsets->value(CodeOffsets::DeoptMH);
} else {
_deopt_mh_handler_begin = NULL;
}
#if INCLUDE_JVMCI
}
#endif
}
if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
_unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
} else {
@ -753,7 +775,13 @@ nmethod::nmethod(
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes (), oopSize);
_nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
#if INCLUDE_JVMCI
_speculations_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
_jvmci_data_offset = _speculations_offset + align_up(speculations_len, oopSize);
_nmethod_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize);
#else
_nmethod_end_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
#endif
_entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
_verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
_osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
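The constructor hunks above extend the nmethod's single contiguous allocation with two trailing JVMCI sections: each section's offset is the previous offset plus the previous section's size rounded up to oopSize, and with JVMCI enabled the speculations and JVMCI data slots are appended before _nmethod_end_offset. A standalone sketch of laying out sections by running aligned offsets; align_up mirrors the HotSpot helper and the section sizes below are purely illustrative:

#include <cstdio>

// Round `size` up to a multiple of `alignment` (a power of two), like HotSpot's align_up.
static int align_up(int size, int alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int oopSize = 8;                     // word size on a 64-bit VM
  // Illustrative raw section sizes, in the order they appear in the nmethod body.
  const int pcs_size = 100, deps_size = 24, handler_size = 40,
            nul_chk_size = 12, speculations_len = 20, jvmci_data_size = 64;

  int scopes_pcs_offset   = 0;               // start of the trailing sections
  int dependencies_offset = scopes_pcs_offset   + align_up(pcs_size, oopSize);
  int handler_offset      = dependencies_offset + align_up(deps_size, oopSize);
  int nul_chk_offset      = handler_offset      + align_up(handler_size, oopSize);
  int speculations_offset = nul_chk_offset      + align_up(nul_chk_size, oopSize);
  int jvmci_data_offset   = speculations_offset + align_up(speculations_len, oopSize);
  int end_offset          = jvmci_data_offset   + align_up(jvmci_data_size, oopSize);

  std::printf("speculations at %d, jvmci data at %d, end at %d\n",
              speculations_offset, jvmci_data_offset, end_offset);
  return 0;
}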
@ -779,6 +807,13 @@ nmethod::nmethod(
handler_table->copy_to(this);
nul_chk_table->copy_to(this);
#if INCLUDE_JVMCI
// Copy speculations to nmethod
if (speculations_size() != 0) {
memcpy(speculations_begin(), speculations, speculations_len);
}
#endif
// we use the information of entry points to find out if a method is
// static or non static
assert(compiler->is_c2() || compiler->is_jvmci() ||
@ -798,13 +833,14 @@ void nmethod::log_identity(xmlStream* log) const {
log->print(" level='%d'", comp_level());
}
#if INCLUDE_JVMCI
char buffer[O_BUFLEN];
char* jvmci_name = jvmci_installed_code_name(buffer, O_BUFLEN);
if (jvmci_nmethod_data() != NULL) {
const char* jvmci_name = jvmci_nmethod_data()->name();
if (jvmci_name != NULL) {
log->print(" jvmci_installed_code_name='");
log->print(" jvmci_mirror_name='");
log->text("%s", jvmci_name);
log->print("'");
}
}
#endif
}
@ -1103,7 +1139,7 @@ void nmethod::make_unloaded() {
// Unregister must be done before the state change
{
MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
Mutex::_no_safepoint_check_flag);
Universe::heap()->unregister_nmethod(this);
CodeCache::unregister_old_nmethod(this);
@ -1115,13 +1151,6 @@ void nmethod::make_unloaded() {
// Log the unloading.
log_state_change();
#if INCLUDE_JVMCI
// The method can only be unloaded after the pointer to the installed code
// Java wrapper is no longer alive. Here we need to clear out this weak
// reference to the dead object.
maybe_invalidate_installed_code();
#endif
// The Method* is gone at this point
assert(_method == NULL, "Tautology");
@ -1134,6 +1163,15 @@ void nmethod::make_unloaded() {
// concurrent nmethod unloading. Therefore, there is no need for
// acquire on the loader side.
OrderAccess::release_store(&_state, (signed char)unloaded);
#if INCLUDE_JVMCI
// Clear the link between this nmethod and a HotSpotNmethod mirror
JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
if (nmethod_data != NULL) {
nmethod_data->invalidate_nmethod_mirror(this);
nmethod_data->clear_nmethod_mirror(this);
}
#endif
}
void nmethod::invalidate_osr_method() {
@ -1222,7 +1260,7 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
}
// Enter critical section. Does not block for safepoint.
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
if (_state == state) {
// another thread already performed this transition so nothing
@ -1265,13 +1303,18 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
// Log the transition once
log_state_change();
// Invalidate while holding the patching lock
JVMCI_ONLY(maybe_invalidate_installed_code());
// Remove nmethod from method.
unlink_from_method(false /* already owns Patching_lock */);
} // leave critical region under Patching_lock
#if INCLUDE_JVMCI
// Invalidate can't occur while holding the Patching lock
JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
if (nmethod_data != NULL) {
nmethod_data->invalidate_nmethod_mirror(this);
}
#endif
#ifdef ASSERT
if (is_osr_method() && method() != NULL) {
// Make sure osr nmethod is invalidated, i.e. not on the list
@ -1289,7 +1332,7 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
// Flushing dependencies must be done before any possible
// safepoint can sneak in, otherwise the oops used by the
// dependency logic could have become stale.
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
if (nmethod_needs_unregister) {
Universe::heap()->unregister_nmethod(this);
CodeCache::unregister_old_nmethod(this);
@ -1297,6 +1340,14 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
flush_dependencies(/*delete_immediately*/true);
}
#if INCLUDE_JVMCI
// Now that the nmethod has been unregistered, it's
// safe to clear the HotSpotNmethod mirror oop.
if (nmethod_data != NULL) {
nmethod_data->clear_nmethod_mirror(this);
}
#endif
// Clear ICStubs to prevent back patching stubs of zombie or flushed
// nmethods during the next safepoint (see ICStub::finalize), as well
// as to free up CompiledICHolder resources.
@ -1324,7 +1375,7 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
assert(state == not_entrant, "other cases may need to be handled differently");
}
if (TraceCreateZombies) {
if (TraceCreateZombies && state == zombie) {
ResourceMark m;
tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", (state == not_entrant) ? "not entrant" : "zombie");
}
@ -1334,7 +1385,7 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
}
void nmethod::flush() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// Note that there are no valid oops in the nmethod anymore.
assert(!is_osr_method() || is_unloaded() || is_zombie(),
"osr nmethod must be unloaded or zombie before flushing");
@ -1362,11 +1413,6 @@ void nmethod::flush() {
ec = next;
}
#if INCLUDE_JVMCI
assert(_jvmci_installed_code == NULL, "should have been nulled out when transitioned to zombie");
assert(_speculation_log == NULL, "should have been nulled out when transitioned to zombie");
#endif
Universe::heap()->flush_nmethod(this);
CodeBlob::flush();
@ -1452,7 +1498,7 @@ void nmethod::post_compiled_method_load_event() {
if (JvmtiExport::should_post_compiled_method_load()) {
// Let the Service thread (which is a real Java thread) post the event
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
JvmtiDeferredEventQueue::enqueue(
JvmtiDeferredEvent::compiled_method_load_event(this));
}
@ -1490,7 +1536,7 @@ void nmethod::post_compiled_method_unload() {
JvmtiDeferredEvent event =
JvmtiDeferredEvent::compiled_method_unload_event(this,
_jmethod_id, insts_begin());
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
JvmtiDeferredEventQueue::enqueue(event);
}
@ -1660,17 +1706,6 @@ void nmethod::do_unloading(bool unloading_occurred) {
if (is_unloading()) {
make_unloaded();
} else {
#if INCLUDE_JVMCI
if (_jvmci_installed_code != NULL) {
if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
if (_jvmci_installed_code_triggers_invalidation) {
make_not_entrant();
}
clear_jvmci_installed_code();
}
}
#endif
guarantee(unload_nmethod_caches(unloading_occurred),
"Should not need transition stubs");
}
@ -2066,7 +2101,7 @@ void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) {
if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method
nmethod* nm = cm->as_nmethod();
Atomic::inc(&nm->_lock_count);
assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method: %p", nm);
}
void nmethodLocker::unlock_nmethod(CompiledMethod* cm) {
@ -2275,6 +2310,16 @@ void nmethod::print() const {
p2i(nul_chk_table_begin()),
p2i(nul_chk_table_end()),
nul_chk_table_size());
#if INCLUDE_JVMCI
if (speculations_size () > 0) tty->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
p2i(speculations_begin()),
p2i(speculations_end()),
speculations_size());
if (jvmci_data_size () > 0) tty->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
p2i(jvmci_data_begin()),
p2i(jvmci_data_end()),
jvmci_data_size());
#endif
}
#ifndef PRODUCT
@ -2857,115 +2902,18 @@ void nmethod::print_statistics() {
#endif // !PRODUCT
#if INCLUDE_JVMCI
void nmethod::clear_jvmci_installed_code() {
assert_locked_or_safepoint(Patching_lock);
if (_jvmci_installed_code != NULL) {
JNIHandles::destroy_weak_global(_jvmci_installed_code);
_jvmci_installed_code = NULL;
void nmethod::update_speculation(JavaThread* thread) {
jlong speculation = thread->pending_failed_speculation();
if (speculation != 0) {
guarantee(jvmci_nmethod_data() != NULL, "failed speculation in nmethod without failed speculation list");
jvmci_nmethod_data()->add_failed_speculation(this, speculation);
thread->set_pending_failed_speculation(0);
}
}
void nmethod::clear_speculation_log() {
assert_locked_or_safepoint(Patching_lock);
if (_speculation_log != NULL) {
JNIHandles::destroy_weak_global(_speculation_log);
_speculation_log = NULL;
}
}
void nmethod::maybe_invalidate_installed_code() {
if (!is_compiled_by_jvmci()) {
return;
}
assert(Patching_lock->is_locked() ||
SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
if (installed_code != NULL) {
// Update the values in the InstalledCode instance if it still refers to this nmethod
nmethod* nm = (nmethod*)InstalledCode::address(installed_code);
if (nm == this) {
if (!is_alive() || is_unloading()) {
// Break the link between nmethod and InstalledCode such that the nmethod
// can subsequently be flushed safely. The link must be maintained while
// the method could have live activations since invalidateInstalledCode
// might want to invalidate all existing activations.
InstalledCode::set_address(installed_code, 0);
InstalledCode::set_entryPoint(installed_code, 0);
} else if (is_not_entrant()) {
// Remove the entry point so any invocation will fail but keep
// the address link around so that existing activations can
// be invalidated.
InstalledCode::set_entryPoint(installed_code, 0);
}
}
}
if (!is_alive() || is_unloading()) {
// Clear these out after the nmethod has been unregistered and any
// updates to the InstalledCode instance have been performed.
clear_jvmci_installed_code();
clear_speculation_log();
}
}
void nmethod::invalidate_installed_code(Handle installedCode, TRAPS) {
if (installedCode() == NULL) {
THROW(vmSymbols::java_lang_NullPointerException());
}
jlong nativeMethod = InstalledCode::address(installedCode);
nmethod* nm = (nmethod*)nativeMethod;
if (nm == NULL) {
// Nothing to do
return;
}
nmethodLocker nml(nm);
#ifdef ASSERT
{
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
// This relationship can only be checked safely under a lock
assert(!nm->is_alive() || nm->is_unloading() || nm->jvmci_installed_code() == installedCode(), "sanity check");
}
#endif
if (nm->is_alive()) {
// Invalidating the InstalledCode means we want the nmethod
// to be deoptimized.
nm->mark_for_deoptimization();
VM_Deoptimize op;
VMThread::execute(&op);
}
// Multiple threads could reach this point so we now need to
// lock and re-check the link to the nmethod so that only one
// thread clears it.
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
if (InstalledCode::address(installedCode) == nativeMethod) {
InstalledCode::set_address(installedCode, 0);
}
}
oop nmethod::jvmci_installed_code() {
return JNIHandles::resolve(_jvmci_installed_code);
}
oop nmethod::speculation_log() {
return JNIHandles::resolve(_speculation_log);
}
char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) const {
if (!this->is_compiled_by_jvmci()) {
return NULL;
}
oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
if (installed_code != NULL) {
oop installed_code_name = NULL;
if (installed_code->is_a(InstalledCode::klass())) {
installed_code_name = InstalledCode::name(installed_code);
}
if (installed_code_name != NULL) {
return java_lang_String::as_utf8_string(installed_code_name, buf, (int)buflen);
}
const char* nmethod::jvmci_name() {
if (jvmci_nmethod_data() != NULL) {
return jvmci_nmethod_data()->name();
}
return NULL;
}
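The weak-handle machinery removed above is replaced by nmethod::update_speculation(), which drains the thread-local pending failed speculation into the JVMCINMethodData embedded in the nmethod. A stripped-down model of that hand-off, with placeholder types standing in for JavaThread and JVMCINMethodData (not HotSpot code):

```
#include <cstdint>
#include <vector>

// Placeholder stand-ins for JavaThread and JVMCINMethodData; only the fields
// needed to show the hand-off are modelled.
struct ThreadModel {
  int64_t pending_failed_speculation = 0;
};

struct NMethodDataModel {
  std::vector<int64_t> failed_speculations;
  void add_failed_speculation(int64_t s) { failed_speculations.push_back(s); }
};

// Shape of nmethod::update_speculation(): a nonzero pending value recorded on
// the thread is appended to the nmethod's failed-speculation list and then
// cleared on the thread.
void update_speculation(ThreadModel* thread, NMethodDataModel* data) {
  int64_t speculation = thread->pending_failed_speculation;
  if (speculation != 0) {
    data->add_failed_speculation(speculation);
    thread->pending_failed_speculation = 0;
  }
}
```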

@ -51,37 +51,28 @@ class DebugInformationRecorder;
// - handler entry point array
// [Implicit Null Pointer exception table]
// - implicit null table array
// [Speculations]
// - encoded speculations array
// [JVMCINMethodData]
// - meta data for JVMCI compiled nmethod
#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif
class nmethod : public CompiledMethod {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class NMethodSweeper;
friend class CodeCache; // scavengable oops
friend class JVMCINMethodData;
private:
// Shared fields for all nmethod's
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
jmethodID _jmethod_id; // Cache of method()->jmethod_id()
#if INCLUDE_JVMCI
// A weak reference to an InstalledCode object associated with
// this nmethod.
jweak _jvmci_installed_code;
// A weak reference to a SpeculationLog object associated with
// this nmethod.
jweak _speculation_log;
// Determines whether this nmethod is unloaded when the
// referent in _jvmci_installed_code is cleared. This
// will be false if the referent is initialized to a
// HotSpotNMethod object whose isDefault field is true.
// That is, installed code other than a "default"
// HotSpotNMethod causes nmethod unloading.
// This field is ignored once _jvmci_installed_code is NULL.
bool _jvmci_installed_code_triggers_invalidation;
#endif
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
@ -107,6 +98,10 @@ class nmethod : public CompiledMethod {
int _dependencies_offset;
int _handler_table_offset;
int _nul_chk_table_offset;
#if INCLUDE_JVMCI
int _speculations_offset;
int _jvmci_data_offset;
#endif
int _nmethod_end_offset;
int code_offset() const { return (address) code_begin() - header_begin(); }
@ -207,8 +202,9 @@ class nmethod : public CompiledMethod {
AbstractCompiler* compiler,
int comp_level
#if INCLUDE_JVMCI
, jweak installed_code,
jweak speculation_log
, char* speculations,
int speculations_len,
int jvmci_data_size
#endif
);
@ -251,8 +247,11 @@ class nmethod : public CompiledMethod {
AbstractCompiler* compiler,
int comp_level
#if INCLUDE_JVMCI
, jweak installed_code = NULL,
jweak speculation_log = NULL
, char* speculations = NULL,
int speculations_len = 0,
int nmethod_mirror_index = -1,
const char* nmethod_mirror_name = NULL,
FailedSpeculation** failed_speculations = NULL
#endif
);
@ -299,12 +298,24 @@ class nmethod : public CompiledMethod {
address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
#if INCLUDE_JVMCI
address nul_chk_table_end () const { return header_begin() + _speculations_offset ; }
address speculations_begin () const { return header_begin() + _speculations_offset ; }
address speculations_end () const { return header_begin() + _jvmci_data_offset ; }
address jvmci_data_begin () const { return header_begin() + _jvmci_data_offset ; }
address jvmci_data_end () const { return header_begin() + _nmethod_end_offset ; }
#else
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
#endif
// Sizes
int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
int metadata_size () const { return (address) metadata_end () - (address) metadata_begin (); }
int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
#if INCLUDE_JVMCI
int speculations_size () const { return speculations_end () - speculations_begin (); }
int jvmci_data_size () const { return jvmci_data_end () - jvmci_data_begin (); }
#endif
int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
@ -446,39 +457,19 @@ public:
void set_method(Method* method) { _method = method; }
#if INCLUDE_JVMCI
// Gets the InstalledCode object associated with this nmethod
// which may be NULL if this nmethod was not compiled by JVMCI
// or the weak reference has been cleared.
oop jvmci_installed_code();
// Gets the JVMCI name of this nmethod.
const char* jvmci_name();
// Copies the value of the name field in the InstalledCode
// object (if any) associated with this nmethod into buf.
// Returns the value of buf if it was updated, otherwise NULL.
char* jvmci_installed_code_name(char* buf, size_t buflen) const;
// Records the pending failed speculation in the
// JVMCI speculation log associated with this nmethod.
void update_speculation(JavaThread* thread);
// Updates the state of the InstalledCode (if any) associated with
// this nmethod based on the current value of _state.
void maybe_invalidate_installed_code();
// Deoptimizes the nmethod (if any) in the address field of a given
// InstalledCode object. The address field is zeroed upon return.
static void invalidate_installed_code(Handle installed_code, TRAPS);
// Gets the SpeculationLog object associated with this nmethod
// which may be NULL if this nmethod was not compiled by JVMCI
// or the weak reference has been cleared.
oop speculation_log();
private:
// Deletes the weak reference (if any) to the InstalledCode object
// associated with this nmethod.
void clear_jvmci_installed_code();
// Deletes the weak reference (if any) to the SpeculationLog object
// associated with this nmethod.
void clear_speculation_log();
public:
// Gets the data specific to a JVMCI compiled method.
// This returns a non-NULL value iff this nmethod was
// compiled by the JVMCI compiler.
JVMCINMethodData* jvmci_nmethod_data() const {
return jvmci_data_size() == 0 ? NULL : (JVMCINMethodData*) jvmci_data_begin();
}
#endif
public:
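The header hunk above embeds the JVMCI speculations and JVMCINMethodData directly in the nmethod body, addressed with the same begin/end offset convention as the existing tables. A minimal standalone sketch of that offset arithmetic (the struct and the concrete numbers are illustrative only, not HotSpot code):

```
#include <cstddef>
#include <cstdio>

// Hypothetical, simplified layout following the begin/end offset convention
// above: each section starts where the previous one ends, and its size is the
// difference between the two offsets.
struct BlobLayout {
  size_t nul_chk_table_offset;
  size_t speculations_offset;  // end of the null-check table
  size_t jvmci_data_offset;    // end of the speculations section
  size_t end_offset;           // end of the JVMCI data section

  size_t speculations_size() const { return jvmci_data_offset - speculations_offset; }
  size_t jvmci_data_size() const   { return end_offset - jvmci_data_offset; }
};

int main() {
  BlobLayout layout = {512, 560, 592, 720};
  std::printf("speculations = %zu bytes, JVMCI data = %zu bytes\n",
              layout.speculations_size(), layout.jvmci_data_size());
  // A JVMCI data size of zero means the nmethod was not produced by JVMCI,
  // matching the jvmci_nmethod_data() == NULL convention above.
  return 0;
}
```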

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -207,7 +207,7 @@ void StubQueue::remove_all(){
void StubQueue::verify() {
// verify only if initialized
if (_stub_buffer == NULL) return;
MutexLockerEx lock(_mutex, Mutex::_no_safepoint_check_flag);
MutexLocker lock(_mutex, Mutex::_no_safepoint_check_flag);
// verify index boundaries
guarantee(0 <= _buffer_size, "buffer size must be positive");
guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
@ -234,7 +234,7 @@ void StubQueue::verify() {
void StubQueue::print() {
MutexLockerEx lock(_mutex, Mutex::_no_safepoint_check_flag);
MutexLocker lock(_mutex, Mutex::_no_safepoint_check_flag);
for (Stub* s = first(); s != NULL; s = next(s)) {
stub_print(s);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -125,7 +125,7 @@ int VtableStubs::_itab_stub_size = 0;
void VtableStubs::initialize() {
VtableStub::_receiver_location = SharedRuntime::name_for_receiver();
{
MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
assert(_number_of_vtable_stubs == 0, "potential performance bug: VtableStubs initialized more than once");
assert(is_power_of_2(N), "N must be a power of 2");
for (int i = 0; i < N; i++) {
@ -211,7 +211,7 @@ address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
VtableStub* s;
{
MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
if (s == NULL) {
if (is_vtable_stub) {
@ -271,7 +271,7 @@ void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) {
}
VtableStub* VtableStubs::entry_point(address pc) {
MutexLockerEx ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
VtableStub* s;
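These hunks are the simple case of the commit-wide MutexLockerEx-to-MutexLocker rename: a scoped locker constructed with the no-safepoint-check flag. A rough standard-library analogue of that RAII shape (the real HotSpot classes differ; this only shows the scoping):

```
#include <mutex>

// Rough analogue of the scoped-locking pattern in the hunks above: the locker
// acquires on construction and releases on destruction. The real HotSpot
// MutexLocker additionally takes Mutex::_no_safepoint_check_flag to skip the
// safepoint check while holding a low-level lock.
std::mutex vtable_stubs_lock;  // stand-in for VtableStubs_lock

void* find_stub_like() {
  std::lock_guard<std::mutex> ml(vtable_stubs_lock);  // ~ MutexLocker ml(VtableStubs_lock, ...)
  // ... look up or create the stub while the lock is held ...
  return nullptr;  // lock released automatically when ml goes out of scope
}
```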

@ -1,5 +1,5 @@
//
// Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -28,14 +28,14 @@
bool AbstractCompiler::should_perform_init() {
if (_compiler_state != initialized) {
MutexLocker only_one(CompileThread_lock);
MonitorLocker only_one(CompileThread_lock);
if (_compiler_state == uninitialized) {
_compiler_state = initializing;
return true;
} else {
while (_compiler_state == initializing) {
CompileThread_lock->wait();
only_one.wait();
}
}
}
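This hunk shows the other half of the lock cleanup: where a lock is also waited on, the guard becomes a MonitorLocker and wait() is called on the locker itself (only_one.wait()) rather than on the raw lock. A condensed analogue of that waiting shape using standard C++ primitives (an assumed simplification, not the HotSpot API):

```
#include <condition_variable>
#include <mutex>

enum CompilerState { uninitialized, initializing, initialized };

std::mutex compile_thread_lock;             // stand-in for CompileThread_lock
std::condition_variable compile_thread_cv;  // the monitor's wait/notify side
CompilerState compiler_state = uninitialized;

// Shape of should_perform_init() after the change: exactly one thread claims
// the initialization, and later arrivals wait on the same monitor (through the
// locker, as only_one.wait() does above) until it finishes.
bool should_perform_init() {
  if (compiler_state != initialized) {
    std::unique_lock<std::mutex> only_one(compile_thread_lock);
    if (compiler_state == uninitialized) {
      compiler_state = initializing;
      return true;
    }
    while (compiler_state == initializing) {
      compile_thread_cv.wait(only_one);
    }
  }
  return false;
}
```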

@ -154,9 +154,6 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
const bool is_jvmci() { return _type == compiler_jvmci; }
const CompilerType type() { return _type; }
// Extra tests to identify trivial methods for the tiered compilation policy.
virtual bool is_trivial(Method* method) { return false; }
// Customization
virtual void initialize () = 0;

@ -68,9 +68,8 @@
#include "c1/c1_Compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciCompiler.hpp"
#include "jvmci/jvmciEnv.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "runtime/vframe.hpp"
#endif
#ifdef COMPILER2
@ -402,7 +401,7 @@ CompileTask* CompileQueue::get() {
methodHandle save_method;
methodHandle save_hot_method;
MutexLocker locker(MethodCompileQueue_lock);
MonitorLocker locker(MethodCompileQueue_lock);
// If _first is NULL we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second,
// we ran out of code cache so compilation has been disabled. In the latter
@ -423,7 +422,7 @@ CompileTask* CompileQueue::get() {
// We need a timed wait here, since compiler threads can exit if compilation
// is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
// is not critical and we do not want idle compiler threads to wake up too often.
MethodCompileQueue_lock->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
locker.wait(5*1000);
if (UseDynamicNumberOfCompilerThreads && _first == NULL) {
// Still nothing to compile. Give caller a chance to stop this thread.
@ -1064,36 +1063,34 @@ void CompileBroker::compile_method_base(const methodHandle& method,
}
#if INCLUDE_JVMCI
if (UseJVMCICompiler) {
if (blocking) {
// Don't allow blocking compiles for requests triggered by JVMCI.
if (thread->is_Compiler_thread()) {
blocking = false;
}
if (UseJVMCICompiler && blocking && !UseJVMCINativeLibrary) {
// Don't allow blocking compiles for requests triggered by JVMCI.
if (thread->is_Compiler_thread()) {
blocking = false;
}
// Don't allow blocking compiles if inside a class initializer or while performing class loading
vframeStream vfst((JavaThread*) thread);
for (; !vfst.at_end(); vfst.next()) {
if (vfst.method()->is_static_initializer() ||
(vfst.method()->method_holder()->is_subclass_of(SystemDictionary::ClassLoader_klass()) &&
vfst.method()->name() == vmSymbols::loadClass_name())) {
blocking = false;
break;
}
}
// Don't allow blocking compilation requests to JVMCI
// if JVMCI itself is not yet initialized
if (!JVMCIRuntime::is_HotSpotJVMCIRuntime_initialized() && compiler(comp_level)->is_jvmci()) {
// Don't allow blocking compiles if inside a class initializer or while performing class loading
vframeStream vfst((JavaThread*) thread);
for (; !vfst.at_end(); vfst.next()) {
if (vfst.method()->is_static_initializer() ||
(vfst.method()->method_holder()->is_subclass_of(SystemDictionary::ClassLoader_klass()) &&
vfst.method()->name() == vmSymbols::loadClass_name())) {
blocking = false;
break;
}
}
// Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
// to avoid deadlock between compiler thread(s) and threads run at shutdown
// such as the DestroyJavaVM thread.
if (JVMCIRuntime::shutdown_called()) {
blocking = false;
}
// Don't allow blocking compilation requests to JVMCI
// if JVMCI itself is not yet initialized
if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) {
blocking = false;
}
// Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
// to avoid deadlock between compiler thread(s) and threads run at shutdown
// such as the DestroyJavaVM thread.
if (JVMCI::shutdown_called()) {
blocking = false;
}
}
#endif // INCLUDE_JVMCI
@ -1193,7 +1190,7 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
}
#if INCLUDE_JVMCI
if (comp->is_jvmci() && !JVMCIRuntime::can_initialize_JVMCI()) {
if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) {
return NULL;
}
#endif
@ -1496,11 +1493,11 @@ static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10;
* @return true if this thread needs to free/recycle the task
*/
bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) {
MutexLocker waiter(task->lock(), thread);
MonitorLocker ml(task->lock(), thread);
int progress_wait_attempts = 0;
int methods_compiled = jvmci->methods_compiled();
while (!task->is_complete() && !is_compilation_disabled_forever() &&
task->lock()->wait(!Mutex::_no_safepoint_check_flag, JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) {
ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) {
CompilerThread* jvmci_compiler_thread = task->jvmci_compiler_thread();
bool progress;
@ -1558,10 +1555,10 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
} else
#endif
{
MutexLocker waiter(task->lock(), thread);
MonitorLocker ml(task->lock(), thread);
free_task = true;
while (!task->is_complete() && !is_compilation_disabled_forever()) {
task->lock()->wait();
ml.wait();
}
}
@ -1644,7 +1641,7 @@ bool CompileBroker::init_compiler_runtime() {
void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
// Free buffer blob, if allocated
if (thread->get_buffer_blob() != NULL) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(thread->get_buffer_blob());
}
@ -1780,7 +1777,7 @@ void CompileBroker::compiler_thread_loop() {
}
// Free buffer blob, if allocated
if (thread->get_buffer_blob() != NULL) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(thread->get_buffer_blob());
}
return; // Stop this thread.
@ -1910,7 +1907,7 @@ static void codecache_print(bool detailed)
stringStream s;
// Dump code cache into a buffer before locking the tty,
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::print_summary(&s, detailed);
}
ttyLocker ttyl;
@ -1924,7 +1921,7 @@ static void codecache_print(outputStream* out, bool detailed) {
// Dump code cache into a buffer
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::print_summary(&s, detailed);
}
@ -2061,20 +2058,24 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
// Skip redefined methods
if (target_handle->is_old()) {
failure_reason = "redefined method";
retry_message = "not retryable";
compilable = ciEnv::MethodCompilable_never;
failure_reason = "redefined method";
retry_message = "not retryable";
compilable = ciEnv::MethodCompilable_never;
} else {
JVMCIEnv env(task, system_dictionary_modification_counter);
methodHandle method(thread, target_handle);
jvmci->compile_method(method, osr_bci, &env);
JVMCICompileState compile_state(task, system_dictionary_modification_counter);
JVMCIEnv env(&compile_state, __FILE__, __LINE__);
methodHandle method(thread, target_handle);
env.runtime()->compile_method(&env, jvmci, method, osr_bci);
failure_reason = env.failure_reason();
failure_reason_on_C_heap = env.failure_reason_on_C_heap();
if (!env.retryable()) {
retry_message = "not retryable";
compilable = ciEnv::MethodCompilable_not_at_tier;
}
failure_reason = compile_state.failure_reason();
failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap();
if (!compile_state.retryable()) {
retry_message = "not retryable";
compilable = ciEnv::MethodCompilable_not_at_tier;
}
if (task->code() == NULL) {
assert(failure_reason != NULL, "must specify failure_reason");
}
}
post_compile(thread, task, task->code() != NULL, NULL, compilable, failure_reason);
if (event.should_commit()) {
@ -2112,9 +2113,9 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
ci_env.record_method_not_compilable("no compiler", !TieredCompilation);
} else {
if (WhiteBoxAPI && WhiteBox::compilation_locked) {
MonitorLockerEx locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
while (WhiteBox::compilation_locked) {
locker.wait(Mutex::_no_safepoint_check_flag);
locker.wait();
}
}
comp->compile_method(&ci_env, target, osr_bci, directive);
@ -2678,7 +2679,7 @@ void CompileBroker::print_heapinfo(outputStream* out, const char* function, cons
// CodeHeapStateAnalytics_lock could be held by a concurrent thread for a long time,
// leading to an unnecessarily long hold time of the CodeCache_lock.
ts.update(); // record starting point
MutexLockerEx mu1(CodeHeapStateAnalytics_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu1(CodeHeapStateAnalytics_lock, Mutex::_no_safepoint_check_flag);
out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());
// If we serve an "allFun" call, it is beneficial to hold the CodeCache_lock
@ -2688,7 +2689,7 @@ void CompileBroker::print_heapinfo(outputStream* out, const char* function, cons
Monitor* global_lock = allFun ? CodeCache_lock : NULL;
Monitor* function_lock = allFun ? NULL : CodeCache_lock;
ts_global.update(); // record starting point
MutexLockerEx mu2(global_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu2(global_lock, Mutex::_no_safepoint_check_flag);
if (global_lock != NULL) {
out->print_cr("\n__ CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
ts_global.update(); // record starting point
@ -2696,7 +2697,7 @@ void CompileBroker::print_heapinfo(outputStream* out, const char* function, cons
if (aggregate) {
ts.update(); // record starting point
MutexLockerEx mu3(function_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu3(function_lock, Mutex::_no_safepoint_check_flag);
if (function_lock != NULL) {
out->print_cr("\n__ CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
}
@ -2716,7 +2717,7 @@ void CompileBroker::print_heapinfo(outputStream* out, const char* function, cons
if (methodNames) {
// print_names() has been shown to be sensitive to concurrent CodeHeap modifications.
// Therefore, request the CodeCache_lock before calling...
MutexLockerEx mu3(function_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu3(function_lock, Mutex::_no_safepoint_check_flag);
CodeCache::print_names(out);
}
if (discard) CodeCache::discard(out);
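Several CompileBroker hunks above follow the same pattern with a timeout, e.g. locker.wait(5*1000) in CompileQueue::get() replacing the explicit MethodCompileQueue_lock->wait(...) call. A minimal condition-variable analogue of that timed wait (illustrative names only, not the HotSpot API):

```
#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex method_compile_queue_lock;  // stand-in for MethodCompileQueue_lock
std::condition_variable queue_cv;
bool queue_has_work = false;

// Shape of the timed wait in CompileQueue::get(): the MonitorLocker replaces
// the explicit lock->wait(..., 5*1000) call, sleeping up to five seconds so an
// idle compiler thread can periodically re-check whether it should exit.
bool wait_for_task() {
  std::unique_lock<std::mutex> locker(method_compile_queue_lock);
  return queue_cv.wait_for(locker, std::chrono::seconds(5),
                           [] { return queue_has_work; });
}
```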

@ -396,6 +396,7 @@ void CompileTask::log_task_done(CompileLog* log) {
ResourceMark rm(thread);
if (!_is_success) {
assert(_failure_reason != NULL, "missing");
const char* reason = _failure_reason != NULL ? _failure_reason : "unknown";
log->elem("failure reason='%s'", reason);
}

@ -253,22 +253,6 @@ void set_jvmci_specific_flags() {
if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
FLAG_SET_DEFAULT(TypeProfileWidth, 8);
}
if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
}
// JVMCI needs values not less than defaults
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
}
if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
}
if (FLAG_IS_DEFAULT(MetaspaceSize)) {
FLAG_SET_DEFAULT(MetaspaceSize, MIN2(MAX2(12*M, MetaspaceSize), MaxMetaspaceSize));
}
if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
}
if (TieredStopAtLevel != CompLevel_full_optimization) {
// Currently JVMCI compiler can only work at the full optimization level
warning("forcing TieredStopAtLevel to full optimization because JVMCI is enabled");
@ -277,7 +261,43 @@ void set_jvmci_specific_flags() {
if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
FLAG_SET_DEFAULT(TypeProfileLevel, 0);
}
}
if (UseJVMCINativeLibrary) {
// SVM compiled code requires more stack space
if (FLAG_IS_DEFAULT(CompilerThreadStackSize)) {
// Duplicate logic in the implementations of os::create_thread
// so that we can then double the computed stack size. Once
// the stack size requirements of SVM are better understood,
// this logic can be pushed down into os::create_thread.
int stack_size = CompilerThreadStackSize;
if (stack_size == 0) {
stack_size = VMThreadStackSize;
}
if (stack_size != 0) {
FLAG_SET_DEFAULT(CompilerThreadStackSize, stack_size * 2);
}
}
} else {
// Adjust the on stack replacement percentage to avoid early
// OSR compilations while JVMCI itself is warming up
if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
}
// JVMCI needs values not less than defaults
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
}
if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
}
if (FLAG_IS_DEFAULT(MetaspaceSize)) {
FLAG_SET_DEFAULT(MetaspaceSize, MIN2(MAX2(12*M, MetaspaceSize), MaxMetaspaceSize));
}
if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
}
} // !UseJVMCINativeLibrary
} // UseJVMCICompiler
}
#endif // INCLUDE_JVMCI
@ -392,6 +412,8 @@ void CompilerConfig::ergo_initialize() {
// Check that JVMCI compiler supports the selected GC.
// Should be done after GCConfig::initialize() was called.
JVMCIGlobals::check_jvmci_supported_gc();
// Do JVMCI specific settings
set_jvmci_specific_flags();
#endif
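The UseJVMCINativeLibrary branch above derives a compiler-thread stack size the way os::create_thread would and then doubles it for SVM-compiled code. A compressed sketch of that fallback-then-double logic (flag names kept for readability; the values and surrounding plumbing are placeholders):

```
#include <cstdio>

// Illustrative globals standing in for the -XX flags used above; the concrete
// values here are made up, and in HotSpot both are product flags in KB.
static int CompilerThreadStackSize = 0;  // 0 means "use the platform default"
static int VMThreadStackSize = 1024;

// Mirrors the UseJVMCINativeLibrary branch: reproduce the os::create_thread
// fallback to VMThreadStackSize, then double the result because SVM-compiled
// code needs more stack.
static void adjust_compiler_thread_stack_for_jvmci() {
  int stack_size = CompilerThreadStackSize;
  if (stack_size == 0) {
    stack_size = VMThreadStackSize;
  }
  if (stack_size != 0) {
    CompilerThreadStackSize = stack_size * 2;
  }
}

int main() {
  adjust_compiler_thread_stack_for_jvmci();
  std::printf("CompilerThreadStackSize = %d KB\n", CompilerThreadStackSize);  // 2048
  return 0;
}
```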

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -456,7 +456,7 @@ void DirectivesStack::init() {
}
DirectiveSet* DirectivesStack::getDefaultDirective(AbstractCompiler* comp) {
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
MutexLocker locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
assert(_bottom != NULL, "Must never be empty");
_bottom->inc_refcount();
@ -464,7 +464,7 @@ DirectiveSet* DirectivesStack::getDefaultDirective(AbstractCompiler* comp) {
}
void DirectivesStack::push(CompilerDirectives* directive) {
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
MutexLocker locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
directive->inc_refcount();
if (_top == NULL) {
@ -478,7 +478,7 @@ void DirectivesStack::push(CompilerDirectives* directive) {
}
void DirectivesStack::pop(int count) {
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
MutexLocker locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
assert(count > -1, "No negative values");
for (int i = 0; i < count; i++) {
pop_inner();
@ -508,14 +508,14 @@ bool DirectivesStack::check_capacity(int request_size, outputStream* st) {
void DirectivesStack::clear() {
// holding the lock during the whole operation ensuring consistent result
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
MutexLocker locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
while (_top->next() != NULL) {
pop_inner();
}
}
void DirectivesStack::print(outputStream* st) {
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
MutexLocker locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
CompilerDirectives* tmp = _top;
while (tmp != NULL) {
tmp->print(st);
@ -526,7 +526,7 @@ void DirectivesStack::print(outputStream* st) {
void DirectivesStack::release(DirectiveSet* set) {
assert(set != NULL, "Never NULL");
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
MutexLocker locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
if (set->is_exclusive_copy()) {
// Old CompileCommands forced us to create an exclusive copy
delete set;
@ -550,7 +550,7 @@ DirectiveSet* DirectivesStack::getMatchingDirective(const methodHandle& method,
DirectiveSet* match = NULL;
{
MutexLockerEx locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
MutexLocker locker(DirectivesStack_lock, Mutex::_no_safepoint_check_flag);
CompilerDirectives* dir = _top;
assert(dir != NULL, "Must be initialized");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -672,8 +672,7 @@ void Disassembler::decode(nmethod* nm, outputStream* st) {
nm->method()->signature()->print_symbol_on(env.output());
#if INCLUDE_JVMCI
{
char buffer[O_BUFLEN];
char* jvmciName = nm->jvmci_installed_code_name(buffer, O_BUFLEN);
const char* jvmciName = nm->jvmci_name();
if (jvmciName != NULL) {
env.output()->print(" (%s)", jvmciName);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -338,7 +338,7 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
// Protect the operation on the derived pointers. This
// protects the addition of derived pointers to the shared
// derived pointer table in DerivedPointerTable::add().
MutexLockerEx x(DerivedPointerTableGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(DerivedPointerTableGC_lock, Mutex::_no_safepoint_check_flag);
do {
omv = oms.current();
oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);

@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "gc/cms/cmsArguments.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/arguments.hpp"
@ -36,10 +36,6 @@
#include "runtime/globals_extension.hpp"
#include "utilities/defaultStream.hpp"
size_t CMSArguments::conservative_max_heap_alignment() {
return GenCollectedHeap::conservative_max_heap_alignment();
}
void CMSArguments::set_parnew_gc_flags() {
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
"control point invariant");
@ -154,12 +150,12 @@ void CMSArguments::initialize() {
// Code along this path potentially sets NewSize and OldSize
log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size: " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
Arguments::min_heap_size(), InitialHeapSize, max_heap);
MinHeapSize, InitialHeapSize, max_heap);
size_t min_new = preferred_max_new_size;
if (FLAG_IS_CMDLINE(NewSize)) {
min_new = NewSize;
}
if (max_heap > min_new && Arguments::min_heap_size() > min_new) {
if (max_heap > min_new && MinHeapSize > min_new) {
// Unless explicitly requested otherwise, make young gen
// at least min_new, and at most preferred_max_new_size.
if (FLAG_IS_DEFAULT(NewSize)) {
@ -225,5 +221,5 @@ void CMSArguments::disable_adaptive_size_policy(const char* collector_name) {
}
CollectedHeap* CMSArguments::create_heap() {
return create_heap_with_policy<CMSHeap, ConcurrentMarkSweepPolicy>();
return new CMSHeap();
}

@ -26,16 +26,16 @@
#define SHARE_GC_CMS_CMSARGUMENTS_HPP
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/genArguments.hpp"
class CollectedHeap;
class CMSArguments : public GCArguments {
class CMSArguments : public GenArguments {
private:
void disable_adaptive_size_policy(const char* collector_name);
void set_parnew_gc_flags();
public:
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
};
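The two hunks above move CMSArguments onto the shared GenArguments base and let create_heap() construct the heap directly instead of going through the collector-policy template. A skeletal view of the resulting factory shape (class bodies trimmed; only the relationship shown in the diff is modelled):

```
// Skeletal shape only; the real classes carry far more state and virtuals.
class CollectedHeap {
 public:
  virtual ~CollectedHeap() {}
};

class CMSHeap : public CollectedHeap {};

class GCArguments {
 public:
  virtual void initialize() = 0;
  virtual CollectedHeap* create_heap() = 0;
  virtual ~GCArguments() {}
};

// Generational collectors now share GenArguments, so CMS no longer needs a
// ConcurrentMarkSweepPolicy (or its own conservative_max_heap_alignment
// override) just to construct the heap.
class GenArguments : public GCArguments {};

class CMSArguments : public GenArguments {
 public:
  void initialize() override {}
  CollectedHeap* create_heap() override {
    return new CMSHeap();  // was: create_heap_with_policy<CMSHeap, ConcurrentMarkSweepPolicy>()
  }
};
```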

@ -1,52 +0,0 @@
/*
* Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
//
// ConcurrentMarkSweepPolicy methods
//
void ConcurrentMarkSweepPolicy::initialize_alignments() {
_space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
_heap_alignment = compute_heap_alignment();
}

@ -65,9 +65,8 @@ public:
}
};
CMSHeap::CMSHeap(GenCollectorPolicy *policy) :
GenCollectedHeap(policy,
Generation::ParNew,
CMSHeap::CMSHeap() :
GenCollectedHeap(Generation::ParNew,
Generation::ConcurrentMarkSweep,
"ParNew:CMS"),
_workers(NULL),
@ -162,9 +161,7 @@ bool CMSHeap::create_cms_collector() {
assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
"Unexpected generation kinds");
CMSCollector* collector =
new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(),
rem_set(),
(ConcurrentMarkSweepPolicy*) gen_policy());
new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(), rem_set());
if (collector == NULL || !collector->completed_initialization()) {
if (collector) {

@ -34,7 +34,6 @@
#include "utilities/growableArray.hpp"
class CLDClosure;
class GenCollectorPolicy;
class GCMemoryManager;
class MemoryPool;
class OopsInGenClosure;
@ -45,7 +44,7 @@ class WorkGang;
class CMSHeap : public GenCollectedHeap {
public:
CMSHeap(GenCollectorPolicy *policy);
CMSHeap();
// Returns JNI_OK on success
virtual jint initialize();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,7 +45,7 @@ void VM_CMS_Operation::verify_before_gc() {
GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
MutexLocker y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
CMSHeap::heap()->prepare_for_verify();
Universe::verify();
}
@ -57,7 +57,7 @@ void VM_CMS_Operation::verify_after_gc() {
GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
MutexLocker y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
Universe::verify();
}
}
@ -183,7 +183,7 @@ void VM_GenCollectFullConcurrent::doit() {
&& (_gc_count_before == heap->total_collections())),
"total_collections() should be monotonically increasing");
MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
if (heap->total_full_collections() == _full_gc_count_before) {
// Nudge the CMS thread to start a concurrent collection.
@ -244,11 +244,11 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
// or by the CMS thread, so we do not want to be suspended
// while holding that lock.
ThreadToNativeFromVM native(jt);
MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
// Either a concurrent or a stop-world full gc is sufficient
// witness to our request.
while (heap->total_full_collections_completed() <= _full_gc_count_before) {
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
FullGCCount_lock->wait_without_safepoint_check();
}
}
}
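In doit_epilogue() above, the explicit wait(Mutex::_no_safepoint_check_flag) becomes wait_without_safepoint_check(), while the surrounding loop keeps waiting until the completed-collection counter moves past the snapshot taken before the request. Sketched here with standard primitives (names illustrative, not the HotSpot API):

```
#include <condition_variable>
#include <mutex>

std::mutex full_gc_count_lock;  // stand-in for FullGCCount_lock
std::condition_variable full_gc_count_cv;
unsigned total_full_collections_completed = 0;

// Shape of the loop in VM_GenCollectFullConcurrent::doit_epilogue(): wait,
// without a safepoint check, until at least one full collection has completed
// since the snapshot taken before the request -- either a concurrent or a
// stop-world full GC is a sufficient witness.
void wait_for_full_gc(unsigned full_gc_count_before) {
  std::unique_lock<std::mutex> ml(full_gc_count_lock);
  while (total_full_collections_completed <= full_gc_count_before) {
    full_gc_count_cv.wait(ml);  // ~ FullGCCount_lock->wait_without_safepoint_check()
  }
}
```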

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -331,11 +331,11 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, M
// Note: this requires that CFLspace c'tors
// are called serially in the order in which the locks are
// acquired in the program text. This is true today.
_freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
Monitor::_safepoint_check_sometimes),
_freelistLock(_lockRank--, "CompactibleFreeListSpace_lock", true,
Monitor::_safepoint_check_never),
_preconsumptionDirtyCardClosure(NULL),
_parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
"CompactibleFreeListSpace._dict_par_lock", true,
"CompactibleFreeListSpace_dict_par_lock", true,
Monitor::_safepoint_check_never)
{
assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
@ -366,7 +366,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, M
// Initialize locks for parallel case.
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
_indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
"a freelist par lock", true, Mutex::_safepoint_check_sometimes);
"a freelist par lock", true, Mutex::_safepoint_check_never);
DEBUG_ONLY(
_indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
)
@ -1340,7 +1340,7 @@ size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
}
HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
return allocate(size);
}
@ -1524,8 +1524,8 @@ FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
// If GC is parallel, this might be called by several threads.
// This should be rare enough that the locking overhead won't affect
// the sequential code.
MutexLockerEx x(parDictionaryAllocLock(),
Mutex::_no_safepoint_check_flag);
MutexLocker x(parDictionaryAllocLock(),
Mutex::_no_safepoint_check_flag);
fc = getChunkFromDictionary(size);
}
if (fc != NULL) {
@ -1868,7 +1868,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
Mutex* lock = &_parDictionaryAllocLock;
FreeChunk* ec;
{
MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
ec = dictionary()->find_largest_dict(); // get largest block
if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
// It's a coterminal block - we can coalesce.
@ -1885,7 +1885,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
if (size < SmallForDictionary) {
lock = _indexedFreeListParLocks[size];
}
MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
// record the birth under the lock since the recording involves
// manipulation of the list on which the chunk lives and
@ -2042,7 +2042,7 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
if (rem_sz < SmallForDictionary) {
// The freeList lock is held, but multiple GC task threads might be executing in parallel.
bool is_par = Thread::current()->is_GC_task_thread();
if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
if (is_par) _indexedFreeListParLocks[rem_sz]->lock_without_safepoint_check();
returnChunkToFreeList(ffc);
split(size, rem_sz);
if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
@ -2682,8 +2682,8 @@ HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
// This locking manages sync with other large object allocations.
MutexLockerEx x(_cfls->parDictionaryAllocLock(),
Mutex::_no_safepoint_check_flag);
MutexLocker x(_cfls->parDictionaryAllocLock(),
Mutex::_no_safepoint_check_flag);
res = _cfls->getChunkFromDictionaryExact(word_sz);
if (res == NULL) return NULL;
} else {
@ -2781,8 +2781,8 @@ void CompactibleFreeListSpaceLAB::retire(int tid) {
size_t num_retire = _indexedFreeList[i].count();
assert(_num_blocks[i] > num_retire, "Should have used at least one");
{
// MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
// Mutex::_no_safepoint_check_flag);
// MutexLocker x(_cfls->_indexedFreeListParLocks[i],
// Mutex::_no_safepoint_check_flag);
// Update globals stats for num_blocks used
_global_num_blocks[i] += (_num_blocks[i] - num_retire);
@ -2824,8 +2824,8 @@ bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size
AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
fl_for_cur_sz.set_size(cur_sz);
{
MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
Mutex::_no_safepoint_check_flag);
MutexLocker x(_indexedFreeListParLocks[cur_sz],
Mutex::_no_safepoint_check_flag);
AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
if (gfl->count() != 0) {
// nn is the number of chunks of size cur_sz that
@ -2885,8 +2885,8 @@ bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size
}
// Update birth stats for this block size.
size_t num = fl->count();
MutexLockerEx x(_indexedFreeListParLocks[word_sz],
Mutex::_no_safepoint_check_flag);
MutexLocker x(_indexedFreeListParLocks[word_sz],
Mutex::_no_safepoint_check_flag);
ssize_t births = _indexedFreeList[word_sz].split_births() + num;
_indexedFreeList[word_sz].set_split_births(births);
return true;
@ -2902,8 +2902,8 @@ FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, si
FreeChunk* rem_fc = NULL;
size_t rem;
{
MutexLockerEx x(parDictionaryAllocLock(),
Mutex::_no_safepoint_check_flag);
MutexLocker x(parDictionaryAllocLock(),
Mutex::_no_safepoint_check_flag);
while (n > 0) {
fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
if (fc != NULL) {
@ -2968,8 +2968,8 @@ FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, si
}
}
if (rem_fc != NULL) {
MutexLockerEx x(_indexedFreeListParLocks[rem],
Mutex::_no_safepoint_check_flag);
MutexLocker x(_indexedFreeListParLocks[rem],
Mutex::_no_safepoint_check_flag);
_bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
_indexedFreeList[rem].return_chunk_at_head(rem_fc);
smallSplitBirth(rem);
@ -3027,8 +3027,8 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_s
assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
{
// Update the stats for this block size.
MutexLockerEx x(_indexedFreeListParLocks[word_sz],
Mutex::_no_safepoint_check_flag);
MutexLocker x(_indexedFreeListParLocks[word_sz],
Mutex::_no_safepoint_check_flag);
const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
_indexedFreeList[word_sz].set_split_births(births);
// ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;

@ -26,7 +26,6 @@
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsGCStats.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
@ -43,7 +42,6 @@
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
@ -82,6 +80,9 @@
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif
// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
@ -172,7 +173,7 @@ class CMSTokenSyncWithLocks: public CMSTokenSync {
private:
// Note: locks are acquired in textual declaration order
// and released in the opposite order
MutexLockerEx _locker1, _locker2, _locker3;
MutexLocker _locker1, _locker2, _locker3;
public:
CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
@ -204,7 +205,11 @@ class CMSParGCThreadState: public CHeapObj<mtGC> {
};
ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
ReservedSpace rs,
size_t initial_byte_size,
size_t min_byte_size,
size_t max_byte_size,
CardTableRS* ct) :
CardGeneration(rs, initial_byte_size, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_did_compact(false)
@ -255,6 +260,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
// note that all arithmetic is in units of HeapWords.
assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
assert(_dilatation_factor >= 1.0, "from previous assert");
initialize_performance_counters(min_byte_size, max_byte_size);
}
@ -311,13 +318,13 @@ AdaptiveSizePolicy* CMSCollector::size_policy() {
return CMSHeap::heap()->size_policy();
}
void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_old_size,
size_t max_old_size) {
const char* gen_name = "old";
GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
// Generation Counters - generation 1, 1 subspace
_gen_counters = new GenerationCounters(gen_name, 1, 1,
gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
min_old_size, max_old_size, &_virtual_space);
_space_counters = new GSpaceCounters(gen_name, 0,
_virtual_space.reserved_size(),
@ -446,8 +453,7 @@ bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;
CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
CardTableRS* ct,
ConcurrentMarkSweepPolicy* cp):
CardTableRS* ct):
_overflow_list(NULL),
_conc_workers(NULL), // may be set later
_completed_initialization(false),
@ -457,7 +463,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_roots_scanning_options(GenCollectedHeap::SO_None),
_verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
_verifying(false),
_collector_policy(cp),
_inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
@ -491,7 +496,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_stats(cmsGen),
_eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
//verify that this lock should be acquired with safepoint check.
Monitor::_safepoint_check_sometimes)),
Monitor::_safepoint_check_never)),
_eden_chunk_array(NULL), // may be set in ctor body
_eden_chunk_index(0), // -- ditto --
_eden_chunk_capacity(0), // -- ditto --
@ -509,7 +514,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
// Allocate MUT and marking bit map
{
MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
MutexLocker x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
if (!_markBitMap.allocate(_span)) {
log_warning(gc)("Failed to allocate CMS Bit Map");
return;
@ -797,7 +802,7 @@ Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
CMSSynchronousYieldRequest yr;
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
return have_lock_and_allocate(size, tlab);
}
@ -844,8 +849,8 @@ HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
assert(_markBitMap.covers(start, size), "Out of bounds");
if (_collectorState >= Marking) {
MutexLockerEx y(_markBitMap.lock(),
Mutex::_no_safepoint_check_flag);
MutexLocker y(_markBitMap.lock(),
Mutex::_no_safepoint_check_flag);
// [see comments preceding SweepClosure::do_blk() below for details]
//
// Can the P-bits be deleted now? JJJ
@ -1302,7 +1307,7 @@ void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause ca
CMSHeap* heap = CMSHeap::heap();
unsigned int gc_count = heap->total_full_collections();
if (gc_count == full_gc_count) {
MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker y(CGC_lock, Mutex::_no_safepoint_check_flag);
_full_gc_requested = true;
_full_gc_cause = cause;
CGC_lock->notify(); // nudge CMS thread
@ -1423,7 +1428,7 @@ void CMSCollector::acquire_control_and_collect(bool full,
bitMapLock()->unlock();
releaseFreelistLocks();
{
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (_foregroundGCShouldWait) {
// We are going to be waiting for action for the CMS thread;
// it had better not be gone (for instance at shutdown)!
@ -1440,7 +1445,7 @@ void CMSCollector::acquire_control_and_collect(bool full,
"Possible deadlock");
while (_foregroundGCShouldWait) {
// wait for notification
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
CGC_lock->wait_without_safepoint_check();
// Possibility of delay/starvation here, since CMS token does
// not know to give priority to VM thread? Actually, I think
// there wouldn't be any delay/starvation, but the proof of
@ -1685,7 +1690,7 @@ class ReleaseForegroundGC: public StackObj {
public:
ReleaseForegroundGC(CMSCollector* c) : _c(c) {
assert(_c->_foregroundGCShouldWait, "Else should not need to call");
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
// allow a potentially blocked foreground collector to proceed
_c->_foregroundGCShouldWait = false;
if (_c->_foregroundGCIsActive) {
@ -1697,7 +1702,7 @@ class ReleaseForegroundGC: public StackObj {
~ReleaseForegroundGC() {
assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
_c->_foregroundGCShouldWait = true;
}
};
@ -1708,10 +1713,9 @@ void CMSCollector::collect_in_background(GCCause::Cause cause) {
CMSHeap* heap = CMSHeap::heap();
{
bool safepoint_check = Mutex::_no_safepoint_check_flag;
MutexLockerEx hl(Heap_lock, safepoint_check);
MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
FreelistLocker fll(this);
MutexLockerEx x(CGC_lock, safepoint_check);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (_foregroundGCIsActive) {
// The foreground collector is. Skip this
// background collection.
@ -1855,7 +1859,7 @@ void CMSCollector::collect_in_background(GCCause::Cause cause) {
// collection was preempted.
{
ReleaseForegroundGC x(this); // unblock FG collection
MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
MutexLocker y(Heap_lock, Mutex::_no_safepoint_check_flag);
CMSTokenSync z(true); // not strictly needed.
if (_collectorState == Resizing) {
compute_new_size();
@ -1898,7 +1902,7 @@ void CMSCollector::collect_in_background(GCCause::Cause cause) {
// Clear _foregroundGCShouldWait and, in the event that the
// foreground collector is waiting, notify it, before
// returning.
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
_foregroundGCShouldWait = false;
if (_foregroundGCIsActive) {
CGC_lock->notify();
@ -1946,7 +1950,7 @@ bool CMSCollector::waitForForegroundGC() {
// Block the foreground collector until the
// background collector decides whether to
// yield.
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
_foregroundGCShouldWait = true;
if (_foregroundGCIsActive) {
// The background collector yields to the
@ -1964,7 +1968,7 @@ bool CMSCollector::waitForForegroundGC() {
log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
p2i(Thread::current()), _collectorState);
while (_foregroundGCIsActive) {
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
CGC_lock->wait_without_safepoint_check();
}
ConcurrentMarkSweepThread::set_CMS_flag(
ConcurrentMarkSweepThread::CMS_cms_has_token);
@ -2206,7 +2210,7 @@ bool CMSCollector::is_cms_reachable(HeapWord* addr) {
_markBitMap.isMarked(addr) ? "" : " not");
if (verify_after_remark()) {
MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
MutexLocker x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
bool result = verification_mark_bm()->isMarked(addr);
tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
result ? "IS" : "is NOT");
@ -2266,7 +2270,7 @@ class VerifyMarkedClosure: public BitMapClosure {
bool CMSCollector::verify_after_remark() {
GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
static bool init = false;
assert(SafepointSynchronize::is_at_safepoint(),
@ -2467,7 +2471,7 @@ ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {
if (freelistLock()->owned_by_self()) {
Generation::oop_iterate(cl);
} else {
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
Generation::oop_iterate(cl);
}
}
@ -2477,7 +2481,7 @@ ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
if (freelistLock()->owned_by_self()) {
Generation::object_iterate(cl);
} else {
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
Generation::object_iterate(cl);
}
}
@ -2487,7 +2491,7 @@ ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
if (freelistLock()->owned_by_self()) {
Generation::safe_object_iterate(cl);
} else {
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
Generation::safe_object_iterate(cl);
}
}
@ -2506,7 +2510,7 @@ ConcurrentMarkSweepGeneration::prepare_for_verify() {
if (freelistLock()->owned_by_self()) {
cmsSpace()->prepare_for_verify();
} else {
MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
cmsSpace()->prepare_for_verify();
}
}
@ -2519,7 +2523,7 @@ ConcurrentMarkSweepGeneration::verify() {
if (freelistLock()->owned_by_self()) {
cmsSpace()->verify();
} else {
MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
cmsSpace()->verify();
}
}
@ -2629,7 +2633,7 @@ ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
bool parallel) {
CMSSynchronousYieldRequest yr;
assert(!tlab, "Can't deal with TLAB allocation");
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
if (GCExpandToAllocateDelayMillis > 0) {
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
@ -2804,8 +2808,8 @@ void CMSCollector::checkpointRootsInitial() {
assert(_restart_addr == NULL, "Control point invariant");
{
// acquire locks for subsequent manipulations
MutexLockerEx x(bitMapLock(),
Mutex::_no_safepoint_check_flag);
MutexLocker x(bitMapLock(),
Mutex::_no_safepoint_check_flag);
checkpointRootsInitialWork();
// enable ("weak") refs discovery
rp->enable_discovery();
@ -3246,8 +3250,8 @@ bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
return false;
}
assert(work_q->size() == 0, "Shouldn't steal");
MutexLockerEx ml(ovflw_stk->par_lock(),
Mutex::_no_safepoint_check_flag);
MutexLocker ml(ovflw_stk->par_lock(),
Mutex::_no_safepoint_check_flag);
// Grab up to 1/4 the size of the work queue
size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
(size_t)ParGCDesiredObjsFromOverflowList);
@ -3451,8 +3455,8 @@ void ParConcMarkingClosure::trim_queue(size_t max) {
void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
// We need to do this under a mutex to prevent other
// workers from interfering with the work done below.
MutexLockerEx ml(_overflow_stack->par_lock(),
Mutex::_no_safepoint_check_flag);
MutexLocker ml(_overflow_stack->par_lock(),
Mutex::_no_safepoint_check_flag);
// Remember the least grey address discarded
HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
_collector->lower_restart_addr(ra);
@ -4169,8 +4173,8 @@ void CMSCollector::checkpointRootsFinal() {
);
}
FreelistLocker x(this);
MutexLockerEx y(bitMapLock(),
Mutex::_no_safepoint_check_flag);
MutexLocker y(bitMapLock(),
Mutex::_no_safepoint_check_flag);
checkpointRootsFinalWork();
}
verify_work_stacks_empty();
@ -5259,6 +5263,9 @@ void CMSCollector::refProcessingWork() {
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(purged_class);
// Clean JVMCI metadata handles.
JVMCI_ONLY(JVMCI::do_unloading(purged_class));
}
}
@ -5644,7 +5651,7 @@ CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
_shifter(shifter),
_bm(),
_lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
Monitor::_safepoint_check_sometimes) : NULL)
Monitor::_safepoint_check_never) : NULL)
{
_bmStartWord = 0;
_bmWordSize = 0;
@ -6725,8 +6732,8 @@ void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
// We need to do this under a mutex to prevent other
// workers from interfering with the work done below.
MutexLockerEx ml(_overflow_stack->par_lock(),
Mutex::_no_safepoint_check_flag);
MutexLocker ml(_overflow_stack->par_lock(),
Mutex::_no_safepoint_check_flag);
// Remember the least grey address discarded
HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
_collector->lower_restart_addr(ra);
@ -7527,7 +7534,7 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
}
ConcurrentMarkSweepThread::synchronize(true);
_freelistLock->lock();
_freelistLock->lock_without_safepoint_check();
_bitMap->lock()->lock_without_safepoint_check();
_collector->startTimer();
}
@ -7992,7 +7999,7 @@ void CMSCollector::preserve_mark_if_necessary(oop p) {
void CMSCollector::par_preserve_mark_if_necessary(oop p) {
markOop m = p->mark_raw();
if (m->must_be_preserved(p)) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
// Even though we read the mark word without holding
// the lock, we are assured that it will not change
// because we "own" this oop, so no other thread can

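The hunks above replace MutexLockerEx with MutexLocker throughout the CMS collector and fold the no-safepoint-check choice into the constructor argument. A rough standalone model of that RAII idiom, using invented Toy* names rather than the real HotSpot classes, could look like this:

    #include <mutex>

    // Illustrative stand-in for HotSpot's Mutex; not the real class.
    struct ToyMutex {
        enum SafepointCheck { _safepoint_check_flag, _no_safepoint_check_flag };
        std::mutex raw;
    };

    // RAII guard comparable in spirit to MutexLocker: lock on entry, unlock on exit.
    class ToyLocker {
        ToyMutex& _m;
    public:
        ToyLocker(ToyMutex& m,
                  ToyMutex::SafepointCheck /*check*/ = ToyMutex::_safepoint_check_flag)
            : _m(m) {
            // The real MutexLocker would also honour the safepoint-check policy here.
            _m.raw.lock();
        }
        ~ToyLocker() { _m.raw.unlock(); }
    };

    int main() {
        ToyMutex heap_lock;
        {
            ToyLocker x(heap_lock, ToyMutex::_no_safepoint_check_flag);
            // critical section: mutate shared GC state
        }   // lock released automatically at end of scope
    }
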
@ -225,13 +225,13 @@ class CMSMarkStack: public CHeapObj<mtGC> {
// "Parallel versions" of some of the above
oop par_pop() {
// lock and pop
MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(&_par_lock, Mutex::_no_safepoint_check_flag);
return pop();
}
bool par_push(oop ptr) {
// lock and push
MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(&_par_lock, Mutex::_no_safepoint_check_flag);
return push(ptr);
}
@ -585,10 +585,6 @@ class CMSCollector: public CHeapObj<mtGC> {
bool verifying() const { return _verifying; }
void set_verifying(bool v) { _verifying = v; }
// Collector policy
ConcurrentMarkSweepPolicy* _collector_policy;
ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
void set_did_compact(bool v);
// XXX Move these to CMSStats ??? FIX ME !!!
@ -833,8 +829,7 @@ class CMSCollector: public CHeapObj<mtGC> {
void setup_cms_unloading_and_verification_state();
public:
CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
CardTableRS* ct,
ConcurrentMarkSweepPolicy* cp);
CardTableRS* ct);
ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
MemRegion ref_processor_span() const { return _span_based_discoverer.span(); }
@ -1075,7 +1070,11 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
void assert_correct_size_change_locking();
public:
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct);
ConcurrentMarkSweepGeneration(ReservedSpace rs,
size_t initial_byte_size,
size_t min_byte_size,
size_t max_byte_size,
CardTableRS* ct);
// Accessors
CMSCollector* collector() const { return _collector; }
@ -1212,7 +1211,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Performance Counters support
virtual void update_counters();
virtual void update_counters(size_t used);
void initialize_performance_counters();
void initialize_performance_counters(size_t min_old_size, size_t max_old_size);
CollectorCounters* counters() { return collector()->counters(); }
// Support for parallel remark of survivor space

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,7 +112,7 @@ void ConcurrentMarkSweepThread::stop_service() {
// Now post a notify on CGC_lock so as to nudge
// CMS thread(s) that might be slumbering in
// sleepBeforeNextCycle.
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
@ -147,15 +147,14 @@ void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
assert(UseConcMarkSweepGC, "just checking");
MutexLockerEx x(CGC_lock,
Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (!is_cms_thread) {
assert(Thread::current()->is_VM_thread(), "Not a VM thread");
CMSSynchronousYieldRequest yr;
while (CMS_flag_is_set(CMS_cms_has_token)) {
// indicate that we want to get the token
set_CMS_flag(CMS_vm_wants_token);
CGC_lock->wait(true);
CGC_lock->wait_without_safepoint_check();
}
// claim the token and proceed
clear_CMS_flag(CMS_vm_wants_token);
@ -167,7 +166,7 @@ void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
// This will need to be modified is there are more CMS threads than one.
while (CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token)) {
set_CMS_flag(CMS_cms_wants_token);
CGC_lock->wait(true);
CGC_lock->wait_without_safepoint_check();
}
// claim the token
clear_CMS_flag(CMS_cms_wants_token);
@ -178,8 +177,7 @@ void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
assert(UseConcMarkSweepGC, "just checking");
MutexLockerEx x(CGC_lock,
Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (!is_cms_thread) {
assert(Thread::current()->is_VM_thread(), "Not a VM thread");
assert(CMS_flag_is_set(CMS_vm_has_token), "just checking");
@ -206,13 +204,12 @@ void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
// Wait until any cms_lock event
void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
MutexLockerEx x(CGC_lock,
Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (should_terminate() || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
CGC_lock->wait_without_safepoint_check(t_millis);
clear_CMS_flag(CMS_cms_wants_token);
assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
"Should not be set");
@ -231,7 +228,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
// Total collections count before waiting loop
unsigned int before_count;
{
MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
before_count = heap->total_collections();
}
@ -255,14 +252,14 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
// Wait until the next event or the remaining timeout
{
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (should_terminate() || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
CGC_lock->wait(Mutex::_no_safepoint_check_flag, wait_time_millis);
CGC_lock->wait_without_safepoint_check(wait_time_millis);
clear_CMS_flag(CMS_cms_wants_token);
assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
"Should not be set");
@ -277,7 +274,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
// Total collections count after the event
unsigned int after_count;
{
MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
after_count = heap->total_collections();
}

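The changes above also retire the raw CGC_lock->wait(flag, millis) calls in favour of wait_without_safepoint_check(millis). As a loose analogue in portable C++ (the Gate type and its fields are invented for the example), a bounded wait on an event flag might be written:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Standalone analogue of "wait on CGC_lock for a bounded time".
    struct Gate {
        std::mutex m;
        std::condition_variable cv;
        bool full_gc_requested = false;
    };

    void wait_for_event(Gate& gate, long t_millis) {
        std::unique_lock<std::mutex> lk(gate.m);
        if (t_millis == 0) {
            // Untimed wait: block until the flag is raised.
            gate.cv.wait(lk, [&] { return gate.full_gc_requested; });
        } else {
            // Timed wait: give up after t_millis even if nothing happened.
            gate.cv.wait_for(lk, std::chrono::milliseconds(t_millis),
                             [&] { return gate.full_gc_requested; });
        }
    }

    int main() {
        Gate gate;
        // In HotSpot this wait happens on the CMS thread; here we simply time out.
        wait_for_event(gate, 10);
    }
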
@ -622,8 +622,11 @@ void ParNewGenTask::work(uint worker_id) {
_old_gen->par_oop_since_save_marks_iterate_done((int) worker_id);
}
ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
: DefNewGeneration(rs, initial_byte_size, "CMS young collection pauses"),
ParNewGeneration::ParNewGeneration(ReservedSpace rs,
size_t initial_byte_size,
size_t min_byte_size,
size_t max_byte_size)
: DefNewGeneration(rs, initial_byte_size, min_byte_size, max_byte_size, "CMS young collection pauses"),
_plab_stats("Young", YoungPLABSize, PLABWeight),
_overflow_list(NULL),
_is_alive_closure(this)

@ -348,7 +348,10 @@ class ParNewGeneration: public DefNewGeneration {
void restore_preserved_marks();
public:
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
ParNewGeneration(ReservedSpace rs,
size_t initial_byte_size,
size_t min_byte_size,
size_t max_byte_size);
~ParNewGeneration() {
for (uint i = 0; i < ParallelGCThreads; i++)

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@ YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
_monitor = new Monitor(/* priority */ Mutex::leaf,
/* name */ "WorkGroup monitor",
/* allow_vm_block */ are_GC_task_threads,
Monitor::_safepoint_check_sometimes);
Monitor::_safepoint_check_never);
assert(monitor() != NULL, "Failed to allocate monitor");
}
@ -137,7 +137,7 @@ NOTE: we can always create a new gang per each iteration
*/
/////////////////////
void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
assert(task() == NULL, "Gang currently tied to a task");
assert(new_task != NULL, "Null task");
// Bind task to gang
@ -175,7 +175,7 @@ void YieldingFlexibleWorkGang::wait_for_gang() {
assert(started_workers() <= active_workers(), "invariant");
assert(finished_workers() <= active_workers(), "invariant");
assert(yielded_workers() <= active_workers(), "invariant");
monitor()->wait(Mutex::_no_safepoint_check_flag);
monitor()->wait_without_safepoint_check();
}
switch (yielding_task()->status()) {
case COMPLETED:
@ -204,7 +204,7 @@ void YieldingFlexibleWorkGang::wait_for_gang() {
void YieldingFlexibleWorkGang::continue_task(
YieldingFlexibleGangTask* gang_task) {
MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
assert(task() != NULL && task() == gang_task, "Incorrect usage");
assert(_started_workers == _active_workers, "Precondition");
assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
@ -224,7 +224,7 @@ void YieldingFlexibleWorkGang::reset() {
void YieldingFlexibleWorkGang::yield() {
assert(task() != NULL, "Inconsistency; should have task binding");
MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
assert(yielded_workers() < active_workers(), "Consistency check");
if (yielding_task()->status() == ABORTING) {
// Do not yield; we need to abort as soon as possible
@ -247,7 +247,7 @@ void YieldingFlexibleWorkGang::yield() {
switch (yielding_task()->status()) {
case YIELDING:
case YIELDED: {
monitor()->wait(Mutex::_no_safepoint_check_flag);
monitor()->wait_without_safepoint_check();
break; // from switch
}
case ACTIVE:
@ -271,7 +271,7 @@ void YieldingFlexibleWorkGang::yield() {
void YieldingFlexibleWorkGang::abort() {
assert(task() != NULL, "Inconsistency; should have task binding");
MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
assert(yielded_workers() < active_workers(), "Consistency check");
#ifndef PRODUCT
switch (yielding_task()->status()) {
@ -319,7 +319,7 @@ void YieldingFlexibleGangTask::abort() {
void YieldingFlexibleGangWorker::loop() {
int previous_sequence_number = 0;
Monitor* gang_monitor = yf_gang()->monitor();
MutexLockerEx ml(gang_monitor, Mutex::_no_safepoint_check_flag);
MutexLocker ml(gang_monitor, Mutex::_no_safepoint_check_flag);
YieldingWorkData data;
int id;
while (true) {
@ -340,7 +340,7 @@ void YieldingFlexibleGangWorker::loop() {
yf_gang()->internal_note_start();
// Now, release the gang mutex and do the work.
{
MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
MutexUnlocker mul(gang_monitor, Mutex::_no_safepoint_check_flag);
GCIdMark gc_id_mark(data.task()->gc_id());
data.task()->work(id); // This might include yielding
}
@ -394,6 +394,6 @@ void YieldingFlexibleGangWorker::loop() {
// Remember the sequence number
previous_sequence_number = data.sequence_number();
// Wait for more work
gang_monitor->wait(Mutex::_no_safepoint_check_flag);
gang_monitor->wait_without_safepoint_check();
}
}

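One detail in the work-gang code above is the MutexUnlocker, which temporarily drops the gang monitor while the task body runs. A minimal sketch of that unlock-around-the-work idiom with standard C++ primitives (names are illustrative only):

    #include <mutex>

    std::mutex gang_monitor_model;   // stand-in for the gang's Monitor

    void do_gc_work() { /* the task body runs without the monitor held */ }

    void worker_step() {
        std::unique_lock<std::mutex> ml(gang_monitor_model);
        // ... claim a task under the monitor ...
        ml.unlock();      // comparable to MutexUnlocker: drop the lock around the work
        do_gc_work();
        ml.lock();        // reacquired before touching shared gang state again
        // ... record completion under the monitor ...
    }

    int main() { worker_step(); }
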
@ -25,8 +25,7 @@
#include "precompiled.hpp"
#include "gc/epsilon/epsilonArguments.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonCollectorPolicy.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/vm_version.hpp"
@ -67,6 +66,13 @@ void EpsilonArguments::initialize() {
#endif
}
CollectedHeap* EpsilonArguments::create_heap() {
return create_heap_with_policy<EpsilonHeap, EpsilonCollectorPolicy>();
void EpsilonArguments::initialize_alignments() {
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
SpaceAlignment = align;
HeapAlignment = align;
}
CollectedHeap* EpsilonArguments::create_heap() {
return new EpsilonHeap();
}

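The new EpsilonArguments::initialize_alignments() above sets SpaceAlignment and HeapAlignment to the larger of the page size and the allocation granularity, taking over the logic that used to live in EpsilonCollectorPolicy (removed further down). A standalone sketch of that calculation, with assumed OS values:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Illustrative values only; HotSpot queries the OS for these at runtime.
    const size_t vm_page_size              = 4 * 1024;
    const size_t vm_allocation_granularity = 64 * 1024;   // e.g. Windows-style granularity

    int main() {
        // Same shape as initialize_alignments(): both the space and the heap are
        // aligned to the larger of page size and allocation granularity.
        size_t align = std::max(vm_allocation_granularity, vm_page_size);
        size_t space_alignment = align;
        size_t heap_alignment  = align;
        std::printf("space=%zu heap=%zu\n", space_alignment, heap_alignment);
    }
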
@ -30,7 +30,9 @@
class CollectedHeap;
class EpsilonArguments : public GCArguments {
public:
private:
virtual void initialize_alignments();
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();

@ -25,7 +25,6 @@
#include "runtime/thread.hpp"
#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "utilities/macros.hpp"

@ -1,42 +0,0 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_EPSILON_EPSILONCOLLECTORPOLICY_HPP
#define SHARE_GC_EPSILON_EPSILONCOLLECTORPOLICY_HPP
#include "gc/shared/collectorPolicy.hpp"
class EpsilonCollectorPolicy: public CollectorPolicy {
protected:
virtual void initialize_alignments() {
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
size_t align = MAX2((size_t)os::vm_allocation_granularity(), page_size);
_space_alignment = align;
_heap_alignment = align;
}
public:
EpsilonCollectorPolicy() : CollectorPolicy() {};
};
#endif // SHARE_GC_EPSILON_EPSILONCOLLECTORPOLICY_HPP

@ -25,14 +25,16 @@
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/globals.hpp"
jint EpsilonHeap::initialize() {
size_t align = _policy->heap_alignment();
size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
size_t max_byte_size = align_up(_policy->max_heap_byte_size(), align);
size_t align = HeapAlignment;
size_t init_byte_size = align_up(InitialHeapSize, align);
size_t max_byte_size = align_up(MaxHeapSize, align);
// Initialize backing storage
ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
@ -124,7 +126,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size) {
while (res == NULL) {
// Allocation failed, attempt expansion, and retry:
MutexLockerEx ml(Heap_lock);
MutexLocker ml(Heap_lock);
size_t space_left = max_capacity() - capacity();
size_t want_space = MAX2(size, EpsilonMinHeapExpand);

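EpsilonHeap::initialize() above now reads InitialHeapSize and MaxHeapSize directly and rounds them up to HeapAlignment. The rounding is the usual power-of-two align_up; a self-contained version with assumed sizes:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Round sz up to the next multiple of alignment (alignment must be a power of
    // two), mirroring how the heap init rounds the size flags to HeapAlignment.
    static size_t align_up(size_t sz, size_t alignment) {
        assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment expected");
        return (sz + alignment - 1) & ~(alignment - 1);
    }

    int main() {
        const size_t heap_alignment = 2 * 1024 * 1024;          // assumed 2M alignment
        size_t init = align_up(100 * 1024 * 1024 + 123, heap_alignment);
        size_t max  = align_up(512 * 1024 * 1024,       heap_alignment);
        std::printf("init=%zu max=%zu\n", init, max);
    }
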
@ -28,7 +28,6 @@
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/space.hpp"
#include "services/memoryManager.hpp"
#include "gc/epsilon/epsilonCollectorPolicy.hpp"
#include "gc/epsilon/epsilonMonitoringSupport.hpp"
#include "gc/epsilon/epsilonBarrierSet.hpp"
#include "gc/epsilon/epsilon_globals.hpp"
@ -36,7 +35,6 @@
class EpsilonHeap : public CollectedHeap {
friend class VMStructs;
private:
EpsilonCollectorPolicy* _policy;
SoftRefPolicy _soft_ref_policy;
EpsilonMonitoringSupport* _monitoring_support;
MemoryPool* _pool;
@ -53,8 +51,7 @@ private:
public:
static EpsilonHeap* heap();
EpsilonHeap(EpsilonCollectorPolicy* p) :
_policy(p),
EpsilonHeap() :
_memory_manager("Epsilon Heap", "") {};
virtual Name kind() const {
@ -65,10 +62,6 @@ public:
return "Epsilon";
}
virtual CollectorPolicy* collector_policy() const {
return _policy;
}
virtual SoftRefPolicy* soft_ref_policy() {
return &_soft_ref_policy;
}

@ -196,7 +196,7 @@ HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
desired_word_size,
actual_word_size);
if (result == NULL && !survivor_is_full()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
desired_word_size,
actual_word_size);
@ -220,7 +220,7 @@ HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
desired_word_size,
actual_word_size);
if (result == NULL && !old_is_full()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
desired_word_size,
actual_word_size);

@ -26,15 +26,37 @@
#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcArguments.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
static const double MaxRamFractionForYoung = 0.8;
size_t G1Arguments::MaxMemoryForYoung;
static size_t calculate_heap_alignment(size_t space_alignment) {
size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
return MAX3(card_table_alignment, space_alignment, page_size);
}
void G1Arguments::initialize_alignments() {
// Set up the region size and associated fields.
//
// There is a circular dependency here. We base the region size on the heap
// size, but the heap size should be aligned with the region size. To get
// around this we use the unaligned values for the heap.
HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
HeapRegionRemSet::setup_remset_size();
SpaceAlignment = HeapRegion::GrainBytes;
HeapAlignment = calculate_heap_alignment(SpaceAlignment);
}
size_t G1Arguments::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
@ -156,10 +178,81 @@ void G1Arguments::initialize() {
initialize_verification_types();
}
CollectedHeap* G1Arguments::create_heap() {
if (AllocateOldGenAt != NULL) {
return create_heap_with_policy<G1CollectedHeap, G1HeterogeneousCollectorPolicy>();
static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
julong phys_mem;
// If MaxRam is specified, we use that as maximum physical memory available.
if (FLAG_IS_DEFAULT(MaxRAM)) {
phys_mem = os::physical_memory();
calc_str.append("Physical_Memory");
} else {
return create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
phys_mem = (julong)MaxRAM;
calc_str.append("MaxRAM");
}
julong reasonable_max = phys_mem;
// If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
// reasonable max size of young generation.
if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
reasonable_max = (julong)(phys_mem / MaxRAMFraction);
calc_str.append(" / MaxRAMFraction");
} else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
calc_str.append(" * MaxRAMPercentage / 100");
} else {
// We use our own fraction to calculate max size of young generation.
reasonable_max = phys_mem * max_ram_fraction_for_young;
calc_str.append(" * %0.2f", max_ram_fraction_for_young);
}
return (size_t)reasonable_max;
}
void G1Arguments::initialize_heap_flags_and_sizes() {
if (AllocateOldGenAt != NULL) {
initialize_heterogeneous();
}
GCArguments::initialize_heap_flags_and_sizes();
}
void G1Arguments::initialize_heterogeneous() {
FormatBuffer<100> calc_str("");
MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
if (MaxNewSize > MaxMemoryForYoung) {
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
MaxMemoryForYoung, calc_str.buffer());
} else {
log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
"Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
}
MaxNewSize = MaxMemoryForYoung;
}
if (NewSize > MaxMemoryForYoung) {
if (FLAG_IS_CMDLINE(NewSize)) {
log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
MaxMemoryForYoung, calc_str.buffer());
}
NewSize = MaxMemoryForYoung;
}
}
CollectedHeap* G1Arguments::create_heap() {
return new G1CollectedHeap();
}
bool G1Arguments::is_heterogeneous_heap() {
return AllocateOldGenAt != NULL;
}
size_t G1Arguments::reasonable_max_memory_for_young() {
return MaxMemoryForYoung;
}
size_t G1Arguments::heap_reserved_size_bytes() {
return (is_heterogeneous_heap() ? 2 : 1) * MaxHeapSize;
}

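The heterogeneous-heap sizing that used to live in G1HeterogeneousCollectorPolicy moves into G1Arguments above: the young-generation cap is derived from physical memory (or MaxRAM, if set) and scaled by MaxRAMFraction, MaxRAMPercentage, or a built-in 0.8 fraction. A toy restatement of that fallback chain, with made-up flag values:

    #include <cstdio>

    // All flag values below are assumptions for the example, not real JVM defaults.
    int main() {
        double phys_mem               = 16.0 * 1024 * 1024 * 1024;  // stands in for os::physical_memory()
        bool   max_ram_set            = false;                      // MaxRAM left at default
        bool   max_ram_fraction_set   = false;                      // MaxRAMFraction not given
        bool   max_ram_percentage_set = true;                       // MaxRAMPercentage given
        double max_ram                = 8.0 * 1024 * 1024 * 1024;
        double max_ram_fraction       = 4.0;
        double max_ram_percentage     = 50.0;
        const double max_ram_fraction_for_young = 0.8;              // G1's own fallback fraction

        double base = max_ram_set ? max_ram : phys_mem;
        double reasonable_max;
        if (max_ram_fraction_set) {
            reasonable_max = base / max_ram_fraction;
        } else if (max_ram_percentage_set) {
            reasonable_max = base * max_ram_percentage / 100.0;
        } else {
            reasonable_max = base * max_ram_fraction_for_young;
        }
        std::printf("max memory for young: %.0f bytes\n", reasonable_max);
    }
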
@ -34,13 +34,25 @@ class G1Arguments : public GCArguments {
friend class G1HeapVerifierTest;
private:
static size_t MaxMemoryForYoung;
static void initialize_verification_types();
static void parse_verification_type(const char* type);
public:
virtual void initialize_alignments();
virtual void initialize_heap_flags_and_sizes();
void initialize_heterogeneous();
virtual void initialize();
virtual size_t conservative_max_heap_alignment();
virtual CollectedHeap* create_heap();
public:
// Heterogeneous heap support
static bool is_heterogeneous_heap();
static size_t reasonable_max_memory_for_young();
static size_t heap_reserved_size_bytes();
};
#endif // SHARE_GC_G1_G1ARGUMENTS_HPP

@ -29,10 +29,10 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
@ -436,7 +436,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
uint gc_count_before;
{
MutexLockerEx x(Heap_lock);
MutexLocker x(Heap_lock);
result = _allocator->attempt_allocation_locked(word_size);
if (result != NULL) {
return result;
@ -575,7 +575,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MutexLockerEx x(Heap_lock);
MutexLocker x(Heap_lock);
MemRegion reserved = _hrm->reserved();
HeapWord* prev_last_addr = NULL;
@ -685,7 +685,7 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
// that contain the address range. The address range actually within the
// MemRegion will not be modified. That is assumed to have been initialized
// elsewhere, probably via an mmap of archived heap data.
MutexLockerEx x(Heap_lock);
MutexLocker x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
@ -771,7 +771,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count, b
// For each Memregion, free the G1 regions that constitute it, and
// notify mark-sweep that the range is no longer to be considered 'archive.'
MutexLockerEx x(Heap_lock);
MutexLocker x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
@ -882,7 +882,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
{
MutexLockerEx x(Heap_lock);
MutexLocker x(Heap_lock);
// Given that humongous objects are not allocated in young
// regions, we'll first try to do the allocation without doing a
@ -1177,9 +1177,6 @@ void G1CollectedHeap::resize_heap_if_necessary() {
const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage;
const size_t min_heap_size = collector_policy()->min_heap_byte_size();
const size_t max_heap_size = collector_policy()->max_heap_byte_size();
// We have to be careful here as these two calculations can overflow
// 32-bit size_t's.
double used_after_gc_d = (double) used_after_gc;
@ -1188,7 +1185,7 @@ void G1CollectedHeap::resize_heap_if_necessary() {
// Let's make sure that they are both under the max heap size, which
// by default will make them fit into a size_t.
double desired_capacity_upper_bound = (double) max_heap_size;
double desired_capacity_upper_bound = (double) MaxHeapSize;
minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
desired_capacity_upper_bound);
maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
@ -1208,11 +1205,11 @@ void G1CollectedHeap::resize_heap_if_necessary() {
// Should not be greater than the heap max size. No need to adjust
// it with respect to the heap min size as it's a lower bound (i.e.,
// we'll try to make the capacity larger than it, not smaller).
minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
// Should not be less than the heap min size. No need to adjust it
// with respect to the heap max size as it's an upper bound (i.e.,
// we'll try to make the capacity smaller than it, not greater).
maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
if (capacity_after_gc < minimum_desired_capacity) {
// Don't expand unless it's significant
@ -1484,11 +1481,10 @@ public:
const char* get_description() { return "Humongous Regions"; }
};
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
G1CollectedHeap::G1CollectedHeap() :
CollectedHeap(),
_young_gen_sampling_thread(NULL),
_workers(NULL),
_collector_policy(collector_policy),
_card_table(NULL),
_soft_ref_policy(),
_old_set("Old Region Set", new OldRegionSetChecker()),
@ -1515,7 +1511,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_survivor(),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_policy(G1Policy::create_policy(collector_policy, _gc_timer_stw)),
_policy(G1Policy::create_policy(_gc_timer_stw)),
_heap_sizing_policy(NULL),
_collection_set(this, _policy),
_hot_card_cache(NULL),
@ -1644,14 +1640,13 @@ jint G1CollectedHeap::initialize() {
// HeapWordSize).
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
size_t init_byte_size = collector_policy()->initial_heap_byte_size();
size_t max_byte_size = _collector_policy->heap_reserved_size_bytes();
size_t heap_alignment = collector_policy()->heap_alignment();
size_t init_byte_size = InitialHeapSize;
size_t reserved_byte_size = G1Arguments::heap_reserved_size_bytes();
// Ensure that the sizes are properly aligned.
Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
Universe::check_alignment(reserved_byte_size, HeapRegion::GrainBytes, "g1 heap");
Universe::check_alignment(reserved_byte_size, HeapAlignment, "g1 heap");
// Reserve the maximum.
@ -1666,8 +1661,8 @@ jint G1CollectedHeap::initialize() {
// If this happens then we could end up using a non-optimal
// compressed oops mode.
ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
heap_alignment);
ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
HeapAlignment);
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
@ -1699,7 +1694,7 @@ jint G1CollectedHeap::initialize() {
_hot_card_cache = new G1HotCardCache(this);
// Carve out the G1 part of the heap.
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size);
size_t page_size = actual_reserved_page_size(heap_rs);
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
@ -1714,8 +1709,8 @@ jint G1CollectedHeap::initialize() {
}
os::trace_page_sizes("Heap",
collector_policy()->min_heap_byte_size(),
max_byte_size,
MinHeapSize,
reserved_byte_size,
page_size,
heap_rs.base(),
heap_rs.size());
@ -1743,7 +1738,7 @@ jint G1CollectedHeap::initialize() {
G1RegionToSpaceMapper* next_bitmap_storage =
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
_hrm = HeapRegionManager::create_manager(this, _collector_policy);
_hrm = HeapRegionManager::create_manager(this);
_hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
_card_table->initialize(cardtable_storage);
@ -1870,10 +1865,6 @@ void G1CollectedHeap::safepoint_synchronize_end() {
SuspendibleThreadSet::desynchronize();
}
size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
void G1CollectedHeap::post_initialize() {
CollectedHeap::post_initialize();
ref_processing_init();
@ -1940,10 +1931,6 @@ void G1CollectedHeap::ref_processing_init() {
true); // allow changes to number of processing threads
}
CollectorPolicy* G1CollectedHeap::collector_policy() const {
return _collector_policy;
}
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
return &_soft_ref_policy;
}
@ -2066,7 +2053,7 @@ void G1CollectedHeap::increment_old_marking_cycles_started() {
}
void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
// We assume that if concurrent == true, then the caller is a
// concurrent thread that was joined the Suspendible Thread
@ -2333,7 +2320,7 @@ bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
}
bool G1CollectedHeap::is_heterogeneous_heap() const {
return _collector_policy->is_heterogeneous_heap();
return G1Arguments::is_heterogeneous_heap();
}
class PrintRegionClosure: public HeapRegionClosure {
@ -2604,7 +2591,7 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
}
void G1CollectedHeap::do_concurrent_mark() {
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (!_cm_thread->in_progress()) {
_cm_thread->set_started();
CGC_lock->notify();
@ -3925,7 +3912,7 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
const uint humongous_regions_removed) {
if (old_regions_removed > 0 || humongous_regions_removed > 0) {
MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_old_set.bulk_remove(old_regions_removed);
_humongous_set.bulk_remove(humongous_regions_removed);
}
@ -3935,7 +3922,7 @@ void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
assert(list != NULL, "list can't be null");
if (!list->is_empty()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
_hrm->insert_list_into_free_list(list);
}
}
@ -4073,7 +4060,7 @@ private:
void do_serial_work() {
// Need to grab the lock to be allowed to modify the old region list.
MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_collection_set->iterate(&_cl);
}

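resize_heap_if_necessary() above now clamps against the MinHeapSize and MaxHeapSize globals rather than the collector policy's min/max accessors. The clamping itself is just two bounds; for example (assumed sizes):

    #include <algorithm>
    #include <cstdio>

    int main() {
        const size_t MinHeapSizeModel = 64  * 1024 * 1024;   // assumed values
        const size_t MaxHeapSizeModel = 512 * 1024 * 1024;

        size_t minimum_desired_capacity = 600 * 1024 * 1024; // derived from MinHeapFreeRatio
        size_t maximum_desired_capacity = 32  * 1024 * 1024; // derived from MaxHeapFreeRatio

        // Never ask for more than the max heap, never shrink below the min heap.
        minimum_desired_capacity = std::min(minimum_desired_capacity, MaxHeapSizeModel);
        maximum_desired_capacity = std::max(maximum_desired_capacity, MinHeapSizeModel);

        std::printf("min desired=%zu max desired=%zu\n",
                    minimum_desired_capacity, maximum_desired_capacity);
    }
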
@ -74,7 +74,6 @@ class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectionSet;
class G1CollectorPolicy;
class G1Policy;
class G1HotCardCache;
class G1RemSet;
@ -156,7 +155,6 @@ private:
G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
WorkGang* _workers;
G1CollectorPolicy* _collector_policy;
G1CardTable* _card_table;
SoftRefPolicy _soft_ref_policy;
@ -926,10 +924,10 @@ public:
// A set of cards where updates happened during the GC
G1DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
// Create a G1CollectedHeap with the specified policy.
// Create a G1CollectedHeap.
// Must call the initialize method afterwards.
// May not return if something goes wrong.
G1CollectedHeap(G1CollectorPolicy* policy);
G1CollectedHeap();
private:
jint initialize_concurrent_refinement();
@ -944,9 +942,6 @@ public:
virtual void safepoint_synchronize_begin();
virtual void safepoint_synchronize_end();
// Return the (conservative) maximum heap alignment for any G1 heap
static size_t conservative_max_heap_alignment();
// Does operations required after initialization has been done.
void post_initialize();
@ -976,8 +971,6 @@ public:
const G1CollectionSet* collection_set() const { return &_collection_set; }
G1CollectionSet* collection_set() { return &_collection_set; }
virtual CollectorPolicy* collector_policy() const;
virtual SoftRefPolicy* soft_ref_policy();
virtual void initialize_serviceability();
@ -1008,6 +1001,7 @@ public:
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
size_t unused_committed_regions_in_bytes() const;
virtual size_t capacity() const;
virtual size_t used() const;
// This should be called when we're not holding the heap lock. The

@ -1,65 +0,0 @@
/*
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
G1CollectorPolicy::G1CollectorPolicy() {
// Set up the region size and associated fields. Given that the
// policy is created before the heap, we have to set this up here,
// so it's done as soon as possible.
// It would have been natural to pass initial_heap_byte_size() and
// max_heap_byte_size() to setup_heap_region_size() but those have
// not been set up at this point since they should be aligned with
// the region size. So, there is a circular dependency here. We base
// the region size on the heap size, but the heap size should be
// aligned with the region size. To get around this we use the
// unaligned values for the heap.
HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
HeapRegionRemSet::setup_remset_size();
}
void G1CollectorPolicy::initialize_alignments() {
_space_alignment = HeapRegion::GrainBytes;
size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
_heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
}
size_t G1CollectorPolicy::heap_reserved_size_bytes() const {
return _max_heap_byte_size;
}
bool G1CollectorPolicy::is_heterogeneous_heap() const {
return false;
}

@ -166,13 +166,13 @@ void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQ
}
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
add_chunk_to_list(&_chunk_list, elem);
_chunks_in_chunk_list++;
}
void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
add_chunk_to_list(&_free_list, elem);
}
@ -185,7 +185,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQu
}
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list() {
MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
TaskQueueEntryChunk* result = remove_chunk_from_list(&_chunk_list);
if (result != NULL) {
_chunks_in_chunk_list--;
@ -194,7 +194,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_chunk_list(
}
G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_free_list() {
MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
return remove_chunk_from_list(&_free_list);
}
@ -311,7 +311,7 @@ uint G1CMRootRegions::num_root_regions() const {
}
void G1CMRootRegions::notify_scan_done() {
MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
_scan_in_progress = false;
RootRegionScan_lock->notify_all();
}
@ -338,9 +338,9 @@ bool G1CMRootRegions::wait_until_scan_finished() {
}
{
MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
while (scan_in_progress()) {
RootRegionScan_lock->wait(Mutex::_no_safepoint_check_flag);
ml.wait();
}
}
return true;
@ -1288,7 +1288,7 @@ public:
// Now update the old/humongous region sets
_g1h->remove_from_old_sets(cl.old_regions_removed(), cl.humongous_regions_removed());
{
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
_g1h->decrement_summary_bytes(cl.freed_bytes());
_cleanup_list->add_ordered(&local_cleanup_list);

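The root-region scan code above moves to MonitorLocker, which pairs the lock with wait()/notify_all() on the same object. A standalone model of the notify_scan_done / wait_until_scan_finished handshake (types and names invented for the example):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    // One side clears a flag and notifies under the lock; waiters loop until clear.
    struct ScanState {
        std::mutex m;
        std::condition_variable cv;
        bool scan_in_progress = true;
    };

    void notify_scan_done(ScanState& s) {
        std::lock_guard<std::mutex> x(s.m);
        s.scan_in_progress = false;
        s.cv.notify_all();
    }

    void wait_until_scan_finished(ScanState& s) {
        std::unique_lock<std::mutex> ml(s.m);
        s.cv.wait(ml, [&] { return !s.scan_in_progress; });
    }

    int main() {
        ScanState s;
        std::thread scanner([&] { notify_scan_done(s); });
        wait_until_scan_finished(s);
        scanner.join();
    }
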
@ -397,7 +397,7 @@ void G1ConcurrentMarkThread::run_service() {
}
void G1ConcurrentMarkThread::stop_service() {
MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
MutexLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
@ -407,9 +407,9 @@ void G1ConcurrentMarkThread::sleep_before_next_cycle() {
// below while the world is otherwise stopped.
assert(!in_progress(), "should have been cleared");
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
while (!started() && !should_terminate()) {
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
ml.wait();
}
if (started()) {

@ -59,9 +59,9 @@ G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr, uint
}
void G1ConcurrentRefineThread::wait_for_completed_buffers() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
MonitorLocker ml(_monitor, Mutex::_no_safepoint_check_flag);
while (!should_terminate() && !is_active()) {
_monitor->wait(Mutex::_no_safepoint_check_flag);
ml.wait();
}
}
@ -71,7 +71,7 @@ bool G1ConcurrentRefineThread::is_active() {
}
void G1ConcurrentRefineThread::activate() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
MutexLocker x(_monitor, Mutex::_no_safepoint_check_flag);
if (!is_primary()) {
set_active(true);
} else {
@ -82,7 +82,7 @@ void G1ConcurrentRefineThread::activate() {
}
void G1ConcurrentRefineThread::deactivate() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
MutexLocker x(_monitor, Mutex::_no_safepoint_check_flag);
if (!is_primary()) {
set_active(false);
} else {
@ -140,6 +140,6 @@ void G1ConcurrentRefineThread::run_service() {
}
void G1ConcurrentRefineThread::stop_service() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
MutexLocker x(_monitor, Mutex::_no_safepoint_check_flag);
_monitor->notify();
}

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"

@ -62,8 +62,7 @@ template <class T> void G1VerifyOopClosure::do_oop_work(T* p) {
oop obj = CompressedOops::decode_not_null(heap_oop);
bool failed = false;
if (!_g1h->is_in(obj) || _g1h->is_obj_dead_cond(obj, _verify_option)) {
MutexLockerEx x(ParGCRareEvent_lock,
Mutex::_no_safepoint_check_flag);
MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
LogStreamHandle(Error, gc, verify) yy;
if (!_failures) {
yy.cr();

@ -60,9 +60,8 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
_gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
#if INCLUDE_AOT
_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");
#endif
AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");)
JVMCI_ONLY(_gc_par_phases[JVMCIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMCI Root Scan (ms):");)
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
_gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms):");
_gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms):");
@ -527,9 +526,8 @@ const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
"SystemDictionaryRoots",
"CLDGRoots",
"JVMTIRoots",
#if INCLUDE_AOT
"AOTCodeRoots",
#endif
AOT_ONLY("AOTCodeRoots" COMMA)
JVMCI_ONLY("JVMCIRoots" COMMA)
"CMRefRoots",
"WaitForStrongCLD",
"WeakCLDRoots",

@ -55,9 +55,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
SystemDictionaryRoots,
CLDGRoots,
JVMTIRoots,
#if INCLUDE_AOT
AOTCodeRoots,
#endif
AOT_ONLY(AOTCodeRoots COMMA)
JVMCI_ONLY(JVMCIRoots COMMA)
CMRefRoots,
WaitForStrongCLD,
WeakCLDRoots,

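The #if INCLUDE_AOT blocks above collapse into AOT_ONLY(...) and JVMCI_ONLY(...) invocations, with a COMMA macro so the optional entries can carry their own separators inside the enum and string lists. A toy version of that macro pattern (the TOY_ names are invented; HotSpot's real macros live in utilities/macros.hpp):

    #include <cstdio>

    #define TOY_INCLUDE_AOT 1

    #define COMMA ,
    #if TOY_INCLUDE_AOT
    #define AOT_ONLY(code) code
    #else
    #define AOT_ONLY(code)
    #endif

    // The COMMA macro lets the optional entry bring its own separator, so both
    // lists stay well-formed whether or not the feature is compiled in.
    enum GCParPhases {
        JVMTIRoots,
        AOT_ONLY(AOTCodeRoots COMMA)
        CMRefRoots
    };

    static const char* names[] = {
        "JVMTIRoots",
        AOT_ONLY("AOTCodeRoots" COMMA)
        "CMRefRoots"
    };

    int main() {
        std::printf("%s\n", names[AOTCodeRoots]);
    }
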
@ -1,103 +0,0 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/os.hpp"
#include "utilities/formatBuffer.hpp"
const double G1HeterogeneousCollectorPolicy::MaxRamFractionForYoung = 0.8;
size_t G1HeterogeneousCollectorPolicy::MaxMemoryForYoung;
static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
julong phys_mem;
// If MaxRam is specified, we use that as maximum physical memory available.
if (FLAG_IS_DEFAULT(MaxRAM)) {
phys_mem = os::physical_memory();
calc_str.append("Physical_Memory");
} else {
phys_mem = (julong)MaxRAM;
calc_str.append("MaxRAM");
}
julong reasonable_max = phys_mem;
// If either MaxRAMFraction or MaxRAMPercentage is specified, we use them to calculate
// reasonable max size of young generation.
if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
reasonable_max = (julong)(phys_mem / MaxRAMFraction);
calc_str.append(" / MaxRAMFraction");
} else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
calc_str.append(" * MaxRAMPercentage / 100");
} else {
// We use our own fraction to calculate max size of young generation.
reasonable_max = phys_mem * max_ram_fraction_for_young;
calc_str.append(" * %0.2f", max_ram_fraction_for_young);
}
return (size_t)reasonable_max;
}
void G1HeterogeneousCollectorPolicy::initialize_flags() {
FormatBuffer<100> calc_str("");
MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);
if (MaxNewSize > MaxMemoryForYoung) {
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
MaxMemoryForYoung, calc_str.buffer());
} else {
log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
"Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
}
MaxNewSize = MaxMemoryForYoung;
}
if (NewSize > MaxMemoryForYoung) {
if (FLAG_IS_CMDLINE(NewSize)) {
log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
MaxMemoryForYoung, calc_str.buffer());
}
NewSize = MaxMemoryForYoung;
}
// After setting new size flags, call base class initialize_flags()
G1CollectorPolicy::initialize_flags();
}
size_t G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() {
return MaxMemoryForYoung;
}
size_t G1HeterogeneousCollectorPolicy::heap_reserved_size_bytes() const {
return 2 * _max_heap_byte_size;
}
bool G1HeterogeneousCollectorPolicy::is_heterogeneous_heap() const {
return true;
}

@ -28,8 +28,8 @@
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer) :
G1Policy(policy, gc_timer), _manager(NULL) {}
G1HeterogeneousHeapPolicy::G1HeterogeneousHeapPolicy(STWGCTimer* gc_timer) :
G1Policy(gc_timer), _manager(NULL) {}
// We call the super class init(), after which we provision young_list_target_length() regions in dram.
void G1HeterogeneousHeapPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {

@ -25,7 +25,6 @@
#ifndef SHARE_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
#define SHARE_GC_G1_G1HETEROGENEOUSHEAPPOLICY_HPP
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
@ -34,7 +33,7 @@ class G1HeterogeneousHeapPolicy : public G1Policy {
HeterogeneousHeapRegionManager* _manager;
public:
G1HeterogeneousHeapPolicy(G1CollectorPolicy* policy, STWGCTimer* gc_timer);
G1HeterogeneousHeapPolicy(STWGCTimer* gc_timer);
// initialize policy
virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);

@ -23,13 +23,13 @@
*/
#include "precompiled.hpp"
#include "gc/g1/g1HeterogeneousCollectorPolicy.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1HeterogeneousHeapYoungGenSizer.hpp"
#include "gc/g1/heapRegion.hpp"
G1HeterogeneousHeapYoungGenSizer::G1HeterogeneousHeapYoungGenSizer() : G1YoungGenSizer() {
// will be used later when min and max young size is calculated.
_max_young_length = (uint)(G1HeterogeneousCollectorPolicy::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
_max_young_length = (uint)(G1Arguments::reasonable_max_memory_for_young() / HeapRegion::GrainBytes);
}
// Since heap is sized potentially to larger value accounting for dram + nvdimm, we need to limit

Some files were not shown because too many files have changed in this diff.