Merge
commit b75ea9b069
Changed files:

.hgtags
doc/
make/
  Init.gmk, InitSupport.gmk
  autoconf/
    boot-jdk.m4, bootcycle-spec.gmk.in, build-performance.m4, configure.ac,
    jdk-options.m4, jdk-version.m4, spec.gmk.in, util.m4
  common/
  conf/
  hotspot/symbols/
src/hotspot/
  cpu/zero/
  os/
    aix/, bsd/, linux/, posix/, solaris/, windows/
  share/
    c1/
    classfile/
      classListParser.cpp, classLoader.cpp, classLoader.hpp, classLoader.inline.hpp,
      javaClasses.cpp, javaClasses.inline.hpp, symbolTable.cpp, systemDictionary.cpp,
      systemDictionaryShared.cpp
    compiler/
    gc/
      g1/
      shared/
      shenandoah/
        c2/
        heuristics/
          shenandoahAdaptiveHeuristics.cpp, shenandoahCompactHeuristics.cpp,
          shenandoahPassiveHeuristics.cpp, shenandoahStaticHeuristics.cpp
        shenandoahCodeRoots.cpp, shenandoahConcurrentMark.cpp,
        shenandoahConcurrentMark.inline.hpp, shenandoahEvacOOMHandler.cpp,
        shenandoahHeap.cpp, shenandoahHeapRegion.hpp, shenandoahHeapRegion.inline.hpp,
        shenandoahJfrSupport.cpp, shenandoahMarkCompact.cpp, shenandoahMarkingContext.cpp,
        shenandoahOopClosures.hpp, shenandoahPhaseTimings.cpp, shenandoahPhaseTimings.hpp,
        shenandoahRootProcessor.cpp, shenandoahRootProcessor.inline.hpp,
        shenandoahThreadLocalData.hpp, shenandoahUnload.cpp, shenandoahUtils.cpp,
        shenandoahUtils.hpp
    include/
    jfr/
      leakprofiler/
      recorder/checkpoint/types/traceid/
    jvmci/
    memory/
    oops/
    opto/
    prims/
    runtime/
.hgtags
@@ -632,3 +632,4 @@ dd5198db2e5b1ebcafe065d987c03ba9fcb50fc3 jdk-15+17
 7cc27caabe6e342151e8baf549beb07a9c755ec2 jdk-15+19
 46bca5e5e6fb26efd07245d26fe96a9c3260f51e jdk-15+20
 12b55fad80f30d24b1f8fdb3b947ea6465ef9518 jdk-15+21
+7223c6d610343fd8323af9d07d501e01fa1a7696 jdk-15+22
doc/building.html

@@ -295,7 +295,7 @@
 </tr>
 <tr class="even">
 <td style="text-align: left;">Windows</td>
-<td style="text-align: left;">Microsoft Visual Studio 2017 update 15.9.16</td>
+<td style="text-align: left;">Microsoft Visual Studio 2019 update 16.5.3</td>
 </tr>
 </tbody>
 </table>
doc/building.md

@@ -330,7 +330,7 @@ issues.
 Linux      gcc 9.2.0
 macOS      Apple Xcode 10.1 (using clang 10.0.0)
 Solaris    Oracle Solaris Studio 12.6 (with compiler version 5.15)
-Windows    Microsoft Visual Studio 2017 update 15.9.16
+Windows    Microsoft Visual Studio 2019 update 16.5.3

 All compilers are expected to be able to compile to the C99 language standard,
 as some C99 features are used in the source code. Microsoft Visual Studio
make/Init.gmk

@@ -226,6 +226,9 @@ else # HAS_SPEC=true
     # Parse COMPARE_BUILD (for makefile development)
     $(eval $(call ParseCompareBuild))

+    # Setup reproducible build environment
+    $(eval $(call SetupReproducibleBuild))
+
     # If no LOG= was given on command line, but we have a non-standard default
     # value, use that instead and re-parse log level.
     ifeq ($(LOG), )
make/InitSupport.gmk

@@ -306,6 +306,15 @@ else # $(HAS_SPEC)=true
     topdir=$(TOPDIR)
   endif

+  # Setup the build environment to match the requested specification on
+  # level of reproducible builds
+  define SetupReproducibleBuild
+    ifeq ($$(SOURCE_DATE), updated)
+      SOURCE_DATE := $$(shell $$(DATE) +"%s")
+    endif
+    export SOURCE_DATE_EPOCH := $$(SOURCE_DATE)
+  endef
+
   # Parse COMPARE_BUILD into COMPARE_BUILD_*
   # Syntax: COMPARE_BUILD=CONF=<configure options>:PATCH=<patch file>:
   #         MAKE=<make targets>:COMP_OPTS=<compare script options>:
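Note: the SetupReproducibleBuild define above exports SOURCE_DATE_EPOCH to every tool the build spawns. A minimal C++ sketch (not part of this commit) of why that matters: recent GCC and Clang substitute the SOURCE_DATE_EPOCH instant for __DATE__ and __TIME__, so a file like this compiles to identical bits on every rebuild from the same source date.

// build_stamp.cpp -- illustrative sketch, not part of this commit.
// With SOURCE_DATE_EPOCH exported, recent GCC/Clang replace the wall
// clock behind __DATE__/__TIME__ with that fixed instant, making the
// binary below reproducible.
#include <cstdio>

int main() {
  std::printf("built on %s at %s\n", __DATE__, __TIME__);
  return 0;
}

Built twice with the same exported SOURCE_DATE_EPOCH, the two binaries should compare equal, which is what the COMPARE_BUILD machinery is used to verify.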
@@ -471,15 +480,15 @@ else # $(HAS_SPEC)=true
   # Remove any javac server logs and port files. This
   # prevents a new make run to reuse the previous servers.
   define PrepareSmartJavac
-    $(if $(SJAVAC_SERVER_DIR), \
-      $(RM) -r $(SJAVAC_SERVER_DIR) 2> /dev/null && \
-      $(MKDIR) -p $(SJAVAC_SERVER_DIR) \
+    $(if $(JAVAC_SERVER_DIR), \
+      $(RM) -r $(JAVAC_SERVER_DIR) 2> /dev/null && \
+      $(MKDIR) -p $(JAVAC_SERVER_DIR) \
     )
   endef

   define CleanupSmartJavac
-    [ -f $(SJAVAC_SERVER_DIR)/server.port ] && $(ECHO) Stopping sjavac server && \
-        $(TOUCH) $(SJAVAC_SERVER_DIR)/server.port.stop; true
+    [ -f $(JAVAC_SERVER_DIR)/server.port ] && $(ECHO) Stopping sjavac server && \
+        $(TOUCH) $(JAVAC_SERVER_DIR)/server.port.stop; true
   endef

   ifeq ($(call isBuildOs, windows), true)
@@ -488,7 +497,7 @@ else # $(HAS_SPEC)=true
   # synchronization process, wait for a while and hope it helps. This is only
   # used by build comparisons.
   define WaitForSmartJavacFinish
-    $(if $(SJAVAC_SERVER_DIR), \
+    $(if $(JAVAC_SERVER_DIR), \
       sleep 5\
     )
   endef
make/autoconf/boot-jdk.m4

@@ -74,7 +74,8 @@ AC_DEFUN([BOOTJDK_DO_CHECK],
       BOOT_JDK_FOUND=no
     else
       # Oh, this is looking good! We probably have found a proper JDK. Is it the correct version?
-      BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java$EXE_SUFFIX" $USER_BOOT_JDK_OPTIONS -version 2>&1 | $HEAD -n 1`
+      # Additional [] needed to keep m4 from mangling shell constructs.
+      [ BOOT_JDK_VERSION=`"$BOOT_JDK/bin/java$EXE_SUFFIX" $USER_BOOT_JDK_OPTIONS -version 2>&1 | $AWK '/version \"[0-9\._\-a-zA-Z]+\"/{print $ 0; exit;}'` ]
       if [ [[ "$BOOT_JDK_VERSION" =~ "Picked up" ]] ]; then
         AC_MSG_NOTICE([You have _JAVA_OPTIONS or JAVA_TOOL_OPTIONS set. This can mess up the build. Please use --with-boot-jdk-jvmargs instead.])
         AC_MSG_NOTICE([Java reports: "$BOOT_JDK_VERSION".])
@@ -529,7 +530,8 @@ AC_DEFUN([BOOTJDK_CHECK_BUILD_JDK],
       BUILD_JDK_FOUND=no
     else
       # Oh, this is looking good! We probably have found a proper JDK. Is it the correct version?
-      BUILD_JDK_VERSION=`"$BUILD_JDK/bin/java" -version 2>&1 | $HEAD -n 1`
+      # Additional [] needed to keep m4 from mangling shell constructs.
+      [ BUILD_JDK_VERSION=`"$BUILD_JDK/bin/java" -version 2>&1 | $AWK '/version \"[0-9\._\-a-zA-Z]+\"/{print $ 0; exit;}'` ]

       # Extra M4 quote needed to protect [] in grep expression.
       [FOUND_CORRECT_VERSION=`echo $BUILD_JDK_VERSION | $EGREP "\"$VERSION_FEATURE([\.+-].*)?\""`]
make/autoconf/bootcycle-spec.gmk.in

@@ -44,7 +44,8 @@ BOOT_JDK := $(JDK_IMAGE_DIR)
 # The bootcycle build has a different output directory
 OLD_OUTPUTDIR:=@OUTPUTDIR@
 OUTPUTDIR:=$(OLD_OUTPUTDIR)/bootcycle-build
-SJAVAC_SERVER_DIR:=$(patsubst $(OLD_OUTPUTDIR)%, $(OUTPUTDIR)%, $(SJAVAC_SERVER_DIR))
+# No spaces in patsubst to avoid leading space in variable
+JAVAC_SERVER_DIR:=$(patsubst $(OLD_OUTPUTDIR)%,$(OUTPUTDIR)%,$(JAVAC_SERVER_DIR))

 JAVA_CMD:=$(BOOT_JDK)/bin/java
 JAVAC_CMD:=$(BOOT_JDK)/bin/javac
make/autoconf/build-performance.m4

@@ -32,7 +32,12 @@ AC_DEFUN([BPERF_CHECK_CORES],
   if test -f /proc/cpuinfo; then
     # Looks like a Linux (or cygwin) system
     NUM_CORES=`cat /proc/cpuinfo | grep -c processor`
-    FOUND_CORES=yes
+    if test "$NUM_CORES" -eq "0"; then
+      NUM_CORES=`cat /proc/cpuinfo | grep -c ^CPU`
+    fi
+    if test "$NUM_CORES" -ne "0"; then
+      FOUND_CORES=yes
+    fi
   elif test -x /usr/sbin/psrinfo; then
     # Looks like a Solaris system
     NUM_CORES=`/usr/sbin/psrinfo -v | grep -c on-line`
make/autoconf/configure.ac

@@ -249,6 +249,7 @@ JDKOPT_ENABLE_DISABLE_GENERATE_CLASSLIST
 JDKOPT_EXCLUDE_TRANSLATIONS
 JDKOPT_ENABLE_DISABLE_MANPAGES
 JDKOPT_ENABLE_DISABLE_CDS_ARCHIVE
+JDKOPT_SETUP_REPRODUCIBLE_BUILD

 ###############################################################################
 #
make/autoconf/jdk-options.m4

@@ -630,3 +630,59 @@ AC_DEFUN([JDKOPT_ALLOW_ABSOLUTE_PATHS_IN_OUTPUT],

   AC_SUBST(ALLOW_ABSOLUTE_PATHS_IN_OUTPUT)
 ])
+
+################################################################################
+#
+# Check and set options related to reproducible builds.
+#
+AC_DEFUN_ONCE([JDKOPT_SETUP_REPRODUCIBLE_BUILD],
+[
+  AC_ARG_WITH([source-date], [AS_HELP_STRING([--with-source-date],
+      [how to set SOURCE_DATE_EPOCH ('updated', 'current', 'version' a timestamp or an ISO-8601 date) @<:@updated@:>@])],
+      [with_source_date_present=true], [with_source_date_present=false])
+
+  AC_MSG_CHECKING([what source date to use])
+
+  if test "x$with_source_date" = xyes; then
+    AC_MSG_ERROR([--with-source-date must have a value])
+  elif test "x$with_source_date" = xupdated || test "x$with_source_date" = x; then
+    # Tell the makefiles to update at each build
+    SOURCE_DATE=updated
+    AC_MSG_RESULT([determined at build time, from 'updated'])
+  elif test "x$with_source_date" = xcurrent; then
+    # Set the current time
+    SOURCE_DATE=$($DATE +"%s")
+    AC_MSG_RESULT([$SOURCE_DATE, from 'current'])
+  elif test "x$with_source_date" = xversion; then
+    # Use the date from version-numbers
+    UTIL_GET_EPOCH_TIMESTAMP(SOURCE_DATE, $DEFAULT_VERSION_DATE)
+    if test "x$SOURCE_DATE" = x; then
+      AC_MSG_RESULT([unavailable])
+      AC_MSG_ERROR([Cannot convert DEFAULT_VERSION_DATE to timestamp])
+    fi
+    AC_MSG_RESULT([$SOURCE_DATE, from 'version'])
+  else
+    # It's a timestamp, an ISO-8601 date, or an invalid string
+    # Additional [] needed to keep m4 from mangling shell constructs.
+    if [ [[ "$with_source_date" =~ ^[0-9][0-9]*$ ]] ] ; then
+      SOURCE_DATE=$with_source_date
+      AC_MSG_RESULT([$SOURCE_DATE, from timestamp on command line])
+    else
+      UTIL_GET_EPOCH_TIMESTAMP(SOURCE_DATE, $with_source_date)
+      if test "x$SOURCE_DATE" != x; then
+        AC_MSG_RESULT([$SOURCE_DATE, from ISO-8601 date on command line])
+      else
+        AC_MSG_RESULT([unavailable])
+        AC_MSG_ERROR([Cannot parse date string "$with_source_date"])
+      fi
+    fi
+  fi
+
+  UTIL_ARG_ENABLE(NAME: reproducible-build, DEFAULT: $with_source_date_present,
+      RESULT: ENABLE_REPRODUCIBLE_BUILD,
+      DESC: [enable reproducible builds (not yet fully functional)],
+      DEFAULT_DESC: [enabled if --with-source-date is given])
+
+  AC_SUBST(SOURCE_DATE)
+  AC_SUBST(ENABLE_REPRODUCIBLE_BUILD)
+])
make/autoconf/jdk-version.m4

@@ -36,7 +36,7 @@
 AC_DEFUN([JDKVER_CHECK_AND_SET_NUMBER],
 [
   # Additional [] needed to keep m4 from mangling shell constructs.
-  if [ ! [[ "$2" =~ ^0*([1-9][0-9]*)|(0)$ ]] ] ; then
+  if [ ! [[ "$2" =~ ^0*([1-9][0-9]*)$|^0*(0)$ ]] ] ; then
     AC_MSG_ERROR(["$2" is not a valid numerical value for $1])
   fi
   # Extract the version number without leading zeros.
make/autoconf/spec.gmk.in

@@ -119,6 +119,9 @@ OPENJDK_MODULE_TARGET_PLATFORM:=@OPENJDK_MODULE_TARGET_PLATFORM@
 RELEASE_FILE_OS_NAME:=@RELEASE_FILE_OS_NAME@
 RELEASE_FILE_OS_ARCH:=@RELEASE_FILE_OS_ARCH@

+SOURCE_DATE := @SOURCE_DATE@
+ENABLE_REPRODUCIBLE_BUILD := @ENABLE_REPRODUCIBLE_BUILD@
+
 LIBM:=@LIBM@
 LIBDL:=@LIBDL@
@@ -356,9 +359,9 @@ BOOT_JDK_SOURCETARGET:=@BOOT_JDK_SOURCETARGET@
 NUM_CORES:=@NUM_CORES@
 MEMORY_SIZE:=@MEMORY_SIZE@
 ENABLE_JAVAC_SERVER:=@ENABLE_JAVAC_SERVER@
-# Store sjavac server synchronization files here, and
-# the sjavac server log files.
-SJAVAC_SERVER_DIR=$(MAKESUPPORT_OUTPUTDIR)/javacservers
+# Store javac server synchronization files here, and
+# the javac server log files.
+JAVAC_SERVER_DIR=$(MAKESUPPORT_OUTPUTDIR)/javacservers

 # Number of parallel jobs to use for compilation
 JOBS?=@JOBS@
make/autoconf/util.m4

@@ -227,6 +227,29 @@ AC_DEFUN([UTIL_GET_MATCHING_VALUES],
   fi
 ])

+###############################################################################
+# Converts an ISO-8601 date/time string to a unix epoch timestamp. If no
+# suitable conversion method was found, an empty string is returned.
+#
+# Sets the specified variable to the resulting list.
+#
+# $1: result variable name
+# $2: input date/time string
+AC_DEFUN([UTIL_GET_EPOCH_TIMESTAMP],
+[
+  timestamp=$($DATE --utc --date=$2 +"%s" 2> /dev/null)
+  if test "x$timestamp" = x; then
+    # GNU date format did not work, try BSD date options
+    timestamp=$($DATE -j -f "%F %T" "$2" "+%s" 2> /dev/null)
+    if test "x$timestamp" = x; then
+      # Perhaps the time was missing
+      timestamp=$($DATE -j -f "%F %T" "$2 00:00:00" "+%s" 2> /dev/null)
+      # If this did not work, we give up and return the empty string
+    fi
+  fi
+  $1=$timestamp
+])
+
 ###############################################################################
 # Sort a space-separated list, and remove duplicates.
 #
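Note: for readers who want the same conversion in code, here is a hedged C++ analogue of UTIL_GET_EPOCH_TIMESTAMP (illustrative only; it leans on POSIX strptime and the BSD/GNU timegm extension, much as the macro leans on platform-specific date(1) options).

#include <time.h>    // strptime (POSIX), timegm (BSD/GNU extension)
#include <cstring>

long iso8601_to_epoch(const char* s) {
  struct tm tm;
  std::memset(&tm, 0, sizeof(tm));
  // Try "date time" first, then date only -- the same fallback order
  // the m4 macro uses with date(1).
  if (strptime(s, "%Y-%m-%d %H:%M:%S", &tm) == nullptr) {
    std::memset(&tm, 0, sizeof(tm));
    if (strptime(s, "%Y-%m-%d", &tm) == nullptr) {
      return -1;  // unparseable; the macro returns "" here instead
    }
  }
  return (long)timegm(&tm);  // interpret the parsed fields as UTC
}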
@@ -320,12 +343,14 @@ AC_DEFUN([UTIL_ALIASED_ARG_ENABLE],
 #   option should be available. Must set AVAILABLE to 'false' if not.
 # IF_GIVEN: An optional code block to execute if the option was given on the
 #   command line (regardless of the value).
+# IF_NOT_GIVEN: An optional code block to execute if the option was not given
+#   on the command line (regardless of the value).
 # IF_ENABLED: An optional code block to execute if the option is turned on.
 # IF_DISABLED: An optional code block to execute if the option is turned off.
 #
 UTIL_DEFUN_NAMED([UTIL_ARG_ENABLE],
   [*NAME RESULT DEFAULT AVAILABLE DESC DEFAULT_DESC CHECKING_MSG
-  CHECK_AVAILABLE IF_GIVEN IF_ENABLED IF_DISABLED], [$@],
+  CHECK_AVAILABLE IF_GIVEN IF_NOT_GIVEN IF_ENABLED IF_DISABLED], [$@],
 [
   ##########################
   # Part 1: Set up m4 macros
@@ -356,6 +381,7 @@ UTIL_DEFUN_NAMED([UTIL_ARG_ENABLE],
   # tripping up bash.
   m4_define([ARG_CHECK_AVAILABLE], m4_if(ARG_CHECK_AVAILABLE, , :, ARG_CHECK_AVAILABLE))
   m4_define([ARG_IF_GIVEN], m4_if(ARG_IF_GIVEN, , :, ARG_IF_GIVEN))
+  m4_define([ARG_IF_NOT_GIVEN], m4_if(ARG_IF_NOT_GIVEN, , :, ARG_IF_NOT_GIVEN))
  m4_define([ARG_IF_ENABLED], m4_if(ARG_IF_ENABLED, , :, ARG_IF_ENABLED))
  m4_define([ARG_IF_DISABLED], m4_if(ARG_IF_DISABLED, , :, ARG_IF_DISABLED))
@@ -425,6 +451,8 @@ UTIL_DEFUN_NAMED([UTIL_ARG_ENABLE],
   # Execute result payloads, if present
   if test x$ARG_GIVEN = xtrue; then
     ARG_IF_GIVEN
+  else
+    ARG_IF_NOT_GIVEN
   fi

   if test x$ARG_RESULT = xtrue; then
@@ -573,7 +601,7 @@ AC_DEFUN([UTIL_REQUIRE_BUILTIN_PROGS],
   UTIL_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2, , $3)])
   if test "x[$]$1" = x; then
     AC_MSG_NOTICE([Required tool $2 not found in PATH, checking built-in])
-    if help $2 > /dev/null 2>&1; then
+    if command -v $2 > /dev/null 2>&1; then
       AC_MSG_NOTICE([Found $2 as shell built-in. Using it])
       $1="$2"
     else
make/common/JavaCompilation.gmk

@@ -216,7 +216,7 @@ define SetupJavaCompilationBody

     # The port file contains the tcp/ip on which the server listens
     # and the cookie necessary to talk to the server.
-    $1_JAVA_SERVER_FLAGS := --server:portfile=$$(SJAVAC_SERVER_DIR)/server.port,sjavac=$$($1_ESCAPED_CMD)
+    $1_JAVA_SERVER_FLAGS := --server:portfile=$$(JAVAC_SERVER_DIR)/server.port,sjavac=$$($1_ESCAPED_CMD)

     # Always use small to launch client
     $1_JAVAC_CMD := $$(JAVA_SMALL) $$($1_JAVA_FLAGS) $$($1_JAVAC) $$($1_JAVA_SERVER_FLAGS)
make/conf/jib-profiles.js

@@ -986,7 +986,7 @@ var getJibProfilesDependencies = function (input, common) {
         macosx_x64: "Xcode10.1-MacOSX10.14+1.0",
         solaris_x64: "SS12u4-Solaris11u1+1.0",
         solaris_sparcv9: "SS12u6-Solaris11u3+1.0",
-        windows_x64: "VS2017-15.9.16+1.0",
+        windows_x64: "VS2019-16.5.3+1.0",
         linux_aarch64: "gcc9.2.0-OL7.6+1.0",
         linux_arm: "gcc8.2.0-Fedora27+1.0",
         linux_ppc64le: "gcc8.2.0-Fedora27+1.0",
make/hotspot/symbols/symbols-unix

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -122,6 +122,7 @@ JVM_GetNestMembers
 JVM_GetPrimitiveArrayElement
 JVM_GetProperties
 JVM_GetProtectionDomain
+JVM_GetRandomSeedForCDSDump
 JVM_GetRecordComponents
 JVM_GetSimpleBinaryName
 JVM_GetStackAccessControlContext
src/hotspot/cpu/zero

@@ -24,6 +24,7 @@
  */

 #include "precompiled.hpp"
+#include "classfile/javaClasses.inline.hpp"
 #include "interpreter/cppInterpreterGenerator.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
src/hotspot/os/aix/os_aix.cpp

@@ -1180,14 +1180,6 @@ void os::shutdown() {
 void os::abort(bool dump_core, void* siginfo, const void* context) {
   os::shutdown();
   if (dump_core) {
-#ifndef PRODUCT
-    fdStream out(defaultStream::output_fd());
-    out.print_raw("Current thread is ");
-    char buf[16];
-    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
-    out.print_raw_cr(buf);
-    out.print_raw_cr("Dumping core ...");
-#endif
     ::abort(); // dump core
   }
@@ -3549,10 +3541,9 @@ jint os::init_2(void) {
     return JNI_ERR;
   }

-  if (UseNUMA) {
-    UseNUMA = false;
-    warning("NUMA optimizations are not available on this OS.");
-  }
+  // Not supported.
+  FLAG_SET_ERGO(UseNUMA, false);
+  FLAG_SET_ERGO(UseNUMAInterleaving, false);

   if (MaxFDLimit) {
     // Set the number of file descriptors to max. print out error
src/hotspot/os/bsd/os_bsd.cpp

@@ -1070,14 +1070,6 @@ void os::shutdown() {
 void os::abort(bool dump_core, void* siginfo, const void* context) {
   os::shutdown();
   if (dump_core) {
-#ifndef PRODUCT
-    fdStream out(defaultStream::output_fd());
-    out.print_raw("Current thread is ");
-    char buf[16];
-    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
-    out.print_raw_cr(buf);
-    out.print_raw_cr("Dumping core ...");
-#endif
     ::abort(); // dump core
   }
@@ -3140,6 +3132,10 @@ jint os::init_2(void) {
     return JNI_ERR;
   }

+  // Not supported.
+  FLAG_SET_ERGO(UseNUMA, false);
+  FLAG_SET_ERGO(UseNUMAInterleaving, false);
+
   if (MaxFDLimit) {
     // set the number of file descriptors to max. print out error
     // if getrlimit/setrlimit fails but continue regardless.
src/hotspot/os/linux/os_linux.cpp

@@ -1520,14 +1520,6 @@ void os::abort(bool dump_core, void* siginfo, const void* context) {
     if (DumpPrivateMappingsInCore) {
       ClassLoader::close_jrt_image();
     }
-#ifndef PRODUCT
-    fdStream out(defaultStream::output_fd());
-    out.print_raw("Current thread is ");
-    char buf[16];
-    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
-    out.print_raw_cr(buf);
-    out.print_raw_cr("Dumping core ...");
-#endif
     ::abort(); // dump core
   }
@@ -2060,7 +2052,7 @@ static bool _print_ascii_file(const char* filename, outputStream* st, const char
 }

 static void _print_ascii_file_h(const char* header, const char* filename, outputStream* st) {
-  st->print("%s", header);
+  st->print_cr("%s:", header);
   if (!_print_ascii_file(filename, st)) {
     st->print_cr("<Not Available>");
   }
@@ -2291,39 +2283,24 @@ void os::Linux::print_libversion_info(outputStream* st) {

 void os::Linux::print_proc_sys_info(outputStream* st) {
   st->cr();
-  st->print_cr("/proc/sys/kernel/threads-max (system-wide limit on the number of threads):");
-  _print_ascii_file("/proc/sys/kernel/threads-max", st);
-  st->cr();
-  st->cr();
-
-  st->print_cr("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have):");
-  _print_ascii_file("/proc/sys/vm/max_map_count", st);
-  st->cr();
-  st->cr();
-
-  st->print_cr("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers):");
-  _print_ascii_file("/proc/sys/kernel/pid_max", st);
-  st->cr();
-  st->cr();
+  _print_ascii_file_h("/proc/sys/kernel/threads-max (system-wide limit on the number of threads)",
+                      "/proc/sys/kernel/threads-max", st);
+  _print_ascii_file_h("/proc/sys/vm/max_map_count (maximum number of memory map areas a process may have)",
+                      "/proc/sys/vm/max_map_count", st);
+  _print_ascii_file_h("/proc/sys/kernel/pid_max (system-wide limit on number of process identifiers)",
+                      "/proc/sys/kernel/pid_max", st);
 }

 void os::Linux::print_full_memory_info(outputStream* st) {
-  st->print("\n/proc/meminfo:\n");
-  _print_ascii_file("/proc/meminfo", st);
+  _print_ascii_file_h("\n/proc/meminfo", "/proc/meminfo", st);
   st->cr();

   // some information regarding THPs; for details see
   // https://www.kernel.org/doc/Documentation/vm/transhuge.txt
-  st->print_cr("/sys/kernel/mm/transparent_hugepage/enabled:");
-  if (!_print_ascii_file("/sys/kernel/mm/transparent_hugepage/enabled", st)) {
-    st->print_cr("  <Not Available>");
-  }
-  st->cr();
-  st->print_cr("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter):");
-  if (!_print_ascii_file("/sys/kernel/mm/transparent_hugepage/defrag", st)) {
-    st->print_cr("  <Not Available>");
-  }
-  st->cr();
+  _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/enabled",
+                      "/sys/kernel/mm/transparent_hugepage/enabled", st);
+  _print_ascii_file_h("/sys/kernel/mm/transparent_hugepage/defrag (defrag/compaction efforts parameter)",
+                      "/sys/kernel/mm/transparent_hugepage/defrag", st);
 }

 void os::Linux::print_ld_preload_file(outputStream* st) {
@@ -2510,8 +2487,8 @@ static bool print_model_name_and_flags(outputStream* st, char* buf, size_t bufle

 // additional information about CPU e.g. available frequency ranges
 static void print_sys_devices_cpu_info(outputStream* st, char* buf, size_t buflen) {
-  _print_ascii_file_h("Online cpus:", "/sys/devices/system/cpu/online", st);
-  _print_ascii_file_h("Offline cpus:", "/sys/devices/system/cpu/offline", st);
+  _print_ascii_file_h("Online cpus", "/sys/devices/system/cpu/online", st);
+  _print_ascii_file_h("Offline cpus", "/sys/devices/system/cpu/offline", st);

   if (ExtensiveErrorReports) {
     // cache related info (cpu 0, should be similar for other CPUs)
@@ -2525,44 +2502,41 @@ static void print_sys_devices_cpu_info(outputStream* st, char* buf, size_t bufle
       snprintf(hbuf_size, 60, "/sys/devices/system/cpu/cpu0/cache/index%u/size", i);
       snprintf(hbuf_coherency_line_size, 80, "/sys/devices/system/cpu/cpu0/cache/index%u/coherency_line_size", i);
       if (file_exists(hbuf_level)) {
-        _print_ascii_file_h("cache level:", hbuf_level, st);
-        _print_ascii_file_h("cache type:", hbuf_type, st);
-        _print_ascii_file_h("cache size:", hbuf_size, st);
-        _print_ascii_file_h("cache coherency line size:", hbuf_coherency_line_size, st);
+        _print_ascii_file_h("cache level", hbuf_level, st);
+        _print_ascii_file_h("cache type", hbuf_type, st);
+        _print_ascii_file_h("cache size", hbuf_size, st);
+        _print_ascii_file_h("cache coherency line size", hbuf_coherency_line_size, st);
       }
     }
   }

   // we miss the cpufreq entries on Power and s390x
 #if defined(IA32) || defined(AMD64)
-  _print_ascii_file_h("BIOS frequency limitation:", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st);
-  _print_ascii_file_h("Frequency switch latency (ns):", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st);
-  _print_ascii_file_h("Available cpu frequencies:", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st);
+  _print_ascii_file_h("BIOS frequency limitation", "/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", st);
+  _print_ascii_file_h("Frequency switch latency (ns)", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency", st);
+  _print_ascii_file_h("Available cpu frequencies", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", st);
   // min and max should be in the Available range but still print them (not all info might be available for all kernels)
   if (ExtensiveErrorReports) {
-    _print_ascii_file_h("Maximum cpu frequency:", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", st);
-    _print_ascii_file_h("Minimum cpu frequency:", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq", st);
-    _print_ascii_file_h("Current cpu frequency:", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", st);
+    _print_ascii_file_h("Maximum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", st);
+    _print_ascii_file_h("Minimum cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_min_freq", st);
+    _print_ascii_file_h("Current cpu frequency", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", st);
   }
   // governors are power schemes, see https://wiki.archlinux.org/index.php/CPU_frequency_scaling
   if (ExtensiveErrorReports) {
-    _print_ascii_file_h("Available governors:", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", st);
+    _print_ascii_file_h("Available governors", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", st);
   }
-  _print_ascii_file_h("Current governor:", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", st);
+  _print_ascii_file_h("Current governor", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", st);
   // Core performance boost, see https://www.kernel.org/doc/Documentation/cpu-freq/boost.txt
   // Raise operating frequency of some cores in a multi-core package if certain conditions apply, e.g.
   // whole chip is not fully utilized
-  _print_ascii_file_h("Core performance/turbo boost:", "/sys/devices/system/cpu/cpufreq/boost", st);
+  _print_ascii_file_h("Core performance/turbo boost", "/sys/devices/system/cpu/cpufreq/boost", st);
 #endif
 }

 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
   // Only print the model name if the platform provides this as a summary
   if (!print_model_name_and_flags(st, buf, buflen)) {
-    st->print("\n/proc/cpuinfo:\n");
-    if (!_print_ascii_file("/proc/cpuinfo", st)) {
-      st->print_cr("  <Not Available>");
-    }
+    _print_ascii_file_h("\n/proc/cpuinfo", "/proc/cpuinfo", st);
   }
   print_sys_devices_cpu_info(st, buf, buflen);
 }
@@ -5175,7 +5149,8 @@ void os::Linux::numa_init() {
   // bitmask when externally configured to run on all or fewer nodes.

   if (!Linux::libnuma_init()) {
-    UseNUMA = false;
+    FLAG_SET_ERGO(UseNUMA, false);
+    FLAG_SET_ERGO(UseNUMAInterleaving, false); // Also depends on libnuma.
   } else {
     if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) {
       // If there's only one node (they start from 0) or if the process
@@ -5208,6 +5183,11 @@ void os::Linux::numa_init() {
     }
   }

+  // When NUMA requested, not-NUMA-aware allocations default to interleaving.
+  if (UseNUMA && !UseNUMAInterleaving) {
+    FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true);
+  }
+
   if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
     // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
     // we can make the adaptive lgrp chunk resizing work. If the user specified both
@@ -5272,7 +5252,7 @@ jint os::init_2(void) {
   log_info(os)("HotSpot is running with %s, %s",
                Linux::glibc_version(), Linux::libpthread_version());

-  if (UseNUMA) {
+  if (UseNUMA || UseNUMAInterleaving) {
     Linux::numa_init();
   }
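Note: FLAG_SET_ERGO_IF_DEFAULT is the key to the NUMA hunks above: UseNUMAInterleaving is only flipped on when the user did not set it explicitly. A minimal sketch of that ergonomics pattern, with a hypothetical Flag type standing in for HotSpot's real flag machinery:

// Sketch of the ergonomics pattern above -- hypothetical Flag type,
// not HotSpot's real flag machinery.
struct Flag {
  bool value;
  bool set_on_command_line;
};

// Mirrors FLAG_SET_ERGO_IF_DEFAULT: ergonomics may override a default,
// but never an explicit user choice.
void set_ergo_if_default(Flag& f, bool v) {
  if (!f.set_on_command_line) {
    f.value = v;
  }
}

// E.g. when UseNUMA is requested, interleaving defaults to on:
//   set_ergo_if_default(UseNUMAInterleaving, true);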
src/hotspot/os/posix/os_posix.cpp

@@ -1967,7 +1967,8 @@ void os::PlatformEvent::park() {       // AKA "down()"
     while (_event < 0) {
       // OS-level "spurious wakeups" are ignored
       status = pthread_cond_wait(_cond, _mutex);
-      assert_status(status == 0, status, "cond_wait");
+      assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
+                    status, "cond_wait");
     }
     --_nParked;
@@ -2158,7 +2159,8 @@ void Parker::park(bool isAbsolute, jlong time) {
   if (time == 0) {
     _cur_index = REL_INDEX; // arbitrary choice when not timed
     status = pthread_cond_wait(&_cond[_cur_index], _mutex);
-    assert_status(status == 0, status, "cond_timedwait");
+    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
+                  status, "cond_wait");
   }
   else {
     _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
@@ -2339,7 +2341,8 @@ int os::PlatformMonitor::wait(jlong millis) {
     return ret;
   } else {
     int status = pthread_cond_wait(cond(), mutex());
-    assert_status(status == 0, status, "cond_wait");
+    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
+                  status, "cond_wait");
     return OS_OK;
   }
 }
src/hotspot/os/solaris/os_solaris.cpp

@@ -1147,14 +1147,6 @@ void os::shutdown() {
 void os::abort(bool dump_core, void* siginfo, const void* context) {
   os::shutdown();
   if (dump_core) {
-#ifndef PRODUCT
-    fdStream out(defaultStream::output_fd());
-    out.print_raw("Current thread is ");
-    char buf[16];
-    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
-    out.print_raw_cr(buf);
-    out.print_raw_cr("Dumping core ...");
-#endif
     ::abort(); // dump core (for debugging)
   }
@@ -3916,7 +3908,7 @@ jint os::init_2(void) {

   if (UseNUMA) {
     if (!Solaris::liblgrp_init()) {
-      UseNUMA = false;
+      FLAG_SET_ERGO(UseNUMA, false);
     } else {
       size_t lgrp_limit = os::numa_get_groups_num();
       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
@@ -3930,6 +3922,11 @@ jint os::init_2(void) {
     }
   }

+  // When NUMA requested, not-NUMA-aware allocations default to interleaving.
+  if (UseNUMA && !UseNUMAInterleaving) {
+    FLAG_SET_ERGO_IF_DEFAULT(UseNUMAInterleaving, true);
+  }
+
   Solaris::signal_sets_init();
   Solaris::init_signal_mem();
   Solaris::install_signal_handlers();
src/hotspot/os/windows/os_windows.cpp

@@ -4096,10 +4096,13 @@ jint os::init_2(void) {
     UseNUMA = false; // We don't fully support this yet
   }

-  if (UseNUMAInterleaving) {
-    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
-    bool success = numa_interleaving_init();
-    if (!success) UseNUMAInterleaving = false;
+  if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
+    if (!numa_interleaving_init()) {
+      FLAG_SET_ERGO(UseNUMAInterleaving, false);
+    } else if (!UseNUMAInterleaving) {
+      // When NUMA requested, not-NUMA-aware allocations default to interleaving.
+      FLAG_SET_ERGO(UseNUMAInterleaving, true);
+    }
   }

   if (initSock() != JNI_OK) {
src/hotspot/share/c1/c1_Runtime1.cpp

@@ -30,6 +30,7 @@
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "classfile/javaClasses.inline.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeBlob.hpp"
src/hotspot/share/classfile/classListParser.cpp

@@ -27,6 +27,7 @@
 #include "jimage.hpp"
 #include "classfile/classListParser.hpp"
 #include "classfile/classLoaderExt.hpp"
+#include "classfile/javaClasses.inline.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
src/hotspot/share/classfile/classLoader.cpp

@@ -95,6 +95,7 @@ static FindEntry_t FindEntry = NULL;
 static ReadEntry_t ReadEntry = NULL;
 static GetNextEntry_t GetNextEntry = NULL;
 static Crc32_t Crc32 = NULL;
+int ClassLoader::_libzip_loaded = 0;

 // Entry points for jimage.dll for loading jimage file entries
@@ -747,6 +748,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str
       // enable call to C land
       ThreadToNativeFromVM ttn(thread);
       HandleMark hm(thread);
+      load_zip_library_if_needed();
       zip = (*ZipOpen)(canonical_path, &error_msg);
     }
     if (zip != NULL && error_msg == NULL) {
@@ -796,6 +798,7 @@ ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path, bo
       JavaThread* thread = JavaThread::current();
       ThreadToNativeFromVM ttn(thread);
       HandleMark hm(thread);
+      load_zip_library_if_needed();
       zip = (*ZipOpen)(canonical_path, &error_msg);
     }
     if (zip != NULL && error_msg == NULL) {
@@ -967,6 +970,14 @@ void ClassLoader::load_java_library() {
   CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, dll_lookup(javalib_handle, "JDK_Canonicalize", NULL));
 }

+void ClassLoader::release_load_zip_library() {
+  MutexLocker locker(Zip_lock, Monitor::_no_safepoint_check_flag);
+  if (_libzip_loaded == 0) {
+    load_zip_library();
+    Atomic::release_store(&_libzip_loaded, 1);
+  }
+}
+
 void ClassLoader::load_zip_library() {
   assert(ZipOpen == NULL, "should not load zip library twice");
   char path[JVM_MAXPATHLEN];
@@ -1008,6 +1019,7 @@ void ClassLoader::load_jimage_library() {
 }

 int ClassLoader::crc32(int crc, const char* buf, int len) {
+  load_zip_library_if_needed();
   return (*Crc32)(crc, (const jbyte*)buf, len);
 }
@@ -1466,8 +1478,6 @@ void ClassLoader::initialize() {

   // lookup java library entry points
   load_java_library();
-  // lookup zip library entry points
-  load_zip_library();
   // jimage library entry points are loaded below, in lookup_vm_options
   setup_bootstrap_search_path();
 }
src/hotspot/share/classfile/classLoader.hpp

@@ -252,6 +252,11 @@ class ClassLoader: AllStatic {
   static void load_zip_library();
   static void load_jimage_library();

+ private:
+  static int _libzip_loaded; // used to sync loading zip.
+  static void release_load_zip_library();
+  static inline void load_zip_library_if_needed();
+
  public:
   static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
                                                  bool throw_exception,
src/hotspot/share/classfile/classLoader.inline.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,12 @@ inline ClassPathEntry* ClassLoader::classpath_entry(int n) {
   }
 }

+inline void ClassLoader::load_zip_library_if_needed() {
+  if (Atomic::load_acquire(&_libzip_loaded) == 0) {
+    release_load_zip_library();
+  }
+}
+
 #if INCLUDE_CDS

 // Helper function used by CDS code to get the number of boot classpath
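Note: load_zip_library_if_needed() and release_load_zip_library() together form a classic double-checked initialization: a lock-free acquire load on the fast path, and a mutex plus release store on the slow path. A self-contained C++ sketch of the same idiom, with std::atomic and std::mutex standing in for HotSpot's Atomic wrappers and Zip_lock (names here are illustrative):

#include <atomic>
#include <mutex>

static std::atomic<int> lib_loaded{0};
static std::mutex lib_lock;                 // plays the role of Zip_lock

static void do_expensive_load() { /* dlopen, symbol lookup, ... */ }

void load_library_if_needed() {
  // Fast path: an acquire load pairs with the release store below, so a
  // reader that sees 1 also sees every side effect of do_expensive_load().
  if (lib_loaded.load(std::memory_order_acquire) == 0) {
    std::lock_guard<std::mutex> guard(lib_lock);
    if (lib_loaded.load(std::memory_order_relaxed) == 0) {  // recheck under lock
      do_expensive_load();
      lib_loaded.store(1, std::memory_order_release);
    }
  }
}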
src/hotspot/share/classfile/javaClasses.cpp

@@ -1495,14 +1495,6 @@ oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, Basic
 }


-Klass* java_lang_Class::as_Klass(oop java_class) {
-  //%note memory_2
-  assert(java_lang_Class::is_instance(java_class), "must be a Class object");
-  Klass* k = ((Klass*)java_class->metadata_field(_klass_offset));
-  assert(k == NULL || k->is_klass(), "type check");
-  return k;
-}
-
 Klass* java_lang_Class::as_Klass_raw(oop java_class) {
   //%note memory_2
   assert(java_lang_Class::is_instance(java_class), "must be a Class object");
src/hotspot/share/classfile/javaClasses.inline.hpp

@@ -211,6 +211,14 @@ inline bool java_lang_Class::is_instance(oop obj) {
   return obj != NULL && obj->klass() == SystemDictionary::Class_klass();
 }

+inline Klass* java_lang_Class::as_Klass(oop java_class) {
+  //%note memory_2
+  assert(java_lang_Class::is_instance(java_class), "must be a Class object");
+  Klass* k = ((Klass*)java_class->metadata_field(_klass_offset));
+  assert(k == NULL || k->is_klass(), "type check");
+  return k;
+}
+
 inline bool java_lang_Class::is_primitive(oop java_class) {
   // should assert:
   //assert(java_lang_Class::is_instance(java_class), "must be a Class object");
src/hotspot/share/classfile/symbolTable.cpp

@@ -176,6 +176,11 @@ void SymbolTable::create_table ()  {
 }

 void SymbolTable::delete_symbol(Symbol* sym) {
+  if (Arguments::is_dumping_archive()) {
+    // Do not delete symbols as we may be in the middle of preparing the
+    // symbols for dumping.
+    return;
+  }
   if (sym->is_permanent()) {
     MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
     // Deleting permanent symbol should not occur very often (insert race condition),
@@ -221,12 +226,18 @@ Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap) {

   Symbol* sym;
+  if (Arguments::is_dumping_archive()) {
+    // Need to make all symbols permanent -- or else some symbols may be GC'ed
+    // during the archive dumping code that's executed outside of a safepoint.
+    c_heap = false;
+  }
   if (c_heap) {
     // refcount starts as 1
     sym = new (len) Symbol((const u1*)name, len, 1);
     assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted");
   } else if (DumpSharedSpaces) {
     // See comments inside Symbol::operator new(size_t, int)
     sym = new (len) Symbol((const u1*)name, len, PERM_REFCOUNT);
     assert(sym != NULL, "new should call vm_exit_out_of_memory if failed to allocate symbol during DumpSharedSpaces");
   } else {
     // Allocate to global arena
     MutexLocker ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
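Note: the "permanent" symbols above are symbols whose refcount is pinned at a sentinel (PERM_REFCOUNT) so they are never freed, which keeps dump-time symbols alive outside safepoints. A toy sketch of that idea; the sentinel's concrete value here is assumed for illustration only:

#include <cstdint>

static const uint32_t PERM_REFCOUNT = 0x7fffffff;  // assumed sentinel

struct RefCounted {
  uint32_t refcount;

  void decrement() {
    if (refcount == PERM_REFCOUNT) {
      return;  // permanent: dump-time symbols must survive until the dump
    }
    --refcount;  // real code would do this atomically and free at zero
  }
};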
src/hotspot/share/classfile/systemDictionary.cpp

@@ -2336,12 +2336,14 @@ bool SystemDictionary::add_loader_constraint(Symbol* class_name,
  InstanceKlass* klass2 = find_class(d_hash2, constraint_name, dictionary2);
  bool result = constraints()->add_entry(constraint_name, klass1, class_loader1,
                                         klass2, class_loader2);
#if INCLUDE_CDS
  if (Arguments::is_dumping_archive() && klass_being_linked != NULL &&
      !klass_being_linked->is_shared()) {
    SystemDictionaryShared::record_linking_constraint(constraint_name,
                                                      InstanceKlass::cast(klass_being_linked),
                                                      class_loader1, class_loader2, THREAD);
  }
#endif // INCLUDE_CDS
  if (Signature::is_array(class_name)) {
    constraint_name->decrement_refcount();
  }
src/hotspot/share/classfile/systemDictionaryShared.cpp

@@ -174,10 +174,22 @@ public:
   }
 };

+inline unsigned DumpTimeSharedClassTable_hash(InstanceKlass* const& k) {
+  if (DumpSharedSpaces) {
+    // Deterministic archive contents
+    uintx delta = k->name() - MetaspaceShared::symbol_rs_base();
+    return primitive_hash<uintx>(delta);
+  } else {
+    // Deterministic archive is not possible because classes can be loaded
+    // in multiple threads.
+    return primitive_hash<InstanceKlass*>(k);
+  }
+}
+
 class DumpTimeSharedClassTable: public ResourceHashtable<
   InstanceKlass*,
   DumpTimeSharedClassInfo,
-  primitive_hash<InstanceKlass*>,
+  &DumpTimeSharedClassTable_hash,
   primitive_equals<InstanceKlass*>,
   15889, // prime number
   ResourceObj::C_HEAP>
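Note: the hash above keys on k->name() minus a fixed region base rather than on the pointer itself. A small sketch of why that makes dump output deterministic: with address-space randomization the raw pointer changes from run to run, but the delta against the region base is stable when symbols sit at fixed offsets in one reserved region, as at CDS dump time. The mixing function below is a stand-in for primitive_hash, not the real one:

#include <cstdint>

inline unsigned mix(uintptr_t v) {          // stand-in for primitive_hash
  return static_cast<unsigned>(v ^ (v >> 17));
}

unsigned deterministic_hash(const char* sym, const char* region_base) {
  uintptr_t delta = reinterpret_cast<uintptr_t>(sym) -
                    reinterpret_cast<uintptr_t>(region_base);
  return mix(delta);                        // same value in every run
}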
src/hotspot/share/compiler/compileTask.cpp

@@ -33,9 +33,6 @@
 #include "runtime/handles.inline.hpp"

 CompileTask*  CompileTask::_task_free_list = NULL;
-#ifdef ASSERT
-int CompileTask::_num_allocated_tasks = 0;
-#endif

 /**
  * Allocate a CompileTask, from the free list if possible.
@@ -50,8 +47,6 @@ CompileTask* CompileTask::allocate() {
     task->set_next(NULL);
   } else {
     task = new CompileTask();
-    DEBUG_ONLY(_num_allocated_tasks++;)
-    assert (WhiteBoxAPI || JVMCI_ONLY(UseJVMCICompiler ||) _num_allocated_tasks < 10000, "Leaking compilation tasks?");
     task->set_next(NULL);
     task->set_is_free(true);
   }
src/hotspot/share/compiler/compileTask.hpp

@@ -75,10 +75,6 @@ class CompileTask : public CHeapObj<mtCompiler> {

  private:
   static CompileTask* _task_free_list;
-#ifdef ASSERT
-  static int _num_allocated_tasks;
-#endif

   Monitor*     _lock;
   uint         _compile_id;
   Method*      _method;
src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp

@@ -46,7 +46,7 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
   _listener(NULL),
   _storage(rs, used_size, page_size),
   _region_granularity(region_granularity),
-  _commit_map(rs.size() * commit_factor / region_granularity, mtGC),
+  _region_commit_map(rs.size() * commit_factor / region_granularity, mtGC),
   _memory_type(type) {
   guarantee(is_power_of_2(page_size), "must be");
   guarantee(is_power_of_2(region_granularity), "must be");
@@ -88,13 +88,13 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
     if (AlwaysPreTouch) {
       _storage.pretouch(start_page, size_in_pages, pretouch_gang);
     }
-    _commit_map.set_range(start_idx, start_idx + num_regions);
+    _region_commit_map.set_range(start_idx, start_idx + num_regions);
     fire_on_commit(start_idx, num_regions, zero_filled);
   }

   virtual void uncommit_regions(uint start_idx, size_t num_regions) {
     _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
-    _commit_map.clear_range(start_idx, start_idx + num_regions);
+    _region_commit_map.clear_range(start_idx, start_idx + num_regions);
   }
 };
@@ -102,18 +102,26 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
 // than the commit granularity.
 // Basically, the contents of one OS page span several regions.
 class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
  private:
-  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
-   protected:
-     virtual uint default_value() const { return 0; }
-  };
-
   size_t _regions_per_page;

-  CommitRefcountArray _refcounts;
+  size_t region_idx_to_page_idx(uint region_idx) const {
+    return region_idx / _regions_per_page;
+  }

-  uintptr_t region_idx_to_page_idx(uint region) const {
-    return region / _regions_per_page;
+  bool is_page_committed(size_t page_idx) {
+    size_t region = page_idx * _regions_per_page;
+    size_t region_limit = region + _regions_per_page;
+    // Committed if there is a bit set in the range.
+    return _region_commit_map.get_next_one_offset(region, region_limit) != region_limit;
+  }
+
+  void numa_request_on_node(size_t page_idx) {
+    if (_memory_type == mtJavaHeap) {
+      uint region = (uint)(page_idx * _regions_per_page);
+      void* address = _storage.page_start(page_idx);
+      size_t size_in_bytes = _storage.page_size();
+      G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region);
+    }
+  }

  public:
@@ -124,63 +132,76 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
                                        size_t commit_factor,
                                        MemoryType type) :
     G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
-    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
+    _regions_per_page((page_size * commit_factor) / alloc_granularity) {

     guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
-    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
   }

   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+    uint region_limit = (uint)(start_idx + num_regions);
+    assert(num_regions > 0, "Must commit at least one region");
+    assert(_region_commit_map.get_next_one_offset(start_idx, region_limit) == region_limit,
+           "Should be no committed regions in the range [%u, %u)", start_idx, region_limit);
+
     size_t const NoPage = ~(size_t)0;

     size_t first_committed = NoPage;
     size_t num_committed = 0;

+    size_t start_page = region_idx_to_page_idx(start_idx);
+    size_t end_page = region_idx_to_page_idx(region_limit - 1);
+
     bool all_zero_filled = true;
-    G1NUMA* numa = G1NUMA::numa();

-    for (uint region_idx = start_idx; region_idx < start_idx + num_regions; region_idx++) {
-      assert(!_commit_map.at(region_idx), "Trying to commit storage at region %u that is already committed", region_idx);
-      size_t page_idx = region_idx_to_page_idx(region_idx);
-      uint old_refcount = _refcounts.get_by_index(page_idx);
-
-      bool zero_filled = false;
-      if (old_refcount == 0) {
-        if (first_committed == NoPage) {
-          first_committed = page_idx;
-          num_committed = 1;
-        } else {
-          num_committed++;
+    for (size_t page = start_page; page <= end_page; page++) {
+      if (!is_page_committed(page)) {
+        // Page not committed.
+        if (num_committed == 0) {
+          first_committed = page;
         }
-        zero_filled = _storage.commit(page_idx, 1);
-        if (_memory_type == mtJavaHeap) {
-          void* address = _storage.page_start(page_idx);
-          size_t size_in_bytes = _storage.page_size();
-          numa->request_memory_on_node(address, size_in_bytes, region_idx);
+        num_committed++;
+
+        if (!_storage.commit(page, 1)) {
+          // Found dirty region during commit.
+          all_zero_filled = false;
         }
+
+        // Move memory to correct NUMA node for the heap.
+        numa_request_on_node(page);
+      } else {
+        // Page already committed.
+        all_zero_filled = false;
       }
-      all_zero_filled &= zero_filled;
-
-      _refcounts.set_by_index(page_idx, old_refcount + 1);
-      _commit_map.set_bit(region_idx);
     }

+    // Update the commit map for the given range.
+    _region_commit_map.set_range(start_idx, region_limit);
+
     if (AlwaysPreTouch && num_committed > 0) {
       _storage.pretouch(first_committed, num_committed, pretouch_gang);
     }
+
     fire_on_commit(start_idx, num_regions, all_zero_filled);
   }

   virtual void uncommit_regions(uint start_idx, size_t num_regions) {
-    for (uint i = start_idx; i < start_idx + num_regions; i++) {
-      assert(_commit_map.at(i), "Trying to uncommit storage at region %u that is not committed", i);
-      size_t idx = region_idx_to_page_idx(i);
-      uint old_refcount = _refcounts.get_by_index(idx);
-      assert(old_refcount > 0, "must be");
-      if (old_refcount == 1) {
-        _storage.uncommit(idx, 1);
+    uint region_limit = (uint)(start_idx + num_regions);
+    assert(num_regions > 0, "Must uncommit at least one region");
+    assert(_region_commit_map.get_next_zero_offset(start_idx, region_limit) == region_limit,
+           "Should only be committed regions in the range [%u, %u)", start_idx, region_limit);
+
+    size_t start_page = region_idx_to_page_idx(start_idx);
+    size_t end_page = region_idx_to_page_idx(region_limit - 1);
+
+    // Clear commit map for the given range.
+    _region_commit_map.clear_range(start_idx, region_limit);
+
+    for (size_t page = start_page; page <= end_page; page++) {
+      // We know all pages were committed before clearing the map. If the
+      // page is still marked as committed after the clear we should
+      // not uncommit it.
+      if (!is_page_committed(page)) {
+        _storage.uncommit(page, 1);
       }
-      _refcounts.set_by_index(idx, old_refcount - 1);
-      _commit_map.clear_bit(i);
     }
   }
 };
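Note: the new mapper drops the per-page refcount array and derives "page committed" from the region bitmap alone. A toy C++ version of that predicate (sizes are made up; HotSpot uses CHeapBitMap::get_next_one_offset instead of a scan loop):

#include <bitset>
#include <cstddef>

const size_t kRegions = 64;
const size_t kRegionsPerPage = 8;   // one OS page spans several heap regions
std::bitset<kRegions> region_commit_map;

bool is_page_committed(size_t page_idx) {
  size_t begin = page_idx * kRegionsPerPage;
  for (size_t i = begin; i < begin + kRegionsPerPage; i++) {
    if (region_commit_map.test(i)) return true;  // some region still mapped here
  }
  return false;  // no region uses this page; it is safe to uncommit
}

Clearing the region bits first and then asking is_page_committed() per page, as uncommit_regions() does above, replaces the old refcount bookkeeping with a single source of truth.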
src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp

@@ -51,7 +51,7 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {

   size_t _region_granularity;
   // Mapping management
-  CHeapBitMap _commit_map;
+  CHeapBitMap _region_commit_map;

   MemoryType _memory_type;

@@ -68,10 +68,6 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {

   virtual ~G1RegionToSpaceMapper() {}

-  bool is_committed(uintptr_t idx) const {
-    return _commit_map.at(idx);
-  }
-
   void commit_and_set_special();
   virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0;
   virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
src/hotspot/share/gc/shared/taskTerminator.cpp

@@ -39,8 +39,10 @@ TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
 }

 TaskTerminator::~TaskTerminator() {
-  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
-  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );
+  if (_offered_termination != 0) {
+    assert(_offered_termination == _n_threads, "Must be terminated or aborted");
+    assert_queue_set_empty();
+  }

   assert(_spin_master == NULL, "Should have been reset");
   assert(_blocker != NULL, "Can not be NULL");
@@ -48,8 +50,8 @@ TaskTerminator::~TaskTerminator() {
 }

 #ifdef ASSERT
-bool TaskTerminator::peek_in_queue_set() {
-  return _queue_set->peek();
+void TaskTerminator::assert_queue_set_empty() const {
+  _queue_set->assert_empty();
 }
 #endif
@@ -87,7 +89,7 @@ bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   // Single worker, done
   if (_n_threads == 1) {
     _offered_termination = 1;
-    assert(!peek_in_queue_set(), "Precondition");
+    assert_queue_set_empty();
     return true;
   }

@@ -97,7 +99,7 @@ bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   if (_offered_termination == _n_threads) {
     _blocker->notify_all();
     _blocker->unlock();
-    assert(!peek_in_queue_set(), "Precondition");
+    assert_queue_set_empty();
     return true;
   }

@@ -110,7 +112,7 @@ bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {

     if (do_spin_master_work(terminator)) {
       assert(_offered_termination == _n_threads, "termination condition");
-      assert(!peek_in_queue_set(), "Precondition");
+      assert_queue_set_empty();
       return true;
     } else {
       _blocker->lock_without_safepoint_check();
@@ -118,7 +120,7 @@ bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
       // before returning from do_spin_master_work() and acquiring lock above.
       if (_offered_termination == _n_threads) {
         _blocker->unlock();
-        assert(!peek_in_queue_set(), "Precondition");
+        assert_queue_set_empty();
         return true;
       }
     }
@@ -127,7 +129,7 @@ bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {

     if (_offered_termination == _n_threads) {
       _blocker->unlock();
-      assert(!peek_in_queue_set(), "Precondition");
+      assert_queue_set_empty();
       return true;
     }
   }
src/hotspot/share/gc/shared/taskTerminator.hpp

@@ -57,9 +57,8 @@ class TaskTerminator : public CHeapObj<mtGC> {
   volatile uint _offered_termination;
   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));

-#ifdef ASSERT
-  bool peek_in_queue_set();
-#endif
+  void assert_queue_set_empty() const NOT_DEBUG_RETURN;

   void yield();

   Monitor* _blocker;
src/hotspot/share/gc/shared/taskqueue.hpp

@@ -202,12 +202,24 @@ protected:
   // threads attempting to perform the pop_global will all perform the same
   // CAS, and only one can succeed.)  Any stealing thread that reads after
   // either the increment or decrement will see an empty queue, and will not
-  // join the competitors.  The "sz == -1 || sz == N-1" state will not be
-  // modified by concurrent queues, so the owner thread can reset the state to
-  // _bottom == top so subsequent pushes will be performed normally.
+  // join the competitors.  The "sz == -1" / "sz == N-1" state will not be
+  // modified by concurrent threads, so the owner thread can reset the state
+  // to _bottom == top so subsequent pushes will be performed normally.
     return (sz == N - 1) ? 0 : sz;
   }

+  // Assert that we're not in the underflow state where bottom has
+  // been decremented past top, so that _bottom+1 mod N == top.  See
+  // the discussion in clean_size.
+
+  void assert_not_underflow(uint bot, uint top) const {
+    assert_not_underflow(dirty_size(bot, top));
+  }
+
+  void assert_not_underflow(uint dirty_size) const {
+    assert(dirty_size != N - 1, "invariant");
+  }
+
 private:
   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
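Note: assert_not_underflow centralizes the "dirty_size != N - 1" checks scattered through the old code. A tiny self-contained sketch of the modular index arithmetic behind it (N here is an arbitrary power of two, not the template parameter):

#include <cassert>

const unsigned N = 1u << 16;                 // queue capacity, power of two

unsigned dirty_size(unsigned bottom, unsigned top) {
  return (bottom - top) & (N - 1);           // wraps correctly modulo N
}

void assert_not_underflow(unsigned bottom, unsigned top) {
  // bottom one slot behind top would compute to N - 1: the impossible
  // "underflow" value a correct queue never exposes outside pop_local's
  // transient window.
  assert(dirty_size(bottom, top) != N - 1 && "queue underflow");
}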
@@ -228,10 +240,10 @@ private:
 public:
   TaskQueueSuper() : _bottom(0), _age() {}

-  // Return true if the TaskQueue contains any tasks.
+  // Assert the queue is empty.
   // Unreliable if there are concurrent pushes or pops.
-  bool peek() const {
-    return bottom_relaxed() != age_top_relaxed();
+  void assert_empty() const {
+    assert(bottom_relaxed() == age_top_relaxed(), "not empty");
   }

   bool is_empty() const {
@@ -313,6 +325,7 @@ protected:
   using TaskQueueSuper<N, F>::decrement_index;
   using TaskQueueSuper<N, F>::dirty_size;
   using TaskQueueSuper<N, F>::clean_size;
+  using TaskQueueSuper<N, F>::assert_not_underflow;

 public:
   using TaskQueueSuper<N, F>::max_elems;
@@ -426,8 +439,10 @@ private:

 class TaskQueueSetSuper {
 public:
-  // Returns "true" if some TaskQueue in the set contains a task.
-  virtual bool peek() = 0;
+  // Assert all queues in the set are empty.
+  NOT_DEBUG(void assert_empty() const {})
+  DEBUG_ONLY(virtual void assert_empty() const = 0;)

   // Tasks in queue
   virtual uint tasks() const = 0;
 };
@@ -458,8 +473,9 @@ public:
   // Returns if stealing succeeds, and sets "t" to the stolen task.
   bool steal(uint queue_num, E& t);

-  bool peek();
-  uint tasks() const;
+  DEBUG_ONLY(virtual void assert_empty() const;)
+
+  virtual uint tasks() const;

   uint size() const { return _n; }
 };
@@ -475,15 +491,14 @@ GenericTaskQueueSet<T, F>::queue(uint i) {
   return _queues[i];
 }

+#ifdef ASSERT
 template<class T, MEMFLAGS F>
-bool GenericTaskQueueSet<T, F>::peek() {
-  // Try all the queues.
+void GenericTaskQueueSet<T, F>::assert_empty() const {
   for (uint j = 0; j < _n; j++) {
-    if (_queues[j]->peek())
-      return true;
+    _queues[j]->assert_empty();
   }
-  return false;
 }
+#endif // ASSERT

 template<class T, MEMFLAGS F>
 uint GenericTaskQueueSet<T, F>::tasks() const {
@@ -123,7 +123,7 @@ bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
Age tempAge = cmpxchg_age(oldAge, newAge);
if (tempAge == oldAge) {
// We win.
assert(dirty_size(localBot, age_top_relaxed()) != N - 1, "sanity");
assert_not_underflow(localBot, age_top_relaxed());
TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
return true;
}
@@ -132,7 +132,7 @@ bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
// and top is greater than bottom. Fix this representation of the empty queue
// to become the canonical one.
set_age_relaxed(newAge);
assert(dirty_size(localBot, age_top_relaxed()) != N - 1, "sanity");
assert_not_underflow(localBot, age_top_relaxed());
return false;
}

@@ -144,7 +144,7 @@ GenericTaskQueue<E, F, N>::pop_local(E& t, uint threshold) {
// resets the size to 0 before the next call (which is sequential,
// since this is pop_local.)
uint dirty_n_elems = dirty_size(localBot, age_top_relaxed());
assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
assert_not_underflow(dirty_n_elems);
if (dirty_n_elems <= threshold) return false;
localBot = decrement_index(localBot);
set_bottom_relaxed(localBot);
@@ -158,7 +158,7 @@ GenericTaskQueue<E, F, N>::pop_local(E& t, uint threshold) {
// a "pop_global" operation, and we're done.
idx_t tp = age_top_relaxed();
if (clean_size(localBot, tp) > 0) {
assert(dirty_size(localBot, tp) != N - 1, "sanity");
assert_not_underflow(localBot, tp);
TASKQUEUE_STATS_ONLY(stats.record_pop());
return true;
} else {
@@ -241,7 +241,7 @@ bool GenericTaskQueue<E, F, N>::pop_global(E& t) {

// Note that using "bottom" here might fail, since a pop_local might
// have decremented it.
assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
assert_not_underflow(localBot, newAge.top());
return resAge == oldAge;
}

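All five hunks above replace the same repeated assert with the new assert_not_underflow() helper. The guarded value comes from modulo-N index arithmetic: if a concurrent pop_global advances top past bottom, the wrapped difference reads as N - 1. A self-contained sketch of that arithmetic, assuming a power-of-two capacity as in the real queue:

    #include <cassert>

    // Modulo-N distance between bottom and top for a ring buffer of
    // capacity N (power of two). When a racing steal has already moved
    // top one past bottom, the wrapped difference comes out as N - 1 --
    // the "underflow" value the asserts guard against.
    template <unsigned N>
    unsigned dirty_size(unsigned bot, unsigned top) {
      static_assert((N & (N - 1)) == 0, "N must be a power of two");
      return (bot - top) & (N - 1);
    }

    template <unsigned N>
    void assert_not_underflow(unsigned dirty) {
      assert(dirty != N - 1 && "invariant");
    }
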
@@ -481,16 +481,16 @@ const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() {
return TypeFunc::make(domain, range);
}

const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type() {
const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(const Type* value_type) {
const Type **fields = TypeTuple::fields(2);
fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
fields[TypeFunc::Parms+0] = value_type; // original field value
fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address

const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

// create result type (range)
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;
fields[TypeFunc::Parms+0] = value_type;
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

return TypeFunc::make(domain, range);
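
Threading a value_type parameter through the barrier's TypeFunc lets the IR keep the precise static type of the loaded oop across the runtime call, instead of widening every barriered value to TypeInstPtr::NOTNULL. As a loose analogy (not the C2 API), it is the difference between one type-erased signature and one instantiated per call site:

    // Fixed signature: every call site sees the same erased oop type.
    void* load_reference_barrier_fixed(void* obj, void** load_addr);

    // Per-call-site signature: the value's own static type survives the
    // call, so downstream passes can still use it (hypothetical sketch).
    template <typename T>
    T* load_reference_barrier(T* obj, T** load_addr);
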
@@ -1059,37 +1059,10 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
}
}
}
if (n->Opcode() == Op_CmpP) {
Node* in1 = n->in(1);
Node* in2 = n->in(2);
if (in1->bottom_type() == TypePtr::NULL_PTR) {
in2 = step_over_gc_barrier(in2);
}
if (in2->bottom_type() == TypePtr::NULL_PTR) {
in1 = step_over_gc_barrier(in1);
}
PhaseIterGVN* igvn = phase->is_IterGVN();
if (in1 != n->in(1)) {
if (igvn != NULL) {
n->set_req_X(1, in1, igvn);
} else {
n->set_req(1, in1);
}
assert(in2 == n->in(2), "only one change");
return n;
}
if (in2 != n->in(2)) {
if (igvn != NULL) {
n->set_req_X(2, in2, igvn);
} else {
n->set_req(2, in2);
}
return n;
}
} else if (can_reshape &&
n->Opcode() == Op_If &&
ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
n->in(0) != NULL) {
if (can_reshape &&
n->Opcode() == Op_If &&
ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
n->in(0) != NULL) {
Node* dom = n->in(0);
Node* prev_dom = n;
int op = n->Opcode();
@@ -103,7 +103,7 @@ public:

static const TypeFunc* write_ref_field_pre_entry_Type();
static const TypeFunc* shenandoah_clone_barrier_Type();
static const TypeFunc* shenandoah_load_reference_barrier_Type();
static const TypeFunc* shenandoah_load_reference_barrier_Type(const Type* value_type);
virtual bool has_load_barrier_nodes() const { return true; }

// This is the entry-point for the backend to perform accesses through the Access API.

@@ -68,7 +68,7 @@ bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
return true;
}

bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
if (!UseShenandoahGC) {
return false;
}
@@ -102,7 +102,7 @@ bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
}

bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
@@ -860,152 +860,96 @@ static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNod
inner->clear_strip_mined();
}

void ShenandoahBarrierC2Support::test_heap_state(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
PhaseIdealLoop* phase, int flags) {
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
PhaseIdealLoop* phase, int flags) {
PhaseIterGVN& igvn = phase->igvn();
Node* old_ctrl = ctrl;

Node* thread = new ThreadLocalNode();
Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
Node* gc_state_addr = new AddPNode(phase->C->top(), thread, gc_state_offset);
Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
TypeInt::BYTE, MemNode::unordered);
Node* gc_state_and = new AndINode(gc_state, igvn.intcon(flags));
Node* gc_state_cmp = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
Node* gc_state_bool = new BoolNode(gc_state_cmp, BoolTest::ne);

IfNode* gc_state_iff = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
ctrl = new IfTrueNode(gc_state_iff);
test_fail_ctrl = new IfFalseNode(gc_state_iff);

IdealLoopTree* loop = phase->get_loop(ctrl);
Node* thread = new ThreadLocalNode();
phase->register_new_node(thread, ctrl);
Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
phase->set_ctrl(offset, phase->C->root());
Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
phase->register_new_node(gc_state_addr, ctrl);
uint gc_state_idx = Compile::AliasIdxRaw;
const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
phase->register_control(gc_state_iff, loop, old_ctrl);
phase->register_control(ctrl, loop, gc_state_iff);
phase->register_control(test_fail_ctrl, loop, gc_state_iff);

Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
phase->register_new_node(gc_state, ctrl);
Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(flags));
phase->register_new_node(heap_stable_and, ctrl);
Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
phase->register_new_node(heap_stable_cmp, ctrl);
Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
phase->register_new_node(heap_stable_test, ctrl);
IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
phase->register_control(heap_stable_iff, loop, ctrl);
phase->register_new_node(thread, old_ctrl);
phase->register_new_node(gc_state_addr, old_ctrl);
phase->register_new_node(gc_state, old_ctrl);
phase->register_new_node(gc_state_and, old_ctrl);
phase->register_new_node(gc_state_cmp, old_ctrl);
phase->register_new_node(gc_state_bool, old_ctrl);

heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
ctrl = new IfTrueNode(heap_stable_iff);
phase->register_control(ctrl, loop, heap_stable_iff);
phase->set_ctrl(gc_state_offset, phase->C->root());

assert(is_heap_state_test(heap_stable_iff, flags), "Should match the shape");
assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}
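
In runtime terms, the IR shape test_gc_state() builds is a byte load from thread-local storage followed by a mask-and-branch, with the flagged path marked unlikely (PROB_UNLIKELY(0.999)). A hedged C++ rendering, with gc_state standing in for the thread's ShenandoahThreadLocalData::gc_state() byte:

    #include <cstdint>

    // Illustrative stand-in for the per-thread GC state byte.
    static thread_local uint8_t gc_state = 0;

    // Sketch of the test the barrier emits: take the slow path only when
    // one of the requested state bits (e.g. HAS_FORWARDED) is set.
    bool needs_slow_path(int flags) {
      return (gc_state & flags) != 0;  // PROB_UNLIKELY in the compiled IR
    }
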
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
const Type* val_t = phase->igvn().type(val);
Node* old_ctrl = ctrl;
PhaseIterGVN& igvn = phase->igvn();

const Type* val_t = igvn.type(val);
if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
IdealLoopTree* loop = phase->get_loop(ctrl);
Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
phase->register_new_node(null_cmp, ctrl);
Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
phase->register_new_node(null_test, ctrl);
IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
phase->register_control(null_iff, loop, ctrl);
ctrl = new IfTrueNode(null_iff);
phase->register_control(ctrl, loop, null_iff);
null_ctrl = new IfFalseNode(null_iff);
Node* null_cmp = new CmpPNode(val, igvn.zerocon(T_OBJECT));
Node* null_test = new BoolNode(null_cmp, BoolTest::ne);

IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
ctrl = new IfTrueNode(null_iff);
null_ctrl = new IfFalseNode(null_iff);

IdealLoopTree* loop = phase->get_loop(old_ctrl);
phase->register_control(null_iff, loop, old_ctrl);
phase->register_control(ctrl, loop, null_iff);
phase->register_control(null_ctrl, loop, null_iff);

phase->register_new_node(null_cmp, old_ctrl);
phase->register_new_node(null_test, old_ctrl);
}
}
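
test_null() only materializes a branch when the value's static type can actually be NULL (val_t->meet(TypePtr::NULL_PTR) == val_t); provably non-null values pass through untested. The control split it models, sketched with a hypothetical on_null continuation:

    #include <cstddef>

    // Sketch of the branch test_null() builds: the not-NULL continuation
    // stays on the fast path (PROB_LIKELY(0.999)); the NULL case is
    // split onto its own control edge.
    template <typename T>
    T* with_null_split(T* val, void (*on_null)()) {
      if (val == nullptr) {   // unlikely path -> null_ctrl projection
        on_null();
        return nullptr;
      }
      return val;             // likely path -> ctrl continues
    }
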
Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
IdealLoopTree *loop = phase->get_loop(c);
Node* iff = unc_ctrl->in(0);
assert(iff->is_If(), "broken");
Node* new_iff = iff->clone();
new_iff->set_req(0, c);
phase->register_control(new_iff, loop, c);
Node* iffalse = new IfFalseNode(new_iff->as_If());
phase->register_control(iffalse, loop, new_iff);
Node* iftrue = new IfTrueNode(new_iff->as_If());
phase->register_control(iftrue, loop, new_iff);
c = iftrue;
const Type *t = phase->igvn().type(val);
assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
Node* uncasted_val = val->in(1);
val = new CastPPNode(uncasted_val, t);
val->init_req(0, c);
phase->register_new_node(val, c);
return val;
}
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
Node* old_ctrl = ctrl;
PhaseIterGVN& igvn = phase->igvn();

void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
Unique_Node_List& uses, PhaseIdealLoop* phase) {
IfNode* iff = unc_ctrl->in(0)->as_If();
Node* proj = iff->proj_out(0);
assert(proj != unc_ctrl, "bad projection");
Node* use = proj->unique_ctrl_out();
Node* raw_val = new CastP2XNode(old_ctrl, val);
Node* cset_idx = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
Node* cset_addr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
Node* cset_load_addr = new AddPNode(phase->C->top(), cset_addr, cset_idx);
Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_addr,
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
TypeInt::BYTE, MemNode::unordered);
Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
Node* cset_bool = new BoolNode(cset_cmp, BoolTest::ne);

assert(use == unc || use->is_Region(), "what else?");
IfNode* cset_iff = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
ctrl = new IfTrueNode(cset_iff);
not_cset_ctrl = new IfFalseNode(cset_iff);

uses.clear();
if (use == unc) {
phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
for (uint i = 1; i < unc->req(); i++) {
Node* n = unc->in(i);
if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
uses.push(n);
}
}
} else {
assert(use->is_Region(), "what else?");
uint idx = 1;
for (; use->in(idx) != proj; idx++);
for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
Node* u = use->fast_out(i);
if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
uses.push(u->in(idx));
}
}
}
for(uint next = 0; next < uses.size(); next++ ) {
Node *n = uses.at(next);
assert(phase->get_ctrl(n) == proj, "bad control");
phase->set_ctrl_and_loop(n, new_unc_ctrl);
if (n->in(0) == proj) {
phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
}
for (uint i = 0; i < n->req(); i++) {
Node* m = n->in(i);
if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
uses.push(m);
}
}
}
IdealLoopTree *loop = phase->get_loop(old_ctrl);
phase->register_control(cset_iff, loop, old_ctrl);
phase->register_control(ctrl, loop, cset_iff);
phase->register_control(not_cset_ctrl, loop, cset_iff);

phase->igvn().rehash_node_delayed(use);
int nb = use->replace_edge(proj, new_unc_ctrl);
assert(nb == 1, "only use expected");
}
phase->set_ctrl(cset_addr, phase->C->root());

void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
IdealLoopTree *loop = phase->get_loop(ctrl);
Node* raw_rbtrue = new CastP2XNode(ctrl, val);
phase->register_new_node(raw_rbtrue, ctrl);
Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
phase->register_new_node(cset_offset, ctrl);
Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
phase->register_new_node(in_cset_fast_test_adr, ctrl);
uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
phase->register_new_node(in_cset_fast_test_load, ctrl);
Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
phase->register_new_node(in_cset_fast_test_cmp, ctrl);
Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
phase->register_new_node(in_cset_fast_test_test, ctrl);
IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
phase->register_control(in_cset_fast_test_iff, loop, ctrl);

not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);

ctrl = new IfFalseNode(in_cset_fast_test_iff);
phase->register_control(ctrl, loop, in_cset_fast_test_iff);
phase->register_new_node(raw_val, old_ctrl);
phase->register_new_node(cset_idx, old_ctrl);
phase->register_new_node(cset_load_addr, old_ctrl);
phase->register_new_node(cset_load, old_ctrl);
phase->register_new_node(cset_cmp, old_ctrl);
phase->register_new_node(cset_bool, old_ctrl);
}
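
The renamed test_in_cset() keeps Shenandoah's usual fast collection-set membership test: shift the object address right by the region-size shift to get a region index, and load one byte from the table at ShenandoahHeap::in_cset_fast_test_addr(). In plain C++, with cset_map and region_shift as illustrative stand-ins for the real heap globals:

    #include <cstdint>

    // Sketch of the in-collection-set fast test the IR encodes: one byte
    // per heap region, indexed by address >> log2(region size). The map
    // size, mask and shift below are toy values, not the heap's.
    static const int region_shift = 19;             // e.g. 512 KB regions
    static uint8_t cset_map[1 << 12] = {0};         // one byte per region

    bool in_collection_set(const void* obj) {
      uintptr_t idx = reinterpret_cast<uintptr_t>(obj) >> region_shift;
      return cset_map[idx & ((1 << 12) - 1)] != 0;  // unlikely-taken branch
    }
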
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) {
@@ -1026,7 +970,7 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)
: target;
const char* name = is_native ? "load_reference_barrier_native" : "load_reference_barrier";
Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);
Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(obj_type), calladdr, name, TypeRawPtr::BOTTOM);

call->init_req(TypeFunc::Control, ctrl);
call->init_req(TypeFunc::I_O, phase->C->top());
@@ -1042,8 +986,6 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
phase->register_new_node(result_mem, call);
val = new ProjNode(call, TypeFunc::Parms);
phase->register_new_node(val, call);
val = new CheckCastPPNode(ctrl, val, obj_type);
phase->register_new_node(val, ctrl);
}

void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
|
||||
}
|
||||
|
||||
Node* ctrl = phase->get_ctrl(lrb);
|
||||
Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
|
||||
|
||||
CallStaticJavaNode* unc = NULL;
|
||||
Node* unc_ctrl = NULL;
|
||||
Node* uncasted_val = val;
|
||||
|
||||
for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
|
||||
Node* u = lrb->fast_out(i);
|
||||
if (u->Opcode() == Op_CastPP &&
|
||||
u->in(0) != NULL &&
|
||||
phase->is_dominator(u->in(0), ctrl)) {
|
||||
const Type* u_t = phase->igvn().type(u);
|
||||
|
||||
if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
|
||||
u->in(0)->Opcode() == Op_IfTrue &&
|
||||
u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
|
||||
u->in(0)->in(0)->is_If() &&
|
||||
u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
|
||||
u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
|
||||
u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
|
||||
u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
|
||||
u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
|
||||
IdealLoopTree* loop = phase->get_loop(ctrl);
|
||||
IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
|
||||
|
||||
if (!unc_loop->is_member(loop)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
Node* branch = no_branches(ctrl, u->in(0), false, phase);
|
||||
assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
|
||||
if (branch == NodeSentinel) {
|
||||
continue;
|
||||
}
|
||||
|
||||
phase->igvn().replace_input_of(u, 1, val);
|
||||
phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
|
||||
phase->set_ctrl(u, u->in(0));
|
||||
phase->set_ctrl(lrb, u->in(0));
|
||||
unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
|
||||
unc_ctrl = u->in(0);
|
||||
val = u;
|
||||
|
||||
for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
|
||||
Node* u = val->fast_out(j);
|
||||
if (u == lrb) continue;
|
||||
phase->igvn().rehash_node_delayed(u);
|
||||
int nb = u->replace_edge(val, lrb);
|
||||
--j; jmax -= nb;
|
||||
}
|
||||
|
||||
RegionNode* r = new RegionNode(3);
|
||||
IfNode* iff = unc_ctrl->in(0)->as_If();
|
||||
|
||||
Node* ctrl_use = unc_ctrl->unique_ctrl_out();
|
||||
Node* unc_ctrl_clone = unc_ctrl->clone();
|
||||
phase->register_control(unc_ctrl_clone, loop, iff);
|
||||
Node* c = unc_ctrl_clone;
|
||||
Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
|
||||
r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
|
||||
|
||||
phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
|
||||
phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
|
||||
phase->lazy_replace(c, unc_ctrl);
|
||||
c = NULL;;
|
||||
phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
|
||||
phase->set_ctrl(val, unc_ctrl_clone);
|
||||
|
||||
IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
|
||||
fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
|
||||
Node* iff_proj = iff->proj_out(0);
|
||||
r->init_req(2, iff_proj);
|
||||
phase->register_control(r, phase->ltree_root(), iff);
|
||||
|
||||
Node* new_bol = new_iff->in(1)->clone();
|
||||
Node* new_cmp = new_bol->in(1)->clone();
|
||||
assert(new_cmp->Opcode() == Op_CmpP, "broken");
|
||||
assert(new_cmp->in(1) == val->in(1), "broken");
|
||||
new_bol->set_req(1, new_cmp);
|
||||
new_cmp->set_req(1, lrb);
|
||||
phase->register_new_node(new_bol, new_iff->in(0));
|
||||
phase->register_new_node(new_cmp, new_iff->in(0));
|
||||
phase->igvn().replace_input_of(new_iff, 1, new_bol);
|
||||
phase->igvn().replace_input_of(new_cast, 1, lrb);
|
||||
|
||||
for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
|
||||
Node* u = lrb->fast_out(i);
|
||||
if (u == new_cast || u == new_cmp) {
|
||||
continue;
|
||||
}
|
||||
phase->igvn().rehash_node_delayed(u);
|
||||
int nb = u->replace_edge(lrb, new_cast);
|
||||
assert(nb > 0, "no update?");
|
||||
--i; imax -= nb;
|
||||
}
|
||||
|
||||
for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
|
||||
Node* u = val->fast_out(i);
|
||||
if (u == lrb) {
|
||||
continue;
|
||||
}
|
||||
phase->igvn().rehash_node_delayed(u);
|
||||
int nb = u->replace_edge(val, new_cast);
|
||||
assert(nb > 0, "no update?");
|
||||
--i; imax -= nb;
|
||||
}
|
||||
|
||||
ctrl = unc_ctrl_clone;
|
||||
phase->set_ctrl_and_loop(lrb, ctrl);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
|
||||
CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
|
||||
if (call->entry_point() == OptoRuntime::rethrow_stub()) {
|
||||
@ -1402,90 +1231,45 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
Node* ctrl = phase->get_ctrl(lrb);
|
||||
Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
|
||||
|
||||
|
||||
Node* orig_ctrl = ctrl;
|
||||
|
||||
Node* raw_mem = fixer.find_mem(ctrl, lrb);
|
||||
Node* init_raw_mem = raw_mem;
|
||||
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
|
||||
|
||||
IdealLoopTree *loop = phase->get_loop(ctrl);
|
||||
CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
|
||||
Node* unc_ctrl = NULL;
|
||||
if (unc != NULL) {
|
||||
if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
|
||||
unc = NULL;
|
||||
} else {
|
||||
unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
|
||||
}
|
||||
}
|
||||
|
||||
Node* uncasted_val = val;
|
||||
if (unc != NULL) {
|
||||
uncasted_val = val->in(1);
|
||||
}
|
||||
|
||||
Node* heap_stable_ctrl = NULL;
|
||||
Node* null_ctrl = NULL;
|
||||
IdealLoopTree* loop = phase->get_loop(ctrl);
|
||||
|
||||
assert(val->bottom_type()->make_oopptr(), "need oop");
|
||||
assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
|
||||
|
||||
enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT };
|
||||
enum { _heap_stable = 1, _not_cset, _evac_path, PATH_LIMIT };
|
||||
Node* region = new RegionNode(PATH_LIMIT);
|
||||
Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
|
||||
Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
|
||||
Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
|
||||
|
||||
// Stable path.
|
||||
test_heap_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED);
|
||||
Node* heap_stable_ctrl = NULL;
|
||||
test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED);
|
||||
IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
|
||||
|
||||
// Heap stable case
|
||||
region->init_req(_heap_stable, heap_stable_ctrl);
|
||||
val_phi->init_req(_heap_stable, uncasted_val);
|
||||
val_phi->init_req(_heap_stable, val);
|
||||
raw_mem_phi->init_req(_heap_stable, raw_mem);
|
||||
|
||||
Node* reg2_ctrl = NULL;
|
||||
// Null case
|
||||
test_null(ctrl, val, null_ctrl, phase);
|
||||
if (null_ctrl != NULL) {
|
||||
reg2_ctrl = null_ctrl->in(0);
|
||||
region->init_req(_null_path, null_ctrl);
|
||||
val_phi->init_req(_null_path, uncasted_val);
|
||||
raw_mem_phi->init_req(_null_path, raw_mem);
|
||||
} else {
|
||||
region->del_req(_null_path);
|
||||
val_phi->del_req(_null_path);
|
||||
raw_mem_phi->del_req(_null_path);
|
||||
}
|
||||
|
||||
// Test for in-cset.
|
||||
// Wires !in_cset(obj) to slot 2 of region and phis
|
||||
Node* not_cset_ctrl = NULL;
|
||||
in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
|
||||
test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
|
||||
if (not_cset_ctrl != NULL) {
|
||||
if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
|
||||
region->init_req(_not_cset, not_cset_ctrl);
|
||||
val_phi->init_req(_not_cset, uncasted_val);
|
||||
val_phi->init_req(_not_cset, val);
|
||||
raw_mem_phi->init_req(_not_cset, raw_mem);
|
||||
}
|
||||
|
||||
// Resolve object when orig-value is in cset.
|
||||
// Make the unconditional resolve for fwdptr.
|
||||
Node* new_val = uncasted_val;
|
||||
if (unc_ctrl != NULL) {
|
||||
// Clone the null check in this branch to allow implicit null check
|
||||
new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
|
||||
fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
|
||||
|
||||
IfNode* iff = unc_ctrl->in(0)->as_If();
|
||||
phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
|
||||
}
|
||||
|
||||
// Call lrb-stub and wire up that path in slots 4
|
||||
Node* result_mem = NULL;
|
||||
|
||||
Node* fwd = new_val;
|
||||
Node* addr;
|
||||
if (ShenandoahSelfFixing) {
|
||||
VectorSet visited(Thread::current()->resource_area());
|
||||
@ -1518,9 +1302,9 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
}
|
||||
}
|
||||
}
|
||||
call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, lrb->is_native(), phase);
|
||||
call_lrb_stub(ctrl, val, addr, result_mem, raw_mem, lrb->is_native(), phase);
|
||||
region->init_req(_evac_path, ctrl);
|
||||
val_phi->init_req(_evac_path, fwd);
|
||||
val_phi->init_req(_evac_path, val);
|
||||
raw_mem_phi->init_req(_evac_path, result_mem);
|
||||
|
||||
phase->register_control(region, loop, heap_stable_iff);
|
||||
@ -1532,20 +1316,6 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
|
||||
ctrl = orig_ctrl;
|
||||
|
||||
if (unc != NULL) {
|
||||
for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
|
||||
Node* u = val->fast_out(i);
|
||||
Node* c = phase->ctrl_or_self(u);
|
||||
if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
|
||||
phase->igvn().rehash_node_delayed(u);
|
||||
int nb = u->replace_edge(val, out_val);
|
||||
--i, imax -= nb;
|
||||
}
|
||||
}
|
||||
if (val->outcnt() == 0) {
|
||||
phase->igvn()._worklist.push(val);
|
||||
}
|
||||
}
|
||||
phase->igvn().replace_node(lrb, out_val);
|
||||
|
||||
follow_barrier_uses(out_val, ctrl, uses, phase);
|
||||
@ -1608,7 +1378,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
|
||||
|
||||
// Stable path.
|
||||
test_heap_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
|
||||
test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::MARKING);
|
||||
region->init_req(_heap_stable, heap_stable_ctrl);
|
||||
phi->init_req(_heap_stable, raw_mem);
|
||||
|
||||
@ -1790,7 +1560,7 @@ Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet
|
||||
|
||||
}
|
||||
|
||||
void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
|
||||
void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
|
||||
IdealLoopTree *loop = phase->get_loop(iff);
|
||||
Node* loop_head = loop->_head;
|
||||
Node* entry_c = loop_head->in(LoopNode::EntryControl);
|
||||
@ -1984,7 +1754,7 @@ void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, No
|
||||
if (head->as_Loop()->is_strip_mined()) {
|
||||
head->as_Loop()->verify_strip_mined(0);
|
||||
}
|
||||
move_heap_stable_test_out_of_loop(iff, phase);
|
||||
move_gc_state_test_out_of_loop(iff, phase);
|
||||
|
||||
AutoNodeBudget node_budget(phase);
|
||||
|
||||
@ -3173,6 +2943,11 @@ bool ShenandoahLoadReferenceBarrierNode::is_redundant() {
|
||||
bool visit_users = false;
|
||||
switch (n->Opcode()) {
|
||||
case Op_CallStaticJava:
|
||||
// Uncommon traps don't need barriers, values are handled during deoptimization. It also affects
|
||||
// optimizing null-checks into implicit null-checks.
|
||||
if (n->as_CallStaticJava()->uncommon_trap_request() != 0) {
|
||||
break;
|
||||
}
|
||||
case Op_CallDynamicJava:
|
||||
case Op_CallLeaf:
|
||||
case Op_CallLeafNoFP:
|
||||
@ -3314,26 +3089,3 @@ bool ShenandoahLoadReferenceBarrierNode::is_redundant() {
|
||||
// No need for barrier found.
|
||||
return true;
|
||||
}
|
||||
|
||||
CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
|
||||
Node* val = in(ValueIn);
|
||||
|
||||
const Type* val_t = igvn.type(val);
|
||||
|
||||
if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
|
||||
val->Opcode() == Op_CastPP &&
|
||||
val->in(0) != NULL &&
|
||||
val->in(0)->Opcode() == Op_IfTrue &&
|
||||
val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
|
||||
val->in(0)->in(0)->is_If() &&
|
||||
val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
|
||||
val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
|
||||
val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
|
||||
val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
|
||||
val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
|
||||
assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
|
||||
CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
|
||||
return unc;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
@@ -53,19 +53,16 @@ private:
#endif
static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase);
static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase);
static bool is_heap_state_test(Node* iff, int mask);
static bool is_gc_state_test(Node* iff, int mask);
static bool has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase);
static Node* find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase);
static void follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase);
static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase);
static void test_heap_state(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
PhaseIdealLoop* phase, int flags);
static void test_gc_state(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
PhaseIdealLoop* phase, int flags);
static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase);
static Node* clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase);
static void fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses,
PhaseIdealLoop* phase);
static void in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
static void move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase);
static void test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
static void move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase);
static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase);
static bool identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);
@@ -254,7 +251,6 @@ public:
virtual bool cmp( const Node &n ) const;

bool is_redundant();
CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn);

private:
bool needs_barrier(PhaseGVN* phase, Node* n);

@@ -27,6 +27,7 @@
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"

@@ -27,6 +27,7 @@
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"

@@ -26,6 +26,7 @@

#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"

@@ -27,6 +27,7 @@
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"

@@ -194,6 +194,7 @@ public:
ShenandoahDisarmNMethodsTask() :
AbstractGangTask("ShenandoahDisarmNMethodsTask"),
_iterator(ShenandoahCodeRoots::table()) {
assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint");
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
_iterator.nmethods_do_begin();
}
@@ -204,6 +205,7 @@ public:
}

virtual void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
_iterator.nmethods_do(&_cl);
}
};

@@ -351,6 +351,7 @@ public:
_worker_phase(phase) {}

void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahUpdateRefsClosure cl;
_thread_roots.oops_do(&cl, NULL, worker_id);
}
@@ -588,6 +589,7 @@ public:
HandleMark hm;
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
if (heap->has_forwarded_objects()) {
ShenandoahForwardedIsAliveClosure is_alive;
@@ -682,7 +684,11 @@ void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

{
ShenandoahGCPhase phase(phase_process);
// Note: Don't emit JFR event for this phase, to avoid overflow nesting phase level.
// Reference Processor emits 2 levels JFR event, that can get us over the JFR
// event nesting level limits, in case of degenerated GC gets upgraded to
// full GC.
ShenandoahTimingsTracker phase_timing(phase_process);

if (_heap->has_forwarded_objects()) {
ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
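The ShenandoahParallelWorkerSession (and, later, ShenandoahConcurrentWorkerSession) locals added to each work() body scope per-worker bookkeeping to the task: construction opens the session for worker_id, and the destructor closes it on every exit path. The general shape of the idiom, as a sketch with a hypothetical session type:

    // Sketch of the RAII worker-session idiom the patch threads through
    // every work(uint worker_id) body (hypothetical type, not HotSpot's).
    class WorkerSessionSketch {
      unsigned _worker_id;
    public:
      explicit WorkerSessionSketch(unsigned id) : _worker_id(id) {
        // open per-worker accounting/attribution for this task
      }
      ~WorkerSessionSketch() {
        // close it on all exit paths, including early returns
      }
    };

    void work(unsigned worker_id) {
      WorkerSessionSketch session(worker_id);
      // ... task body runs with per-worker attribution active ...
    }
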
@@ -28,6 +28,7 @@
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"

@@ -24,7 +24,7 @@

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"

@@ -1343,6 +1343,7 @@ public:
_heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}

void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
size_t stride = ShenandoahParallelRegionStride;

size_t max = _heap->num_regions();
@@ -1412,12 +1413,12 @@ void ShenandoahHeap::op_init_mark() {
set_concurrent_mark_in_progress(true);
// We need to reset all TLABs because we'd lose marks on all objects allocated in them.
{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::make_parsable);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
make_parsable(true);
}

{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::init_update_region_states);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
ShenandoahInitMarkUpdateRegionStateClosure cl;
parallel_heap_region_iterate(&cl);
}
@@ -1428,7 +1429,7 @@ void ShenandoahHeap::op_init_mark() {
concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);

if (UseTLAB) {
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::resize_tlabs);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
resize_tlabs();
}

@@ -1484,7 +1485,7 @@ public:

// Remember limit for updating refs. It's guaranteed that we get no
// from-space-refs written from here on.
r->set_update_watermark(r->top());
r->set_update_watermark_at_safepoint(r->top());
} else {
assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
assert(_ctx->top_at_mark_start(r) == r->top(),
@@ -1517,7 +1518,7 @@ void ShenandoahHeap::op_final_mark() {
}

{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_region_states);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);
ShenandoahFinalMarkUpdateRegionStateClosure cl;
parallel_heap_region_iterate(&cl);

@@ -1530,19 +1531,19 @@ void ShenandoahHeap::op_final_mark() {
// Weaker one: new allocations would happen past update watermark, and so less work would
// be needed for reference updates (would update the large filler instead).
{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::retire_tlabs);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs);
make_parsable(true);
}

{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::choose_cset);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);
ShenandoahHeapLocker locker(lock());
_collection_set->clear();
heuristics()->choose_collection_set(_collection_set);
}

{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
ShenandoahHeapLocker locker(lock());
_free_set->rebuild();
}
@@ -1555,7 +1556,7 @@ void ShenandoahHeap::op_final_mark() {
// If collection set has candidates, start evacuation.
// Otherwise, bypass the rest of the cycle.
if (!collection_set()->is_empty()) {
ShenandoahGCSubPhase init_evac(ShenandoahPhaseTimings::init_evac);
ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);

if (ShenandoahVerify) {
verifier()->verify_before_evacuation();
@@ -1651,6 +1652,7 @@ public:
_cld_roots(phase) {}

void work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahEvacOOMScope oom;
{
// vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
@@ -1789,6 +1791,7 @@ public:
}

void work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
{
ShenandoahEvacOOMScope oom;
// jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
@@ -1892,7 +1895,7 @@ void ShenandoahHeap::op_full(GCCause::Cause cause) {

full_gc()->do_it(cause);
if (UseTLAB) {
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
resize_all_tlabs();
}

@@ -2203,9 +2206,9 @@ void ShenandoahHeap::stw_unload_classes(bool full_gc) {

// Unload classes and purge SystemDictionary.
{
ShenandoahGCSubPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge_class_unload :
ShenandoahPhaseTimings::purge_class_unload);
ShenandoahGCPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge_class_unload :
ShenandoahPhaseTimings::purge_class_unload);
bool purged_class = SystemDictionary::do_unloading(gc_timer());

ShenandoahIsAliveSelector is_alive;
@@ -2215,9 +2218,9 @@ void ShenandoahHeap::stw_unload_classes(bool full_gc) {
}

{
ShenandoahGCSubPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge_cldg :
ShenandoahPhaseTimings::purge_cldg);
ShenandoahGCPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge_cldg :
ShenandoahPhaseTimings::purge_cldg);
ClassLoaderDataGraph::purge();
}
// Resize and verify metaspace
@@ -2230,14 +2233,14 @@ void ShenandoahHeap::stw_unload_classes(bool full_gc) {
// However, we do need to "null" dead oops in the roots, if can not be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
ShenandoahGCSubPhase root_phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge :
ShenandoahPhaseTimings::purge);
ShenandoahGCPhase root_phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge :
ShenandoahPhaseTimings::purge);
uint num_workers = _workers->active_workers();
ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
ShenandoahPhaseTimings::full_gc_purge_weak_par :
ShenandoahPhaseTimings::purge_weak_par;
ShenandoahGCSubPhase phase(timing_phase);
ShenandoahGCPhase phase(timing_phase);
ShenandoahGCWorkerPhase worker_phase(timing_phase);

// Cleanup weak roots
@@ -2493,7 +2496,7 @@ void ShenandoahHeap::op_init_updaterefs() {
set_evacuation_in_progress(false);

{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
retire_and_reset_gclabs();
}

@@ -2549,7 +2552,7 @@ void ShenandoahHeap::op_final_updaterefs() {

// Check if there is left-over work, and finish it
if (_update_refs_iterator.has_next()) {
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);

// Finish updating references where we left off.
clear_cancelled_gc();
@@ -2579,7 +2582,7 @@ void ShenandoahHeap::op_final_updaterefs() {
}

{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
parallel_heap_region_iterate(&cl);

@@ -2587,7 +2590,7 @@ void ShenandoahHeap::op_final_updaterefs() {
}

{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
trash_cset_regions();
}

@@ -2603,7 +2606,7 @@ void ShenandoahHeap::op_final_updaterefs() {
}

{
ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
ShenandoahHeapLocker locker(lock());
_free_set->rebuild();
}
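
The mechanical ShenandoahGCSubPhase -> ShenandoahGCPhase renames above (and the ShenandoahGCPhase -> ShenandoahTimingsTracker swaps in the vmop entries below) all rely on the same RAII idiom: a stack object stamps the phase start in its constructor and attributes the elapsed interval to the phase in its destructor, so brace nesting is phase nesting. A self-contained sketch, with printf standing in for the real phase table:

    #include <chrono>
    #include <cstdio>

    // Sketch of a scoped phase timer in the style of ShenandoahGCPhase /
    // ShenandoahTimingsTracker (hypothetical, not the HotSpot code).
    class ScopedPhase {
      const char* _name;
      std::chrono::steady_clock::time_point _start;
    public:
      explicit ScopedPhase(const char* name)
        : _name(name), _start(std::chrono::steady_clock::now()) {}
      ~ScopedPhase() {
        std::chrono::duration<double> d =
            std::chrono::steady_clock::now() - _start;
        std::printf("%s: %.6fs\n", _name, d.count());  // stand-in sink
      }
    };

    void op_example() {
      ScopedPhase phase("make_parsable");  // scope == phase duration
      // ... phase work ...
    }
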
@@ -2691,7 +2694,7 @@ void ShenandoahHeap::safepoint_synchronize_end() {

void ShenandoahHeap::vmop_entry_init_mark() {
TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);

try_inject_alloc_failure();
VM_ShenandoahInitMark op;
@@ -2700,7 +2703,7 @@ void ShenandoahHeap::vmop_entry_init_mark() {

void ShenandoahHeap::vmop_entry_final_mark() {
TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);

try_inject_alloc_failure();
VM_ShenandoahFinalMarkStartEvac op;
@@ -2709,7 +2712,7 @@ void ShenandoahHeap::vmop_entry_final_mark() {

void ShenandoahHeap::vmop_entry_init_updaterefs() {
TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);

try_inject_alloc_failure();
VM_ShenandoahInitUpdateRefs op;
@@ -2718,7 +2721,7 @@ void ShenandoahHeap::vmop_entry_init_updaterefs() {

void ShenandoahHeap::vmop_entry_final_updaterefs() {
TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);

try_inject_alloc_failure();
VM_ShenandoahFinalUpdateRefs op;
@@ -2727,7 +2730,7 @@ void ShenandoahHeap::vmop_entry_final_updaterefs() {

void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

try_inject_alloc_failure();
VM_ShenandoahFullGC op(cause);
@@ -2736,7 +2739,7 @@ void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {

void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);

VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
VMThread::execute(&degenerated_gc);
@@ -2744,11 +2747,9 @@

void ShenandoahHeap::entry_init_mark() {
const char* msg = init_mark_event_message();
ShenandoahPausePhase gc_phase(msg);
ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
"init marking");
@@ -2758,11 +2759,9 @@ void ShenandoahHeap::entry_init_mark() {

void ShenandoahHeap::entry_final_mark() {
const char* msg = final_mark_event_message();
ShenandoahPausePhase gc_phase(msg);
ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
"final marking");
@@ -2772,11 +2771,9 @@ void ShenandoahHeap::entry_final_mark() {

void ShenandoahHeap::entry_init_updaterefs() {
static const char* msg = "Pause Init Update Refs";
ShenandoahPausePhase gc_phase(msg);
ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);

// No workers used in this phase, no setup required

op_init_updaterefs();
@@ -2784,11 +2781,9 @@ void ShenandoahHeap::entry_init_updaterefs() {

void ShenandoahHeap::entry_final_updaterefs() {
static const char* msg = "Pause Final Update Refs";
ShenandoahPausePhase gc_phase(msg);
ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
"final reference update");
@@ -2798,11 +2793,9 @@ void ShenandoahHeap::entry_final_updaterefs() {

void ShenandoahHeap::entry_full(GCCause::Cause cause) {
static const char* msg = "Pause Full";
ShenandoahPausePhase gc_phase(msg, true /* log_heap_usage */);
ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
"full gc");
@@ -2813,11 +2806,9 @@ void ShenandoahHeap::entry_full(GCCause::Cause cause) {
void ShenandoahHeap::entry_degenerated(int point) {
ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
const char* msg = degen_event_message(dpoint);
ShenandoahPausePhase gc_phase(msg, true /* log_heap_usage */);
ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
"stw degenerated gc");
@@ -2831,11 +2822,9 @@ void ShenandoahHeap::entry_mark() {
TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

const char* msg = conc_mark_event_message();
ShenandoahConcurrentPhase gc_phase(msg);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
EventMark em("%s", msg);

ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
"concurrent marking");
@@ -2848,11 +2837,9 @@ void ShenandoahHeap::entry_evac() {
TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

static const char* msg = "Concurrent evacuation";
ShenandoahConcurrentPhase gc_phase(msg);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
EventMark em("%s", msg);

ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
"concurrent evacuation");
@@ -2863,11 +2850,9 @@

void ShenandoahHeap::entry_updaterefs() {
static const char* msg = "Concurrent update references";
ShenandoahConcurrentPhase gc_phase(msg);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
"concurrent reference update");
@@ -2878,10 +2863,9 @@ void ShenandoahHeap::entry_updaterefs() {

void ShenandoahHeap::entry_weak_roots() {
static const char* msg = "Concurrent weak roots";
ShenandoahConcurrentPhase gc_phase(msg);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_weak_roots);
ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots);

ShenandoahWorkerScope scope(workers(),
@@ -2894,11 +2878,9 @@ void ShenandoahHeap::entry_weak_roots() {

void ShenandoahHeap::entry_class_unloading() {
static const char* msg = "Concurrent class unloading";
ShenandoahConcurrentPhase gc_phase(msg);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unloading);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_class_unloading);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
"concurrent class unloading");
@@ -2909,10 +2891,9 @@ void ShenandoahHeap::entry_class_unloading() {

void ShenandoahHeap::entry_strong_roots() {
static const char* msg = "Concurrent strong roots";
ShenandoahConcurrentPhase gc_phase(msg);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
EventMark em("%s", msg);

ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_strong_roots);
ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);

ShenandoahWorkerScope scope(workers(),
@@ -2925,11 +2906,9 @@ void ShenandoahHeap::entry_strong_roots() {

void ShenandoahHeap::entry_cleanup_early() {
static const char* msg = "Concurrent cleanup";
ShenandoahConcurrentPhase gc_phase(msg, true /* log_heap_usage */);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
EventMark em("%s", msg);

ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_cleanup_early);

// This phase does not use workers, no need for setup

try_inject_alloc_failure();
@@ -2938,11 +2917,9 @@ void ShenandoahHeap::entry_cleanup_early() {

void ShenandoahHeap::entry_cleanup_complete() {
static const char* msg = "Concurrent cleanup";
ShenandoahConcurrentPhase gc_phase(msg, true /* log_heap_usage */);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
EventMark em("%s", msg);

ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_cleanup_complete);

// This phase does not use workers, no need for setup

try_inject_alloc_failure();
@@ -2951,11 +2928,9 @@ void ShenandoahHeap::entry_cleanup_complete() {

void ShenandoahHeap::entry_reset() {
static const char* msg = "Concurrent reset";
ShenandoahConcurrentPhase gc_phase(msg);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
EventMark em("%s", msg);

ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_reset);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
"concurrent reset");
@@ -2967,11 +2942,9 @@ void ShenandoahHeap::entry_reset() {
void ShenandoahHeap::entry_preclean() {
if (ShenandoahPreclean && process_references()) {
static const char* msg = "Concurrent precleaning";
ShenandoahConcurrentPhase gc_phase(msg);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_preclean);
EventMark em("%s", msg);

ShenandoahGCSubPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);

ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
"concurrent preclean",
@@ -2984,11 +2957,9 @@ void ShenandoahHeap::entry_preclean() {

void ShenandoahHeap::entry_uncommit(double shrink_before) {
static const char *msg = "Concurrent uncommit";
ShenandoahConcurrentPhase gc_phase(msg, true /* log_heap_usage */);
ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
EventMark em("%s", msg);

ShenandoahGCSubPhase phase(ShenandoahPhaseTimings::conc_uncommit);

op_uncommit(shrink_before);
}

@ -243,7 +243,7 @@ private:
|
||||
volatile size_t _live_data;
|
||||
volatile size_t _critical_pins;
|
||||
|
||||
HeapWord* _update_watermark;
|
||||
HeapWord* volatile _update_watermark;
|
||||
|
||||
public:
|
||||
ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed);
|
||||
@ -382,19 +382,9 @@ public:
|
||||
size_t get_tlab_allocs() const;
|
||||
size_t get_gclab_allocs() const;
|
||||
|
||||
HeapWord* get_update_watermark() const {
|
||||
// Updates to the update-watermark only happen at safepoints.
|
||||
// Since those updates are only monotonically increasing, possibly reading
|
||||
// a stale value is only conservative - we would not miss to update any fields.
|
||||
HeapWord* watermark = _update_watermark;
|
||||
assert(bottom() <= watermark && watermark <= top(), "within bounds");
|
||||
return watermark;
|
||||
}
|
||||
|
||||
void set_update_watermark(HeapWord* w) {
|
||||
assert(bottom() <= w && w <= top(), "within bounds");
|
||||
_update_watermark = w;
|
||||
}
|
||||
inline HeapWord* get_update_watermark() const;
|
||||
inline void set_update_watermark(HeapWord* w);
|
||||
inline void set_update_watermark_at_safepoint(HeapWord* w);
|
||||
|
||||
private:
|
||||
void do_commit();
|
||||
|
@ -114,4 +114,22 @@ inline size_t ShenandoahHeapRegion::garbage() const {
|
||||
return result;
|
||||
}
|
||||
|
||||
inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const {
|
||||
HeapWord* watermark = Atomic::load_acquire(&_update_watermark);
|
||||
assert(bottom() <= watermark && watermark <= top(), "within bounds");
|
||||
return watermark;
|
||||
}
|
||||
|
||||
inline void ShenandoahHeapRegion::set_update_watermark(HeapWord* w) {
|
||||
assert(bottom() <= w && w <= top(), "within bounds");
|
||||
Atomic::release_store(&_update_watermark, w);
|
||||
}
|
||||
|
||||
// Fast version that avoids synchronization, only to be used at safepoints.
|
||||
inline void ShenandoahHeapRegion::set_update_watermark_at_safepoint(HeapWord* w) {
|
||||
assert(bottom() <= w && w <= top(), "within bounds");
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Should be at Shenandoah safepoint");
|
||||
_update_watermark = w;
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
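
The new inlines above pair a release-store in the writer with a load-acquire in the reader, so a reader may see a stale (lower) watermark but never a torn or too-high one. A minimal standalone sketch of that idiom, using std::atomic in place of HotSpot's Atomic:: wrappers; the Region type and field names here are illustrative, not HotSpot's:

    #include <atomic>
    #include <cassert>

    // Illustrative analogue of the update-watermark accessors: the writer
    // publishes a monotonically advancing pointer with release semantics,
    // the reader observes it with acquire semantics, so a stale read is
    // merely conservative.
    struct Region {
      char* region_bottom;
      char* region_top;
      std::atomic<char*> update_watermark;

      char* get_update_watermark() const {
        char* w = update_watermark.load(std::memory_order_acquire);
        assert(region_bottom <= w && w <= region_top);
        return w;
      }

      void set_update_watermark(char* w) {
        assert(region_bottom <= w && w <= region_top);
        update_watermark.store(w, std::memory_order_release);
      }

      // Safepoint-only variant: with all mutators stopped, a relaxed
      // store is enough.
      void set_update_watermark_at_safepoint(char* w) {
        assert(region_bottom <= w && w <= region_top);
        update_watermark.store(w, std::memory_order_relaxed);
      }
    };
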
@ -23,7 +23,7 @@
*/

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#include "jfr/jfrEvents.hpp"

@ -348,6 +348,7 @@ public:
}

void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
ShenandoahHeapRegionSetIterator it(slice);
ShenandoahHeapRegion* from_region = it.next();
@ -727,6 +728,7 @@ public:
}

void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahAdjustPointersObjectClosure obj_cl;
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
@ -749,6 +751,7 @@ public:
_preserved_marks(preserved_marks) {}

void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahAdjustPointersClosure cl;
_rp->roots_do(worker_id, &cl);
_preserved_marks->get(worker_id)->adjust_during_full_gc();
@ -814,6 +817,7 @@ public:
}

void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

ShenandoahCompactObjectsClosure cl(worker_id);
@ -960,6 +964,7 @@ public:
}

void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahHeapRegion* region = _regions.next();
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahMarkingContext* const ctx = heap->complete_marking_context();

@ -24,7 +24,7 @@

#include "precompiled.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.hpp"

@ -26,7 +26,7 @@
#define SHARE_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP

#include "gc/shared/referenceProcessor.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "memory/iterator.hpp"

@ -27,7 +27,7 @@
#include "gc/shared/workerDataArray.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/orderAccess.hpp"
@ -57,7 +57,7 @@ ShenandoahPhaseTimings::ShenandoahPhaseTimings(uint max_workers) :
_worker_data[i] = NULL;
SHENANDOAH_PAR_PHASE_DO(,, SHENANDOAH_WORKER_DATA_NULL)
#undef SHENANDOAH_WORKER_DATA_NULL
_cycle_data[i] = 0;
_cycle_data[i] = uninitialized();
}

// Then punch in the worker-related data.
@ -134,7 +134,7 @@ bool ShenandoahPhaseTimings::is_root_work_phase(Phase phase) {
void ShenandoahPhaseTimings::set_cycle_data(Phase phase, double time) {
#ifdef ASSERT
double d = _cycle_data[phase];
assert(d == 0, "Should not be set yet: %s, current value: %lf", phase_name(phase), d);
assert(d == uninitialized(), "Should not be set yet: %s, current value: %lf", phase_name(phase), d);
#endif
_cycle_data[phase] = time;
}
@ -175,23 +175,44 @@ void ShenandoahPhaseTimings::flush_par_workers_to_cycle() {
for (uint pi = 0; pi < _num_phases; pi++) {
Phase phase = Phase(pi);
if (is_worker_phase(phase)) {
double s = 0;
double s = uninitialized();
for (uint i = 1; i < _num_par_phases; i++) {
double t = worker_data(phase, ParPhase(i))->sum();
// add to each line in phase
set_cycle_data(Phase(phase + i + 1), t);
s += t;
ShenandoahWorkerData* wd = worker_data(phase, ParPhase(i));
double ws = uninitialized();
for (uint c = 0; c < _max_workers; c++) {
double v = wd->get(c);
if (v != ShenandoahWorkerData::uninitialized()) {
if (ws == uninitialized()) {
ws = v;
} else {
ws += v;
}
}
}
if (ws != uninitialized()) {
// add to each line in phase
set_cycle_data(Phase(phase + i + 1), ws);
if (s == uninitialized()) {
s = ws;
} else {
s += ws;
}
}
}
if (s != uninitialized()) {
// add to total for phase
set_cycle_data(Phase(phase + 1), s);
}
// add to total for phase
set_cycle_data(Phase(phase + 1), s);
}
}
}

void ShenandoahPhaseTimings::flush_cycle_to_global() {
for (uint i = 0; i < _num_phases; i++) {
_global_data[i].add(_cycle_data[i]);
_cycle_data[i] = 0;
if (_cycle_data[i] != uninitialized()) {
_global_data[i].add(_cycle_data[i]);
_cycle_data[i] = uninitialized();
}
if (_worker_data[i] != NULL) {
_worker_data[i]->reset();
}
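
The timing table now uses -1 as an "uninitialized" sentinel instead of 0, so a phase that never ran is distinguishable from one that ran in zero time and is skipped during aggregation. A small self-contained sketch of that folding logic; the names are stand-ins, not the ShenandoahPhaseTimings API:

    #include <cstdio>
    #include <vector>

    static double uninitialized() { return -1; }

    // Fold per-worker samples into one phase total, skipping unset slots
    // so an untouched phase stays at the sentinel instead of becoming 0.
    double sum_worker_samples(const std::vector<double>& samples) {
      double s = uninitialized();
      for (double v : samples) {
        if (v != uninitialized()) {
          s = (s == uninitialized()) ? v : s + v;
        }
      }
      return s;
    }

    int main() {
      std::vector<double> w = {0.5, uninitialized(), 0.25};
      double s = sum_worker_samples(w);
      if (s != uninitialized()) {
        std::printf("phase total: %.2f\n", s);  // prints 0.75
      }
      return 0;
    }
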
@ -195,6 +195,7 @@ private:
Phase worker_par_phase(Phase phase, ParPhase par_phase);

void set_cycle_data(Phase phase, double time);
static double uninitialized() { return -1; }

public:
ShenandoahPhaseTimings(uint _max_workers);

@ -31,7 +31,7 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"

@ -32,6 +32,7 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
* Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,12 @@ private:
_gclab(NULL),
_gclab_size(0),
_worker_id(INVALID_WORKER_ID),
_force_satb_flush(false) {
_force_satb_flush(false),
_disarmed_value(ShenandoahCodeRoots::disarmed_value()) {

// At least on x86_64, nmethod entry barrier encodes _disarmed_value offset
// in instruction as disp8 immed
assert(in_bytes(disarmed_value_offset()) < 128, "Offset range check");
}

~ShenandoahThreadLocalData() {
@ -128,7 +133,6 @@ public:
assert(data(thread)->_gclab == NULL, "Only initialize once");
data(thread)->_gclab = new PLAB(PLAB::min_size());
data(thread)->_gclab_size = 0;
data(thread)->_disarmed_value = ShenandoahCodeRoots::disarmed_value();
}

static PLAB* gclab(Thread* thread) {

@ -34,6 +34,7 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"

@ -30,12 +30,12 @@
#include "gc/shared/gcWhen.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "utilities/debug.hpp"

ShenandoahPhaseTimings::Phase ShenandoahGCPhase::_current_phase = ShenandoahPhaseTimings::_invalid_phase;
ShenandoahPhaseTimings::Phase ShenandoahTimingsTracker::_current_phase = ShenandoahPhaseTimings::_invalid_phase;

ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause) :
_heap(ShenandoahHeap::heap()),
@ -85,7 +85,8 @@ ShenandoahGCPauseMark::ShenandoahGCPauseMark(uint gc_id, SvcGCMarker::reason_typ
);
}

ShenandoahPausePhase::ShenandoahPausePhase(const char* title, bool log_heap_usage) :
ShenandoahPausePhase::ShenandoahPausePhase(const char* title, ShenandoahPhaseTimings::Phase phase, bool log_heap_usage) :
ShenandoahTimingsTracker(phase),
_tracer(title, NULL, GCCause::_no_gc, log_heap_usage),
_timer(ShenandoahHeap::heap()->gc_timer()) {
_timer->register_gc_pause_start(title);
@ -95,7 +96,8 @@ ShenandoahPausePhase::~ShenandoahPausePhase() {
_timer->register_gc_pause_end();
}

ShenandoahConcurrentPhase::ShenandoahConcurrentPhase(const char* title, bool log_heap_usage) :
ShenandoahConcurrentPhase::ShenandoahConcurrentPhase(const char* title, ShenandoahPhaseTimings::Phase phase, bool log_heap_usage) :
ShenandoahTimingsTracker(phase),
_tracer(title, NULL, GCCause::_no_gc, log_heap_usage),
_timer(ShenandoahHeap::heap()->gc_timer()) {
_timer->register_gc_concurrent_start(title);
@ -105,7 +107,7 @@ ShenandoahConcurrentPhase::~ShenandoahConcurrentPhase() {
_timer->register_gc_concurrent_end();
}

ShenandoahGCPhase::ShenandoahGCPhase(ShenandoahPhaseTimings::Phase phase) :
ShenandoahTimingsTracker::ShenandoahTimingsTracker(ShenandoahPhaseTimings::Phase phase) :
_timings(ShenandoahHeap::heap()->phase_timings()), _phase(phase) {
assert(!Thread::current()->is_Worker_thread() &&
(Thread::current()->is_VM_thread() ||
@ -116,22 +118,22 @@ ShenandoahGCPhase::ShenandoahGCPhase(ShenandoahPhaseTimings::Phase phase) :
_start = os::elapsedTime();
}

ShenandoahGCPhase::~ShenandoahGCPhase() {
ShenandoahTimingsTracker::~ShenandoahTimingsTracker() {
_timings->record_phase_time(_phase, os::elapsedTime() - _start);
_current_phase = _parent_phase;
}

bool ShenandoahGCPhase::is_current_phase_valid() {
bool ShenandoahTimingsTracker::is_current_phase_valid() {
return _current_phase < ShenandoahPhaseTimings::_num_phases;
}

ShenandoahGCSubPhase::ShenandoahGCSubPhase(ShenandoahPhaseTimings::Phase phase) :
ShenandoahGCPhase(phase),
ShenandoahGCPhase::ShenandoahGCPhase(ShenandoahPhaseTimings::Phase phase) :
ShenandoahTimingsTracker(phase),
_timer(ShenandoahHeap::heap()->gc_timer()) {
_timer->register_gc_phase_start(ShenandoahPhaseTimings::phase_name(phase), Ticks::now());
}

ShenandoahGCSubPhase::~ShenandoahGCSubPhase() {
ShenandoahGCPhase::~ShenandoahGCPhase() {
_timer->register_gc_phase_end(Ticks::now());
}

@ -54,27 +54,11 @@ public:
~ShenandoahGCSession();
};

class ShenandoahPausePhase : public StackObj {
private:
GCTraceTimeWrapper<LogLevel::Info, LOG_TAGS(gc)> _tracer;
ConcurrentGCTimer* const _timer;

public:
ShenandoahPausePhase(const char* title, bool log_heap_usage = false);
~ShenandoahPausePhase();
};

class ShenandoahConcurrentPhase : public StackObj {
private:
GCTraceTimeWrapper<LogLevel::Info, LOG_TAGS(gc)> _tracer;
ConcurrentGCTimer* const _timer;

public:
ShenandoahConcurrentPhase(const char* title, bool log_heap_usage = false);
~ShenandoahConcurrentPhase();
};

class ShenandoahGCPhase : public StackObj {
/*
* ShenandoahGCPhaseTiming tracks Shenandoah specific timing information
* of a GC phase
*/
class ShenandoahTimingsTracker : public StackObj {
private:
static ShenandoahPhaseTimings::Phase _current_phase;

@ -84,21 +68,53 @@ private:
double _start;

public:
ShenandoahGCPhase(ShenandoahPhaseTimings::Phase phase);
~ShenandoahGCPhase();
ShenandoahTimingsTracker(ShenandoahPhaseTimings::Phase phase);
~ShenandoahTimingsTracker();

static ShenandoahPhaseTimings::Phase current_phase() { return _current_phase; }

static bool is_current_phase_valid();
};

class ShenandoahGCSubPhase: public ShenandoahGCPhase {
/*
* ShenandoahPausePhase tracks a STW pause and emits Shenandoah timing and
* a corresponding JFR event
*/
class ShenandoahPausePhase : public ShenandoahTimingsTracker {
private:
GCTraceTimeWrapper<LogLevel::Info, LOG_TAGS(gc)> _tracer;
ConcurrentGCTimer* const _timer;

public:
ShenandoahPausePhase(const char* title, ShenandoahPhaseTimings::Phase phase, bool log_heap_usage = false);
~ShenandoahPausePhase();
};

/*
* ShenandoahConcurrentPhase tracks a concurrent GC phase and emits Shenandoah timing and
* a corresponding JFR event
*/
class ShenandoahConcurrentPhase : public ShenandoahTimingsTracker {
private:
GCTraceTimeWrapper<LogLevel::Info, LOG_TAGS(gc)> _tracer;
ConcurrentGCTimer* const _timer;

public:
ShenandoahConcurrentPhase(const char* title, ShenandoahPhaseTimings::Phase phase, bool log_heap_usage = false);
~ShenandoahConcurrentPhase();
};

/*
* ShenandoahGCPhase tracks Shenandoah specific timing information
* and emits a corresponding JFR event of a GC phase
*/
class ShenandoahGCPhase : public ShenandoahTimingsTracker {
private:
ConcurrentGCTimer* const _timer;

public:
ShenandoahGCSubPhase(ShenandoahPhaseTimings::Phase phase);
~ShenandoahGCSubPhase();
ShenandoahGCPhase(ShenandoahPhaseTimings::Phase phase);
~ShenandoahGCPhase();
};

class ShenandoahGCWorkerPhase : public StackObj {

@ -176,6 +176,9 @@ JVM_GetVmArguments(JNIEnv *env);
JNIEXPORT void JNICALL
JVM_InitializeFromArchive(JNIEnv* env, jclass cls);

JNIEXPORT jlong JNICALL
JVM_GetRandomSeedForCDSDump();

/*
* java.lang.Throwable
*/

@ -23,7 +23,7 @@
*/

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeUtils.hpp"

@ -23,7 +23,7 @@
*/

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleDescription.hpp"

@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/utilities/jfrTypes.hpp"

@ -22,6 +22,7 @@
*/

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileBroker.hpp"
#include "jvmci/jvmciCodeInstaller.hpp"

@ -22,6 +22,7 @@
*/

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "compiler/compileBroker.hpp"
#include "jvmci/jniAccessMark.inline.hpp"

@ -503,14 +503,13 @@ private:
void write_archive(char* serialized_data);

void init_first_dump_space(address reserved_bottom) {
address first_space_base = reserved_bottom;
DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
DumpRegion* rw_space = MetaspaceShared::read_write_dump_space();

// Use the same MC->RW->RO ordering as in the base archive.
MetaspaceShared::init_shared_dump_space(mc_space, first_space_base);
MetaspaceShared::init_shared_dump_space(mc_space);
_current_dump_space = mc_space;
_last_verified_top = first_space_base;
_last_verified_top = reserved_bottom;
_num_dump_regions_used = 1;
}

@ -1177,6 +1177,10 @@ void FileMapRegion::init(int region_index, char* base, size_t size, bool read_on
_mapped_base = NULL;
}

static const char* region_names[] = {
"mc", "rw", "ro", "bm", "ca0", "ca1", "oa0", "oa1"
};

void FileMapInfo::write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec) {
Arguments::assert_is_dumping_archive();
@ -1184,6 +1188,9 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
FileMapRegion* si = space_at(region);
char* target_base;

const int num_regions = sizeof(region_names)/sizeof(region_names[0]);
assert(0 <= region && region < num_regions, "sanity");

if (region == MetaspaceShared::bm) {
target_base = NULL; // always NULL for bm region.
} else {
@ -1197,11 +1204,13 @@ void FileMapInfo::write_region(int region, char* base, size_t size,

si->set_file_offset(_file_offset);
char* requested_base = (target_base == NULL) ? NULL : target_base + MetaspaceShared::final_delta();
log_debug(cds)("Shared file region %d: " SIZE_FORMAT_HEX_W(08)
" bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(08),
region, size, p2i(requested_base), _file_offset);

int crc = ClassLoader::crc32(0, base, (jint)size);
if (size > 0) {
log_debug(cds)("Shared file region (%-3s) %d: " SIZE_FORMAT_W(8)
" bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(08)
" crc 0x%08x",
region_names[region], region, size, p2i(requested_base), _file_offset, crc);
}
si->init(region, target_base, size, read_only, allow_exec, crc);

if (base != NULL) {
@ -1246,8 +1255,6 @@ void FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap,
write_oopmaps(open_oopmaps, curr_offset, buffer);
}

log_debug(cds)("ptrmap = " INTPTR_FORMAT " (" SIZE_FORMAT " bytes)",
p2i(buffer), size_in_bytes);
write_region(MetaspaceShared::bm, (char*)buffer, size_in_bytes, /*read_only=*/true, /*allow_exec=*/false);
}

@ -1297,23 +1304,20 @@ size_t FileMapInfo::write_archive_heap_regions(GrowableArray<MemRegion> *heap_me
}

size_t total_size = 0;
for (int i = first_region_id, arr_idx = 0;
i < first_region_id + max_num_regions;
i++, arr_idx++) {
for (int i = 0; i < max_num_regions; i++) {
char* start = NULL;
size_t size = 0;
if (arr_idx < arr_len) {
start = (char*)heap_mem->at(arr_idx).start();
size = heap_mem->at(arr_idx).byte_size();
if (i < arr_len) {
start = (char*)heap_mem->at(i).start();
size = heap_mem->at(i).byte_size();
total_size += size;
}

log_debug(cds)("Archive heap region %d: " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes",
i, p2i(start), p2i(start + size), size);
write_region(i, start, size, false, false);
int region_idx = i + first_region_id;
write_region(region_idx, start, size, false, false);
if (size > 0) {
space_at(i)->init_oopmap(oopmaps->at(arr_idx)._offset,
oopmaps->at(arr_idx)._oopmap_size_in_bits);
space_at(region_idx)->init_oopmap(oopmaps->at(i)._offset,
oopmaps->at(i)._oopmap_size_in_bits);
}
}
return total_size;

@ -142,6 +142,10 @@ oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
if (archived_oop != NULL) {
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
MetaspaceShared::relocate_klass_ptr(archived_oop);
// Clear age -- it might have been set if a GC happened during -Xshare:dump
markWord mark = archived_oop->mark_raw();
mark = mark.set_age(0);
archived_oop->set_mark_raw(mark);
ArchivedObjectCache* cache = archived_object_cache();
cache->put(obj, archived_oop);
log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,

@ -77,6 +77,8 @@

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
ReservedSpace MetaspaceShared::_symbol_rs;
VirtualSpace MetaspaceShared::_symbol_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
@ -120,21 +122,24 @@ char* DumpRegion::expand_top_to(char* newtop) {
MetaspaceShared::report_out_of_space(_name, newtop - _top);
ShouldNotReachHere();
}
uintx delta;
if (DynamicDumpSharedSpaces) {
delta = DynamicArchive::object_delta_uintx(newtop);
} else {
delta = MetaspaceShared::object_delta_uintx(newtop);
}
if (delta > MAX_SHARED_DELTA) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of shared objects and would require
// millions of shared classes.
vm_exit_during_initialization("Out of memory in the CDS archive",
"Please reduce the number of shared classes.");

if (_rs == MetaspaceShared::shared_rs()) {
uintx delta;
if (DynamicDumpSharedSpaces) {
delta = DynamicArchive::object_delta_uintx(newtop);
} else {
delta = MetaspaceShared::object_delta_uintx(newtop);
}
if (delta > MAX_SHARED_DELTA) {
// This is just a sanity check and should not appear in any real world usage. This
// happens only if you allocate more than 2GB of shared objects and would require
// millions of shared classes.
vm_exit_during_initialization("Out of memory in the CDS archive",
"Please reduce the number of shared classes.");
}
}

MetaspaceShared::commit_shared_space_to(newtop);
MetaspaceShared::commit_to(_rs, _vs, newtop);
_top = newtop;
return _top;
}
@ -172,26 +177,35 @@ void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t neede
}
}

void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
_rs = rs;
_vs = vs;
// Start with 0 committed bytes. The memory will be committed as needed by
// MetaspaceShared::commit_to().
if (!_vs->initialize(*_rs, 0)) {
fatal("Unable to allocate memory for shared space");
}
_base = _top = _rs->base();
_end = _rs->end();
}

void DumpRegion::pack(DumpRegion* next) {
assert(!is_packed(), "sanity");
_end = (char*)align_up(_top, Metaspace::reserve_alignment());
_is_packed = true;
if (next != NULL) {
next->_rs = _rs;
next->_vs = _vs;
next->_base = next->_top = this->_end;
next->_end = MetaspaceShared::shared_rs()->end();
next->_end = _rs->end();
}
}
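
With this change, pack() hands the unused tail of the same reservation to the next region instead of consulting the global shared reservation. A rough standalone analogue of the bump-pointer/pack hand-off, using plain pointers rather than the HotSpot DumpRegion API (alignment is omitted for brevity):

    #include <cassert>
    #include <cstddef>

    // Bump-pointer region carved out of one contiguous reservation; pack()
    // seals this region at its current top and gives the remainder of the
    // reservation to the next region.
    struct SketchRegion {
      char* base = nullptr;
      char* top = nullptr;
      char* end = nullptr;

      char* allocate(size_t n) {
        assert(top + n <= end && "region exhausted");
        char* p = top;
        top += n;
        return p;
      }

      void pack(SketchRegion* next, char* reservation_end) {
        end = top;  // seal at the current top
        if (next != nullptr) {
          next->base = next->top = end;
          next->end = reservation_end;
        }
      }
    };
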
static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw");
static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
// Start with 0 committed bytes. The memory will be committed as needed by
// MetaspaceShared::commit_shared_space_to().
if (!_shared_vs.initialize(_shared_rs, 0)) {
fatal("Unable to allocate memory for shared space");
}
first_space->init(&_shared_rs, (char*)first_space_bottom);
void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space) {
first_space->init(&_shared_rs, &_shared_vs);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
@ -211,6 +225,10 @@ void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
current->pack(next);
}

char* MetaspaceShared::symbol_space_alloc(size_t num_bytes) {
return _symbol_region.allocate(num_bytes);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
return _mc_region.allocate(num_bytes);
}
@ -320,6 +338,14 @@ void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
SharedBaseAddress = (size_t)_shared_rs.base();
log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
_shared_rs.size(), p2i(_shared_rs.base()));

size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
_symbol_rs = ReservedSpace(symbol_rs_size);
if (!_symbol_rs.is_reserved()) {
vm_exit_during_initialization("Unable to reserve memory for symbols",
err_msg(SIZE_FORMAT " bytes.", symbol_rs_size));
}
_symbol_region.init(&_symbol_rs, &_symbol_vs);
}

// Called by universe_post_init()
@ -397,33 +423,37 @@ void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
}
}

void MetaspaceShared::commit_shared_space_to(char* newtop) {
void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) {
Arguments::assert_is_dumping_archive();
char* base = _shared_rs.base();
char* base = rs->base();
size_t need_committed_size = newtop - base;
size_t has_committed_size = _shared_vs.committed_size();
size_t has_committed_size = vs->committed_size();
if (need_committed_size < has_committed_size) {
return;
}

size_t min_bytes = need_committed_size - has_committed_size;
size_t preferred_bytes = 1 * M;
size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;
size_t uncommitted = vs->reserved_size() - has_committed_size;

size_t commit = MAX2(min_bytes, preferred_bytes);
commit = MIN2(commit, uncommitted);
assert(commit <= uncommitted, "sanity");

bool result = _shared_vs.expand_by(commit, false);
ArchivePtrMarker::expand_ptr_end((address*)_shared_vs.high());
bool result = vs->expand_by(commit, false);
if (rs == &_shared_rs) {
ArchivePtrMarker::expand_ptr_end((address*)vs->high());
}

if (!result) {
vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
need_committed_size));
}

log_debug(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
commit, _shared_vs.actual_committed_size(), _shared_vs.high());
assert(rs == &_shared_rs || rs == &_symbol_rs, "must be");
const char* which = (rs == &_shared_rs) ? "shared" : "symbol";
log_debug(cds)("Expanding %s spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
which, commit, vs->actual_committed_size(), vs->high());
}
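
The commit-growth arithmetic itself is untouched by the refactoring: commit at least the shortfall, preferably a full 1 MB chunk, and never more than what remains reserved. A hedged standalone restatement of that policy:

    #include <algorithm>
    #include <cstddef>

    // Compute the next commit step for a lazily committed space: grow by
    // max(shortfall, 1 MB), clamped to the uncommitted remainder.
    size_t next_commit_step(size_t need_committed, size_t has_committed,
                            size_t reserved) {
      if (need_committed < has_committed) {
        return 0;  // already committed far enough
      }
      size_t min_bytes = need_committed - has_committed;
      size_t preferred_bytes = 1024 * 1024;
      size_t uncommitted = reserved - has_committed;
      return std::min(std::max(min_bytes, preferred_bytes), uncommitted);
    }
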
void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
@ -506,6 +536,10 @@ uintx MetaspaceShared::object_delta_uintx(void* obj) {
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

static int global_klass_compare(Klass** a, Klass **b) {
return a[0]->name()->fast_compare(b[0]->name());
}

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
return _global_klass_objects;
}
@ -1331,7 +1365,14 @@ public:
RefRelocator ext_reloc;
iterate_roots(&ext_reloc);
}

{
log_info(cds)("Fixing symbol identity hash ... ");
os::init_random(0x12345678);
GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
for (int i=0; i<symbols->length(); i++) {
symbols->at(i)->update_identity_hash();
}
}
#ifdef ASSERT
{
log_info(cds)("Verifying external roots ... ");
@ -1364,6 +1405,21 @@ public:
}

static void iterate_roots(MetaspaceClosure* it) {
// To ensure deterministic contents in the archive, we just need to ensure that
// we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
// the MetaspaceObjs are located originally, as they are copied sequentially into
// the archive during the iteration.
//
// The only issue here is that the symbol table and the system dictionaries may be
// randomly ordered, so we copy the symbols and klasses into two arrays and sort
// them deterministically.
//
// During -Xshare:dump, the order of Symbol creation is strictly determined by
// the SharedClassListFile (class loading is done in a single thread and the JIT
// is disabled). Also, Symbols are allocated in monotonically increasing addresses
// (see Symbol::operator new(size_t, int)). So if we iterate the Symbols by
// ascending address order, we ensure that all Symbols are copied into deterministic
// locations in the archive.
GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols();
for (int i=0; i<symbols->length(); i++) {
it->push(symbols->adr_at(i));
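
A small illustration of the determinism argument in the comment above: hash-table iteration order can vary from run to run, so snapshotting the entries into an array and sorting them by a stable key yields a reproducible visit order. This is purely illustrative and not the HotSpot closure machinery:

    #include <algorithm>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Copy entries out of an unordered container and sort by name so that
    // an archive-writing pass visits them in the same order on every run.
    std::vector<std::string> sorted_snapshot(
        const std::unordered_set<std::string>& table) {
      std::vector<std::string> out(table.begin(), table.end());
      std::sort(out.begin(), out.end());
      return out;
    }
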
@ -1525,6 +1581,7 @@ void VM_PopulateDumpSharedSpace::doit() {
_global_klass_objects = new GrowableArray<Klass*>(1000);
CollectClassesClosure collect_classes;
ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
_global_klass_objects->sort(global_klass_compare);

print_class_stats();

@ -1558,8 +1615,10 @@ void VM_PopulateDumpSharedSpace::doit() {
_ro_region.pack();

// The vtable clones contain addresses of the current process.
// We don't want to write these addresses into the archive.
// We don't want to write these addresses into the archive. Same for i2i buffer.
MetaspaceShared::zero_cpp_vtable_clones_for_writing();
memset(MetaspaceShared::i2i_entry_code_buffers(), 0,
MetaspaceShared::i2i_entry_code_buffers_size());

// relocate the data so that it can be mapped to Arguments::default_SharedBaseAddress()
// without runtime relocation.
@ -1631,7 +1690,7 @@ void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
_mc_region.print(total_reserved);
_rw_region.print(total_reserved);
_ro_region.print(total_reserved);
print_bitmap_region_stats(bitmap_reserved, total_reserved);
print_bitmap_region_stats(bitmap_used, total_reserved);
print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);

@ -1640,8 +1699,8 @@ void VM_PopulateDumpSharedSpace::print_region_stats(FileMapInfo *map_info) {
}

void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
size, size/double(total_size)*100.0, size, p2i(NULL));
log_debug(cds)("bm space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used]",
size, size/double(total_size)*100.0, size);
}

void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,

@ -63,6 +63,8 @@ private:
char* _top;
char* _end;
bool _is_packed;
ReservedSpace* _rs;
VirtualSpace* _vs;

public:
DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}
@ -85,19 +87,7 @@ public:
void print(size_t total_bytes) const;
void print_out_of_space_msg(const char* failing_region, size_t needed_bytes);

void init(const ReservedSpace* rs, char* base) {
if (base == NULL) {
base = rs->base();
}
assert(rs->contains(base), "must be");
_base = _top = base;
_end = rs->end();
}
void init(char* b, char* t, char* e) {
_base = b;
_top = t;
_end = e;
}
void init(ReservedSpace* rs, VirtualSpace* vs);

void pack(DumpRegion* next = NULL);

@ -178,6 +168,8 @@ class MetaspaceShared : AllStatic {
// CDS support
static ReservedSpace _shared_rs;
static VirtualSpace _shared_vs;
static ReservedSpace _symbol_rs;
static VirtualSpace _symbol_vs;
static int _max_alignment;
static MetaspaceSharedStats _stats;
static bool _has_error_classes;
@ -222,11 +214,15 @@ class MetaspaceShared : AllStatic {
NOT_CDS(return NULL);
}

static Symbol* symbol_rs_base() {
return (Symbol*)_symbol_rs.base();
}

static void set_shared_rs(ReservedSpace rs) {
CDS_ONLY(_shared_rs = rs);
}

static void commit_shared_space_to(char* newtop) NOT_CDS_RETURN;
static void commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) NOT_CDS_RETURN;
static void initialize_dumptime_shared_and_meta_spaces() NOT_CDS_RETURN;
static void initialize_runtime_shared_and_meta_spaces() NOT_CDS_RETURN;
static void post_initialize(TRAPS) NOT_CDS_RETURN;
@ -302,7 +298,7 @@ class MetaspaceShared : AllStatic {
#if INCLUDE_CDS
static ReservedSpace reserve_shared_space(size_t size, char* requested_address = NULL);
static size_t reserved_space_alignment();
static void init_shared_dump_space(DumpRegion* first_space, address first_space_bottom = NULL);
static void init_shared_dump_space(DumpRegion* first_space);
static DumpRegion* misc_code_dump_space();
static DumpRegion* read_write_dump_space();
static DumpRegion* read_only_dump_space();
@ -312,7 +308,10 @@ class MetaspaceShared : AllStatic {
static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik);
#endif

// Allocate a block of memory from the "mc", "ro", or "rw" regions.
// Allocate a block of memory from the temporary "symbol" region.
static char* symbol_space_alloc(size_t num_bytes);

// Allocate a block of memory from the "mc" or "ro" regions.
static char* misc_code_space_alloc(size_t num_bytes);
static char* read_only_space_alloc(size_t num_bytes);

@ -505,10 +505,6 @@ InstanceKlass::InstanceKlass(const ClassFileParser& parser, unsigned kind, Klass
assert(is_instance_klass(), "is layout incorrect?");
assert(size_helper() == parser.layout_size(), "incorrect size_helper?");

if (Arguments::is_dumping_archive()) {
SystemDictionaryShared::init_dumptime_info(this);
}

// Set biased locking bit for all instances of this class; it will be
// cleared if revocation occurs too often for this type
if (UseBiasedLocking && BiasedLocking::enabled()) {

@ -958,6 +958,7 @@ public:
}
// allocation
instanceOop allocate_instance(TRAPS);
static instanceOop allocate_instance(oop cls, TRAPS);

// additional member function to return a handle
instanceHandle allocate_instance_handle(TRAPS);

@ -26,7 +26,7 @@
#define SHARE_OOPS_INSTANCEKLASS_INLINE_HPP

#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
@ -157,4 +158,16 @@ ALWAYSINLINE void InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType
oop_oop_iterate_oop_maps_bounded<T>(obj, closure, mr);
}

inline instanceOop InstanceKlass::allocate_instance(oop java_class, TRAPS) {
Klass* k = java_lang_Class::as_Klass(java_class);
if (k == NULL) {
ResourceMark rm(THREAD);
THROW_(vmSymbols::java_lang_InstantiationException(), NULL);
}
InstanceKlass* ik = cast(k);
ik->check_valid_for_instantiation(false, CHECK_NULL);
ik->initialize(CHECK_NULL);
return ik->allocate_instance(THREAD);
}

#endif // SHARE_OOPS_INSTANCEKLASS_INLINE_HPP
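
The new static allocate_instance gathers the null-class check, instantiability check, and lazy initialization that several JNI entry points previously duplicated; the jni.cpp and unsafe.cpp hunks further below all funnel through it. A rough standalone analogue of that consolidation pattern, with toy types rather than HotSpot's:

    #include <new>
    #include <stdexcept>

    struct ToyKlass {
      bool is_abstract_or_interface = false;
      bool initialized = false;
      void initialize() { initialized = true; }
    };

    // One checked entry point replacing several copies of the same
    // sequence: validate the class, initialize on first use, then allocate.
    void* checked_allocate(ToyKlass* k) {
      if (k == nullptr) {
        throw std::runtime_error("InstantiationException");
      }
      if (k->is_abstract_or_interface) {
        throw std::runtime_error("not instantiable");
      }
      if (!k->initialized) {
        k->initialize();
      }
      return ::operator new(16);  // placeholder for the real allocation
    }
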
@ -29,6 +29,7 @@
#include "classfile/javaClasses.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "logging/log.hpp"
@ -79,6 +80,10 @@ void Klass::set_is_cloneable() {
void Klass::set_name(Symbol* n) {
_name = n;
if (_name != NULL) _name->increment_refcount();

if (Arguments::is_dumping_archive() && is_instance_klass()) {
SystemDictionaryShared::init_dumptime_info(InstanceKlass::cast(this));
}
}

bool Klass::is_subclass_of(const Klass* k) const {

@ -30,6 +30,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/symbol.hpp"
@ -56,6 +57,22 @@ Symbol::Symbol(const u1* name, int length, int refcount) {
}

void* Symbol::operator new(size_t sz, int len) throw() {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
// To get deterministic output from -Xshare:dump, we ensure that Symbols are allocated in
// increasing addresses. When the symbols are copied into the archive, we preserve their
// relative address order (see SortedSymbolClosure in metaspaceShared.cpp)
//
// We cannot use arena because arena chunks are allocated by the OS. As a result, for example,
// the archived symbol of "java/lang/Object" may sometimes be lower than "java/lang/String", and
// sometimes be higher. This would cause non-deterministic contents in the archive.
DEBUG_ONLY(static void* last = 0);
void* p = (void*)MetaspaceShared::symbol_space_alloc(size(len)*wordSize);
assert(p > last, "must increase monotonically");
DEBUG_ONLY(last = p);
return p;
}
#endif
int alloc_size = size(len)*wordSize;
address res = (address) AllocateHeap(alloc_size, mtSymbol);
return res;
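
The property the comment above relies on, allocations landing at strictly increasing addresses, falls out of bump-pointer allocation over one pre-reserved buffer and does not hold for a general-purpose heap. A minimal sketch, assuming the buffer is reserved up front:

    #include <cassert>
    #include <cstddef>

    // Bump allocator over a single pre-reserved buffer: each allocation
    // returns a strictly higher address than the previous one, making the
    // relative order of allocated objects deterministic across runs.
    class MonotonicArena {
      char* cur_;
      char* end_;
    public:
      MonotonicArena(char* buf, size_t size) : cur_(buf), end_(buf + size) {}

      void* alloc(size_t n) {
        assert(cur_ + n <= end_ && "arena exhausted");
        void* p = cur_;
        cur_ += n;
        return p;  // monotonically increasing across calls
      }
    };
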
@ -72,11 +89,21 @@ void Symbol::operator delete(void *p) {
FreeHeap(p);
}

#if INCLUDE_CDS
void Symbol::update_identity_hash() {
// This is called at a safepoint during dumping of a static CDS archive. The caller should have
// called os::init_random() with a deterministic seed and then iterate all archived Symbols in
// a deterministic order.
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
_hash_and_refcount = pack_hash_and_refcount((short)os::random(), PERM_REFCOUNT);
}

void Symbol::set_permanent() {
// This is called at a safepoint during dumping of a dynamic CDS archive.
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
_hash_and_refcount = pack_hash_and_refcount(extract_hash(_hash_and_refcount), PERM_REFCOUNT);
}
#endif

// ------------------------------------------------------------------
// Symbol::index_of

@ -168,7 +168,8 @@ class Symbol : public MetaspaceObj {
bool is_permanent() const {
return (refcount() == PERM_REFCOUNT);
}
void set_permanent();
void update_identity_hash() NOT_CDS_RETURN;
void set_permanent() NOT_CDS_RETURN;
void make_permanent();

// Function char_at() returns the Symbol's selected u1 byte as a char type.

@ -2478,7 +2478,9 @@ uint Compile::compute_truth_table(Unique_Node_List& partition, Unique_Node_List&
bool Compile::compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_Node_List& inputs) {
assert(partition.size() == 0, "not empty");
assert(inputs.size() == 0, "not empty");
assert(!is_vector_ternary_bitwise_op(n), "not supported");
if (is_vector_ternary_bitwise_op(n)) {
return false;
}

bool is_unary_op = is_vector_unary_bitwise_op(n);
if (is_unary_op) {
@ -2520,6 +2522,7 @@ bool Compile::compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_No
(inputs.size() == 2 || inputs.size() == 3);
}

void Compile::process_logic_cone_root(PhaseIterGVN &igvn, Node *n, VectorSet &visited) {
assert(is_vector_bitwise_op(n), "not a root");

@ -1594,6 +1594,11 @@ Node* find_node(Node* n, int idx) {
return n->find(idx);
}

// call this from debugger with root node as default:
Node* find_node(int idx) {
return Compile::current()->root()->find(idx);
}

//------------------------------find-------------------------------------------
Node* Node::find(int idx) const {
ResourceArea *area = Thread::current()->resource_area();

@ -49,7 +49,7 @@
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayOop.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceOop.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
@ -1064,19 +1064,6 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
}
}

static instanceOop alloc_object(jclass clazz, TRAPS) {
Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz));
if (k == NULL) {
ResourceMark rm(THREAD);
THROW_(vmSymbols::java_lang_InstantiationException(), NULL);
}
k->check_valid_for_instantiation(false, CHECK_NULL);
k->initialize(CHECK_NULL);
instanceOop ih = InstanceKlass::cast(k)->allocate_instance(THREAD);
return ih;
}

DT_RETURN_MARK_DECL(AllocObject, jobject
, HOTSPOT_JNI_ALLOCOBJECT_RETURN(_ret_ref));

@ -1088,7 +1075,7 @@ JNI_ENTRY(jobject, jni_AllocObject(JNIEnv *env, jclass clazz))
jobject ret = NULL;
DT_RETURN_MARK(AllocObject, jobject, (const jobject&)ret);

instanceOop i = alloc_object(clazz, CHECK_NULL);
instanceOop i = InstanceKlass::allocate_instance(JNIHandles::resolve_non_null(clazz), CHECK_NULL);
ret = JNIHandles::make_local(env, i);
return ret;
JNI_END
@ -1104,7 +1091,7 @@ JNI_ENTRY(jobject, jni_NewObjectA(JNIEnv *env, jclass clazz, jmethodID methodID,
jobject obj = NULL;
DT_RETURN_MARK(NewObjectA, jobject, (const jobject)obj);

instanceOop i = alloc_object(clazz, CHECK_NULL);
instanceOop i = InstanceKlass::allocate_instance(JNIHandles::resolve_non_null(clazz), CHECK_NULL);
obj = JNIHandles::make_local(env, i);
JavaValue jvalue(T_VOID);
JNI_ArgumentPusherArray ap(methodID, args);
@ -1124,7 +1111,7 @@ JNI_ENTRY(jobject, jni_NewObjectV(JNIEnv *env, jclass clazz, jmethodID methodID,
jobject obj = NULL;
DT_RETURN_MARK(NewObjectV, jobject, (const jobject&)obj);

instanceOop i = alloc_object(clazz, CHECK_NULL);
instanceOop i = InstanceKlass::allocate_instance(JNIHandles::resolve_non_null(clazz), CHECK_NULL);
obj = JNIHandles::make_local(env, i);
JavaValue jvalue(T_VOID);
JNI_ArgumentPusherVaArg ap(methodID, args);
@ -1144,7 +1131,7 @@ JNI_ENTRY(jobject, jni_NewObject(JNIEnv *env, jclass clazz, jmethodID methodID,
jobject obj = NULL;
DT_RETURN_MARK(NewObject, jobject, (const jobject&)obj);

instanceOop i = alloc_object(clazz, CHECK_NULL);
instanceOop i = InstanceKlass::allocate_instance(JNIHandles::resolve_non_null(clazz), CHECK_NULL);
obj = JNIHandles::make_local(env, i);
va_list args;
va_start(args, methodID);

@ -3733,6 +3733,29 @@ JVM_ENTRY(void, JVM_InitializeFromArchive(JNIEnv* env, jclass cls))
HeapShared::initialize_from_archived_subgraph(k);
JVM_END

JVM_ENTRY_NO_ENV(jlong, JVM_GetRandomSeedForCDSDump())
JVMWrapper("JVM_GetRandomSeedForCDSDump");
if (DumpSharedSpaces) {
const char* release = Abstract_VM_Version::vm_release();
const char* dbg_level = Abstract_VM_Version::jdk_debug_level();
const char* version = VM_Version::internal_vm_info_string();
jlong seed = (jlong)(java_lang_String::hash_code((const jbyte*)release, (int)strlen(release)) ^
java_lang_String::hash_code((const jbyte*)dbg_level, (int)strlen(dbg_level)) ^
java_lang_String::hash_code((const jbyte*)version, (int)strlen(version)));
seed += (jlong)Abstract_VM_Version::vm_major_version();
seed += (jlong)Abstract_VM_Version::vm_minor_version();
seed += (jlong)Abstract_VM_Version::vm_security_version();
seed += (jlong)Abstract_VM_Version::vm_patch_version();
if (seed == 0) { // don't let this ever be zero.
seed = 0x87654321;
}
log_debug(cds)("JVM_GetRandomSeedForCDSDump() = " JLONG_FORMAT, seed);
return seed;
} else {
return 0;
}
JVM_END
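
A hedged standalone restatement of the seed construction above: fold build-identifying strings together with a string hash, add the numeric version components, and substitute a fixed non-zero constant if everything cancels to zero. The hash below is the Java-style 31*h + c scheme, standing in for java_lang_String::hash_code:

    #include <cstdint>

    // Java-style string hash, a stand-in for java_lang_String::hash_code.
    static int32_t jhash(const char* s) {
      int32_t h = 0;
      for (; *s != '\0'; s++) {
        h = 31 * h + (unsigned char)*s;
      }
      return h;
    }

    // Derive a deterministic, build-identifying seed; never return zero.
    int64_t seed_for_cds_dump(const char* release, const char* dbg_level,
                              const char* version, int64_t major, int64_t minor,
                              int64_t security, int64_t patch) {
      int64_t seed = (int64_t)(jhash(release) ^ jhash(dbg_level) ^ jhash(version));
      seed += major + minor + security + patch;
      if (seed == 0) {  // don't let this ever be zero
        seed = 0x87654321;
      }
      return seed;
    }
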
|
||||
|
||||
// Returns an array of all live Thread objects (VM internal JavaThreads,
|
||||
// jvmti agent threads, and JNI attaching threads are skipped)
|
||||
// See CR 6404306 regarding JNI attaching threads
|
||||
|
@ -27,12 +27,14 @@
|
||||
#include "jvm.h"
|
||||
#include "classfile/classFileStream.hpp"
|
||||
#include "classfile/classLoader.hpp"
|
||||
#include "classfile/javaClasses.inline.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "jfr/jfrEvents.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/fieldStreams.inline.hpp"
|
||||
#include "oops/instanceKlass.inline.hpp"
|
||||
#include "oops/objArrayOop.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "oops/typeArrayOop.inline.hpp"
|
||||
@ -353,8 +355,8 @@ UNSAFE_LEAF(void, Unsafe_FullFence(JNIEnv *env, jobject unsafe)) {
|
||||
////// Allocation requests
|
||||
|
||||
UNSAFE_ENTRY(jobject, Unsafe_AllocateInstance(JNIEnv *env, jobject unsafe, jclass cls)) {
|
||||
ThreadToNativeFromVM ttnfv(thread);
|
||||
return env->AllocObject(cls);
|
||||
instanceOop i = InstanceKlass::allocate_instance(JNIHandles::resolve_non_null(cls), CHECK_NULL);
|
||||
return JNIHandles::make_local(env, i);
|
||||
} UNSAFE_END
|
||||
|
||||
UNSAFE_ENTRY(jlong, Unsafe_AllocateMemory0(JNIEnv *env, jobject unsafe, jlong size)) {
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include <new>
|
||||
|
||||
#include "classfile/classLoaderDataGraph.hpp"
|
||||
#include "classfile/javaClasses.inline.hpp"
|
||||
#include "classfile/modules.hpp"
|
||||
#include "classfile/protectionDomainCache.hpp"
|
||||
#include "classfile/stringTable.hpp"
|
||||
|
@ -522,7 +522,6 @@ static SpecialFlag const special_jvm_flags[] = {
|
||||
{ "UseMembar", JDK_Version::jdk(10), JDK_Version::jdk(12), JDK_Version::undefined() },
|
||||
{ "AllowRedefinitionToAddDeleteMethods", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
|
||||
{ "FlightRecorder", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
|
||||
{ "MonitorBound", JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
|
||||
{ "PrintVMQWaitTime", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
|
||||
{ "UseNewFieldLayout", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
|
||||
{ "ForceNUMA", JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
|
||||
@ -550,6 +549,7 @@ static SpecialFlag const special_jvm_flags[] = {
|
||||
{ "UseSSE", JDK_Version::undefined(), JDK_Version::jdk(15), JDK_Version::jdk(16) },
|
||||
#endif // !X86
|
||||
{ "UseAdaptiveGCBoundary", JDK_Version::undefined(), JDK_Version::jdk(15), JDK_Version::jdk(16) },
|
||||
{ "MonitorBound", JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
|
||||
|
||||
#ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
|
||||
// These entries will generate build errors. Their purpose is to test the macros.
|
||||
@ -4158,14 +4158,6 @@ jint Arguments::adjust_after_os() {
|
||||
FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
|
||||
}
|
||||
}
|
||||
// UseNUMAInterleaving is set to ON for all collectors and platforms when
|
||||
// UseNUMA is set to ON. NUMA-aware collectors will interleave old gen and
|
||||
// survivor spaces on top of NUMA allocation policy for the eden space.
|
||||
// Non NUMA-aware collectors will interleave all of the heap spaces across
|
||||
// NUMA nodes.
|
||||
if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
|
||||
FLAG_SET_ERGO(UseNUMAInterleaving, true);
|
||||
}
|
||||
}
|
||||
return JNI_OK;
|
||||
}
|
||||
|
@ -26,6 +26,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "jvm.h"
|
||||
#include "classfile/javaClasses.inline.hpp"
|
||||
#include "classfile/symbolTable.hpp"
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
|
@ -426,7 +426,7 @@ void JVMFlag::print_on(outputStream* st, bool withComments, bool printRanges) {
//    double MinRAMPercentage                   [ 0.000 ... 100.000 ] {product} {default}
//     uintx MinSurvivorRatio                   [ 3 ... 18446744073709551615 ] {product} {default}
//    size_t MinTLABSize                        [ 1 ... 9223372036854775807 ] {product} {default}
//      intx MonitorBound                       [ 0 ... 2147483647 ] {product} {default}
//      intx MaxInlineSize                      [ 0 ... 2147483647 ] {product} {default}
//         |    |                             |        |           |  |
//         |    |                             |        |           |  +-- col7
//         |    |                             |        |           +-- col6
@ -691,9 +691,6 @@ const size_t minimumSymbolTableSize = 1024;
          "Use LWP-based instead of libthread-based synchronization "       \
          "(SPARC only)")                                                   \
                                                                            \
  product(intx, MonitorBound, 0, "(Deprecated) Bound Monitor population")   \
          range(0, max_jint)                                                \
                                                                            \
  experimental(intx, MonitorUsedDeflationThreshold, 90,                     \
          "Percentage of used monitors before triggering cleanup "          \
          "safepoint which deflates monitors (0 is off). "                  \
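MonitorUsedDeflationThreshold is the surviving trigger once MonitorBound is gone: cleanup is requested when in-use monitors exceed a percentage of the monitor population, with 0 disabling the check. A minimal sketch of that arithmetic (our function, mirroring the intent of monitors_used_above_threshold, not its exact code):

    #include <cstdio>

    static bool monitors_used_above_threshold(long population, long free_count,
                                              int threshold_percent) {
      if (threshold_percent == 0) return false;  // 0 is off
      long used = population - free_count;
      return used * 100 > population * (long)threshold_percent;
    }

    int main() {
      // 950 of 1000 monitors in use against the default 90% threshold.
      bool above = monitors_used_above_threshold(1000, 50, 90);
      std::printf("above threshold: %s\n", above ? "yes" : "no");
      return 0;
    }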
@ -120,6 +120,7 @@ Monitor* Notification_lock = NULL;
Monitor* PeriodicTask_lock = NULL;
Monitor* RedefineClasses_lock = NULL;
Mutex*   Verify_lock = NULL;
Monitor* Zip_lock = NULL;

#if INCLUDE_JFR
Mutex*   JfrStacktrace_lock = NULL;
@ -309,6 +310,7 @@ void mutex_init() {
  def(PeriodicTask_lock          , PaddedMonitor, nonleaf+5, true,  _safepoint_check_always);
  def(RedefineClasses_lock       , PaddedMonitor, nonleaf+5, true,  _safepoint_check_always);
  def(Verify_lock                , PaddedMutex  , nonleaf+5, true,  _safepoint_check_always);
  def(Zip_lock                   , PaddedMonitor, leaf,      true,  _safepoint_check_never);

  if (WhiteBoxAPI) {
    def(Compilation_lock         , PaddedMonitor, leaf,      false, _safepoint_check_never);
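Each def(...) line above carries a rank (leaf, nonleaf+5, ...) because HotSpot's lock-order checking requires locks to be taken in order of decreasing rank: a thread may only acquire a lock ranked below the locks it already holds, so leaf locks are always taken last. A stand-in sketch of that invariant (not the HotSpot Mutex implementation; rank numbers are ours):

    #include <cassert>
    #include <vector>

    struct RankedMutex {
      int rank;
      explicit RankedMutex(int r) : rank(r) {}
    };

    thread_local std::vector<const RankedMutex*> held;

    static void lock(const RankedMutex& m) {
      // A newly acquired lock must rank below the most recently taken lock.
      assert(held.empty() || m.rank < held.back()->rank);
      held.push_back(&m);
    }

    static void unlock(const RankedMutex& m) {
      assert(!held.empty() && held.back() == &m);
      held.pop_back();
    }

    int main() {
      RankedMutex verify_lock(10);  // think nonleaf+5
      RankedMutex zip_lock(1);      // think leaf
      lock(verify_lock);
      lock(zip_lock);               // fine: descending rank
      unlock(zip_lock);
      unlock(verify_lock);
      return 0;
    }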
@ -115,6 +115,7 @@ extern Monitor* Notification_lock;        // a lock used for notification
extern Monitor* PeriodicTask_lock;        // protects the periodic task structure
extern Monitor* RedefineClasses_lock;     // locks classes from parallel redefinition
extern Mutex*   Verify_lock;              // synchronize initialization of verify library
extern Monitor* Zip_lock;                 // synchronize initialization of zip library
extern Monitor* ThreadsSMRDelete_lock;    // Used by ThreadsSMRSupport to take pressure off the Threads_lock
extern Mutex*   ThreadIdTableCreate_lock; // Used by ThreadIdTable to lazily create the thread id table
extern Mutex*   SharedDecoder_lock;       // serializes access to the decoder during normal (not error reporting) use
@ -499,10 +499,6 @@ bool SafepointSynchronize::is_cleanup_needed() {
  return false;
}

bool SafepointSynchronize::is_forced_cleanup_needed() {
  return ObjectSynchronizer::needs_monitor_scavenge();
}

class ParallelSPCleanupThreadClosure : public ThreadClosure {
 private:
  CodeBlobClosure* _nmethod_cl;
@ -162,7 +162,6 @@ public:
  static void handle_polling_page_exception(JavaThread *thread);

  static bool is_cleanup_needed();
  static bool is_forced_cleanup_needed();
  static void do_cleanup_tasks();

  static void set_is_at_safepoint() { _state = _synchronized; }
@ -781,7 +781,6 @@ struct SharedGlobals {
};

static SharedGlobals GVars;
static int _forceMonitorScavenge = 0;  // Scavenge required and pending

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
@ -1170,27 +1169,8 @@ static bool monitors_used_above_threshold() {
  return false;
}

// Returns true if MonitorBound is set (> 0) and if the specified
// cnt is > MonitorBound. Otherwise returns false.
static bool is_MonitorBound_exceeded(const int cnt) {
  const int mx = MonitorBound;
  return mx > 0 && cnt > mx;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  if (monitors_used_above_threshold()) {
    // Too many monitors in use.
    return true;
  }
  return needs_monitor_scavenge();
}

bool ObjectSynchronizer::needs_monitor_scavenge() {
  if (Atomic::load(&_forceMonitorScavenge) == 1) {
    log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
    return true;
  }
  return false;
  return monitors_used_above_threshold();
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
@ -1237,41 +1217,6 @@ void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
// -- assigned to an object. The object is inflated and the mark refers
//    to the ObjectMonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but the
// rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// If MonitorBound is set, the boundary applies to
//     (om_list_globals._population - om_list_globals._free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.

static void InduceScavenge(Thread* self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger a cleanup safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
    VMThread::check_for_forced_cleanup();
  }
}

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
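The deleted InduceScavenge is still a useful pattern: Atomic::xchg turns many concurrent "please scavenge" requests into exactly one VM-thread notification, and the flag is re-armed only after cleanup runs. The same shape with std::atomic (a sketch, not HotSpot code):

    #include <atomic>
    #include <cstdio>

    static std::atomic<int> force_scavenge{0};

    static void induce_scavenge(const char* whence) {
      // Only the caller that flips 0 -> 1 notifies; the rest see 1 and return.
      if (force_scavenge.exchange(1) == 0) {
        std::printf("cleanup safepoint requested from %s\n", whence);
      }
    }

    int main() {
      induce_scavenge("om_alloc");  // prints: request raised
      induce_scavenge("om_alloc");  // silent: a request is already pending
      force_scavenge.store(0);      // the reset finish_deflate_idle_monitors used to do
      induce_scavenge("om_alloc");  // prints again
      return 0;
    }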
@ -1315,15 +1260,6 @@ ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
    }
    self->om_free_provision += 1 + (self->om_free_provision / 2);
    if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

    if (is_MonitorBound_exceeded(Atomic::load(&om_list_globals._population) -
                                 Atomic::load(&om_list_globals._free_count))) {
      // Not enough ObjectMonitors on the global free list.
      // We can't safely induce a STW safepoint from om_alloc() as our thread
      // state may not be appropriate for such activities and callers may hold
      // naked oops, so instead we defer the action.
      InduceScavenge(self, "om_alloc");
    }
    continue;
  }

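The om_free_provision update kept in context above grows a thread's private free-list refill target by roughly 1.5x per refill, capped at MAXPRIVATE. A sketch of the resulting sequence (starting value and cap are illustrative assumptions, not HotSpot's exact constants):

    #include <cstdio>

    int main() {
      const int MAXPRIVATE = 1024;  // assumed cap for illustration
      int provision = 32;           // assumed starting provision
      for (int refill = 1; refill <= 10; refill++) {
        provision += 1 + provision / 2;
        if (provision > MAXPRIVATE) provision = MAXPRIVATE;
        std::printf("after refill %2d: %d\n", refill, provision);
      }
      return 0;
    }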
@ -2025,8 +1961,6 @@ void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* co
                     Atomic::load(&om_list_globals._free_count));
  }

  Atomic::store(&_forceMonitorScavenge, 0);  // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
Some files were not shown because too many files have changed in this diff.