J. Duke 2017-07-05 19:59:00 +02:00
commit 95e9d4885f
574 changed files with 12768 additions and 22391 deletions

View File

@ -270,3 +270,4 @@ ee4fd72b2ec3d92497f37163352f294aa695c6fb jdk9-b20
aefd8899a8d6615fb34ba99b2e38996a7145baa8 jdk9-b25
d3ec8d048e6c3c46b6e0ee011cc551ad386dfba5 jdk9-b26
ba5645f2735b41ed085d07ba20fa7b322afff318 jdk9-b27
ea2f7981236f3812436958748ab3d26e80a35130 jdk9-b28

View File

@ -136,10 +136,12 @@ help:
$(info . make docs # Create all docs)
$(info . make docs-javadoc # Create just javadocs, depends on less than full docs)
$(info . make profiles # Create complete j2re compact profile images)
$(info . make bootcycle-images # Build images twice, second time with newly build JDK)
$(info . make bootcycle-images # Build images twice, second time with newly built JDK)
$(info . make install # Install the generated images locally)
$(info . make clean # Remove all files generated by make, but not those)
$(info . # generated by configure)
$(info . # generated by configure. Do not run clean and other)
$(info . # targets together as that might behave in an)
$(info . # unexpected way.)
$(info . make dist-clean # Remove all files, including configuration)
$(info . make help # Give some help on using make)
$(info . make test # Run tests, default is all tests (see TEST below))
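# Minimal usage sketch, not part of the commit: the new help text above warns
# against combining clean with other targets, so run them as separate invocations.
make clean
make bootcycle-images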

View File

@ -849,7 +849,12 @@ AC_DEFUN([BASIC_CHECK_FIND_DELETE],
if test -f $DELETEDIR/TestIfFindSupportsDelete; then
# No, it does not.
rm $DELETEDIR/TestIfFindSupportsDelete
FIND_DELETE="-exec rm \{\} \+"
if test "x$OPENJDK_TARGET_OS" = "xaix"; then
# AIX 'find' is buggy if called with '-exec {} \+' and an empty file list
FIND_DELETE="-print | xargs rm"
else
FIND_DELETE="-exec rm \{\} \+"
fi
AC_MSG_RESULT([no])
else
AC_MSG_RESULT([yes])
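# Minimal sketch, not part of the commit: how a FIND_DELETE fragment like the one
# chosen above is consumed from plain sh. The demo directory and file pattern are
# made-up; eval is needed because the AIX variant contains a pipe and therefore
# cannot be passed to find as ordinary arguments.
FIND_DELETE="-print | xargs rm"                  # AIX form; elsewhere "-exec rm \{\} \+"
demo_dir=/tmp/find-delete-demo
mkdir -p "$demo_dir" && touch "$demo_dir/stale.tmp"
eval find "$demo_dir" -name '"*.tmp"' $FIND_DELETE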

View File

@ -370,18 +370,27 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Maximum amount of heap memory.
# Maximum stack size.
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BUILD_NUM_BITS" = x32; then
JVM_MAX_HEAP=1100M
if test "$JVM_MAX_HEAP" -gt "1100"; then
JVM_MAX_HEAP=1100
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
# pointers are used. Apparently, we need to increase the heap and stack
# space for the jvm. More specifically, when running javac to build huge
# jdk batch
JVM_MAX_HEAP=1600M
if test "$JVM_MAX_HEAP" -gt "1600"; then
JVM_MAX_HEAP=1600
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=1536
fi
ADD_JVM_ARG_IF_OK([-Xmx$JVM_MAX_HEAP],boot_jdk_jvmargs_big,[$JAVA])
ADD_JVM_ARG_IF_OK([-Xmx${JVM_MAX_HEAP}M],boot_jdk_jvmargs_big,[$JAVA])
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs_big,[$JAVA])
AC_MSG_RESULT([$boot_jdk_jvmargs_big])
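# Minimal sketch, not part of the commit, of the sizing logic above: the boot JDK
# heap becomes half of MEMORY_SIZE (megabytes), clamped to 512..1100 MB for 32-bit
# builds and 512..1600 MB for 64-bit builds. MEMORY_SIZE and BUILD_NUM_BITS are
# stand-in values here.
MEMORY_SIZE=8192
BUILD_NUM_BITS=64
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BUILD_NUM_BITS" = x32; then
  HEAP_CAP=1100; STACK_SIZE=768
else
  HEAP_CAP=1600; STACK_SIZE=1536
fi
if test "$JVM_MAX_HEAP" -gt "$HEAP_CAP"; then
  JVM_MAX_HEAP=$HEAP_CAP
elif test "$JVM_MAX_HEAP" -lt "512"; then
  JVM_MAX_HEAP=512
fi
echo "-Xmx${JVM_MAX_HEAP}M -XX:ThreadStackSize=$STACK_SIZE"   # -Xmx1600M -XX:ThreadStackSize=1536 here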

View File

@ -131,8 +131,8 @@ AC_DEFUN_ONCE([BPERF_SETUP_BUILD_JOBS],
if test "x$with_jobs" = x; then
# Number of jobs was not specified, calculate.
AC_MSG_CHECKING([for appropriate number of jobs to run in parallel])
# Approximate memory in GB, rounding up a bit.
memory_gb=`expr $MEMORY_SIZE / 1100`
# Approximate memory in GB.
memory_gb=`expr $MEMORY_SIZE / 1024`
# Pick the lowest of memory in gb and number of cores.
if test "$memory_gb" -lt "$NUM_CORES"; then
JOBS="$memory_gb"
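# Minimal sketch, not part of the commit, of the heuristic above: MEMORY_SIZE (MB)
# now divides by 1024 into whole gigabytes, and the job count is the smaller of
# that and the core count. MEMORY_SIZE and NUM_CORES are stand-in values.
MEMORY_SIZE=16384
NUM_CORES=8
memory_gb=`expr $MEMORY_SIZE / 1024`
if test "$memory_gb" -lt "$NUM_CORES"; then
  JOBS="$memory_gb"
else
  JOBS="$NUM_CORES"
fi
echo "JOBS=$JOBS"    # JOBS=8 for these stand-in values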
@ -291,16 +291,11 @@ AC_DEFUN_ONCE([BPERF_SETUP_SMART_JAVAC],
AC_MSG_ERROR([Could not execute server java: $SJAVAC_SERVER_JAVA])
fi
else
SJAVAC_SERVER_JAVA=""
# Hotspot specific options.
ADD_JVM_ARG_IF_OK([-verbosegc],SJAVAC_SERVER_JAVA,[$JAVA])
# JRockit specific options.
ADD_JVM_ARG_IF_OK([-Xverbose:gc],SJAVAC_SERVER_JAVA,[$JAVA])
SJAVAC_SERVER_JAVA="$JAVA $SJAVAC_SERVER_JAVA"
SJAVAC_SERVER_JAVA="$JAVA"
fi
AC_SUBST(SJAVAC_SERVER_JAVA)
if test "$MEMORY_SIZE" -gt "2500"; then
if test "$MEMORY_SIZE" -gt "3000"; then
ADD_JVM_ARG_IF_OK([-d64],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
if test "$JVM_ARG_OK" = true; then
JVM_64BIT=true
@ -308,34 +303,33 @@ AC_DEFUN_ONCE([BPERF_SETUP_SMART_JAVAC],
fi
fi
MX_VALUE=`expr $MEMORY_SIZE / 2`
if test "$JVM_64BIT" = true; then
if test "$MEMORY_SIZE" -gt "17000"; then
ADD_JVM_ARG_IF_OK([-Xms10G -Xmx10G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
# Set ms lower than mx since more than one instance of the server might
# get launched at the same time before they figure out which instance won.
MS_VALUE=512
if test "$MX_VALUE" -gt "2048"; then
MX_VALUE=2048
fi
if test "$MEMORY_SIZE" -gt "10000" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms6G -Xmx6G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
fi
if test "$MEMORY_SIZE" -gt "5000" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms1G -Xmx3G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
fi
if test "$MEMORY_SIZE" -gt "3800" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms1G -Xmx2500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
else
MS_VALUE=256
if test "$MX_VALUE" -gt "1500"; then
MX_VALUE=1500
fi
fi
if test "$MEMORY_SIZE" -gt "2500" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms1000M -Xmx1500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
fi
if test "$MEMORY_SIZE" -gt "1000" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms400M -Xmx1100M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
fi
if test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms256M -Xmx512M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
if test "$MX_VALUE" -lt "512"; then
MX_VALUE=512
fi
ADD_JVM_ARG_IF_OK([-Xms${MS_VALUE}M -Xmx${MX_VALUE}M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
AC_MSG_CHECKING([whether to use sjavac])
AC_ARG_ENABLE([sjavac], [AS_HELP_STRING([--enable-sjavac],
[use sjavac to do fast incremental compiles @<:@disabled@:>@])],
[ENABLE_SJAVAC="${enableval}"], [ENABLE_SJAVAC='no'])
if test "x$JVM_ARG_OK" = "xfalse"; then
AC_MSG_WARN([Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac])
ENABLE_SJAVAC=no;
fi
AC_MSG_CHECKING([whether to use sjavac])
AC_MSG_RESULT([$ENABLE_SJAVAC])
AC_SUBST(ENABLE_SJAVAC)
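# Minimal sketch, not part of the commit, of the sjavac server sizing above: -Xmx is
# half of MEMORY_SIZE (MB), capped at 2048 MB when a 64-bit server JVM was selected
# and at 1500 MB otherwise, with a 512 MB floor; -Xms stays lower than -Xmx because
# several server instances may start before one wins. Inputs are stand-in values.
MEMORY_SIZE=6000
JVM_64BIT=true
MX_VALUE=`expr $MEMORY_SIZE / 2`
if test "$JVM_64BIT" = true; then
  MS_VALUE=512
  if test "$MX_VALUE" -gt "2048"; then MX_VALUE=2048; fi
else
  MS_VALUE=256
  if test "$MX_VALUE" -gt "1500"; then MX_VALUE=1500; fi
fi
if test "$MX_VALUE" -lt "512"; then MX_VALUE=512; fi
echo "-Xms${MS_VALUE}M -Xmx${MX_VALUE}M"    # -Xms512M -Xmx2048M for these inputs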

View File

@ -142,7 +142,6 @@ JDKOPT_SETUP_JDK_VERSION_NUMBERS
###############################################################################
BOOTJDK_SETUP_BOOT_JDK
BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS
###############################################################################
#
@ -233,6 +232,9 @@ BPERF_SETUP_BUILD_CORES
BPERF_SETUP_BUILD_MEMORY
BPERF_SETUP_BUILD_JOBS
# Setup arguments for the boot jdk (after cores and memory have been setup)
BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS
# Setup smart javac (after cores and memory have been setup)
BPERF_SETUP_SMART_JAVAC

View File

@ -342,17 +342,15 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
# no adjustment
;;
fastdebug )
# Add compile time bounds checks.
CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
# no adjustment
;;
slowdebug )
# Add runtime bounds checks and symbol info.
CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
# Add runtime stack smashing and undefined behavior checks
CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then
CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
fi
;;
esac

View File

@ -634,6 +634,10 @@ USE_PRECOMPILED_HEADER
SJAVAC_SERVER_DIR
ENABLE_SJAVAC
SJAVAC_SERVER_JAVA
JAVA_TOOL_FLAGS_SMALL
JAVA_FLAGS_SMALL
JAVA_FLAGS_BIG
JAVA_FLAGS
JOBS
MEMORY_SIZE
NUM_CORES
@ -805,10 +809,6 @@ JAXWS_TOPDIR
JAXP_TOPDIR
CORBA_TOPDIR
LANGTOOLS_TOPDIR
JAVA_TOOL_FLAGS_SMALL
JAVA_FLAGS_SMALL
JAVA_FLAGS_BIG
JAVA_FLAGS
JAVAC_FLAGS
BOOT_JDK_SOURCETARGET
JARSIGNER
@ -1064,7 +1064,6 @@ with_update_version
with_user_release_suffix
with_build_number
with_boot_jdk
with_boot_jdk_jvmargs
with_add_source_root
with_override_source_root
with_adds_and_overrides
@ -1106,6 +1105,7 @@ with_dxsdk_include
with_num_cores
with_memory_size
with_jobs
with_boot_jdk_jvmargs
with_sjavac_server_java
enable_sjavac
enable_precompiled_headers
@ -1904,10 +1904,6 @@ Optional Packages:
number is not set.[username_builddateb00]
--with-build-number Set build number value for build [b00]
--with-boot-jdk path to Boot JDK (used to bootstrap build) [probed]
--with-boot-jdk-jvmargs specify JVM arguments to be passed to all java
invocations of boot JDK, overriding the default
values, e.g --with-boot-jdk-jvmargs="-Xmx8G
-enableassertions"
--with-add-source-root for each and every source directory, look in this
additional source root for the same directory; if it
exists and have files in it, include it in the build
@ -1979,6 +1975,10 @@ Optional Packages:
--with-memory-size=1024 [probed]
--with-jobs number of parallel jobs to let make run [calculated
based on cores and memory]
--with-boot-jdk-jvmargs specify JVM arguments to be passed to all java
invocations of boot JDK, overriding the default
values, e.g --with-boot-jdk-jvmargs="-Xmx8G
-enableassertions"
--with-sjavac-server-java
use this java binary for running the sjavac
background server [Boot JDK java]
@ -4321,7 +4321,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1408448519
DATE_WHEN_GENERATED=1409311712
###############################################################################
#
@ -17284,7 +17284,12 @@ $as_echo_n "checking if find supports -delete... " >&6; }
if test -f $DELETEDIR/TestIfFindSupportsDelete; then
# No, it does not.
rm $DELETEDIR/TestIfFindSupportsDelete
FIND_DELETE="-exec rm \{\} \+"
if test "x$OPENJDK_TARGET_OS" = "xaix"; then
# AIX 'find' is buggy if called with '-exec {} \+' and an empty file list
FIND_DELETE="-print | xargs rm"
else
FIND_DELETE="-exec rm \{\} \+"
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
else
@ -26315,197 +26320,6 @@ $as_echo "$tool_specified" >&6; }
##############################################################################
#
# Specify jvm options for anything that is run with the Boot JDK.
# Not all JVM:s accept the same arguments on the command line.
#
# Check whether --with-boot-jdk-jvmargs was given.
if test "${with_boot_jdk_jvmargs+set}" = set; then :
withval=$with_boot_jdk_jvmargs;
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5
$as_echo_n "checking flags for boot jdk java command ... " >&6; }
# Disable special log output when a debug build is used as Boot JDK...
$ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5
$ECHO "Command: $JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version" >&5
OUTPUT=`$JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
# Apply user provided options.
$ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5
$ECHO "Command: $JAVA $with_boot_jdk_jvmargs -version" >&5
OUTPUT=`$JAVA $with_boot_jdk_jvmargs -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs $with_boot_jdk_jvmargs"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs" >&5
$as_echo "$boot_jdk_jvmargs" >&6; }
# For now, general JAVA_FLAGS are the same as the boot jdk jvmargs
JAVA_FLAGS=$boot_jdk_jvmargs
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5
$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; }
# Starting amount of heap memory.
$ECHO "Check if jvm arg is ok: -Xms64M" >&5
$ECHO "Command: $JAVA -Xms64M -version" >&5
OUTPUT=`$JAVA -Xms64M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
# Maximum amount of heap memory.
# Maximum stack size.
if test "x$BUILD_NUM_BITS" = x32; then
JVM_MAX_HEAP=1100M
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
# pointers are used. Apparently, we need to increase the heap and stack
# space for the jvm. More specifically, when running javac to build huge
# jdk batch
JVM_MAX_HEAP=1600M
STACK_SIZE=1536
fi
$ECHO "Check if jvm arg is ok: -Xmx$JVM_MAX_HEAP" >&5
$ECHO "Command: $JAVA -Xmx$JVM_MAX_HEAP -version" >&5
OUTPUT=`$JAVA -Xmx$JVM_MAX_HEAP -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xmx$JVM_MAX_HEAP"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
$ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5
$ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5
OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:ThreadStackSize=$STACK_SIZE"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_big" >&5
$as_echo "$boot_jdk_jvmargs_big" >&6; }
JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for small workloads" >&5
$as_echo_n "checking flags for boot jdk java command for small workloads... " >&6; }
# Use serial gc for small short lived tools if possible
$ECHO "Check if jvm arg is ok: -XX:+UseSerialGC" >&5
$ECHO "Command: $JAVA -XX:+UseSerialGC -version" >&5
OUTPUT=`$JAVA -XX:+UseSerialGC -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -XX:+UseSerialGC"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
$ECHO "Check if jvm arg is ok: -Xms32M" >&5
$ECHO "Command: $JAVA -Xms32M -version" >&5
OUTPUT=`$JAVA -Xms32M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xms32M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
$ECHO "Check if jvm arg is ok: -Xmx512M" >&5
$ECHO "Command: $JAVA -Xmx512M -version" >&5
OUTPUT=`$JAVA -Xmx512M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xmx512M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_small" >&5
$as_echo "$boot_jdk_jvmargs_small" >&6; }
JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small
JAVA_TOOL_FLAGS_SMALL=""
for f in $JAVA_FLAGS_SMALL; do
JAVA_TOOL_FLAGS_SMALL="$JAVA_TOOL_FLAGS_SMALL -J$f"
done
###############################################################################
#
# Configure the sources to use. We can add or override individual directories.
@ -42515,17 +42329,15 @@ $as_echo "$ac_cv_c_bigendian" >&6; }
# no adjustment
;;
fastdebug )
# Add compile time bounds checks.
CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
# no adjustment
;;
slowdebug )
# Add runtime bounds checks and symbol info.
CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
# Add runtime stack smashing and undefined behavior checks
CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then
CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
fi
;;
esac
@ -49884,8 +49696,8 @@ fi
# Number of jobs was not specified, calculate.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for appropriate number of jobs to run in parallel" >&5
$as_echo_n "checking for appropriate number of jobs to run in parallel... " >&6; }
# Approximate memory in GB, rounding up a bit.
memory_gb=`expr $MEMORY_SIZE / 1100`
# Approximate memory in GB.
memory_gb=`expr $MEMORY_SIZE / 1024`
# Pick the lowest of memory in gb and number of cores.
if test "$memory_gb" -lt "$NUM_CORES"; then
JOBS="$memory_gb"
@ -49911,6 +49723,208 @@ $as_echo "$JOBS" >&6; }
# Setup arguments for the boot jdk (after cores and memory have been setup)
##############################################################################
#
# Specify jvm options for anything that is run with the Boot JDK.
# Not all JVM:s accept the same arguments on the command line.
#
# Check whether --with-boot-jdk-jvmargs was given.
if test "${with_boot_jdk_jvmargs+set}" = set; then :
withval=$with_boot_jdk_jvmargs;
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5
$as_echo_n "checking flags for boot jdk java command ... " >&6; }
# Disable special log output when a debug build is used as Boot JDK...
$ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5
$ECHO "Command: $JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version" >&5
OUTPUT=`$JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
# Apply user provided options.
$ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5
$ECHO "Command: $JAVA $with_boot_jdk_jvmargs -version" >&5
OUTPUT=`$JAVA $with_boot_jdk_jvmargs -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs="$boot_jdk_jvmargs $with_boot_jdk_jvmargs"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs" >&5
$as_echo "$boot_jdk_jvmargs" >&6; }
# For now, general JAVA_FLAGS are the same as the boot jdk jvmargs
JAVA_FLAGS=$boot_jdk_jvmargs
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5
$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; }
# Starting amount of heap memory.
$ECHO "Check if jvm arg is ok: -Xms64M" >&5
$ECHO "Command: $JAVA -Xms64M -version" >&5
OUTPUT=`$JAVA -Xms64M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
# Maximum amount of heap memory.
# Maximum stack size.
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BUILD_NUM_BITS" = x32; then
if test "$JVM_MAX_HEAP" -gt "1100"; then
JVM_MAX_HEAP=1100
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
# pointers are used. Apparently, we need to increase the heap and stack
# space for the jvm. More specifically, when running javac to build huge
# jdk batch
if test "$JVM_MAX_HEAP" -gt "1600"; then
JVM_MAX_HEAP=1600
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=1536
fi
$ECHO "Check if jvm arg is ok: -Xmx${JVM_MAX_HEAP}M" >&5
$ECHO "Command: $JAVA -Xmx${JVM_MAX_HEAP}M -version" >&5
OUTPUT=`$JAVA -Xmx${JVM_MAX_HEAP}M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xmx${JVM_MAX_HEAP}M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
$ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5
$ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5
OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:ThreadStackSize=$STACK_SIZE"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_big" >&5
$as_echo "$boot_jdk_jvmargs_big" >&6; }
JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for small workloads" >&5
$as_echo_n "checking flags for boot jdk java command for small workloads... " >&6; }
# Use serial gc for small short lived tools if possible
$ECHO "Check if jvm arg is ok: -XX:+UseSerialGC" >&5
$ECHO "Command: $JAVA -XX:+UseSerialGC -version" >&5
OUTPUT=`$JAVA -XX:+UseSerialGC -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -XX:+UseSerialGC"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
$ECHO "Check if jvm arg is ok: -Xms32M" >&5
$ECHO "Command: $JAVA -Xms32M -version" >&5
OUTPUT=`$JAVA -Xms32M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xms32M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
$ECHO "Check if jvm arg is ok: -Xmx512M" >&5
$ECHO "Command: $JAVA -Xmx512M -version" >&5
OUTPUT=`$JAVA -Xmx512M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xmx512M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_small" >&5
$as_echo "$boot_jdk_jvmargs_small" >&6; }
JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small
JAVA_TOOL_FLAGS_SMALL=""
for f in $JAVA_FLAGS_SMALL; do
JAVA_TOOL_FLAGS_SMALL="$JAVA_TOOL_FLAGS_SMALL -J$f"
done
# Setup smart javac (after cores and memory have been setup)
@ -49927,44 +49941,11 @@ fi
as_fn_error $? "Could not execute server java: $SJAVAC_SERVER_JAVA" "$LINENO" 5
fi
else
SJAVAC_SERVER_JAVA=""
# Hotspot specific options.
$ECHO "Check if jvm arg is ok: -verbosegc" >&5
$ECHO "Command: $JAVA -verbosegc -version" >&5
OUTPUT=`$JAVA -verbosegc -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -verbosegc"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
# JRockit specific options.
$ECHO "Check if jvm arg is ok: -Xverbose:gc" >&5
$ECHO "Command: $JAVA -Xverbose:gc -version" >&5
OUTPUT=`$JAVA -Xverbose:gc -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xverbose:gc"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
SJAVAC_SERVER_JAVA="$JAVA $SJAVAC_SERVER_JAVA"
SJAVAC_SERVER_JAVA="$JAVA"
fi
if test "$MEMORY_SIZE" -gt "2500"; then
if test "$MEMORY_SIZE" -gt "3000"; then
$ECHO "Check if jvm arg is ok: -d64" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -d64 -version" >&5
@ -49986,85 +49967,31 @@ fi
fi
fi
MX_VALUE=`expr $MEMORY_SIZE / 2`
if test "$JVM_64BIT" = true; then
if test "$MEMORY_SIZE" -gt "17000"; then
$ECHO "Check if jvm arg is ok: -Xms10G -Xmx10G" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -Xms10G -Xmx10G -version" >&5
OUTPUT=`$SJAVAC_SERVER_JAVA -Xms10G -Xmx10G -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms10G -Xmx10G"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
# Set ms lower than mx since more than one instance of the server might
# get launched at the same time before they figure out which instance won.
MS_VALUE=512
if test "$MX_VALUE" -gt "2048"; then
MX_VALUE=2048
fi
if test "$MEMORY_SIZE" -gt "10000" && test "$JVM_ARG_OK" = false; then
$ECHO "Check if jvm arg is ok: -Xms6G -Xmx6G" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -Xms6G -Xmx6G -version" >&5
OUTPUT=`$SJAVAC_SERVER_JAVA -Xms6G -Xmx6G -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms6G -Xmx6G"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
fi
if test "$MEMORY_SIZE" -gt "5000" && test "$JVM_ARG_OK" = false; then
$ECHO "Check if jvm arg is ok: -Xms1G -Xmx3G" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1G -Xmx3G -version" >&5
OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1G -Xmx3G -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1G -Xmx3G"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
fi
if test "$MEMORY_SIZE" -gt "3800" && test "$JVM_ARG_OK" = false; then
$ECHO "Check if jvm arg is ok: -Xms1G -Xmx2500M" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M -version" >&5
OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
MS_VALUE=256
if test "$MX_VALUE" -gt "1500"; then
MX_VALUE=1500
fi
fi
if test "$MEMORY_SIZE" -gt "2500" && test "$JVM_ARG_OK" = false; then
if test "$MX_VALUE" -lt "512"; then
MX_VALUE=512
fi
$ECHO "Check if jvm arg is ok: -Xms1000M -Xmx1500M" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M -version" >&5
OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M -version 2>&1`
$ECHO "Check if jvm arg is ok: -Xms${MS_VALUE}M -Xmx${MX_VALUE}M" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M -version" >&5
OUTPUT=`$SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M"
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
@ -50072,44 +49999,7 @@ fi
JVM_ARG_OK=false
fi
fi
if test "$MEMORY_SIZE" -gt "1000" && test "$JVM_ARG_OK" = false; then
$ECHO "Check if jvm arg is ok: -Xms400M -Xmx1100M" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M -version" >&5
OUTPUT=`$SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
fi
if test "$JVM_ARG_OK" = false; then
$ECHO "Check if jvm arg is ok: -Xms256M -Xmx512M" >&5
$ECHO "Command: $SJAVAC_SERVER_JAVA -Xms256M -Xmx512M -version" >&5
OUTPUT=`$SJAVAC_SERVER_JAVA -Xms256M -Xmx512M -version 2>&1`
FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn`
FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""`
if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms256M -Xmx512M"
JVM_ARG_OK=true
else
$ECHO "Arg failed:" >&5
$ECHO "$OUTPUT" >&5
JVM_ARG_OK=false
fi
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use sjavac" >&5
$as_echo_n "checking whether to use sjavac... " >&6; }
# Check whether --enable-sjavac was given.
if test "${enable_sjavac+set}" = set; then :
enableval=$enable_sjavac; ENABLE_SJAVAC="${enableval}"
@ -50117,6 +50007,13 @@ else
ENABLE_SJAVAC='no'
fi
if test "x$JVM_ARG_OK" = "xfalse"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac" >&5
$as_echo "$as_me: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac" >&2;}
ENABLE_SJAVAC=no;
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use sjavac" >&5
$as_echo_n "checking whether to use sjavac... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ENABLE_SJAVAC" >&5
$as_echo "$ENABLE_SJAVAC" >&6; }

View File

@ -77,6 +77,11 @@ do
shift
done
# debug mode
if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then
global_opts="${global_opts} --debug"
fi
# silence standard output?
if [ ${qflag} = "true" ] ; then
global_opts="${global_opts} -q"
@ -89,14 +94,26 @@ if [ ${vflag} = "true" ] ; then
fi
# Make sure we have a command.
if [ $# -lt 1 -o -z "${1:-}" ] ; then
echo "ERROR: No command to hg supplied!"
usage
if [ ${#} -lt 1 -o -z "${1:-}" ] ; then
echo "ERROR: No command to hg supplied!" > ${status_output}
usage > ${status_output}
fi
command="$1"; shift
# grab command
command="${1}"; shift
if [ ${vflag} = "true" ] ; then
echo "# Mercurial command: ${command}" > ${status_output}
fi
# capture command options and arguments (if any)
command_args="${@:-}"
if [ ${vflag} = "true" ] ; then
echo "# Mercurial command arguments: ${command_args}" > ${status_output}
fi
# Clean out the temporary directory that stores the pid files.
tmp=/tmp/forest.$$
rm -f -r ${tmp}
@ -104,7 +121,8 @@ mkdir -p ${tmp}
if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then
echo "DEBUG: temp files are in: ${tmp}"
# ignores redirection.
echo "DEBUG: temp files are in: ${tmp}" >&2
fi
# Check if we can use fifos for monitoring sub-process completion.
@ -377,21 +395,33 @@ else
fi
fi
done
if [ ${have_fifos} = "true" ]; then
# done with the fifo
exec 3>&-
fi
fi
# Wait for all subprocesses to complete
wait
# Terminate with exit 0 only if all subprocesses were successful
# Terminate with highest exit code of subprocesses
ec=0
if [ -d ${tmp} ]; then
rcfiles="`(ls -a ${tmp}/*.pid.rc 2> /dev/null) || echo ''`"
for rc in ${rcfiles} ; do
exit_code=`cat ${rc} | tr -d ' \n\r'`
if [ "${exit_code}" != "0" ] ; then
if [ ${exit_code} -gt 1 ]; then
# mercurial exit codes greater than "1" signal errors.
repo="`echo ${rc} | sed -e 's@^'${tmp}'@@' -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`"
echo "WARNING: ${repo} exited abnormally (${exit_code})" > ${status_output}
ec=1
fi
if [ ${exit_code} -gt ${ec} ]; then
# assume that larger exit codes are more significant
ec=${exit_code}
fi
fi
done
fi
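# Minimal sketch, not part of the commit, of the exit-status aggregation above: each
# child hg writes its exit code to <tmp>/<repo>.pid.rc, codes greater than 1 are
# reported as abnormal (per the script's comment), and the parent exits with the
# highest code seen. The tmp layout and values are stand-ins, and the repo name is
# derived with basename instead of the script's sed expression.
tmp=/tmp/forest-demo.$$
mkdir -p ${tmp}
printf '0'   > ${tmp}/jdk.pid.rc
printf '255' > ${tmp}/hotspot.pid.rc
ec=0
for rc in ${tmp}/*.pid.rc ; do
  exit_code=`cat ${rc} | tr -d ' \n\r'`
  if [ ${exit_code} -gt 1 ]; then
    echo "WARNING: `basename ${rc} .pid.rc` exited abnormally (${exit_code})"
  fi
  if [ ${exit_code} -gt ${ec} ]; then
    ec=${exit_code}
  fi
done
rm -r ${tmp}
echo "overall exit code: ${ec}"    # 255 for these stand-in values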

View File

@ -270,3 +270,4 @@ ddc07abf4307855c0dc904cc5c96cc764023a930 jdk9-b22
da08cca6b97f41b7081a3e176dcb400af6e4bb26 jdk9-b25
6c777df597bbf5abba3488d44c401edfe73c74af jdk9-b26
7e06bf1dcb0907b80ddf59315426ce9ce775e56d jdk9-b27
a00b04ef067e39f50b9a0fea6f1904e35d632a73 jdk9-b28

View File

@ -50,8 +50,9 @@ $(eval $(call SetupJavaCompilation,BUILD_IDLJ, \
INCLUDES := com/sun/tools/corba/se/idl, \
EXCLUDE_FILES := ResourceBundleUtil.java))
# Force the language to english for predictable source code generation.
TOOL_IDLJ_CMD := $(JAVA) -cp $(CORBA_OUTPUTDIR)/idlj_classes \
com.sun.tools.corba.se.idl.toJavaPortable.Compile
-Duser.language=en com.sun.tools.corba.se.idl.toJavaPortable.Compile
################################################################################
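# Minimal sketch, not part of the commit: the shape of the resulting idlj invocation.
# The classpath, output directory and IDL file below are made-up examples; the point
# is that -Duser.language=en comes before the main class, so the generated sources do
# not depend on the build machine's locale.
java -cp build/corba/idlj_classes \
    -Duser.language=en com.sun.tools.corba.se.idl.toJavaPortable.Compile \
    -td build/gensrc org/omg/example/Demo.idl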

View File

@ -67,7 +67,7 @@ if [ "x$hgwhere" = "x" ]; then
error "Could not locate Mercurial command"
fi
hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`"
hgversion="`LANGUAGE=en hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`"
if [ "x${hgversion}" = "x" ] ; then
error "Could not determine Mercurial version of $hgwhere"
fi
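# Minimal sketch, not part of the commit, of the parsing that the LANGUAGE=en prefix
# protects: hg is replaced here by a stand-in function printing the English banner; a
# localized banner would not match the sed pattern, leaving hgversion empty and
# triggering the error path above.
hg() { echo 'Mercurial Distributed SCM (version 3.1+20140817)'; }
hgversion=$(LANGUAGE=en hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)$@\1@p')
echo "parsed version: ${hgversion}"    # 3.1 for this stand-in banner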

View File

@ -430,3 +430,4 @@ dde2d03b0ea46a27650839e3a1d212c7c1f7b4c8 jdk9-b24
6de94e8693240cec8aae11f6b42f43433456a733 jdk9-b25
48b95a073d752d6891cc0d1d2836b321ecf3ce0c jdk9-b26
f95347244306affc32ce3056f27ceff7b2100810 jdk9-b27
657294869d7ff063e055f5492cab7ce5612ca851 jdk9-b28

View File

@ -45,8 +45,8 @@ import sun.jvm.hotspot.types.TypeDataBase;
public class G1CollectedHeap extends SharedHeap {
// HeapRegionSeq _seq;
static private long hrsFieldOffset;
// MemRegion _g1_committed;
static private long g1CommittedFieldOffset;
// MemRegion _g1_reserved;
static private long g1ReservedFieldOffset;
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _g1mm;
@ -68,7 +68,6 @@ public class G1CollectedHeap extends SharedHeap {
Type type = db.lookupType("G1CollectedHeap");
hrsFieldOffset = type.getField("_hrs").getOffset();
g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
@ -76,9 +75,7 @@ public class G1CollectedHeap extends SharedHeap {
}
public long capacity() {
Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
MemRegion g1Committed = new MemRegion(g1CommittedAddr);
return g1Committed.byteSize();
return hrs().capacity();
}
public long used() {

View File

@ -93,19 +93,35 @@ public class G1HeapRegionTable extends VMObject {
private class HeapRegionIterator implements Iterator<HeapRegion> {
private long index;
private long length;
private HeapRegion next;
public HeapRegion positionToNext() {
HeapRegion result = next;
while (index < length && at(index) == null) {
index++;
}
if (index < length) {
next = at(index);
index++; // restart search at next element
} else {
next = null;
}
return result;
}
@Override
public boolean hasNext() { return index < length; }
public boolean hasNext() { return next != null; }
@Override
public HeapRegion next() { return at(index++); }
public HeapRegion next() { return positionToNext(); }
@Override
public void remove() { /* not supported */ }
public void remove() { /* not supported */ }
HeapRegionIterator(long committedLength) {
HeapRegionIterator(long totalLength) {
index = 0;
length = committedLength;
length = totalLength;
positionToNext();
}
}

View File

@ -43,7 +43,7 @@ public class HeapRegionSeq extends VMObject {
// G1HeapRegionTable _regions
static private long regionsFieldOffset;
// uint _committed_length
static private CIntegerField committedLengthField;
static private CIntegerField numCommittedField;
static {
VM.registerVMInitializedObserver(new Observer() {
@ -57,7 +57,7 @@ public class HeapRegionSeq extends VMObject {
Type type = db.lookupType("HeapRegionSeq");
regionsFieldOffset = type.getField("_regions").getOffset();
committedLengthField = type.getCIntegerField("_committed_length");
numCommittedField = type.getCIntegerField("_num_committed");
}
private G1HeapRegionTable regions() {
@ -66,16 +66,20 @@ public class HeapRegionSeq extends VMObject {
regionsAddr);
}
public long capacity() {
return length() * HeapRegion.grainBytes();
}
public long length() {
return regions().length();
}
public long committedLength() {
return committedLengthField.getValue(addr);
return numCommittedField.getValue(addr);
}
public Iterator<HeapRegion> heapRegionIterator() {
return regions().heapRegionIterator(committedLength());
return regions().heapRegionIterator(length());
}
public HeapRegionSeq(Address addr) {

View File

@ -508,13 +508,9 @@ endif
ifeq ($(USE_CLANG),)
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif
endif

View File

@ -70,7 +70,8 @@ ifeq ($(INCLUDE_CDS), false)
CXXFLAGS += -DINCLUDE_CDS=0
CFLAGS += -DINCLUDE_CDS=0
Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp
Src_Files_EXCLUDE += filemap.cpp metaspaceShared*.cpp sharedPathsMiscInfo.cpp \
systemDictionaryShared.cpp classLoaderExt.cpp sharedClassUtil.cpp
endif
ifeq ($(INCLUDE_ALL_GCS), false)

View File

@ -374,6 +374,7 @@ jprt.make.rule.test.targets.standard = \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime_closed}, \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability}
jprt.make.rule.test.targets.embedded = \

View File

@ -365,16 +365,13 @@ endif
ifeq ($(USE_CLANG),)
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif
endif
# If we are building HEADLESS, pass on to VM
# so it can set the java.awt.headless property
ifdef HEADLESS

View File

@ -240,11 +240,7 @@ ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
endif
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif

View File

@ -731,7 +731,7 @@ InterpreterFrame *InterpreterFrame::build(Method* const method, TRAPS) {
if (method->is_static())
object = method->constants()->pool_holder()->java_mirror();
else
object = (oop) locals[0];
object = (oop) (void*)locals[0];
monitor->set_obj(object);
}

View File

@ -26,6 +26,8 @@
#ifndef CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
#define CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
#include "code/codeCache.hpp"
// Constructors
inline frame::frame() {

View File

@ -2246,7 +2246,7 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
const siginfo_t* si = (const siginfo_t*)siginfo;
os::Posix::print_siginfo_brief(st, si);
#if INCLUDE_CDS
if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
UseSharedSpaces) {
FileMapInfo* mapinfo = FileMapInfo::current_info();
@ -2256,6 +2256,7 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
" possible disk/network problem.");
}
}
#endif
st->cr();
}

View File

@ -66,129 +66,92 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
}
int VM_Version::platform_features(int features) {
// getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
// supported on Solaris 10 and later.
if (os::Solaris::supports_getisax()) {
assert(os::Solaris::supports_getisax(), "getisax() must be available");
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
// Check 64-bit architecture.
do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
// Check 64-bit architecture.
do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or less av's");
uint_t av = avs[0];
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or less av's");
uint_t av = avs[0];
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
}
#endif
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
#ifndef AV2_SPARC_SPARC5
#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
#endif
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
// Next values are not defined before Solaris 10
// but Solaris 8 is used for jdk6 update builds.
// We only build on Solaris 10 and up, but some of the values below
// are not defined on all versions of Solaris 10, so we define them,
// if necessary.
#ifndef AV_SPARC_ASI_BLK_INIT
#define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */
#endif
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
#ifndef AV_SPARC_FMAF
#define AV_SPARC_FMAF 0x0100 /* Fused Multiply-Add */
#endif
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
#ifndef AV_SPARC_FMAU
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#endif
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
#ifndef AV_SPARC_VIS3
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#endif
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
#ifndef AV_SPARC_CBCOND
#define AV_SPARC_CBCOND 0x10000000 /* compare and branch instrs supported */
#endif
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
#ifndef AV_SPARC_AES
#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
#endif
if (av & AV_SPARC_AES) features |= aes_instructions_m;
if (av & AV_SPARC_AES) features |= aes_instructions_m;
#ifndef AV_SPARC_SHA1
#define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */
#endif
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
#ifndef AV_SPARC_SHA256
#define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */
#endif
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
#ifndef AV_SPARC_SHA512
#define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */
#endif
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) is not supported.");
#endif
char tmp;
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
char* buf = (char*) os::malloc(bufsize, mtInternal);
if (buf != NULL) {
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
// Figure out what kind of sparc we have
char *sparc_string = strstr(buf, "sparc");
if (sparc_string != NULL) { features |= v8_instructions_m;
if (sparc_string[5] == 'v') {
if (sparc_string[6] == '8') {
if (sparc_string[7] == '-') { features |= hardware_mul32_m;
features |= hardware_div32_m;
} else if (sparc_string[7] == 'p') features |= generic_v9_m;
else features |= generic_v8_m;
} else if (sparc_string[6] == '9') features |= generic_v9_m;
}
}
// Check for visualization instructions
char *vis = strstr(buf, "vis");
if (vis != NULL) { features |= vis1_instructions_m;
if (vis[3] == '2') features |= vis2_instructions_m;
}
}
os::free(buf);
}
}
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
// Determine the machine type.
do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
@ -203,27 +166,7 @@ int VM_Version::platform_features(int features) {
kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
for (int i = 0; i < ksp->ks_ndata; i++) {
if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
#ifndef KSTAT_DATA_STRING
#define KSTAT_DATA_STRING 9
#endif
if (knm[i].data_type == KSTAT_DATA_CHAR) {
// VM is running on Solaris 8 which does not have value.str.
implementation = &(knm[i].value.c[0]);
} else if (knm[i].data_type == KSTAT_DATA_STRING) {
// VM is running on Solaris 10.
#ifndef KSTAT_NAMED_STR_PTR
// Solaris 8 was used to build VM, define the structure it misses.
struct str_t {
union {
char *ptr; /* NULL-term string */
char __pad[8]; /* 64-bit padding */
} addr;
uint32_t len; /* # bytes for strlen + '\0' */
};
#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr)
#endif
implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
}
implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("cpu_info.implementation: %s", implementation);
@ -234,6 +177,7 @@ int VM_Version::platform_features(int features) {
for (int i = 0; impl[i] != 0; i++)
impl[i] = (char)toupper((uint)impl[i]);
if (strstr(impl, "SPARC64") != NULL) {
features |= sparc64_family_m;
} else if (strstr(impl, "SPARC-M") != NULL) {
@ -248,8 +192,10 @@ int VM_Version::platform_features(int features) {
if (strstr(impl, "SPARC") == NULL) {
#ifndef PRODUCT
// kstat on Solaris 8 virtual machines (branded zones)
// returns "(unsupported)" implementation.
warning("kstat cpu_info implementation = '%s', should contain SPARC", impl);
// returns "(unsupported)" implementation. Solaris 8 is not
// supported anymore, but include this check to be on the
// safe side.
warning("kstat cpu_info implementation = '%s', assume generic SPARC", impl);
#endif
implementation = "SPARC";
}

View File

@ -546,13 +546,18 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// normal bytecode execution.
thread->clear_exception_oop_and_pc();
Handle original_exception(thread, exception());
continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
// If an exception was thrown during exception dispatch, the exception oop may have changed
thread->set_exception_oop(exception());
thread->set_exception_pc(pc);
// the exception cache is used only by non-implicit exceptions
if (continuation != NULL) {
// Update the exception cache only when there didn't happen
// another exception during the computation of the compiled
// exception handler.
if (continuation != NULL && original_exception() == exception()) {
nm->add_handler_for_exception_and_pc(exception, pc, continuation);
}
}

View File

@ -31,6 +31,9 @@
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#if INCLUDE_CDS
#include "classfile/systemDictionaryShared.hpp"
#endif
#include "classfile/verificationType.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
@ -60,6 +63,7 @@
#include "services/threadService.hpp"
#include "utilities/array.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
// We generally try to create the oops directly when parsing, rather than
// allocating temporary data structures and copying the bytes twice. A
@ -3786,7 +3790,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
instanceKlassHandle nullHandle;
// Figure out whether we can skip format checking (matching classic VM behavior)
_need_verify = Verifier::should_verify_for(class_loader(), verify);
if (DumpSharedSpaces) {
// verify == true means it's a 'remote' class (i.e., non-boot class)
// Verification decision is based on BytecodeVerificationRemote flag
// for those classes.
_need_verify = (verify) ? BytecodeVerificationRemote :
BytecodeVerificationLocal;
} else {
_need_verify = Verifier::should_verify_for(class_loader(), verify);
}
// Set the verify flag in stream
cfs->set_verify(_need_verify);
@ -3805,6 +3817,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
u2 minor_version = cfs->get_u2_fast();
u2 major_version = cfs->get_u2_fast();
if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) {
ResourceMark rm;
warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
major_version, minor_version, name->as_C_string());
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"Unsupported major.minor version for dump time %u.%u",
major_version,
minor_version);
}
// Check version numbers - we check this even with verifier off
if (!is_supported_version(major_version, minor_version)) {
if (name == NULL) {
@ -3912,6 +3936,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
tty->print_cr("]");
}
#if INCLUDE_CDS
if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into CDS archive
if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
if (name != NULL) {
ResourceMark rm(THREAD);
classlist_file->print_cr("%s", name->as_C_string());
classlist_file->flush();
}
}
}
#endif
u2 super_class_index = cfs->get_u2_fast();
instanceKlassHandle super_klass = parse_super_class(super_class_index,

View File

@ -26,8 +26,13 @@
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.hpp"
#if INCLUDE_CDS
#include "classfile/sharedPathsMiscInfo.hpp"
#include "classfile/sharedClassUtil.hpp"
#endif
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
@ -35,6 +40,7 @@
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/generation.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.inline.hpp"
@ -114,8 +120,12 @@ PerfCounter* ClassLoader::_load_instance_class_failCounter = NULL;
ClassPathEntry* ClassLoader::_first_entry = NULL;
ClassPathEntry* ClassLoader::_last_entry = NULL;
int ClassLoader::_num_entries = 0;
PackageHashtable* ClassLoader::_package_hash_table = NULL;
#if INCLUDE_CDS
SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
#endif
// helper routines
bool string_starts_with(const char* str, const char* str_to_find) {
size_t str_len = strlen(str);
@ -194,6 +204,14 @@ ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
// check if file exists
struct stat st;
if (os::stat(path, &st) == 0) {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
// We have already check in ClassLoader::check_shared_classpath() that the directory is empty, so
// we should never find a file underneath it -- unless user has added a new file while we are running
// the dump, in which case let's quit!
ShouldNotReachHere();
}
#endif
// found file, open it
int file_handle = os::open(path, 0, 0);
if (file_handle != -1) {
@ -228,13 +246,13 @@ ClassPathZipEntry::~ClassPathZipEntry() {
FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
}
ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
// enable call to C land
u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
// enable call to C land
JavaThread* thread = JavaThread::current();
ThreadToNativeFromVM ttn(thread);
// check whether zip archive contains name
jint filesize, name_len;
jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len);
jint name_len;
jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len);
if (entry == NULL) return NULL;
u1* buffer;
char name_buf[128];
@ -245,19 +263,33 @@ ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
}
// file found, get pointer to class in mmaped jar file.
// file found, get pointer to the entry in mmapped jar file.
if (ReadMappedEntry == NULL ||
!(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
// mmaped access not available, perhaps due to compression,
// mmapped access not available, perhaps due to compression,
// read contents into resource array
buffer = NEW_RESOURCE_ARRAY(u1, filesize);
int size = (*filesize) + ((nul_terminate) ? 1 : 0);
buffer = NEW_RESOURCE_ARRAY(u1, size);
if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
}
// return result
if (nul_terminate) {
buffer[*filesize] = 0;
}
return buffer;
}
ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
jint filesize;
u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
if (buffer == NULL) {
return NULL;
}
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);
}
// return result
return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
}
// invoke function for each entry in the zip file
@ -272,12 +304,13 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
}
}
LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
_path = os::strdup_check_oom(path);
_st = *st;
_meta_index = NULL;
_resolved_entry = NULL;
_has_error = false;
_throw_exception = throw_exception;
}
LazyClassPathEntry::~LazyClassPathEntry() {
@ -293,7 +326,11 @@ ClassPathEntry* LazyClassPathEntry::resolve_entry(TRAPS) {
return (ClassPathEntry*) _resolved_entry;
}
ClassPathEntry* new_entry = NULL;
new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, _throw_exception, CHECK_NULL);
if (!_throw_exception && new_entry == NULL) {
assert(!HAS_PENDING_EXCEPTION, "must be");
return NULL;
}
{
ThreadCritical tc;
if (_resolved_entry == NULL) {
@ -327,6 +364,23 @@ bool LazyClassPathEntry::is_lazy() {
return true;
}
u1* LazyClassPathEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
if (_has_error) {
return NULL;
}
ClassPathEntry* cpe = resolve_entry(THREAD);
if (cpe == NULL) {
_has_error = true;
return NULL;
} else if (cpe->is_jar_file()) {
return ((ClassPathZipEntry*)cpe)->open_entry(name, filesize, nul_terminate, THREAD);
} else {
ShouldNotReachHere();
*filesize = 0;
return NULL;
}
}
static void print_meta_index(LazyClassPathEntry* entry,
GrowableArray<char*>& meta_packages) {
tty->print("[Meta index for %s=", entry->name());
@ -337,15 +391,62 @@ static void print_meta_index(LazyClassPathEntry* entry,
tty->print_cr("]");
}
#if INCLUDE_CDS
void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
assert(DumpSharedSpaces, "only called at dump time");
tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure");
vm_exit_during_initialization(error, message);
}
#endif
void ClassLoader::setup_meta_index() {
void ClassLoader::trace_class_path(const char* msg, const char* name) {
if (!TraceClassPaths) {
return;
}
if (msg) {
tty->print("%s", msg);
}
if (name) {
if (strlen(name) < 256) {
tty->print("%s", name);
} else {
// For very long paths, we need to print each character separately,
// as print_cr() has a length limit
while (name[0] != '\0') {
tty->print("%c", name[0]);
name++;
}
}
}
if (msg && msg[0] == '[') {
tty->print_cr("]");
} else {
tty->cr();
}
}
void ClassLoader::setup_bootstrap_meta_index() {
// Set up meta index which allows us to open boot jars lazily if
// class data sharing is enabled
const char* meta_index_path = Arguments::get_meta_index_path();
const char* meta_index_dir = Arguments::get_meta_index_dir();
setup_meta_index(meta_index_path, meta_index_dir, 0);
}
void ClassLoader::setup_meta_index(const char* meta_index_path, const char* meta_index_dir, int start_index) {
const char* known_version = "% VERSION 2";
char* meta_index_path = Arguments::get_meta_index_path();
char* meta_index_dir = Arguments::get_meta_index_dir();
FILE* file = fopen(meta_index_path, "r");
int line_no = 0;
#if INCLUDE_CDS
if (DumpSharedSpaces) {
if (file != NULL) {
_shared_paths_misc_info->add_required_file(meta_index_path);
} else {
_shared_paths_misc_info->add_nonexist_path(meta_index_path);
}
}
#endif
if (file != NULL) {
ResourceMark rm;
LazyClassPathEntry* cur_entry = NULL;
@ -380,7 +481,7 @@ void ClassLoader::setup_meta_index() {
// Hand off current packages to current lazy entry (if any)
if ((cur_entry != NULL) &&
(boot_class_path_packages.length() > 0)) {
if (TraceClassLoading && Verbose) {
if ((TraceClassLoading || TraceClassPaths) && Verbose) {
print_meta_index(cur_entry, boot_class_path_packages);
}
MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@ -391,8 +492,10 @@ void ClassLoader::setup_meta_index() {
boot_class_path_packages.clear();
// Find lazy entry corresponding to this jar file
for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) {
if (entry->is_lazy() &&
int count = 0;
for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next(), count++) {
if (count >= start_index &&
entry->is_lazy() &&
string_starts_with(entry->name(), meta_index_dir) &&
string_ends_with(entry->name(), &package_name[2])) {
cur_entry = (LazyClassPathEntry*) entry;
@ -429,7 +532,7 @@ void ClassLoader::setup_meta_index() {
// Hand off current packages to current lazy entry (if any)
if ((cur_entry != NULL) &&
(boot_class_path_packages.length() > 0)) {
if (TraceClassLoading && Verbose) {
if ((TraceClassLoading || TraceClassPaths) && Verbose) {
print_meta_index(cur_entry, boot_class_path_packages);
}
MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@ -440,37 +543,88 @@ void ClassLoader::setup_meta_index() {
}
}
#if INCLUDE_CDS
void ClassLoader::check_shared_classpath(const char *path) {
if (strcmp(path, "") == 0) {
exit_with_path_failure("Cannot have empty path in archived classpaths", NULL);
}
struct stat st;
if (os::stat(path, &st) == 0) {
if ((st.st_mode & S_IFREG) != S_IFREG) { // is directory
if (!os::dir_is_empty(path)) {
tty->print_cr("Error: non-empty directory '%s'", path);
exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL);
}
}
}
}
#endif
void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
if (TraceClassLoading && Verbose) {
tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
if (!PrintSharedArchiveAndExit) {
trace_class_path("[Bootstrap loader class path=", sys_class_path);
}
#if INCLUDE_CDS
if (DumpSharedSpaces) {
_shared_paths_misc_info->add_boot_classpath(Arguments::get_sysclasspath());
}
#endif
setup_search_path(sys_class_path);
os::free(sys_class_path);
}
int len = (int)strlen(sys_class_path);
#if INCLUDE_CDS
int ClassLoader::get_shared_paths_misc_info_size() {
return _shared_paths_misc_info->get_used_bytes();
}
void* ClassLoader::get_shared_paths_misc_info() {
return _shared_paths_misc_info->buffer();
}
bool ClassLoader::check_shared_paths_misc_info(void *buf, int size) {
SharedPathsMiscInfo* checker = SharedClassUtil::allocate_shared_paths_misc_info((char*)buf, size);
bool result = checker->check();
delete checker;
return result;
}
#endif
void ClassLoader::setup_search_path(char *class_path) {
int offset = 0;
int len = (int)strlen(class_path);
int end = 0;
// Iterate over class path entries
for (int start = 0; start < len; start = end) {
while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
while (class_path[end] && class_path[end] != os::path_separator()[0]) {
end++;
}
char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass);
strncpy(path, &sys_class_path[start], end-start);
path[end-start] = '\0';
EXCEPTION_MARK;
ResourceMark rm(THREAD);
char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
strncpy(path, &class_path[start], end - start);
path[end - start] = '\0';
update_class_path_entry_list(path, false);
FREE_C_HEAP_ARRAY(char, path, mtClass);
while (sys_class_path[end] == os::path_separator()[0]) {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
check_shared_classpath(path);
}
#endif
while (class_path[end] == os::path_separator()[0]) {
end++;
}
}
os::free(sys_class_path);
}
ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st,
bool lazy, bool throw_exception, TRAPS) {
JavaThread* thread = JavaThread::current();
if (lazy) {
return new LazyClassPathEntry(path, st);
return new LazyClassPathEntry(path, st, throw_exception);
}
ClassPathEntry* new_entry = NULL;
if ((st->st_mode & S_IFREG) == S_IFREG) {
@ -479,7 +633,11 @@ ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct st
char canonical_path[JVM_MAXPATHLEN];
if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
// This matches the classic VM
THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
if (throw_exception) {
THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
} else {
return NULL;
}
}
char* error_msg = NULL;
jzfile* zip;
@ -491,7 +649,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct st
}
if (zip != NULL && error_msg == NULL) {
new_entry = new ClassPathZipEntry(zip, path);
if (TraceClassLoading) {
if (TraceClassLoading || TraceClassPaths) {
tty->print_cr("[Opened %s]", path);
}
} else {
@ -505,12 +663,16 @@ ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct st
msg = NEW_RESOURCE_ARRAY(char, len); ;
jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
}
THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
if (throw_exception) {
THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
} else {
return NULL;
}
}
} else {
// Directory
new_entry = new ClassPathDirEntry(path);
if (TraceClassLoading) {
if (TraceClassLoading || TraceClassPaths) {
tty->print_cr("[Path %s]", path);
}
}
@ -571,23 +733,37 @@ void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
_last_entry = new_entry;
}
}
_num_entries ++;
}
void ClassLoader::update_class_path_entry_list(char *path,
bool check_for_duplicates) {
// Returns true IFF the file/dir exists and the entry was successfully created.
bool ClassLoader::update_class_path_entry_list(char *path,
bool check_for_duplicates,
bool throw_exception) {
struct stat st;
if (os::stat(path, &st) == 0) {
// File or directory found
ClassPathEntry* new_entry = NULL;
Thread* THREAD = Thread::current();
new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, throw_exception, CHECK_(false));
if (new_entry == NULL) {
return false;
}
// The kernel VM adds dynamically to the end of the classloader path and
// doesn't reorder the bootclasspath which would break java.lang.Package
// (see PackageInfo).
// Add new entry to linked list
if (!check_for_duplicates || !contains_entry(new_entry)) {
add_to_list(new_entry);
ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
}
return true;
} else {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
_shared_paths_misc_info->add_nonexist_path(path);
}
#endif
return false;
}
}
@ -739,10 +915,10 @@ public:
assert(n == number_of_entries(), "just checking");
}
void copy_table(char** top, char* end, PackageHashtable* table);
CDS_ONLY(void copy_table(char** top, char* end, PackageHashtable* table);)
};
#if INCLUDE_CDS
void PackageHashtable::copy_table(char** top, char* end,
PackageHashtable* table) {
// Copy (relocate) the table to the shared space.
@ -750,33 +926,30 @@ void PackageHashtable::copy_table(char** top, char* end,
// Calculate the space needed for the package name strings.
int i;
int n = 0;
for (i = 0; i < table_size(); ++i) {
for (PackageInfo* pp = table->bucket(i);
pp != NULL;
pp = pp->next()) {
n += (int)(strlen(pp->pkgname()) + 1);
}
}
if (*top + n + sizeof(intptr_t) >= end) {
report_out_of_shared_space(SharedMiscData);
}
// Copy the table data (the strings) to the shared space.
n = align_size_up(n, sizeof(HeapWord));
*(intptr_t*)(*top) = n;
*top += sizeof(intptr_t);
intptr_t* tableSize = (intptr_t*)(*top);
*top += sizeof(intptr_t); // For table size
char* tableStart = *top;
for (i = 0; i < table_size(); ++i) {
for (PackageInfo* pp = table->bucket(i);
pp != NULL;
pp = pp->next()) {
int n1 = (int)(strlen(pp->pkgname()) + 1);
if (*top + n1 >= end) {
report_out_of_shared_space(SharedMiscData);
}
pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1));
*top += n1;
}
}
*top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord));
if (*top >= end) {
report_out_of_shared_space(SharedMiscData);
}
// Write table size
intptr_t len = *top - (char*)tableStart;
*tableSize = len;
}
@ -787,7 +960,7 @@ void ClassLoader::copy_package_info_buckets(char** top, char* end) {
void ClassLoader::copy_package_info_table(char** top, char* end) {
_package_hash_table->copy_table(top, end, _package_hash_table);
}
#endif
PackageInfo* ClassLoader::lookup_package(const char *pkgname) {
const char *cp = strrchr(pkgname, '/');
@ -880,7 +1053,8 @@ objArrayOop ClassLoader::get_system_packages(TRAPS) {
instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
ResourceMark rm(THREAD);
EventMark m("loading class %s", h_name->as_C_string());
const char* class_name = h_name->as_C_string();
EventMark m("loading class %s", class_name);
ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
stringStream st;
@ -888,18 +1062,24 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
// st.print("%s.class", h_name->as_utf8());
st.print_raw(h_name->as_utf8());
st.print_raw(".class");
char* name = st.as_string();
const char* file_name = st.as_string();
ClassLoaderExt::Context context(class_name, file_name, THREAD);
// Lookup stream for parsing .class file
ClassFileStream* stream = NULL;
int classpath_index = 0;
ClassPathEntry* e = NULL;
instanceKlassHandle h;
{
PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_LOAD);
ClassPathEntry* e = _first_entry;
e = _first_entry;
while (e != NULL) {
stream = e->open_stream(name, CHECK_NULL);
stream = e->open_stream(file_name, CHECK_NULL);
if (!context.check(stream, classpath_index)) {
return h; // NULL
}
if (stream != NULL) {
break;
}
@ -908,9 +1088,7 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
}
}
instanceKlassHandle h;
if (stream != NULL) {
// class file found, parse it
ClassFileParser parser(stream);
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
@ -920,12 +1098,19 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
loader_data,
protection_domain,
parsed_name,
false,
CHECK_(h));
// add to package table
if (add_package(name, classpath_index, THREAD)) {
h = result;
context.should_verify(classpath_index),
THREAD);
if (HAS_PENDING_EXCEPTION) {
ResourceMark rm;
if (DumpSharedSpaces) {
tty->print_cr("Preload Error: Failed to load %s", class_name);
}
return h;
}
h = context.record_result(classpath_index, e, result, THREAD);
} else {
if (DumpSharedSpaces) {
tty->print_cr("Preload Error: Cannot find %s", class_name);
}
}
@ -1020,14 +1205,27 @@ void ClassLoader::initialize() {
// lookup zip library entry points
load_zip_library();
#if INCLUDE_CDS
// initialize search path
if (DumpSharedSpaces) {
_shared_paths_misc_info = SharedClassUtil::allocate_shared_paths_misc_info();
}
#endif
setup_bootstrap_search_path();
if (LazyBootClassLoader) {
// set up meta index which makes boot classpath initialization lazier
setup_meta_index();
setup_bootstrap_meta_index();
}
}
#if INCLUDE_CDS
void ClassLoader::initialize_shared_path() {
if (DumpSharedSpaces) {
ClassLoaderExt::setup_search_paths();
_shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check()
}
}
#endif
jlong ClassLoader::classloader_time_ms() {
return UsePerfData ?

View File

@ -107,6 +107,7 @@ class ClassPathZipEntry: public ClassPathEntry {
const char* name() { return _zip_name; }
ClassPathZipEntry(jzfile* zip, const char* zip_name);
~ClassPathZipEntry();
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
ClassFileStream* open_stream(const char* name, TRAPS);
void contents_do(void f(const char* name, void* context), void* context);
// Debugging
@ -122,13 +123,15 @@ class LazyClassPathEntry: public ClassPathEntry {
struct stat _st;
MetaIndex* _meta_index;
bool _has_error;
bool _throw_exception;
volatile ClassPathEntry* _resolved_entry;
ClassPathEntry* resolve_entry(TRAPS);
public:
bool is_jar_file();
const char* name() { return _path; }
LazyClassPathEntry(char* path, const struct stat* st);
LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception);
virtual ~LazyClassPathEntry();
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
ClassFileStream* open_stream(const char* name, TRAPS);
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
@ -140,6 +143,7 @@ class LazyClassPathEntry: public ClassPathEntry {
class PackageHashtable;
class PackageInfo;
class SharedPathsMiscInfo;
template <MEMFLAGS F> class HashtableBucket;
class ClassLoader: AllStatic {
@ -147,7 +151,7 @@ class ClassLoader: AllStatic {
enum SomeConstants {
package_hash_table_size = 31 // Number of buckets
};
private:
protected:
friend class LazyClassPathEntry;
// Performance counters
@ -189,10 +193,15 @@ class ClassLoader: AllStatic {
static ClassPathEntry* _first_entry;
// Last entry in linked list of ClassPathEntry instances
static ClassPathEntry* _last_entry;
static int _num_entries;
// Hash table used to keep track of loaded packages
static PackageHashtable* _package_hash_table;
static const char* _shared_archive;
// Info used by CDS
CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
// Hash function
static unsigned int hash(const char *s, int n);
// Returns the package file name corresponding to the specified package
@ -203,19 +212,23 @@ class ClassLoader: AllStatic {
static bool add_package(const char *pkgname, int classpath_index, TRAPS);
// Initialization
static void setup_meta_index();
static void setup_bootstrap_meta_index();
static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
int start_index);
static void setup_bootstrap_search_path();
static void setup_search_path(char *class_path);
static void load_zip_library();
static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
bool lazy, TRAPS);
bool lazy, bool throw_exception, TRAPS);
// Canonicalizes path names, so strcmp will work properly. This is mainly
// to avoid confusing the zip library
static bool get_canonical_path(char* orig, char* out, int len);
public:
// Used by the kernel jvm.
static void update_class_path_entry_list(char *path,
bool check_for_duplicates);
static bool update_class_path_entry_list(char *path,
bool check_for_duplicates,
bool throw_exception=true);
static void print_bootclasspath();
// Timing
@ -298,6 +311,7 @@ class ClassLoader: AllStatic {
// Initialization
static void initialize();
CDS_ONLY(static void initialize_shared_path();)
static void create_package_info_table();
static void create_package_info_table(HashtableBucket<mtClass> *t, int length,
int number_of_entries);
@ -312,10 +326,21 @@ class ClassLoader: AllStatic {
return e;
}
#if INCLUDE_CDS
// Sharing dump and restore
static void copy_package_info_buckets(char** top, char* end);
static void copy_package_info_table(char** top, char* end);
static void check_shared_classpath(const char *path);
static void finalize_shared_paths_misc_info();
static int get_shared_paths_misc_info_size();
static void* get_shared_paths_misc_info();
static bool check_shared_paths_misc_info(void* info, int size);
static void exit_with_path_failure(const char* error, const char* message);
#endif
static void trace_class_path(const char* msg, const char* name = NULL);
// VM monitoring and management support
static jlong classloader_time_ms();
static jlong class_method_total_size();
@ -339,7 +364,7 @@ class ClassLoader: AllStatic {
// Force compilation of all methods in all classes in bootstrap class path (stress test)
#ifndef PRODUCT
private:
protected:
static int _compile_the_world_class_counter;
static int _compile_the_world_method_counter;
public:

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
#include "classfile/classLoader.hpp"
class ClassLoaderExt: public ClassLoader { // AllStatic
public:
class Context {
const char* _file_name;
public:
Context(const char* class_name, const char* file_name, TRAPS) {
_file_name = file_name;
}
bool check(ClassFileStream* stream, const int classpath_index) {
return true;
}
bool should_verify(int classpath_index) {
return false;
}
instanceKlassHandle record_result(const int classpath_index,
ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
if (DumpSharedSpaces) {
result->set_shared_classpath_index(classpath_index);
}
return result;
} else {
return instanceKlassHandle(); // NULL
}
}
};
static void add_class_path_entry(char* path, bool check_for_duplicates,
ClassPathEntry* new_entry) {
ClassLoader::add_to_list(new_entry);
}
static void setup_search_paths() {}
};
#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP

View File

@ -130,15 +130,13 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_dom
}
bool Dictionary::do_unloading() {
void Dictionary::do_unloading() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
bool class_was_unloaded = false;
int index = 0; // Defined here for portability! Do not move
// Remove unloadable entries and classes from system dictionary
// The placeholder array has been handled in always_strong_oops_do.
DictionaryEntry* probe = NULL;
for (index = 0; index < table_size(); index++) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
Klass* e = probe->klass();
@ -158,16 +156,8 @@ bool Dictionary::do_unloading() {
// Do we need to delete this system dictionary entry?
if (loader_data->is_unloading()) {
// If the loader is not live this entry should always be
// removed (will never be looked up again). Note that this is
// not the same as unloading the referred class.
if (k_def_class_loader_data == loader_data) {
// This is the defining entry, so the referred class is about
// to be unloaded.
class_was_unloaded = true;
}
// Also remove this system dictionary entry.
// removed (will never be looked up again).
purge_entry = true;
} else {
// The loader in this entry is alive. If the klass is dead,
// (determined by checking the defining class loader)
@ -196,7 +186,6 @@ bool Dictionary::do_unloading() {
p = probe->next_addr();
}
}
return class_was_unloaded;
}
void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
@ -220,6 +209,29 @@ void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
_pd_cache_table->roots_oops_do(strong, weak);
}
void Dictionary::remove_classes_in_error_state() {
assert(DumpSharedSpaces, "supported only when dumping");
DictionaryEntry* probe = NULL;
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
InstanceKlass* ik = InstanceKlass::cast(probe->klass());
if (ik->is_in_error_state()) { // purge this entry
*p = probe->next();
if (probe == _current_class_entry) {
_current_class_entry = NULL;
}
free_entry(probe);
ResourceMark rm;
tty->print_cr("Removed error class: %s", ik->external_name());
continue;
}
p = probe->next_addr();
}
}
}
void Dictionary::always_strong_oops_do(OopClosure* blk) {
// Follow all system classes and temporary placeholders in dictionary; only
// protection domain oops contain references into the heap. In a first
@ -693,16 +705,17 @@ void SymbolPropertyTable::methods_do(void f(Method*)) {
// ----------------------------------------------------------------------------
#ifndef PRODUCT
void Dictionary::print() {
void Dictionary::print(bool details) {
ResourceMark rm;
HandleMark hm;
tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");
if (details) {
tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");
}
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
@ -713,21 +726,28 @@ void Dictionary::print() {
ClassLoaderData* loader_data = probe->loader_data();
bool is_defining_class =
(loader_data == InstanceKlass::cast(e)->class_loader_data());
tty->print("%s%s", is_defining_class ? " " : "^",
tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
e->external_name());
if (details) {
tty->print(", loader ");
loader_data->print_value();
if (loader_data != NULL) {
loader_data->print_value();
} else {
tty->print("NULL");
}
}
tty->cr();
}
}
tty->cr();
_pd_cache_table->print();
if (details) {
tty->cr();
_pd_cache_table->print();
}
tty->cr();
}
#endif
void Dictionary::verify() {
guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");

View File

@ -100,6 +100,7 @@ public:
void methods_do(void f(Method*));
void unlink(BoolObjectClosure* is_alive);
void remove_classes_in_error_state();
// Classes loaded by the bootstrap loader are always strongly reachable.
// If we're not doing class unloading, all classes are strongly reachable.
@ -108,9 +109,8 @@ public:
return (loader_data->is_the_null_class_loader_data() || !ClassUnloading);
}
// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
bool do_unloading();
// Unload (that is, break root links to) all unmarked classes and loaders.
void do_unloading();
// Protection domains
Klass* find(int index, unsigned int hash, Symbol* name,
@ -127,9 +127,7 @@ public:
ProtectionDomainCacheEntry* cache_get(oop protection_domain);
#ifndef PRODUCT
void print();
#endif
void print(bool details = true);
void verify();
};

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
#define SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
#include "classfile/sharedPathsMiscInfo.hpp"
#include "memory/filemap.hpp"
class SharedClassUtil : AllStatic {
public:
static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
return new SharedPathsMiscInfo();
}
static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
return new SharedPathsMiscInfo(buf, size);
}
static FileMapInfo::FileMapHeader* allocate_file_map_header() {
return new FileMapInfo::FileMapHeader();
}
static size_t file_map_header_size() {
return sizeof(FileMapInfo::FileMapHeader);
}
static size_t shared_class_path_entry_size() {
return sizeof(SharedClassPathEntry);
}
static void update_shared_classpath(ClassPathEntry *cpe,
SharedClassPathEntry* ent,
time_t timestamp,
long filesize, TRAPS) {
ent->_timestamp = timestamp;
ent->_filesize = filesize;
}
static void initialize(TRAPS) {}
inline static bool is_shared_boot_class(Klass* klass) {
return (klass->_shared_class_path_index >= 0);
}
};
#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP

View File

@ -0,0 +1,154 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/sharedPathsMiscInfo.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/arguments.hpp"
void SharedPathsMiscInfo::add_path(const char* path, int type) {
if (TraceClassPaths) {
tty->print("[type=%s] ", type_name(type));
trace_class_path("[Add misc shared path ", path);
}
write(path, strlen(path) + 1);
write_jint(jint(type));
}
void SharedPathsMiscInfo::ensure_size(size_t needed_bytes) {
assert(_allocated, "cannot modify buffer during validation.");
int used = get_used_bytes();
int target = used + int(needed_bytes);
if (target > _buf_size) {
_buf_size = _buf_size * 2 + (int)needed_bytes;
_buf_start = REALLOC_C_HEAP_ARRAY(char, _buf_start, _buf_size, mtClass);
_cur_ptr = _buf_start + used;
_end_ptr = _buf_start + _buf_size;
}
}
void SharedPathsMiscInfo::write(const void* ptr, size_t size) {
ensure_size(size);
memcpy(_cur_ptr, ptr, size);
_cur_ptr += size;
}
bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
if (_cur_ptr + size <= _end_ptr) {
memcpy(ptr, _cur_ptr, size);
_cur_ptr += size;
return true;
}
return false;
}
bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
ClassLoader::trace_class_path(msg, name);
MetaspaceShared::set_archive_loading_failed();
return false;
}
bool SharedPathsMiscInfo::check() {
// The whole buffer must be 0 terminated so that we can use strlen and strcmp
// without fear.
_end_ptr -= sizeof(jint);
if (_cur_ptr >= _end_ptr) {
return fail("Truncated archive file header");
}
if (*_end_ptr != 0) {
return fail("Corrupted archive file header");
}
while (_cur_ptr < _end_ptr) {
jint type;
const char* path = _cur_ptr;
_cur_ptr += strlen(path) + 1;
if (!read_jint(&type)) {
return fail("Corrupted archive file header");
}
if (TraceClassPaths) {
tty->print("[type=%s ", type_name(type));
print_path(tty, type, path);
tty->print_cr("]");
}
if (!check(type, path)) {
if (!PrintSharedArchiveAndExit) {
return false;
}
} else {
trace_class_path("[ok");
}
}
return true;
}
bool SharedPathsMiscInfo::check(jint type, const char* path) {
switch (type) {
case BOOT:
if (strcmp(path, Arguments::get_sysclasspath()) != 0) {
return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath());
}
break;
case NON_EXIST: // fall-through
case REQUIRED:
{
struct stat st;
if (os::stat(path, &st) != 0) {
// The file does not actually exist
if (type == REQUIRED) {
// but we require it to exist -> fail
return fail("Required file doesn't exist");
}
} else {
// The file actually exists
if (type == NON_EXIST) {
// But we want it to not exist -> fail
return fail("File must not exist");
}
time_t timestamp;
long filesize;
if (!read_time(&timestamp) || !read_long(&filesize)) {
return fail("Corrupted archive file header");
}
if (timestamp != st.st_mtime) {
return fail("Timestamp mismatch");
}
if (filesize != st.st_size) {
return fail("File size mismatch");
}
}
}
break;
default:
return fail("Corrupted archive file header");
}
return true;
}
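
To make the stream format concrete, here is a minimal standalone sketch (plain C++, not HotSpot code) of the layout that add_path(), add_required_file() and check() agree on: a NUL-terminated path, a 32-bit type tag, an extra mtime/size pair for REQUIRED records, and a trailing jint 0 terminator. All paths, the timestamp and the file size below are hypothetical.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <vector>

enum PathType : int32_t { BOOT = 1, NON_EXIST = 2, REQUIRED = 3 };

// Append raw bytes to the growing buffer (stands in for SharedPathsMiscInfo::write()).
static void put(std::vector<char>& buf, const void* p, std::size_t n) {
  const char* c = static_cast<const char*>(p);
  buf.insert(buf.end(), c, c + n);
}

// One record: NUL-terminated path followed by its type tag.
static void add_path(std::vector<char>& buf, const char* path, int32_t type) {
  put(buf, path, std::strlen(path) + 1);
  put(buf, &type, sizeof(type));
}

int main() {
  std::vector<char> buf;
  add_path(buf, "/jdk/lib", BOOT);                 // boot class path value (hypothetical)
  add_path(buf, "/jdk/lib/meta-index", REQUIRED);  // required file, plus expected mtime/size
  time_t mtime = 0; long fsize = 4096;
  put(buf, &mtime, sizeof(mtime));
  put(buf, &fsize, sizeof(fsize));
  add_path(buf, "/extra/missing.jar", NON_EXIST);  // must not exist at run time
  int32_t terminator = 0;                          // cf. write_jint(0) in initialize_shared_path()
  put(buf, &terminator, sizeof(terminator));

  // Validation-style walk, mirroring SharedPathsMiscInfo::check():
  // stop before the terminator, read <path, type[, mtime, size]> records.
  const char* cur = buf.data();
  const char* end = buf.data() + buf.size() - sizeof(int32_t);
  while (cur < end) {
    const char* path = cur;
    cur += std::strlen(path) + 1;
    int32_t type;
    std::memcpy(&type, cur, sizeof(type));
    cur += sizeof(type);
    if (type == REQUIRED) {
      time_t t; long s;
      std::memcpy(&t, cur, sizeof(t)); cur += sizeof(t);
      std::memcpy(&s, cur, sizeof(s)); cur += sizeof(s);
      std::printf("REQUIRED  %s (mtime=%ld, size=%ld)\n", path, (long)t, s);
    } else {
      std::printf("type=%d  %s\n", (int)type, path);
    }
  }
  return 0;
}

Compiled with any C++11 compiler this prints one line per record, roughly the information check() traces when TraceClassPaths is enabled.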

View File

@ -0,0 +1,187 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
#define SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
#include "runtime/os.hpp"
// During dumping time, when processing class paths, we build up the dump-time
// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry.
// However, we need to store other "misc" information for run-time checking, such as
//
// + The values of Arguments::get_sysclasspath() used during dumping.
//
// + The meta-index file(s) used during dumping (incl modification time and size)
//
// + The class path elements specified during dumping that did not exist --
// these elements must also be specified at run time, and they also must not
// exist at run time.
//
// These misc items are stored in a linear buffer in SharedPathsMiscInfo.
// The storage format is stream oriented to minimize its size.
//
// When writing the information to the archive file, SharedPathsMiscInfo is stored in
// the archive file header. At run-time, this information is used only during initialization
// (accessed using read() instead of mmap()), and is deallocated afterwards to save space.
//
// The SharedPathsMiscInfo class is used for both creating the information (during
// dumping time) and validation (at run time). Different constructors are used in the
// two situations. See below.
class SharedPathsMiscInfo : public CHeapObj<mtClass> {
protected:
char* _buf_start;
char* _cur_ptr;
char* _end_ptr;
int _buf_size;
bool _allocated; // was _buf_start allocated by me?
void ensure_size(size_t needed_bytes);
void add_path(const char* path, int type);
void write(const void* ptr, size_t size);
bool read(void* ptr, size_t size);
static void trace_class_path(const char* msg, const char* name = NULL) {
ClassLoader::trace_class_path(msg, name);
}
protected:
static bool fail(const char* msg, const char* name = NULL);
virtual bool check(jint type, const char* path);
public:
enum {
INITIAL_BUF_SIZE = 128
};
// This constructor is used when creating the misc information (during dump)
SharedPathsMiscInfo() {
_buf_size = INITIAL_BUF_SIZE;
_cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
_allocated = true;
}
// This constructor is used when validating the misc info (during run time)
SharedPathsMiscInfo(char *buff, int size) {
_cur_ptr = _buf_start = buff;
_end_ptr = _buf_start + size;
_buf_size = size;
_allocated = false;
}
~SharedPathsMiscInfo() {
if (_allocated) {
FREE_C_HEAP_ARRAY(char, _buf_start, mtClass);
}
}
int get_used_bytes() {
return _cur_ptr - _buf_start;
}
void* buffer() {
return _buf_start;
}
// writing --
// The path must not exist at run-time
void add_nonexist_path(const char* path) {
add_path(path, NON_EXIST);
}
// The path must exist and have required size and modification time
void add_required_file(const char* path) {
add_path(path, REQUIRED);
struct stat st;
if (os::stat(path, &st) != 0) {
assert(0, "sanity");
ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen
}
write_time(st.st_mtime);
write_long(st.st_size);
}
// The path must exist, and must contain exactly <num_entries> files/dirs
void add_boot_classpath(const char* path) {
add_path(path, BOOT);
}
int write_jint(jint num) {
write(&num, sizeof(num));
return 0;
}
void write_time(time_t t) {
write(&t, sizeof(t));
}
void write_long(long l) {
write(&l, sizeof(l));
}
bool dump_to_file(int fd) {
int n = get_used_bytes();
return (os::write(fd, _buf_start, n) == (size_t)n);
}
// reading --
enum {
BOOT = 1,
NON_EXIST = 2,
REQUIRED = 3
};
virtual const char* type_name(int type) {
switch (type) {
case BOOT: return "BOOT";
case NON_EXIST: return "NON_EXIST";
case REQUIRED: return "REQUIRED";
default: ShouldNotReachHere(); return "?";
}
}
virtual void print_path(outputStream* out, int type, const char* path) {
switch (type) {
case BOOT:
out->print("Expecting -Dsun.boot.class.path=%s", path);
break;
case NON_EXIST:
out->print("Expecting that %s does not exist", path);
break;
case REQUIRED:
out->print("Expecting that file %s must exist and not altered", path);
break;
default:
ShouldNotReachHere();
}
}
bool check();
bool read_jint(jint *ptr) {
return read(ptr, sizeof(jint));
}
bool read_long(long *ptr) {
return read(ptr, sizeof(long));
}
bool read_time(time_t *ptr) {
return read(ptr, sizeof(time_t));
}
};
#endif // SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
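
Reading the two constructors together with the ClassLoader hooks added earlier in this change suggests the following lifecycle (an inference from the code shown here, not a normative description): at dump time ClassLoader::initialize() allocates the writer-side instance via SharedClassUtil::allocate_shared_paths_misc_info(), the setup code records the boot class path, meta-index files and nonexistent entries through add_boot_classpath(), add_required_file() and add_nonexist_path(), initialize_shared_path() appends the jint 0 terminator, and get_shared_paths_misc_info() / get_shared_paths_misc_info_size() hand the finished buffer to the archive header. At run time the buffer read back from the header is wrapped with the validating constructor inside ClassLoader::check_shared_paths_misc_info(), whose check() call either accepts the archive or fails with the trace messages defined above.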

View File

@ -31,10 +31,15 @@
#include "classfile/resolutionErrors.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#if INCLUDE_CDS
#include "classfile/sharedClassUtil.hpp"
#include "classfile/systemDictionaryShared.hpp"
#endif
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
@ -110,6 +115,8 @@ void SystemDictionary::compute_java_system_loader(TRAPS) {
CHECK);
_java_system_loader = (oop)result.get_jobject();
CDS_ONLY(SystemDictionaryShared::initialize(CHECK);)
}
@ -974,6 +981,7 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
// Create a new CLD for anonymous class, that uses the same class loader
// as the host_klass
guarantee(host_klass->class_loader() == class_loader(), "should be the same");
guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping");
loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
loader_data->record_dependency(host_klass(), CHECK_NULL);
} else {
@ -1134,7 +1142,7 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
return k();
}
#if INCLUDE_CDS
void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries) {
assert(length == _nof_buckets * sizeof(HashtableBucket<mtClass>),
@ -1167,15 +1175,21 @@ Klass* SystemDictionary::find_shared_class(Symbol* class_name) {
instanceKlassHandle SystemDictionary::load_shared_class(
Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle ik (THREAD, find_shared_class(class_name));
return load_shared_class(ik, class_loader, THREAD);
// Make sure we only return the boot class for the NULL classloader.
if (ik.not_null() &&
SharedClassUtil::is_shared_boot_class(ik()) && class_loader.is_null()) {
Handle protection_domain;
return load_shared_class(ik, class_loader, protection_domain, THREAD);
}
return instanceKlassHandle();
}
instanceKlassHandle SystemDictionary::load_shared_class(
instanceKlassHandle ik, Handle class_loader, TRAPS) {
assert(class_loader.is_null(), "non-null classloader for shared class?");
instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
Handle class_loader,
Handle protection_domain, TRAPS) {
if (ik.not_null()) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
Symbol* class_name = ik->name();
Symbol* class_name = ik->name();
// Found the class, now load the superclass and interfaces. If they
// are shared, add them to the main system dictionary and reset
@ -1184,7 +1198,7 @@ instanceKlassHandle SystemDictionary::load_shared_class(
if (ik->super() != NULL) {
Symbol* cn = ik->super()->name();
resolve_super_or_fail(class_name, cn,
class_loader, Handle(), true, CHECK_(nh));
class_loader, protection_domain, true, CHECK_(nh));
}
Array<Klass*>* interfaces = ik->local_interfaces();
@ -1197,7 +1211,7 @@ instanceKlassHandle SystemDictionary::load_shared_class(
// reinitialized yet (they will be once the interface classes
// are loaded)
Symbol* name = k->name();
resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh));
resolve_super_or_fail(class_name, name, class_loader, protection_domain, false, CHECK_(nh));
}
// Adjust methods to recover missing data. They need addresses for
@ -1206,30 +1220,47 @@ instanceKlassHandle SystemDictionary::load_shared_class(
// Updating methods must be done under a lock so multiple
// threads don't update these in parallel
// Shared classes are all currently loaded by the bootstrap
// classloader, so this will never cause a deadlock on
// a custom class loader lock.
//
// Shared classes are all currently loaded by either the bootstrap or
// internal parallel class loaders, so this will never cause a deadlock
// on a custom class loader lock.
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
{
Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
check_loader_lock_contention(lockObject, THREAD);
ObjectLocker ol(lockObject, THREAD, true);
ik->restore_unshareable_info(CHECK_(nh));
ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh));
}
if (TraceClassLoading) {
ResourceMark rm;
tty->print("[Loaded %s", ik->external_name());
tty->print(" from shared objects file");
if (class_loader.not_null()) {
tty->print(" by %s", loader_data->loader_name());
}
tty->print_cr("]");
}
#if INCLUDE_CDS
if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into CDS archive
if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
ResourceMark rm(THREAD);
classlist_file->print_cr("%s", ik->name()->as_C_string());
classlist_file->flush();
}
}
#endif
// notify a class loaded from shared object
ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()),
true /* shared class */);
}
return ik;
}
#endif
instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
@ -1239,8 +1270,10 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha
// shared spaces.
instanceKlassHandle k;
{
#if INCLUDE_CDS
PerfTraceTime vmtimer(ClassLoader::perf_shared_classload_time());
k = load_shared_class(class_name, class_loader, THREAD);
#endif
}
if (k.is_null()) {
@ -1599,7 +1632,6 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
Universe::flush_dependents_on(k);
}
// ----------------------------------------------------------------------------
// GC support
@ -1661,10 +1693,9 @@ public:
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive);
bool unloading_occurred = false;
if (has_dead_loaders) {
unloading_occurred = dictionary()->do_unloading();
bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive);
if (unloading_occurred) {
dictionary()->do_unloading();
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
}
@ -1682,6 +1713,7 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
strong->do_oop(&_java_system_loader);
strong->do_oop(&_system_loader_lock_obj);
CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);)
// Adjust dictionary
dictionary()->roots_oops_do(strong, weak);
@ -1693,6 +1725,7 @@ void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
void SystemDictionary::oops_do(OopClosure* f) {
f->do_oop(&_java_system_loader);
f->do_oop(&_system_loader_lock_obj);
CDS_ONLY(SystemDictionaryShared::oops_do(f);)
// Adjust dictionary
dictionary()->oops_do(f);
@ -1754,6 +1787,10 @@ void SystemDictionary::methods_do(void f(Method*)) {
invoke_method_table()->methods_do(f);
}
void SystemDictionary::remove_classes_in_error_state() {
dictionary()->remove_classes_in_error_state();
}
// ----------------------------------------------------------------------------
// Lazily load klasses
@ -2563,10 +2600,12 @@ int SystemDictionary::number_of_classes() {
// ----------------------------------------------------------------------------
#ifndef PRODUCT
void SystemDictionary::print_shared(bool details) {
shared_dictionary()->print(details);
}
void SystemDictionary::print() {
dictionary()->print();
void SystemDictionary::print(bool details) {
dictionary()->print(details);
// Placeholders
GCMutexLocker mu(SystemDictionary_lock);
@ -2576,7 +2615,6 @@ void SystemDictionary::print() {
constraints()->print();
}
#endif
void SystemDictionary::verify() {
guarantee(dictionary() != NULL, "Verify of system dictionary failed");

View File

@ -111,6 +111,7 @@ class Ticks;
do_klass(SecurityManager_klass, java_lang_SecurityManager, Pre ) \
do_klass(ProtectionDomain_klass, java_security_ProtectionDomain, Pre ) \
do_klass(AccessControlContext_klass, java_security_AccessControlContext, Pre ) \
do_klass(SecureClassLoader_klass, java_security_SecureClassLoader, Pre ) \
do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre ) \
do_klass(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre ) \
do_klass(LinkageError_klass, java_lang_LinkageError, Pre ) \
@ -166,6 +167,15 @@ class Ticks;
do_klass(StringBuilder_klass, java_lang_StringBuilder, Pre ) \
do_klass(misc_Unsafe_klass, sun_misc_Unsafe, Pre ) \
\
/* support for CDS */ \
do_klass(ByteArrayInputStream_klass, java_io_ByteArrayInputStream, Pre ) \
do_klass(File_klass, java_io_File, Pre ) \
do_klass(URLClassLoader_klass, java_net_URLClassLoader, Pre ) \
do_klass(URL_klass, java_net_URL, Pre ) \
do_klass(Jar_Manifest_klass, java_util_jar_Manifest, Pre ) \
do_klass(sun_misc_Launcher_klass, sun_misc_Launcher, Pre ) \
do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \
\
/* It's NULL in non-1.4 JDKs. */ \
do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
@ -221,7 +231,7 @@ class SystemDictionary : AllStatic {
static Klass* resolve_or_fail(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS);
// Convenient call for null loader and protection domain.
static Klass* resolve_or_fail(Symbol* class_name, bool throw_error, TRAPS);
private:
protected:
// handle error translation for resolve_or_null results
static Klass* handle_resolution_exception(Symbol* class_name, bool throw_error, KlassHandle klass_h, TRAPS);
@ -326,6 +336,9 @@ public:
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(BoolObjectClosure* is_alive);
// Used by DumpSharedSpaces only to remove classes that failed verification
static void remove_classes_in_error_state();
static int calculate_systemdictionary_size(int loadedclasses);
// Applies "f->do_oop" to all root oops in the system dictionary.
@ -335,7 +348,7 @@ public:
// System loader lock
static oop system_loader_lock() { return _system_loader_lock_obj; }
private:
protected:
// Extended Redefine classes support (tbi)
static void preloaded_classes_do(KlassClosure* f);
static void lazily_loaded_classes_do(KlassClosure* f);
@ -348,7 +361,8 @@ public:
static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries);
// Printing
static void print() PRODUCT_RETURN;
static void print(bool details = true);
static void print_shared(bool details = true);
static void print_class_statistics() PRODUCT_RETURN;
static void print_method_statistics() PRODUCT_RETURN;
@ -424,7 +438,7 @@ public:
static void load_abstract_ownable_synchronizer_klass(TRAPS);
private:
protected:
// Tells whether ClassLoader.loadClassInternal is present
static bool has_loadClassInternal() { return _has_loadClassInternal; }
@ -452,7 +466,7 @@ public:
// Register a new class loader
static ClassLoaderData* register_loader(Handle class_loader, TRAPS);
private:
protected:
// Mirrors for primitive classes (created eagerly)
static oop check_mirror(oop m) {
assert(m != NULL, "mirror not initialized");
@ -523,7 +537,7 @@ public:
static Symbol* find_resolution_error(constantPoolHandle pool, int which,
Symbol** message);
private:
protected:
enum Constants {
_loader_constraint_size = 107, // number of entries in constraint table
@ -574,7 +588,7 @@ public:
friend class CounterDecay;
static Klass* try_get_next_class();
private:
protected:
static void validate_protection_domain(instanceKlassHandle klass,
Handle class_loader,
Handle protection_domain, TRAPS);
@ -601,10 +615,10 @@ private:
static instanceKlassHandle find_or_define_instance_class(Symbol* class_name,
Handle class_loader,
instanceKlassHandle k, TRAPS);
static instanceKlassHandle load_shared_class(Symbol* class_name,
Handle class_loader, TRAPS);
static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
Handle class_loader, TRAPS);
Handle class_loader,
Handle protection_domain,
TRAPS);
static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
static void check_loader_lock_contention(Handle loader_lock, TRAPS);
@ -612,9 +626,12 @@ private:
static bool is_parallelDefine(Handle class_loader);
public:
static instanceKlassHandle load_shared_class(Symbol* class_name,
Handle class_loader,
TRAPS);
static bool is_ext_class_loader(Handle class_loader);
private:
protected:
static Klass* find_shared_class(Symbol* class_name);
// Setup link to hierarchy

View File

@ -1,12 +1,10 @@
/*
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@ -21,31 +19,29 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This source code is provided to illustrate the usage of a given feature
* or technique and has been deliberately simplified. Additional steps
* required for a production-quality application, such as security checks,
* input validation and proper error handling, might not be present in
* this sample code.
*
*/
package com.sun.tools.example.debug.bdi;
#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
public class LineNotFoundException extends Exception
{
#include "classfile/systemDictionary.hpp"
private static final long serialVersionUID = -5630418117861587582L;
class SystemDictionaryShared: public SystemDictionary {
public:
static void initialize(TRAPS) {}
static instanceKlassHandle find_or_load_shared_class(Symbol* class_name,
Handle class_loader,
TRAPS) {
return instanceKlassHandle();
}
static void roots_oops_do(OopClosure* blk) {}
static void oops_do(OopClosure* f) {}
static bool is_sharing_possible(ClassLoaderData* loader_data) {
oop class_loader = loader_data->class_loader();
return (class_loader == NULL);
}
};
public LineNotFoundException()
{
super();
}
public LineNotFoundException(String s)
{
super(s);
}
}
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP

View File

@ -91,11 +91,17 @@
template(java_lang_CharSequence, "java/lang/CharSequence") \
template(java_lang_SecurityManager, "java/lang/SecurityManager") \
template(java_security_AccessControlContext, "java/security/AccessControlContext") \
template(java_security_CodeSource, "java/security/CodeSource") \
template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \
template(java_security_SecureClassLoader, "java/security/SecureClassLoader") \
template(java_net_URLClassLoader, "java/net/URLClassLoader") \
template(java_net_URL, "java/net/URL") \
template(java_util_jar_Manifest, "java/util/jar/Manifest") \
template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext") \
template(java_io_OutputStream, "java/io/OutputStream") \
template(java_io_Reader, "java/io/Reader") \
template(java_io_BufferedReader, "java/io/BufferedReader") \
template(java_io_File, "java/io/File") \
template(java_io_FileInputStream, "java/io/FileInputStream") \
template(java_io_ByteArrayInputStream, "java/io/ByteArrayInputStream") \
template(java_io_Serializable, "java/io/Serializable") \
@ -106,6 +112,7 @@
template(java_util_Hashtable, "java/util/Hashtable") \
template(java_lang_Compiler, "java/lang/Compiler") \
template(sun_misc_Signal, "sun/misc/Signal") \
template(sun_misc_Launcher, "sun/misc/Launcher") \
template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \
template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
template(sun_misc_PostVMInitHook, "sun/misc/PostVMInitHook") \
@ -396,6 +403,14 @@
template(signers_name, "signers_name") \
template(loader_data_name, "loader_data") \
template(dependencies_name, "dependencies") \
template(input_stream_void_signature, "(Ljava/io/InputStream;)V") \
template(getFileURL_name, "getFileURL") \
template(getFileURL_signature, "(Ljava/io/File;)Ljava/net/URL;") \
template(definePackageInternal_name, "definePackageInternal") \
template(definePackageInternal_signature, "(Ljava/lang/String;Ljava/util/jar/Manifest;Ljava/net/URL;)V") \
template(getProtectionDomain_name, "getProtectionDomain") \
template(getProtectionDomain_signature, "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \
template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
\
/* non-intrinsic name/signature pairs: */ \
template(register_method_name, "register") \

View File

@ -2734,10 +2734,12 @@ void CFLS_LAB::retire(int tid) {
}
}
void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
assert(fl->count() == 0, "Precondition.");
assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
"Precondition");
// Used by par_get_chunk_of_blocks() for the chunks from the
// indexed_free_lists. Looks for a chunk with size that is a multiple
// of "word_sz" and if found, splits it into "word_sz" chunks and add
// to the free list "fl". "n" is the maximum number of chunks to
// be added to "fl".
bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
// We'll try all multiples of word_sz in the indexed set, starting with
// word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@ -2818,11 +2820,15 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
Mutex::_no_safepoint_check_flag);
ssize_t births = _indexedFreeList[word_sz].split_births() + num;
_indexedFreeList[word_sz].set_split_births(births);
return;
return true;
}
}
return found;
}
// Otherwise, we'll split a block from the dictionary.
}
FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
FreeChunk* fc = NULL;
FreeChunk* rem_fc = NULL;
size_t rem;
@ -2833,16 +2839,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
FreeBlockDictionary<FreeChunk>::atLeast);
if (fc != NULL) {
_bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
dictionary()->dict_census_update(fc->size(),
true /*split*/,
false /*birth*/);
break;
} else {
n--;
}
}
if (fc == NULL) return;
if (fc == NULL) return NULL;
// Otherwise, split up that block.
assert((ssize_t)n >= 1, "Control point invariant");
assert(fc->is_free(), "Error: should be a free block");
@ -2864,10 +2866,14 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
// dictionary and return, leaving "fl" empty.
if (n == 0) {
returnChunkToDictionary(fc);
assert(fl->count() == 0, "We never allocated any blocks");
return;
return NULL;
}
_bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
dictionary()->dict_census_update(fc->size(),
true /*split*/,
false /*birth*/);
// First return the remainder, if any.
// Note that we hold the lock until we decide if we're going to give
// back the remainder to the dictionary, since a concurrent allocation
@ -2900,7 +2906,24 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
_indexedFreeList[rem].return_chunk_at_head(rem_fc);
smallSplitBirth(rem);
}
assert((ssize_t)n > 0 && fc != NULL, "Consistency");
assert(n * word_sz == fc->size(),
err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
fc->size(), n, word_sz));
return fc;
}
void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
if (fc == NULL) {
return;
}
size_t n = fc->size() / word_sz;
assert((ssize_t)n > 0, "Consistency");
// Now do the splitting up.
// Must do this in reverse order, so that anybody attempting to
// access the main chunk sees it as a single free block until we
@ -2948,6 +2971,20 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
assert(fl->tail()->next() == NULL, "List invariant.");
}
void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
assert(fl->count() == 0, "Precondition.");
assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
"Precondition");
if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
// Got it
return;
}
// Otherwise, we'll split a block from the dictionary.
par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
}
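par_get_chunk_of_blocks() now just dispatches: try the indexed free lists first and, only if that fails, split one large block taken from the dictionary. A rough stand-alone sketch of that two-step strategy, with invented names and containers and no claim to match the CMS data structures, could look like this:
#include <cstddef>
#include <map>
#include <set>
#include <vector>
using FreeList = std::vector<std::size_t>;                  // blocks, kept as sizes in words
// Step 1: look for a block whose size is a small multiple of word_sz and split it.
inline bool get_blocks_from_indexed_lists(std::map<std::size_t, FreeList>& indexed,
                                          std::size_t word_sz, std::size_t n,
                                          FreeList& out) {
  for (std::size_t mult = 1; mult <= 8; ++mult) {           // bounded search over multiples
    auto it = indexed.find(mult * word_sz);
    if (it == indexed.end() || it->second.empty()) continue;
    it->second.pop_back();                                   // take one block of mult * word_sz words
    for (std::size_t i = 0; i < mult && out.size() < n; ++i)
      out.push_back(word_sz);                                // split it into word_sz pieces
    if (out.size() == n) return true;
  }
  return !out.empty();                                       // partial success still counts
}
// Step 2: split one big block (at least n * word_sz words) found in the dictionary.
inline void get_blocks_from_dictionary(std::multiset<std::size_t>& dictionary,
                                       std::size_t word_sz, std::size_t n,
                                       FreeList& out) {
  auto it = dictionary.lower_bound(n * word_sz);
  if (it == dictionary.end()) return;                        // nothing large enough
  dictionary.erase(it);                                      // a real allocator would return the remainder
  for (std::size_t i = 0; i < n; ++i) out.push_back(word_sz);
}
inline FreeList get_chunk_of_blocks(std::map<std::size_t, FreeList>& indexed,
                                    std::multiset<std::size_t>& dictionary,
                                    std::size_t word_sz, std::size_t n) {
  FreeList out;
  if (get_blocks_from_indexed_lists(indexed, word_sz, n, out)) return out;  // got it
  get_blocks_from_dictionary(dictionary, word_sz, n, out);                  // otherwise split a big block
  return out;
}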
// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next

View File

@ -172,6 +172,20 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// list of size "word_sz", and must now be decremented.
void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
// Used by par_get_chunk_of_blocks() for the chunks from the
// indexed_free_lists.
bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
// Used by par_get_chunk_of_blocks_dictionary() to get a chunk
// evenly splittable into "n" chunks of size "word_sz" and return it. May
// split a larger chunk to obtain such a chunk.
FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
// Used by par_get_chunk_of_blocks() for the chunks from the
// dictionary.
void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
// Allocation helper functions
// Allocate using a strategy that takes from the indexed free lists
// first. This allocation strategy assumes a companion sweeping

View File

@ -81,8 +81,8 @@ void ConcurrentG1Refine::reset_threshold_step() {
}
}
void ConcurrentG1Refine::init() {
_hot_card_cache.initialize();
void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
_hot_card_cache.initialize(card_counts_storage);
}
void ConcurrentG1Refine::stop() {

View File

@ -34,6 +34,7 @@
class ConcurrentG1RefineThread;
class G1CollectedHeap;
class G1HotCardCache;
class G1RegionToSpaceMapper;
class G1RemSet;
class DirtyCardQueue;
@ -74,7 +75,7 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
~ConcurrentG1Refine();
void init(); // Accomplish some initialization that has to wait.
void init(G1RegionToSpaceMapper* card_counts_storage);
void stop();
void reinitialize_threads();

View File

@ -36,6 +36,7 @@
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
@ -99,12 +100,12 @@ int CMBitMapRO::heapWordDiffToOffsetDiff(size_t diff) const {
}
#ifndef PRODUCT
bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
bool CMBitMapRO::covers(MemRegion heap_rs) const {
// assert(_bm.map() == _virtual_space.low(), "map inconsistency");
assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
"size inconsistency");
return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
_bmWordSize == heap_rs.size()>>LogHeapWordSize;
return _bmStartWord == (HeapWord*)(heap_rs.start()) &&
_bmWordSize == heap_rs.word_size();
}
#endif
@ -112,33 +113,73 @@ void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
_bm.print_on_error(st, prefix);
}
bool CMBitMap::allocate(ReservedSpace heap_rs) {
_bmStartWord = (HeapWord*)(heap_rs.base());
_bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes
ReservedSpace brs(ReservedSpace::allocation_align_size_up(
(_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
if (!brs.is_reserved()) {
warning("ConcurrentMark marking bit map allocation failure");
return false;
}
MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
// For now we'll just commit all of the bit map up front.
// Later on we'll try to be more parsimonious with swap.
if (!_virtual_space.initialize(brs, brs.size())) {
warning("ConcurrentMark marking bit map backing store failure");
return false;
}
assert(_virtual_space.committed_size() == brs.size(),
"didn't reserve backing store for all of concurrent marking bit map?");
_bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
_bmWordSize, "inconsistency in bit map sizing");
_bm.set_size(_bmWordSize >> _shifter);
return true;
size_t CMBitMap::compute_size(size_t heap_size) {
return heap_size / mark_distance();
}
size_t CMBitMap::mark_distance() {
return MinObjAlignmentInBytes * BitsPerByte;
}
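compute_size() and mark_distance() tie the bitmap size directly to the object alignment: one bitmap bit per possible object start. As a worked example under the usual assumptions (8-byte minimum object alignment and 8 bits per byte; neither value is taken from this diff), one bitmap byte covers 64 heap bytes:
#include <cstddef>
constexpr std::size_t kMinObjAlignmentInBytes = 8;   // assumed value for the example
constexpr std::size_t kBitsPerByte            = 8;
constexpr std::size_t mark_distance() { return kMinObjAlignmentInBytes * kBitsPerByte; }          // 64
constexpr std::size_t bitmap_bytes(std::size_t heap_bytes) { return heap_bytes / mark_distance(); }
static_assert(bitmap_bytes(std::size_t(1) << 30) == (std::size_t(1) << 24),
              "a 1 GB heap needs a 16 MB mark bitmap under these assumptions");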
void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) {
_bmStartWord = heap.start();
_bmWordSize = heap.word_size();
_bm.set_map((BitMap::bm_word_t*) storage->reserved().start());
_bm.set_size(_bmWordSize >> _shifter);
storage->set_mapping_changed_listener(&_listener);
}
void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) {
// We need to clear the bitmap on commit, removing any existing information.
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
_bm->clearRange(mr);
}
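The listener registered via set_mapping_changed_listener() is what keeps the bitmap consistent when regions are committed lazily. A generic sketch of that observer hookup, with invented types that only illustrate the shape of the interaction, might be:
#include <algorithm>
#include <cstddef>
#include <vector>
struct MappingChangedListener {
  virtual void on_commit(unsigned start_region, std::size_t num_regions) = 0;
  virtual ~MappingChangedListener() = default;
};
class RegionMapper {
  MappingChangedListener* _listener = nullptr;
 public:
  void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
  void commit(unsigned start_region, std::size_t num_regions) {
    // ... commit the backing memory for [start_region, start_region + num_regions) ...
    if (_listener != nullptr) _listener->on_commit(start_region, num_regions);
  }
};
// A listener that clears the slice of an auxiliary byte array covering the
// freshly committed regions, mirroring what the bitmap listener above does.
class ClearingListener : public MappingChangedListener {
  std::vector<unsigned char>* _bytes;
  std::size_t _bytes_per_region;
 public:
  ClearingListener(std::vector<unsigned char>* bytes, std::size_t bytes_per_region)
      : _bytes(bytes), _bytes_per_region(bytes_per_region) {}
  void on_commit(unsigned start_region, std::size_t num_regions) override {
    std::fill_n(_bytes->begin() + start_region * _bytes_per_region,
                num_regions * _bytes_per_region, 0);
  }
};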
// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
private:
ConcurrentMark* _cm;
CMBitMap* _bitmap;
bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration.
public:
ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
}
virtual bool doHeapRegion(HeapRegion* r) {
size_t const chunk_size_in_words = M / HeapWordSize;
HeapWord* cur = r->bottom();
HeapWord* const end = r->end();
while (cur < end) {
MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
_bitmap->clearRange(mr);
cur += chunk_size_in_words;
// Abort iteration if after yielding the marking has been aborted.
if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
return true;
}
// Repeat the asserts from before the start of the closure. We will do them
// as asserts here to minimize their overhead on the product. However, we
// will have them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
}
return false;
}
};
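ClearBitmapHRClosure clears each region's slice of the bitmap in roughly 1 MB steps and offers to yield between steps so a concurrent cycle can abort the work. The same pattern in isolation, with a plain callback standing in for the yield/abort check and all names invented for the sketch:
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <functional>
// Clears len bytes starting at base in fixed-size chunks; returns true if the
// work was aborted by the yield check between chunks.
inline bool clear_in_chunks(unsigned char* base, std::size_t len,
                            const std::function<bool()>& yield_and_check_abort,
                            std::size_t chunk = std::size_t(1) << 20 /* 1 MB */) {
  for (std::size_t cur = 0; cur < len; cur += chunk) {
    std::memset(base + cur, 0, std::min(chunk, len - cur));  // clear one chunk
    if (yield_and_check_abort()) return true;                // yield point between chunks
  }
  return false;
}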
void CMBitMap::clearAll() {
_bm.clear();
ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
G1CollectedHeap::heap()->heap_region_iterate(&cl);
guarantee(cl.complete(), "Must have completed iteration.");
return;
}
@ -483,10 +524,10 @@ uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
return MAX2((n_par_threads + 2) / 4, 1U);
}
ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) :
_g1h(g1h),
_markBitMap1(log2_intptr(MinObjAlignment)),
_markBitMap2(log2_intptr(MinObjAlignment)),
_markBitMap1(),
_markBitMap2(),
_parallel_marking_threads(0),
_max_parallel_marking_threads(0),
_sleep_factor(0.0),
@ -495,7 +536,7 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
_cleanup_task_overhead(1.0),
_cleanup_list("Cleanup List"),
_region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
_card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
_card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >>
CardTableModRefBS::card_shift,
false /* in_resource_area*/),
@ -545,14 +586,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
"heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end));
}
if (!_markBitMap1.allocate(heap_rs)) {
warning("Failed to allocate first CM bit map");
return;
}
if (!_markBitMap2.allocate(heap_rs)) {
warning("Failed to allocate second CM bit map");
return;
}
_markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
_markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage);
// Create & start a ConcurrentMark thread.
_cmThread = new ConcurrentMarkThread(this);
@ -563,8 +598,8 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
}
assert(CGC_lock != NULL, "Where's the CGC_lock?");
assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency");
assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency");
SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
satb_qs.set_buffer_size(G1SATBBufferSize);
@ -724,38 +759,17 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
clear_all_count_data();
// so that the call below can read a sensible value
_heap_start = (HeapWord*) heap_rs.base();
_heap_start = g1h->reserved_region().start();
set_non_marking_state();
_completed_initialization = true;
}
void ConcurrentMark::update_g1_committed(bool force) {
// If concurrent marking is not in progress, then we do not need to
// update _heap_end.
if (!concurrent_marking_in_progress() && !force) return;
MemRegion committed = _g1h->g1_committed();
assert(committed.start() == _heap_start, "start shouldn't change");
HeapWord* new_end = committed.end();
if (new_end > _heap_end) {
// The heap has been expanded.
_heap_end = new_end;
}
// Notice that the heap can also shrink. However, this only happens
// during a Full GC (at least currently) and the entire marking
// phase will bail out and the task will not be restarted. So, let's
// do nothing.
}
void ConcurrentMark::reset() {
// Starting values for these two. This should be called in a STW
// phase. CM will be notified of any future g1_committed expansions
// will be at the end of evacuation pauses, when tasks are
// inactive.
MemRegion committed = _g1h->g1_committed();
_heap_start = committed.start();
_heap_end = committed.end();
// phase.
MemRegion reserved = _g1h->g1_reserved();
_heap_start = reserved.start();
_heap_end = reserved.end();
// Separated the asserts so that we know which one fires.
assert(_heap_start != NULL, "heap bounds should look ok");
@ -827,7 +841,6 @@ void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurren
assert(out_of_regions(),
err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
p2i(_finger), p2i(_heap_end)));
update_g1_committed(true);
}
}
@ -846,7 +859,6 @@ ConcurrentMark::~ConcurrentMark() {
void ConcurrentMark::clearNextBitmap() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy();
// Make sure that the concurrent mark thread looks to still be in
// the current cycle.
@ -858,41 +870,36 @@ void ConcurrentMark::clearNextBitmap() {
// is the case.
guarantee(!g1h->mark_in_progress(), "invariant");
// clear the mark bitmap (no grey objects to start with).
// We need to do this in chunks and offer to yield in between
// each chunk.
HeapWord* start = _nextMarkBitMap->startWord();
HeapWord* end = _nextMarkBitMap->endWord();
HeapWord* cur = start;
size_t chunkSize = M;
while (cur < end) {
HeapWord* next = cur + chunkSize;
if (next > end) {
next = end;
}
MemRegion mr(cur,next);
_nextMarkBitMap->clearRange(mr);
cur = next;
do_yield_check();
ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
g1h->heap_region_iterate(&cl);
// Repeat the asserts from above. We'll do them as asserts here to
// minimize their overhead on the product. However, we'll have
// them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(cmThread()->during_cycle(), "invariant");
assert(!g1h->mark_in_progress(), "invariant");
// Clear the liveness counting data. If the marking has been aborted, the abort()
// call already did that.
if (cl.complete()) {
clear_all_count_data();
}
// Clear the liveness counting data
clear_all_count_data();
// Repeat the asserts from above.
guarantee(cmThread()->during_cycle(), "invariant");
guarantee(!g1h->mark_in_progress(), "invariant");
}
class CheckBitmapClearHRClosure : public HeapRegionClosure {
CMBitMap* _bitmap;
bool _error;
public:
CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) {
}
virtual bool doHeapRegion(HeapRegion* r) {
return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
}
};
bool ConcurrentMark::nextMarkBitmapIsClear() {
return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end;
CheckBitmapClearHRClosure cl(_nextMarkBitMap);
_g1h->heap_region_iterate(&cl);
return cl.complete();
}
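Both clearNextBitmap() and nextMarkBitmapIsClear() are now expressed as closures handed to heap_region_iterate() instead of walking raw address ranges. The closure-over-regions pattern itself, reduced to a minimal sketch with invented types:
#include <cstddef>
#include <vector>
struct Region { std::size_t index; };
struct RegionClosure {
  virtual bool do_region(Region& r) = 0;     // return true to stop the iteration early
  virtual ~RegionClosure() = default;
};
struct Heap {
  std::vector<Region> regions;
  // Returns true if the closure was applied to every region without aborting.
  bool region_iterate(RegionClosure* cl) {
    for (Region& r : regions) {
      if (cl->do_region(r)) return false;    // closure asked to stop
    }
    return true;
  }
};
struct CountingClosure : RegionClosure {
  std::size_t visited = 0;
  bool do_region(Region&) override { ++visited; return false; }
};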
class NoteStartOfMarkHRClosure: public HeapRegionClosure {
@ -2193,10 +2200,10 @@ void ConcurrentMark::completeCleanup() {
_cleanup_list.length());
}
// Noone else should be accessing the _cleanup_list at this point,
// so it's not necessary to take any locks
// No one else should be accessing the _cleanup_list at this point,
// so it is not necessary to take any locks
while (!_cleanup_list.is_empty()) {
HeapRegion* hr = _cleanup_list.remove_head();
HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
assert(hr != NULL, "Got NULL from a non-empty list");
hr->par_clear();
tmp_free_list.add_ordered(hr);
@ -2980,22 +2987,25 @@ ConcurrentMark::claim_region(uint worker_id) {
// claim_region() and a humongous object allocation might force us
// to do a bit of unnecessary work (due to some unnecessary bitmap
// iterations) but it should not introduce any correctness issues.
HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
HeapWord* bottom = curr_region->bottom();
HeapWord* end = curr_region->end();
HeapWord* limit = curr_region->next_top_at_mark_start();
HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger);
if (verbose_low()) {
gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
"["PTR_FORMAT", "PTR_FORMAT"), "
"limit = "PTR_FORMAT,
worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
}
// The call to heap_region_containing_raw() above may return NULL because we
// always scan and claim regions up to the end of the heap. In this case, just
// jump to the next region.
HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
// Is the gap between reading the finger and doing the CAS too long?
HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
if (res == finger) {
if (res == finger && curr_region != NULL) {
// we succeeded
HeapWord* bottom = curr_region->bottom();
HeapWord* limit = curr_region->next_top_at_mark_start();
if (verbose_low()) {
gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" "
"["PTR_FORMAT", "PTR_FORMAT"), "
"limit = "PTR_FORMAT,
worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit));
}
// notice that _finger == end cannot be guaranteed here since,
// someone else might have moved the finger even further
@ -3026,10 +3036,17 @@ ConcurrentMark::claim_region(uint worker_id) {
} else {
assert(_finger > finger, "the finger should have moved forward");
if (verbose_low()) {
gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
"global finger = "PTR_FORMAT", "
"our finger = "PTR_FORMAT,
worker_id, p2i(_finger), p2i(finger));
if (curr_region == NULL) {
gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, "
"global finger = "PTR_FORMAT", "
"our finger = "PTR_FORMAT,
worker_id, p2i(_finger), p2i(finger));
} else {
gclog_or_tty->print_cr("[%u] somebody else moved the finger, "
"global finger = "PTR_FORMAT", "
"our finger = "PTR_FORMAT,
worker_id, p2i(_finger), p2i(finger));
}
}
// read it again
@ -3144,8 +3161,10 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
// happens, heap_region_containing() will return the bottom of the
// corresponding starts humongous region and the check below will
// not hold any more.
// Since we always iterate over all regions, we might get a NULL HeapRegion
// here.
HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
guarantee(global_finger == global_hr->bottom(),
guarantee(global_hr == NULL || global_finger == global_hr->bottom(),
err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
p2i(global_finger), HR_FORMAT_PARAMS(global_hr)));
}
@ -3158,7 +3177,7 @@ void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
if (task_finger != NULL && task_finger < _heap_end) {
// See above note on the global finger verification.
HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
guarantee(task_finger == task_hr->bottom() ||
guarantee(task_hr == NULL || task_finger == task_hr->bottom() ||
!task_hr->in_collection_set(),
err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
p2i(task_finger), HR_FORMAT_PARAMS(task_hr)));
@ -4674,7 +4693,6 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
_hum_prev_live_bytes(0), _hum_next_live_bytes(0),
_total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
MemRegion g1_committed = g1h->g1_committed();
MemRegion g1_reserved = g1h->g1_reserved();
double now = os::elapsedTime();
@ -4682,10 +4700,8 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
_out->cr();
_out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
_out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
G1PPRL_SUM_ADDR_FORMAT("committed")
G1PPRL_SUM_ADDR_FORMAT("reserved")
G1PPRL_SUM_BYTE_FORMAT("region-size"),
p2i(g1_committed.start()), p2i(g1_committed.end()),
p2i(g1_reserved.start()), p2i(g1_reserved.end()),
HeapRegion::GrainBytes);
_out->print_cr(G1PPRL_LINE_PREFIX);

View File

@ -27,10 +27,12 @@
#include "classfile/javaClasses.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "gc_implementation/shared/gcId.hpp"
#include "utilities/taskqueue.hpp"
class G1CollectedHeap;
class CMBitMap;
class CMTask;
typedef GenericTaskQueue<oop, mtGC> CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
@ -57,7 +59,6 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
HeapWord* _bmStartWord; // base address of range covered by map
size_t _bmWordSize; // map size (in #HeapWords covered)
const int _shifter; // map to char or bit
VirtualSpace _virtual_space; // underlying the bit map
BitMap _bm; // the bit map itself
public:
@ -115,42 +116,41 @@ class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
void print_on_error(outputStream* st, const char* prefix) const;
// debugging
NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
NOT_PRODUCT(bool covers(MemRegion rs) const;)
};
class CMBitMapMappingChangedListener : public G1MappingChangedListener {
private:
CMBitMap* _bm;
public:
CMBitMapMappingChangedListener() : _bm(NULL) {}
void set_bitmap(CMBitMap* bm) { _bm = bm; }
virtual void on_commit(uint start_idx, size_t num_regions);
};
class CMBitMap : public CMBitMapRO {
private:
CMBitMapMappingChangedListener _listener;
public:
// constructor
CMBitMap(int shifter) :
CMBitMapRO(shifter) {}
static size_t compute_size(size_t heap_size);
// Returns the number of bytes on the heap between two marks in the bitmap.
static size_t mark_distance();
// Allocates the back store for the marking bitmap
bool allocate(ReservedSpace heap_rs);
CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
// Initializes the underlying BitMap to cover the given area.
void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
// Write marks.
inline void mark(HeapWord* addr);
inline void clear(HeapWord* addr);
inline bool parMark(HeapWord* addr);
inline bool parClear(HeapWord* addr);
// write marks
void mark(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?");
_bm.set_bit(heapWordToOffset(addr));
}
void clear(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?");
_bm.clear_bit(heapWordToOffset(addr));
}
bool parMark(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?");
return _bm.par_set_bit(heapWordToOffset(addr));
}
bool parClear(HeapWord* addr) {
assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
"outside underlying space?");
return _bm.par_clear_bit(heapWordToOffset(addr));
}
void markRange(MemRegion mr);
void clearAll();
void clearRange(MemRegion mr);
// Starting at the bit corresponding to "addr" (inclusive), find the next
@ -161,6 +161,9 @@ class CMBitMap : public CMBitMapRO {
// the run. If there is no "1" bit at or after "addr", return an empty
// MemRegion.
MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
// Clear the whole mark bitmap.
void clearAll();
};
// Represents a marking stack used by ConcurrentMarking in the G1 collector.
@ -680,7 +683,7 @@ public:
return _task_queues->steal(worker_id, hash_seed, obj);
}
ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
~ConcurrentMark();
ConcurrentMarkThread* cmThread() { return _cmThread; }
@ -736,7 +739,8 @@ public:
// Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap();
// Return whether the next mark bitmap has no marks set.
// Return whether the next mark bitmap has no marks set. To be used for assertions
// only. Will not yield to pause requests.
bool nextMarkBitmapIsClear();
// These two do the work that needs to be done before and after the
@ -794,12 +798,6 @@ public:
bool verify_thread_buffers,
bool verify_fingers) PRODUCT_RETURN;
// It is called at the end of an evacuation pause during marking so
// that CM is notified of where the new end of the heap is. It
// doesn't do anything if concurrent_marking_in_progress() is false,
// unless the force parameter is true.
void update_g1_committed(bool force = false);
bool isMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;

View File

@ -268,6 +268,36 @@ inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
return iterate(cl, mr);
}
#define check_mark(addr) \
assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \
"outside underlying space?"); \
assert(G1CollectedHeap::heap()->is_in_exact(addr), \
err_msg("Trying to access not available bitmap "PTR_FORMAT \
" corresponding to "PTR_FORMAT" (%u)", \
p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
inline void CMBitMap::mark(HeapWord* addr) {
check_mark(addr);
_bm.set_bit(heapWordToOffset(addr));
}
inline void CMBitMap::clear(HeapWord* addr) {
check_mark(addr);
_bm.clear_bit(heapWordToOffset(addr));
}
inline bool CMBitMap::parMark(HeapWord* addr) {
check_mark(addr);
return _bm.par_set_bit(heapWordToOffset(addr));
}
inline bool CMBitMap::parClear(HeapWord* addr) {
check_mark(addr);
return _bm.par_clear_bit(heapWordToOffset(addr));
}
#undef check_mark
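The check_mark macro bundles two sanity checks before every bitmap update: the address must lie inside the range the bitmap describes, and the corresponding region must actually be committed. A stand-alone illustration of the first of those checks, using plain word indices instead of HeapWord* and with everything here invented for the example:
#include <cassert>
#include <cstddef>
#include <vector>
class MarkBitmap {
  std::size_t _start_word;
  std::size_t _num_words;
  std::vector<bool> _bits;
 public:
  MarkBitmap(std::size_t start_word, std::size_t num_words)
      : _start_word(start_word), _num_words(num_words), _bits(num_words, false) {}
  void mark(std::size_t word_addr) {
    assert(word_addr >= _start_word && word_addr < _start_word + _num_words &&
           "outside underlying space?");
    _bits[word_addr - _start_word] = true;
  }
  void clear(std::size_t word_addr) {
    assert(word_addr >= _start_word && word_addr < _start_word + _num_words &&
           "outside underlying space?");
    _bits[word_addr - _start_word] = false;
  }
};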
inline void CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj;
assert(_g1h->is_in_g1_reserved(objAddr), "invariant");

View File

@ -173,7 +173,7 @@ public:
// Should be called when we want to release the active region which
// is returned after it's been retired.
HeapRegion* release();
virtual HeapRegion* release();
#if G1_ALLOC_REGION_TRACING
void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);

View File

@ -32,64 +32,37 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
// Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot
// retrieve it here since this would cause firing of several asserts. The code
// executed after commit of a region already needs to do some re-initialization of
// the HeapRegion, so we combine that.
}
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////
G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
size_t init_word_size) :
_reserved(reserved), _end(NULL)
{
size_t size = compute_size(reserved.word_size());
ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
if (!rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}
if (!_vs.initialize(rs, 0)) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array");
}
G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) :
_reserved(), _end(NULL), _listener(), _offset_array(NULL) {
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
_reserved = heap;
_end = NULL;
MemRegion bot_reserved = storage->reserved();
_offset_array = (u_char*)bot_reserved.start();
_end = _reserved.end();
storage->set_mapping_changed_listener(&_listener);
_offset_array = (u_char*)_vs.low_boundary();
resize(init_word_size);
if (TraceBlockOffsetTable) {
gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
gclog_or_tty->print_cr(" "
" rs.base(): " INTPTR_FORMAT
" rs.size(): " INTPTR_FORMAT
" rs end(): " INTPTR_FORMAT,
rs.base(), rs.size(), rs.base() + rs.size());
gclog_or_tty->print_cr(" "
" _vs.low_boundary(): " INTPTR_FORMAT
" _vs.high_boundary(): " INTPTR_FORMAT,
_vs.low_boundary(),
_vs.high_boundary());
}
}
void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
size_t new_size = compute_size(new_word_size);
size_t old_size = _vs.committed_size();
size_t delta;
char* high = _vs.high();
_end = _reserved.start() + new_word_size;
if (new_size > old_size) {
delta = ReservedSpace::page_align_size_up(new_size - old_size);
assert(delta > 0, "just checking");
if (!_vs.expand_by(delta)) {
// Do better than this for Merlin
vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
}
assert(_vs.high() == high + delta, "invalid expansion");
// Initialization of the contents is left to the
// G1BlockOffsetArray that uses it.
} else {
delta = ReservedSpace::page_align_size_down(old_size - new_size);
if (delta == 0) return;
_vs.shrink_by(delta);
assert(_vs.high() == high - delta, "invalid expansion");
bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end());
}
}
@ -100,18 +73,7 @@ bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
}
void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
check_index(index_for(right - 1), "right address out of range");
assert(left < right, "Heap addresses out of order");
size_t num_cards = pointer_delta(right, left) >> LogN_words;
if (UseMemSetInBOT) {
memset(&_offset_array[index_for(left)], offset, num_cards);
} else {
size_t i = index_for(left);
const size_t end = i + num_cards;
for (; i < end; i++) {
_offset_array[i] = offset;
}
}
set_offset_array(index_for(left), index_for(right -1), offset);
}
//////////////////////////////////////////////////////////////////////
@ -650,6 +612,25 @@ G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
_next_offset_index = 0;
}
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() {
assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
"just checking");
_next_offset_index = _array->index_for_raw(_bottom);
_next_offset_index++;
_next_offset_threshold =
_array->address_for_index_raw(_next_offset_index);
return _next_offset_threshold;
}
void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() {
assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
"just checking");
size_t bottom_index = _array->index_for_raw(_bottom);
assert(_array->address_for_index_raw(bottom_index) == _bottom,
"Precondition of call");
_array->set_offset_array_raw(bottom_index, 0);
}
HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
"just checking");
@ -674,8 +655,7 @@ G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
assert(new_top <= _end, "_end should have already been updated");
// The first BOT entry should have offset 0.
zero_bottom_entry();
initialize_threshold();
reset_bot();
alloc_block(_bottom, new_top);
}

View File

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
@ -106,6 +107,11 @@ public:
inline HeapWord* block_start_const(const void* addr) const;
};
class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener {
public:
virtual void on_commit(uint start_idx, size_t num_regions);
};
// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN". An array with an entry
// for each such subregion indicates how far back one must go to find the
@ -125,6 +131,7 @@ class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
friend class VMStructs;
private:
G1BlockOffsetSharedArrayMappingChangedListener _listener;
// The reserved region covered by the shared array.
MemRegion _reserved;
@ -133,16 +140,8 @@ private:
// Array for keeping offsets for retrieving object start fast given an
// address.
VirtualSpace _vs;
u_char* _offset_array; // byte array keeping backwards offsets
void check_index(size_t index, const char* msg) const {
assert(index < _vs.committed_size(),
err_msg("%s - "
"index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT,
msg, index, _vs.committed_size()));
}
void check_offset(size_t offset, const char* msg) const {
assert(offset <= N_words,
err_msg("%s - "
@ -152,63 +151,33 @@ private:
// Bounds checking accessors:
// For performance these have to devolve to array accesses in product builds.
u_char offset_array(size_t index) const {
check_index(index, "index out of range");
return _offset_array[index];
}
inline u_char offset_array(size_t index) const;
void set_offset_array(HeapWord* left, HeapWord* right, u_char offset);
void set_offset_array(size_t index, u_char offset) {
check_index(index, "index out of range");
check_offset(offset, "offset too large");
void set_offset_array_raw(size_t index, u_char offset) {
_offset_array[index] = offset;
}
void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
check_index(index, "index out of range");
assert(high >= low, "addresses out of order");
check_offset(pointer_delta(high, low), "offset too large");
_offset_array[index] = (u_char) pointer_delta(high, low);
}
inline void set_offset_array(size_t index, u_char offset);
void set_offset_array(size_t left, size_t right, u_char offset) {
check_index(right, "right index out of range");
assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1;
if (UseMemSetInBOT) {
memset(&_offset_array[left], offset, num_cards);
} else {
size_t i = left;
const size_t end = i + num_cards;
for (; i < end; i++) {
_offset_array[i] = offset;
}
}
}
inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);
void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
check_index(index, "index out of range");
assert(high >= low, "addresses out of order");
check_offset(pointer_delta(high, low), "offset too large");
assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
}
inline void set_offset_array(size_t left, size_t right, u_char offset);
inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const;
bool is_card_boundary(HeapWord* p) const;
public:
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
// We always add an extra slot because if an object
// ends on a card boundary we put a 0 in the next
// offset array slot, so we want that slot always
// to be reserved.
size_t compute_size(size_t mem_region_words) {
size_t number_of_slots = (mem_region_words / N_words) + 1;
return ReservedSpace::page_align_size_up(number_of_slots);
static size_t compute_size(size_t mem_region_words) {
size_t number_of_slots = (mem_region_words / N_words);
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
public:
enum SomePublicConstants {
LogN = 9,
LogN_words = LogN - LogHeapWordSize,
@ -222,25 +191,21 @@ public:
// least "init_word_size".) The contents of the initial table are
// undefined; it is the responsibility of the constituent
// G1BlockOffsetTable(s) to initialize cards.
G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
// Notes a change in the committed size of the region covered by the
// table. The "new_word_size" may not be larger than the size of the
// reserved region this table covers.
void resize(size_t new_word_size);
G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage);
void set_bottom(HeapWord* new_bottom);
// Updates all the BlockOffsetArray's sharing this shared array to
// reflect the current "top"'s of their spaces.
void update_offset_arrays();
// Return the appropriate index into "_offset_array" for "p".
inline size_t index_for(const void* p) const;
inline size_t index_for_raw(const void* p) const;
// Return the address indicating the start of the region corresponding to
// "index" in "_offset_array".
inline HeapWord* address_for_index(size_t index) const;
// Variant of address_for_index that does not check the index for validity.
inline HeapWord* address_for_index_raw(size_t index) const {
return _reserved.start() + (index << LogN_words);
}
};
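With the page-granular extra slot gone, compute_size() is now a pure function of the covered word count. A back-of-the-envelope check of what that costs, using LogN = 9 as declared above and an assumed 8-byte heap word (the latter is not taken from this diff):
#include <cstddef>
constexpr std::size_t kLogN          = 9;                                            // 512-byte cards
constexpr std::size_t kHeapWordBytes = 8;                                            // assumption: 64-bit heap words
constexpr std::size_t kNWords        = (std::size_t(1) << kLogN) / kHeapWordBytes;   // 64 words per card
constexpr std::size_t bot_slots(std::size_t mem_region_words) { return mem_region_words / kNWords; }
static_assert(bot_slots((std::size_t(1) << 30) / kHeapWordBytes) == (std::size_t(1) << 21),
              "a 1 GB heap needs about 2 MB of block-offset entries under these assumptions");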
// And here is the G1BlockOffsetTable subtype that uses the array.
@ -480,6 +445,14 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
blk_start, blk_end);
}
// Variant of zero_bottom_entry that does not check for availability of the
// memory first.
void zero_bottom_entry_raw();
// Variant of initialize_threshold that does not check for availability of the
// memory first.
HeapWord* initialize_threshold_raw();
// Zero out the entry for _bottom (offset will be zero).
void zero_bottom_entry();
public:
G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
@ -487,8 +460,10 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
// bottom of the covered region.
HeapWord* initialize_threshold();
// Zero out the entry for _bottom (offset will be zero).
void zero_bottom_entry();
void reset_bot() {
zero_bottom_entry_raw();
initialize_threshold_raw();
}
// Return the next threshold, the point at which the table should be
// updated.

View File

@ -47,14 +47,69 @@ G1BlockOffsetTable::block_start_const(const void* addr) const {
}
}
#define check_index(index, msg) \
assert((index) < (_reserved.word_size() >> LogN_words), \
err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \
msg, (index), (_reserved.word_size() >> LogN_words))); \
assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)), \
err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT \
" (%u) is not in committed area.", \
(index), \
p2i(address_for_index_raw(index)), \
G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index))));
u_char G1BlockOffsetSharedArray::offset_array(size_t index) const {
check_index(index, "index out of range");
return _offset_array[index];
}
void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) {
check_index(index, "index out of range");
set_offset_array_raw(index, offset);
}
void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
check_index(index, "index out of range");
assert(high >= low, "addresses out of order");
size_t offset = pointer_delta(high, low);
check_offset(offset, "offset too large");
set_offset_array(index, (u_char)offset);
}
void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) {
check_index(right, "right index out of range");
assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1;
if (UseMemSetInBOT) {
memset(&_offset_array[left], offset, num_cards);
} else {
size_t i = left;
const size_t end = i + num_cards;
for (; i < end; i++) {
_offset_array[i] = offset;
}
}
}
void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
check_index(index, "index out of range");
assert(high >= low, "addresses out of order");
check_offset(pointer_delta(high, low), "offset too large");
assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset");
}
// Variant of index_for that does not check the index for validity.
inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const {
return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN;
}
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
char* pc = (char*)p;
assert(pc >= (char*)_reserved.start() &&
pc < (char*)_reserved.end(),
err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
size_t result = delta >> LogN;
size_t result = index_for_raw(p);
check_index(result, "bad index from address");
return result;
}
@ -62,7 +117,7 @@ inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
check_index(index, "index out of range");
HeapWord* result = _reserved.start() + (index << LogN_words);
HeapWord* result = address_for_index_raw(index);
assert(result >= _reserved.start() && result < _reserved.end(),
err_msg("bad address from index result " PTR_FORMAT
" _reserved.start() " PTR_FORMAT " _reserved.end() "
@ -71,6 +126,8 @@ G1BlockOffsetSharedArray::address_for_index(size_t index) const {
return result;
}
#undef check_index
inline size_t
G1BlockOffsetArray::block_size(const HeapWord* p) const {
return gsp()->block_size(p);

View File

@ -33,31 +33,26 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
_counts->clear_range(mr);
}
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
if (has_count_table()) {
assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
assert(from_card_num < to_card_num,
err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
from_card_num, to_card_num));
assert(to_card_num <= _committed_max_card_num,
err_msg("to card num out of range: "
"to: "SIZE_FORMAT ", "
"max: "SIZE_FORMAT,
to_card_num, _committed_max_card_num));
to_card_num = MIN2(_committed_max_card_num, to_card_num);
Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
}
}
G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
_g1h(g1h), _card_counts(NULL),
_reserved_max_card_num(0), _committed_max_card_num(0),
_committed_size(0) {}
_listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) {
_listener.set_cardcounts(this);
}
void G1CardCounts::initialize() {
void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
assert(_g1h->max_capacity() > 0, "initialization order");
assert(_g1h->capacity() == 0, "initialization order");
@ -70,70 +65,9 @@ void G1CardCounts::initialize() {
_ct_bs = _g1h->g1_barrier_set();
_ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
// Allocate/Reserve the counts table
size_t reserved_bytes = _g1h->max_capacity();
_reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
if (!rs.is_reserved()) {
warning("Could not reserve enough space for the card counts table");
guarantee(!has_reserved_count_table(), "should be NULL");
return;
}
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
_card_counts_storage.initialize(rs, 0);
_card_counts = (jubyte*) _card_counts_storage.low();
}
}
void G1CardCounts::resize(size_t heap_capacity) {
// Expand the card counts table to handle a heap with the given capacity.
if (!has_reserved_count_table()) {
// Don't expand if we failed to reserve the card counts table.
return;
}
assert(_committed_size ==
ReservedSpace::allocation_align_size_up(_committed_size),
err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
// Verify that the committed space for the card counts matches our
// committed max card num. Note for some allocation alignments, the
// amount of space actually committed for the counts table will be able
// to span more cards than the number spanned by the maximum heap.
size_t prev_committed_size = _committed_size;
size_t prev_committed_card_num = committed_to_card_num(prev_committed_size);
assert(prev_committed_card_num == _committed_max_card_num,
err_msg("Card mismatch: "
"prev: " SIZE_FORMAT ", "
"committed: "SIZE_FORMAT", "
"reserved: "SIZE_FORMAT,
prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num));
size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
size_t new_committed_card_num = committed_to_card_num(new_committed_size);
if (_committed_max_card_num < new_committed_card_num) {
// we need to expand the backing store for the card counts
size_t expand_size = new_committed_size - prev_committed_size;
if (!_card_counts_storage.expand_by(expand_size)) {
warning("Card counts table backing store commit failure");
return;
}
assert(_card_counts_storage.committed_size() == new_committed_size,
"expansion commit failure");
_committed_size = new_committed_size;
_committed_max_card_num = new_committed_card_num;
clear_range(prev_committed_card_num, _committed_max_card_num);
_card_counts = (jubyte*) mapper->reserved().start();
_reserved_max_card_num = mapper->reserved().byte_size();
mapper->set_mapping_changed_listener(&_listener);
}
}
@ -149,12 +83,13 @@ uint G1CardCounts::add_card_count(jbyte* card_ptr) {
uint count = 0;
if (has_count_table()) {
size_t card_num = ptr_2_card_num(card_ptr);
if (card_num < _committed_max_card_num) {
count = (uint) _card_counts[card_num];
if (count < G1ConcRSHotCardLimit) {
_card_counts[card_num] =
(jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
}
assert(card_num < _reserved_max_card_num,
err_msg("Card "SIZE_FORMAT" outside of card counts table (max size "SIZE_FORMAT")",
card_num, _reserved_max_card_num));
count = (uint) _card_counts[card_num];
if (count < G1ConcRSHotCardLimit) {
_card_counts[card_num] =
(jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
}
}
return count;
@ -165,31 +100,23 @@ bool G1CardCounts::is_hot(uint count) {
}
void G1CardCounts::clear_region(HeapRegion* hr) {
assert(!hr->isHumongous(), "Should have been cleared");
MemRegion mr(hr->bottom(), hr->end());
clear_range(mr);
}
void G1CardCounts::clear_range(MemRegion mr) {
if (has_count_table()) {
HeapWord* bottom = hr->bottom();
// We use the last address in hr as hr could be the
// last region in the heap. In which case trying to find
// the card for hr->end() will be an OOB access to the
// card table.
HeapWord* last = hr->end() - 1;
assert(_g1h->g1_committed().contains(last),
err_msg("last not in committed: "
"last: " PTR_FORMAT ", "
"committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
last,
_g1h->g1_committed().start(),
_g1h->g1_committed().end()));
const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
// We use the last address in the range because the range could represent the
// last region in the heap, in which case looking up the card for mr.end()
// would be an out-of-bounds access to the card table.
const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
#ifdef ASSERT
HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
assert(start_addr == hr->bottom(), "alignment");
assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
#endif // ASSERT
// Clear the counts for the (exclusive) card range.
@ -199,14 +126,22 @@ void G1CardCounts::clear_region(HeapRegion* hr) {
}
}
class G1CardCountsClearClosure : public HeapRegionClosure {
private:
G1CardCounts* _card_counts;
public:
G1CardCountsClearClosure(G1CardCounts* card_counts) :
HeapRegionClosure(), _card_counts(card_counts) { }
virtual bool doHeapRegion(HeapRegion* r) {
_card_counts->clear_region(r);
return false;
}
};
void G1CardCounts::clear_all() {
assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
clear_range((size_t)0, _committed_max_card_num);
G1CardCountsClearClosure cl(this);
_g1h->heap_region_iterate(&cl);
}
G1CardCounts::~G1CardCounts() {
if (has_reserved_count_table()) {
_card_counts_storage.release();
}
}
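The counts table is now sized once for the whole reserved heap and is only consulted through add_card_count() and is_hot(). The saturating-counter idea on its own, as a small hypothetical class where a plain constructor argument stands in for G1ConcRSHotCardLimit (nothing here is the VM's code):
#include <cstddef>
#include <cstdint>
#include <vector>
class CardCounts {
  std::vector<std::uint8_t> _counts;
  unsigned _hot_limit;
 public:
  CardCounts(std::size_t num_cards, unsigned hot_limit)
      : _counts(num_cards, 0), _hot_limit(hot_limit) {}
  // Returns the count before the increment; the stored value saturates at the limit.
  unsigned add_card_count(std::size_t card_num) {
    unsigned count = _counts[card_num];
    if (count < _hot_limit) _counts[card_num] = static_cast<std::uint8_t>(count + 1);
    return count;
  }
  bool is_hot(unsigned count) const { return count >= _hot_limit; }
};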

View File

@ -25,14 +25,26 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
class CardTableModRefBS;
class G1CardCounts;
class G1CollectedHeap;
class G1RegionToSpaceMapper;
class HeapRegion;
class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
private:
G1CardCounts* _counts;
public:
void set_cardcounts(G1CardCounts* counts) { _counts = counts; }
virtual void on_commit(uint start_idx, size_t num_regions);
};
// Table to track the number of times a card has been refined. Once
// a card has been refined a certain number of times, it is
// considered 'hot' and its refinement is delayed by inserting the
@ -41,6 +53,8 @@ class HeapRegion;
// is 'drained' during the next evacuation pause.
class G1CardCounts: public CHeapObj<mtGC> {
G1CardCountsMappingChangedListener _listener;
G1CollectedHeap* _g1h;
// The table of counts
@ -49,27 +63,18 @@ class G1CardCounts: public CHeapObj<mtGC> {
// Max capacity of the reserved space for the counts table
size_t _reserved_max_card_num;
// Max capacity of the committed space for the counts table
size_t _committed_max_card_num;
// Size of committed space for the counts table
size_t _committed_size;
// CardTable bottom.
const jbyte* _ct_bot;
// Barrier set
CardTableModRefBS* _ct_bs;
// The virtual memory backing the counts table
VirtualSpace _card_counts_storage;
// Returns true if the card counts table has been reserved.
bool has_reserved_count_table() { return _card_counts != NULL; }
// Returns true if the card counts table has been reserved and committed.
bool has_count_table() {
return has_reserved_count_table() && _committed_max_card_num > 0;
return has_reserved_count_table();
}
size_t ptr_2_card_num(const jbyte* card_ptr) {
@ -79,37 +84,24 @@ class G1CardCounts: public CHeapObj<mtGC> {
"_ct_bot: " PTR_FORMAT,
p2i(card_ptr), p2i(_ct_bot)));
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
assert(card_num >= 0 && card_num < _committed_max_card_num,
assert(card_num >= 0 && card_num < _reserved_max_card_num,
err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr)));
return card_num;
}
jbyte* card_num_2_ptr(size_t card_num) {
assert(card_num >= 0 && card_num < _committed_max_card_num,
assert(card_num >= 0 && card_num < _reserved_max_card_num,
err_msg("card num out of range: "SIZE_FORMAT, card_num));
return (jbyte*) (_ct_bot + card_num);
}
// Helper routine.
// Returns the number of cards that can be counted by the given committed
// table size, with a maximum of the number of cards spanned by the max
// capacity of the heap.
size_t committed_to_card_num(size_t committed_size) {
return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte));
}
// Clear the counts table for the given (exclusive) index range.
void clear_range(size_t from_card_num, size_t to_card_num);
public:
G1CardCounts(G1CollectedHeap* g1h);
~G1CardCounts();
void initialize();
// Resize the committed space for the card counts table in
// response to a resize of the committed space for the heap.
void resize(size_t heap_capacity);
void initialize(G1RegionToSpaceMapper* mapper);
// Increments the refinement count for the given card.
// Returns the pre-increment count value.
@ -122,8 +114,10 @@ class G1CardCounts: public CHeapObj<mtGC> {
// Clears the card counts for the cards spanned by the region
void clear_region(HeapRegion* hr);
// Clears the card counts for the cards spanned by the MemRegion
void clear_range(MemRegion mr);
// Clear the entire card counts table during GC.
// Updates the policy stats with the duration.
void clear_all();
};

View File

@ -45,12 +45,13 @@
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
@ -381,6 +382,14 @@ void YoungList::print() {
gclog_or_tty->cr();
}
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
OtherRegionsTable::invalidate(start_idx, num_regions);
}
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
reset_from_card_cache(start_idx, num_regions);
}
void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
{
// Claim the right to put the region on the dirty cards region list
@ -523,9 +532,9 @@ G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
// again to allocate from it.
append_secondary_free_list();
assert(!_free_list.is_empty(), "if the secondary_free_list was not "
assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
"empty we should have moved at least one entry to the free_list");
HeapRegion* res = _free_list.remove_region(is_old);
HeapRegion* res = _hrs.allocate_free_region(is_old);
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"allocated "HR_FORMAT" from secondary_free_list",
@ -566,7 +575,7 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_e
}
}
res = _free_list.remove_region(is_old);
res = _hrs.allocate_free_region(is_old);
if (res == NULL) {
if (G1ConcRegionFreeingVerbose) {
@ -591,8 +600,8 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_e
// Given that expand() succeeded in expanding the heap, and we
// always expand the heap by an amount aligned to the heap
// region size, the free list should in theory not be empty.
// In either case remove_region() will check for NULL.
res = _free_list.remove_region(is_old);
// In either case allocate_free_region() will check for NULL.
res = _hrs.allocate_free_region(is_old);
} else {
_expand_heap_after_alloc_failure = false;
}
@ -600,55 +609,11 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_e
return res;
}
uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
size_t word_size) {
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
uint first = G1_NULL_HRS_INDEX;
if (num_regions == 1) {
// Only one region to allocate, no need to go through the slower
// path. The caller will attempt the expansion if this fails, so
// let's not try to expand here too.
HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
if (hr != NULL) {
first = hr->hrs_index();
} else {
first = G1_NULL_HRS_INDEX;
}
} else {
// We can't allocate humongous regions while cleanupComplete() is
// running, since some of the regions we find to be empty might not
// yet be added to the free list and it is not straightforward to
// know which list they are on so that we can remove them. Note
// that we only need to do this if we need to allocate more than
// one region to satisfy the current humongous allocation
// request. If we are only allocating one region we use the common
// region allocation code (see above).
wait_while_free_regions_coming();
append_secondary_free_list_if_not_empty_with_lock();
if (free_regions() >= num_regions) {
first = _hrs.find_contiguous(num_regions);
if (first != G1_NULL_HRS_INDEX) {
for (uint i = first; i < first + num_regions; ++i) {
HeapRegion* hr = region_at(i);
assert(hr->is_empty(), "sanity");
assert(is_on_master_free_list(hr), "sanity");
hr->set_pending_removal(true);
}
_free_list.remove_all_pending(num_regions);
}
}
}
return first;
}
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
uint num_regions,
size_t word_size) {
assert(first != G1_NULL_HRS_INDEX, "pre-condition");
assert(first != G1_NO_HRS_INDEX, "pre-condition");
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
@ -786,42 +751,70 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
verify_region_sets_optional();
size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
uint x_num = expansion_regions();
uint fs = _hrs.free_suffix();
uint first = humongous_obj_allocate_find_first(num_regions, word_size);
if (first == G1_NULL_HRS_INDEX) {
// The only thing we can do now is attempt expansion.
if (fs + x_num >= num_regions) {
// If the number of regions we're trying to allocate for this
// object is at most the number of regions in the free suffix,
// then the call to humongous_obj_allocate_find_first() above
// should have succeeded and we wouldn't be here.
//
// We should only be trying to expand when the free suffix is
// not sufficient for the object _and_ we have some expansion
// room available.
assert(num_regions > fs, "earlier allocation should have succeeded");
uint first = G1_NO_HRS_INDEX;
uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
if (obj_regions == 1) {
// Only one region to allocate, try to use a fast path by directly allocating
// from the free lists. Do not try to expand here, we will potentially do that
// later.
HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
if (hr != NULL) {
first = hr->hrs_index();
}
} else {
// We can't allocate humongous regions spanning more than one region while
// cleanupComplete() is running, since some of the regions we find to be
// empty might not yet be added to the free list. It is not straightforward
// to know which list they are on so that we can remove them. We only
// need to do this if we need to allocate more than one region to satisfy the
// current humongous allocation request. If we are only allocating one region
// we use the one-region region allocation code (see above), that already
// potentially waits for regions from the secondary free list.
wait_while_free_regions_coming();
append_secondary_free_list_if_not_empty_with_lock();
// Policy: Try only empty regions (i.e. already committed first). Maybe we
// are lucky enough to find some.
first = _hrs.find_contiguous_only_empty(obj_regions);
if (first != G1_NO_HRS_INDEX) {
_hrs.allocate_free_regions_starting_at(first, obj_regions);
}
}
if (first == G1_NO_HRS_INDEX) {
// Policy: We could not find enough regions for the humongous object in the
// free list. Look through the heap to find a mix of free and uncommitted regions.
// If so, try expansion.
first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
if (first != G1_NO_HRS_INDEX) {
// We found something. Make sure these regions are committed, i.e. expand
// the heap. Alternatively we could do a defragmentation GC.
ergo_verbose1(ErgoHeapSizing,
"attempt heap expansion",
ergo_format_reason("humongous allocation request failed")
ergo_format_byte("allocation request"),
word_size * HeapWordSize);
if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
// Even though the heap was expanded, it might not have
// reached the desired size. So, we cannot assume that the
// allocation will succeed.
first = humongous_obj_allocate_find_first(num_regions, word_size);
_hrs.expand_at(first, obj_regions);
g1_policy()->record_new_heap_size(num_regions());
#ifdef ASSERT
for (uint i = first; i < first + obj_regions; ++i) {
HeapRegion* hr = region_at(i);
assert(hr->is_empty(), "sanity");
assert(is_on_master_free_list(hr), "sanity");
}
#endif
_hrs.allocate_free_regions_starting_at(first, obj_regions);
} else {
// Policy: Potentially trigger a defragmentation GC.
}
}
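Taken together, the comments above describe a three-step policy: a fast path through the free list for single-region objects, a search for already-committed contiguous empty regions for multi-region objects, and a final fallback that also considers uncommitted regions and expands the heap to commit them. The following standalone sketch only mirrors that control flow; HeapStub and its method names are hypothetical stand-ins, not the HotSpot HeapRegionSeq interface.

// Minimal sketch of the humongous-allocation policy described above
// (assumed stand-in types; not the HotSpot API).
struct RegionIndex { unsigned value; bool valid; };

struct HeapStub {
  RegionIndex single_from_free_list()               { return {0, true};  }  // fast path
  RegionIndex find_contiguous_empty(unsigned n)     { return {0, false}; }  // committed regions only
  RegionIndex find_empty_or_unavailable(unsigned n) { return {5, true};  }  // may include uncommitted
  bool expand_at(unsigned first, unsigned n)        { return true;       }  // commit the uncommitted ones
};

RegionIndex allocate_humongous_regions(HeapStub& heap, unsigned obj_regions) {
  RegionIndex first = {0, false};
  if (obj_regions == 1) {
    first = heap.single_from_free_list();              // one region: use the regular path
  } else {
    first = heap.find_contiguous_empty(obj_regions);   // policy: try committed empty regions first
  }
  if (!first.valid) {
    // Fall back to a mix of free and uncommitted regions; if found, expand (commit) them.
    first = heap.find_empty_or_unavailable(obj_regions);
    if (first.valid && !heap.expand_at(first.value, obj_regions)) {
      first.valid = false;                             // expansion failed; caller may trigger a GC
    }
  }
  return first;
}

int main() {
  HeapStub heap;
  return allocate_humongous_regions(heap, 3).valid ? 0 : 1;
}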
HeapWord* result = NULL;
if (first != G1_NULL_HRS_INDEX) {
result =
humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
if (first != G1_NO_HRS_INDEX) {
result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
assert(result != NULL, "it should always return a valid result");
// A successful humongous object allocation changes the used space
@ -1384,7 +1377,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
}
assert(free_regions() == 0, "we should not have added any free regions");
assert(num_free_regions() == 0, "we should not have added any free regions");
rebuild_region_sets(false /* free_list_only */);
// Enqueue any discovered reference objects that have
@ -1749,21 +1742,6 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
return NULL;
}
void G1CollectedHeap::update_committed_space(HeapWord* old_end,
HeapWord* new_end) {
assert(old_end != new_end, "don't call this otherwise");
assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
// Update the committed mem region.
_g1_committed.set_end(new_end);
// Tell the card table about the update.
Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
// Tell the BOT about the update.
_bot_shared->resize(_g1_committed.word_size());
// Tell the hot card cache about the update
_cg1r->hot_card_cache()->resize_card_counts(capacity());
}
bool G1CollectedHeap::expand(size_t expand_bytes) {
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
aligned_expand_bytes = align_size_up(aligned_expand_bytes,
@ -1774,55 +1752,22 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
ergo_format_byte("attempted expansion amount"),
expand_bytes, aligned_expand_bytes);
if (_g1_storage.uncommitted_size() == 0) {
if (is_maximal_no_gc()) {
ergo_verbose0(ErgoHeapSizing,
"did not expand the heap",
ergo_format_reason("heap already fully expanded"));
return false;
}
// First commit the memory.
HeapWord* old_end = (HeapWord*) _g1_storage.high();
bool successful = _g1_storage.expand_by(aligned_expand_bytes);
if (successful) {
// Then propagate this update to the necessary data structures.
HeapWord* new_end = (HeapWord*) _g1_storage.high();
update_committed_space(old_end, new_end);
uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
assert(regions_to_expand > 0, "Must expand by at least one region");
FreeRegionList expansion_list("Local Expansion List");
MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
assert(mr.start() == old_end, "post-condition");
// mr might be a smaller region than what was requested if
// expand_by() was unable to allocate the HeapRegion instances
assert(mr.end() <= new_end, "post-condition");
uint expanded_by = _hrs.expand_by(regions_to_expand);
size_t actual_expand_bytes = mr.byte_size();
if (expanded_by > 0) {
size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
"post-condition");
if (actual_expand_bytes < aligned_expand_bytes) {
// We could not expand _hrs to the desired size. In this case we
// need to shrink the committed space accordingly.
assert(mr.end() < new_end, "invariant");
size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
// First uncommit the memory.
_g1_storage.shrink_by(diff_bytes);
// Then propagate this update to the necessary data structures.
update_committed_space(new_end, mr.end());
}
_free_list.add_as_tail(&expansion_list);
if (_hr_printer.is_active()) {
HeapWord* curr = mr.start();
while (curr < mr.end()) {
HeapWord* curr_end = curr + HeapRegion::GrainWords;
_hr_printer.commit(curr, curr_end);
curr = curr_end;
}
assert(curr == mr.end(), "post-condition");
}
g1_policy()->record_new_heap_size(n_regions());
g1_policy()->record_new_heap_size(num_regions());
} else {
ergo_verbose0(ErgoHeapSizing,
"did not expand the heap",
@ -1830,12 +1775,12 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
// The expansion of the virtual storage space was unsuccessful.
// Let's see if it was because we ran out of swap.
if (G1ExitOnExpansionFailure &&
_g1_storage.uncommitted_size() >= aligned_expand_bytes) {
_hrs.available() >= regions_to_expand) {
// We had head room...
vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
}
}
return successful;
return regions_to_expand > 0;
}
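For reference, the sizing at the top of expand() is plain alignment arithmetic: the requested byte count is rounded up to the OS page size and then to the region size, and the result is converted into a whole number of regions to commit. A minimal standalone sketch, assuming 4 KB pages and 1 MB regions (the real values are platform- and ergonomics-dependent):

#include <cstddef>
#include <cstdio>

// Standalone sketch of the sizing in expand(): the request is aligned up to the
// page size, then to the region size, and converted into a region count.
// 4 KB pages and 1 MB regions are assumed values for illustration only.
static const size_t kPageBytes   = 4 * 1024;
static const size_t kRegionBytes = 1024 * 1024;

static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

int main() {
  size_t request = 2 * 1024 * 1024 + 1;                 // just over two regions
  size_t aligned = align_up(align_up(request, kPageBytes), kRegionBytes);
  unsigned regions_to_expand = (unsigned)(aligned / kRegionBytes);
  printf("%u\n", regions_to_expand);                    // prints 3
  return 0;
}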
void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
@ -1846,7 +1791,6 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
HeapWord* old_end = (HeapWord*) _g1_storage.high();
size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
ergo_verbose3(ErgoHeapSizing,
@ -1856,22 +1800,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
ergo_format_byte("attempted shrinking amount"),
shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
if (num_regions_removed > 0) {
_g1_storage.shrink_by(shrunk_bytes);
HeapWord* new_end = (HeapWord*) _g1_storage.high();
if (_hr_printer.is_active()) {
HeapWord* curr = old_end;
while (curr > new_end) {
HeapWord* curr_end = curr;
curr -= HeapRegion::GrainWords;
_hr_printer.uncommit(curr, curr_end);
}
}
_expansion_regions += num_regions_removed;
update_committed_space(old_end, new_end);
HeapRegionRemSet::shrink_heap(n_regions());
g1_policy()->record_new_heap_size(n_regions());
g1_policy()->record_new_heap_size(num_regions());
} else {
ergo_verbose0(ErgoHeapSizing,
"did not shrink the heap",
@ -1922,7 +1851,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_g1mm(NULL),
_refine_cte_cl(NULL),
_full_collection(false),
_free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
@ -2036,8 +1964,6 @@ jint G1CollectedHeap::initialize() {
_reserved.set_start((HeapWord*)heap_rs.base());
_reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
_expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
// Create the gen rem set (and barrier set) for the entire reserved region.
_rem_set = collector_policy()->create_rem_set(_reserved, 2);
set_barrier_set(rem_set()->bs());
@ -2051,20 +1977,65 @@ jint G1CollectedHeap::initialize() {
// Carve out the G1 part of the heap.
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
_g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
g1_rs.size()/HeapWordSize);
ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_mapper(g1_rs,
UseLargePages ? os::large_page_size() : os::vm_page_size(),
HeapRegion::GrainBytes,
1,
mtJavaHeap);
heap_storage->set_mapping_changed_listener(&_listener);
_g1_storage.initialize(g1_rs, 0);
_g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
_hrs.initialize((HeapWord*) _g1_reserved.start(),
(HeapWord*) _g1_reserved.end());
assert(_hrs.max_length() == _expansion_regions,
err_msg("max length: %u expansion regions: %u",
_hrs.max_length(), _expansion_regions));
// Reserve space for the block offset table. We do not support automatic uncommit
// for the card table at this time. BOT only.
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
mtGC);
// Do later initialization work for concurrent refinement.
_cg1r->init();
ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* cardtable_storage =
G1RegionToSpaceMapper::create_mapper(cardtable_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
mtGC);
// Reserve space for the card counts table.
ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* card_counts_storage =
G1RegionToSpaceMapper::create_mapper(card_counts_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
mtGC);
// Reserve space for prev and next bitmap.
size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
G1RegionToSpaceMapper* prev_bitmap_storage =
G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
CMBitMap::mark_distance(),
mtGC);
ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
G1RegionToSpaceMapper* next_bitmap_storage =
G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
CMBitMap::mark_distance(),
mtGC);
_hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
g1_barrier_set()->initialize(cardtable_storage);
// Do later initialization work for concurrent refinement.
_cg1r->init(card_counts_storage);
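Each of the auxiliary structures wired up above (block offset table, card table, card counts, and the two marking bitmaps) reserves space proportional to the reserved heap divided by the number of heap bytes covered per table byte, and is committed lazily in step with the heap regions through its mapper. A back-of-the-envelope sketch, where the per-structure ratios are illustrative assumptions rather than the VM's exact constants:

#include <cstddef>
#include <cstdio>

// Back-of-the-envelope sizing for heap-proportional side tables: one table byte
// covers a fixed number of heap bytes. The ratios below are illustrative
// assumptions, not HotSpot's exact constants.
static size_t table_bytes(size_t heap_bytes, size_t heap_bytes_per_table_byte) {
  return (heap_bytes + heap_bytes_per_table_byte - 1) / heap_bytes_per_table_byte;
}

int main() {
  const size_t heap = (size_t)1024 * 1024 * 1024;                   // assumed 1 GB heap
  printf("card table:  %zu KB\n", table_bytes(heap, 512) / 1024);   // 1 byte per 512 heap bytes
  printf("mark bitmap: %zu KB\n", table_bytes(heap, 64) / 1024);    // 1 bit per 64-bit heap word
  return 0;
}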
// 6843694 - ensure that the maximum region index can fit
// in the remembered set structures.
@ -2078,17 +2049,16 @@ jint G1CollectedHeap::initialize() {
FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
_bot_shared = new G1BlockOffsetSharedArray(_reserved,
heap_word_size(init_byte_size));
_bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
_g1h = this;
_in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
_humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
_in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
_humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
_cm = new ConcurrentMark(this, heap_rs);
_cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
if (_cm == NULL || !_cm->completed_initialization()) {
vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
return JNI_ENOMEM;
@ -2143,12 +2113,10 @@ jint G1CollectedHeap::initialize() {
// counts and that mechanism.
SpecializationStats::clear();
// Here we allocate the dummy full region that is required by the
// G1AllocRegion class. If we don't pass an address in the reserved
// space here, lots of asserts fire.
// Here we allocate the dummy HeapRegion that is required by the
// G1AllocRegion class.
HeapRegion* dummy_region = _hrs.get_dummy_region();
HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
_g1_reserved.start());
// We'll re-use the same region whether the alloc region will
// require BOT updates or not and, if it doesn't, then a non-young
// region will complain that it cannot support allocations without
@ -2264,7 +2232,7 @@ void G1CollectedHeap::ref_processing_init() {
}
size_t G1CollectedHeap::capacity() const {
return _g1_committed.byte_size();
return _hrs.length() * HeapRegion::GrainBytes;
}
void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
@ -2548,7 +2516,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
}
}
} else {
if (cause == GCCause::_gc_locker
if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
// Schedule a standard evacuation pause. We're setting word_size
@ -2569,8 +2537,8 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
}
bool G1CollectedHeap::is_in(const void* p) const {
if (_g1_committed.contains(p)) {
// Given that we know that p is in the committed space,
if (_hrs.reserved().contains(p)) {
// Given that we know that p is in the reserved space,
// heap_region_containing_raw() should successfully
// return the containing region.
HeapRegion* hr = heap_region_containing_raw(p);
@ -2580,6 +2548,18 @@ bool G1CollectedHeap::is_in(const void* p) const {
}
}
#ifdef ASSERT
bool G1CollectedHeap::is_in_exact(const void* p) const {
bool contains = reserved_region().contains(p);
bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
return contains && available;
}
#endif
// Iteration functions.
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
@ -2644,83 +2624,9 @@ void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
uint worker_id,
uint no_of_par_workers,
jint claim_value) {
const uint regions = n_regions();
const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
no_of_par_workers :
1);
assert(UseDynamicNumberOfGCThreads ||
no_of_par_workers == workers()->total_workers(),
"Non dynamic should use fixed number of workers");
// try to spread out the starting points of the workers
const HeapRegion* start_hr =
start_region_for_worker(worker_id, no_of_par_workers);
const uint start_index = start_hr->hrs_index();
// each worker will actually look at all regions
for (uint count = 0; count < regions; ++count) {
const uint index = (start_index + count) % regions;
assert(0 <= index && index < regions, "sanity");
HeapRegion* r = region_at(index);
// we'll ignore "continues humongous" regions (we'll process them
// when we come across their corresponding "start humongous"
// region) and regions already claimed
if (r->claim_value() == claim_value || r->continuesHumongous()) {
continue;
}
// OK, try to claim it
if (r->claimHeapRegion(claim_value)) {
// success!
assert(!r->continuesHumongous(), "sanity");
if (r->startsHumongous()) {
// If the region is "starts humongous" we'll iterate over its
// "continues humongous" first; in fact we'll do them
// first. The order is important. In one case, calling the
// closure on the "starts humongous" region might de-allocate
// and clear all its "continues humongous" regions and, as a
// result, we might end up processing them twice. So, we'll do
// them first (notice: most closures will ignore them anyway) and
// then we'll do the "starts humongous" region.
for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
HeapRegion* chr = region_at(ch_index);
// if the region has already been claimed or it's not
// "continues humongous" we're done
if (chr->claim_value() == claim_value ||
!chr->continuesHumongous()) {
break;
}
// No one should have claimed it directly. We can claim it given
// that we claimed its "starts humongous" region.
assert(chr->claim_value() != claim_value, "sanity");
assert(chr->humongous_start_region() == r, "sanity");
if (chr->claimHeapRegion(claim_value)) {
// we should always be able to claim it; no one else should
// be trying to claim this region
bool res2 = cl->doHeapRegion(chr);
assert(!res2, "Should not abort");
// Right now, this holds (i.e., no closure that actually
// does something with "continues humongous" regions
// clears them). We might have to weaken it in the future,
// but let's leave these two asserts here for extra safety.
assert(chr->continuesHumongous(), "should still be the case");
assert(chr->humongous_start_region() == r, "sanity");
} else {
guarantee(false, "we should not reach here");
}
}
}
assert(!r->continuesHumongous(), "sanity");
bool res = cl->doHeapRegion(r);
assert(!res, "Should not abort");
}
}
uint num_workers,
jint claim_value) const {
_hrs.par_iterate(cl, worker_id, num_workers, claim_value);
}
class ResetClaimValuesClosure: public HeapRegionClosure {
@ -2898,17 +2804,6 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
return result;
}
HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
uint no_of_par_workers) {
uint worker_num =
G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
assert(UseDynamicNumberOfGCThreads ||
no_of_par_workers == workers()->total_workers(),
"Non dynamic should use fixed number of workers");
const uint start_index = n_regions() * worker_i / worker_num;
return region_at(start_index);
}
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
HeapRegion* r = g1_policy()->collection_set();
while (r != NULL) {
@ -2951,15 +2846,11 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
}
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
// We're not using an iterator given that it will wrap around when
// it reaches the last region and this is not what we want here.
for (uint index = from->hrs_index() + 1; index < n_regions(); index++) {
HeapRegion* hr = region_at(index);
if (!hr->isHumongous()) {
return hr;
}
HeapRegion* result = _hrs.next_region_in_heap(from);
while (result != NULL && result->isHumongous()) {
result = _hrs.next_region_in_heap(result);
}
return NULL;
return result;
}
Space* G1CollectedHeap::space_containing(const void* addr) const {
@ -3017,7 +2908,7 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
}
size_t G1CollectedHeap::max_capacity() const {
return _g1_reserved.byte_size();
return _hrs.reserved().byte_size();
}
jlong G1CollectedHeap::millis_since_last_gc() {
@ -3546,9 +3437,9 @@ void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
capacity()/K, used_unlocked()/K);
st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
_g1_storage.low_boundary(),
_g1_storage.high(),
_g1_storage.high_boundary());
_hrs.reserved().start(),
_hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords,
_hrs.reserved().end());
st->cr();
st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
uint young_regions = _young_list->length();
@ -4239,10 +4130,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// No need for an ergo verbose message here,
// expansion_amount() does this when it returns a value > 0.
if (!expand(expand_bytes)) {
// We failed to expand the heap so let's verify that
// committed/uncommitted amount match the backing store
assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
// We failed to expand the heap. Cannot do anything about it.
}
}
}
@ -4302,10 +4190,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// RETIRE events are generated before the end GC event.
_hr_printer.end_gc(false /* full */, (size_t) total_collections());
if (mark_in_progress()) {
concurrent_mark()->update_g1_committed();
}
#ifdef TRACESPINNING
ParallelTaskTerminator::print_termination_counts();
#endif
@ -6140,6 +6024,7 @@ void G1CollectedHeap::free_region(HeapRegion* hr,
bool locked) {
assert(!hr->isHumongous(), "this is only for non-humongous regions");
assert(!hr->is_empty(), "the region should not be empty");
assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
assert(free_list != NULL, "pre-condition");
if (G1VerifyBitmaps) {
@ -6194,7 +6079,7 @@ void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
assert(list != NULL, "list can't be null");
if (!list->is_empty()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
_free_list.add_ordered(list);
_hrs.insert_list_into_free_list(list);
}
}
@ -6802,22 +6687,22 @@ void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
// this is that during a full GC string deduplication needs to know if
// a collected region was young or old when the full GC was initiated.
}
_free_list.remove_all();
_hrs.remove_all_free_regions();
}
class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
bool _free_list_only;
HeapRegionSet* _old_set;
FreeRegionList* _free_list;
HeapRegionSeq* _hrs;
size_t _total_used;
public:
RebuildRegionSetsClosure(bool free_list_only,
HeapRegionSet* old_set, FreeRegionList* free_list) :
HeapRegionSet* old_set, HeapRegionSeq* hrs) :
_free_list_only(free_list_only),
_old_set(old_set), _free_list(free_list), _total_used(0) {
assert(_free_list->is_empty(), "pre-condition");
_old_set(old_set), _hrs(hrs), _total_used(0) {
assert(_hrs->num_free_regions() == 0, "pre-condition");
if (!free_list_only) {
assert(_old_set->is_empty(), "pre-condition");
}
@ -6830,7 +6715,7 @@ public:
if (r->is_empty()) {
// Add free regions to the free list
_free_list->add_as_tail(r);
_hrs->insert_into_free_list(r);
} else if (!_free_list_only) {
assert(!r->is_young(), "we should not come across young regions");
@ -6858,7 +6743,7 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
_young_list->empty_list();
}
RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
heap_region_iterate(&cl);
if (!free_list_only) {
@ -7013,13 +6898,42 @@ void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForTenured);
}
HeapRegion* OldGCAllocRegion::release() {
HeapRegion* cur = get();
if (cur != NULL) {
// Determine how far we are from the next card boundary. If it is smaller than
// the minimum object size we can allocate into, expand into the next card.
HeapWord* top = cur->top();
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
if (to_allocate_words != 0) {
// We are not at a card boundary. Fill up, possibly into the next, taking the
// end of the region and the minimum object size into account.
to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
// Skip allocation if there is not enough space to allocate even the smallest
// possible object. In this case this region will not be retained, so the
// original problem cannot occur.
if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
CollectedHeap::fill_with_object(dummy, to_allocate_words);
}
}
}
return G1AllocRegion::release();
}
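The arithmetic in release() above is align-up-and-clamp logic. A standalone sketch with word-based sizes, where the card size and minimum filler size are assumed values standing in for G1BlockOffsetSharedArray::N_bytes and G1CollectedHeap::min_fill_size():

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Assumed stand-ins, expressed in words for simplicity; the real values come from the VM.
static const size_t kCardSizeWords = 64;
static const size_t kMinFillWords  = 2;

// How many words to pad so that the retained region's top lands on a card
// boundary, mirroring the logic in release() above.
// Returns 0 when nothing needs to be (or can be) filled.
static size_t padding_words(size_t top, size_t end) {
  size_t aligned_top = (top + kCardSizeWords - 1) / kCardSizeWords * kCardSizeWords;
  size_t to_allocate = aligned_top - top;
  if (to_allocate == 0) {
    return 0;                                            // already card aligned
  }
  // Pad at least a minimum-size object, but never past the end of the region.
  to_allocate = std::min(end - top, std::max(to_allocate, kMinFillWords));
  return (to_allocate >= kMinFillWords) ? to_allocate : 0;
}

int main() {
  printf("%zu\n", padding_words(100, 1000));             // 28 words up to the next card
  printf("%zu\n", padding_words(128, 1000));             // already aligned: 0
  return 0;
}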
// Heap region set verification
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
HeapRegionSet* _old_set;
HeapRegionSet* _humongous_set;
FreeRegionList* _free_list;
HeapRegionSeq* _hrs;
public:
HeapRegionSetCount _old_count;
@ -7028,8 +6942,8 @@ public:
VerifyRegionListsClosure(HeapRegionSet* old_set,
HeapRegionSet* humongous_set,
FreeRegionList* free_list) :
_old_set(old_set), _humongous_set(humongous_set), _free_list(free_list),
HeapRegionSeq* hrs) :
_old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
_old_count(), _humongous_count(), _free_count(){ }
bool doHeapRegion(HeapRegion* hr) {
@ -7043,7 +6957,7 @@ public:
assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
_humongous_count.increment(1u, hr->capacity());
} else if (hr->is_empty()) {
assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
_free_count.increment(1u, hr->capacity());
} else {
assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
@ -7052,7 +6966,7 @@ public:
return false;
}
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) {
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
old_set->total_capacity_bytes(), _old_count.capacity()));
@ -7061,26 +6975,17 @@ public:
guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length()));
guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
free_list->total_capacity_bytes(), _free_count.capacity()));
}
};
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
HeapWord* bottom) {
HeapWord* end = bottom + HeapRegion::GrainWords;
MemRegion mr(bottom, end);
assert(_g1_reserved.contains(mr), "invariant");
// This might return NULL if the allocation fails
return new HeapRegion(hrs_index, _bot_shared, mr);
}
void G1CollectedHeap::verify_region_sets() {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
// First, check the explicit lists.
_free_list.verify_list();
_hrs.verify();
{
// Given that a concurrent operation might be adding regions to
// the secondary free list we have to take the lock before
@ -7111,9 +7016,9 @@ void G1CollectedHeap::verify_region_sets() {
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
heap_region_iterate(&cl);
cl.verify_counts(&_old_set, &_humongous_set, &_free_list);
cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
}
// Optimized nmethod scanning

View File

@ -183,6 +183,13 @@ protected:
public:
OldGCAllocRegion()
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
// This specialization of release() makes sure that the last card that has been
// allocated into has been completely filled by a dummy object.
// This avoids races when remembered set scanning wants to update the BOT of the
// last card in the retained old gc alloc region while allocation threads are
// allocating into that card at the same time.
virtual HeapRegion* release();
};
// The G1 STW is alive closure.
@ -199,6 +206,13 @@ public:
class RefineCardTableEntryClosure;
class G1RegionMappingChangedListener : public G1MappingChangedListener {
private:
void reset_from_card_cache(uint start_idx, size_t num_regions);
public:
virtual void on_commit(uint start_idx, size_t num_regions);
};
class G1CollectedHeap : public SharedHeap {
friend class VM_CollectForMetadataAllocation;
friend class VM_G1CollectForAllocation;
@ -237,19 +251,9 @@ private:
static size_t _humongous_object_threshold_in_words;
// Storage for the G1 heap.
VirtualSpace _g1_storage;
MemRegion _g1_reserved;
// The part of _g1_storage that is currently committed.
MemRegion _g1_committed;
// The master free list. It will satisfy all new region allocations.
FreeRegionList _free_list;
// The secondary free list which contains regions that have been
// freed up during the cleanup process. This will be appended to the
// master free list when appropriate.
// freed up during the cleanup process. This will be appended to
// the master free list when appropriate.
FreeRegionList _secondary_free_list;
// It keeps track of the old regions.
@ -283,6 +287,9 @@ private:
// after heap shrinking (free_list_only == true).
void rebuild_region_sets(bool free_list_only);
// Callback for region mapping changed events.
G1RegionMappingChangedListener _listener;
// The sequence of all heap regions in the heap.
HeapRegionSeq _hrs;
@ -513,14 +520,6 @@ protected:
// humongous object, set is_old to true. If not, to false.
HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
// Attempt to satisfy a humongous allocation request of the given
// size by finding a contiguous set of free regions of num_regions
// length and remove them from the master free list. Return the
// index of the first region or G1_NULL_HRS_INDEX if the search
// was unsuccessful.
uint humongous_obj_allocate_find_first(uint num_regions,
size_t word_size);
// Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single
// humongous region.
@ -862,11 +861,6 @@ protected:
CodeBlobClosure* scan_strong_code,
uint worker_i);
// Notifies all the necessary spaces that the committed space has
// been updated (either expanded or shrunk). It should be called
// after _g1_storage is updated.
void update_committed_space(HeapWord* old_end, HeapWord* new_end);
// The concurrent marker (and the thread it runs in.)
ConcurrentMark* _cm;
ConcurrentMarkThread* _cmThread;
@ -1177,27 +1171,20 @@ public:
// But G1CollectedHeap doesn't yet support this.
virtual bool is_maximal_no_gc() const {
return _g1_storage.uncommitted_size() == 0;
return _hrs.available() == 0;
}
// The total number of regions in the heap.
uint n_regions() const { return _hrs.length(); }
// The current number of regions in the heap.
uint num_regions() const { return _hrs.length(); }
// The max number of regions in the heap.
uint max_regions() const { return _hrs.max_length(); }
// The number of regions that are completely free.
uint free_regions() const { return _free_list.length(); }
uint num_free_regions() const { return _hrs.num_free_regions(); }
// The number of regions that are not completely free.
uint used_regions() const { return n_regions() - free_regions(); }
// The number of regions available for "regular" expansion.
uint expansion_regions() const { return _expansion_regions; }
// Factory method for HeapRegion instances. It will return NULL if
// the allocation fails.
HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
uint num_used_regions() const { return num_regions() - num_free_regions(); }
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
@ -1246,7 +1233,7 @@ public:
#ifdef ASSERT
bool is_on_master_free_list(HeapRegion* hr) {
return hr->containing_set() == &_free_list;
return _hrs.is_free(hr);
}
#endif // ASSERT
@ -1258,7 +1245,7 @@ public:
}
void append_secondary_free_list() {
_free_list.add_ordered(&_secondary_free_list);
_hrs.insert_list_into_free_list(&_secondary_free_list);
}
void append_secondary_free_list_if_not_empty_with_lock() {
@ -1304,6 +1291,11 @@ public:
// Returns "TRUE" iff "p" points into the committed areas of the heap.
virtual bool is_in(const void* p) const;
#ifdef ASSERT
// Returns whether p is in one of the available areas of the heap. Slow but
// extensive version.
bool is_in_exact(const void* p) const;
#endif
// Return "TRUE" iff the given object address is within the collection
// set. Slow implementation.
@ -1364,25 +1356,19 @@ public:
// Return "TRUE" iff the given object address is in the reserved
// region of g1.
bool is_in_g1_reserved(const void* p) const {
return _g1_reserved.contains(p);
return _hrs.reserved().contains(p);
}
// Returns a MemRegion that corresponds to the space that has been
// reserved for the heap
MemRegion g1_reserved() {
return _g1_reserved;
}
// Returns a MemRegion that corresponds to the space that has been
// committed in the heap
MemRegion g1_committed() {
return _g1_committed;
MemRegion g1_reserved() const {
return _hrs.reserved();
}
virtual bool is_in_closed_subset(const void* p) const;
G1SATBCardTableModRefBS* g1_barrier_set() {
return (G1SATBCardTableModRefBS*) barrier_set();
G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
return (G1SATBCardTableLoggingModRefBS*) barrier_set();
}
// This resets the card table to all zeros. It is used after
@ -1416,6 +1402,8 @@ public:
// within the heap.
inline uint addr_to_region(HeapWord* addr) const;
inline HeapWord* bottom_addr_for_region(uint index) const;
// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
// overpartition factor, currently 4). Assumes that this will be called
@ -1429,10 +1417,10 @@ public:
// setting the claim value of the second and subsequent regions of the
// chunk.) For now requires that "doHeapRegion" always returns "false",
// i.e., that a closure never attempt to abort a traversal.
void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
uint worker,
uint no_of_par_workers,
jint claim_value);
void heap_region_par_iterate_chunked(HeapRegionClosure* cl,
uint worker_id,
uint num_workers,
jint claim_value) const;
// It resets all the region claim values to the default.
void reset_heap_region_claim_values();
@ -1457,11 +1445,6 @@ public:
// starting region for iterating over the current collection set.
HeapRegion* start_cset_region_for_worker(uint worker_i);
// This is a convenience method that is used by the
// HeapRegionIterator classes to calculate the starting region for
// each worker so that they do not all start from the same region.
HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
// Iterate over the regions (if any) in the current collection set.
void collection_set_iterate(HeapRegionClosure* blk);

View File

@ -47,19 +47,21 @@ inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
return _hrs.reserved().start() + index * HeapRegion::GrainWords;
}
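Both inline helpers are pure index arithmetic: an address maps to a region index by shifting its offset from the heap base by the log of the region size, and the inverse shift yields a region's bottom address. A small standalone illustration, assuming a 64-bit address space and a 1 MB region size (the real grain size is chosen ergonomically at VM start-up):

#include <cstdint>
#include <cstdio>

// Assumed region size of 1 MB (2^20 bytes).
static const unsigned kLogRegionBytes = 20;

// Address -> region index: a pure shift of the offset from the heap base,
// as in addr_to_region() above.
static uint32_t addr_to_region(uintptr_t heap_base, uintptr_t addr) {
  return (uint32_t)((addr - heap_base) >> kLogRegionBytes);
}

// Region index -> bottom address: the inverse mapping used by bottom_addr_for_region().
static uintptr_t region_bottom(uintptr_t heap_base, uint32_t index) {
  return heap_base + ((uintptr_t)index << kLogRegionBytes);
}

int main() {
  uintptr_t base = 0x100000000ULL;
  uintptr_t addr = base + 3 * ((uintptr_t)1 << kLogRegionBytes) + 42;
  printf("%u\n", addr_to_region(base, addr));                      // prints 3
  printf("%lx\n", (unsigned long)region_bottom(base, 3));          // base + 3 MB
  return 0;
}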
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
assert(addr != NULL, "invariant");
assert(_g1_reserved.contains((const void*) addr),
assert(is_in_g1_reserved((const void*) addr),
err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
return _hrs.addr_to_region((HeapWord*) addr);
}
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
HeapRegion* hr = heap_region_containing_raw(addr);
if (hr->continuesHumongous()) {
return hr->humongous_start_region();
@ -89,10 +91,9 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
return r != NULL && r->in_collection_set();
}
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
unsigned int* gc_count_before_ret,
int* gclocker_retry_count_ret) {
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
unsigned int* gc_count_before_ret,
int* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
@ -252,8 +253,7 @@ G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
}
}
inline bool
G1CollectedHeap::evacuation_should_fail() {
inline bool G1CollectedHeap::evacuation_should_fail() {
if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
return false;
}

View File

@ -456,7 +456,7 @@ void G1CollectorPolicy::init() {
} else {
_young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
}
_free_regions_at_end_of_collection = _g1->free_regions();
_free_regions_at_end_of_collection = _g1->num_free_regions();
update_young_list_target_length();
// We may immediately start allocating regions and placing them on the
@ -829,7 +829,7 @@ void G1CollectorPolicy::record_full_collection_end() {
record_survivor_regions(0, NULL, NULL);
_free_regions_at_end_of_collection = _g1->free_regions();
_free_regions_at_end_of_collection = _g1->num_free_regions();
// Reset survivors SurvRateGroup.
_survivor_surv_rate_group->reset();
update_young_list_target_length();
@ -1181,7 +1181,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
_in_marking_window = new_in_marking_window;
_in_marking_window_im = new_in_marking_window_im;
_free_regions_at_end_of_collection = _g1->free_regions();
_free_regions_at_end_of_collection = _g1->num_free_regions();
update_young_list_target_length();
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
@ -1203,7 +1203,7 @@ void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
_survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
_heap_capacity_bytes_before_gc = _g1->capacity();
_heap_used_bytes_before_gc = _g1->used();
_cur_collection_pause_used_regions_at_start = _g1->used_regions();
_cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
_eden_capacity_bytes_before_gc =
(_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
@ -1618,7 +1618,7 @@ void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
_collectionSetChooser->clear();
uint region_num = _g1->n_regions();
uint region_num = _g1->num_regions();
if (G1CollectedHeap::use_parallel_gc_threads()) {
const uint OverpartitionFactor = 4;
uint WorkUnit;
@ -1639,7 +1639,7 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
MinWorkUnit);
}
_collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
_collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
(int) WorkUnit);
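The work-unit computation above overpartitions the region space so that each worker claims several smaller chunks rather than one large one, which smooths out load imbalance. A standalone sketch of the arithmetic, where MinWorkUnit is an assumed lower bound rather than the value the VM derives:

#include <algorithm>
#include <cstdio>

// Sketch of the work-unit sizing: regions are overpartitioned by a factor of 4
// so that threads finishing early can claim further chunks.
static unsigned work_unit(unsigned region_num, unsigned num_threads) {
  const unsigned OverpartitionFactor = 4;
  const unsigned MinWorkUnit = 8;        // assumed lower bound for illustration
  return std::max(region_num / (num_threads * OverpartitionFactor), MinWorkUnit);
}

int main() {
  printf("%u\n", work_unit(2048, 8));    // 2048 / (8 * 4) = 64 regions per chunk
  printf("%u\n", work_unit(100, 8));     // clamped to the assumed minimum of 8
  return 0;
}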
@ -1936,7 +1936,7 @@ uint G1CollectorPolicy::calc_max_old_cset_length() {
// of them are available.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
const size_t region_num = g1h->n_regions();
const size_t region_num = g1h->num_regions();
const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
size_t result = region_num * perc / 100;
// emulate ceiling

View File

@ -33,7 +33,7 @@
G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
_g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
void G1HotCardCache::initialize() {
void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
if (default_use_cache()) {
_use_cache = true;
@ -49,7 +49,7 @@ void G1HotCardCache::initialize() {
_hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
_hot_cache_par_claimed_idx = 0;
_card_counts.initialize();
_card_counts.initialize(card_counts_storage);
}
}
@ -135,11 +135,8 @@ void G1HotCardCache::drain(uint worker_i,
// above, are discarded prior to re-enabling the cache near the end of the GC.
}
void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
_card_counts.resize(heap_capacity);
}
void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
assert(!hr->isHumongous(), "Should have been cleared");
_card_counts.clear_region(hr);
}

View File

@ -78,7 +78,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {
G1HotCardCache(G1CollectedHeap* g1h);
~G1HotCardCache();
void initialize();
void initialize(G1RegionToSpaceMapper* card_counts_storage);
bool use_cache() { return _use_cache; }
@ -115,9 +115,6 @@ class G1HotCardCache: public CHeapObj<mtGC> {
bool hot_cache_is_empty() { return _n_hot == 0; }
// Resizes the card counts table to match the given capacity
void resize_card_counts(size_t heap_capacity);
// Zeros the values in the card counts table for the entire committed heap
void reset_card_counts();

View File

@ -0,0 +1,167 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
#include "utilities/bitMap.inline.hpp"
G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
_high_boundary(NULL), _committed(), _page_size(0), _special(false), _executable(false) {
}
bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
if (!rs.is_reserved()) {
return false; // Allocation failed.
}
assert(_low_boundary == NULL, "VirtualSpace already initialized");
assert(page_size > 0, "Granularity must be non-zero.");
_low_boundary = rs.base();
_high_boundary = _low_boundary + rs.size();
_special = rs.special();
_executable = rs.executable();
_page_size = page_size;
assert(_committed.size() == 0, "virtual space initialized more than once");
uintx size_in_bits = rs.size() / page_size;
_committed.resize(size_in_bits, /* in_resource_area */ false);
return true;
}
G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
release();
}
void G1PageBasedVirtualSpace::release() {
// This does not release memory it never reserved.
// Caller must release via rs.release();
_low_boundary = NULL;
_high_boundary = NULL;
_special = false;
_executable = false;
_page_size = 0;
_committed.resize(0, false);
}
size_t G1PageBasedVirtualSpace::committed_size() const {
return _committed.count_one_bits() * _page_size;
}
size_t G1PageBasedVirtualSpace::reserved_size() const {
return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
}
size_t G1PageBasedVirtualSpace::uncommitted_size() const {
return reserved_size() - committed_size();
}
uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
return (addr - _low_boundary) / _page_size;
}
bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
uintptr_t end = start + size_in_pages;
return _committed.get_next_zero_offset(start, end) >= end;
}
bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
uintptr_t end = start + size_in_pages;
return _committed.get_next_one_offset(start, end) >= end;
}
char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
return _low_boundary + index * _page_size;
}
size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
return num * _page_size;
}
MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
// We need to make sure to commit all pages covered by the given area.
guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
if (!_special) {
os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
}
_committed.set_range(start, start + size_in_pages);
MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
return result;
}
MemRegion G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
guarantee(is_area_committed(start, size_in_pages), "checking");
if (!_special) {
os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
}
_committed.clear_range(start, start + size_in_pages);
MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
return result;
}
bool G1PageBasedVirtualSpace::contains(const void* p) const {
return _low_boundary <= (const char*) p && (const char*) p < _high_boundary;
}
#ifndef PRODUCT
void G1PageBasedVirtualSpace::print_on(outputStream* out) {
out->print ("Virtual space:");
if (special()) out->print(" (pinned in memory)");
out->cr();
out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
}
void G1PageBasedVirtualSpace::print() {
print_on(tty);
}
#endif
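The essence of G1PageBasedVirtualSpace is its page-granular bookkeeping: commit and uncommit take page indices, the _committed bitmap records which pages are backed, and committing an already-committed page (or uncommitting an unbacked one) is a hard error. A minimal sketch of that discipline with the actual os:: memory calls stubbed out:

#include <cassert>
#include <cstddef>
#include <vector>

// Page-granular commit bookkeeping; only the bitmap discipline of
// G1PageBasedVirtualSpace is modelled, real memory is never touched.
class PageCommitTracker {
  std::vector<bool> committed_;
public:
  explicit PageCommitTracker(size_t num_pages) : committed_(num_pages, false) {}

  void commit(size_t start, size_t num_pages) {
    for (size_t i = start; i < start + num_pages; i++) {
      assert(!committed_[i] && "page already committed");
      committed_[i] = true;               // the real code commits the OS pages here
    }
  }

  void uncommit(size_t start, size_t num_pages) {
    for (size_t i = start; i < start + num_pages; i++) {
      assert(committed_[i] && "page not committed");
      committed_[i] = false;              // the real code uncommits the OS pages here
    }
  }

  size_t committed_pages() const {
    size_t n = 0;
    for (size_t i = 0; i < committed_.size(); i++) {
      n += committed_[i] ? 1 : 0;
    }
    return n;
  }
};

int main() {
  PageCommitTracker space(16);
  space.commit(0, 4);
  space.uncommit(1, 2);
  return space.committed_pages() == 2 ? 0 : 1;
}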

View File

@ -0,0 +1,111 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/bitMap.hpp"
// Virtual space management helper for a virtual space with an OS page allocation
// granularity.
// (De-)Allocation requests are always OS page aligned by passing a page index
// and multiples of pages.
// The implementation gives an error when trying to commit pages that are already
// committed, or to uncommit pages that are not committed.
class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
// Reserved area addresses.
char* _low_boundary;
char* _high_boundary;
// The commit/uncommit granularity in bytes.
size_t _page_size;
// Bitmap used for verification of commit/uncommit operations.
BitMap _committed;
// Indicates that the entire space has been committed and pinned in memory, so
// os::commit_memory() and os::uncommit_memory() have no effect.
bool _special;
// Indicates whether the committed space should be executable.
bool _executable;
// Returns the index of the page which contains the given address.
uintptr_t addr_to_page_index(char* addr) const;
// Returns the address of the given page index.
char* page_start(uintptr_t index);
// Returns the byte size of the given number of pages.
size_t byte_size_for_pages(size_t num);
// Returns true if the entire area is backed by committed memory.
bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
// Returns true if the entire area is not backed by committed memory.
bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
public:
// Commit the given area of pages starting at start being size_in_pages large.
MemRegion commit(uintptr_t start, size_t size_in_pages);
// Uncommit the given area of pages starting at start being size_in_pages large.
MemRegion uncommit(uintptr_t start, size_t size_in_pages);
bool special() const { return _special; }
// Initialization
G1PageBasedVirtualSpace();
bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
// Destruction
~G1PageBasedVirtualSpace();
// Amount of reserved memory.
size_t reserved_size() const;
// Memory used in this virtual space.
size_t committed_size() const;
// Memory left to use/expand in this virtual space.
size_t uncommitted_size() const;
bool contains(const void* p) const;
MemRegion reserved() {
MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize);
return x;
}
void release();
void check_for_contiguity() PRODUCT_RETURN;
// Debugging
void print_on(outputStream* out) PRODUCT_RETURN;
void print();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP

View File

@ -0,0 +1,159 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/bitMap.inline.hpp"
G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
size_t commit_granularity,
size_t region_granularity,
MemoryType type) :
_storage(),
_commit_granularity(commit_granularity),
_region_granularity(region_granularity),
_listener(NULL),
_commit_map() {
guarantee(is_power_of_2(commit_granularity), "must be");
guarantee(is_power_of_2(region_granularity), "must be");
_storage.initialize_with_granularity(rs, commit_granularity);
MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
// G1RegionToSpaceMapper implementation where the region granularity is larger than
// or the same as the commit granularity.
// Basically, the space corresponding to one region spans several OS pages.
class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
private:
size_t _pages_per_region;
public:
G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
size_t os_commit_granularity,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
_pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
_storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
_commit_map.set_range(start_idx, start_idx + num_regions);
fire_on_commit(start_idx, num_regions);
}
virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
_storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
_commit_map.clear_range(start_idx, start_idx + num_regions);
}
};
// G1RegionToSpaceMapper implementation where the region granularity is smaller
// than the commit granularity.
// Basically, the contents of one OS page span several regions.
class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
private:
class CommitRefcountArray : public G1BiasedMappedArray<uint> {
protected:
virtual uint default_value() const { return 0; }
};
size_t _regions_per_page;
CommitRefcountArray _refcounts;
uintptr_t region_idx_to_page_idx(uint region) const {
return region / _regions_per_page;
}
public:
G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
size_t os_commit_granularity,
size_t alloc_granularity,
size_t commit_factor,
MemoryType type) :
G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
_regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
_refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
_commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
}
virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
uintptr_t idx = region_idx_to_page_idx(i);
uint old_refcount = _refcounts.get_by_index(idx);
if (old_refcount == 0) {
_storage.commit(idx, 1);
}
_refcounts.set_by_index(idx, old_refcount + 1);
_commit_map.set_bit(i);
fire_on_commit(i, 1);
}
}
virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
uintptr_t idx = region_idx_to_page_idx(i);
uint old_refcount = _refcounts.get_by_index(idx);
assert(old_refcount > 0, "must be");
if (old_refcount == 1) {
_storage.uncommit(idx, 1);
}
_refcounts.set_by_index(idx, old_refcount - 1);
_commit_map.clear_bit(i);
}
}
};
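// A minimal, self-contained sketch of the page refcounting scheme used by the
// mapper above, assuming ordinary standard-library containers; it models only
// the commit/uncommit bookkeeping and is not part of this file. Several regions
// share one OS page, so the page is committed when its refcount goes 0 -> 1 and
// uncommitted when it drops back to 0.
#include <cassert>
#include <cstddef>
#include <map>
struct PageRefcountModel {
  std::map<size_t, unsigned> refcounts;  // page index -> committed regions on that page
  size_t os_commits = 0;                 // simulated page commit calls
  size_t os_uncommits = 0;               // simulated page uncommit calls
  void commit_region(size_t page_idx) {
    if (refcounts[page_idx]++ == 0) {
      os_commits++;                      // first region on this page
    }
  }
  void uncommit_region(size_t page_idx) {
    assert(refcounts[page_idx] > 0);
    if (--refcounts[page_idx] == 0) {
      os_uncommits++;                    // last region left this page
    }
  }
};
int main() {
  PageRefcountModel m;
  m.commit_region(0);
  m.commit_region(0);                    // second region on the same page: no OS call
  m.uncommit_region(0);
  m.uncommit_region(0);                  // page is empty again: one OS uncommit
  assert(m.os_commits == 1 && m.os_uncommits == 1);
  return 0;
}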
void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions) {
if (_listener != NULL) {
_listener->on_commit(start_idx, num_regions);
}
}
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
size_t os_commit_granularity,
size_t region_granularity,
size_t commit_factor,
MemoryType type) {
if (region_granularity >= (os_commit_granularity * commit_factor)) {
return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
} else {
return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
}
}
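// A small, self-contained model of the dispatch rule in create_mapper above and
// of the _pages_per_region arithmetic, using assumed example sizes (1 MB regions,
// 4 KB pages, commit_factor 1); it mirrors the expressions in this file but is
// not HotSpot code.
#include <cassert>
#include <cstddef>
static bool uses_larger_than_commit_size_mapper(size_t region_granularity,
                                                size_t os_commit_granularity,
                                                size_t commit_factor) {
  // Mirrors: region_granularity >= (os_commit_granularity * commit_factor)
  return region_granularity >= os_commit_granularity * commit_factor;
}
int main() {
  const size_t region_granularity = 1024 * 1024;  // assumed 1 MB regions
  const size_t page_size          = 4 * 1024;     // assumed 4 KB OS pages
  const size_t commit_factor      = 1;            // one data byte per heap byte
  // A region spans many pages here, so the "larger than commit size" mapper is
  // chosen and committing one region touches 256 pages.
  assert(uses_larger_than_commit_size_mapper(region_granularity, page_size, commit_factor));
  assert(region_granularity / (page_size * commit_factor) == 256);
  return 0;
}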

View File

@ -0,0 +1,83 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP
#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC {
public:
// Fired after commit of the memory, i.e. the memory this listener is registered
// for can be accessed.
virtual void on_commit(uint start_idx, size_t num_regions) = 0;
};
// Maps region based commit/uncommit requests to the underlying page sized virtual
// space.
class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
private:
G1MappingChangedListener* _listener;
protected:
// Backing storage.
G1PageBasedVirtualSpace _storage;
size_t _commit_granularity;
size_t _region_granularity;
// Mapping management
BitMap _commit_map;
G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
void fire_on_commit(uint start_idx, size_t num_regions);
public:
MemRegion reserved() { return _storage.reserved(); }
void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; }
virtual ~G1RegionToSpaceMapper() {
_commit_map.resize(0, /* in_resource_area */ false);
}
bool is_committed(uintptr_t idx) const {
return _commit_map.at(idx);
}
virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
// Creates an appropriate G1RegionToSpaceMapper for the given parameters.
// The byte_translation_factor defines how many bytes in a region correspond to
// a single byte in the data structure this mapper is for.
// E.g. for the card table, this value is the number of heap bytes that a
// single card table entry covers.
static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
size_t os_commit_granularity,
size_t region_granularity,
size_t byte_translation_factor,
MemoryType type);
};
#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */
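// A minimal sketch of the byte_translation_factor described above, using assumed
// numbers (1 MB regions, 512 heap bytes covered per card table entry); the
// arithmetic only illustrates how much backing storage one region needs in such
// a data structure and is not part of this header.
#include <cassert>
#include <cstddef>
int main() {
  const size_t region_bytes            = 1024 * 1024;  // assumed region size
  const size_t byte_translation_factor = 512;          // assumed heap bytes per card table entry
  // One byte of the mapped data structure covers byte_translation_factor heap
  // bytes, so each region needs region_bytes / byte_translation_factor bytes of
  // backing storage: 2 KB of card table per 1 MB region in this example.
  assert(region_bytes / byte_translation_factor == 2048);
  return 0;
}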

View File

@ -540,6 +540,12 @@ G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
bool check_for_refs_into_cset) {
assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap",
p2i(card_ptr),
_ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
_ct_bs->addr_for(card_ptr),
_g1->addr_to_region(_ct_bs->addr_for(card_ptr))));
// If the card is no longer dirty, nothing to do.
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/satbQueue.hpp"
@ -38,7 +39,6 @@ G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
_kind = G1SATBCT;
}
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
// Nulls should have been already filtered.
assert(pre_val->is_oop(true), "Error");
@ -125,13 +125,52 @@ void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
}
#endif
void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions) {
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
_card_table->clear(mr);
}
G1SATBCardTableLoggingModRefBS::
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
int max_covered_regions) :
G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
_dcqs(JavaThread::dirty_card_queue_set())
_dcqs(JavaThread::dirty_card_queue_set()),
_listener()
{
_kind = G1SATBCTLogging;
_listener.set_card_table(this);
}
void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
mapper->set_mapping_changed_listener(&_listener);
_byte_map_size = mapper->reserved().byte_size();
_guard_index = cards_required(_whole_heap.word_size()) - 1;
_last_valid_index = _guard_index - 1;
HeapWord* low_bound = _whole_heap.start();
HeapWord* high_bound = _whole_heap.end();
_cur_covered_regions = 1;
_covered[0] = _whole_heap;
_byte_map = (jbyte*) mapper->reserved().start();
byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
if (TraceCardTableModRefBS) {
gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
gclog_or_tty->print_cr(" "
" &_byte_map[0]: " INTPTR_FORMAT
" &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
p2i(&_byte_map[0]),
p2i(&_byte_map[_last_valid_index]));
gclog_or_tty->print_cr(" "
" byte_map_base: " INTPTR_FORMAT,
p2i(byte_map_base));
}
}
void

View File

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/memRegion.hpp"
#include "oops/oop.inline.hpp"
@ -33,6 +34,7 @@
#if INCLUDE_ALL_GCS
class DirtyCardQueueSet;
class G1SATBCardTableLoggingModRefBS;
// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.
@ -126,18 +128,40 @@ public:
jbyte val = _byte_map[card_index];
return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
}
};
class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
private:
G1SATBCardTableLoggingModRefBS* _card_table;
public:
G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
virtual void on_commit(uint start_idx, size_t num_regions);
};
// Adds card-table logging to the post-barrier.
// Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
friend class G1SATBCardTableLoggingModRefBSChangedListener;
private:
G1SATBCardTableLoggingModRefBSChangedListener _listener;
DirtyCardQueueSet& _dcqs;
public:
static size_t compute_size(size_t mem_region_size_in_words) {
size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
int max_covered_regions);
virtual void initialize() { }
virtual void initialize(G1RegionToSpaceMapper* mapper);
virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
bool is_a(BarrierSet::Name bsn) {
return bsn == BarrierSet::G1SATBCTLogging ||
G1SATBCardTableModRefBS::is_a(bsn);
@ -154,8 +178,6 @@ class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
void write_region_work(MemRegion mr) { invalidate(mr); }
void write_ref_array_work(MemRegion mr) { invalidate(mr); }
};

View File

@ -345,11 +345,6 @@ HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
return low;
}
#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
HeapRegion::HeapRegion(uint hrs_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :
@ -361,7 +356,7 @@ HeapRegion::HeapRegion(uint hrs_index,
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false),
_next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
_containing_set(NULL),
#endif // ASSERT
@ -370,14 +365,20 @@ HeapRegion::HeapRegion(uint hrs_index,
_predicted_bytes_to_copy(0)
{
_rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
initialize(mr);
}
void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
assert(_rem_set->is_empty(), "Remembered set must be empty");
G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
_orig_end = mr.end();
// Note that initialize() will set the start of the unmarked area of the
// region.
hr_clear(false /*par*/, false /*clear_space*/);
set_top(bottom());
record_top_and_timestamp();
assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
}
CompactibleSpace* HeapRegion::next_compaction_space() const {
@ -905,7 +906,7 @@ void HeapRegion::verify(VerifyOption vo,
}
// If it returns false, verify_for_object() will output the
// appropriate messasge.
// appropriate message.
if (do_bot_verify &&
!g1->is_obj_dead(obj, this) &&
!_offsets.verify_for_object(p, obj_size)) {
@ -1036,8 +1037,7 @@ void G1OffsetTableContigSpace::clear(bool mangle_space) {
set_top(bottom());
set_saved_mark_word(bottom());
CompactibleSpace::clear(mangle_space);
_offsets.zero_bottom_entry();
_offsets.initialize_threshold();
reset_bot();
}
void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
@ -1127,9 +1127,11 @@ G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
_gc_time_stamp(0)
{
_offsets.set_space(this);
// false ==> we'll do the clearing if there's clearing to be done.
CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
_top = bottom();
_offsets.zero_bottom_entry();
_offsets.initialize_threshold();
}
void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
CompactibleSpace::initialize(mr, clear_space, mangle_space);
_top = bottom();
reset_bot();
}

View File

@ -62,7 +62,7 @@ class nmethod;
p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
// sentinel value for hrs_index
#define G1_NULL_HRS_INDEX ((uint) -1)
#define G1_NO_HRS_INDEX ((uint) -1)
// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
@ -146,6 +146,9 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
HeapWord* top() const { return _top; }
protected:
// Reset the G1OffsetTableContigSpace.
virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
HeapWord** top_addr() { return &_top; }
// Allocation helpers (return NULL if full).
inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
@ -200,8 +203,7 @@ class G1OffsetTableContigSpace: public CompactibleSpace {
virtual void print() const;
void reset_bot() {
_offsets.zero_bottom_entry();
_offsets.initialize_threshold();
_offsets.reset_bot();
}
void update_bot_for_object(HeapWord* start, size_t word_size) {
@ -264,7 +266,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
#ifdef ASSERT
HeapRegionSetBase* _containing_set;
#endif // ASSERT
bool _pending_removal;
// For parallel heapRegion traversal.
jint _claimed;
@ -333,6 +334,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr);
// Initializing the HeapRegion not only resets the data structure, but also
// resets the BOT for that heap region.
// The default value for clear_space means that we will do the clearing
// ourselves if there's clearing to be done. We also always mangle the space.
virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
static int LogOfHRGrainBytes;
static int LogOfHRGrainWords;
@ -553,26 +560,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// to provide a dummy version of it.
#endif // ASSERT
// If we want to remove regions from a list in bulk we can simply tag
// them with the pending_removal tag and call the
// remove_all_pending() method on the list.
bool pending_removal() { return _pending_removal; }
void set_pending_removal(bool pending_removal) {
if (pending_removal) {
assert(!_pending_removal && containing_set() != NULL,
"can only set pending removal to true if it's false and "
"the region belongs to a region set");
} else {
assert( _pending_removal && containing_set() == NULL,
"can only set pending removal to false if it's true and "
"the region does not belong to a region set");
}
_pending_removal = pending_removal;
}
HeapRegion* get_next_young_region() { return _next_young_region; }
void set_next_young_region(HeapRegion* hr) {
_next_young_region = hr;

View File

@ -373,17 +373,17 @@ void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
_max_regions,
&_static_mem_size);
for (uint i = 0; i < n_par_rs; i++) {
for (uint j = 0; j < _max_regions; j++) {
set(i, j, InvalidCard);
}
}
invalidate(0, _max_regions);
}
void FromCardCache::shrink(uint new_num_regions) {
void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
guarantee((size_t)start_idx + new_num_regions <= max_uintx,
err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
start_idx, new_num_regions));
for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
assert(new_num_regions <= _max_regions, "Must be within max.");
for (uint j = new_num_regions; j < _max_regions; j++) {
uint end_idx = (start_idx + (uint)new_num_regions);
assert(end_idx <= _max_regions, "Must be within max.");
for (uint j = start_idx; j < end_idx; j++) {
set(i, j, InvalidCard);
}
}
@ -407,12 +407,12 @@ void FromCardCache::clear(uint region_idx) {
}
}
void OtherRegionsTable::init_from_card_cache(uint max_regions) {
void OtherRegionsTable::initialize(uint max_regions) {
FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}
void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
FromCardCache::shrink(new_num_regions);
void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
FromCardCache::invalidate(start_idx, num_regions);
}
void OtherRegionsTable::print_from_card_cache() {
@ -841,7 +841,7 @@ HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
: _bosa(bosa),
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
_code_roots(), _other_regions(hr, &_m) {
_code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
reset_for_par_iteration();
}

View File

@ -84,7 +84,7 @@ class FromCardCache : public AllStatic {
static void initialize(uint n_par_rs, uint max_num_regions);
static void shrink(uint new_num_regions);
static void invalidate(uint start_idx, size_t num_regions);
static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
@ -213,11 +213,11 @@ public:
// Declare the heap size (in # of regions) to the OtherRegionsTable.
// (Uses it to initialize from_card_cache).
static void init_from_card_cache(uint max_regions);
static void initialize(uint max_regions);
// Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
// Make sure any entries for higher regions are invalid.
static void shrink_from_card_cache(uint new_num_regions);
// Declares that regions between start_idx <= i < start_idx + num_regions are
// not in use. Make sure that any entries for these regions are invalid.
static void invalidate(uint start_idx, size_t num_regions);
static void print_from_card_cache();
};
@ -400,12 +400,11 @@ public:
// Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
// (Uses it to initialize from_card_cache).
static void init_heap(uint max_regions) {
OtherRegionsTable::init_from_card_cache(max_regions);
OtherRegionsTable::initialize(max_regions);
}
// Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
static void shrink_heap(uint new_n_regs) {
OtherRegionsTable::shrink_from_card_cache(new_n_regs);
static void invalidate(uint start_idx, uint num_regions) {
OtherRegionsTable::invalidate(start_idx, num_regions);
}
#ifndef PRODUCT

View File

@ -25,236 +25,426 @@
#include "precompiled.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "memory/allocation.hpp"
// Private
void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts) {
_allocated_heapregions_length = 0;
uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
uint len = length();
assert(num > 1, "use this only for sequences of length 2 or greater");
assert(from <= len,
err_msg("from: %u should be valid and <= than %u", from, len));
_heap_mapper = heap_storage;
uint curr = from;
uint first = G1_NULL_HRS_INDEX;
uint num_so_far = 0;
while (curr < len && num_so_far < num) {
if (at(curr)->is_empty()) {
if (first == G1_NULL_HRS_INDEX) {
first = curr;
num_so_far = 1;
} else {
num_so_far += 1;
}
} else {
first = G1_NULL_HRS_INDEX;
num_so_far = 0;
_prev_bitmap_mapper = prev_bitmap;
_next_bitmap_mapper = next_bitmap;
_bot_mapper = bot;
_cardtable_mapper = cardtable;
_card_counts_mapper = card_counts;
MemRegion reserved = heap_storage->reserved();
_regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
_available_map.resize(_regions.length(), false);
_available_map.clear();
}
bool HeapRegionSeq::is_available(uint region) const {
return _available_map.at(region);
}
#ifdef ASSERT
bool HeapRegionSeq::is_free(HeapRegion* hr) const {
return _free_list.contains(hr);
}
#endif
HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) {
HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index);
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
assert(reserved().contains(mr), "invariant");
return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr);
}
void HeapRegionSeq::commit_regions(uint index, size_t num_regions) {
guarantee(num_regions > 0, "Must commit more than zero regions");
guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");
_num_committed += (uint)num_regions;
_heap_mapper->commit_regions(index, num_regions);
// Also commit auxiliary data
_prev_bitmap_mapper->commit_regions(index, num_regions);
_next_bitmap_mapper->commit_regions(index, num_regions);
_bot_mapper->commit_regions(index, num_regions);
_cardtable_mapper->commit_regions(index, num_regions);
_card_counts_mapper->commit_regions(index, num_regions);
}
void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) {
guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start));
guarantee(_num_committed >= num_regions, "pre-condition");
// Print before uncommitting.
if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
for (uint i = start; i < start + num_regions; i++) {
HeapRegion* hr = at(i);
G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end());
}
curr += 1;
}
assert(num_so_far <= num, "post-condition");
if (num_so_far == num) {
// we found enough space for the humongous object
assert(from <= first && first < len, "post-condition");
assert(first < curr && (curr - first) == num, "post-condition");
for (uint i = first; i < first + num; ++i) {
assert(at(i)->is_empty(), "post-condition");
_num_committed -= (uint)num_regions;
_available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
_heap_mapper->uncommit_regions(start, num_regions);
// Also uncommit auxiliary data
_prev_bitmap_mapper->uncommit_regions(start, num_regions);
_next_bitmap_mapper->uncommit_regions(start, num_regions);
_bot_mapper->uncommit_regions(start, num_regions);
_cardtable_mapper->uncommit_regions(start, num_regions);
_card_counts_mapper->uncommit_regions(start, num_regions);
}
void HeapRegionSeq::make_regions_available(uint start, uint num_regions) {
guarantee(num_regions > 0, "No point in calling this for zero regions");
commit_regions(start, num_regions);
for (uint i = start; i < start + num_regions; i++) {
if (_regions.get_by_index(i) == NULL) {
HeapRegion* new_hr = new_heap_region(i);
_regions.set_by_index(i, new_hr);
_allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
}
return first;
}
_available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);
for (uint i = start; i < start + num_regions; i++) {
assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i));
HeapRegion* hr = at(i);
if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end());
}
HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
hr->initialize(mr);
insert_into_free_list(at(i));
}
}
uint HeapRegionSeq::expand_by(uint num_regions) {
return expand_at(0, num_regions);
}
uint HeapRegionSeq::expand_at(uint start, uint num_regions) {
if (num_regions == 0) {
return 0;
}
uint cur = start;
uint idx_last_found = 0;
uint num_last_found = 0;
uint expanded = 0;
while (expanded < num_regions &&
(num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
uint to_expand = MIN2(num_regions - expanded, num_last_found);
make_regions_available(idx_last_found, to_expand);
expanded += to_expand;
cur = idx_last_found + num_last_found + 1;
}
verify_optional();
return expanded;
}
uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) {
uint found = 0;
size_t length_found = 0;
uint cur = 0;
while (length_found < num && cur < max_length()) {
HeapRegion* hr = _regions.get_by_index(cur);
if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
// This region is a potential candidate for allocation into.
length_found++;
} else {
// This region is not a candidate. The next region is the next possible one.
found = cur + 1;
length_found = 0;
}
cur++;
}
if (length_found == num) {
for (uint i = found; i < (found + num); i++) {
HeapRegion* hr = _regions.get_by_index(i);
// sanity check
guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
" that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr)));
}
return found;
} else {
// we failed to find enough space for the humongous object
return G1_NULL_HRS_INDEX;
return G1_NO_HRS_INDEX;
}
}
// Public
void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned");
assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned");
_next_search_index = 0;
_allocated_length = 0;
_regions.initialize(bottom, end, HeapRegion::GrainBytes);
}
MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
HeapWord* new_end,
FreeRegionList* list) {
assert(old_end < new_end, "don't call it otherwise");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* next_bottom = old_end;
assert(heap_bottom() <= next_bottom, "invariant");
while (next_bottom < new_end) {
assert(next_bottom < heap_end(), "invariant");
uint index = length();
assert(index < max_length(), "otherwise we cannot expand further");
if (index == 0) {
// We have not allocated any regions so far
assert(next_bottom == heap_bottom(), "invariant");
} else {
// next_bottom should match the end of the last/previous region
assert(next_bottom == at(index - 1)->end(), "invariant");
HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const {
guarantee(r != NULL, "Start region must be a valid region");
guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index()));
for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) {
HeapRegion* hr = _regions.get_by_index(i);
if (is_available(i)) {
return hr;
}
if (index == _allocated_length) {
// We have to allocate a new HeapRegion.
HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
if (new_hr == NULL) {
// allocation failed, we bail out and return what we have done so far
return MemRegion(old_end, next_bottom);
}
assert(_regions.get_by_index(index) == NULL, "invariant");
_regions.set_by_index(index, new_hr);
increment_allocated_length();
}
// Have to increment the length first, otherwise we will get an
// assert failure at(index) below.
increment_length();
HeapRegion* hr = at(index);
list->add_as_tail(hr);
next_bottom = hr->end();
}
assert(next_bottom == new_end, "post-condition");
return MemRegion(old_end, next_bottom);
}
uint HeapRegionSeq::free_suffix() {
uint res = 0;
uint index = length();
while (index > 0) {
index -= 1;
if (!at(index)->is_empty()) {
break;
}
res += 1;
}
return res;
}
uint HeapRegionSeq::find_contiguous(uint num) {
assert(num > 1, "use this only for sequences of length 2 or greater");
assert(_next_search_index <= length(),
err_msg("_next_search_index: %u should be valid and <= than %u",
_next_search_index, length()));
uint start = _next_search_index;
uint res = find_contiguous_from(start, num);
if (res == G1_NULL_HRS_INDEX && start > 0) {
// Try starting from the beginning. If _next_search_index was 0,
// no point in doing this again.
res = find_contiguous_from(0, num);
}
if (res != G1_NULL_HRS_INDEX) {
assert(res < length(), err_msg("res: %u should be valid", res));
_next_search_index = res + num;
assert(_next_search_index <= length(),
err_msg("_next_search_index: %u should be valid and <= than %u",
_next_search_index, length()));
}
return res;
return NULL;
}
void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
iterate_from((HeapRegion*) NULL, blk);
}
uint len = max_length();
void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
uint hr_index = 0;
if (hr != NULL) {
hr_index = hr->hrs_index();
}
uint len = length();
for (uint i = hr_index; i < len; i += 1) {
for (uint i = 0; i < len; i++) {
if (!is_available(i)) {
continue;
}
guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i));
bool res = blk->doHeapRegion(at(i));
if (res) {
blk->incomplete();
return;
}
}
for (uint i = 0; i < hr_index; i += 1) {
bool res = blk->doHeapRegion(at(i));
}
uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
guarantee(res_idx != NULL, "checking");
guarantee(start_idx <= (max_length() + 1), "checking");
uint num_regions = 0;
uint cur = start_idx;
while (cur < max_length() && is_available(cur)) {
cur++;
}
if (cur == max_length()) {
return num_regions;
}
*res_idx = cur;
while (cur < max_length() && !is_available(cur)) {
cur++;
}
num_regions = cur - *res_idx;
#ifdef ASSERT
for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
assert(!is_available(i), "just checking");
}
assert(cur == max_length() || num_regions == 0 || is_available(cur),
err_msg("The region at the current position %u must be available or at the end of the heap.", cur));
#endif
return num_regions;
}
uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
return num_regions * worker_i / num_workers;
}
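// A self-contained model of the start index calculation above, showing how the
// starting points are spread over the region range so that workers do not all
// begin claiming from region 0; the worker and region counts are assumptions for
// the example only.
#include <cassert>
static unsigned start_region_for_worker_model(unsigned worker_i,
                                              unsigned num_workers,
                                              unsigned num_regions) {
  return num_regions * worker_i / num_workers;  // same formula as above
}
int main() {
  // With 4 workers over 10 regions the starting indices are 0, 2, 5 and 7.
  assert(start_region_for_worker_model(0, 4, 10) == 0);
  assert(start_region_for_worker_model(1, 4, 10) == 2);
  assert(start_region_for_worker_model(2, 4, 10) == 5);
  assert(start_region_for_worker_model(3, 4, 10) == 7);
  return 0;
}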
void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
// Every worker will actually look at all regions, skipping over regions that
// are currently not committed.
// This also (potentially) iterates over regions newly allocated during GC. This
// is no problem except for some extra work.
for (uint count = 0; count < _allocated_heapregions_length; count++) {
const uint index = (start_index + count) % _allocated_heapregions_length;
assert(0 <= index && index < _allocated_heapregions_length, "sanity");
// Skip over unavailable regions
if (!is_available(index)) {
continue;
}
HeapRegion* r = _regions.get_by_index(index);
// We'll ignore "continues humongous" regions (we'll process them
// when we come across their corresponding "start humongous"
// region) and regions already claimed.
if (r->claim_value() == claim_value || r->continuesHumongous()) {
continue;
}
// OK, try to claim it
if (!r->claimHeapRegion(claim_value)) {
continue;
}
// Success!
if (r->startsHumongous()) {
// If the region is "starts humongous" we'll iterate over its
// "continues humongous" first; in fact we'll do them
// first. The order is important. In one case, calling the
// closure on the "starts humongous" region might de-allocate
// and clear all its "continues humongous" regions and, as a
// result, we might end up processing them twice. So, we'll do
// them first (note: most closures will ignore them anyway) and
// then we'll do the "starts humongous" region.
for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
HeapRegion* chr = _regions.get_by_index(ch_index);
assert(chr->continuesHumongous(), "Must be humongous region");
assert(chr->humongous_start_region() == r,
err_msg("Must work on humongous continuation of the original start region "
PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
assert(chr->claim_value() != claim_value,
"Must not have been claimed yet because claiming of humongous continuation first claims the start region");
bool claim_result = chr->claimHeapRegion(claim_value);
// We should always be able to claim it; no one else should
// be trying to claim this region.
guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
bool res2 = blk->doHeapRegion(chr);
if (res2) {
return;
}
// Right now, this holds (i.e., no closure that actually
// does something with "continues humongous" regions
// clears them). We might have to weaken it in the future,
// but let's leave these two asserts here for extra safety.
assert(chr->continuesHumongous(), "should still be the case");
assert(chr->humongous_start_region() == r, "sanity");
}
}
bool res = blk->doHeapRegion(r);
if (res) {
blk->incomplete();
return;
}
}
}
uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
// Reset this in case it's currently pointing into the regions that
// we just removed.
_next_search_index = 0;
assert(length() > 0, "the region sequence should not be empty");
assert(length() <= _allocated_length, "invariant");
assert(_allocated_length > 0, "we should have at least one region committed");
assert(length() <= _allocated_heapregions_length, "invariant");
assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
assert(num_regions_to_remove < length(), "We should never remove all regions");
uint i = 0;
for (; i < num_regions_to_remove; i++) {
HeapRegion* cur = at(length() - 1);
if (!cur->is_empty()) {
// We have to give up if the region can not be moved
break;
if (num_regions_to_remove == 0) {
return 0;
}
assert(!cur->isHumongous(), "Humongous regions should not be empty");
decrement_length();
uint removed = 0;
uint cur = _allocated_heapregions_length - 1;
uint idx_last_found = 0;
uint num_last_found = 0;
while ((removed < num_regions_to_remove) &&
(num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
// Only allow uncommit from the end of the heap.
if ((idx_last_found + num_last_found) != _allocated_heapregions_length) {
return 0;
}
uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);
uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove);
cur -= num_last_found;
removed += to_remove;
}
return i;
verify_optional();
return removed;
}
#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
guarantee(length() <= _allocated_length,
err_msg("invariant: _length: %u _allocated_length: %u",
length(), _allocated_length));
guarantee(_allocated_length <= max_length(),
err_msg("invariant: _allocated_length: %u _max_length: %u",
_allocated_length, max_length()));
guarantee(_next_search_index <= length(),
err_msg("invariant: _next_search_index: %u _length: %u",
_next_search_index, length()));
uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
guarantee(start_idx < _allocated_heapregions_length, "checking");
guarantee(res_idx != NULL, "checking");
uint num_regions_found = 0;
jlong cur = start_idx;
while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
cur--;
}
if (cur == -1) {
return num_regions_found;
}
jlong old_cur = cur;
// cur indexes the first empty region
while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
cur--;
}
*res_idx = cur + 1;
num_regions_found = old_cur - cur;
#ifdef ASSERT
for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
assert(at(i)->is_empty(), "just checking");
}
#endif
return num_regions_found;
}
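// A standalone model of the backwards scan above: starting from an index, skip
// slots that are not usable, then measure the run of consecutive empty slots
// that ends there. The single boolean per slot folds "available and empty" into
// one flag, so this is a simplification for illustration, not HotSpot code.
#include <cassert>
#include <vector>
static unsigned find_empty_run_reverse(const std::vector<bool>& empty,
                                       unsigned start_idx, unsigned* res_idx) {
  long cur = start_idx;
  while (cur >= 0 && !empty[cur]) cur--;   // skip non-empty slots
  if (cur < 0) return 0;
  long last = cur;
  while (cur >= 0 && empty[cur]) cur--;    // walk over the empty run
  *res_idx = (unsigned)(cur + 1);
  return (unsigned)(last - cur);
}
int main() {
  // Slots 3..5 are empty; scanning back from slot 6 finds the run [3, 5].
  std::vector<bool> empty = {false, true, false, true, true, true, false};
  unsigned first = 0;
  unsigned len = find_empty_run_reverse(empty, 6, &first);
  assert(len == 3 && first == 3);
  return 0;
}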
void HeapRegionSeq::verify() {
guarantee(length() <= _allocated_heapregions_length,
err_msg("invariant: _length: %u _allocated_length: %u",
length(), _allocated_heapregions_length));
guarantee(_allocated_heapregions_length <= max_length(),
err_msg("invariant: _allocated_length: %u _max_length: %u",
_allocated_heapregions_length, max_length()));
bool prev_committed = true;
uint num_committed = 0;
HeapWord* prev_end = heap_bottom();
for (uint i = 0; i < _allocated_length; i += 1) {
for (uint i = 0; i < _allocated_heapregions_length; i++) {
if (!is_available(i)) {
prev_committed = false;
continue;
}
num_committed++;
HeapRegion* hr = _regions.get_by_index(i);
guarantee(hr != NULL, err_msg("invariant: i: %u", i));
guarantee(hr->bottom() == prev_end,
guarantee(!prev_committed || hr->bottom() == prev_end,
err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
guarantee(hr->hrs_index() == i,
err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
if (i < length()) {
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
} else {
guarantee(hr->is_empty(), "sanity");
guarantee(!hr->isHumongous(), "sanity");
// using assert instead of guarantee here since containing_set()
// is only available in non-product builds.
assert(hr->containing_set() == NULL, "sanity");
}
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
// We cannot check whether the region is part of a particular set: at the time
// this method may be called, we have only completed allocation of the regions,
// but not yet put them into a region set.
prev_committed = true;
if (hr->startsHumongous()) {
prev_end = hr->orig_end();
} else {
prev_end = hr->end();
}
}
for (uint i = _allocated_length; i < max_length(); i += 1) {
for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
}
guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed));
_free_list.verify();
}
#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
verify();
}
#endif // PRODUCT

View File

@ -26,6 +26,8 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
class HeapRegion;
class HeapRegionClosure;
@ -33,16 +35,20 @@ class FreeRegionList;
class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
protected:
virtual HeapRegion* default_value() const { return NULL; }
virtual HeapRegion* default_value() const { return NULL; }
};
// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
// the one after it, etc.). Two regions that are consecutive in the
// array should also be adjacent in the address space (i.e.,
// region(i).end() == region(i+1).bottom().
// This class keeps track of the actual heap memory, auxiliary data
// and its metadata (i.e., HeapRegion instances) and the list of free regions.
//
// This allows maximum flexibility for deciding what to commit or uncommit given
// a request from outside.
//
// HeapRegions are kept in the _regions array in address order. A region's
// index in the array corresponds to its index in the heap (i.e., 0 is the
// region at the bottom of the heap, 1 is the one after it, etc.). Two
// regions that are consecutive in the array should also be adjacent in the
// address space (i.e., region(i).end() == region(i+1).bottom()).
//
// We create a HeapRegion when we commit the region's address space
// for the first time. When we uncommit the address space of a
@ -51,56 +57,94 @@ class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
//
// We keep track of three lengths:
//
// * _committed_length (returned by length()) is the number of currently
// committed regions.
// * _allocated_length (not exposed outside this class) is the
// number of regions for which we have HeapRegions.
// * _num_committed (returned by length()) is the number of currently
// committed regions. These may not be contiguous.
// * _allocated_heapregions_length (not exposed outside this class) is the
// number of regions+1 for which we have HeapRegions.
// * max_length() returns the maximum number of regions the heap can have.
//
// and maintain that: _committed_length <= _allocated_length <= max_length()
class HeapRegionSeq: public CHeapObj<mtGC> {
friend class VMStructs;
G1HeapRegionTable _regions;
// The number of regions committed in the heap.
uint _committed_length;
G1RegionToSpaceMapper* _heap_mapper;
G1RegionToSpaceMapper* _prev_bitmap_mapper;
G1RegionToSpaceMapper* _next_bitmap_mapper;
G1RegionToSpaceMapper* _bot_mapper;
G1RegionToSpaceMapper* _cardtable_mapper;
G1RegionToSpaceMapper* _card_counts_mapper;
// A hint for which index to start searching from for humongous
// allocations.
uint _next_search_index;
FreeRegionList _free_list;
// The number of regions for which we have allocated HeapRegions for.
uint _allocated_length;
// Each bit in this bitmap indicates that the corresponding region is available
// for allocation.
BitMap _available_map;
// Find a contiguous set of empty regions of length num, starting
// from the given index.
uint find_contiguous_from(uint from, uint num);
// The number of regions committed in the heap.
uint _num_committed;
void increment_allocated_length() {
assert(_allocated_length < max_length(), "pre-condition");
_allocated_length++;
}
void increment_length() {
assert(length() < max_length(), "pre-condition");
_committed_length++;
}
void decrement_length() {
assert(length() > 0, "pre-condition");
_committed_length--;
}
// Internal only. One past the highest region index for which a HeapRegion instance has been allocated.
uint _allocated_heapregions_length;
HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
HeapWord* heap_end() const {return _regions.end_address_mapped(); }
void make_regions_available(uint index, uint num_regions = 1);
// Pass down commit calls to the VirtualSpace.
void commit_regions(uint index, size_t num_regions = 1);
void uncommit_regions(uint index, size_t num_regions = 1);
// Notify other data structures about change in the heap layout.
void update_committed_space(HeapWord* old_end, HeapWord* new_end);
// Calculate the starting region for each worker during parallel iteration so
// that they do not all start from the same region.
uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
// Find a contiguous set of empty or uncommitted regions of length num and return
// the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
// If only_empty is true, only empty regions are considered.
// Searches from bottom to top of the heap, doing a first-fit.
uint find_contiguous(size_t num, bool only_empty);
// Finds the next sequence of unavailable regions starting from start_idx. Returns the
// length of the sequence found. If this result is zero, no such sequence could be found,
// otherwise res_idx indicates the start index of these regions.
uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const;
// Finds the next sequence of empty regions starting from start_idx, going backwards in
// the heap. Returns the length of the sequence found. If this value is zero, no
// sequence could be found, otherwise res_idx contains the start index of this range.
uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
// Allocate a new HeapRegion for the given index.
HeapRegion* new_heap_region(uint hrs_index);
#ifdef ASSERT
public:
bool is_free(HeapRegion* hr) const;
#endif
// Returns whether the given region is available for allocation.
bool is_available(uint region) const;
public:
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
HeapRegionSeq() : _regions(), _heap_mapper(NULL), _num_committed(0),
_next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
_allocated_heapregions_length(0), _available_map(),
_free_list("Free list", new MasterFreeRegionListMtSafeChecker())
{ }
void initialize(HeapWord* bottom, HeapWord* end);
void initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* prev_bitmap,
G1RegionToSpaceMapper* next_bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts);
// Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
// new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
// the heap from the lowest address, this region (and its associated data
// structures) are available and we do not need to check further.
HeapRegion* get_dummy_region() { return new_heap_region(0); }
// Return the HeapRegion at the given index. Assume that the index
// is valid.
@ -110,45 +154,86 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
// HeapRegion, otherwise return NULL.
inline HeapRegion* addr_to_region(HeapWord* addr) const;
// Insert the given region into the free region list.
inline void insert_into_free_list(HeapRegion* hr);
// Insert the given region list into the global free region list.
void insert_list_into_free_list(FreeRegionList* list) {
_free_list.add_ordered(list);
}
HeapRegion* allocate_free_region(bool is_old) {
HeapRegion* hr = _free_list.remove_region(is_old);
if (hr != NULL) {
assert(hr->next() == NULL, "Single region should not have next");
assert(is_available(hr->hrs_index()), "Must be committed");
}
return hr;
}
inline void allocate_free_regions_starting_at(uint first, uint num_regions);
// Remove all regions from the free list.
void remove_all_free_regions() {
_free_list.remove_all();
}
// Return the number of committed free regions in the heap.
uint num_free_regions() const {
return _free_list.length();
}
size_t total_capacity_bytes() const {
return num_free_regions() * HeapRegion::GrainBytes;
}
// Return the number of available (uncommitted) regions.
uint available() const { return max_length() - length(); }
// Return the number of regions that have been committed in the heap.
uint length() const { return _committed_length; }
uint length() const { return _num_committed; }
// Return the maximum number of regions in the heap.
uint max_length() const { return (uint)_regions.length(); }
// Expand the sequence to reflect that the heap has grown from
// old_end to new_end. Either create new HeapRegions, or re-use
// existing ones, and return them in the given list. Returns the
// memory region that covers the newly-created regions. If a
// HeapRegion allocation fails, the result memory region might be
// smaller than the desired one.
MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
FreeRegionList* list);
MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); }
// Return the number of contiguous regions at the end of the sequence
// that are available for allocation.
uint free_suffix();
// Expand the sequence to reflect that the heap has grown. Either create new
// HeapRegions, or re-use existing ones. Returns the number of regions the
// sequence was expanded by. If a HeapRegion allocation fails, the resulting
// number of regions might be smaller than what's desired.
uint expand_by(uint num_regions);
// Find a contiguous set of empty regions of length num and return
// the index of the first region or G1_NULL_HRS_INDEX if the
// search was unsuccessful.
uint find_contiguous(uint num);
// Makes sure that the regions from start to start+num_regions-1 are available
// for allocation. Returns the number of regions that were committed to achieve
// this.
uint expand_at(uint start, uint num_regions);
// Find a contiguous set of empty regions of length num. Returns the start index of
// that set, or G1_NO_HRS_INDEX.
uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
// Find a contiguous set of empty or unavailable regions of length num. Returns the
// start index of that set, or G1_NO_HRS_INDEX.
uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
HeapRegion* next_region_in_heap(const HeapRegion* r) const;
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
void iterate(HeapRegionClosure* blk) const;
// As above, but start the iteration from hr and loop around. If hr
// is NULL, we start from the first region in the heap.
void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const;
// Tag as uncommitted as many regions that are completely free as
// possible, up to num_regions_to_remove, from the suffix of the committed
// sequence. Return the actual number of removed regions.
// Uncommit up to num_regions_to_remove regions that are completely free.
// Return the actual number of uncommitted regions.
uint shrink_by(uint num_regions_to_remove);
void verify();
// Do some sanity checking.
void verify_optional() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
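// A compact, self-contained model of the three lengths described in the class
// comment above: the number of committed regions, the allocated high-water mark
// of HeapRegion instances, and the maximum number of regions. The toy struct and
// its names are assumptions for illustration; it only demonstrates the invariant
// committed <= allocated <= max.
#include <cassert>
#include <vector>
struct RegionTableModel {
  std::vector<bool> committed;       // one flag per possible region
  unsigned num_committed = 0;        // like _num_committed
  unsigned allocated_length = 0;     // like _allocated_heapregions_length
  explicit RegionTableModel(unsigned max) : committed(max, false) {}
  unsigned max_length() const { return (unsigned)committed.size(); }
  void commit(unsigned idx) {
    assert(idx < max_length() && !committed[idx]);
    committed[idx] = true;
    num_committed++;
    if (idx + 1 > allocated_length) allocated_length = idx + 1;
    assert(num_committed <= allocated_length && allocated_length <= max_length());
  }
  void uncommit(unsigned idx) {
    assert(committed[idx]);
    committed[idx] = false;
    num_committed--;                 // the high-water mark does not shrink
    assert(num_committed <= allocated_length && allocated_length <= max_length());
  }
};
int main() {
  RegionTableModel t(8);
  t.commit(0);
  t.commit(5);                       // committed regions need not be contiguous
  t.uncommit(0);
  assert(t.num_committed == 1 && t.allocated_length == 6 && t.max_length() == 8);
  return 0;
}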

View File

@ -27,6 +27,7 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
assert(addr < heap_end(),
@ -35,16 +36,23 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom())));
HeapRegion* hr = _regions.get_by_address(addr);
assert(hr != NULL, "invariant");
return hr;
}
inline HeapRegion* HeapRegionSeq::at(uint index) const {
assert(index < length(), "pre-condition");
assert(is_available(index), "pre-condition");
HeapRegion* hr = _regions.get_by_index(index);
assert(hr != NULL, "sanity");
assert(hr->hrs_index() == index, "sanity");
return hr;
}
inline void HeapRegionSeq::insert_into_free_list(HeapRegion* hr) {
_free_list.add_ordered(hr);
}
inline void HeapRegionSeq::allocate_free_regions_starting_at(uint first, uint num_regions) {
_free_list.remove_starting_at(at(first), num_regions);
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
@ -67,7 +68,7 @@ void HeapRegionSetBase::verify_start() {
// Do the basic verification first before we do the checks over the regions.
HeapRegionSetBase::verify();
_verify_in_progress = true;
_verify_in_progress = true;
}
void HeapRegionSetBase::verify_end() {
@ -103,62 +104,7 @@ void FreeRegionList::set_unrealistically_long_length(uint len) {
}
void FreeRegionList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
}
void FreeRegionList::add_as_head_or_tail(FreeRegionList* from_list, bool as_head) {
check_mt_safety();
from_list->check_mt_safety();
verify_optional();
from_list->verify_optional();
if (from_list->is_empty()) {
return;
}
#ifdef ASSERT
FreeRegionListIterator iter(from_list);
while (iter.more_available()) {
HeapRegion* hr = iter.get_next();
// In set_containing_set() we check that we either set the value
// from NULL to non-NULL or vice versa to catch bugs. So, we have
// to NULL it first before setting it to the value.
hr->set_containing_set(NULL);
hr->set_containing_set(this);
}
#endif // ASSERT
if (_head == NULL) {
assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
_head = from_list->_head;
_tail = from_list->_tail;
} else {
assert(length() > 0 && _tail != NULL, hrs_ext_msg(this, "invariant"));
if (as_head) {
from_list->_tail->set_next(_head);
_head->set_prev(from_list->_tail);
_head = from_list->_head;
} else {
_tail->set_next(from_list->_head);
from_list->_head->set_prev(_tail);
_tail = from_list->_tail;
}
}
_count.increment(from_list->length(), from_list->total_capacity_bytes());
from_list->clear();
verify_optional();
from_list->verify_optional();
}
void FreeRegionList::add_as_head(FreeRegionList* from_list) {
add_as_head_or_tail(from_list, true /* as_head */);
}
void FreeRegionList::add_as_tail(FreeRegionList* from_list) {
add_as_head_or_tail(from_list, false /* as_head */);
msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, _head, _tail);
}
void FreeRegionList::remove_all() {
@ -191,11 +137,6 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
return;
}
if (is_empty()) {
add_as_head(from_list);
return;
}
#ifdef ASSERT
FreeRegionListIterator iter(from_list);
while (iter.more_available()) {
@ -208,37 +149,43 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
}
#endif // ASSERT
HeapRegion* curr_to = _head;
HeapRegion* curr_from = from_list->_head;
while (curr_from != NULL) {
while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
curr_to = curr_to->next();
}
if (curr_to == NULL) {
// The rest of the from list should be added as tail
_tail->set_next(curr_from);
curr_from->set_prev(_tail);
curr_from = NULL;
} else {
HeapRegion* next_from = curr_from->next();
curr_from->set_next(curr_to);
curr_from->set_prev(curr_to->prev());
if (curr_to->prev() == NULL) {
_head = curr_from;
} else {
curr_to->prev()->set_next(curr_from);
}
curr_to->set_prev(curr_from);
curr_from = next_from;
}
}
if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
if (is_empty()) {
assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
_head = from_list->_head;
_tail = from_list->_tail;
} else {
HeapRegion* curr_to = _head;
HeapRegion* curr_from = from_list->_head;
while (curr_from != NULL) {
while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) {
curr_to = curr_to->next();
}
if (curr_to == NULL) {
// The rest of the from list should be added as tail
_tail->set_next(curr_from);
curr_from->set_prev(_tail);
curr_from = NULL;
} else {
HeapRegion* next_from = curr_from->next();
curr_from->set_next(curr_to);
curr_from->set_prev(curr_to->prev());
if (curr_to->prev() == NULL) {
_head = curr_from;
} else {
curr_to->prev()->set_next(curr_from);
}
curr_to->set_prev(curr_from);
curr_from = next_from;
}
}
if (_tail->hrs_index() < from_list->_tail->hrs_index()) {
_tail = from_list->_tail;
}
}
_count.increment(from_list->length(), from_list->total_capacity_bytes());
@ -248,68 +195,59 @@ void FreeRegionList::add_ordered(FreeRegionList* from_list) {
from_list->verify_optional();
}
void FreeRegionList::remove_all_pending(uint target_count) {
void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
check_mt_safety();
assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
assert(num_regions >= 1, hrs_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
verify_optional();
DEBUG_ONLY(uint old_length = length();)
HeapRegion* curr = _head;
HeapRegion* curr = first;
uint count = 0;
while (curr != NULL) {
while (count < num_regions) {
verify_region(curr);
HeapRegion* next = curr->next();
HeapRegion* prev = curr->prev();
if (curr->pending_removal()) {
assert(count < target_count,
hrs_err_msg("[%s] should not come across more regions "
"pending for removal than target_count: %u",
name(), target_count));
assert(count < num_regions,
hrs_err_msg("[%s] should not come across more regions "
"pending for removal than num_regions: %u",
name(), num_regions));
if (prev == NULL) {
assert(_head == curr, hrs_ext_msg(this, "invariant"));
_head = next;
} else {
assert(_head != curr, hrs_ext_msg(this, "invariant"));
prev->set_next(next);
}
if (next == NULL) {
assert(_tail == curr, hrs_ext_msg(this, "invariant"));
_tail = prev;
} else {
assert(_tail != curr, hrs_ext_msg(this, "invariant"));
next->set_prev(prev);
}
if (_last == curr) {
_last = NULL;
}
curr->set_next(NULL);
curr->set_prev(NULL);
remove(curr);
curr->set_pending_removal(false);
count += 1;
// If we have come across the target number of regions we can
// just bail out. However, for debugging purposes, we can just
// carry on iterating to make sure there are not more regions
// tagged with pending removal.
DEBUG_ONLY(if (count == target_count) break;)
if (prev == NULL) {
assert(_head == curr, hrs_ext_msg(this, "invariant"));
_head = next;
} else {
assert(_head != curr, hrs_ext_msg(this, "invariant"));
prev->set_next(next);
}
if (next == NULL) {
assert(_tail == curr, hrs_ext_msg(this, "invariant"));
_tail = prev;
} else {
assert(_tail != curr, hrs_ext_msg(this, "invariant"));
next->set_prev(prev);
}
    if (_last == curr) {
_last = NULL;
}
curr->set_next(NULL);
curr->set_prev(NULL);
remove(curr);
count++;
curr = next;
}
assert(count == target_count,
hrs_err_msg("[%s] count: %u should be == target_count: %u",
name(), count, target_count));
assert(length() + target_count == old_length,
assert(count == num_regions,
hrs_err_msg("[%s] count: %u should be == num_regions: %u",
name(), count, num_regions));
assert(length() + num_regions == old_length,
hrs_err_msg("[%s] new length should be consistent "
"new length: %u old length: %u target_count: %u",
name(), length(), old_length, target_count));
"new length: %u old length: %u num_regions: %u",
name(), length(), old_length, num_regions));
verify_optional();
}
@ -348,10 +286,12 @@ void FreeRegionList::print_on(outputStream* out, bool print_contents) {
hr->print_on(out);
}
}
out->cr();
}
void FreeRegionList::verify_list() {
HeapRegion* curr = head();
HeapRegion* curr = _head;
HeapRegion* prev1 = NULL;
HeapRegion* prev0 = NULL;
uint count = 0;
@ -379,7 +319,7 @@ void FreeRegionList::verify_list() {
curr = curr->next();
}
guarantee(tail() == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), tail()->hrs_index(), prev0->hrs_index()));
guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrs_index(), prev0->hrs_index()));
guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
@ -463,3 +403,41 @@ void HumongousRegionSetMtSafeChecker::check() {
"master humongous set MT safety protocol outside a safepoint");
}
}
void FreeRegionList_test() {
FreeRegionList l("test");
const uint num_regions_in_test = 5;
// Create a fake heap. It does not need to be valid, as the HeapRegion constructor
// does not access it.
MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);
// Allocate a fake BOT because the HeapRegion constructor initializes
// the BOT.
size_t bot_size = G1BlockOffsetSharedArray::compute_size(heap.word_size());
HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
G1RegionToSpaceMapper* bot_storage =
G1RegionToSpaceMapper::create_mapper(bot_rs,
os::vm_page_size(),
HeapRegion::GrainBytes,
G1BlockOffsetSharedArray::N_bytes,
mtGC);
G1BlockOffsetSharedArray oa(heap, bot_storage);
bot_storage->commit_regions(0, num_regions_in_test);
HeapRegion hr0(0, &oa, heap);
HeapRegion hr1(1, &oa, heap);
HeapRegion hr2(2, &oa, heap);
HeapRegion hr3(3, &oa, heap);
HeapRegion hr4(4, &oa, heap);
l.add_ordered(&hr1);
l.add_ordered(&hr0);
l.add_ordered(&hr3);
l.add_ordered(&hr4);
l.add_ordered(&hr2);
assert(l.length() == num_regions_in_test, "wrong length");
l.verify_list();
bot_storage->uncommit_regions(0, num_regions_in_test);
delete bot_storage;
FREE_C_HEAP_ARRAY(HeapWord, bot_data, mtGC);
}
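
The add_ordered(FreeRegionList*) change above merges two hrs_index()-sorted, doubly-linked region lists by walking both lists in index order and splicing each source region in front of the first larger destination region. A minimal standalone sketch of the same merge strategy on a plain singly-linked list (illustrative only, not HotSpot code; the Node type and its index field are invented for the example):

    #include <cstddef>

    struct Node { unsigned index; Node* next; };

    // Merge two index-sorted singly-linked lists into one sorted list.
    // Mirrors the walk in add_ordered(): advance the destination cursor until it
    // passes the current source node, then splice the source node in front of it.
    Node* merge_ordered(Node* to, Node* from) {
      Node dummy = { 0, to };        // sentinel so head insertion needs no special case
      Node* tail = &dummy;           // last node known to sort before the current source node
      while (from != NULL) {
        while (tail->next != NULL && tail->next->index < from->index) {
          tail = tail->next;         // advance the destination cursor
        }
        Node* next_from = from->next;
        from->next = tail->next;     // splice before the first larger destination node
        tail->next = from;
        from = next_from;
      }
      return dummy.next;
    }

The HotSpot version additionally maintains the prev pointers, _head/_tail/_last and the region count, which the sketch omits.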

View File

@ -162,7 +162,7 @@ public:
// diagnosing failures.
class hrs_ext_msg : public hrs_err_msg {
public:
hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s","") {
hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("%s", "") {
set->fill_in_ext_msg(this, message);
}
};
@ -192,13 +192,9 @@ public:
};
// A set that links all the regions added to it in a doubly-linked
// list. We should try to avoid doing operations that iterate over
// sorted list. We should try to avoid doing operations that iterate over
// such lists in performance critical paths. Typically we should
// add / remove one region at a time or concatenate two lists. There are
// two ways to treat your lists, ordered and un-ordered. All un-ordered
// operations are done in constant time. To keep a list ordered only use
// add_ordered() to add elements to the list. If a list is not ordered
// from start, there is no way to sort it later.
// add / remove one region at a time or concatenate two lists.
class FreeRegionListIterator;
@ -210,13 +206,13 @@ private:
HeapRegion* _tail;
// _last is used to keep track of where we added an element the last
// time in ordered lists. It helps to improve performance when adding
// several ordered items in a row.
// time. It helps to improve performance when adding several ordered items in a row.
HeapRegion* _last;
static uint _unrealistically_long_length;
void add_as_head_or_tail(FreeRegionList* from_list, bool as_head);
inline HeapRegion* remove_from_head_impl();
inline HeapRegion* remove_from_tail_impl();
protected:
virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
@ -232,8 +228,11 @@ public:
void verify_list();
HeapRegion* head() { return _head; }
HeapRegion* tail() { return _tail; }
#ifdef ASSERT
bool contains(HeapRegion* hr) const {
return hr->containing_set() == this;
}
#endif
static void set_unrealistically_long_length(uint len);
@ -242,55 +241,20 @@ public:
// is determined by hrs_index.
inline void add_ordered(HeapRegion* hr);
// It adds hr to the list as the new head. The region should not be
// a member of another set.
inline void add_as_head(HeapRegion* hr);
// It adds hr to the list as the new tail. The region should not be
// a member of another set.
inline void add_as_tail(HeapRegion* hr);
// It removes and returns the head of the list. It assumes that the
// list is not empty so it will return a non-NULL value.
inline HeapRegion* remove_head();
// Convenience method.
inline HeapRegion* remove_head_or_null();
// Removes and returns the last element (_tail) of the list. It assumes
// that the list isn't empty so that it can return a non-NULL value.
inline HeapRegion* remove_tail();
// Convenience method
inline HeapRegion* remove_tail_or_null();
// Removes from head or tail based on the given argument.
inline HeapRegion* remove_region(bool from_head);
HeapRegion* remove_region(bool from_head);
// Merge two ordered lists. The result is also ordered. The order is
// determined by hrs_index.
void add_ordered(FreeRegionList* from_list);
// It moves the regions from from_list to this list and empties
// from_list. The new regions will appear in the same order as they
// were in from_list and be linked in the beginning of this list.
void add_as_head(FreeRegionList* from_list);
// It moves the regions from from_list to this list and empties
// from_list. The new regions will appear in the same order as they
// were in from_list and be linked in the end of this list.
void add_as_tail(FreeRegionList* from_list);
// It empties the list by removing all regions from it.
void remove_all();
// It removes all regions in the list that are pending for removal
// (i.e., they have been tagged with "pending_removal"). The list
// must not be empty, target_count should reflect the exact number
// of regions that are pending for removal in the list, and
// target_count should be > 1 (currently, we never need to remove a
// single region using this).
void remove_all_pending(uint target_count);
// Remove all (contiguous) regions from first to first + num_regions -1 from
// this list.
// Num_regions must be > 1.
void remove_starting_at(HeapRegion* first, uint num_regions);
virtual void verify();
@ -298,7 +262,7 @@ public:
};
// Iterator class that provides a convenient way to iterate over the
// regions of a HeapRegionLinkedList instance.
// regions of a FreeRegionList.
class FreeRegionListIterator : public StackObj {
private:
@ -324,7 +288,7 @@ public:
}
FreeRegionListIterator(FreeRegionList* list) : _curr(NULL), _list(list) {
_curr = list->head();
_curr = list->_head;
}
};
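
The ASSERT-only contains(HeapRegion*) added above answers set membership in O(1) by consulting the region's containing_set() back-pointer rather than walking the list; add() and remove() in heapRegionSet.inline.hpp keep that back-pointer up to date. A minimal sketch of the back-pointer pattern in isolation (illustrative; Set and Item are invented names):

    struct Set;

    struct Item {
      Set* owner;                      // back-pointer maintained by the owning set
      Item() : owner(0) {}
    };

    struct Set {
      void add(Item* it)    { it->owner = this; }   // set the back-pointer on insert
      void remove(Item* it) { it->owner = 0; }      // clear it on removal
      bool contains(const Item* it) const { return it->owner == this; }  // O(1) membership test
    };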

View File

@ -30,7 +30,8 @@
inline void HeapRegionSetBase::add(HeapRegion* hr) {
check_mt_safety();
assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
assert(hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
_count.increment(1u, hr->capacity());
hr->set_containing_set(this);
@ -40,7 +41,8 @@ inline void HeapRegionSetBase::add(HeapRegion* hr) {
inline void HeapRegionSetBase::remove(HeapRegion* hr) {
check_mt_safety();
verify_region(hr);
assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
assert(hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
hr->set_containing_set(NULL);
assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
@ -48,8 +50,7 @@ inline void HeapRegionSetBase::remove(HeapRegion* hr) {
}
inline void FreeRegionList::add_ordered(HeapRegion* hr) {
check_mt_safety();
assert((length() == 0 && _head == NULL && _tail == NULL) ||
assert((length() == 0 && _head == NULL && _tail == NULL && _last == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrs_ext_msg(this, "invariant"));
// add() will verify the region and check mt safety.
@ -95,89 +96,48 @@ inline void FreeRegionList::add_ordered(HeapRegion* hr) {
_last = hr;
}
inline void FreeRegionList::add_as_head(HeapRegion* hr) {
assert((length() == 0 && _head == NULL && _tail == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrs_ext_msg(this, "invariant"));
// add() will verify the region and check mt safety.
add(hr);
// Now link the region.
if (_head != NULL) {
hr->set_next(_head);
_head->set_prev(hr);
} else {
_tail = hr;
}
_head = hr;
}
inline void FreeRegionList::add_as_tail(HeapRegion* hr) {
check_mt_safety();
assert((length() == 0 && _head == NULL && _tail == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrs_ext_msg(this, "invariant"));
// add() will verify the region and check mt safety.
add(hr);
// Now link the region.
if (_tail != NULL) {
_tail->set_next(hr);
hr->set_prev(_tail);
} else {
_head = hr;
}
_tail = hr;
}
inline HeapRegion* FreeRegionList::remove_head() {
assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
assert(length() > 0 && _head != NULL && _tail != NULL,
hrs_ext_msg(this, "invariant"));
// We need to unlink it first.
HeapRegion* hr = _head;
_head = hr->next();
inline HeapRegion* FreeRegionList::remove_from_head_impl() {
HeapRegion* result = _head;
_head = result->next();
if (_head == NULL) {
_tail = NULL;
} else {
_head->set_prev(NULL);
}
hr->set_next(NULL);
if (_last == hr) {
_last = NULL;
}
// remove() will verify the region and check mt safety.
remove(hr);
return hr;
result->set_next(NULL);
return result;
}
inline HeapRegion* FreeRegionList::remove_head_or_null() {
check_mt_safety();
if (!is_empty()) {
return remove_head();
} else {
return NULL;
}
}
inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
HeapRegion* result = _tail;
inline HeapRegion* FreeRegionList::remove_tail() {
assert(!is_empty(), hrs_ext_msg(this, "The list should not be empty"));
assert(length() > 0 && _head != NULL && _tail != NULL,
hrs_ext_msg(this, "invariant"));
// We need to unlink it first
HeapRegion* hr = _tail;
_tail = hr->prev();
_tail = result->prev();
if (_tail == NULL) {
_head = NULL;
} else {
_tail->set_next(NULL);
}
hr->set_prev(NULL);
result->set_prev(NULL);
return result;
}
inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
check_mt_safety();
verify_optional();
if (is_empty()) {
return NULL;
}
assert(length() > 0 && _head != NULL && _tail != NULL,
hrs_ext_msg(this, "invariant"));
HeapRegion* hr;
if (from_head) {
hr = remove_from_head_impl();
} else {
hr = remove_from_tail_impl();
}
if (_last == hr) {
_last = NULL;
@ -188,22 +148,5 @@ inline HeapRegion* FreeRegionList::remove_tail() {
return hr;
}
inline HeapRegion* FreeRegionList::remove_tail_or_null() {
check_mt_safety();
if (!is_empty()) {
return remove_tail();
} else {
return NULL;
}
}
inline HeapRegion* FreeRegionList::remove_region(bool from_head) {
if (from_head) {
return remove_head_or_null();
} else {
return remove_tail_or_null();
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSET_INLINE_HPP

View File

@ -43,10 +43,9 @@
nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
\
nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \
nonstatic_field(HeapRegionSeq, _committed_length, uint) \
nonstatic_field(HeapRegionSeq, _num_committed, uint) \
\
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \

View File

@ -78,6 +78,7 @@ jint ParallelScavengeHeap::initialize() {
(HeapWord*)(heap_rs.base() + heap_rs.size()));
CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
barrier_set->initialize();
_barrier_set = barrier_set;
oopDesc::set_bs(_barrier_set);
if (_barrier_set == NULL) {

View File

@ -70,7 +70,7 @@ void VM_ParallelGCSystemGC::doit() {
"must be a ParallelScavengeHeap");
GCCauseSetter gccs(heap, _gc_cause);
if (_gc_cause == GCCause::_gc_locker
if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
// If (and only if) the scavenge fails, this will invoke a full gc.
heap->invoke_scavenge();

View File

@ -51,6 +51,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _heap_dump:
return "Heap Dump Initiated GC";
case _wb_young_gc:
return "WhiteBox Initiated Young GC";
case _no_gc:
return "No GC";

View File

@ -46,6 +46,7 @@ class GCCause : public AllStatic {
_gc_locker,
_heap_inspection,
_heap_dump,
_wb_young_gc,
/* implementation independent, but reserved for GC use */
_no_gc,

View File

@ -3402,7 +3402,7 @@ BytecodeInterpreter::print() {
tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);

View File

@ -265,7 +265,8 @@ class MetaspaceObj {
f(ConstantPool) \
f(ConstantPoolCache) \
f(Annotation) \
f(MethodCounters)
f(MethodCounters) \
f(Deallocated)
#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
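
The two helper macros above are the usual X-macro idiom: the type list macro names every metaspace object type once and is expanded twice, once to declare enum values and once to build the case labels of a name lookup switch, which is how the newly added Deallocated entry automatically gets both. A self-contained sketch of the expansion (illustrative; the list macro name and contents are shortened here):

    #include <cstdio>

    // The list macro names each type once; 'f' decides what every expansion produces.
    #define TYPE_LIST(f) \
      f(ConstantPool)    \
      f(Annotation)      \
      f(Deallocated)

    #define METASPACE_OBJ_TYPE_DECLARE(name)   name ## Type,
    #define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;

    enum Type {
      TYPE_LIST(METASPACE_OBJ_TYPE_DECLARE)      // ConstantPoolType, AnnotationType, DeallocatedType,
      TypeCount
    };

    const char* type_name(Type t) {
      switch (t) {
        TYPE_LIST(METASPACE_OBJ_TYPE_NAME_CASE)  // case ConstantPoolType: return "ConstantPool"; ...
        default: return "Unknown";
      }
    }

    int main() {
      printf("%s\n", type_name(DeallocatedType));  // prints "Deallocated"
      return 0;
    }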

View File

@ -44,13 +44,6 @@
// enumerate ref fields that have been modified (since the last
// enumeration.)
size_t CardTableModRefBS::cards_required(size_t covered_words)
{
// Add one for a guard card, used to detect errors.
const size_t words = align_size_up(covered_words, card_size_in_words);
return words / card_size_in_words + 1;
}
size_t CardTableModRefBS::compute_byte_map_size()
{
assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
@ -64,27 +57,50 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
int max_covered_regions):
ModRefBarrierSet(max_covered_regions),
_whole_heap(whole_heap),
_guard_index(cards_required(whole_heap.word_size()) - 1),
_last_valid_index(_guard_index - 1),
_guard_index(0),
_guard_region(),
_last_valid_index(0),
_page_size(os::vm_page_size()),
_byte_map_size(compute_byte_map_size())
_byte_map_size(0),
_covered(NULL),
_committed(NULL),
_cur_covered_regions(0),
_byte_map(NULL),
byte_map_base(NULL),
// LNC functionality
_lowest_non_clean(NULL),
_lowest_non_clean_chunk_size(NULL),
_lowest_non_clean_base_chunk_index(NULL),
_last_LNC_resizing_collection(NULL)
{
_kind = BarrierSet::CardTableModRef;
HeapWord* low_bound = _whole_heap.start();
HeapWord* high_bound = _whole_heap.end();
assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary");
assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");
assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
assert(card_size <= 512, "card_size must be less than 512"); // why?
_covered = new MemRegion[max_covered_regions];
_committed = new MemRegion[max_covered_regions];
if (_covered == NULL || _committed == NULL) {
vm_exit_during_initialization("couldn't alloc card table covered region set.");
_covered = new MemRegion[_max_covered_regions];
if (_covered == NULL) {
vm_exit_during_initialization("Could not allocate card table covered region set.");
}
}
void CardTableModRefBS::initialize() {
_guard_index = cards_required(_whole_heap.word_size()) - 1;
_last_valid_index = _guard_index - 1;
_byte_map_size = compute_byte_map_size();
HeapWord* low_bound = _whole_heap.start();
HeapWord* high_bound = _whole_heap.end();
_cur_covered_regions = 0;
_committed = new MemRegion[_max_covered_regions];
if (_committed == NULL) {
vm_exit_during_initialization("Could not allocate card table committed region set.");
}
const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(_page_size, (size_t) os::vm_allocation_granularity());
ReservedSpace heap_rs(_byte_map_size, rs_align, false);
@ -114,20 +130,20 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
!ExecMem, "card table last card");
*guard_card = last_card;
_lowest_non_clean =
NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
_lowest_non_clean =
NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
_lowest_non_clean_chunk_size =
NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
_lowest_non_clean_base_chunk_index =
NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
_last_LNC_resizing_collection =
NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
if (_lowest_non_clean == NULL
|| _lowest_non_clean_chunk_size == NULL
|| _lowest_non_clean_base_chunk_index == NULL
|| _last_LNC_resizing_collection == NULL)
vm_exit_during_initialization("couldn't allocate an LNC array.");
for (int i = 0; i < max_covered_regions; i++) {
for (int i = 0; i < _max_covered_regions; i++) {
_lowest_non_clean[i] = NULL;
_lowest_non_clean_chunk_size[i] = 0;
_last_LNC_resizing_collection[i] = -1;
@ -650,7 +666,7 @@ void CardTableModRefBS::verify_region(MemRegion mr,
jbyte val, bool val_equals) {
jbyte* start = byte_for(mr.start());
jbyte* end = byte_for(mr.last());
bool failures = false;
bool failures = false;
for (jbyte* curr = start; curr <= end; ++curr) {
jbyte curr_val = *curr;
bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);

View File

@ -96,12 +96,12 @@ class CardTableModRefBS: public ModRefBarrierSet {
// The declaration order of these const fields is important; see the
// constructor before changing.
const MemRegion _whole_heap; // the region covered by the card table
const size_t _guard_index; // index of very last element in the card
size_t _guard_index; // index of very last element in the card
// table; it is set to a guard value
// (last_card) and should never be modified
const size_t _last_valid_index; // index of the last valid element
size_t _last_valid_index; // index of the last valid element
const size_t _page_size; // page size used when mapping _byte_map
const size_t _byte_map_size; // in bytes
size_t _byte_map_size; // in bytes
jbyte* _byte_map; // the card marking array
int _cur_covered_regions;
@ -123,7 +123,12 @@ class CardTableModRefBS: public ModRefBarrierSet {
protected:
// Initialization utilities; covered_words is the size of the covered region
// in, um, words.
inline size_t cards_required(size_t covered_words);
inline size_t cards_required(size_t covered_words) {
// Add one for a guard card, used to detect errors.
const size_t words = align_size_up(covered_words, card_size_in_words);
return words / card_size_in_words + 1;
}
inline size_t compute_byte_map_size();
// Finds and return the index of the region, if any, to which the given
@ -137,7 +142,7 @@ class CardTableModRefBS: public ModRefBarrierSet {
int find_covering_region_containing(HeapWord* addr);
// Resize one of the regions covered by the remembered set.
void resize_covered_region(MemRegion new_region);
virtual void resize_covered_region(MemRegion new_region);
// Returns the leftmost end of a committed region corresponding to a
// covered region before covered region "ind", or else "NULL" if "ind" is
@ -282,6 +287,8 @@ public:
CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
~CardTableModRefBS();
virtual void initialize();
// *** Barrier set functions.
bool has_write_ref_pre_barrier() { return false; }

View File

@ -54,6 +54,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
#else
_ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
#endif
_ct_bs->initialize();
set_bs(_ct_bs);
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

View File

@ -24,9 +24,14 @@
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/altHashing.hpp"
#include "memory/filemap.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/objArrayOop.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
@ -42,7 +47,6 @@
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
extern address JVM_FunctionAtStart();
extern address JVM_FunctionAtEnd();
@ -78,16 +82,27 @@ void FileMapInfo::fail_stop(const char *msg, ...) {
void FileMapInfo::fail_continue(const char *msg, ...) {
va_list ap;
va_start(ap, msg);
if (RequireSharedSpaces) {
fail(msg, ap);
MetaspaceShared::set_archive_loading_failed();
if (PrintSharedArchiveAndExit && _validating_classpath_entry_table) {
// If we are doing PrintSharedArchiveAndExit and some of the classpath entries
// do not validate, we can still continue "limping" to validate the remaining
// entries. No need to quit.
tty->print("[");
tty->vprint(msg, ap);
tty->print_cr("]");
} else {
if (PrintSharedSpaces) {
tty->print_cr("UseSharedSpaces: %s", msg);
if (RequireSharedSpaces) {
fail(msg, ap);
} else {
if (PrintSharedSpaces) {
tty->print_cr("UseSharedSpaces: %s", msg);
}
}
}
va_end(ap);
UseSharedSpaces = false;
close();
assert(current_info() != NULL, "singleton must be registered");
current_info()->close();
}
// Fill in the fileMapInfo structure with data about this VM instance.
@ -122,67 +137,201 @@ template <int N> static void get_header_version(char (&header_version) [N]) {
}
}
FileMapInfo::FileMapInfo() {
assert(_current_info == NULL, "must be singleton"); // not thread safe
_current_info = this;
memset(this, 0, sizeof(FileMapInfo));
_file_offset = 0;
_file_open = false;
_header = SharedClassUtil::allocate_file_map_header();
_header->_version = _invalid_version;
}
FileMapInfo::~FileMapInfo() {
assert(_current_info == this, "must be singleton"); // not thread safe
_current_info = NULL;
}
void FileMapInfo::populate_header(size_t alignment) {
_header._magic = 0xf00baba2;
_header._version = _current_version;
_header._alignment = alignment;
_header._obj_alignment = ObjectAlignmentInBytes;
_header->populate(this, alignment);
}
size_t FileMapInfo::FileMapHeader::data_size() {
return SharedClassUtil::file_map_header_size() - sizeof(FileMapInfo::FileMapHeaderBase);
}
void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) {
_magic = 0xf00baba2;
_version = _current_version;
_alignment = alignment;
_obj_alignment = ObjectAlignmentInBytes;
_classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
_classpath_entry_table = mapinfo->_classpath_entry_table;
_classpath_entry_size = mapinfo->_classpath_entry_size;
// The following fields are for sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
// invoked with.
// JVM version string ... changes on each build.
get_header_version(_header._jvm_ident);
get_header_version(_jvm_ident);
}
// Build checks on classpath and jar files
_header._num_jars = 0;
ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
for ( ; cpe != NULL; cpe = cpe->next()) {
void FileMapInfo::allocate_classpath_entry_table() {
int bytes = 0;
int count = 0;
char* strptr = NULL;
char* strptr_max = NULL;
Thread* THREAD = Thread::current();
if (cpe->is_jar_file()) {
if (_header._num_jars >= JVM_SHARED_JARS_MAX) {
fail_stop("Too many jar files to share.", NULL);
}
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
size_t entry_size = SharedClassUtil::shared_class_path_entry_size();
// Jar file - record timestamp and file size.
struct stat st;
const char *path = cpe->name();
if (os::stat(path, &st) != 0) {
// If we can't access a jar file in the boot path, then we can't
// make assumptions about where classes get loaded from.
fail_stop("Unable to open jar file %s.", path);
}
_header._jar[_header._num_jars]._timestamp = st.st_mtime;
_header._jar[_header._num_jars]._filesize = st.st_size;
_header._num_jars++;
} else {
for (int pass=0; pass<2; pass++) {
ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
// If directories appear in boot classpath, they must be empty to
// avoid having to verify each individual class file.
const char* name = ((ClassPathDirEntry*)cpe)->name();
if (!os::dir_is_empty(name)) {
fail_stop("Boot classpath directory %s is not empty.", name);
for (int cur_entry = 0 ; cpe != NULL; cpe = cpe->next(), cur_entry++) {
const char *name = cpe->name();
int name_bytes = (int)(strlen(name) + 1);
if (pass == 0) {
count ++;
bytes += (int)entry_size;
bytes += name_bytes;
if (TraceClassPaths || (TraceClassLoading && Verbose)) {
tty->print_cr("[Add main shared path (%s) %s]", (cpe->is_jar_file() ? "jar" : "dir"), name);
}
} else {
SharedClassPathEntry* ent = shared_classpath(cur_entry);
if (cpe->is_jar_file()) {
struct stat st;
if (os::stat(name, &st) != 0) {
// The file/dir must exist, or it would not have been added
// into ClassLoader::classpath_entry().
//
// If we can't access a jar file in the boot path, then we can't
// make assumptions about where classes get loaded from.
FileMapInfo::fail_stop("Unable to open jar file %s.", name);
}
EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
} else {
ent->_filesize = -1;
if (!os::dir_is_empty(name)) {
ClassLoader::exit_with_path_failure("Cannot have non-empty directory in archived classpaths", name);
}
}
ent->_name = strptr;
if (strptr + name_bytes <= strptr_max) {
strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
strptr += name_bytes;
} else {
assert(0, "miscalculated buffer size");
}
}
}
if (pass == 0) {
EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
Array<u8>* arr = MetadataFactory::new_array<u8>(loader_data, (bytes + 7)/8, THREAD);
strptr = (char*)(arr->data());
strptr_max = strptr + bytes;
SharedClassPathEntry* table = (SharedClassPathEntry*)strptr;
strptr += entry_size * count;
_classpath_entry_table_size = count;
_classpath_entry_table = table;
_classpath_entry_size = entry_size;
}
}
}
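
allocate_classpath_entry_table() above is a two-pass (measure, then fill) routine: pass 0 only counts the entries and sums the bytes needed for the fixed-size entry records plus their name strings, the buffer is allocated once between the passes, and pass 1 fills the records and copies the names into the tail of the same buffer. A minimal standalone sketch of that measure-then-fill pattern (illustrative only; the names and layout are simplified):

    #include <cstdlib>
    #include <cstring>

    // Pack a NULL-terminated array of C strings into one contiguous buffer:
    // an offset table first, the string bytes right after it.
    char* pack_names(const char* const* names, int* out_count) {
      size_t bytes = 0;
      int count = 0;
      char* buf = NULL;
      char* strptr = NULL;

      for (int pass = 0; pass < 2; pass++) {
        if (pass == 1) {
          buf = (char*)malloc(bytes);              // single allocation sized by pass 0
          strptr = buf + count * sizeof(size_t);   // strings follow the offset table
        }
        int i = 0;
        for (const char* const* p = names; *p != NULL; p++, i++) {
          size_t name_bytes = strlen(*p) + 1;      // include the trailing '\0'
          if (pass == 0) {
            count++;
            bytes += sizeof(size_t) + name_bytes;
          } else {
            ((size_t*)buf)[i] = (size_t)(strptr - buf);   // record this name's offset
            memcpy(strptr, *p, name_bytes);
            strptr += name_bytes;
          }
        }
      }
      *out_count = count;
      return buf;
    }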
bool FileMapInfo::validate_classpath_entry_table() {
_validating_classpath_entry_table = true;
int count = _header->_classpath_entry_table_size;
_classpath_entry_table = _header->_classpath_entry_table;
_classpath_entry_size = _header->_classpath_entry_size;
for (int i=0; i<count; i++) {
SharedClassPathEntry* ent = shared_classpath(i);
struct stat st;
const char* name = ent->_name;
bool ok = true;
if (TraceClassPaths || (TraceClassLoading && Verbose)) {
tty->print_cr("[Checking shared classpath entry: %s]", name);
}
if (os::stat(name, &st) != 0) {
fail_continue("Required classpath entry does not exist: %s", name);
ok = false;
} else if (ent->is_dir()) {
if (!os::dir_is_empty(name)) {
fail_continue("directory is not empty: %s", name);
ok = false;
}
} else {
if (ent->_timestamp != st.st_mtime ||
ent->_filesize != st.st_size) {
ok = false;
if (PrintSharedArchiveAndExit) {
fail_continue(ent->_timestamp != st.st_mtime ?
"Timestamp mismatch" :
"File size mismatch");
} else {
fail_continue("A jar file is not the one used while building"
" the shared archive file: %s", name);
}
}
}
if (ok) {
if (TraceClassPaths || (TraceClassLoading && Verbose)) {
tty->print_cr("[ok]");
}
} else if (!PrintSharedArchiveAndExit) {
_validating_classpath_entry_table = false;
return false;
}
}
_classpath_entry_table_size = _header->_classpath_entry_table_size;
_validating_classpath_entry_table = false;
return true;
}
// Read the FileMapInfo information from the file.
bool FileMapInfo::init_from_file(int fd) {
size_t n = read(fd, &_header, sizeof(struct FileMapHeader));
if (n != sizeof(struct FileMapHeader)) {
size_t sz = _header->data_size();
char* addr = _header->data();
size_t n = os::read(fd, addr, (unsigned int)sz);
if (n != sz) {
fail_continue("Unable to read the file header.");
return false;
}
if (_header._version != current_version()) {
if (_header->_version != current_version()) {
fail_continue("The shared archive file has the wrong version.");
return false;
}
_file_offset = (long)n;
size_t info_size = _header->_paths_misc_info_size;
_paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
if (_paths_misc_info == NULL) {
fail_continue("Unable to read the file header.");
return false;
}
n = os::read(fd, _paths_misc_info, (unsigned int)info_size);
if (n != info_size) {
fail_continue("Unable to read the shared path info header.");
FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
_paths_misc_info = NULL;
return false;
}
_file_offset += (long)n;
return true;
}
@ -237,7 +386,16 @@ void FileMapInfo::open_for_write() {
// Write the header to the file, seek to the next allocation boundary.
void FileMapInfo::write_header() {
write_bytes_aligned(&_header, sizeof(FileMapHeader));
int info_size = ClassLoader::get_shared_paths_misc_info_size();
_header->_paths_misc_info_size = info_size;
align_file_position();
size_t sz = _header->data_size();
char* addr = _header->data();
write_bytes(addr, (int)sz); // skip the C++ vtable
write_bytes(ClassLoader::get_shared_paths_misc_info(), info_size);
align_file_position();
}
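
write_header() above stores only the bytes behind _header->data(), so the FileMapHeader's C++ vtable pointer is never written to the archive or read back from it (see the data()/data_size() helpers in the filemap.hpp hunk below). A minimal sketch of that pattern, assuming the vtable pointer occupies exactly the size of the empty polymorphic base class, which is the common layout but still an assumption:

    #include <cstddef>
    #include <cstring>

    struct HeaderBase {                  // empty polymorphic base: holds only the vtable pointer
      virtual bool validate() = 0;
      virtual ~HeaderBase() {}
    };

    struct Header : HeaderBase {
      int magic;
      int version;
      virtual bool validate() { return magic == (int)0xf00baba2; }

      // Copy to/from raw storage while skipping the leading vtable pointer.
      char*  data()      { return (char*)this + sizeof(HeaderBase); }
      size_t data_size() { return sizeof(Header) - sizeof(HeaderBase); }
    };

    // Writing: store data_size() bytes starting at data().
    // Reading: construct a Header normally (so it has a valid vtable pointer),
    // then copy the stored bytes back into data().
    void read_into(Header* header, const char* stored, size_t stored_size) {
      if (stored_size == header->data_size()) {
        memcpy(header->data(), stored, stored_size);
      }
    }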
@ -247,7 +405,7 @@ void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
align_file_position();
size_t used = space->used_bytes_slow(Metaspace::NonClassType);
size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
}
@ -257,7 +415,7 @@ void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
void FileMapInfo::write_region(int region, char* base, size_t size,
size_t capacity, bool read_only,
bool allow_exec) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[region];
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[region];
if (_file_open) {
guarantee(si->_file_offset == _file_offset, "file offset mismatch.");
@ -339,7 +497,7 @@ void FileMapInfo::close() {
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.
bool FileMapInfo::remap_shared_readonly_as_readwrite() {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
if (!si->_read_only) {
// the space is already readwrite so we are done
return true;
@ -367,7 +525,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
// Map the whole region at once, assumed to be allocated contiguously.
ReservedSpace FileMapInfo::reserve_shared_memory() {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
char* requested_addr = si->_base;
size_t size = FileMapInfo::shared_spaces_size();
@ -389,7 +547,7 @@ ReservedSpace FileMapInfo::reserve_shared_memory() {
static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
char* FileMapInfo::map_region(int i) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used;
size_t alignment = os::vm_allocation_granularity();
size_t size = align_size_up(used, alignment);
@ -415,7 +573,7 @@ char* FileMapInfo::map_region(int i) {
// Unmap a memory region in the address space.
void FileMapInfo::unmap_region(int i) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used;
size_t size = align_size_up(used, os::vm_allocation_granularity());
if (!os::unmap_memory(si->_base, size)) {
@ -432,12 +590,21 @@ void FileMapInfo::assert_mark(bool check) {
FileMapInfo* FileMapInfo::_current_info = NULL;
SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL;
int FileMapInfo::_classpath_entry_table_size = 0;
size_t FileMapInfo::_classpath_entry_size = 0x1234baad;
bool FileMapInfo::_validating_classpath_entry_table = false;
// Open the shared archive file, read and validate the header
// information (version, boot classpath, etc.). If initialization
// fails, shared spaces are disabled and the file is closed. [See
// fail_continue.]
//
// Validation of the archive is done in two steps:
//
// [1] validate_header() - done here. This checks the header, including _paths_misc_info.
// [2] validate_classpath_entry_table - this is done later, because the table is in the RW
// region of the archive, which is not mapped yet.
bool FileMapInfo::initialize() {
assert(UseSharedSpaces, "UseSharedSpaces expected.");
@ -451,92 +618,66 @@ bool FileMapInfo::initialize() {
}
init_from_file(_fd);
if (!validate()) {
if (!validate_header()) {
return false;
}
SharedReadOnlySize = _header._space[0]._capacity;
SharedReadWriteSize = _header._space[1]._capacity;
SharedMiscDataSize = _header._space[2]._capacity;
SharedMiscCodeSize = _header._space[3]._capacity;
SharedReadOnlySize = _header->_space[0]._capacity;
SharedReadWriteSize = _header->_space[1]._capacity;
SharedMiscDataSize = _header->_space[2]._capacity;
SharedMiscCodeSize = _header->_space[3]._capacity;
return true;
}
bool FileMapInfo::validate() {
if (_header._version != current_version()) {
fail_continue("The shared archive file is the wrong version.");
bool FileMapInfo::FileMapHeader::validate() {
if (_version != current_version()) {
FileMapInfo::fail_continue("The shared archive file is the wrong version.");
return false;
}
if (_header._magic != (int)0xf00baba2) {
fail_continue("The shared archive file has a bad magic number.");
if (_magic != (int)0xf00baba2) {
FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
return false;
}
char header_version[JVM_IDENT_MAX];
get_header_version(header_version);
if (strncmp(_header._jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
fail_continue("The shared archive file was created by a different"
" version or build of HotSpot.");
return false;
}
if (_header._obj_alignment != ObjectAlignmentInBytes) {
fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
" does not equal the current ObjectAlignmentInBytes of %d.",
_header._obj_alignment, ObjectAlignmentInBytes);
return false;
}
// Cannot verify interpreter yet, as it can only be created after the GC
// heap has been initialized.
if (_header._num_jars >= JVM_SHARED_JARS_MAX) {
fail_continue("Too many jar files to share.");
return false;
}
// Build checks on classpath and jar files
int num_jars_now = 0;
ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
for ( ; cpe != NULL; cpe = cpe->next()) {
if (cpe->is_jar_file()) {
if (num_jars_now < _header._num_jars) {
// Jar file - verify timestamp and file size.
struct stat st;
const char *path = cpe->name();
if (os::stat(path, &st) != 0) {
fail_continue("Unable to open jar file %s.", path);
return false;
}
if (_header._jar[num_jars_now]._timestamp != st.st_mtime ||
_header._jar[num_jars_now]._filesize != st.st_size) {
fail_continue("A jar file is not the one used while building"
" the shared archive file.");
return false;
}
}
++num_jars_now;
} else {
// If directories appear in boot classpath, they must be empty to
// avoid having to verify each individual class file.
const char* name = ((ClassPathDirEntry*)cpe)->name();
if (!os::dir_is_empty(name)) {
fail_continue("Boot classpath directory %s is not empty.", name);
return false;
}
if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
if (TraceClassPaths) {
tty->print_cr("Expected: %s", header_version);
tty->print_cr("Actual: %s", _jvm_ident);
}
FileMapInfo::fail_continue("The shared archive file was created by a different"
" version or build of HotSpot");
return false;
}
if (num_jars_now < _header._num_jars) {
fail_continue("The number of jar files in the boot classpath is"
" less than the number the shared archive was created with.");
if (_obj_alignment != ObjectAlignmentInBytes) {
FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
" does not equal the current ObjectAlignmentInBytes of %d.",
_obj_alignment, ObjectAlignmentInBytes);
return false;
}
return true;
}
bool FileMapInfo::validate_header() {
bool status = _header->validate();
if (status) {
if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) {
if (!PrintSharedArchiveAndExit) {
fail_continue("shared class paths mismatch (hint: enable -XX:+TraceClassPaths to diagnose the failure)");
status = false;
}
}
}
if (_paths_misc_info != NULL) {
FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
_paths_misc_info = NULL;
}
return status;
}
// The following method is provided to see whether a given pointer
// falls in the mapped shared space.
// Param:
@ -545,8 +686,8 @@ bool FileMapInfo::validate() {
// True if the p is within the mapped shared space, otherwise, false.
bool FileMapInfo::is_in_shared_space(const void* p) {
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
if (p >= _header._space[i]._base &&
p < _header._space[i]._base + _header._space[i]._used) {
if (p >= _header->_space[i]._base &&
p < _header->_space[i]._base + _header->_space[i]._used) {
return true;
}
}
@ -557,7 +698,7 @@ bool FileMapInfo::is_in_shared_space(const void* p) {
void FileMapInfo::print_shared_spaces() {
gclog_or_tty->print_cr("Shared Spaces:");
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
gclog_or_tty->print(" %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
shared_region_name[i],
si->_base, si->_base + si->_used);
@ -570,9 +711,9 @@ void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
if (map_info) {
map_info->fail_continue(msg);
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
if (map_info->_header._space[i]._base != NULL) {
if (map_info->_header->_space[i]._base != NULL) {
map_info->unmap_region(i);
map_info->_header._space[i]._base = NULL;
map_info->_header->_space[i]._base = NULL;
}
}
} else if (DumpSharedSpaces) {

View File

@ -37,30 +37,55 @@
// misc data (block offset table, string table, symbols, dictionary, etc.)
// tag(666)
static const int JVM_SHARED_JARS_MAX = 128;
static const int JVM_SPACENAME_MAX = 128;
static const int JVM_IDENT_MAX = 256;
static const int JVM_ARCH_MAX = 12;
class Metaspace;
class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
public:
const char *_name;
time_t _timestamp; // jar timestamp, 0 if is directory
long _filesize; // jar file size, -1 if is directory
bool is_dir() {
return _filesize == -1;
}
};
class FileMapInfo : public CHeapObj<mtInternal> {
private:
friend class ManifestStream;
enum {
_invalid_version = -1,
_current_version = 1
_current_version = 2
};
bool _file_open;
int _fd;
long _file_offset;
private:
static SharedClassPathEntry* _classpath_entry_table;
static int _classpath_entry_table_size;
static size_t _classpath_entry_size;
static bool _validating_classpath_entry_table;
// FileMapHeader describes the shared space data in the file to be
// mapped. This structure gets written to a file. It is not a class, so
// that the compilers don't add any compiler-private data to it.
struct FileMapHeader {
public:
struct FileMapHeaderBase : public CHeapObj<mtClass> {
virtual bool validate() = 0;
virtual void populate(FileMapInfo* info, size_t alignment) = 0;
};
struct FileMapHeader : FileMapHeaderBase {
// Use data() and data_size() to memcopy to/from the FileMapHeader. We need to
// avoid read/writing the C++ vtable pointer.
static size_t data_size();
char* data() {
return ((char*)this) + sizeof(FileMapHeaderBase);
}
int _magic; // identify file type.
int _version; // (from enum, above.)
size_t _alignment; // how shared archive should be aligned
@ -78,44 +103,64 @@ private:
// The following fields are all sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
// invoked with.
char _arch[JVM_ARCH_MAX]; // architecture
char _jvm_ident[JVM_IDENT_MAX]; // identifier for jvm
int _num_jars; // Number of jars in bootclasspath
// Per jar file data: timestamp, size.
// The _paths_misc_info is a variable-size structure that records "miscellaneous"
// information during dumping. It is generated and validated by the
// SharedPathsMiscInfo class. See SharedPathsMiscInfo.hpp and sharedClassUtil.hpp for
// detailed description.
//
// The _paths_misc_info data is stored as a byte array in the archive file header,
// immediately after the _header field. This information is used only when
// checking the validity of the archive and is deallocated after the archive is loaded.
//
// Note that the _paths_misc_info does NOT include information for JAR files
// that existed during dump time. Their information is stored in _classpath_entry_table.
int _paths_misc_info_size;
// The following is a table of all the class path entries that were used
// during dumping. At run time, we require these files to exist and have the same
// size/modification time, or else the archive will refuse to load.
//
// All of these entries must be JAR files. The dumping process would fail if a non-empty
// directory was specified in the classpaths. If an empty directory was specified
// it is checked by the _paths_misc_info as described above.
//
// FIXME -- if JAR files in the tail of the list were specified but not used during dumping,
// they should be removed from this table, to save space and to avoid spurious
// loading failures during runtime.
int _classpath_entry_table_size;
size_t _classpath_entry_size;
SharedClassPathEntry* _classpath_entry_table;
virtual bool validate();
virtual void populate(FileMapInfo* info, size_t alignment);
};
FileMapHeader * _header;
struct {
time_t _timestamp; // jar timestamp.
long _filesize; // jar file size.
} _jar[JVM_SHARED_JARS_MAX];
} _header;
const char* _full_path;
char* _paths_misc_info;
static FileMapInfo* _current_info;
bool init_from_file(int fd);
void align_file_position();
bool validate_header_impl();
public:
FileMapInfo() {
_file_offset = 0;
_file_open = false;
_header._version = _invalid_version;
}
FileMapInfo();
~FileMapInfo();
static int current_version() { return _current_version; }
void populate_header(size_t alignment);
bool validate();
bool validate_header();
void invalidate();
int version() { return _header._version; }
size_t alignment() { return _header._alignment; }
size_t space_capacity(int i) { return _header._space[i]._capacity; }
char* region_base(int i) { return _header._space[i]._base; }
struct FileMapHeader* header() { return &_header; }
static void set_current_info(FileMapInfo* info) {
CDS_ONLY(_current_info = info;)
}
int version() { return _header->_version; }
size_t alignment() { return _header->_alignment; }
size_t space_capacity(int i) { return _header->_space[i]._capacity; }
char* region_base(int i) { return _header->_space[i]._base; }
struct FileMapHeader* header() { return _header; }
static FileMapInfo* current_info() {
CDS_ONLY(return _current_info;)
@ -146,7 +191,7 @@ public:
// Errors.
static void fail_stop(const char *msg, ...);
void fail_continue(const char *msg, ...);
static void fail_continue(const char *msg, ...);
// Return true if given address is in the mapped shared space.
bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
@ -160,6 +205,22 @@ public:
// Stop CDS sharing and unmap CDS regions.
static void stop_sharing_and_unmap(const char* msg);
static void allocate_classpath_entry_table();
bool validate_classpath_entry_table();
static SharedClassPathEntry* shared_classpath(int index) {
char* p = (char*)_classpath_entry_table;
p += _classpath_entry_size * index;
return (SharedClassPathEntry*)p;
}
static const char* shared_classpath_name(int index) {
return shared_classpath(index)->_name;
}
static int get_number_of_share_classpaths() {
return _classpath_entry_table_size;
}
};
#endif // SHARE_VM_MEMORY_FILEMAP_HPP

View File

@ -708,15 +708,18 @@ void GenCollectedHeap::collect(GCCause::Cause cause) {
#else // INCLUDE_ALL_GCS
ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
} else if (cause == GCCause::_wb_young_gc) {
// minor collection for WhiteBox API
collect(cause, 0);
} else {
#ifdef ASSERT
if (cause == GCCause::_scavenge_alot) {
// minor collection only
collect(cause, 0);
} else {
// Stop-the-world full collection
collect(cause, n_gens() - 1);
}
if (cause == GCCause::_scavenge_alot) {
// minor collection only
collect(cause, 0);
} else {
// Stop-the-world full collection
collect(cause, n_gens() - 1);
}
#else
// Stop-the-world full collection
collect(cause, n_gens() - 1);

View File

@ -79,6 +79,12 @@ class MetadataFactory : AllStatic {
// Deallocation method for metadata
template <class T>
static void free_metadata(ClassLoaderData* loader_data, T md) {
if (DumpSharedSpaces) {
// FIXME: the freeing code is buggy, especially when PrintSharedSpaces is enabled.
// Disable for now -- this means if you specify bad classes in your classlist you
// may have wasted space inside the archive.
return;
}
if (md != NULL) {
assert(loader_data != NULL, "shouldn't pass null");
int size = md->size();

View File

@ -413,6 +413,7 @@ static bool should_commit_large_pages_when_reserving(size_t bytes) {
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
#if INCLUDE_CDS
// This allocates memory with mmap. For DumpSharedspaces, try to reserve
// configurable address, generally at the top of the Java heap so other
// memory addresses don't conflict.
@ -428,7 +429,9 @@ VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
}
MetaspaceShared::set_shared_rs(&_rs);
} else {
} else
#endif
{
bool large_pages = should_commit_large_pages_when_reserving(bytes);
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
@ -2939,11 +2942,14 @@ void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address
// between the lower base and higher address.
address lower_base;
address higher_address;
#if INCLUDE_CDS
if (UseSharedSpaces) {
higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
(address)(metaspace_base + compressed_class_space_size()));
lower_base = MIN2(metaspace_base, cds_base);
} else {
} else
#endif
{
higher_address = metaspace_base + compressed_class_space_size();
lower_base = metaspace_base;
@ -2964,6 +2970,7 @@ void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address
}
}
#if INCLUDE_CDS
// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
@ -2974,6 +2981,7 @@ bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cd
(address)(metaspace_base + compressed_class_space_size()));
return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
#endif
// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
@ -2993,6 +3001,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
large_pages,
requested_addr, 0);
if (!metaspace_rs.is_reserved()) {
#if INCLUDE_CDS
if (UseSharedSpaces) {
size_t increment = align_size_up(1*G, _reserve_alignment);
@ -3007,7 +3016,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
_reserve_alignment, large_pages, addr, 0);
}
}
#endif
// If no successful allocation then try to allocate the space anywhere. If
// that fails then OOM doom. At this point we cannot try allocating the
// metaspace as if UseCompressedClassPointers is off because too much
@ -3026,12 +3035,13 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
// If we got here then the metaspace got allocated.
MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
#if INCLUDE_CDS
// Verify that we can use shared spaces. Otherwise, turn off CDS.
if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
FileMapInfo::stop_sharing_and_unmap(
"Could not allocate metaspace at a compatible address");
}
#endif
set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
UseSharedSpaces ? (address)cds_base : 0);
@ -3115,6 +3125,7 @@ void Metaspace::global_initialize() {
MetaspaceShared::set_max_alignment(max_alignment);
if (DumpSharedSpaces) {
#if INCLUDE_CDS
SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
@ -3152,23 +3163,22 @@ void Metaspace::global_initialize() {
}
Universe::set_narrow_klass_shift(0);
#endif
#endif // _LP64
#endif // INCLUDE_CDS
} else {
#if INCLUDE_CDS
// If using shared space, open the file that contains the shared space
// and map in the memory before initializing the rest of metaspace (so
// the addresses don't conflict)
address cds_address = NULL;
if (UseSharedSpaces) {
FileMapInfo* mapinfo = new FileMapInfo();
memset(mapinfo, 0, sizeof(FileMapInfo));
// Open the shared archive file, read and validate the header. If
// initialization fails, shared spaces [UseSharedSpaces] are
// disabled and the file is closed.
// Map in spaces now also
if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
FileMapInfo::set_current_info(mapinfo);
cds_total = FileMapInfo::shared_spaces_size();
cds_address = (address)mapinfo->region_base(0);
} else {
@ -3176,21 +3186,23 @@ void Metaspace::global_initialize() {
"archive file not closed or shared spaces not disabled.");
}
}
#endif // INCLUDE_CDS
#ifdef _LP64
// If UseCompressedClassPointers is set then allocate the metaspace area
// above the heap and above the CDS area (if it exists).
if (using_class_space()) {
if (UseSharedSpaces) {
#if INCLUDE_CDS
char* cds_end = (char*)(cds_address + cds_total);
cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
#endif
} else {
char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
allocate_metaspace_compressed_klass_ptrs(base, 0);
}
}
#endif
#endif // _LP64
// Initialize these before initializing the VirtualSpaceList
_first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
@ -3380,6 +3392,10 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
assert(!SafepointSynchronize::is_at_safepoint()
|| Thread::current()->is_VM_thread(), "should be the VM thread");
if (DumpSharedSpaces && PrintSharedSpaces) {
record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
}
MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
@ -3417,8 +3433,9 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
if (result == NULL) {
report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
}
space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
if (PrintSharedSpaces) {
space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
}
// Zero initialize.
Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
@ -3517,15 +3534,55 @@ const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
assert(DumpSharedSpaces, "sanity");
AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
int byte_size = (int)word_size * HeapWordSize;
AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
if (_alloc_record_head == NULL) {
_alloc_record_head = _alloc_record_tail = rec;
} else {
} else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
_alloc_record_tail->_next = rec;
_alloc_record_tail = rec;
} else {
// slow linear search, but this doesn't happen that often, and only when dumping
for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
if (old->_ptr == ptr) {
assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
int remain_bytes = old->_byte_size - byte_size;
assert(remain_bytes >= 0, "sanity");
old->_type = type;
if (remain_bytes == 0) {
delete(rec);
} else {
address remain_ptr = address(ptr) + byte_size;
rec->_ptr = remain_ptr;
rec->_byte_size = remain_bytes;
rec->_type = MetaspaceObj::DeallocatedType;
rec->_next = old->_next;
old->_byte_size = byte_size;
old->_next = rec;
}
return;
}
}
assert(0, "reallocating a freed pointer that was not recorded");
}
}
void Metaspace::record_deallocation(void* ptr, size_t word_size) {
assert(DumpSharedSpaces, "sanity");
for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
if (rec->_ptr == ptr) {
assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
rec->_type = MetaspaceObj::DeallocatedType;
return;
}
}
assert(0, "deallocating a pointer that was not recorded");
}
void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

View File

@ -171,9 +171,10 @@ class Metaspace : public CHeapObj<mtClass> {
static const MetaspaceTracer* tracer() { return _tracer; }
private:
// This is used by DumpSharedSpaces only, where only _vsm is used. So we will
// These 2 methods are used by DumpSharedSpaces only, where only _vsm is used. So we will
// maintain a single list for now.
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
void record_deallocation(void* ptr, size_t word_size);
#ifdef _LP64
static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);

View File

@ -26,6 +26,7 @@
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
@ -47,6 +48,10 @@ int MetaspaceShared::_max_alignment = 0;
ReservedSpace* MetaspaceShared::_shared_rs = NULL;
bool MetaspaceShared::_link_classes_made_progress;
bool MetaspaceShared::_check_classes_made_progress;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.
@ -446,6 +451,23 @@ void VM_PopulateDumpSharedSpace::doit() {
SystemDictionary::classes_do(collect_classes);
tty->print_cr("Number of classes %d", _global_klass_objects->length());
{
int num_type_array = 0, num_obj_array = 0, num_inst = 0;
for (int i = 0; i < _global_klass_objects->length(); i++) {
Klass* k = _global_klass_objects->at(i);
if (k->oop_is_instance()) {
num_inst ++;
} else if (k->oop_is_objArray()) {
num_obj_array ++;
} else {
assert(k->oop_is_typeArray(), "sanity");
num_type_array ++;
}
}
tty->print_cr(" instance classes = %5d", num_inst);
tty->print_cr(" obj array classes = %5d", num_obj_array);
tty->print_cr(" type array classes = %5d", num_type_array);
}
// Update all the fingerprints in the shared methods.
tty->print("Calculating fingerprints ... ");
@ -611,38 +633,58 @@ void VM_PopulateDumpSharedSpace::doit() {
#undef fmt_space
}
static void link_shared_classes(Klass* obj, TRAPS) {
void MetaspaceShared::link_one_shared_class(Klass* obj, TRAPS) {
Klass* k = obj;
if (k->oop_is_instance()) {
InstanceKlass* ik = (InstanceKlass*) k;
// Link the class to cause the bytecodes to be rewritten and the
// cpcache to be created.
if (ik->init_state() < InstanceKlass::linked) {
ik->link_class(THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting");
// cpcache to be created. Class verification is done according
// to the -Xverify setting.
_link_classes_made_progress |= try_link_class(ik, THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
}
}
void MetaspaceShared::check_one_shared_class(Klass* k) {
if (k->oop_is_instance() && InstanceKlass::cast(k)->check_sharing_error_state()) {
_check_classes_made_progress = true;
}
}
void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
// We need to iterate because verification may cause additional classes
// to be loaded.
do {
_link_classes_made_progress = false;
SystemDictionary::classes_do(link_one_shared_class, THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
} while (_link_classes_made_progress);
if (_has_error_classes) {
// Mark all classes whose super class or interfaces failed verification.
do {
// Not completely sure if we need to do this iteratively. Anyway,
// we should come here only if there are unverifiable classes, which
// shouldn't happen in normal cases. So better safe than sorry.
_check_classes_made_progress = false;
SystemDictionary::classes_do(check_one_shared_class);
} while (_check_classes_made_progress);
if (IgnoreUnverifiableClassesDuringDump) {
// This is useful when running JCK or SQE tests. You should not
// enable this when running real apps.
SystemDictionary::remove_classes_in_error_state();
} else {
tty->print_cr("Please remove the unverifiable classes from your class list and try again");
exit(1);
}
}
}
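
link_and_cleanup_shared_classes() walks the system dictionary repeatedly until no class changes state, because linking one class can load further classes that then also need linking; a second, similar loop propagates the error state once unverifiable classes exist. A small sketch of that fixpoint pattern over a plain worklist, with hypothetical types standing in for the dictionary and Klass:

#include <vector>

// Hypothetical stand-ins: each "class" knows whether it is linked yet and
// which other classes linking it would pull in.
struct FakeClass {
  bool linked = false;
  std::vector<FakeClass*> pulls_in;   // loaded as a side effect of linking
};

// Returns true if the class's status changed, mirroring try_link_class().
static bool try_link(FakeClass* c, std::vector<FakeClass*>& all) {
  if (c->linked) return false;
  c->linked = true;
  for (FakeClass* dep : c->pulls_in) all.push_back(dep);  // newly visible work
  return true;
}

// Keep sweeping the (growing) set until a full pass makes no progress.
static void link_all(std::vector<FakeClass*>& all) {
  bool made_progress;
  do {
    made_progress = false;
    for (size_t i = 0; i < all.size(); i++) {   // index loop: vector may grow
      made_progress |= try_link(all[i], all);
    }
  } while (made_progress);
}

int main() {
  FakeClass a, b, c;
  a.pulls_in = { &b };       // linking a loads b
  b.pulls_in = { &c };       // linking b loads c
  std::vector<FakeClass*> all = { &a };
  link_all(all);             // ends with a, b and c all linked
  return c.linked ? 0 : 1;
}
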
// Support for a simple checksum of the contents of the class list
// file to prevent trivial tampering. The algorithm matches that in
// the MakeClassList program used by the J2SE build process.
#define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe))
static jlong
jsum(jlong start, const char *buf, const int len)
{
jlong h = start;
char *p = (char *)buf, *e = p + len;
while (p < e) {
char c = *p++;
if (c <= ' ') {
/* Skip spaces and control characters */
continue;
}
h = 31 * h + c;
}
return h;
void MetaspaceShared::prepare_for_dumping() {
ClassLoader::initialize_shared_path();
FileMapInfo::allocate_classpath_entry_table();
}
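
The removed jsum() routine computed a Java-style rolling hash (h = 31*h + c) over the class names, skipping whitespace and control characters, and compared it against a value stored on the last '#' line of the class list; the new format drops that check. A standalone sketch of the same checksum, useful only for experimenting with old-format lists; the seed is the one shown above, everything else (names, output format) is illustrative:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Seed from the removed code; arithmetic is done modulo 2^64, which matches
// the wrap-around behaviour of the original jlong computation for ASCII input.
static const uint64_t JSUM_SEED = UINT64_C(0xcafebabebabecafe);

// Same recurrence as the removed jsum(): h = 31*h + c, ignoring bytes <= ' '.
uint64_t jsum(uint64_t h, const char* buf, size_t len) {
  for (size_t i = 0; i < len; i++) {
    char c = buf[i];
    if (c <= ' ') continue;            // skip spaces and control characters
    h = 31 * h + c;
  }
  return h;
}

int main() {
  uint64_t h = JSUM_SEED;
  const char* lines[] = { "java/lang/Object", "java/lang/String" };
  for (const char* line : lines) {
    h = jsum(h, line, strlen(line));
  }
  // The old format stored the checksum as two hex halves on a '#' line.
  printf("# %08x%08x\n", (unsigned)(h >> 32), (unsigned)(h & 0xffffffff));
  return 0;
}
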
// Preload classes from a list, populate the shared spaces and dump to a
@ -651,72 +693,112 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
TraceTime timer("Dump Shared Spaces", TraceStartupTime);
ResourceMark rm;
tty->print_cr("Allocated shared space: %d bytes at " PTR_FORMAT,
MetaspaceShared::shared_rs()->size(),
MetaspaceShared::shared_rs()->base());
// Preload classes to be shared.
// Should use some os:: method rather than fopen() here. aB.
// Construct the path to the class list (in jre/lib)
// Walk up two directories from the location of the VM and
// optionally tack on "lib" (depending on platform)
char class_list_path[JVM_MAXPATHLEN];
os::jvm_path(class_list_path, sizeof(class_list_path));
for (int i = 0; i < 3; i++) {
char *end = strrchr(class_list_path, *os::file_separator());
if (end != NULL) *end = '\0';
}
int class_list_path_len = (int)strlen(class_list_path);
if (class_list_path_len >= 3) {
if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) {
strcat(class_list_path, os::file_separator());
strcat(class_list_path, "lib");
const char* class_list_path;
if (SharedClassListFile == NULL) {
// Construct the path to the class list (in jre/lib)
// Walk up two directories from the location of the VM and
// optionally tack on "lib" (depending on platform)
char class_list_path_str[JVM_MAXPATHLEN];
os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
for (int i = 0; i < 3; i++) {
char *end = strrchr(class_list_path_str, *os::file_separator());
if (end != NULL) *end = '\0';
}
int class_list_path_len = (int)strlen(class_list_path_str);
if (class_list_path_len >= 3) {
if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
strcat(class_list_path_str, os::file_separator());
strcat(class_list_path_str, "lib");
}
}
strcat(class_list_path_str, os::file_separator());
strcat(class_list_path_str, "classlist");
class_list_path = class_list_path_str;
} else {
class_list_path = SharedClassListFile;
}
strcat(class_list_path, os::file_separator());
strcat(class_list_path, "classlist");
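
When no class list file is given explicitly, the default list is located by taking the path of the JVM library, stripping the last three path components, and appending "lib" (if not already present) plus "classlist"; for example, /opt/jdk/jre/lib/amd64/server/libjvm.so becomes /opt/jdk/jre/lib/classlist on a Linux-style layout of that era. A tiny sketch of just that trimming step, with a hypothetical helper name and simplified buffer handling:

#include <cstdio>
#include <cstring>

// Hypothetical helper: derive the default classlist path from the libjvm
// path. 'sep' stands in for os::file_separator(); the caller must supply a
// buffer large enough for the appended components.
void default_classlist_path(char* path, char sep) {
  // Drop the last three path components (e.g. "amd64/server/libjvm.so").
  for (int i = 0; i < 3; i++) {
    char* end = strrchr(path, sep);
    if (end != NULL) *end = '\0';
  }
  char sepstr[2] = { sep, '\0' };
  // Tack on "lib" unless the path already ends with it.
  size_t len = strlen(path);
  if (len >= 3 && strcmp(path + len - 3, "lib") != 0) {
    strcat(path, sepstr);
    strcat(path, "lib");
  }
  strcat(path, sepstr);
  strcat(path, "classlist");
}

int main() {
  char p[256] = "/opt/jdk/jre/lib/amd64/server/libjvm.so";
  default_classlist_path(p, '/');
  printf("%s\n", p);   // prints /opt/jdk/jre/lib/classlist
  return 0;
}
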
int class_count = 0;
GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
// sun.io.Converters
static const char obj_array_sig[] = "[[Ljava/lang/Object;";
SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
// java.util.HashMap
static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
tty->print_cr("Loading classes to share ...");
_has_error_classes = false;
class_count += preload_and_dump(class_list_path, class_promote_order,
THREAD);
if (ExtraSharedClassListFile) {
class_count += preload_and_dump(ExtraSharedClassListFile, class_promote_order,
THREAD);
}
tty->print_cr("Loading classes to share: done.");
if (PrintSharedSpaces) {
tty->print_cr("Shared spaces: preloaded %d classes", class_count);
}
// Rewrite and link classes
tty->print_cr("Rewriting and linking classes ...");
// Link any classes which got missed. This would happen if we have loaded classes that
// were not explicitly specified in the classlist. E.g., if an interface implemented by class K
// fails verification, all other interfaces that were not specified in the classlist but
// are implemented by K are not verified.
link_and_cleanup_shared_classes(CATCH);
tty->print_cr("Rewriting and linking classes: done");
// Create and dump the shared spaces. Everything so far is loaded
// with the null class loader.
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
VMThread::execute(&op);
// Since various initialization steps have been undone by this process,
// it is not reasonable to continue running a java process.
exit(0);
}
int MetaspaceShared::preload_and_dump(const char * class_list_path,
GrowableArray<Klass*>* class_promote_order,
TRAPS) {
FILE* file = fopen(class_list_path, "r");
char class_name[256];
int class_count = 0;
if (file != NULL) {
jlong computed_jsum = JSUM_SEED;
jlong file_jsum = 0;
char class_name[256];
int class_count = 0;
GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
// sun.io.Converters
static const char obj_array_sig[] = "[[Ljava/lang/Object;";
SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
// java.util.HashMap
static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
tty->print("Loading classes to share ... ");
while ((fgets(class_name, sizeof class_name, file)) != NULL) {
if (*class_name == '#') {
jint fsh, fsl;
if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
}
if (*class_name == '#') { // comment
continue;
}
// Remove trailing newline
size_t name_len = strlen(class_name);
class_name[name_len-1] = '\0';
computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1);
if (class_name[name_len-1] == '\n') {
class_name[name_len-1] = '\0';
}
// Got a class name - load it.
TempNewSymbol class_name_symbol = SymbolTable::new_permanent_symbol(class_name, THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
Klass* klass = SystemDictionary::resolve_or_null(class_name_symbol,
THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class.");
CLEAR_PENDING_EXCEPTION;
if (klass != NULL) {
if (PrintSharedSpaces && Verbose && WizardMode) {
tty->print_cr("Shared spaces preloaded: %s", class_name);
}
InstanceKlass* ik = InstanceKlass::cast(klass);
// Should be class load order as per -XX:+TraceClassLoadingPreorder
@ -726,52 +808,14 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
// cpcache to be created. The linking is done as soon as classes
// are loaded in order that the related data structures (klass and
// cpCache) are located together.
if (ik->init_state() < InstanceKlass::linked) {
ik->link_class(THREAD);
guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
}
// TODO: Resolve klasses in constant pool
ik->constants()->resolve_class_constants(THREAD);
try_link_class(ik, THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
class_count++;
} else {
if (PrintSharedSpaces && Verbose && WizardMode) {
tty->cr();
tty->print_cr(" Preload failed: %s", class_name);
}
//tty->print_cr("Preload failed: %s", class_name);
}
file_jsum = 0; // Checksum must be on last line of file
}
if (computed_jsum != file_jsum) {
tty->cr();
tty->print_cr("Preload failed: checksum of class list was incorrect.");
exit(1);
}
tty->print_cr("done. ");
if (PrintSharedSpaces) {
tty->print_cr("Shared spaces: preloaded %d classes", class_count);
}
// Rewrite and unlink classes.
tty->print("Rewriting and linking classes ... ");
// Link any classes which got missed. (It's not quite clear why
// they got missed.) This iteration would be unsafe if we weren't
// single-threaded at this point; however we can't do it on the VM
// thread because it requires object allocation.
SystemDictionary::classes_do(link_shared_classes, CATCH);
tty->print_cr("done. ");
// Create and dump the shared spaces. Everything so far is loaded
// with the null class loader.
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
VMThread::execute(&op);
} else {
char errmsg[JVM_MAXPATHLEN];
os::lasterror(errmsg, JVM_MAXPATHLEN);
@ -779,11 +823,39 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
exit(1);
}
// Since various initialization steps have been undone by this process,
// it is not reasonable to continue running a java process.
exit(0);
return class_count;
}
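
The reworked preload_and_dump(const char*, ...) reads the list one line at a time, skips '#' comment lines, strips the trailing newline, and resolves each name with the null class loader; the old checksum handling is gone. A stripped-down sketch of just the parsing loop, with hypothetical names, resolution replaced by a print, and a blank-line guard added for robustness:

#include <cstdio>
#include <cstring>

// Parse a class list in the new format: one class name per line,
// '#' starts a comment line, no trailing checksum line.
int read_class_list(const char* path) {
  FILE* file = fopen(path, "r");
  if (file == NULL) {
    perror(path);
    return -1;
  }
  char class_name[256];
  int count = 0;
  while (fgets(class_name, sizeof class_name, file) != NULL) {
    if (*class_name == '#') continue;          // comment line
    size_t len = strlen(class_name);
    if (len > 0 && class_name[len - 1] == '\n') {
      class_name[len - 1] = '\0';              // strip trailing newline
    }
    if (class_name[0] == '\0') continue;       // ignore blank lines
    // In the VM this is where the name would be resolved and linked.
    printf("would load: %s\n", class_name);
    count++;
  }
  fclose(file);
  return count;
}

int main(int argc, char** argv) {
  if (argc < 2) { fprintf(stderr, "usage: %s <classlist>\n", argv[0]); return 2; }
  int n = read_class_list(argv[1]);
  if (n >= 0) printf("%d class names\n", n);
  return n >= 0 ? 0 : 1;
}
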
// Returns true if the class's status has changed
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
assert(DumpSharedSpaces, "should only be called during dumping");
if (ik->init_state() < InstanceKlass::linked) {
bool saved = BytecodeVerificationLocal;
if (!SharedClassUtil::is_shared_boot_class(ik)) {
// The verification decision is based on BytecodeVerificationRemote
// for non-system classes. Since we are using the NULL classloader
// to load non-system classes during dumping, we need to temporarily
// change BytecodeVerificationLocal to be the same as
// BytecodeVerificationRemote. Note that this can also cause the parent
// system classes to be verified. The extra overhead is acceptable during
// dumping.
BytecodeVerificationLocal = BytecodeVerificationRemote;
}
ik->link_class(THREAD);
if (HAS_PENDING_EXCEPTION) {
ResourceMark rm;
tty->print_cr("Preload Error: Verification failed for %s",
ik->external_name());
CLEAR_PENDING_EXCEPTION;
ik->set_in_error_state();
_has_error_classes = true;
}
BytecodeVerificationLocal = saved;
return true;
} else {
return false;
}
}
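
try_link_class() saves BytecodeVerificationLocal, overrides it with BytecodeVerificationRemote for non-boot classes, links, records any verification failure, and restores the flag before returning. A small sketch of the same save-and-restore idea expressed as an RAII guard; the flag and all names here are hypothetical, and HotSpot itself uses the explicit saved/restore form shown above:

#include <cstdio>

// Hypothetical globals standing in for the -Xverify related flags.
static bool VerifyLocal  = false;
static bool VerifyRemote = true;

// Saves a bool flag on construction and restores it on scope exit,
// so every return path puts the flag back.
class FlagSaver {
 public:
  explicit FlagSaver(bool& flag) : _flag(flag), _saved(flag) {}
  ~FlagSaver() { _flag = _saved; }
 private:
  bool& _flag;
  bool  _saved;
};

bool link_with_remote_verification(bool is_boot_class) {
  FlagSaver saver(VerifyLocal);          // restored automatically on return
  if (!is_boot_class) {
    VerifyLocal = VerifyRemote;          // verify like a remotely loaded class
  }
  printf("linking with VerifyLocal=%d\n", VerifyLocal);
  return true;                           // "status changed"
}

int main() {
  link_with_remote_verification(/*is_boot_class=*/false);
  printf("after the call VerifyLocal=%d\n", VerifyLocal);  // back to 0
  return 0;
}
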
// Closure for serializing initialization data in from a data area
// (ptr_array) read from the shared file.
@ -867,7 +939,8 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
(_rw_base = mapinfo->map_region(rw)) != NULL &&
(_md_base = mapinfo->map_region(md)) != NULL &&
(_mc_base = mapinfo->map_region(mc)) != NULL &&
(image_alignment == (size_t)max_alignment())) {
(image_alignment == (size_t)max_alignment()) &&
mapinfo->validate_classpath_entry_table()) {
// Success (no need to do anything)
return true;
} else {
@ -884,7 +957,7 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
// If -Xshare:on is specified, print out the error message and exit VM,
// otherwise, set UseSharedSpaces to false and continue.
if (RequireSharedSpaces) {
vm_exit_during_initialization("Unable to use shared archive.", NULL);
vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
} else {
FLAG_SET_DEFAULT(UseSharedSpaces, false);
}
@ -984,6 +1057,20 @@ void MetaspaceShared::initialize_shared_spaces() {
// Close the mapinfo file
mapinfo->close();
if (PrintSharedArchiveAndExit) {
if (PrintSharedDictionary) {
tty->print_cr("\nShared classes:\n");
SystemDictionary::print_shared(false);
}
if (_archive_loading_failed) {
tty->print_cr("archive is invalid");
vm_exit(1);
} else {
tty->print_cr("archive is valid");
vm_exit(0);
}
}
}
// JVM/TI RedefineClasses() support:

View File

@ -38,7 +38,10 @@ class MetaspaceShared : AllStatic {
// CDS support
static ReservedSpace* _shared_rs;
static int _max_alignment;
static bool _link_classes_made_progress;
static bool _check_classes_made_progress;
static bool _has_error_classes;
static bool _archive_loading_failed;
public:
enum {
vtbl_list_size = 17, // number of entries in the shared space vtable list.
@ -67,7 +70,11 @@ class MetaspaceShared : AllStatic {
NOT_CDS(return 0);
}
static void prepare_for_dumping() NOT_CDS_RETURN;
static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
static int preload_and_dump(const char * class_list_path,
GrowableArray<Klass*>* class_promote_order,
TRAPS) NOT_CDS_RETURN;
static ReservedSpace* shared_rs() {
CDS_ONLY(return _shared_rs);
@ -78,6 +85,9 @@ class MetaspaceShared : AllStatic {
CDS_ONLY(_shared_rs = rs;)
}
static void set_archive_loading_failed() {
_archive_loading_failed = true;
}
static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
static void initialize_shared_spaces() NOT_CDS_RETURN;
@ -97,5 +107,10 @@ class MetaspaceShared : AllStatic {
static bool remap_shared_readonly_as_readwrite() NOT_CDS_RETURN_(true);
static void print_shared_spaces();
static bool try_link_class(InstanceKlass* ik, TRAPS);
static void link_one_shared_class(Klass* obj, TRAPS);
static void check_one_shared_class(Klass* obj);
static void link_and_cleanup_shared_classes(TRAPS);
};
#endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP

View File

@ -26,6 +26,9 @@
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#if INCLUDE_CDS
#include "classfile/sharedClassUtil.hpp"
#endif
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@ -34,6 +37,7 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genRemSet.hpp"
@ -239,8 +243,9 @@ void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
void initialize_basic_type_klass(Klass* k, TRAPS) {
Klass* ok = SystemDictionary::Object_klass();
if (UseSharedSpaces) {
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
assert(k->super() == ok, "u3");
k->restore_unshareable_info(CHECK);
k->restore_unshareable_info(loader_data, Handle(), CHECK);
} else {
k->initialize_supers(ok, CHECK);
}
@ -666,6 +671,10 @@ jint universe_init() {
SymbolTable::create_table();
StringTable::create_table();
ClassLoader::create_package_info_table();
if (DumpSharedSpaces) {
MetaspaceShared::prepare_for_dumping();
}
}
return JNI_OK;
@ -1155,6 +1164,11 @@ bool universe_post_init() {
MemoryService::add_metaspace_memory_pools();
MemoryService::set_universe_heap(Universe::_collectedHeap);
#if INCLUDE_CDS
if (UseSharedSpaces) {
SharedClassUtil::initialize(CHECK_false);
}
#endif
return true;
}

View File

@ -186,8 +186,9 @@ void ArrayKlass::remove_unshareable_info() {
set_component_mirror(NULL);
}
void ArrayKlass::restore_unshareable_info(TRAPS) {
Klass::restore_unshareable_info(CHECK);
void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
assert(loader_data == ClassLoaderData::the_null_class_loader_data(), "array classes belong to null loader");
Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
// Klass recreates the component mirror also
}

View File

@ -137,7 +137,7 @@ class ArrayKlass: public Klass {
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
virtual void restore_unshareable_info(TRAPS);
virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
// Printing
void print_on(outputStream* st) const;

View File

@ -2303,12 +2303,14 @@ void InstanceKlass::remove_unshareable_info() {
array_klasses_do(remove_unshareable_in_class);
}
void restore_unshareable_in_class(Klass* k, TRAPS) {
k->restore_unshareable_info(CHECK);
static void restore_unshareable_in_class(Klass* k, TRAPS) {
// Array classes have null protection domain.
// --> see ArrayKlass::complete_create_array_klass()
k->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
}
void InstanceKlass::restore_unshareable_info(TRAPS) {
Klass::restore_unshareable_info(CHECK);
void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
instanceKlassHandle ik(THREAD, this);
Array<Method*>* methods = ik->methods();
@ -2334,6 +2336,38 @@ void InstanceKlass::restore_unshareable_info(TRAPS) {
ik->array_klasses_do(restore_unshareable_in_class, CHECK);
}
// returns true IFF is_in_error_state() has been changed as a result of this call.
bool InstanceKlass::check_sharing_error_state() {
assert(DumpSharedSpaces, "should only be called during dumping");
bool old_state = is_in_error_state();
if (!is_in_error_state()) {
bool bad = false;
for (InstanceKlass* sup = java_super(); sup; sup = sup->java_super()) {
if (sup->is_in_error_state()) {
bad = true;
break;
}
}
if (!bad) {
Array<Klass*>* interfaces = transitive_interfaces();
for (int i = 0; i < interfaces->length(); i++) {
Klass* iface = interfaces->at(i);
if (InstanceKlass::cast(iface)->is_in_error_state()) {
bad = true;
break;
}
}
}
if (bad) {
set_in_error_state();
}
}
return (old_state != is_in_error_state());
}
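
check_sharing_error_state() marks a class as being in error if its super chain or any of its transitive interfaces is already in error, and reports whether the flag changed so the caller's do/while loop in metaspaceShared.cpp can sweep again. A compact sketch of that propagation with hypothetical record types; the interface list here stands in for the transitive interface array:

#include <vector>

// Hypothetical class records: an error flag plus supertype links.
struct ClassRec {
  bool in_error = false;
  ClassRec* super = nullptr;
  std::vector<ClassRec*> interfaces;
};

// Mirrors check_sharing_error_state(): returns true if the flag changed.
bool propagate_error(ClassRec* k) {
  if (k->in_error) return false;
  bool bad = false;
  for (ClassRec* s = k->super; s != nullptr && !bad; s = s->super) {
    bad = s->in_error;
  }
  for (size_t i = 0; i < k->interfaces.size() && !bad; i++) {
    bad = k->interfaces[i]->in_error;
  }
  if (bad) k->in_error = true;
  return bad;
}

// Sweep in arbitrary order until nothing changes; the dump-time code does
// the same, largely as a defensive measure (see the comment in the diff).
void mark_all(std::vector<ClassRec*>& classes) {
  bool changed;
  do {
    changed = false;
    for (ClassRec* k : classes) changed |= propagate_error(k);
  } while (changed);
}

int main() {
  ClassRec base, mid, sub;
  base.in_error = true;                           // base failed verification
  mid.super = &base;
  sub.super = &mid;
  std::vector<ClassRec*> all = { &sub, &mid, &base };
  mark_all(all);
  return (sub.in_error && mid.in_error) ? 0 : 1;  // both get marked
}
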
static void clear_all_breakpoints(Method* m) {
m->clear_all_breakpoints();
}

View File

@ -980,6 +980,13 @@ class InstanceKlass: public Klass {
u2 idnum_allocated_count() const { return _idnum_allocated_count; }
public:
void set_in_error_state() {
assert(DumpSharedSpaces, "only call this when dumping archive");
_init_state = initialization_error;
}
bool check_sharing_error_state();
private:
// initialization state
#ifdef ASSERT
@ -1038,7 +1045,7 @@ private:
public:
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
virtual void restore_unshareable_info(TRAPS);
virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
// jvm support
jint compute_modifier_flags(TRAPS) const;

View File

@ -184,6 +184,7 @@ Klass::Klass() {
// The klass doesn't have any references at this point.
clear_modified_oops();
clear_accumulated_modified_oops();
_shared_class_path_index = -1;
}
jint Klass::array_layout_helper(BasicType etype) {
@ -500,13 +501,12 @@ void Klass::remove_unshareable_info() {
set_class_loader_data(NULL);
}
void Klass::restore_unshareable_info(TRAPS) {
void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
TRACE_INIT_ID(this);
// If an exception happened during CDS restore, some of these fields may already be
// set. We leave the class on the CLD list, even if incomplete, so that we don't
// modify the CLD list outside a safepoint.
if (class_loader_data() == NULL) {
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
// Restore class_loader_data to the null class loader data
set_class_loader_data(loader_data);
@ -515,12 +515,12 @@ void Klass::restore_unshareable_info(TRAPS) {
loader_data->add_class(this);
}
// Recreate the class mirror. The protection_domain is always null for
// boot loader, for now.
// Recreate the class mirror.
// Only recreate it if not present. A previous restore attempt may have
// hit an OOM after the mirror was created; if so, keep the existing mirror.
if (java_mirror() == NULL) {
java_lang_Class::create_mirror(this, Handle(NULL), Handle(NULL), CHECK);
Handle loader = loader_data->class_loader();
java_lang_Class::create_mirror(this, loader, protection_domain, CHECK);
}
}

View File

@ -147,6 +147,16 @@ class Klass : public Metadata {
jbyte _modified_oops; // Card Table Equivalent (YC/CMS support)
jbyte _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
private:
// This is an index into FileMapHeader::_classpath_entry_table[], to
// associate this class with the JAR file where it's loaded from during
// dump time. If a class is not loaded from the shared archive, this field is
// -1.
jshort _shared_class_path_index;
friend class SharedClassUtil;
protected:
// Constructor
Klass();
@ -253,6 +263,15 @@ class Klass : public Metadata {
void clear_accumulated_modified_oops() { _accumulated_modified_oops = 0; }
bool has_accumulated_modified_oops() { return _accumulated_modified_oops == 1; }
int shared_classpath_index() const {
return _shared_class_path_index;
};
void set_shared_classpath_index(int index) {
_shared_class_path_index = index;
};
protected: // internal accessors
void set_subklass(Klass* s);
void set_next_sibling(Klass* s);
@ -422,7 +441,7 @@ class Klass : public Metadata {
public:
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
virtual void restore_unshareable_info(TRAPS);
virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
protected:
// computes the subtype relationship

Some files were not shown because too many files have changed in this diff.