Merge
This commit is contained in:
commit a6bd52da93

.hgtags
make
src/hotspot
  cpu
    aarch64
    ppc
    s390: s390.ad, templateTable_s390.cpp, vm_version_ext_s390.cpp, vm_version_ext_s390.hpp, vm_version_s390.cpp, vm_version_s390.hpp
    x86
  os
    aix
    bsd
    linux
    posix
    solaris
    windows
  os_cpu/linux_x86/gc/z: zBackingFile_linux_x86.cpp, zBackingFile_linux_x86.hpp, zBackingPath_linux_x86.cpp, zBackingPath_linux_x86.hpp, zPhysicalMemoryBacking_linux_x86.cpp, zPhysicalMemoryBacking_linux_x86.hpp
  share
    aot
    ci
    classfile: classLoader.cpp, classLoader.hpp, javaClasses.cpp, javaClasses.hpp, stringTable.cpp, systemDictionary.hpp, vmSymbols.hpp
    code
    compiler
    gc
      epsilon
      g1
      shared: barrierSet.hpp, oopStorage.cpp, oopStorage.hpp, oopStorage.inline.hpp, oopStorageParState.hpp, referenceProcessor.cpp, referenceProcessorPhaseTimes.cpp, space.inline.hpp, taskqueue.hpp, taskqueue.inline.hpp
      z
    include
    interpreter
    jfr
    memory
    oops
    opto: gcm.cpp, ifnode.cpp, loopPredicate.cpp, loopTransform.cpp, loopnode.cpp, loopnode.hpp, loopopts.cpp, memnode.cpp, superword.cpp
    prims
8 .hgtags

@@ -490,9 +490,9 @@ a11c1cb542bbd1671d25b85efe7d09b983c48525 jdk-11+15
02934b0d661b82b7fe1052a04998d2091352e08d jdk-11+16
64e4b1686141e57a681936a8283983341484676e jdk-11+17
e1b3def126240d5433902f3cb0e91a4c27f6db50 jdk-11+18
fb8b3f4672774e15654958295558a1af1b576919 jdk-11+19
36ca515343e00b021dcfc902e986d26ec994a2e5 jdk-11+19
c9cd3ec6a0ebaf373b6ff0071c396cc657f7c4bd jdk-12+0
95aad0c785e497f1bade3955c4e4a677b629fa9d jdk-12+0
9816d7cc655e53ba081f938b656e31971b8f097a jdk-11+20
14708e1acdc3974f4539027cbbcfa6d69f83cf51 jdk-11+21
00b16d0457e43d23f6ca5ade6b243edce62750a0 jdk-12+1
69b438908512d3dfef5852c6a843a5778333a309 jdk-12+2
make/Docs.gmk

@@ -274,6 +274,8 @@ define SetupApiDocsGenerationBody
  $1_INDIRECT_EXPORTS := $$(call FindTransitiveIndirectDepsForModules, $$($1_MODULES))
  $1_ALL_MODULES := $$(sort $$($1_MODULES) $$($1_INDIRECT_EXPORTS))

  $1_JAVA_ARGS := -Dextlink.spec.version=$$(VERSION_SPECIFICATION)

  ifeq ($$(ENABLE_FULL_DOCS), true)
    # Tell the ModuleGraph taglet to generate html links to soon-to-be-created
    # png files with module graphs.

@@ -327,9 +329,10 @@ define SetupApiDocsGenerationBody
  )

  ifeq ($$($1_JAVADOC_CMD), )
    $1_JAVADOC_CMD := $$(JAVA) -Djava.awt.headless=true \
        -Dextlink.spec.version=$$(VERSION_SPECIFICATION) $$($1_JAVA_ARGS) \
    $1_JAVADOC_CMD := $$(JAVA) -Djava.awt.headless=true $$($1_JAVA_ARGS) \
        $$(NEW_JAVADOC)
  else
    $1_OPTIONS += $$(addprefix -J, $$($1_JAVA_ARGS))
  endif

  $1_VARDEPS := $$($1_JAVA_ARGS) $$($1_OPTIONS) $$(MODULES_SOURCE_PATH) \

@@ -463,7 +466,9 @@ $(eval $(call SetupApiDocsGeneration, JAVASE_API, \
# Setup generation of the reference Java SE API documentation (javadoc + modulegraph)

# The reference javadoc is just the same as javase, but using the BootJDK javadoc
# and a stable set of javadoc options.
# and a stable set of javadoc options. Typically it is used for generating
# diffs between the reference javadoc and a javadoc bundle of a specific build
# generated in the same way.

$(eval $(call SetupApiDocsGeneration, REFERENCE_API, \
    MODULES := $(JAVASE_MODULES), \

@@ -497,10 +502,9 @@ $(eval $(call SetupCopyFiles, COPY_GLOBAL_RESOURCES, \
JDK_INDEX_TARGETS += $(COPY_GLOBAL_RESOURCES)

# Copy the legal notices distributed with the docs bundle
DOCS_LEGAL_NOTICES := jquery.md jszip.md pako.md
$(eval $(call SetupCopyFiles, COPY_DOCS_LEGAL_NOTICES, \
    SRC := $(TOPDIR)/src/jdk.javadoc/share/legal, \
    FILES := $(DOCS_LEGAL_NOTICES), \
    FILES := $(wildcard $(TOPDIR)/src/jdk.javadoc/share/legal/*), \
    DEST := $(DOCS_OUTPUTDIR)/legal, \
))
JDK_INDEX_TARGETS += $(COPY_DOCS_LEGAL_NOTICES)
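A note on the else branch above: $$($1_JAVA_ARGS) is mapped onto javadoc's standard -J prefix via $$(addprefix -J, ...), which is how javadoc accepts flags destined for the JVM it runs in. So the extlink.spec.version system property reaches the tool whether javadoc is launched directly via $$(JAVA) or through an external javadoc command.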
make/Main.gmk

@@ -487,6 +487,14 @@ test-image-jdk-jtreg-native:
	+($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f JtregNativeJdk.gmk \
	    test-image-jdk-jtreg-native)

build-test-hotspot-jtreg-graal:
	+($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f JtregGraalUnit.gmk \
	    build-test-hotspot-jtreg-graal)

test-image-hotspot-jtreg-graal:
	+($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f JtregGraalUnit.gmk \
	    test-image-hotspot-jtreg-graal)

run-test:
	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f RunTests.gmk run-test TEST="$(TEST)")

@@ -523,6 +531,7 @@ ALL_TARGETS += prepare-test-image build-test-hotspot-jtreg-native \
    test-image-hotspot-jtreg-native build-test-jdk-jtreg-native \
    test-image-jdk-jtreg-native build-test-lib build-test-failure-handler \
    test-failure-handler test-image-failure-handler test-image-hotspot-gtest \
    test-image-hotspot-jtreg-graal build-test-hotspot-jtreg-graal \
    run-test exploded-run-test

################################################################################

@@ -853,10 +862,14 @@ else
build-test-jdk-jtreg-native: buildtools-jdk java.base-libs

build-test-hotspot-jtreg-graal: exploded-image-optimize

test-image-hotspot-jtreg-native: build-test-hotspot-jtreg-native

test-image-jdk-jtreg-native: build-test-jdk-jtreg-native

test-image-hotspot-jtreg-graal: build-test-hotspot-jtreg-graal

test-image-hotspot-gtest: hotspot

test-hotspot-internal: exploded-image

@@ -993,6 +1006,10 @@ docs-image: docs-jdk
# If not already set, set the JVM specific targets to build the test image
JVM_TEST_IMAGE_TARGETS ?= test-image-hotspot-jtreg-native test-image-hotspot-gtest

ifeq ($(INCLUDE_GRAAL), true)
  JVM_TEST_IMAGE_TARGETS += test-image-hotspot-jtreg-graal
endif

# This target builds the test image
test-image: prepare-test-image \
    test-image-jdk-jtreg-native test-image-failure-handler \
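The new run-test target gives a stable top-level entry point: an invocation such as make run-test TEST="tier1" simply re-invokes make on RunTests.gmk with the TEST variable passed through, exactly as the recipe above shows.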
make/RunTests.gmk

@@ -535,6 +535,8 @@ define SetupRunJtregTestBody
    $1_JTREG_BASIC_OPTIONS += -cpa:$$(JIB_JAR)
  endif

  $1_JTREG_BASIC_OPTIONS += -e:TEST_IMAGE_GRAAL_DIR=${TEST_IMAGE_DIR}/hotspot/jtreg/graal

  ifneq ($$(JTREG_FAILURE_HANDLER_OPTIONS), )
    $1_JTREG_LAUNCHER_OPTIONS += -Djava.library.path="$(JTREG_FAILURE_HANDLER_DIR)"
  endif
make/autoconf/hotspot.m4

@@ -201,8 +201,6 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
    ENABLE_AOT="true"
  elif test "x$enable_aot" = "xno"; then
    ENABLE_AOT="false"
    AC_MSG_CHECKING([if aot should be enabled])
    AC_MSG_RESULT([no, forced])
  else
    AC_MSG_ERROR([Invalid value for --enable-aot: $enable_aot])
  fi

@@ -228,7 +226,7 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
    else
      ENABLE_AOT="false"
      if test "x$enable_aot" = "xyes"; then
        AC_MSG_ERROR([AOT is currently only supported on x86_64. Remove --enable-aot.])
        AC_MSG_ERROR([AOT is currently only supported on x86_64 and aarch64. Remove --enable-aot.])
      fi
    fi
  fi

@@ -374,57 +372,106 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
    fi
  fi

  # Only enable jvmci on x86_64, sparcv9 and aarch64.
  if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
     test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \
     test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
    JVM_FEATURES_jvmci="jvmci"
  else
  AC_MSG_CHECKING([if jvmci module jdk.internal.vm.ci should be built])
  # Check if jvmci is disabled
  DISABLE_JVMCI=`$ECHO $DISABLED_JVM_FEATURES | $GREP jvmci`
  if test "x$DISABLE_JVMCI" = "xjvmci"; then
    AC_MSG_RESULT([no, forced])
    JVM_FEATURES_jvmci=""
    INCLUDE_JVMCI="false"
  else
    # Only enable jvmci on x86_64, sparcv9 and aarch64
    if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
       test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \
       test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
      AC_MSG_RESULT([yes])
      JVM_FEATURES_jvmci="jvmci"
      INCLUDE_JVMCI="true"
    else
      AC_MSG_RESULT([no])
      JVM_FEATURES_jvmci=""
      INCLUDE_JVMCI="false"
      if HOTSPOT_CHECK_JVM_FEATURE(jvmci); then
        AC_MSG_ERROR([JVMCI is currently not supported on this platform.])
      fi
    fi
  fi

  AC_MSG_CHECKING([if jdk.internal.vm.compiler should be built])
  if HOTSPOT_CHECK_JVM_FEATURE(graal); then
    AC_MSG_RESULT([yes, forced])
    if test "x$JVM_FEATURES_jvmci" != "xjvmci" ; then
      AC_MSG_ERROR([Specified JVM feature 'graal' requires feature 'jvmci'])
    fi
    INCLUDE_GRAAL="true"
  AC_SUBST(INCLUDE_JVMCI)

  AC_MSG_CHECKING([if graal module jdk.internal.vm.compiler should be built])
  # Check if graal is disabled
  DISABLE_GRAAL=`$ECHO $DISABLED_JVM_FEATURES | $GREP graal`
  if test "x$DISABLE_GRAAL" = "xgraal"; then
    AC_MSG_RESULT([no, forced])
    JVM_FEATURES_graal=""
    INCLUDE_GRAAL="false"
  else
    # By default enable graal build on x64 or where AOT is available.
    # graal build requires jvmci.
    if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \
       (test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
        test "x$ENABLE_AOT" = "xtrue") ; then
      AC_MSG_RESULT([yes])
    if HOTSPOT_CHECK_JVM_FEATURE(graal); then
      AC_MSG_RESULT([yes, forced])
      if test "x$JVM_FEATURES_jvmci" != "xjvmci" ; then
        AC_MSG_ERROR([Specified JVM feature 'graal' requires feature 'jvmci'])
      fi
      JVM_FEATURES_graal="graal"
      INCLUDE_GRAAL="true"
    else
      AC_MSG_RESULT([no])
      JVM_FEATURES_graal=""
      INCLUDE_GRAAL="false"
      # By default enable graal build on x64 or where AOT is available.
      # graal build requires jvmci.
      if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \
         (test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
          test "x$ENABLE_AOT" = "xtrue") ; then
        AC_MSG_RESULT([yes])
        JVM_FEATURES_graal="graal"
        INCLUDE_GRAAL="true"
      else
        AC_MSG_RESULT([no])
        JVM_FEATURES_graal=""
        INCLUDE_GRAAL="false"
      fi
    fi
  fi

  AC_SUBST(INCLUDE_GRAAL)

  # Disable aot with '--with-jvm-features=-aot'
  DISABLE_AOT=`$ECHO $DISABLED_JVM_FEATURES | $GREP aot`
  if test "x$DISABLE_AOT" = "xaot"; then
    ENABLE_AOT="false"
  fi

  AC_MSG_CHECKING([if aot should be enabled])
  if test "x$ENABLE_AOT" = "xtrue"; then
    if test "x$enable_aot" = "xyes"; then
      AC_MSG_RESULT([yes, forced])
    if test "x$JVM_FEATURES_graal" != "xgraal"; then
      if test "x$enable_aot" = "xyes" || HOTSPOT_CHECK_JVM_FEATURE(aot); then
        AC_MSG_RESULT([yes, forced])
        AC_MSG_ERROR([Specified JVM feature 'aot' requires feature 'graal'])
      else
        AC_MSG_RESULT([no])
      fi
      JVM_FEATURES_aot=""
      ENABLE_AOT="false"
    else
      AC_MSG_RESULT([yes])
      if test "x$enable_aot" = "xyes" || HOTSPOT_CHECK_JVM_FEATURE(aot); then
        AC_MSG_RESULT([yes, forced])
      else
        AC_MSG_RESULT([yes])
      fi
      JVM_FEATURES_aot="aot"
    fi
    JVM_FEATURES_aot="aot"
  else
    if test "x$enable_aot" = "xno"; then
    if test "x$enable_aot" = "xno" || test "x$DISABLE_AOT" = "xaot"; then
      AC_MSG_RESULT([no, forced])
    else
      AC_MSG_RESULT([no])
    fi
    JVM_FEATURES_aot=""
    if HOTSPOT_CHECK_JVM_FEATURE(aot); then
      AC_MSG_ERROR([To enable aot, you must use --enable-aot])
    fi
  fi

  AC_SUBST(ENABLE_AOT)

  if test "x$OPENJDK_TARGET_CPU" = xarm ; then
    # Default to use link time optimizations on minimal on arm
    JVM_FEATURES_link_time_opt="link-time-opt"
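Net effect of the reworked logic: the jdk.internal.vm.ci and jdk.internal.vm.compiler modules are now governed by explicit INCLUDE_JVMCI and INCLUDE_GRAAL variables, and each feature can be switched off with the existing feature syntax, for example configure --with-jvm-features=-graal, which feeds the DISABLED_JVM_FEATURES/$GREP checks above. The dependency chain is unchanged: aot still requires graal, which in turn requires jvmci.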
57 make/autoconf/lib-tests.m4 (Normal file)

@@ -0,0 +1,57 @@
#
# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.  Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

###############################################################################
#
# Check for graalunit libs, needed for running graalunit tests.
#
AC_DEFUN_ONCE([LIB_TESTS_SETUP_GRAALUNIT],
[
  AC_ARG_WITH(graalunit-lib, [AS_HELP_STRING([--with-graalunit-lib],
      [specify location of 3rd party libraries used by Graal unit tests])])

  GRAALUNIT_LIB=
  if test "x${with_graalunit_lib}" != x; then
    AC_MSG_CHECKING([for graalunit libs])
    if test "x${with_graalunit_lib}" = xno; then
      AC_MSG_RESULT([disabled, graalunit tests can not be run])
    elif test "x${with_graalunit_lib}" = xyes; then
      AC_MSG_RESULT([not specified])
      AC_MSG_ERROR([You must specify the path to 3rd party libraries used by Graal unit tests])
    else
      GRAALUNIT_LIB="${with_graalunit_lib}"
      if test ! -d "${GRAALUNIT_LIB}"; then
        AC_MSG_RESULT([no])
        AC_MSG_ERROR([Could not find graalunit 3rd party libraries as specified. (${with_graalunit_lib})])
      else
        AC_MSG_RESULT([$GRAALUNIT_LIB])
      fi
    fi
  fi

  BASIC_FIXUP_PATH([GRAALUNIT_LIB])
  AC_SUBST(GRAALUNIT_LIB)
])
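Usage follows the standard configure pattern: pass --with-graalunit-lib=<directory> pointing at the third-party jars the Graal unit tests need (JUnit, ASM and friends, as enumerated in JtregGraalUnit.gmk further down). The resulting GRAALUNIT_LIB value lands in spec.gmk via AC_SUBST, as the spec.gmk.in hunk below shows.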
make/autoconf/libraries.m4

@@ -32,6 +32,7 @@ m4_include([lib-freetype.m4])
m4_include([lib-std.m4])
m4_include([lib-x11.m4])
m4_include([lib-fontconfig.m4])
m4_include([lib-tests.m4])

################################################################################
# Determine which libraries are needed for this configuration

@@ -101,6 +102,7 @@ AC_DEFUN_ONCE([LIB_SETUP_LIBRARIES],
  LIB_SETUP_BUNDLED_LIBS
  LIB_SETUP_MISC_LIBS
  LIB_SETUP_SOLARIS_STLPORT
  LIB_TESTS_SETUP_GRAALUNIT

  if test "x$TOOLCHAIN_TYPE" = xsolstudio; then
    GLOBAL_LIBS="-lc"
make/autoconf/spec.gmk.in

@@ -347,6 +347,7 @@ LIBFFI_LIBS:=@LIBFFI_LIBS@
LIBFFI_CFLAGS:=@LIBFFI_CFLAGS@
ENABLE_LIBFFI_BUNDLING:=@ENABLE_LIBFFI_BUNDLING@
LIBFFI_LIB_FILE:=@LIBFFI_LIB_FILE@
GRAALUNIT_LIB := @GRAALUNIT_LIB@

PACKAGE_PATH=@PACKAGE_PATH@

@@ -813,6 +814,7 @@ PNG_CFLAGS:=@PNG_CFLAGS@

INCLUDE_SA=@INCLUDE_SA@
INCLUDE_GRAAL=@INCLUDE_GRAAL@
INCLUDE_JVMCI=@INCLUDE_JVMCI@

OS_VERSION_MAJOR:=@OS_VERSION_MAJOR@
OS_VERSION_MINOR:=@OS_VERSION_MINOR@
make/common/Modules.gmk

@@ -205,7 +205,14 @@ ifeq ($(INCLUDE_SA), false)
endif

################################################################################
# Filter out Graal specific modules if Graal build is disabled
# Filter out jvmci specific modules if jvmci is disabled

ifeq ($(INCLUDE_JVMCI), false)
  MODULES_FILTER += jdk.internal.vm.ci
endif

################################################################################
# Filter out Graal specific modules if Graal is disabled

ifeq ($(INCLUDE_GRAAL), false)
  MODULES_FILTER += jdk.internal.vm.compiler
make/conf/jib-profiles.js

@@ -239,7 +239,7 @@ var getJibProfilesCommon = function (input, data) {

    // These are the base settings for all the main build profiles.
    common.main_profile_base = {
        dependencies: ["boot_jdk", "gnumake", "jtreg", "jib"],
        dependencies: ["boot_jdk", "gnumake", "jtreg", "jib", "autoconf"],
        default_make_targets: ["product-bundles", "test-bundles"],
        configure_args: concat(["--enable-jtreg-failure-handler"],
            "--with-exclude-translations=de,es,fr,it,ko,pt_BR,sv,ca,tr,cs,sk,ja_JP_A,ja_JP_HA,ja_JP_HI,ja_JP_I",

@@ -378,7 +378,7 @@ var getJibProfilesProfiles = function (input, common, data) {
        "linux-x64": {
            target_os: "linux",
            target_cpu: "x64",
            dependencies: ["devkit", "autoconf", "graphviz", "pandoc"],
            dependencies: ["devkit", "graphviz", "pandoc", "graalunit_lib"],
            configure_args: concat(common.configure_args_64bit,
                "--enable-full-docs", "--with-zlib=system"),
            default_make_targets: ["docs-bundles"],

@@ -388,7 +388,7 @@ var getJibProfilesProfiles = function (input, common, data) {
            target_os: "linux",
            target_cpu: "x86",
            build_cpu: "x64",
            dependencies: ["devkit", "autoconf"],
            dependencies: ["devkit"],
            configure_args: concat(common.configure_args_32bit,
                "--with-jvm-variants=minimal,server", "--with-zlib=system"),
        },

@@ -396,7 +396,7 @@ var getJibProfilesProfiles = function (input, common, data) {
        "macosx-x64": {
            target_os: "macosx",
            target_cpu: "x64",
            dependencies: ["devkit", "autoconf"],
            dependencies: ["devkit", "graalunit_lib"],
            configure_args: concat(common.configure_args_64bit, "--with-zlib=system",
                "--with-macosx-version-max=10.9.0"),
        },

@@ -404,7 +404,7 @@ var getJibProfilesProfiles = function (input, common, data) {
        "solaris-x64": {
            target_os: "solaris",
            target_cpu: "x64",
            dependencies: ["devkit", "autoconf", "cups"],
            dependencies: ["devkit", "cups"],
            configure_args: concat(common.configure_args_64bit,
                "--with-zlib=system", "--enable-dtrace"),
        },

@@ -412,7 +412,7 @@ var getJibProfilesProfiles = function (input, common, data) {
        "solaris-sparcv9": {
            target_os: "solaris",
            target_cpu: "sparcv9",
            dependencies: ["devkit", "autoconf", "cups"],
            dependencies: ["devkit", "cups"],
            configure_args: concat(common.configure_args_64bit,
                "--with-zlib=system", "--enable-dtrace"),
        },

@@ -420,7 +420,7 @@ var getJibProfilesProfiles = function (input, common, data) {
        "windows-x64": {
            target_os: "windows",
            target_cpu: "x64",
            dependencies: ["devkit", "autoconf"],
            dependencies: ["devkit", "graalunit_lib"],
            configure_args: concat(common.configure_args_64bit),
        },

@@ -428,7 +428,7 @@ var getJibProfilesProfiles = function (input, common, data) {
            target_os: "windows",
            target_cpu: "x86",
            build_cpu: "x64",
            dependencies: ["devkit", "autoconf"],
            dependencies: ["devkit"],
            configure_args: concat(common.configure_args_32bit),
        },

@@ -436,7 +436,7 @@ var getJibProfilesProfiles = function (input, common, data) {
            target_os: "linux",
            target_cpu: "aarch64",
            build_cpu: "x64",
            dependencies: ["devkit", "autoconf", "build_devkit", "cups"],
            dependencies: ["devkit", "build_devkit", "cups"],
            configure_args: [
                "--openjdk-target=aarch64-linux-gnu", "--with-freetype=bundled",
                "--disable-warnings-as-errors", "--with-cpu-port=aarch64",

@@ -447,7 +447,7 @@ var getJibProfilesProfiles = function (input, common, data) {
            target_os: "linux",
            target_cpu: "aarch64",
            build_cpu: "x64",
            dependencies: ["devkit", "autoconf", "build_devkit", "cups", "headless_stubs"],
            dependencies: ["devkit", "build_devkit", "cups", "headless_stubs"],
            configure_args: [
                "--with-cpu-port=arm64",
                "--with-jvm-variants=server",

@@ -460,7 +460,7 @@ var getJibProfilesProfiles = function (input, common, data) {
            target_os: "linux",
            target_cpu: "arm",
            build_cpu: "x64",
            dependencies: ["devkit", "autoconf", "build_devkit", "cups"],
            dependencies: ["devkit", "build_devkit", "cups"],
            configure_args: [
                "--openjdk-target=arm-linux-gnueabihf", "--with-freetype=bundled",
                "--with-abi-profile=arm-vfp-hflt", "--disable-warnings-as-errors"

@@ -471,7 +471,7 @@ var getJibProfilesProfiles = function (input, common, data) {
            target_os: "linux",
            target_cpu: "arm",
            build_cpu: "x64",
            dependencies: ["devkit", "autoconf", "build_devkit", "cups"],
            dependencies: ["devkit", "build_devkit", "cups"],
            configure_args: [
                "--with-jvm-variants=minimal1,client",
                "--with-x=" + input.get("devkit", "install_path") + "/arm-linux-gnueabihf/libc/usr/X11R6-PI",

@@ -972,6 +972,14 @@ var getJibProfilesDependencies = function (input, common) {
            configure_args: "",
        },

        graalunit_lib: {
            organization: common.organization,
            ext: "zip",
            revision: "619_Apr_12_2018",
            module: "graalunit-lib",
            configure_args: "--with-graalunit-lib=" + input.get("graalunit_lib", "install_path"),
            environment_name: "GRAALUNIT_LIB"
        },
    };

    // Need to add a value for the Visual Studio tools variable to make
make/data/publicsuffixlist/VERSION

@@ -1,2 +1,2 @@
Github: https://raw.githubusercontent.com/publicsuffix/list/2225db8d9f4a2a27ec697c883360632fa0c16261/public_suffix_list.dat
Date: 2018-05-09
Date: 2018-05-24
make/hotspot/lib/JvmFeatures.gmk

@@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -109,6 +109,7 @@ ifneq ($(call check-jvm-feature, cds), true)
      classListParser.cpp \
      classLoaderExt.cpp \
      filemap.cpp \
      heapShared.cpp \
      metaspaceShared.cpp \
      metaspaceShared_$(HOTSPOT_TARGET_CPU).cpp \
      metaspaceShared_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
make/hotspot/symbols/symbols-unix

@@ -136,6 +136,7 @@ JVM_IHashCode
JVM_InitProperties
JVM_InitStackTraceElement
JVM_InitStackTraceElementArray
JVM_InitializeFromArchive
JVM_InternString
JVM_Interrupt
JVM_InvokeMethod
make/scripts/compare.sh

@@ -1385,26 +1385,22 @@ if [ "$SKIP_DEFAULT" != "true" ]; then
        echo "  $OTHER_SPARKLE_DIR"
    fi

    if [ -d "$OTHER/images" ]; then
        OTHER_SEC_DIR="$OTHER/images"
    else
        OTHER_SEC_DIR="$OTHER/tmp"
    fi
    if [ -f "$THIS_SEC_DIR/sec-bin.zip" ]; then
    THIS_SEC_DIR="$THIS/images"
    OTHER_SEC_DIR="$OTHER/images"
    if [ -f "$THIS_SEC_DIR/sec-bin.zip" ] && [ -f "$OTHER_SEC_DIR/sec-bin.zip" ]; then
        OTHER_SEC_BIN="$OTHER_SEC_DIR/sec-bin.zip"
        THIS_SEC_DIR="$THIS/images"
        THIS_SEC_BIN="$THIS_SEC_DIR/sec-bin.zip"
    fi
    if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
        if [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then
            JGSS_WINDOWS_BIN="jgss-windows-x64-bin.zip"
        else
            JGSS_WINDOWS_BIN="jgss-windows-i586-bin.zip"
    if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
        if [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then
            JGSS_WINDOWS_BIN="jgss-windows-x64-bin.zip"
        else
            JGSS_WINDOWS_BIN="jgss-windows-i586-bin.zip"
        fi
        OTHER_SEC_WINDOWS_BIN="$OTHER_SEC_DIR/sec-windows-bin.zip"
        OTHER_JGSS_WINDOWS_BIN="$OTHER_SEC_DIR/$JGSS_WINDOWS_BIN"
        THIS_SEC_WINDOWS_BIN="$THIS_SEC_DIR/sec-windows-bin.zip"
        THIS_JGSS_WINDOWS_BIN="$THIS_SEC_DIR/$JGSS_WINDOWS_BIN"
    fi
        OTHER_SEC_WINDOWS_BIN="$OTHER_SEC_DIR/sec-windows-bin.zip"
        OTHER_JGSS_WINDOWS_BIN="$OTHER_SEC_DIR/$JGSS_WINDOWS_BIN"
        THIS_SEC_WINDOWS_BIN="$THIS_SEC_DIR/sec-windows-bin.zip"
        THIS_JGSS_WINDOWS_BIN="$THIS_SEC_DIR/$JGSS_WINDOWS_BIN"
    fi

    if [ -d "$THIS/images/docs" ] && [ -d "$OTHER/images/docs" ]; then
144 make/test/JtregGraalUnit.gmk (Normal file)

@@ -0,0 +1,144 @@
#
# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.  Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

################################################################################
# This file builds Graal component of the JTReg tests for Hotspot.
# It also covers the test-image part, where the built files are copied to the
# test image.
################################################################################

default: all

include $(SPEC)
include MakeBase.gmk
include SetupJavaCompilers.gmk

TARGETS_BUILD :=
TARGETS_IMAGE :=
TARGETS_EXTRA_LIB :=

ifeq ($(INCLUDE_GRAAL), true)
  ifneq ($(GRAALUNIT_LIB), )
    SRC_DIR := $(TOPDIR)/src/jdk.internal.vm.compiler/share/classes
    TEST_DIR := $(TOPDIR)/test/hotspot/jtreg/compiler/graalunit
    COMPILE_OUTPUTDIR := $(SUPPORT_OUTPUTDIR)/test/graalunit
    LIB_OUTPUTDIR := $(TEST_IMAGE_DIR)/hotspot/jtreg/graal

    ### Copy 3rd party libs
    $(eval $(call SetupCopyFiles, COPY_GRAALUNIT_LIBS, \
        FILES := $(wildcard $(GRAALUNIT_LIB)/*.jar), \
        DEST := $(LIB_OUTPUTDIR), \
    ))

    TARGETS_EXTRA_LIB += $(COPY_GRAALUNIT_LIBS)

    ### Compile and build graalunit tests
    $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_TESTS, \
        SETUP := GENERATE_USINGJDKBYTECODE, \
        SRC := \
            $(SRC_DIR)/jdk.internal.vm.compiler.collections.test/src \
            $(SRC_DIR)/org.graalvm.compiler.api.directives.test/src \
            $(SRC_DIR)/org.graalvm.compiler.api.test/src \
            $(SRC_DIR)/org.graalvm.compiler.asm.aarch64.test/src \
            $(SRC_DIR)/org.graalvm.compiler.asm.amd64.test/src \
            $(SRC_DIR)/org.graalvm.compiler.asm.sparc.test/src \
            $(SRC_DIR)/org.graalvm.compiler.asm.test/src \
            $(SRC_DIR)/org.graalvm.compiler.core.amd64.test/src \
            $(SRC_DIR)/org.graalvm.compiler.core.test/src \
            $(SRC_DIR)/org.graalvm.compiler.debug.test/src \
            $(SRC_DIR)/org.graalvm.compiler.graph.test/src \
            $(SRC_DIR)/org.graalvm.compiler.hotspot.amd64.test/src \
            $(SRC_DIR)/org.graalvm.compiler.hotspot.lir.test/src \
            $(SRC_DIR)/org.graalvm.compiler.hotspot.sparc.test/src \
            $(SRC_DIR)/org.graalvm.compiler.hotspot.test/src \
            $(SRC_DIR)/org.graalvm.compiler.lir.test/src \
            $(SRC_DIR)/org.graalvm.compiler.loop.test/src \
            $(SRC_DIR)/org.graalvm.compiler.nodes.test/src \
            $(SRC_DIR)/org.graalvm.compiler.options.test/src \
            $(SRC_DIR)/org.graalvm.compiler.phases.common.test/src \
            $(SRC_DIR)/org.graalvm.compiler.replacements.test/src \
            $(SRC_DIR)/org.graalvm.compiler.test/src \
            $(SRC_DIR)/org.graalvm.util.test/src \
            $(SRC_DIR)/org.graalvm.compiler.jtt/src \
            $(SRC_DIR)/org.graalvm.compiler.lir.jtt/src \
        , \
        BIN := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
        JAR := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests.jar, \
        CLASSPATH := \
            $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.compiler \
            $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.ci \
            $(LIB_OUTPUTDIR)/junit-4.12.jar \
            $(LIB_OUTPUTDIR)/asm-5.0.4.jar \
            $(LIB_OUTPUTDIR)/asm-tree-5.0.4.jar \
            $(LIB_OUTPUTDIR)/java-allocation-instrumenter.jar \
            $(LIB_OUTPUTDIR)/hamcrest-core-1.3.jar \
        , \
        ADD_JAVAC_FLAGS := \
            -Xlint:none -processorpath \
            $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar \
            --add-exports jdk.unsupported/sun.misc=ALL-UNNAMED \
        , \
    ))

    TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS)

    ### Compile and build mxtool
    $(eval $(call SetupJavaCompilation, BUILD_MXTOOL, \
        SETUP := GENERATE_USINGJDKBYTECODE, \
        SRC := $(TEST_DIR)/com.oracle.mxtool.junit, \
        BIN := $(COMPILE_OUTPUTDIR)/com.oracle.mxtool.junit, \
        JAR := $(COMPILE_OUTPUTDIR)/com.oracle.mxtool.junit.jar, \
        CLASSPATH := $(LIB_OUTPUTDIR)/junit-4.12.jar, \
    ))

    TARGETS_BUILD += $(BUILD_MXTOOL)

    ############################################################################
    # Targets for building test-image.
    ############################################################################

    # Copy to hotspot jtreg test image
    $(eval $(call SetupCopyFiles, COPY_HOTSPOT_JTREG_GRAAL, \
        SRC := $(COMPILE_OUTPUTDIR), \
        DEST := $(LIB_OUTPUTDIR), \
        FILES := jdk.vm.compiler.tests.jar com.oracle.mxtool.junit.jar, \
    ))

    TARGETS_IMAGE += $(COPY_HOTSPOT_JTREG_GRAAL)
  else
    $(info Skip building of Graal unit tests because 3rd party libraries directory is not specified)
  endif
endif

$(TARGETS_BUILD): $(TARGETS_EXTRA_LIB)
build-test-hotspot-jtreg-graal: $(TARGETS_BUILD)
test-image-hotspot-jtreg-graal: $(TARGETS_IMAGE)

all: build-test-hotspot-jtreg-graal
test-image: test-image-hotspot-jtreg-graal

.PHONY: default all build-test-hotspot-jtreg-graal test-image-hotspot-jtreg-graal test-image
make/test/JtregNativeHotspot.gmk

@@ -836,6 +836,10 @@ BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libVirtualMachine09agent00 := $(NSK_AOD_INC

################################################################################

ifeq ($(TOOLCHAIN_TYPE), solstudio)
    BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libji06t001 += -erroff=E_END_OF_LOOP_CODE_NOT_REACHED
endif

# Platform specific setup
ifneq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_ARCH), solaris-sparc)
    BUILD_HOTSPOT_JTREG_EXCLUDE += liboverflow.c exeThreadSignalMask.c

@@ -858,9 +862,13 @@ endif

BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exesigtest := -ljvm

ifeq ($(OPENJDK_TARGET_OS), solaris)
    BUILD_HOTSPOT_JTREG_EXCLUDE += libterminatedThread.c
endif

ifeq ($(OPENJDK_TARGET_OS), windows)
    BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
    BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c
    BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c libterminatedThread.c

else
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libbootclssearch_agent += -lpthread

@@ -1494,6 +1502,7 @@ else
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libvmdeath001 += -lpthread
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libgetphase001 += -lpthread
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libgetphase002 += -lpthread
    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libterminatedThread += -lpthread
endif

$(eval $(call SetupTestFilesCompilation, BUILD_HOTSPOT_JTREG_LIBRARIES, \
src/hotspot/cpu/aarch64/aarch64.ad

@@ -1471,7 +1471,7 @@ source %{
  // Ctl+Mem to a StoreB node (which does the actual card mark).
  //
  // n.b. a StoreCM node will only appear in this configuration when
  // using CMS. StoreCM differs from a normal card mark write (StoreB)
  // using CMS or G1. StoreCM differs from a normal card mark write (StoreB)
  // because it implies a requirement to order visibility of the card
  // mark (StoreCM) relative to the object put (StoreP/N) using a
  // StoreStore memory barrier (arguably this ought to be represented

@@ -1481,16 +1481,12 @@ source %{
  // the sequence
  //
  //   dmb ishst
  //   stlrb
  //   strb
  //
  // However, in the case of a volatile put if we can recognise this
  // configuration and plant an stlr for the object write then we can
  // omit the dmb and just plant an strb since visibility of the stlr
  // is ordered before visibility of subsequent stores. StoreCM nodes
  // also arise when using G1 or using CMS with conditional card
  // marking. In these cases (as we shall see) we don't need to insert
  // the dmb when translating StoreCM because there is already an
  // intervening StoreLoad barrier between it and the StoreP/N.
  // However, when using G1 or CMS with conditional card marking (as
  // we shall see) we don't need to insert the dmb when translating
  // StoreCM because there is already an intervening StoreLoad barrier
  // between it and the StoreP/N.
  //
  // It is also possible to perform the card mark conditionally on it
  // currently being unmarked in which case the volatile put graph

@@ -2868,50 +2864,17 @@ bool unnecessary_storestore(const Node *storecm)
{
  assert(storecm->Opcode() == Op_StoreCM, "expecting a StoreCM");

  // we only ever need to generate a dmb ishst between an object put
  // and the associated card mark when we are using CMS without
  // conditional card marking
  // we need to generate a dmb ishst between an object put and the
  // associated card mark when we are using CMS without conditional
  // card marking

  if (!UseConcMarkSweepGC || UseCondCardMark) {
    return true;
  }

  // if we are implementing volatile puts using barriers then the
  // object put is an str so we must insert the dmb ishst

  if (UseBarriersForVolatile) {
  if (UseConcMarkSweepGC && !UseCondCardMark) {
    return false;
  }

  // we can omit the dmb ishst if this StoreCM is part of a volatile
  // put because in that case the put will be implemented by stlr
  //
  // we need to check for a normal subgraph feeding this StoreCM.
  // that means the StoreCM must be fed Memory from a leading membar,
  // either a MemBarRelease or its dependent MemBarCPUOrder, and the
  // leading membar must be part of a normal subgraph
  // a storestore is unnecessary in all other cases

  Node *x = storecm->in(StoreNode::Memory);

  if (!x->is_Proj()) {
    return false;
  }

  x = x->in(0);

  if (!x->is_MemBar()) {
    return false;
  }

  MemBarNode *leading = x->as_MemBar();

  // reject invalid candidates
  if (!leading_membar(leading)) {
    return false;
  }

  // we can omit the StoreStore if it is the head of a normal subgraph
  return (leading_to_normal(leading) != NULL);
  return true;
}
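The replacement body collapses the old graph walk (Proj, then MemBar, then leading_to_normal) into a single GC-configuration test: the StoreStore barrier ahead of a card mark is kept only for CMS without conditional card marking. A hypothetical standalone restatement of that decision, not HotSpot code, using stand-in parameters for the UseConcMarkSweepGC and UseCondCardMark flags:

    #include <cassert>

    // Sketch of the new unnecessary_storestore() logic: the dmb ishst ahead
    // of a card mark is required only for CMS without conditional card marking.
    static bool storestore_is_unnecessary(bool use_cms_gc, bool use_cond_card_mark) {
      if (use_cms_gc && !use_cond_card_mark) {
        return false;  // keep the StoreStore barrier
      }
      return true;     // every other configuration can elide it
    }

    int main() {
      assert(storestore_is_unnecessary(false, false));  // e.g. G1: elide
      assert(storestore_is_unnecessary(true, true));    // CMS + UseCondCardMark: elide
      assert(!storestore_is_unnecessary(true, false));  // plain CMS: barrier stays
      return 0;
    }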
src/hotspot/cpu/ppc/vm_version_ext_ppc.cpp

@@ -25,19 +25,30 @@
#include "jvm.h"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/vm_version.hpp"
#include "vm_version_ext_ppc.hpp"

// VM_Version_Ext statics
int VM_Version_Ext::_no_of_threads = 0;
int VM_Version_Ext::_no_of_cores = 0;
int VM_Version_Ext::_no_of_sockets = 0;
bool VM_Version_Ext::_initialized = false;
char VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0};
char VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0};

// get cpu information.
bool VM_Version_Ext::initialize_cpu_information(void) {
  // Not yet implemented.
  return false;
void VM_Version_Ext::initialize_cpu_information(void) {
  // do nothing if cpu info has been initialized
  if (_initialized) {
    return;
  }

  _no_of_cores = os::processor_count();
  _no_of_threads = _no_of_cores;
  _no_of_sockets = _no_of_cores;
  snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "PowerPC POWER%lu", PowerArchitecturePPC64);
  snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "PPC %s", features_string());
  _initialized = true;
}

int VM_Version_Ext::number_of_threads(void) {

@@ -56,9 +67,7 @@ int VM_Version_Ext::number_of_sockets(void) {
}

const char* VM_Version_Ext::cpu_name(void) {
  if (!initialize_cpu_information()) {
    return NULL;
  }
  initialize_cpu_information();
  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing);
  if (NULL == tmp) {
    return NULL;

@@ -68,9 +77,7 @@ const char* VM_Version_Ext::cpu_name(void) {
}

const char* VM_Version_Ext::cpu_description(void) {
  if (!initialize_cpu_information()) {
    return NULL;
  }
  initialize_cpu_information();
  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing);
  if (NULL == tmp) {
    return NULL;

src/hotspot/cpu/ppc/vm_version_ext_ppc.hpp

@@ -43,10 +43,11 @@ class VM_Version_Ext : public VM_Version {
  static int _no_of_threads;
  static int _no_of_cores;
  static int _no_of_sockets;
  static bool _initialized;
  static char _cpu_name[CPU_TYPE_DESC_BUF_SIZE];
  static char _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE];

  static bool initialize_cpu_information(void);
  static void initialize_cpu_information(void);

 public:
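The signature change turns a failing stub into idempotent lazy initialization: callers no longer branch on a return value, they just call and read the cached fields. A minimal standalone sketch of the same guarded-init pattern, with hypothetical names and stand-in values for the OS queries:

    #include <cstdio>

    struct CpuInfoCache {
      // Guard flag plus cached data, filled in exactly once on first use.
      bool initialized = false;
      int cores = 0;
      char name[64] = {0};

      void ensure_initialized() {
        if (initialized) {
          return;             // already populated; later calls are free
        }
        cores = 8;            // stand-in for os::processor_count()
        snprintf(name, sizeof(name), "PowerPC POWER%d", 9);  // stand-in for the real query
        initialized = true;
      }

      const char* cpu_name() {
        ensure_initialized(); // callers need no error handling anymore
        return name;
      }
    };

    int main() {
      CpuInfoCache cache;
      printf("%s, %d cores\n", cache.cpu_name(), cache.cores);
      return 0;
    }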
src/hotspot/cpu/s390/s390.ad

@@ -9839,7 +9839,7 @@ instruct partialSubtypeCheck(rarg1RegP index, rarg2RegP sub, rarg3RegP super, fl
  match(Set index (PartialSubtypeCheck sub super));
  effect(KILL pcc, KILL scratch1, KILL scratch2);
  ins_cost(10 * DEFAULT_COST);
  size(12);
  // TODO: s390 port size(FIXED_SIZE);
  format %{ " CALL PartialSubtypeCheck\n" %}
  ins_encode %{
    AddressLiteral stub_address(StubRoutines::zarch::partial_subtype_check());
src/hotspot/cpu/s390/templateTable_s390.cpp

@@ -3636,7 +3636,7 @@ void TemplateTable::invokeinterface(int byte_no) {

  NearLabel subtype, no_such_interface;

  __ check_klass_subtype(klass, interface, Z_tmp_2, Z_tmp_3, subtype);
  __ check_klass_subtype(klass, interface, Z_tmp_2, flags/*scratch*/, subtype);
  // If we get here the typecheck failed
  __ z_bru(no_such_interface);
  __ bind(subtype);

@@ -3649,7 +3649,6 @@ void TemplateTable::invokeinterface(int byte_no) {
  __ bind(notVFinal);

  // Get receiver klass into klass - also a null check.
  __ restore_locals();
  __ load_klass(klass, receiver);

  __ lookup_interface_method(klass, interface, noreg, noreg, /*temp*/Z_ARG1,

@@ -3680,7 +3679,7 @@ void TemplateTable::invokeinterface(int byte_no) {
  // interpreter entry point and a conditional jump to it in case of a null
  // method.
  __ compareU64_and_branch(method2, (intptr_t) 0,
                           Assembler::bcondZero, no_such_method);
                           Assembler::bcondZero, no_such_method);

  __ profile_arguments_type(Z_tmp_1, method2, Z_tmp_2, true);

@@ -3695,8 +3694,6 @@ void TemplateTable::invokeinterface(int byte_no) {
  __ bind(no_such_method);

  // Throw exception.
  __ restore_bcp();    // Bcp must be correct for exception handler (was destroyed).
  __ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
  // Pass arguments for generating a verbose error message.
  __ z_lgr(Z_tmp_1, method); // Prevent register clash.
  __ call_VM(noreg,

@@ -3709,8 +3706,6 @@ void TemplateTable::invokeinterface(int byte_no) {
  __ bind(no_such_interface);

  // Throw exception.
  __ restore_bcp();    // Bcp must be correct for exception handler (was destroyed).
  __ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
  // Pass arguments for generating a verbose error message.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
src/hotspot/cpu/s390/vm_version_ext_s390.cpp

@@ -31,13 +31,23 @@
int VM_Version_Ext::_no_of_threads = 0;
int VM_Version_Ext::_no_of_cores = 0;
int VM_Version_Ext::_no_of_sockets = 0;
bool VM_Version_Ext::_initialized = false;
char VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0};
char VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0};

// get cpu information.
bool VM_Version_Ext::initialize_cpu_information(void) {
  // Not yet implemented.
  return false;
void VM_Version_Ext::initialize_cpu_information(void) {
  // do nothing if cpu info has been initialized
  if (_initialized) {
    return;
  }

  _no_of_cores = os::processor_count();
  _no_of_threads = _no_of_cores;
  _no_of_sockets = _no_of_cores;
  snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "s390 %s", VM_Version::get_model_string());
  snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "zArch %s", features_string());
  _initialized = true;
}

int VM_Version_Ext::number_of_threads(void) {

@@ -56,9 +66,7 @@ int VM_Version_Ext::number_of_sockets(void) {
}

const char* VM_Version_Ext::cpu_name(void) {
  if (!initialize_cpu_information()) {
    return NULL;
  }
  initialize_cpu_information();
  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing);
  if (NULL == tmp) {
    return NULL;

@@ -68,9 +76,7 @@ const char* VM_Version_Ext::cpu_name(void) {
}

const char* VM_Version_Ext::cpu_description(void) {
  if (!initialize_cpu_information()) {
    return NULL;
  }
  initialize_cpu_information();
  char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing);
  if (NULL == tmp) {
    return NULL;

src/hotspot/cpu/s390/vm_version_ext_s390.hpp

@@ -43,10 +43,11 @@ class VM_Version_Ext : public VM_Version {
  static int _no_of_threads;
  static int _no_of_cores;
  static int _no_of_sockets;
  static bool _initialized;
  static char _cpu_name[CPU_TYPE_DESC_BUF_SIZE];
  static char _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE];

  static bool initialize_cpu_information(void);
  static void initialize_cpu_information(void);

 public:
src/hotspot/cpu/s390/vm_version_s390.cpp

@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -36,6 +36,7 @@
# include <sys/sysinfo.h>

bool VM_Version::_is_determine_features_test_running = false;
const char* VM_Version::_model_string;

unsigned long VM_Version::_features[_features_buffer_len] = {0, 0, 0, 0};
unsigned long VM_Version::_cipher_features[_features_buffer_len] = {0, 0, 0, 0};

@@ -210,6 +211,10 @@ void VM_Version::initialize() {
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
  }

@@ -244,32 +249,40 @@ void VM_Version::initialize() {
void VM_Version::set_features_string() {

  unsigned int ambiguity = 0;
  _model_string = z_name[0];
  if (is_z13()) {
    _features_string = "System z G7-z13 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update, TxM, VectorInstr)";
    _model_string = z_name[7];
    ambiguity++;
  }
  if (is_ec12()) {
    _features_string = "System z G6-EC12 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update, TxM)";
    _model_string = z_name[6];
    ambiguity++;
  }
  if (is_z196()) {
    _features_string = "System z G5-z196 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update)";
    _model_string = z_name[5];
    ambiguity++;
  }
  if (is_z10()) {
    _features_string = "System z G4-z10 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB)";
    _model_string = z_name[4];
    ambiguity++;
  }
  if (is_z9()) {
    _features_string = "System z G3-z9 (LDISP_fast, ExtImm), out-of-support as of 2016-04-01";
    _model_string = z_name[3];
    ambiguity++;
  }
  if (is_z990()) {
    _features_string = "System z G2-z990 (LDISP_fast), out-of-support as of 2014-07-01";
    _model_string = z_name[2];
    ambiguity++;
  }
  if (is_z900()) {
    _features_string = "System z G1-z900 (LDISP), out-of-support as of 2014-07-01";
    _model_string = z_name[1];
    ambiguity++;
  }

src/hotspot/cpu/s390/vm_version_s390.hpp

@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -131,6 +131,7 @@ class VM_Version: public Abstract_VM_Version {
  static unsigned int _Dcache_lineSize;
  static unsigned int _Icache_lineSize;
  static bool _is_determine_features_test_running;
  static const char* _model_string;

  static bool test_feature_bit(unsigned long* featureBuffer, int featureNum, unsigned int bufLen);
  static void set_features_string();

@@ -346,6 +347,7 @@ class VM_Version: public Abstract_VM_Version {
  static bool is_determine_features_test_running() { return _is_determine_features_test_running; }

  // CPU feature query functions
  static const char* get_model_string() { return _model_string; }
  static bool has_StoreFacilityListExtended() { return (_features[0] & StoreFacilityListExtendedMask) == StoreFacilityListExtendedMask; }
  static bool has_Crypto() { return (_features[0] & CryptoFacilityMask) == CryptoFacilityMask; }
  static bool has_ETF2() { return (_features[0] & ETF2Mask) == ETF2Mask; }
src/hotspot/cpu/x86/methodHandles_x86.cpp

@@ -440,8 +440,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
      verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
    }

    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();

    Register temp3_intf = temp3;
    __ load_heap_oop(temp3_intf, member_clazz);
    load_klass_from_Class(_masm, temp3_intf);
src/hotspot/os/aix/os_aix.cpp

@@ -576,7 +576,9 @@ void os::init_system_properties_values() {
    }
  }
  Arguments::set_java_home(buf);
  set_boot_path('/', ':');
  if (!set_boot_path('/', ':')) {
    vm_exit_during_initialization("Failed setting boot class path.", NULL);
  }
}

// Where to look for native libraries.

@@ -1208,22 +1210,6 @@ void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() {
  return (intx)pthread_self();
}
src/hotspot/os/bsd/os_bsd.cpp

@@ -372,7 +372,9 @@ void os::init_system_properties_values() {
    }
  }
  Arguments::set_java_home(buf);
  set_boot_path('/', ':');
  if (!set_boot_path('/', ':')) {
    vm_exit_during_initialization("Failed setting boot class path.", NULL);
  }
}

// Where to look for native libraries.

@@ -1081,22 +1083,6 @@ void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

// Information of current thread in variety of formats
pid_t os::Bsd::gettid() {
  int retval = -1;
src/hotspot/os/linux/os_linux.cpp

@@ -367,7 +367,9 @@ void os::init_system_properties_values() {
    }
  }
  Arguments::set_java_home(buf);
  set_boot_path('/', ':');
  if (!set_boot_path('/', ':')) {
    vm_exit_during_initialization("Failed setting boot class path.", NULL);
  }
}

// Where to look for native libraries.

@@ -1419,23 +1421,6 @@ void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

// thread_id is kernel thread id (similar to Solaris LWP id)
intx os::current_thread_id() { return os::Linux::gettid(); }
int os::current_process_id() {

@@ -5572,14 +5557,18 @@ bool os::pd_unmap_memory(char* addr, size_t bytes) {

static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);

static clockid_t thread_cpu_clockid(Thread* thread) {
  pthread_t tid = thread->osthread()->pthread_id();
  clockid_t clockid;

  // Get thread clockid
  int rc = os::Linux::pthread_getcpuclockid(tid, &clockid);
  assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code");
  return clockid;
static jlong fast_cpu_time(Thread *thread) {
  clockid_t clockid;
  int rc = os::Linux::pthread_getcpuclockid(thread->osthread()->pthread_id(),
                                            &clockid);
  if (rc == 0) {
    return os::Linux::fast_thread_cpu_time(clockid);
  } else {
    // It's possible to encounter a terminated native thread that failed
    // to detach itself from the VM - which should result in ESRCH.
    assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed");
    return -1;
  }
}

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)

@@ -5601,7 +5590,7 @@ jlong os::current_thread_cpu_time() {
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  if (os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
    return fast_cpu_time(thread);
  } else {
    return slow_thread_cpu_time(thread, true /* user + sys */);
  }

@@ -5617,7 +5606,7 @@ jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {

jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
    return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
    return fast_cpu_time(thread);
  } else {
    return slow_thread_cpu_time(thread, user_sys_cpu_time);
  }
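The new fast_cpu_time helper tolerates pthread_getcpuclockid failing with ESRCH (a terminated native thread) instead of asserting unconditionally. The underlying POSIX mechanism can be exercised outside the VM; a minimal standalone sketch, not HotSpot code, compiled with -lpthread:

    #include <pthread.h>
    #include <time.h>
    #include <cstdio>

    // Returns the target thread's consumed CPU time in nanoseconds, or -1
    // if the thread is gone (pthread_getcpuclockid then fails, e.g. ESRCH).
    static long long thread_cpu_nanos(pthread_t tid) {
      clockid_t clockid;
      if (pthread_getcpuclockid(tid, &clockid) != 0) {
        return -1;
      }
      struct timespec ts;
      if (clock_gettime(clockid, &ts) != 0) {
        return -1;
      }
      return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    int main() {
      // Burn a little CPU so the sample is non-zero.
      volatile double x = 0;
      for (int i = 0; i < 10000000; i++) x += i;
      printf("self: %lld ns\n", thread_cpu_nanos(pthread_self()));
      return 0;
    }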
src/hotspot/os/posix/os_posix.cpp

@@ -150,6 +150,19 @@ int os::get_last_error() {
  return errno;
}

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

bool os::is_debugger_attached() {
  // not implemented
  return false;
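Each POSIX port previously carried its own copy of this helper; it now lives once in os_posix.cpp (the aix, bsd, linux and solaris hunks above all delete it). Its contract, copy at most len-1 bytes of strerror(errno) and always NUL-terminate, can be checked with a small standalone equivalent (hypothetical name; not the HotSpot function itself):

    #include <cerrno>
    #include <cstdio>
    #include <cstring>

    static size_t last_error_string(char* buf, size_t len) {
      if (errno == 0) return 0;
      const char* s = strerror(errno);
      size_t n = strlen(s);
      if (n >= len) {
        n = len - 1;         // leave room for the terminating NUL
      }
      strncpy(buf, s, n);
      buf[n] = '\0';         // always NUL-terminated, even when truncated
      return n;
    }

    int main() {
      errno = ENOENT;
      char small[8];
      size_t n = last_error_string(small, sizeof(small));
      printf("%zu bytes: \"%s\"\n", n, small);  // truncated to 7 chars plus NUL
      return 0;
    }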
src/hotspot/os/solaris/os_solaris.cpp

@@ -580,7 +580,9 @@ void os::init_system_properties_values() {
    }
  }
  Arguments::set_java_home(buf);
  set_boot_path('/', ':');
  if (!set_boot_path('/', ':')) {
    vm_exit_during_initialization("Failed setting boot class path.", NULL);
  }
}

// Where to look for native libraries.

@@ -2010,23 +2012,6 @@ void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

// sun.misc.Signal

extern "C" {
src/hotspot/os/windows/os_windows.cpp

@@ -230,7 +230,7 @@ void os::init_system_properties_values() {
  FREE_C_HEAP_ARRAY(char, dll_path);

  if (!set_boot_path('\\', ';')) {
    return;
    vm_exit_during_initialization("Failed setting boot class path.", NULL);
  }
}
@ -28,7 +28,6 @@
|
||||
#include "gc/z/zErrno.hpp"
|
||||
#include "gc/z/zLargePages.inline.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
@ -47,10 +46,6 @@
|
||||
// Sysfs file for transparent huge page on tmpfs
|
||||
#define ZFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
|
||||
|
||||
// Default mount points
|
||||
#define ZMOUNTPOINT_TMPFS "/dev/shm"
|
||||
#define ZMOUNTPOINT_HUGETLBFS "/hugepages"
|
||||
|
||||
// Java heap filename
|
||||
#define ZFILENAME_HEAP "java_heap"
|
||||
|
||||
@ -79,13 +74,30 @@
|
||||
#define HUGETLBFS_MAGIC 0x958458f6
|
||||
#endif
|
||||
|
||||
// Preferred tmpfs mount points, ordered by priority
|
||||
static const char* z_preferred_tmpfs_mountpoints[] = {
|
||||
"/dev/shm",
|
||||
"/run/shm",
|
||||
NULL
|
||||
};
|
||||
|
||||
// Preferred hugetlbfs mount points, ordered by priority
|
||||
static const char* z_preferred_hugetlbfs_mountpoints[] = {
|
||||
"/dev/hugepages",
|
||||
"/hugepages",
|
||||
NULL
|
||||
};
|
||||
|
||||
static int z_memfd_create(const char *name, unsigned int flags) {
|
||||
return syscall(__NR_memfd_create, name, flags);
|
||||
}
bool ZBackingFile::_hugetlbfs_mmap_retry = true;

ZBackingFile::ZBackingFile() :
    _fd(-1),
    _filesystem(0),
    _available(0),
    _initialized(false) {

  // Create backing file

@@ -94,39 +106,47 @@ ZBackingFile::ZBackingFile() :
    return;
  }

  // Get filesystem type
  // Get filesystem statistics
  struct statfs statfs_buf;
  if (fstatfs(_fd, &statfs_buf) == -1) {
    ZErrno err;
    log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string());
    log_error(gc, init)("Failed to determine filesystem type for backing file (%s)",
                        err.to_string());
    return;
  }

  _filesystem = statfs_buf.f_type;
  _available = statfs_buf.f_bavail * statfs_buf.f_bsize;

  // Make sure we're on a supported filesystem
  if (!is_tmpfs() && !is_hugetlbfs()) {
    log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
    log_error(gc, init)("Backing file must be located on a %s or a %s filesystem",
                        ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
    return;
  }

  // Make sure the filesystem type matches requested large page type
  if (ZLargePages::is_transparent() && !is_tmpfs()) {
    log_error(gc, init)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem", ZFILESYSTEM_TMPFS);
    log_error(gc, init)("-XX:+UseTransparentHugePages can only be enabled when using a %s filesystem",
                        ZFILESYSTEM_TMPFS);
    return;
  }

  if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
    log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS);
    log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
                        ZFILESYSTEM_TMPFS);
    return;
  }

  if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
    log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
    log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem",
                        ZFILESYSTEM_HUGETLBFS);
    return;
  }

  if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
    log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
    log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem",
                        ZFILESYSTEM_HUGETLBFS);
    return;
  }

@@ -149,17 +169,21 @@ int ZBackingFile::create_mem_fd(const char* name) const {
    return -1;
  }

  log_debug(gc, init)("Heap backed by file /memfd:%s", filename);
  log_info(gc, init)("Heap backed by file: /memfd:%s", filename);

  return fd;
}

int ZBackingFile::create_file_fd(const char* name) const {
  const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS;
  const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS;
  const char* const filesystem = ZLargePages::is_explicit()
                                 ? ZFILESYSTEM_HUGETLBFS
                                 : ZFILESYSTEM_TMPFS;
  const char** const preferred_mountpoints = ZLargePages::is_explicit()
                                             ? z_preferred_hugetlbfs_mountpoints
                                             : z_preferred_tmpfs_mountpoints;

  // Find mountpoint
  ZBackingPath path(filesystem, mountpoint);
  ZBackingPath path(filesystem, preferred_mountpoints);
  if (path.get() == NULL) {
    log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
    return -1;

@@ -181,7 +205,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
    return -1;
  }

  log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
  log_info(gc, init)("Heap backed by file: %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);

  return fd_anon;
}

@@ -207,7 +231,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
    return -1;
  }

  log_debug(gc, init)("Heap backed by file %s", filename);
  log_info(gc, init)("Heap backed by file: %s", filename);

  return fd;
}

@@ -238,6 +262,10 @@ int ZBackingFile::fd() const {
  return _fd;
}

size_t ZBackingFile::available() const {
  return _available;
}

bool ZBackingFile::is_tmpfs() const {
  return _filesystem == TMPFS_MAGIC;
}

@@ -292,12 +320,12 @@ bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignme
  return true;
}

bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const {
bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length) const {
  assert(is_tmpfs(), "Wrong filesystem");
  return try_expand_tmpfs(offset, length, os::vm_page_size());
}

bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
bool ZBackingFile::try_expand_hugetlbfs(size_t offset, size_t length) const {
  assert(is_hugetlbfs(), "Wrong filesystem");

  // Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().

@@ -320,11 +348,11 @@ bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
  // process being returned to the huge page pool and made available for new
  // allocations.
  void* addr = MAP_FAILED;
  const int max_attempts = 3;
  const int max_attempts = 5;
  for (int attempt = 1; attempt <= max_attempts; attempt++) {
    addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
    if (addr != MAP_FAILED || is_init_completed()) {
      // Mapping was successful or initialization phase has completed
    if (addr != MAP_FAILED || !_hugetlbfs_mmap_retry) {
      // Mapping was successful or mmap retry is disabled
      break;
    }

@@ -337,6 +365,11 @@ bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
    sleep(1);
  }

  // Disable mmap retry from now on
  if (_hugetlbfs_mmap_retry) {
    _hugetlbfs_mmap_retry = false;
  }

  if (addr == MAP_FAILED) {
    // Not enough huge pages left
    ZErrno err;

@@ -355,6 +388,39 @@ bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
  return true;
}

bool ZBackingFile::expand(size_t offset, size_t length) const {
  return is_hugetlbfs() ? expand_hugetlbfs(offset, length) : expand_tmpfs(offset, length);
bool ZBackingFile::try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const {
  assert(is_aligned(offset, alignment), "Invalid offset");
  assert(is_aligned(length, alignment), "Invalid length");

  log_debug(gc)("Expanding heap from " SIZE_FORMAT "M to " SIZE_FORMAT "M", offset / M, (offset + length) / M);

  return is_hugetlbfs() ? try_expand_hugetlbfs(offset, length) : try_expand_tmpfs(offset, length);
}

size_t ZBackingFile::try_expand(size_t offset, size_t length, size_t alignment) const {
  size_t start = offset;
  size_t end = offset + length;

  // Try to expand
  if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) {
    // Success
    return end;
  }

  // Failed, try to expand as much as possible
  for (;;) {
    length = align_down((end - start) / 2, alignment);
    if (length < alignment) {
      // Done, don't expand more
      return start;
    }

    if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) {
      // Success, try expand more
      start += length;
    } else {
      // Failed, try expand less
      end -= length;
    }
  }
}
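A self-contained model of the try_expand() strategy above: attempt the full range first, then bisect downwards, growing from the left on success and shrinking from the right on failure. try_commit() is a stand-in for try_expand_tmpfs_or_hugetlbfs(); align_down() mirrors the HotSpot helper:

#include <cstddef>
#include <functional>

static size_t align_down(size_t value, size_t alignment) {
  return value - (value % alignment);
}

// Returns the new end offset actually committed, in [offset, offset + length].
size_t try_expand_model(size_t offset, size_t length, size_t alignment,
                        const std::function<bool(size_t, size_t)>& try_commit) {
  size_t start = offset;
  size_t end = offset + length;
  if (try_commit(start, length)) {
    return end;                        // full expansion succeeded
  }
  for (;;) {
    length = align_down((end - start) / 2, alignment);
    if (length < alignment) {
      return start;                    // cannot bisect any further
    }
    if (try_commit(start, length)) {
      start += length;                 // keep what we got, try for more
    } else {
      end -= length;                   // back off and try a smaller piece
    }
  }
}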
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -28,8 +28,11 @@

class ZBackingFile {
private:
  static bool _hugetlbfs_mmap_retry;

  int _fd;
  uint64_t _filesystem;
  size_t _available;
  bool _initialized;

  int create_mem_fd(const char* name) const;

@@ -42,9 +45,9 @@ private:

  bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
  bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
  bool expand_tmpfs(size_t offset, size_t length) const;

  bool expand_hugetlbfs(size_t offset, size_t length) const;
  bool try_expand_tmpfs(size_t offset, size_t length) const;
  bool try_expand_hugetlbfs(size_t offset, size_t length) const;
  bool try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const;

public:
  ZBackingFile();

@@ -52,7 +55,9 @@ public:
  bool is_initialized() const;

  int fd() const;
  bool expand(size_t offset, size_t length) const;
  size_t available() const;

  size_t try_expand(size_t offset, size_t length, size_t alignment) const;
};

#endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP

@@ -33,13 +33,13 @@
// Mount information, see proc(5) for more details.
#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"

ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) {
ZBackingPath::ZBackingPath(const char* filesystem, const char** preferred_mountpoints) {
  if (ZPath != NULL) {
    // Use specified path
    _path = strdup(ZPath);
  } else {
    // Find suitable path
    _path = find_mountpoint(filesystem, preferred_path);
    _path = find_mountpoint(filesystem, preferred_mountpoints);
  }
}

@@ -52,8 +52,8 @@ char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) con
  char* line_mountpoint = NULL;
  char* line_filesystem = NULL;

  // Parse line and return a newly allocated string containing the mountpoint if
  // the line contains a matching filesystem and the mountpoint is accessible by
  // Parse line and return a newly allocated string containing the mount point if
  // the line contains a matching filesystem and the mount point is accessible by
  // the current user.
  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
      strcmp(line_filesystem, filesystem) != 0 ||

@@ -68,7 +68,7 @@ char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) con
  return line_mountpoint;
}
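A worked example of the sscanf() format above against the sample /proc/self/mountinfo line given in proc(5). "%ms" is a POSIX/glibc extension that allocates the matched string, and "%*[^-]-" skips the variable-length optional fields up to the " - " separator:

#include <cstdio>
#include <cstdlib>

int main() {
  const char* line =
      "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue";
  char* mountpoint = NULL;
  char* filesystem = NULL;
  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms",
             &mountpoint, &filesystem) == 2) {
    printf("mount point: %s, filesystem: %s\n", mountpoint, filesystem);
    // prints: mount point: /mnt2, filesystem: ext3
  }
  free(mountpoint);
  free(filesystem);
  return 0;
}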
void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const {
void ZBackingPath::get_mountpoints(const char* filesystem, ZArray<char*>* mountpoints) const {
  FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
  if (fd == NULL) {
    ZErrno err;

@@ -98,37 +98,45 @@ void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
  mountpoints->clear();
}

char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const {
char* ZBackingPath::find_preferred_mountpoint(const char* filesystem,
                                              ZArray<char*>* mountpoints,
                                              const char** preferred_mountpoints) const {
  // Find preferred mount point
  ZArrayIterator<char*> iter1(mountpoints);
  for (char* mountpoint; iter1.next(&mountpoint);) {
    for (const char** preferred = preferred_mountpoints; *preferred != NULL; preferred++) {
      if (!strcmp(mountpoint, *preferred)) {
        // Preferred mount point found
        return strdup(mountpoint);
      }
    }
  }

  // Preferred mount point not found
  log_error(gc, init)("More than one %s filesystem found:", filesystem);
  ZArrayIterator<char*> iter2(mountpoints);
  for (char* mountpoint; iter2.next(&mountpoint);) {
    log_error(gc, init)("  %s", mountpoint);
  }

  return NULL;
}

char* ZBackingPath::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const {
  char* path = NULL;
  ZArray<char*> mountpoints;

  get_mountpoints(&mountpoints, filesystem);
  get_mountpoints(filesystem, &mountpoints);

  if (mountpoints.size() == 0) {
    // No filesystem found
    // No mount point found
    log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
  } else if (mountpoints.size() == 1) {
    // One filesystem found
    // One mount point found
    path = strdup(mountpoints.at(0));
  } else if (mountpoints.size() > 1) {
    // More than one filesystem found
    ZArrayIterator<char*> iter(&mountpoints);
    for (char* mountpoint; iter.next(&mountpoint);) {
      if (!strcmp(mountpoint, preferred_mountpoint)) {
        // Preferred mount point found
        path = strdup(mountpoint);
        break;
      }
    }

    if (path == NULL) {
      // Preferred mount point not found
      log_error(gc, init)("More than one %s filesystem found:", filesystem);
      ZArrayIterator<char*> iter2(&mountpoints);
      for (char* mountpoint; iter2.next(&mountpoint);) {
        log_error(gc, init)("  %s", mountpoint);
      }
    }
  } else {
    // More than one mount point found
    path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints);
  }

  free_mountpoints(&mountpoints);

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -31,13 +31,19 @@ class ZBackingPath : public StackObj {
private:
  char* _path;

  char* get_mountpoint(const char* line, const char* filesystem) const;
  void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const;
  char* get_mountpoint(const char* line,
                       const char* filesystem) const;
  void get_mountpoints(const char* filesystem,
                       ZArray<char*>* mountpoints) const;
  void free_mountpoints(ZArray<char*>* mountpoints) const;
  char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const;
  char* find_preferred_mountpoint(const char* filesystem,
                                  ZArray<char*>* mountpoints,
                                  const char** preferred_mountpoints) const;
  char* find_mountpoint(const char* filesystem,
                        const char** preferred_mountpoints) const;

public:
  ZBackingPath(const char* filesystem, const char* preferred_path);
  ZBackingPath(const char* filesystem, const char** preferred_mountpoints);
  ~ZBackingPath();

  const char* get() const;

@@ -52,8 +52,15 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granu
    _file(),
    _granule_size(granule_size) {

  // Check and warn if max map count seems too low
  if (!_file.is_initialized()) {
    return;
  }

  // Check and warn if max map count is too low
  check_max_map_count(max_capacity, granule_size);

  // Check and warn if available space on filesystem is too low
  check_available_space_on_filesystem(max_capacity);
}

void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {

@@ -61,7 +68,7 @@ void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t gra
  FILE* const file = fopen(filename, "r");
  if (file == NULL) {
    // Failed to open file, skip check
    log_debug(gc)("Failed to open %s", filename);
    log_debug(gc, init)("Failed to open %s", filename);
    return;
  }

@@ -70,7 +77,7 @@ void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t gra
  fclose(file);
  if (result != 1) {
    // Failed to read file, skip check
    log_debug(gc)("Failed to read %s", filename);
    log_debug(gc, init)("Failed to read %s", filename);
    return;
  }

@@ -81,15 +88,43 @@ void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t gra
  // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
  const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
  if (actual_max_map_count < required_max_map_count) {
    log_warning(gc)("The system limit on number of memory mappings "
                    "per process might be too low for the given");
    log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
                    "adjust %s to allow for at least", max_capacity / M, filename);
    log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
                    "Continuing execution with the current limit could",
                    required_max_map_count, actual_max_map_count);
    log_warning(gc)("lead to a fatal error down the line, due to failed "
                    "attempts to map memory.");
    log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
    log_warning(gc, init)("The system limit on number of memory mappings per process might be too low "
                          "for the given");
    log_warning(gc, init)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
                          max_capacity / M, filename);
    log_warning(gc, init)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing "
                          "execution with the current", required_max_map_count, actual_max_map_count);
    log_warning(gc, init)("limit could lead to a fatal error, due to failure to map memory.");
  }
}
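A back-of-the-envelope check of the required_max_map_count formula above. ZGC multi-maps each granule (three heap views), plus the speculated 20% headroom for non-ZGC mappings; the heap and granule sizes below are only an illustration:

#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  const size_t max_capacity = 16 * 1024 * M;   // 16G heap
  const size_t granule_size = 2 * M;           // ZGC granule size
  const size_t required = (size_t)((max_capacity / granule_size) * 3 * 1.2);
  printf("required vm.max_map_count >= %zu\n", required);  // 29491
  // The common Linux default is 65530, so a 16G heap fits comfortably,
  // but a heap larger than roughly 36G would already exceed that default.
  return 0;
}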
void ZPhysicalMemoryBacking::check_available_space_on_filesystem(size_t max_capacity) const {
  // Note that the available space on a tmpfs or a hugetlbfs filesystem
  // will be zero if no size limit was specified when it was mounted.
  const size_t available = _file.available();
  if (available == 0) {
    // No size limit set, skip check
    log_info(gc, init)("Available space on backing filesystem: N/A");
    return;
  }

  log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M",
                     available / M);

  // Warn if the filesystem doesn't currently have enough space available to hold
  // the max heap size. The max heap size will be capped if we later hit this limit
  // when trying to expand the heap.
  if (available < max_capacity) {
    log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
    log_warning(gc, init)("Not enough space available on the backing filesystem to hold the current "
                          "max Java heap");
    log_warning(gc, init)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem "
                          "accordingly (available", max_capacity / M);
    log_warning(gc, init)("space is currently " SIZE_FORMAT "M). Continuing execution with the current "
                          "filesystem size could", available / M);
    log_warning(gc, init)("lead to a premature OutOfMemoryError being thrown, due to failure to map "
                          "memory.");
  }
}

@@ -97,18 +132,16 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
  return _file.is_initialized();
}

bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
  const size_t size = to - from;
size_t ZPhysicalMemoryBacking::try_expand(size_t old_capacity, size_t new_capacity) {
  assert(old_capacity < new_capacity, "Invalid old/new capacity");

  // Expand
  if (!_file.expand(from, size)) {
    return false;
  const size_t capacity = _file.try_expand(old_capacity, new_capacity - old_capacity, _granule_size);
  if (capacity > old_capacity) {
    // Add expanded capacity to free list
    _manager.free(old_capacity, capacity - old_capacity);
  }

  // Add expanded space to free list
  _manager.free(from, size);

  return true;
  return capacity;
}
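A hypothetical caller illustrating the new try_expand() contract above: the returned value is the capacity actually reached, anywhere between old_capacity (nothing committed) and new_capacity (full success). Backing is a stub standing in for ZPhysicalMemoryBacking:

#include <algorithm>
#include <cstdio>

struct Backing {
  size_t limit;  // pretend the filesystem runs out at this capacity
  size_t try_expand(size_t old_capacity, size_t new_capacity) {
    return std::max(old_capacity, std::min(new_capacity, limit));
  }
};

int main() {
  Backing backing{768};
  const size_t achieved = backing.try_expand(512, 1024);
  if (achieved == 512) {
    printf("expansion failed completely\n");
  } else if (achieved < 1024) {
    printf("partial success: capacity is now %zu\n", achieved);  // 768
  }
  return 0;
}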
ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -37,6 +37,7 @@ private:
  const size_t _granule_size;

  void check_max_map_count(size_t max_capacity, size_t granule_size) const;
  void check_available_space_on_filesystem(size_t max_capacity) const;
  void map_failed(ZErrno err) const;

  void advise_view(uintptr_t addr, size_t size) const;

@@ -49,7 +50,8 @@ public:

  bool is_initialized() const;

  bool expand(size_t from, size_t to);
  size_t try_expand(size_t old_capacity, size_t new_capacity);

  ZPhysicalMemory alloc(size_t size);
  void free(ZPhysicalMemory pmem);

@@ -272,6 +272,7 @@ void AOTCompiledMethod::metadata_do(void f(Metadata*)) {
      if (md != _method) f(md);
    }
  } else if (iter.type() == relocInfo::virtual_call_type) {
    ResourceMark rm;
    // Check compiledIC holders associated with this nmethod
    CompiledIC *ic = CompiledIC_at(&iter);
    if (ic->is_icholder_call()) {

@@ -444,6 +445,7 @@ void AOTCompiledMethod::clear_inline_caches() {
    return;
  }

  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();

@@ -938,9 +938,9 @@ void ciEnv::validate_compile_task_dependencies(ciMethod* target) {
      _inc_decompile_count_on_failure = false;
      record_failure("call site target change");
    } else if (Dependencies::is_klass_type(result)) {
      record_failure("invalid non-klass dependency");
    } else {
      record_failure("concurrent class loading");
    } else {
      record_failure("invalid non-klass dependency");
    }
  }
}

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -255,8 +255,7 @@ ciConstant ciBytecodeStream::get_constant() {
// constant.
constantTag ciBytecodeStream::get_constant_pool_tag(int index) const {
  VM_ENTRY_MARK;
  BasicType bt = _method->get_Method()->constants()->basic_type_for_constant_at(index);
  return constantTag::ofBasicType(bt);
  return _method->get_Method()->constants()->constant_tag_at(index);
}

// ------------------------------------------------------------------

@@ -298,8 +298,6 @@ ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name, bool is_
  char *copy = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass);
  strcpy(copy, zip_name);
  _zip_name = copy;
  _is_boot_append = is_boot_append;
  _multi_versioned = _unknown;
}

ClassPathZipEntry::~ClassPathZipEntry() {

@@ -338,95 +336,11 @@ u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_ter
  return buffer;
}

#if INCLUDE_CDS
u1* ClassPathZipEntry::open_versioned_entry(const char* name, jint* filesize, TRAPS) {
  u1* buffer = NULL;
  if (DumpSharedSpaces && !_is_boot_append) {
    // We presume default is multi-release enabled
    const char* multi_ver = Arguments::get_property("jdk.util.jar.enableMultiRelease");
    const char* verstr = Arguments::get_property("jdk.util.jar.version");
    bool is_multi_ver = (multi_ver == NULL ||
                         strcmp(multi_ver, "true") == 0 ||
                         strcmp(multi_ver, "force") == 0) &&
                        is_multiple_versioned(THREAD);
    // command line version setting
    int version = 0;
    const int base_version = 8; // JDK8
    int cur_ver = JDK_Version::current().major_version();
    if (verstr != NULL) {
      version = atoi(verstr);
      if (version < base_version || version > cur_ver) {
        // If the specified version is lower than the base version, the base
        // entry will be used; if the version is higher than the current
        // jdk version, the highest versioned entry will be used.
        if (version < base_version) {
          is_multi_ver = false;
        }
        // print out warning, do not use assertion here since it will continue to look
        // for proper version.
        warning("JDK%d is not supported in multiple version jars", version);
      }
    }

    if (is_multi_ver) {
      int n;
      const char* version_entry = "META-INF/versions/";
      // 10 is the max length of a decimal 32-bit non-negative number
      // 2 includes the '/' and trailing zero
      size_t entry_name_len = strlen(version_entry) + 10 + strlen(name) + 2;
      char* entry_name = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, entry_name_len);
      if (version > 0) {
        n = jio_snprintf(entry_name, entry_name_len, "%s%d/%s", version_entry, version, name);
        entry_name[n] = '\0';
        buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL);
        if (buffer == NULL) {
          warning("Could not find %s in %s, try to find highest version instead", entry_name, _zip_name);
        }
      }
      if (buffer == NULL) {
        for (int i = cur_ver; i >= base_version; i--) {
          n = jio_snprintf(entry_name, entry_name_len, "%s%d/%s", version_entry, i, name);
          entry_name[n] = '\0';
          buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL);
          if (buffer != NULL) {
            break;
          }
        }
      }
      FREE_RESOURCE_ARRAY(char, entry_name, entry_name_len);
    }
  }
  return buffer;
}

bool ClassPathZipEntry::is_multiple_versioned(TRAPS) {
  assert(DumpSharedSpaces, "called only at dump time");
  if (_multi_versioned != _unknown) {
    return (_multi_versioned == _yes) ? true : false;
  }
  jint size;
  char* buffer = (char*)open_entry("META-INF/MANIFEST.MF", &size, true, CHECK_false);
  if (buffer != NULL) {
    char* p = buffer;
    for ( ; *p; ++p) *p = tolower(*p);
    if (strstr(buffer, "multi-release: true") != NULL) {
      _multi_versioned = _yes;
      return true;
    }
  }
  _multi_versioned = _no;
  return false;
}
#endif // INCLUDE_CDS
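For reference, the entry-naming scheme used by the removed open_versioned_entry() above: a multi-release jar stores JDK-9+ variants of a class under META-INF/versions/<N>/. A standalone sketch of the probe order (class name and version range are examples):

#include <cstdio>

int main() {
  const char* version_entry = "META-INF/versions/";
  const char* name = "com/example/Foo.class";
  char entry_name[256];
  for (int version = 11; version >= 9; version--) {
    // Probe the highest version first, like the fallback loop above.
    snprintf(entry_name, sizeof(entry_name), "%s%d/%s",
             version_entry, version, name);
    printf("try: %s\n", entry_name);
  }
  printf("fallback: %s\n", name);  // base entry if no versioned one exists
  return 0;
}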
ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
  jint filesize;
  u1* buffer = open_versioned_entry(name, &filesize, CHECK_NULL);
  u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
  if (buffer == NULL) {
    buffer = open_entry(name, &filesize, false, CHECK_NULL);
    if (buffer == NULL) {
      return NULL;
    }
    return NULL;
  }
  if (UsePerfData) {
    ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);

@@ -635,6 +549,7 @@ void ClassLoader::trace_class_path(const char* msg, const char* name) {

void ClassLoader::setup_bootstrap_search_path() {
  const char* sys_class_path = Arguments::get_sysclasspath();
  assert(sys_class_path != NULL, "System boot class path must not be NULL");
  if (PrintSharedArchiveAndExit) {
    // Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily
    // the same as the bootcp of the shared archive.

@@ -94,17 +94,9 @@ typedef struct {
} jzentry;

class ClassPathZipEntry: public ClassPathEntry {
  enum {
    _unknown = 0,
    _yes = 1,
    _no = 2
  };
 private:
  jzfile* _zip;            // The zip archive
  const char* _zip_name;   // Name of zip archive
  bool _is_boot_append;    // entry coming from -Xbootclasspath/a
  u1 _multi_versioned;     // indicates if the jar file has multi-versioned entries.
                           // It can have value of "_unknown", "_yes", or "_no"
 public:
  bool is_modules_image() const { return false; }
  bool is_jar_file() const { return true; }

@@ -113,10 +105,8 @@ class ClassPathZipEntry: public ClassPathEntry {
  ClassPathZipEntry(jzfile* zip, const char* zip_name, bool is_boot_append);
  virtual ~ClassPathZipEntry();
  u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
  u1* open_versioned_entry(const char* name, jint* filesize, TRAPS) NOT_CDS_RETURN_(NULL);
  ClassFileStream* open_stream(const char* name, TRAPS);
  void contents_do(void f(const char* name, void* context), void* context);
  bool is_multiple_versioned(TRAPS) NOT_CDS_RETURN_(false);
  // Debugging
  NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
};

@@ -1051,8 +1051,9 @@ void java_lang_Class::archive_basic_type_mirrors(TRAPS) {
      ResetMirrorField reset(archived_mirror_h);
      InstanceKlass::cast(k)->do_nonstatic_fields(&reset);

      log_trace(cds, mirror)("Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
                             type2name((BasicType)t), p2i(Universe::_mirrors[t]), p2i(archived_m));
      log_trace(cds, heap, mirror)(
        "Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
        type2name((BasicType)t), p2i(Universe::_mirrors[t]), p2i(archived_m));

      Universe::_mirrors[t] = archived_m;
    }

@@ -1133,8 +1134,9 @@ oop java_lang_Class::archive_mirror(Klass* k, TRAPS) {
  k->set_has_raw_archived_mirror();

  ResourceMark rm;
  log_trace(cds, mirror)("Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
                         k->external_name(), p2i(mirror), p2i(archived_mirror));
  log_trace(cds, heap, mirror)(
    "Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
    k->external_name(), p2i(mirror), p2i(archived_mirror));

  return archived_mirror;
}

@@ -1186,8 +1188,9 @@ oop java_lang_Class::process_archived_mirror(Klass* k, oop mirror,
  // klass. Updated the field in the archived mirror to point to the relocated
  // klass in the archive.
  Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror));
  log_debug(cds, mirror)("Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(as_Klass(mirror)), p2i(reloc_k));
  log_debug(cds, heap, mirror)(
    "Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
    p2i(as_Klass(mirror)), p2i(reloc_k));
  archived_mirror->metadata_field_put(_klass_offset, reloc_k);

  // The field at _array_klass_offset is pointing to the original one dimension

@@ -1195,8 +1198,9 @@ oop java_lang_Class::process_archived_mirror(Klass* k, oop mirror,
  Klass *arr = array_klass_acquire(mirror);
  if (arr != NULL) {
    Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr);
    log_debug(cds, mirror)("Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
                           p2i(arr), p2i(reloc_arr));
    log_debug(cds, heap, mirror)(
      "Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
      p2i(arr), p2i(reloc_arr));
    archived_mirror->metadata_field_put(_array_klass_offset, reloc_arr);
  }
  return archived_mirror;

@@ -1247,7 +1251,8 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
  set_mirror_module_field(k, mirror, module, THREAD);

  ResourceMark rm;
  log_trace(cds, mirror)("Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));
  log_trace(cds, heap, mirror)(
    "Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));

  return true;
}

@@ -4273,6 +4278,9 @@ int java_nio_Buffer::_limit_offset;
int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset;
int reflect_ConstantPool::_oop_offset;
int reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
int jdk_internal_module_ArchivedModuleGraph::_archivedSystemModules_offset;
int jdk_internal_module_ArchivedModuleGraph::_archivedModuleFinder_offset;
int jdk_internal_module_ArchivedModuleGraph::_archivedMainModule_offset;

#define STACKTRACEELEMENT_FIELDS_DO(macro) \
  macro(declaringClassObject_offset, k, "declaringClassObject", class_signature, false); \

@@ -4435,6 +4443,23 @@ static int member_offset(int hardcoded_offset) {
  return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes();
}

#define MODULEBOOTSTRAP_FIELDS_DO(macro) \
  macro(_archivedSystemModules_offset, k, "archivedSystemModules", systemModules_signature, true); \
  macro(_archivedModuleFinder_offset, k, "archivedModuleFinder", moduleFinder_signature, true); \
  macro(_archivedMainModule_offset, k, "archivedMainModule", string_signature, true)

void jdk_internal_module_ArchivedModuleGraph::compute_offsets() {
  InstanceKlass* k = SystemDictionary::ArchivedModuleGraph_klass();
  assert(k != NULL, "must be loaded");
  MODULEBOOTSTRAP_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}

#if INCLUDE_CDS
void jdk_internal_module_ArchivedModuleGraph::serialize(SerializeClosure* f) {
  MODULEBOOTSTRAP_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
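MODULEBOOTSTRAP_FIELDS_DO above follows HotSpot's X-macro convention: the field list is written once and expanded with different "macro" arguments (compute vs. serialize). A generic, self-contained sketch of the pattern (FIELD_PRINT and the fields here are made up for illustration):

#include <cstdio>

#define FIELDS_DO(macro)            \
  macro(_alpha_offset, "alpha");    \
  macro(_beta_offset, "beta")

static int _alpha_offset;
static int _beta_offset;

#define FIELD_PRINT(offset, name) \
  printf("%s -> %d\n", name, offset)

int main() {
  _alpha_offset = 8;
  _beta_offset = 16;
  FIELDS_DO(FIELD_PRINT);  // one field list, many expansions
  return 0;
}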
// Compute hard-coded offsets
// Invoked before SystemDictionary::initialize, so pre-loaded classes
// are not available to determine the offset_of_static_fields.

@@ -4493,6 +4518,8 @@ void JavaClasses::compute_offsets() {
  java_lang_LiveStackFrameInfo::compute_offsets();
  java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets();

  jdk_internal_module_ArchivedModuleGraph::compute_offsets();

  // generated interpreter code wants to know about the offsets we just computed:
  AbstractAssembler::update_delayed_values();
}

@@ -1491,6 +1491,19 @@ class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};

class jdk_internal_module_ArchivedModuleGraph: AllStatic {
 private:
  static int _archivedSystemModules_offset;
  static int _archivedModuleFinder_offset;
  static int _archivedMainModule_offset;
 public:
  static int archivedSystemModules_offset() { return _archivedSystemModules_offset; }
  static int archivedModuleFinder_offset() { return _archivedModuleFinder_offset; }
  static int archivedMainModule_offset() { return _archivedMainModule_offset; }
  static void compute_offsets();
  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};

// Use to declare fields that need to be injected into Java classes
// for the JVM to use. The name_index and signature_index are
// declared in vmSymbols. The may_be_java flag is used to declare

@@ -785,6 +785,10 @@ oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) {
oop StringTable::create_archived_string(oop s, Thread* THREAD) {
  assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");

  if (MetaspaceShared::is_archive_object(s)) {
    return s;
  }

  oop new_s = NULL;
  typeArrayOop v = java_lang_String::value_no_keepalive(s);
  typeArrayOop new_v =

@@ -187,6 +187,7 @@ class OopStorage;
  do_klass(jdk_internal_loader_ClassLoaders_AppClassLoader_klass, jdk_internal_loader_ClassLoaders_AppClassLoader, Pre ) \
  do_klass(jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass, jdk_internal_loader_ClassLoaders_PlatformClassLoader, Pre ) \
  do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \
  do_klass(ArchivedModuleGraph_klass, jdk_internal_module_ArchivedModuleGraph, Pre ) \
  \
  do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \
  \

@@ -124,6 +124,7 @@
  template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
  template(jdk_internal_vm_PostVMInitHook, "jdk/internal/vm/PostVMInitHook") \
  template(sun_net_www_ParseUtil, "sun/net/www/ParseUtil") \
  template(jdk_internal_module_ArchivedModuleGraph, "jdk/internal/module/ArchivedModuleGraph") \
  \
  template(jdk_internal_loader_ClassLoaders_AppClassLoader, "jdk/internal/loader/ClassLoaders$AppClassLoader") \
  template(jdk_internal_loader_ClassLoaders_PlatformClassLoader, "jdk/internal/loader/ClassLoaders$PlatformClassLoader") \

@@ -652,6 +653,8 @@
  template(url_void_signature, "(Ljava/net/URL;)V") \
  template(toFileURL_name, "toFileURL") \
  template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \
  template(moduleFinder_signature, "Ljava/lang/module/ModuleFinder;") \
  template(systemModules_signature, "Ljdk/internal/module/SystemModules;") \
  \
  /*end*/

@@ -325,6 +325,7 @@ void CompiledMethod::clear_inline_caches() {
// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {

@@ -547,6 +548,7 @@ bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurre
bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
  assert_locked_or_safepoint(CompiledIC_lock);
  bool postponed = false;
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.

@@ -530,7 +530,6 @@ CompileQueue* CompileBroker::compile_queue(int comp_level) {

void CompileBroker::print_compile_queues(outputStream* st) {
  st->print_cr("Current compiles: ");
  MutexLocker locker(MethodCompileQueue_lock);

  char buf[2000];
  int buflen = sizeof(buf);

@@ -546,7 +545,7 @@ void CompileBroker::print_compile_queues(outputStream* st) {
}

void CompileQueue::print(outputStream* st) {
  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
  assert_locked_or_safepoint(MethodCompileQueue_lock);
  st->print_cr("%s:", name());
  CompileTask* task = _first;
  if (task == NULL) {

@@ -1638,12 +1637,6 @@ bool CompileBroker::init_compiler_runtime() {
 * out to be a problem.
 */
void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
  // Free buffer blob, if allocated
  if (thread->get_buffer_blob() != NULL) {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(thread->get_buffer_blob());
  }

  if (comp->should_perform_shutdown()) {
    // There are two reasons for shutting down the compiler
    // 1) compiler runtime initialization failed

@@ -253,17 +253,18 @@ void set_jvmci_specific_flags() {
  if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
    FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
  }
  // JVMCI needs values not less than defaults
  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
    FLAG_SET_DEFAULT(ReservedCodeCacheSize, 64*M);
    FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
  }
  if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
    FLAG_SET_DEFAULT(InitialCodeCacheSize, 16*M);
    FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
  }
  if (FLAG_IS_DEFAULT(MetaspaceSize)) {
    FLAG_SET_DEFAULT(MetaspaceSize, 12*M);
    FLAG_SET_DEFAULT(MetaspaceSize, MAX2(12*M, MetaspaceSize));
  }
  if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
    FLAG_SET_DEFAULT(NewSizeThreadIncrease, 4*K);
    FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
  }
  if (TieredStopAtLevel != CompLevel_full_optimization) {
    // Currently JVMCI compiler can only work at the full optimization level
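The MAX2 form above matters when a flag's effective default has already been raised elsewhere: writing the JVMCI minimum unconditionally would silently shrink it. A standalone illustration (values are only an example):

#include <algorithm>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  size_t reserved_code_cache = 240 * M;  // e.g. already raised by tiered defaults
  // Old behavior: reserved_code_cache = 64 * M;   // would shrink it to 64M
  reserved_code_cache = std::max(64 * M, reserved_code_cache);  // keeps 240M
  printf("%zuM\n", reserved_code_cache / M);
  return 0;
}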
|
||||
|
@ -121,6 +121,11 @@ public:
|
||||
safe_object_iterate(cl);
|
||||
}
|
||||
|
||||
// Object pinning support: every object is implicitly pinned
|
||||
virtual bool supports_object_pinning() const { return true; }
|
||||
virtual oop pin_object(JavaThread* thread, oop obj) { return obj; }
|
||||
virtual void unpin_object(JavaThread* thread, oop obj) { }
|
||||
|
||||
// No support for block parsing.
|
||||
virtual HeapWord* block_start(const void* addr) const { return NULL; }
|
||||
virtual size_t block_size(const HeapWord* addr) const { return 0; }
|
||||
|
@ -1024,11 +1024,17 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
|
||||
|
||||
uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
|
||||
|
||||
void update_remset_before_rebuild(HeapRegion * hr) {
|
||||
void update_remset_before_rebuild(HeapRegion* hr) {
|
||||
G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();
|
||||
|
||||
size_t const live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
|
||||
bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
|
||||
bool selected_for_rebuild;
|
||||
if (hr->is_humongous()) {
|
||||
bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
|
||||
selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
|
||||
} else {
|
||||
size_t const live_bytes = _cm->liveness(hr->hrm_index());
|
||||
selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
|
||||
}
|
||||
if (selected_for_rebuild) {
|
||||
_num_regions_selected_for_rebuild++;
|
||||
}
|
||||
|
@ -29,10 +29,6 @@
|
||||
#include "gc/g1/heapRegionRemSet.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
|
||||
bool G1RemSetTrackingPolicy::is_interesting_humongous_region(HeapRegion* r) const {
|
||||
return r->is_humongous() && oop(r->humongous_start_region()->bottom())->is_typeArray();
|
||||
}
|
||||
|
||||
bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
|
||||
// All non-free, non-young, non-closed archive regions need to be scanned for references;
|
||||
// At every gc we gather references to other regions in young, and closed archive
|
||||
@ -64,51 +60,81 @@ void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
|
||||
/* nothing to do */
|
||||
}
|
||||
|
||||
bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
|
||||
static void print_before_rebuild(HeapRegion* r, bool selected_for_rebuild, size_t total_live_bytes, size_t live_bytes) {
|
||||
log_trace(gc, remset, tracking)("Before rebuild region %u "
|
||||
"(ntams: " PTR_FORMAT ") "
|
||||
"total_live_bytes " SIZE_FORMAT " "
|
||||
"selected %s "
|
||||
"(live_bytes " SIZE_FORMAT " "
|
||||
"next_marked " SIZE_FORMAT " "
|
||||
"marked " SIZE_FORMAT " "
|
||||
"type %s)",
|
||||
r->hrm_index(),
|
||||
p2i(r->next_top_at_mark_start()),
|
||||
total_live_bytes,
|
||||
BOOL_TO_STR(selected_for_rebuild),
|
||||
live_bytes,
|
||||
r->next_marked_bytes(),
|
||||
r->marked_bytes(),
|
||||
r->get_type_str());
|
||||
}
|
||||
|
||||
bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r, bool is_live) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
|
||||
assert(r->is_humongous(), "Region %u should be humongous", r->hrm_index());
|
||||
|
||||
if (r->is_archive()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
|
||||
|
||||
bool selected_for_rebuild = false;
|
||||
// For humongous regions, to be of interest for rebuilding the remembered set the following must apply:
|
||||
// - We always try to update the remembered sets of humongous regions containing
|
||||
// type arrays as they might have been reset after full gc.
|
||||
if (is_live && oop(r->humongous_start_region()->bottom())->is_typeArray() && !r->rem_set()->is_tracked()) {
|
||||
r->rem_set()->set_state_updating();
|
||||
selected_for_rebuild = true;
|
||||
}
|
||||
|
||||
size_t const live_bytes = is_live ? HeapRegion::GrainBytes : 0;
|
||||
print_before_rebuild(r, selected_for_rebuild, live_bytes, live_bytes);
|
||||
|
||||
return selected_for_rebuild;
|
||||
}
|
||||
|
||||
bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
|
||||
assert(!r->is_humongous(), "Region %u is humongous", r->hrm_index());
|
||||
|
||||
// Only consider updating the remembered set for old gen regions - excluding archive regions
|
||||
// which never move (but are "Old" regions).
|
||||
if (r->is_old_or_humongous() && !r->is_archive()) {
|
||||
size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize;
|
||||
size_t total_live_bytes = live_bytes + between_ntams_and_top;
|
||||
// Completely free regions after rebuild are of no interest wrt rebuilding the
|
||||
// remembered set.
|
||||
assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
|
||||
// To be of interest for rebuilding the remembered set the following must apply:
|
||||
// - They must contain some live data in them.
|
||||
// - We always try to update the remembered sets of humongous regions containing
|
||||
// type arrays if they are empty as they might have been reset after full gc.
|
||||
// - Only need to rebuild non-complete remembered sets.
|
||||
// - Otherwise only add those old gen regions which occupancy is low enough that there
|
||||
// is a chance that we will ever evacuate them in the mixed gcs.
|
||||
if ((total_live_bytes > 0) &&
|
||||
(is_interesting_humongous_region(r) || CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes)) &&
|
||||
!r->rem_set()->is_tracked()) {
|
||||
|
||||
r->rem_set()->set_state_updating();
|
||||
selected_for_rebuild = true;
|
||||
}
|
||||
log_trace(gc, remset, tracking)("Before rebuild region %u "
|
||||
"(ntams: " PTR_FORMAT ") "
|
||||
"total_live_bytes " SIZE_FORMAT " "
|
||||
"selected %s "
|
||||
"(live_bytes " SIZE_FORMAT " "
|
||||
"next_marked " SIZE_FORMAT " "
|
||||
"marked " SIZE_FORMAT " "
|
||||
"type %s)",
|
||||
r->hrm_index(),
|
||||
p2i(r->next_top_at_mark_start()),
|
||||
total_live_bytes,
|
||||
BOOL_TO_STR(selected_for_rebuild),
|
||||
live_bytes,
|
||||
r->next_marked_bytes(),
|
||||
r->marked_bytes(),
|
||||
r->get_type_str());
|
||||
if (!r->is_old() || r->is_archive()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
|
||||
|
||||
size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize;
|
||||
size_t total_live_bytes = live_bytes + between_ntams_and_top;
|
||||
|
||||
bool selected_for_rebuild = false;
|
||||
// For old regions, to be of interest for rebuilding the remembered set the following must apply:
|
||||
// - They must contain some live data in them.
|
||||
// - Only need to rebuild non-complete remembered sets.
|
||||
// - Otherwise only add those old gen regions which occupancy is low enough that there
|
||||
// is a chance that we will ever evacuate them in the mixed gcs.
|
||||
if ((total_live_bytes > 0) &&
|
||||
CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes) &&
|
||||
!r->rem_set()->is_tracked()) {
|
||||
|
||||
r->rem_set()->set_state_updating();
|
||||
selected_for_rebuild = true;
|
||||
}
|
||||
|
||||
print_before_rebuild(r, selected_for_rebuild, total_live_bytes, live_bytes);
|
||||
|
||||
return selected_for_rebuild;
|
||||
}
|
||||
|
||||
@ -149,4 +175,3 @@ void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
|
||||
r->rem_set()->mem_size());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -33,10 +33,6 @@
|
||||
// the remembered set, ie. when it should be tracked, and if/when the remembered
|
||||
// set is complete.
|
||||
class G1RemSetTrackingPolicy : public CHeapObj<mtGC> {
|
||||
private:
|
||||
// Is the given region an interesting humongous region to start remembered set tracking
|
||||
// for?
|
||||
bool is_interesting_humongous_region(HeapRegion* r) const;
|
||||
public:
|
||||
// Do we need to scan the given region to get all outgoing references for remembered
|
||||
// set rebuild?
|
||||
@ -45,6 +41,9 @@ public:
|
||||
// called at any time. The caller makes sure that the changes to the remembered
|
||||
// set state are visible to other threads.
|
||||
void update_at_allocate(HeapRegion* r);
|
||||
// Update remembered set tracking state for humongous regions before we are going to
|
||||
// rebuild remembered sets. Called at safepoint in the remark pause.
|
||||
bool update_humongous_before_rebuild(HeapRegion* r, bool is_live);
|
||||
// Update remembered set tracking state before we are going to rebuild remembered
|
||||
// sets. Called at safepoint in the remark pause.
|
||||
bool update_before_rebuild(HeapRegion* r, size_t live_bytes);
|
||||
|
@ -199,7 +199,7 @@ public:
|
||||
|
||||
template <typename T>
|
||||
static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
|
||||
return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
|
||||
return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
|
@ -45,47 +45,47 @@
|
||||
#include "utilities/ostream.hpp"
|
||||
#include "utilities/spinYield.hpp"
|
||||
|
||||
OopStorage::AllocateEntry::AllocateEntry() : _prev(NULL), _next(NULL) {}
|
||||
OopStorage::AllocationListEntry::AllocationListEntry() : _prev(NULL), _next(NULL) {}
|
||||
|
||||
OopStorage::AllocateEntry::~AllocateEntry() {
|
||||
OopStorage::AllocationListEntry::~AllocationListEntry() {
|
||||
assert(_prev == NULL, "deleting attached block");
|
||||
assert(_next == NULL, "deleting attached block");
|
||||
}
|
||||
|
||||
OopStorage::AllocateList::AllocateList() : _head(NULL), _tail(NULL) {}
|
||||
OopStorage::AllocationList::AllocationList() : _head(NULL), _tail(NULL) {}
|
||||
|
||||
OopStorage::AllocateList::~AllocateList() {
|
||||
OopStorage::AllocationList::~AllocationList() {
|
||||
// ~OopStorage() empties its lists before destroying them.
|
||||
assert(_head == NULL, "deleting non-empty block list");
|
||||
assert(_tail == NULL, "deleting non-empty block list");
|
||||
}
|
||||
|
||||
void OopStorage::AllocateList::push_front(const Block& block) {
|
||||
void OopStorage::AllocationList::push_front(const Block& block) {
|
||||
const Block* old = _head;
|
||||
if (old == NULL) {
|
||||
assert(_tail == NULL, "invariant");
|
||||
_head = _tail = █
|
||||
} else {
|
||||
block.allocate_entry()._next = old;
|
||||
old->allocate_entry()._prev = █
|
||||
block.allocation_list_entry()._next = old;
|
||||
old->allocation_list_entry()._prev = █
|
||||
_head = █
|
||||
}
|
||||
}
|
||||
|
||||
void OopStorage::AllocateList::push_back(const Block& block) {
|
||||
void OopStorage::AllocationList::push_back(const Block& block) {
|
||||
const Block* old = _tail;
|
||||
if (old == NULL) {
|
||||
assert(_head == NULL, "invariant");
|
||||
_head = _tail = █
|
||||
} else {
|
||||
old->allocate_entry()._next = █
|
||||
block.allocate_entry()._prev = old;
|
||||
old->allocation_list_entry()._next = &block;
block.allocation_list_entry()._prev = old;
_tail = &block;
}
}

void OopStorage::AllocateList::unlink(const Block& block) {
const AllocateEntry& block_entry = block.allocate_entry();
void OopStorage::AllocationList::unlink(const Block& block) {
const AllocationListEntry& block_entry = block.allocation_list_entry();
const Block* prev_blk = block_entry._prev;
const Block* next_blk = block_entry._next;
block_entry._prev = NULL;
@ -96,15 +96,15 @@ void OopStorage::AllocateList::unlink(const Block& block) {
_head = _tail = NULL;
} else if (prev_blk == NULL) {
assert(_head == &block, "invariant");
next_blk->allocate_entry()._prev = NULL;
next_blk->allocation_list_entry()._prev = NULL;
_head = next_blk;
} else if (next_blk == NULL) {
assert(_tail == &block, "invariant");
prev_blk->allocate_entry()._next = NULL;
prev_blk->allocation_list_entry()._next = NULL;
_tail = prev_blk;
} else {
next_blk->allocate_entry()._prev = prev_blk;
prev_blk->allocate_entry()._next = next_blk;
next_blk->allocation_list_entry()._prev = prev_blk;
prev_blk->allocation_list_entry()._next = next_blk;
}
}

@ -210,7 +210,7 @@ OopStorage::Block::Block(const OopStorage* owner, void* memory) :
_owner(owner),
_memory(memory),
_active_index(0),
_allocate_entry(),
_allocation_list_entry(),
_deferred_updates_next(NULL),
_release_refcount(0)
{
@ -367,65 +367,65 @@ OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
//////////////////////////////////////////////////////////////////////////////
// Allocation
//
// Allocation involves the _allocate_list, which contains a subset of the
// Allocation involves the _allocation_list, which contains a subset of the
// blocks owned by a storage object. This is a doubly-linked list, linked
// through dedicated fields in the blocks. Full blocks are removed from this
// list, though they are still present in the _active_array. Empty blocks are
// kept at the end of the _allocate_list, to make it easy for empty block
// kept at the end of the _allocation_list, to make it easy for empty block
// deletion to find them.
//
// allocate(), and delete_empty_blocks_concurrent() lock the
// _allocate_mutex while performing any list and array modifications.
// _allocation_mutex while performing any list and array modifications.
//
// allocate() and release() update a block's _allocated_bitmask using CAS
// loops. This prevents loss of updates even though release() performs
// its updates without any locking.
//
// allocate() obtains the entry from the first block in the _allocate_list,
// allocate() obtains the entry from the first block in the _allocation_list,
// and updates that block's _allocated_bitmask to indicate the entry is in
// use. If this makes the block full (all entries in use), the block is
// removed from the _allocate_list so it won't be considered by future
// removed from the _allocation_list so it won't be considered by future
// allocations until some entries in it are released.
//
// release() is performed lock-free. release() first looks up the block for
// the entry, using address alignment to find the enclosing block (thereby
// avoiding iteration over the _active_array). Once the block has been
// determined, its _allocated_bitmask needs to be updated, and its position in
// the _allocate_list may need to be updated. There are two cases:
// the _allocation_list may need to be updated. There are two cases:
//
// (a) If the block is neither full nor would become empty with the release of
// the entry, only its _allocated_bitmask needs to be updated. But if the CAS
// update fails, the applicable case may change for the retry.
//
// (b) Otherwise, the _allocate_list also needs to be modified. This requires
// locking the _allocate_mutex. To keep the release() operation lock-free,
// rather than updating the _allocate_list itself, it instead performs a
// (b) Otherwise, the _allocation_list also needs to be modified. This requires
// locking the _allocation_mutex. To keep the release() operation lock-free,
// rather than updating the _allocation_list itself, it instead performs a
// lock-free push of the block onto the _deferred_updates list. Entries on
// that list are processed by allocate() and delete_empty_blocks_XXX(), while
// they already hold the necessary lock. That processing makes the block's
// list state consistent with its current _allocated_bitmask. The block is
// added to the _allocate_list if not already present and the bitmask is not
// full. The block is moved to the end of the _allocated_list if the bitmask
// added to the _allocation_list if not already present and the bitmask is not
// full. The block is moved to the end of the _allocation_list if the bitmask
// is empty, for ease of empty block deletion processing.
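The case (b) path described above is a textbook lock-free stack push. A minimal sketch of the idea, using std::atomic in place of HotSpot's Atomic wrappers (DeferredBlock, _deferred_next and deferred_push are illustrative names, not taken from these sources):

#include <atomic>

struct DeferredBlock {
  DeferredBlock* _deferred_next = nullptr;
};

// Lock-free push onto the deferred-updates stack. Many releasing threads
// may race here; the CAS loop retries until this block is published as the
// new head. The consumer (allocate() in the real code) pops entries later,
// while it already holds the allocation mutex.
void deferred_push(std::atomic<DeferredBlock*>& head, DeferredBlock* block) {
  DeferredBlock* old_head = head.load(std::memory_order_relaxed);
  do {
    block->_deferred_next = old_head;  // link before publishing
  } while (!head.compare_exchange_weak(old_head, block,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
}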

oop* OopStorage::allocate() {
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Do some deferred update processing every time we allocate.
// Continue processing deferred updates if _allocate_list is empty,
// Continue processing deferred updates if _allocation_list is empty,
// in the hope that we'll get a block from that, rather than
// allocating a new block.
while (reduce_deferred_updates() && (_allocate_list.head() == NULL)) {}
while (reduce_deferred_updates() && (_allocation_list.head() == NULL)) {}

// Use the first block in _allocate_list for the allocation.
Block* block = _allocate_list.head();
// Use the first block in _allocation_list for the allocation.
Block* block = _allocation_list.head();
if (block == NULL) {
// No available blocks; make a new one, and add to storage.
{
MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
MutexUnlockerEx mul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
block = Block::new_block(this);
}
if (block == NULL) {
while (_allocate_list.head() == NULL) {
while (_allocation_list.head() == NULL) {
if (!reduce_deferred_updates()) {
// Failed to make new block, no other thread made a block
// available while the mutex was released, and didn't get
@ -448,13 +448,13 @@ oop* OopStorage::allocate() {
return NULL;
}
}
// Add to end of _allocate_list. The mutex release allowed
// other threads to add blocks to the _allocate_list. We prefer
// Add to end of _allocation_list. The mutex release allowed
// other threads to add blocks to the _allocation_list. We prefer
// to allocate from non-empty blocks, to allow empty blocks to
// be deleted.
_allocate_list.push_back(*block);
_allocation_list.push_back(*block);
}
block = _allocate_list.head();
block = _allocation_list.head();
}
// Allocate from first block.
assert(block != NULL, "invariant");
@ -471,7 +471,7 @@ oop* OopStorage::allocate() {
// Transitioning from not full to full.
// Remove full blocks from consideration by future allocates.
log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
_allocate_list.unlink(*block);
_allocation_list.unlink(*block);
}
log_info(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
return result;
@ -482,7 +482,7 @@ oop* OopStorage::allocate() {
// Return true if the array was successfully expanded, false to
// indicate allocation failure.
bool OopStorage::expand_active_array() {
assert_lock_strong(_allocate_mutex);
assert_lock_strong(_allocation_mutex);
ActiveArray* old_array = _active_array;
size_t new_size = 2 * old_array->size();
log_info(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
@ -632,7 +632,7 @@ void OopStorage::Block::release_entries(uintx releasing, Block* volatile* deferr
// (updated bitmask is empty or old bitmask was full), atomically push
// this block onto the deferred updates list. Some future call to
// reduce_deferred_updates will make any needed changes related to this
// block and _allocate_list. This deferral avoids list updates and the
// block and _allocation_list. This deferral avoids list updates and the
// associated locking here.
if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
// Log transitions. Both transitions are possible in a single update.
@ -663,7 +663,7 @@ void OopStorage::Block::release_entries(uintx releasing, Block* volatile* deferr

// Process one available deferred update. Returns true if one was processed.
bool OopStorage::reduce_deferred_updates() {
assert_locked_or_safepoint(_allocate_mutex);
assert_locked_or_safepoint(_allocation_mutex);
// Atomically pop a block off the list, if any available.
// No ABA issue because this is only called by one thread at a time.
// The atomicity is wrto pushes by release().
@ -687,20 +687,20 @@ bool OopStorage::reduce_deferred_updates() {
uintx allocated = block->allocated_bitmask();

// Make membership in list consistent with bitmask state.
if ((_allocate_list.ctail() != NULL) &&
((_allocate_list.ctail() == block) ||
(_allocate_list.next(*block) != NULL))) {
// Block is in the allocate list.
if ((_allocation_list.ctail() != NULL) &&
((_allocation_list.ctail() == block) ||
(_allocation_list.next(*block) != NULL))) {
// Block is in the _allocation_list.
assert(!is_full_bitmask(allocated), "invariant");
} else if (!is_full_bitmask(allocated)) {
// Block is not in the allocate list, but now should be.
_allocate_list.push_front(*block);
// Block is not in the _allocation_list, but now should be.
_allocation_list.push_front(*block);
} // Else block is full and not in list, which is correct.

// Move empty block to end of list, for possible deletion.
if (is_empty_bitmask(allocated)) {
_allocate_list.unlink(*block);
_allocate_list.push_back(*block);
_allocation_list.unlink(*block);
_allocation_list.push_back(*block);
}

log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
@ -759,24 +759,24 @@ const char* dup_name(const char* name) {
const size_t initial_active_array_size = 8;

OopStorage::OopStorage(const char* name,
Mutex* allocate_mutex,
Mutex* allocation_mutex,
Mutex* active_mutex) :
_name(dup_name(name)),
_active_array(ActiveArray::create(initial_active_array_size)),
_allocate_list(),
_allocation_list(),
_deferred_updates(NULL),
_allocate_mutex(allocate_mutex),
_allocation_mutex(allocation_mutex),
_active_mutex(active_mutex),
_allocation_count(0),
_concurrent_iteration_active(false)
{
_active_array->increment_refcount();
assert(_active_mutex->rank() < _allocate_mutex->rank(),
"%s: active_mutex must have lower rank than allocate_mutex", _name);
assert(_active_mutex->rank() < _allocation_mutex->rank(),
"%s: active_mutex must have lower rank than allocation_mutex", _name);
assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
"%s: active mutex requires safepoint check", _name);
assert(_allocate_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
"%s: allocate mutex requires safepoint check", _name);
assert(_allocation_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
"%s: allocation mutex requires safepoint check", _name);
}

void OopStorage::delete_empty_block(const Block& block) {
@ -791,8 +791,8 @@ OopStorage::~OopStorage() {
_deferred_updates = block->deferred_updates_next();
block->set_deferred_updates_next(NULL);
}
while ((block = _allocate_list.head()) != NULL) {
_allocate_list.unlink(*block);
while ((block = _allocation_list.head()) != NULL) {
_allocation_list.unlink(*block);
}
bool unreferenced = _active_array->decrement_refcount();
assert(unreferenced, "deleting storage while _active_array is referenced");
@ -811,18 +811,18 @@ void OopStorage::delete_empty_blocks_safepoint() {
while (reduce_deferred_updates()) {}
// Don't interfere with a concurrent iteration.
if (_concurrent_iteration_active) return;
// Delete empty (and otherwise deletable) blocks from end of _allocate_list.
for (Block* block = _allocate_list.tail();
// Delete empty (and otherwise deletable) blocks from end of _allocation_list.
for (Block* block = _allocation_list.tail();
(block != NULL) && block->is_deletable();
block = _allocate_list.tail()) {
block = _allocation_list.tail()) {
_active_array->remove(block);
_allocate_list.unlink(*block);
_allocation_list.unlink(*block);
delete_empty_block(*block);
}
}

void OopStorage::delete_empty_blocks_concurrent() {
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Other threads could be adding to the empty block count while we
// release the mutex across the block deletions. Set an upper bound
// on how many blocks we'll try to release, so other threads can't
@ -834,7 +834,7 @@ void OopStorage::delete_empty_blocks_concurrent() {
// lock. But limit number processed to limit lock duration.
reduce_deferred_updates();

Block* block = _allocate_list.tail();
Block* block = _allocation_list.tail();
if ((block == NULL) || !block->is_deletable()) {
// No block to delete, so done. There could be more pending
// deferred updates that could give us more work to do; deal with
@ -848,10 +848,10 @@ void OopStorage::delete_empty_blocks_concurrent() {
if (_concurrent_iteration_active) return;
_active_array->remove(block);
}
// Remove block from _allocate_list and delete it.
_allocate_list.unlink(*block);
// Remove block from _allocation_list and delete it.
_allocation_list.unlink(*block);
// Release mutex while deleting block.
MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
delete_empty_block(*block);
}
}
@ -860,7 +860,7 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
const Block* block = find_block_or_null(ptr);
if (block != NULL) {
// Prevent block deletion and _active_array modification.
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Block could be a false positive, so get index carefully.
size_t index = Block::active_index_safe(block);
if ((index < _active_array->block_count()) &&

@ -73,7 +73,7 @@ class outputStream;

class OopStorage : public CHeapObj<mtGC> {
public:
OopStorage(const char* name, Mutex* allocate_mutex, Mutex* active_mutex);
OopStorage(const char* name, Mutex* allocation_mutex, Mutex* active_mutex);
~OopStorage();

// These count and usage accessors are racy unless at a safepoint.
@ -94,12 +94,12 @@ public:
ALLOCATED_ENTRY
};

// Locks _allocate_mutex.
// Locks _allocation_mutex.
// precondition: ptr != NULL.
EntryStatus allocation_status(const oop* ptr) const;

// Allocates and returns a new entry. Returns NULL if memory allocation
// failed. Locks _allocate_mutex.
// failed. Locks _allocation_mutex.
// postcondition: *result == NULL.
oop* allocate();

@ -152,7 +152,7 @@ public:

// Block cleanup functions are for the exclusive use of the GC.
// Both stop deleting if there is an in-progress concurrent iteration.
// Concurrent deletion locks both the allocate_mutex and the active_mutex.
// Concurrent deletion locks both the _allocation_mutex and the _active_mutex.
void delete_empty_blocks_safepoint();
void delete_empty_blocks_concurrent();

@ -172,20 +172,20 @@ public:
NOT_AIX( private: )
class Block; // Fixed-size array of oops, plus bookkeeping.
class ActiveArray; // Array of Blocks, plus bookkeeping.
class AllocateEntry; // Provides AllocateList links in a Block.
class AllocationListEntry; // Provides AllocationList links in a Block.

// Doubly-linked list of Blocks.
class AllocateList {
class AllocationList {
const Block* _head;
const Block* _tail;

// Noncopyable.
AllocateList(const AllocateList&);
AllocateList& operator=(const AllocateList&);
AllocationList(const AllocationList&);
AllocationList& operator=(const AllocationList&);

public:
AllocateList();
~AllocateList();
AllocationList();
~AllocationList();

Block* head();
Block* tail();
@ -219,10 +219,10 @@ NOT_AIX( private: )
private:
const char* _name;
ActiveArray* _active_array;
AllocateList _allocate_list;
AllocationList _allocation_list;
Block* volatile _deferred_updates;

Mutex* _allocate_mutex;
Mutex* _allocation_mutex;
Mutex* _active_mutex;

// Volatile for racy unlocked accesses.

@ -107,10 +107,10 @@ inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
return *block_ptr(index);
}

// A Block has an embedded AllocateEntry to provide the links between
// Blocks in a AllocateList.
class OopStorage::AllocateEntry {
friend class OopStorage::AllocateList;
// A Block has an embedded AllocationListEntry to provide the links between
// Blocks in an AllocationList.
class OopStorage::AllocationListEntry {
friend class OopStorage::AllocationList;

// Members are mutable, and we deal exclusively with pointers to
// const, to make const blocks easier to use; a block being const
@ -119,18 +119,18 @@ class OopStorage::AllocateEntry {
mutable const Block* _next;

// Noncopyable.
AllocateEntry(const AllocateEntry&);
AllocateEntry& operator=(const AllocateEntry&);
AllocationListEntry(const AllocationListEntry&);
AllocationListEntry& operator=(const AllocationListEntry&);

public:
AllocateEntry();
~AllocateEntry();
AllocationListEntry();
~AllocationListEntry();
};

// Fixed-sized array of oops, plus bookkeeping data.
// All blocks are in the storage's _active_array, at the block's _active_index.
// Non-full blocks are in the storage's _allocate_list, linked through the
// block's _allocate_entry. Empty blocks are at the end of that list.
// Non-full blocks are in the storage's _allocation_list, linked through the
// block's _allocation_list_entry. Empty blocks are at the end of that list.
class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
// _data must be the first non-static data member, for alignment.
oop _data[BitsPerWord];
@ -140,7 +140,7 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
const OopStorage* _owner;
void* _memory; // Unaligned storage containing block.
size_t _active_index;
AllocateEntry _allocate_entry;
AllocationListEntry _allocation_list_entry;
Block* volatile _deferred_updates_next;
volatile uintx _release_refcount;

@ -158,7 +158,7 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
Block& operator=(const Block&);

public:
const AllocateEntry& allocate_entry() const;
const AllocationListEntry& allocation_list_entry() const;

static size_t allocation_size();
static size_t allocation_alignment_shift();
@ -197,36 +197,36 @@ public:
template<typename F> bool iterate(F f) const;
}; // class Block

inline OopStorage::Block* OopStorage::AllocateList::head() {
inline OopStorage::Block* OopStorage::AllocationList::head() {
return const_cast<Block*>(_head);
}

inline OopStorage::Block* OopStorage::AllocateList::tail() {
inline OopStorage::Block* OopStorage::AllocationList::tail() {
return const_cast<Block*>(_tail);
}

inline const OopStorage::Block* OopStorage::AllocateList::chead() const {
inline const OopStorage::Block* OopStorage::AllocationList::chead() const {
return _head;
}

inline const OopStorage::Block* OopStorage::AllocateList::ctail() const {
inline const OopStorage::Block* OopStorage::AllocationList::ctail() const {
return _tail;
}

inline OopStorage::Block* OopStorage::AllocateList::prev(Block& block) {
return const_cast<Block*>(block.allocate_entry()._prev);
inline OopStorage::Block* OopStorage::AllocationList::prev(Block& block) {
return const_cast<Block*>(block.allocation_list_entry()._prev);
}

inline OopStorage::Block* OopStorage::AllocateList::next(Block& block) {
return const_cast<Block*>(block.allocate_entry()._next);
inline OopStorage::Block* OopStorage::AllocationList::next(Block& block) {
return const_cast<Block*>(block.allocation_list_entry()._next);
}

inline const OopStorage::Block* OopStorage::AllocateList::prev(const Block& block) const {
return block.allocate_entry()._prev;
inline const OopStorage::Block* OopStorage::AllocationList::prev(const Block& block) const {
return block.allocation_list_entry()._prev;
}

inline const OopStorage::Block* OopStorage::AllocateList::next(const Block& block) const {
return block.allocate_entry()._next;
inline const OopStorage::Block* OopStorage::AllocationList::next(const Block& block) const {
return block.allocation_list_entry()._next;
}

template<typename Closure>
@ -298,8 +298,8 @@ inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {

// Inline Block accesses for use in iteration loops.

inline const OopStorage::AllocateEntry& OopStorage::Block::allocate_entry() const {
return _allocate_entry;
inline const OopStorage::AllocationListEntry& OopStorage::Block::allocation_list_entry() const {
return _allocation_list_entry;
}

inline void OopStorage::Block::check_index(unsigned index) const {

@ -52,7 +52,7 @@
// interfering with each other.
//
// Both allocate() and delete_empty_blocks_concurrent() lock the
// _allocate_mutex while performing their respective list and array
// _allocation_mutex while performing their respective list and array
// manipulations, preventing them from interfering with each other.
//
// When allocate() creates a new block, it is added to the end of the

@ -1033,7 +1033,7 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
// The last ref must have its discovered field pointing to itself.
oop next_discovered = (current_head != NULL) ? current_head : obj;

oop retest = RawAccess<>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));
oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));

if (retest == NULL) {
// This thread just won the right to enqueue the object.

@ -80,8 +80,7 @@ static const char* ReferenceTypeNames[REF_PHANTOM + 1] = {
STATIC_ASSERT((REF_PHANTOM + 1) == ARRAY_SIZE(ReferenceTypeNames));

static const char* phase_enum_2_phase_string(ReferenceProcessor::RefProcPhases phase) {
assert(phase >= ReferenceProcessor::RefPhase1 && phase <= ReferenceProcessor::RefPhaseMax,
"Invalid reference processing phase (%d)", phase);
ASSERT_PHASE(phase);
return PhaseNames[phase];
}

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"

@ -61,10 +61,11 @@ public:
public:
inline TaskQueueStats() { reset(); }

inline void record_push() { ++_stats[push]; }
inline void record_pop() { ++_stats[pop]; }
inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
inline void record_steal(bool success);
inline void record_push() { ++_stats[push]; }
inline void record_pop() { ++_stats[pop]; }
inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
inline void record_steal_attempt() { ++_stats[steal_attempt]; }
inline void record_steal() { ++_stats[steal]; }
inline void record_overflow(size_t new_length);

TaskQueueStats & operator +=(const TaskQueueStats & addend);
@ -87,11 +88,6 @@ private:
static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
++_stats[steal_attempt];
if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
++_stats[overflow];
if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
@ -364,18 +360,19 @@ template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public Ta

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
public:
typedef typename T::element_type E;

private:
uint _n;
T** _queues;

public:
typedef typename T::element_type E;
bool steal_best_of_2(uint queue_num, int* seed, E& t);

public:
GenericTaskQueueSet(int n);
~GenericTaskQueueSet();

bool steal_best_of_2(uint queue_num, int* seed, E& t);

void register_queue(uint i, T* q);

T* queue(uint n);

@ -252,12 +252,12 @@ GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
for (uint i = 0; i < 2 * _n; i++) {
TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal_attempt());
if (steal_best_of_2(queue_num, seed, t)) {
TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal());
return true;
}
}
TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
return false;
}

@ -81,7 +81,7 @@ bool ZDirector::rule_warmup() const {
// Perform GC if heap usage passes 10/20/30% and no other GC has been
// performed yet. This allows us to get some early samples of the GC
// duration, which is needed by the other rules.
const size_t max_capacity = ZHeap::heap()->max_capacity();
const size_t max_capacity = ZHeap::heap()->current_max_capacity();
const size_t used = ZHeap::heap()->used();
const double used_threshold_percent = (ZStatCycle::ncycles() + 1) * 0.1;
const size_t used_threshold = max_capacity * used_threshold_percent;
@ -107,7 +107,7 @@ bool ZDirector::rule_allocation_rate() const {
// Calculate amount of free memory available to Java threads. Note that
// the heap reserve is not available to Java threads and is therefore not
// considered part of the free memory.
const size_t max_capacity = ZHeap::heap()->max_capacity();
const size_t max_capacity = ZHeap::heap()->current_max_capacity();
const size_t max_reserve = ZHeap::heap()->max_reserve();
const size_t used = ZHeap::heap()->used();
const size_t free_with_reserve = max_capacity - used;
@ -155,7 +155,7 @@ bool ZDirector::rule_proactive() const {
// passed since the previous GC. This helps avoid superfluous GCs when running
// applications with very low allocation rate.
const size_t used_after_last_gc = ZStatHeap::used_at_relocate_end();
const size_t used_increase_threshold = ZHeap::heap()->max_capacity() * 0.10; // 10%
const size_t used_increase_threshold = ZHeap::heap()->current_max_capacity() * 0.10; // 10%
const size_t used_threshold = used_after_last_gc + used_increase_threshold;
const size_t used = ZHeap::heap()->used();
const double time_since_last_gc = ZStatCycle::time_since_last();
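
For concreteness, the warmup rule's threshold arithmetic plays out as follows; a standalone sketch with an assumed 1024M current max capacity (the number is illustrative, not a ZGC default):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t max_capacity = 1024;  // MB, assumed for this example
  // Mirrors rule_warmup(): thresholds of 10%, 20% and 30% for the first
  // three GC cycles (ncycles() == 0, 1, 2).
  for (size_t ncycles = 0; ncycles < 3; ncycles++) {
    const double used_threshold_percent = (ncycles + 1) * 0.1;
    const size_t used_threshold = (size_t)(max_capacity * used_threshold_percent);
    std::printf("cycle %zu: warmup GC when used >= %zuM\n", ncycles, used_threshold);
  }
  return 0;
}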

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -21,6 +21,38 @@
* questions.
*/

/*
* This file is available under and governed by the GNU General Public
* License version 2 only, as published by the Free Software Foundation.
* However, the following notice accompanied the original version of this
* file:
*
* (C) 2009 by Remo Dentato (rdentato@gmail.com)
*
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* http://opensource.org/licenses/bsd-license.php
*/

#ifndef SHARE_GC_Z_ZHASH_INLINE_HPP
#define SHARE_GC_Z_ZHASH_INLINE_HPP

@ -107,6 +107,10 @@ size_t ZHeap::max_capacity() const {
return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
return _page_allocator.capacity();
}

@ -79,6 +79,7 @@ public:
// Heap metrics
size_t min_capacity() const;
size_t max_capacity() const;
size_t current_max_capacity() const;
size_t capacity() const;
size_t max_reserve() const;
size_t used_high() const;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,11 +84,12 @@ public:
ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) :
_lock(),
_virtual(),
_physical(max_capacity, ZPageSizeMin),
_cache(),
_pre_mapped(_virtual, _physical, min_capacity),
_max_reserve(max_reserve),
_pre_mapped(_virtual, _physical, try_ensure_unused_for_pre_mapped(min_capacity)),
_used_high(0),
_used_low(0),
_used(0),
@ -107,6 +108,10 @@ size_t ZPageAllocator::max_capacity() const {
return _physical.max_capacity();
}

size_t ZPageAllocator::current_max_capacity() const {
return _physical.current_max_capacity();
}

size_t ZPageAllocator::capacity() const {
return _physical.capacity();
}
@ -169,18 +174,43 @@ void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
}
}

size_t ZPageAllocator::available(ZAllocationFlags flags) const {
size_t available = max_capacity() - used();
assert(_physical.available() + _pre_mapped.available() + _cache.available() == available, "Should be equal");
size_t ZPageAllocator::max_available(bool no_reserve) const {
size_t available = current_max_capacity() - used();

if (flags.no_reserve()) {
// The memory reserve should not be considered free
if (no_reserve) {
// The reserve should not be considered available
available -= MIN2(available, max_reserve());
}

return available;
}

size_t ZPageAllocator::try_ensure_unused(size_t size, bool no_reserve) {
// Ensure that we always have space available for the reserve. This
// is needed to avoid losing the reserve because of failure to map
// more memory before reaching max capacity.
_physical.try_ensure_unused_capacity(size + max_reserve());

size_t unused = _physical.unused_capacity();

if (no_reserve) {
// The reserve should not be considered unused
unused -= MIN2(unused, max_reserve());
}

return MIN2(size, unused);
}

size_t ZPageAllocator::try_ensure_unused_for_pre_mapped(size_t size) {
// This function is called during construction, where the
// physical memory manager might have failed to initialize.
if (!_physical.is_initialized()) {
return 0;
}

return try_ensure_unused(size, true /* no_reserve */);
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
// Allocate physical memory
const ZPhysicalMemory pmem = _physical.alloc(size);
@ -259,8 +289,8 @@ void ZPageAllocator::check_out_of_memory_during_initialization() {
}

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) {
const size_t available_total = available(flags);
if (available_total < size) {
const size_t max = max_available(flags.no_reserve());
if (max < size) {
// Not enough free memory
return NULL;
}
@ -281,11 +311,11 @@ ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAlloc
// subsequent allocations can use the physical memory.
flush_pre_mapped();

// Check if physical memory is available
const size_t available_physical = _physical.available();
if (available_physical < size) {
// Try ensure that physical memory is available
const size_t unused = try_ensure_unused(size, flags.no_reserve());
if (unused < size) {
// Flush cache to free up more physical memory
flush_cache(size - available_physical);
flush_cache(size - unused);
}

// Create new page and allocate physical memory
@ -303,7 +333,7 @@ ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationF
increase_used(size, flags.relocation());

// Send trace event
ZTracer::tracer()->report_page_alloc(size, used(), available(flags), _cache.available(), flags);
ZTracer::tracer()->report_page_alloc(size, used(), max_available(flags.no_reserve()), _cache.available(), flags);

return page;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,8 +43,8 @@ private:
ZVirtualMemoryManager _virtual;
ZPhysicalMemoryManager _physical;
ZPageCache _cache;
ZPreMappedMemory _pre_mapped;
const size_t _max_reserve;
ZPreMappedMemory _pre_mapped;
size_t _used_high;
size_t _used_low;
size_t _used;
@ -58,7 +58,9 @@ private:
void increase_used(size_t size, bool relocation);
void decrease_used(size_t size, bool reclaimed);

size_t available(ZAllocationFlags flags) const;
size_t max_available(bool no_reserve) const;
size_t try_ensure_unused(size_t size, bool no_reserve);
size_t try_ensure_unused_for_pre_mapped(size_t size);

ZPage* create_page(uint8_t type, size_t size);
void map_page(ZPage* page);
@ -83,6 +85,7 @@ public:
bool is_initialized() const;

size_t max_capacity() const;
size_t current_max_capacity() const;
size_t capacity() const;
size_t max_reserve() const;
size_t used_high() const;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "memory/allocation.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

ZPhysicalMemory::ZPhysicalMemory() :
_nsegments(0),
@ -93,6 +94,7 @@ void ZPhysicalMemory::clear() {
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity, size_t granule_size) :
_backing(max_capacity, granule_size),
_max_capacity(max_capacity),
_current_max_capacity(max_capacity),
_capacity(0),
_used(0) {}

@ -100,31 +102,34 @@ bool ZPhysicalMemoryManager::is_initialized() const {
return _backing.is_initialized();
}

bool ZPhysicalMemoryManager::ensure_available(size_t size) {
const size_t unused_capacity = _capacity - _used;
if (unused_capacity >= size) {
// Enough unused capacity available
return true;
void ZPhysicalMemoryManager::try_ensure_unused_capacity(size_t size) {
const size_t unused = unused_capacity();
if (unused >= size) {
// Don't try to expand, enough unused capacity available
return;
}

const size_t expand_with = size - unused_capacity;
const size_t new_capacity = _capacity + expand_with;
if (new_capacity > _max_capacity) {
// Can not expand beyond max capacity
return false;
const size_t current_max = current_max_capacity();
if (_capacity == current_max) {
// Don't try to expand, current max capacity reached
return;
}

// Expand
if (!_backing.expand(_capacity, new_capacity)) {
log_error(gc)("Failed to expand Java heap with " SIZE_FORMAT "%s",
byte_size_in_proper_unit(expand_with),
proper_unit_for_byte_size(expand_with));
return false;
// Try to expand
const size_t old_capacity = capacity();
const size_t new_capacity = MIN2(old_capacity + size - unused, current_max);
_capacity = _backing.try_expand(old_capacity, new_capacity);

if (_capacity != new_capacity) {
// Failed, or partly failed, to expand
log_error(gc, init)("Not enough space available on the backing filesystem to hold the current max");
log_error(gc, init)("Java heap size (" SIZE_FORMAT "M). Forcefully lowering max Java heap size to "
SIZE_FORMAT "M (%.0lf%%).", current_max / M, _capacity / M,
percent_of(_capacity, current_max));

// Adjust current max capacity to avoid further expand attempts
_current_max_capacity = _capacity;
}

_capacity = new_capacity;

return true;
}
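
A worked example of the new partial-expansion handling, with invented numbers (none of them come from the sources): 512M mapped, 256M more requested, a 1024M current max, and a backing filesystem that can only provide 600M in total.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  size_t current_max_capacity = 1024;  // MB
  const size_t old_capacity = 512;     // MB already mapped
  const size_t shortfall = 256;        // MB still needed (size - unused)
  const size_t new_capacity =
      std::min(old_capacity + shortfall, current_max_capacity);  // 768M
  const size_t achieved = 600;         // MB the expansion managed, say
  if (achieved != new_capacity) {
    // Failed, or partly failed, to expand: lower the max so no further
    // expansion attempts are made against a filesystem that is full.
    std::printf("lowering current max capacity %zuM -> %zuM\n",
                current_max_capacity, achieved);
    current_max_capacity = achieved;
  }
  return 0;
}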

void ZPhysicalMemoryManager::nmt_commit(ZPhysicalMemory pmem, uintptr_t offset) {
@ -144,7 +149,7 @@ void ZPhysicalMemoryManager::nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset
}

ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
if (!ensure_available(size)) {
if (unused_capacity() < size) {
// Not enough memory available
return ZPhysicalMemory();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,11 +70,10 @@ class ZPhysicalMemoryManager {
private:
ZPhysicalMemoryBacking _backing;
const size_t _max_capacity;
size_t _current_max_capacity;
size_t _capacity;
size_t _used;

bool ensure_available(size_t size);

void nmt_commit(ZPhysicalMemory pmem, uintptr_t offset);
void nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset);

@ -84,9 +83,11 @@ public:
bool is_initialized() const;

size_t max_capacity() const;
size_t current_max_capacity() const;
size_t capacity() const;
size_t used() const;
size_t available() const;
size_t unused_capacity() const;

void try_ensure_unused_capacity(size_t size);

ZPhysicalMemory alloc(size_t size);
void free(ZPhysicalMemory pmem);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,16 +71,16 @@ inline size_t ZPhysicalMemoryManager::max_capacity() const {
return _max_capacity;
}

inline size_t ZPhysicalMemoryManager::current_max_capacity() const {
return _current_max_capacity;
}

inline size_t ZPhysicalMemoryManager::capacity() const {
return _capacity;
}

inline size_t ZPhysicalMemoryManager::used() const {
return _used;
}

inline size_t ZPhysicalMemoryManager::available() const {
return _max_capacity - _used;
inline size_t ZPhysicalMemoryManager::unused_capacity() const {
return _capacity - _used;
}

#endif // SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,21 +42,25 @@ ZPreMappedMemory::ZPreMappedMemory(ZVirtualMemoryManager &vmm, ZPhysicalMemoryMa
log_info(gc, init)("Pre-touching: %s", AlwaysPreTouch ? "Enabled" : "Disabled");
log_info(gc, init)("Pre-mapping: " SIZE_FORMAT "M", size / M);

_pmem = pmm.alloc(size);
if (_pmem.is_null()) {
// Out of memory
return;
}
if (size > 0) {
_pmem = pmm.alloc(size);
if (_pmem.is_null()) {
// Out of memory
log_error(gc, init)("Failed to pre-map Java heap (Cannot allocate physical memory)");
return;
}

_vmem = vmm.alloc(size, true /* alloc_from_front */);
if (_vmem.is_null()) {
// Out of address space
pmm.free(_pmem);
return;
}
_vmem = vmm.alloc(size, true /* alloc_from_front */);
if (_vmem.is_null()) {
// Out of address space
log_error(gc, init)("Failed to pre-map Java heap (Cannot allocate virtual memory)");
pmm.free(_pmem);
return;
}

// Map physical memory
pmm.map(_pmem, _vmem.start());
// Map physical memory
pmm.map(_pmem, _vmem.start());
}

_initialized = true;
}

@ -171,6 +171,8 @@ JVM_IsSupportedJNIVersion(jint version);
JNIEXPORT jobjectArray JNICALL
JVM_GetVmArguments(JNIEnv *env);

JNIEXPORT void JNICALL
JVM_InitializeFromArchive(JNIEnv* env, jclass cls);

/*
* java.lang.Throwable

@ -1221,17 +1221,14 @@ void LinkResolver::runtime_resolve_special_method(CallInfo& result,
// check if the method is not <init>
resolved_method->name() != vmSymbols::object_initializer_name()) {

// check if this is an old-style super call and do a new lookup if so
// a) check if ACC_SUPER flag is set for the current class
Klass* current_klass = link_info.current_klass();
if ((current_klass->is_super() || !AllowNonVirtualCalls) &&
// b) check if the class of the resolved_klass is a superclass
// (not supertype in order to exclude interface classes) of the current class.
// This check is not performed for super.invoke for interface methods
// in super interfaces.
current_klass->is_subclass_of(resolved_klass) &&
current_klass != resolved_klass
) {

// Check if the class of the resolved_klass is a superclass
// (not supertype in order to exclude interface classes) of the current class.
// This check is not performed for super.invoke for interface methods
// in super interfaces.
if (current_klass->is_subclass_of(resolved_klass) &&
current_klass != resolved_klass) {
// Lookup super method
Klass* super_klass = current_klass->super();
sel_method = lookup_instance_method_in_klasses(super_klass,

@ -26,10 +26,10 @@
#include "jfr/jfr.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/java.hpp"

@ -64,9 +64,7 @@ void Jfr::on_unloading_classes() {
}

void Jfr::on_thread_exit(JavaThread* thread) {
if (JfrRecorder::is_recording()) {
JfrThreadLocal::on_exit(thread);
}
JfrThreadLocal::on_exit(thread);
}

void Jfr::on_thread_destruct(Thread* thread) {

@ -74,7 +74,6 @@ CLDClaimContext::CLDClaimContext(ClassLoaderData* cld) : _cld(cld) {

CLDClaimContext::~CLDClaimContext() {
if (_cld != NULL) {
assert(!_cld->claimed(), "invariant");
_cld->claim();
assert(_cld->claimed(), "invariant");
}

@ -60,22 +60,32 @@
#include "gc/g1/g1YCTypes.hpp"
#endif

class JfrCheckpointThreadCountClosure : public ThreadClosure {
private:
u4 _total_threads;
public:
JfrCheckpointThreadCountClosure() : _total_threads(0) {}
u4 total_threads() { return _total_threads; }
void do_thread(Thread *t) { _total_threads++; }
};

// Requires a ResourceMark for get_thread_name/as_utf8
class JfrCheckpointThreadClosure : public ThreadClosure {
private:
JfrCheckpointWriter& _writer;
Thread* _curthread;
JfrCheckpointContext _ctx;
const intptr_t _count_position;
Thread* const _curthread;
u4 _count;

public:
JfrCheckpointThreadClosure(JfrCheckpointWriter& writer) : _writer(writer), _curthread(Thread::current()) {}
JfrCheckpointThreadClosure(JfrCheckpointWriter& writer) : _writer(writer),
_ctx(writer.context()),
_count_position(writer.reserve(sizeof(u4))),
_curthread(Thread::current()),
_count(0) {
}

~JfrCheckpointThreadClosure() {
if (_count == 0) {
// restore
_writer.set_context(_ctx);
return;
}
_writer.write_count(_count, _count_position);
}

void do_thread(Thread* t);
};

@ -83,10 +93,16 @@ class JfrCheckpointThreadClosure : public ThreadClosure {
void JfrCheckpointThreadClosure::do_thread(Thread* t) {
assert(t != NULL, "invariant");
assert_locked_or_safepoint(Threads_lock);
_writer.write_key(t->jfr_thread_local()->thread_id());
const JfrThreadLocal* const tl = t->jfr_thread_local();
assert(tl != NULL, "invariant");
if (tl->is_dead()) {
return;
}
++_count;
_writer.write_key(tl->thread_id());
_writer.write(t->name());
const OSThread* const os_thread = t->osthread();
_writer.write<traceid>(os_thread != NULL ? os_thread->thread_id() : (u8)0);
_writer.write<traceid>(os_thread != NULL ? os_thread->thread_id() : 0);
if (t->is_Java_thread()) {
JavaThread* const jt = (JavaThread*)t;
_writer.write(jt->name());
@ -97,17 +113,12 @@ void JfrCheckpointThreadClosure::do_thread(Thread* t) {
return;
}
_writer.write((const char*)NULL); // java name
_writer.write<traceid>((traceid)0); // java thread id
_writer.write<traceid>((traceid)0); // java thread group
_writer.write((traceid)0); // java thread id
_writer.write((traceid)0); // java thread group
}

void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
JfrCheckpointThreadCountClosure tcc;
Threads::threads_do(&tcc);
const u4 total_threads = tcc.total_threads();
// THREADS
writer.write_count(total_threads);
JfrCheckpointThreadClosure tc(writer);
Threads::threads_do(&tc);
}
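
The rewritten closure swaps the old two-pass scheme (count threads, then write them) for reserve-then-patch: reserve room for the count up front, bump a counter per live thread written, and patch the real count in the destructor. A self-contained sketch of that pattern (the Writer type and its methods are invented for illustration; this is not the JfrCheckpointWriter API):

#include <cstdint>
#include <cstring>
#include <vector>

class Writer {
  std::vector<uint8_t> _buf;
 public:
  // Leave a 4-byte hole and return its offset, to be patched later.
  size_t reserve_u4() {
    const size_t pos = _buf.size();
    _buf.resize(pos + sizeof(uint32_t));
    return pos;
  }
  // Append a value at the end of the buffer.
  void write_u4(uint32_t v) {
    const size_t pos = reserve_u4();
    std::memcpy(&_buf[pos], &v, sizeof(v));
  }
  // Fill a previously reserved hole.
  void patch_u4(size_t pos, uint32_t v) {
    std::memcpy(&_buf[pos], &v, sizeof(v));
  }
};

int main() {
  Writer w;
  const size_t count_pos = w.reserve_u4();  // placeholder for the count
  uint32_t count = 0;
  for (uint32_t id = 1; id <= 3; id++) {    // stand-ins for live threads
    w.write_u4(id);
    ++count;
  }
  w.patch_u4(count_pos, count);  // single pass, count patched at the end
  return 0;
}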
@ -334,7 +345,7 @@ void JfrThreadConstant::serialize(JfrCheckpointWriter& writer) {
writer.write_count(1);
writer.write_key(_thread->jfr_thread_local()->thread_id());
writer.write(thread_name);
writer.write((u8)_thread->osthread()->thread_id());
writer.write((traceid)_thread->osthread()->thread_id());
writer.write(thread_name);
writer.write(java_lang_thread_id);
writer.write(thread_group_id);

@ -148,9 +148,8 @@ void JfrTypeManager::write_safepoint_types(JfrCheckpointWriter& writer) {
}

void JfrTypeManager::write_type_set() {
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
// can safepoint here because of Module_lock
MutexLockerEx lock(Module_lock);
MutexLockerEx lock(SafepointSynchronize::is_at_safepoint() ? NULL : Module_lock);
JfrCheckpointWriter writer(true, true, Thread::current());
TypeSet set;
set.serialize(writer);

@ -23,8 +23,9 @@
*/

#include "precompiled.hpp"
#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
@ -51,7 +52,8 @@ JfrThreadLocal::JfrThreadLocal() :
_wallclock_time(os::javaTimeNanos()),
_stack_trace_hash(0),
_stackdepth(0),
_entering_suspend_flag(0) {}
_entering_suspend_flag(0),
_dead(false) {}

u8 JfrThreadLocal::add_data_lost(u8 value) {
_data_lost += value;
@ -71,9 +73,17 @@ const JfrCheckpointBlobHandle& JfrThreadLocal::thread_checkpoint() const {
return _thread_cp;
}

void JfrThreadLocal::set_dead() {
assert(!is_dead(), "invariant");
_dead = true;
}

void JfrThreadLocal::on_exit(JavaThread* thread) {
JfrCheckpointManager::write_thread_checkpoint(thread);
JfrThreadCPULoadEvent::send_event_for_thread(thread);
if (JfrRecorder::is_recording()) {
JfrCheckpointManager::write_thread_checkpoint(thread);
JfrThreadCPULoadEvent::send_event_for_thread(thread);
}
thread->jfr_thread_local()->set_dead();
}

void JfrThreadLocal::on_destruct(Thread* thread) {

@ -50,11 +50,14 @@ class JfrThreadLocal {
unsigned int _stack_trace_hash;
mutable u4 _stackdepth;
volatile jint _entering_suspend_flag;
bool _dead;

JfrBuffer* install_native_buffer() const;
JfrBuffer* install_java_buffer() const;
JfrStackFrame* install_stackframes() const;

void set_dead();

public:
JfrThreadLocal();

@ -202,6 +205,10 @@ class JfrThreadLocal {
_trace_id = id;
}

bool is_dead() const {
return _dead;
}

bool has_thread_checkpoint() const;
void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle);
const JfrCheckpointBlobHandle& thread_checkpoint() const;

506
src/hotspot/share/memory/heapShared.cpp
Normal file
@ -0,0 +1,506 @@
|
||||
/*
|
||||
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "classfile/javaClasses.inline.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "logging/logMessage.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "memory/heapShared.hpp"
|
||||
#include "memory/iterator.inline.hpp"
|
||||
#include "memory/metadataFactory.hpp"
|
||||
#include "memory/metaspaceClosure.hpp"
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
KlassSubGraphInfo* HeapShared::_subgraph_info_list = NULL;
|
||||
int HeapShared::_num_archived_subgraph_info_records = 0;
|
||||
Array<ArchivedKlassSubGraphInfoRecord>* HeapShared::_archived_subgraph_info_records = NULL;
|
||||
|
||||
// Currently there is only one class mirror (ArchivedModuleGraph) with archived
|
||||
// sub-graphs.
|
||||
KlassSubGraphInfo* HeapShared::find_subgraph_info(Klass* k) {
|
||||
KlassSubGraphInfo* info = _subgraph_info_list;
|
||||
while (info != NULL) {
|
||||
if (info->klass() == k) {
|
||||
return info;
|
||||
}
|
||||
info = info->next();
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Get the subgraph_info for Klass k. A new subgraph_info is created if
|
||||
// there is no existing one for k. The subgraph_info records the relocated
|
||||
// Klass* of the original k.
|
||||
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
|
||||
Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
|
||||
KlassSubGraphInfo* info = find_subgraph_info(relocated_k);
|
||||
if (info != NULL) {
|
||||
return info;
|
||||
}
|
||||
|
||||
info = new KlassSubGraphInfo(relocated_k, _subgraph_info_list);
|
||||
_subgraph_info_list = info;
|
||||
return info;
|
||||
}
|
||||
|
||||
int HeapShared::num_of_subgraph_infos() {
|
||||
int num = 0;
|
||||
KlassSubGraphInfo* info = _subgraph_info_list;
|
||||
while (info != NULL) {
|
||||
num ++;
|
||||
info = info->next();
|
||||
}
|
||||
return num;
|
||||
}
|
||||
|
||||
// Add an entry field to the current KlassSubGraphInfo.
|
||||
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
|
||||
assert(DumpSharedSpaces, "dump time only");
|
||||
if (_subgraph_entry_fields == NULL) {
|
||||
_subgraph_entry_fields =
|
||||
new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
|
||||
}
|
||||
_subgraph_entry_fields->append((juint)static_field_offset);
|
||||
_subgraph_entry_fields->append(CompressedOops::encode(v));
|
||||
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in a sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) {
  assert(DumpSharedSpaces, "dump time only");
  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
         "must be the relocated Klass in the shared space");

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
  }

  assert(relocated_k->is_shared(), "must be a shared class");
  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
           "must be boot class");
    // SystemDictionary::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == SystemDictionary::String_klass() ||
        orig_k == SystemDictionary::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _next = NULL;
  _entry_field_records = NULL;
  _subgraph_klasses = NULL;

  // populate the entry fields
  GrowableArray<juint>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      MetaspaceShared::new_ro_array<juint>(num_entry_fields);
    for (int i = 0; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_klasses = info->subgraph_object_klasses();
  if (subgraph_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_klasses->length();
    _subgraph_klasses =
      MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass (%d): %s in %s sub-graphs",
          i, subgraph_k->external_name(), _k->external_name());
      }
      _subgraph_klasses->at_put(i, subgraph_k);
    }
  }
}

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset and value are recorded in the sub-graph info. The values are stored
//   back to the corresponding fields at runtime.
// - A list of klasses that need to be loaded/initialized before an archived
//   java object sub-graph can be accessed at runtime.
//
// The records are saved in the archive file and reloaded at runtime. Currently
// there is only one class mirror (ArchivedModuleGraph) with archived sub-graphs.
//
// Layout of the archived subgraph info records:
//
// records_size | num_records | records*
// ArchivedKlassSubGraphInfoRecord | entry_fields | subgraph_object_klasses
size_t HeapShared::build_archived_subgraph_info_records(int num_records) {
  // remember the start address
  char* start_p = MetaspaceShared::read_only_space_top();

  // now populate the archived subgraph infos, which will be saved in the
  // archive file
  _archived_subgraph_info_records =
    MetaspaceShared::new_ro_array<ArchivedKlassSubGraphInfoRecord>(num_records);
  KlassSubGraphInfo* info = _subgraph_info_list;
  int i = 0;
  while (info != NULL) {
    assert(i < _archived_subgraph_info_records->length(), "sanity");
    ArchivedKlassSubGraphInfoRecord* record =
      _archived_subgraph_info_records->adr_at(i);
    record->init(info);
    info = info->next();
    i++;
  }

  // _subgraph_info_list is no longer needed
  delete _subgraph_info_list;
  _subgraph_info_list = NULL;

  char* end_p = MetaspaceShared::read_only_space_top();
  size_t records_size = end_p - start_p;
  return records_size;
}

// Write the subgraph info records in the shared _ro region
void HeapShared::write_archived_subgraph_infos() {
  assert(DumpSharedSpaces, "dump time only");

  Array<intptr_t>* records_header = MetaspaceShared::new_ro_array<intptr_t>(3);

  _num_archived_subgraph_info_records = num_of_subgraph_infos();
  size_t records_size = build_archived_subgraph_info_records(
    _num_archived_subgraph_info_records);

  // Now write the header information:
  // records_size, num_records, _archived_subgraph_info_records
  assert(records_header != NULL, "sanity");
  intptr_t* p = (intptr_t*)(records_header->data());
  *p = (intptr_t)records_size;
  p++;
  *p = (intptr_t)_num_archived_subgraph_info_records;
  p++;
  *p = (intptr_t)_archived_subgraph_info_records;
}
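
// Illustrative layout of the header written above (derived from the three
// stores in write_archived_subgraph_infos; slot names are descriptive only):
//
//   records_header->at(0) == (intptr_t)records_size;                         // payload bytes
//   records_header->at(1) == (intptr_t)_num_archived_subgraph_info_records;  // record count
//   records_header->at(2) == (intptr_t)_archived_subgraph_info_records;      // records base
//
// read_archived_subgraph_infos() below consumes the slots in the same order.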

char* HeapShared::read_archived_subgraph_infos(char* buffer) {
  Array<intptr_t>* records_header = (Array<intptr_t>*)buffer;
  intptr_t* p = (intptr_t*)(records_header->data());
  size_t records_size = (size_t)(*p);
  p++;
  _num_archived_subgraph_info_records = *p;
  p++;
  _archived_subgraph_info_records =
    (Array<ArchivedKlassSubGraphInfoRecord>*)(*p);

  buffer = (char*)_archived_subgraph_info_records + records_size;
  return buffer;
}

void HeapShared::initialize_from_archived_subgraph(Klass* k) {
  if (!MetaspaceShared::open_archive_heap_region_mapped()) {
    return; // nothing to do
  }

  if (_num_archived_subgraph_info_records == 0) {
    return; // no subgraph info records
  }

  // Initialize from archived data. Currently only ArchivedModuleGraph
  // has archived object subgraphs, which is used during VM initialization
  // time when bootstrapping the system modules. No lock is needed.
  Thread* THREAD = Thread::current();
  for (int i = 0; i < _archived_subgraph_info_records->length(); i++) {
    ArchivedKlassSubGraphInfoRecord* record = _archived_subgraph_info_records->adr_at(i);
    if (record->klass() == k) {
      int i;
      // Found the archived subgraph info record for the requesting klass.
      // Load/link/initialize the klasses of the objects in the subgraph.
      // NULL class loader is used.
      Array<Klass*>* klasses = record->subgraph_klasses();
      if (klasses != NULL) {
        for (i = 0; i < klasses->length(); i++) {
          Klass* obj_k = klasses->at(i);
          Klass* resolved_k = SystemDictionary::resolve_or_null(
            (obj_k)->name(), THREAD);
          if (resolved_k != obj_k) {
            return;
          }
          if ((obj_k)->is_instance_klass()) {
            InstanceKlass* ik = InstanceKlass::cast(obj_k);
            ik->initialize(THREAD);
          } else if ((obj_k)->is_objArray_klass()) {
            ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k);
            oak->initialize(THREAD);
          }
        }
      }

      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;
        // None of the field values will be set if there was an exception.
        // The java code will not see any of the archived objects in the
        // subgraphs referenced from k in this case.
        return;
      }

      // Load the subgraph entry fields from the record and store them back to
      // the corresponding fields within the mirror.
      oop m = k->java_mirror();
      Array<juint>* entry_field_records = record->entry_field_records();
      if (entry_field_records != NULL) {
        int efr_len = entry_field_records->length();
        assert(efr_len % 2 == 0, "sanity");
        for (i = 0; i < efr_len;) {
          int field_offset = entry_field_records->at(i);
          // The object referenced by the field becomes 'known' by GC from this
          // point. All objects in the subgraph reachable from the object are
          // also 'known' by GC.
          oop v = MetaspaceShared::materialize_archived_object(
            CompressedOops::decode(entry_field_records->at(i+1)));
          m->obj_field_put(field_offset, v);
          i += 2;
        }
      }

      // Done. Java code can see the archived sub-graphs referenced from k's
      // mirror after this point.
      return;
    }
  }
}
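
// Usage note: this runtime path is reached from Java via the
// JVM_InitializeFromArchive entry point added later in this change:
//
//   JVM_ENTRY(void, JVM_InitializeFromArchive(JNIEnv* env, jclass cls))
//     Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
//     HeapShared::initialize_from_archived_subgraph(k);
//   JVM_END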

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;
 public:
  WalkOopAndArchiveClosure(int level, KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived) : _level(level),
                                                     _subgraph_info(subgraph_info),
                                                     _orig_referencing_obj(orig),
                                                     _archived_referencing_obj(archived) {}
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      // A java.lang.Class instance cannot be included in an archived
      // object sub-graph.
      if (java_lang_Class::is_instance(obj)) {
        tty->print("Unknown java.lang.Class object is in the archived sub-graph\n");
        vm_exit(1);
      }

      LogTarget(Debug, cds, heap) log;
      LogStream ls(log);
      outputStream* out = &ls;
      {
        ResourceMark rm;
        log.print("(%d) %s <--- referenced from: %s",
                  _level, obj->klass()->external_name(),
                  CompressedOops::is_null(_orig_referencing_obj) ?
                    "" : _orig_referencing_obj->klass()->external_name());
        obj->print_on(out);
      }

      if (MetaspaceShared::is_archive_object(obj)) {
        // The current oop is an archived oop, nothing needs to be done
        log.print("--- object is already archived ---");
        return;
      }

      size_t field_delta = pointer_delta(
        p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(address(_archived_referencing_obj) + field_delta);
      oop archived = MetaspaceShared::find_archived_heap_object(obj);
      if (archived != NULL) {
        // There is an existing archived copy; update the reference to point
        // to the archived copy
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
        log.print(
          "--- found existing archived copy, store archived " PTR_FORMAT " in " PTR_FORMAT,
          p2i(archived), p2i(new_p));
        return;
      }

      int l = _level + 1;
      Thread* THREAD = Thread::current();
      // Archive the current oop before iterating through its references
      archived = MetaspaceShared::archive_heap_object(obj, THREAD);
      assert(MetaspaceShared::is_archive_object(archived), "must be archived");
      log.print("=== archiving oop " PTR_FORMAT " ==> " PTR_FORMAT,
                p2i(obj), p2i(archived));

      // Follow the references in the current oop and archive any
      // objects encountered during the process
      WalkOopAndArchiveClosure walker(l, _subgraph_info, obj, archived);
      obj->oop_iterate(&walker);

      // Update the reference in the archived copy of the referencing object
      RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      log.print("=== store archived " PTR_FORMAT " in " PTR_FORMAT,
                p2i(archived), p2i(new_p));

      // Add the klass to the list of classes that need to be loaded before
      // module system initialization
      Klass *orig_k = obj->klass();
      Klass *relocated_k = archived->klass();
      _subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);
    }
  }
};

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Currently, only the ArchivedModuleGraph class instance (mirror) has archived
// object subgraphs. Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot classes only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(Klass *k,
                                                             int field_offset,
                                                             BasicType field_type,
                                                             TRAPS) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_instance_klass(), "sanity");
  assert(InstanceKlass::cast(k)->is_shared_boot_class(),
         "must be boot class");

  oop m = k->java_mirror();
  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    return;
  }

  if (field_type == T_OBJECT) {
    // obtain k's subGraph Info
    KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);

    // get the object referenced by the field
    oop f = m->obj_field(field_offset);
    if (!CompressedOops::is_null(f)) {
      LogTarget(Debug, cds, heap) log;
      LogStream ls(log);
      outputStream* out = &ls;
      log.print("Start from: ");
      f->print_on(out);

      // get the archived copy of the object referenced by the field
      oop af = MetaspaceShared::archive_heap_object(f, THREAD);
      if (!MetaspaceShared::is_archive_object(f)) {
        WalkOopAndArchiveClosure walker(1, subgraph_info, f, af);
        f->oop_iterate(&walker);
      }

      // The field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af);
      Klass *relocated_k = af->klass();
      Klass *orig_k = f->klass();
      subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);
    } else {
      // The field contains null; we still need to record the entry point,
      // so it can be restored at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, NULL);
    }
  } else {
    ShouldNotReachHere();
  }
}

#define do_module_object_graph(archive_object_graph_do) \
  archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedSystemModules_offset(), T_OBJECT, CHECK); \
  archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedModuleFinder_offset(), T_OBJECT, CHECK); \
  archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedMainModule_offset(), T_OBJECT, CHECK)

void HeapShared::archive_module_graph_objects(Thread* THREAD) {
  do_module_object_graph(archive_reachable_objects_from_static_field);
}
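
// Expansion note: with archive_reachable_objects_from_static_field as the
// macro argument, the call above expands to three direct invocations; the
// first one is:
//
//   archive_reachable_objects_from_static_field(
//       SystemDictionary::ArchivedModuleGraph_klass(),
//       jdk_internal_module_ArchivedModuleGraph::archivedSystemModules_offset(),
//       T_OBJECT, CHECK);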
#endif // INCLUDE_CDS_JAVA_HEAP
134 src/hotspot/share/memory/heapShared.hpp Normal file
@ -0,0 +1,134 @@
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_HEAPSHARED_HPP
#define SHARE_VM_MEMORY_HEAPSHARED_HPP

#include "classfile/systemDictionary.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayKlass.hpp"
#include "utilities/growableArray.hpp"

#if INCLUDE_CDS_JAVA_HEAP
// A dump time sub-graph info for Klass _k. It includes the entry points
// (static fields in _k's mirror) of the archived sub-graphs reachable
// from _k's mirror. It also contains a list of Klasses of the objects
// within the sub-graphs.
class KlassSubGraphInfo: public CHeapObj<mtClass> {
 private:
  KlassSubGraphInfo* _next;
  // The class that contains the static field(s) as the entry point(s)
  // of archived object sub-graph(s).
  Klass* _k;
  // A list of classes that need to be loaded and initialized before the
  // archived object sub-graphs can be accessed at runtime.
  GrowableArray<Klass*>* _subgraph_object_klasses;
  // A list of _k's static fields as the entry points of archived sub-graphs.
  // For each entry field, it is a pair of field_offset and field_value.
  GrowableArray<juint>* _subgraph_entry_fields;

 public:
  KlassSubGraphInfo(Klass* k, KlassSubGraphInfo* next) :
    _next(next), _k(k), _subgraph_object_klasses(NULL),
    _subgraph_entry_fields(NULL) {}
  ~KlassSubGraphInfo() {
    if (_subgraph_object_klasses != NULL) {
      delete _subgraph_object_klasses;
    }
    if (_subgraph_entry_fields != NULL) {
      delete _subgraph_entry_fields;
    }
  }

  KlassSubGraphInfo* next() { return _next; }
  Klass* klass() { return _k; }
  GrowableArray<Klass*>* subgraph_object_klasses() {
    return _subgraph_object_klasses;
  }
  GrowableArray<juint>* subgraph_entry_fields() {
    return _subgraph_entry_fields;
  }
  void add_subgraph_entry_field(int static_field_offset, oop v);
  void add_subgraph_object_klass(Klass *orig_k, Klass *relocated_k);
};

// An archived record of object sub-graphs reachable from static
// fields within _k's mirror. The record is reloaded from the archive
// at runtime.
class ArchivedKlassSubGraphInfoRecord {
 private:
  ArchivedKlassSubGraphInfoRecord* _next;
  Klass* _k;

  // contains pairs of field offset and value for each subgraph entry field
  Array<juint>* _entry_field_records;

  // klasses of objects in archived sub-graphs referenced from the entry points
  // (static fields) in the containing class
  Array<Klass*>* _subgraph_klasses;
 public:
  ArchivedKlassSubGraphInfoRecord() :
    _next(NULL), _k(NULL), _entry_field_records(NULL), _subgraph_klasses(NULL) {}
  void init(KlassSubGraphInfo* info);
  Klass* klass() { return _k; }
  ArchivedKlassSubGraphInfoRecord* next() { return _next; }
  void set_next(ArchivedKlassSubGraphInfoRecord* next) { _next = next; }
  Array<juint>* entry_field_records() { return _entry_field_records; }
  Array<Klass*>* subgraph_klasses() { return _subgraph_klasses; }
};
#endif // INCLUDE_CDS_JAVA_HEAP

class HeapShared: AllStatic {
 private:
#if INCLUDE_CDS_JAVA_HEAP
  // This is a list of subgraph infos built at dump time while
  // archiving object subgraphs.
  static KlassSubGraphInfo* _subgraph_info_list;

  // Contains a list of ArchivedKlassSubGraphInfoRecords that is stored
  // in the archive file and reloaded at runtime.
  static int _num_archived_subgraph_info_records;
  static Array<ArchivedKlassSubGraphInfoRecord>* _archived_subgraph_info_records;

  // Archive an object sub-graph starting from the given static field
  // in Klass k's mirror.
  static void archive_reachable_objects_from_static_field(
    Klass* k, int field_offset, BasicType field_type, TRAPS);

  static KlassSubGraphInfo* find_subgraph_info(Klass *k);
  static KlassSubGraphInfo* get_subgraph_info(Klass *k);
  static int num_of_subgraph_infos();

  static size_t build_archived_subgraph_info_records(int num_records);
#endif // INCLUDE_CDS_JAVA_HEAP
 public:
  static char* read_archived_subgraph_infos(char* buffer) NOT_CDS_JAVA_HEAP_RETURN_(buffer);
  static void write_archived_subgraph_infos() NOT_CDS_JAVA_HEAP_RETURN;
  static void initialize_from_archived_subgraph(Klass* k) NOT_CDS_JAVA_HEAP_RETURN;

  static void archive_module_graph_objects(Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN;
};
#endif // SHARE_VM_MEMORY_HEAPSHARED_HPP

src/hotspot/share/memory/metaspaceShared.cpp
@ -39,6 +39,7 @@
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
@ -207,6 +208,10 @@ char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_top() {
  return _ro_region.top();
}

void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");

@ -456,6 +461,7 @@ void MetaspaceShared::serialize_well_known_classes(SerializeClosure* soc) {
  java_lang_StackFrameInfo::serialize(soc);
  java_lang_LiveStackFrameInfo::serialize(soc);
  java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(soc);
  jdk_internal_module_ArchivedModuleGraph::serialize(soc);
}

address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
@ -1350,6 +1356,11 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
  char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t));
  SystemDictionary::copy_table(table_top, _ro_region.top());

  // Write the archived object sub-graph infos. For each klass with sub-graphs,
  // the info includes the static fields (sub-graph entry points) and Klasses
  // of objects included in the sub-graph.
  HeapShared::write_archived_subgraph_infos();

  // Write the other data to the output array.
  WriteClosure wc(&_ro_region);
  MetaspaceShared::serialize(&wc);
@ -1861,6 +1872,8 @@ void MetaspaceShared::dump_open_archive_heap_objects(

  MetaspaceShared::archive_klass_objects(THREAD);

  HeapShared::archive_module_graph_objects(THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}
@ -1906,14 +1919,16 @@ oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
    ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
    cache->put(obj, archived_oop);
  }
  log_debug(cds)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                 p2i(obj), p2i(archived_oop));
  log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                       p2i(obj), p2i(archived_oop));
  return archived_oop;
}

oop MetaspaceShared::materialize_archived_object(oop obj) {
  assert(obj != NULL, "sanity");
  return G1CollectedHeap::heap()->materialize_archived_object(obj);
  if (obj != NULL) {
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}
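
// Note on the new null check above: entry fields recorded as null at dump time
// (see KlassSubGraphInfo::add_subgraph_entry_field) decode back to null at
// runtime, so HeapShared::initialize_from_archived_subgraph can pass the
// decoded value straight through:
//
//   oop v = MetaspaceShared::materialize_archived_object(
//       CompressedOops::decode(entry_field_records->at(i+1)));  // may be NULL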

void MetaspaceShared::archive_klass_objects(Thread* THREAD) {
@ -2121,6 +2136,9 @@ void MetaspaceShared::initialize_shared_spaces() {
  buffer += sizeof(intptr_t);
  buffer += len;

  // The table of archived java heap object sub-graph infos
  buffer = HeapShared::read_archived_subgraph_infos(buffer);

  // Verify various attributes of the archive, plus initialize the
  // shared string/symbol tables
  intptr_t* array = (intptr_t*)buffer;

src/hotspot/share/memory/metaspaceShared.hpp
@ -232,6 +232,8 @@ class MetaspaceShared : AllStatic {
  static char* misc_code_space_alloc(size_t num_bytes);
  static char* read_only_space_alloc(size_t num_bytes);

  static char* read_only_space_top();

  template <typename T>
  static Array<T>* new_ro_array(int length) {
#if INCLUDE_CDS

src/hotspot/share/oops/constantPool.cpp
@ -807,6 +807,17 @@ void ConstantPool::save_and_throw_exception(const constantPoolHandle& this_cp, i
  }
}

constantTag ConstantPool::constant_tag_at(int which) {
  constantTag tag = tag_at(which);
  if (tag.is_dynamic_constant() ||
      tag.is_dynamic_constant_in_error()) {
    // have to look at the signature for this one
    Symbol* constant_type = uncached_signature_ref_at(which);
    return constantTag::ofBasicType(FieldType::basic_type(constant_type));
  }
  return tag;
}
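
// Illustrative sketch (assumption, not in the change itself): for a
// CONSTANT_Dynamic entry the nominal tag does not identify the loadable type,
// so it is derived from the field descriptor in the signature, e.g. "I"
// yields T_INT and "D" yields T_DOUBLE:
//
//   constantTag t  = cp->constant_tag_at(index);            // basic-type tag
//   BasicType   bt = cp->basic_type_for_constant_at(index); // e.g. T_INT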

BasicType ConstantPool::basic_type_for_constant_at(int which) {
  constantTag tag = tag_at(which);
  if (tag.is_dynamic_constant() ||

src/hotspot/share/oops/constantPool.hpp
@ -719,6 +719,9 @@ class ConstantPool : public Metadata {
  enum { _no_index_sentinel = -1, _possible_index_sentinel = -2 };
 public:

  // Get the tag for a constant, which may involve a constant dynamic
  constantTag constant_tag_at(int which);
  // Get the basic type for a constant, which may involve a constant dynamic
  BasicType basic_type_for_constant_at(int which);

  // Resolve late bound constants.

src/hotspot/share/opto/gcm.cpp
@ -683,7 +683,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
    Block* store_block = get_block_for_node(store);
    assert(store_block != NULL, "unused killing projections skipped above");

    if (store->is_Phi() && store->in(0)->is_Loop()) {
    if (store->is_Phi()) {
      // Loop-phis need to raise load before input. (Other phis are treated
      // as store below.)
      //

src/hotspot/share/opto/ifnode.cpp
@ -1490,7 +1490,8 @@ Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) {
  // be skipped. For example, range check predicate has two checks
  // for lower and upper bounds.
  ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL) {
  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL ||
      unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL) {
    prev_dom = idom;
  }

src/hotspot/share/opto/loopPredicate.cpp
@ -1056,7 +1056,9 @@ void PhaseIdealLoop::loop_predication_follow_branches(Node *n, IdealLoopTree *lo
        stack.push(in, 1);
        break;
      } else if (in->is_IfProj() &&
                 in->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
                 in->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
                 (in->in(0)->Opcode() == Op_If ||
                  in->in(0)->Opcode() == Op_RangeCheck)) {
        if (pf.to(in) * loop_trip_cnt >= 1) {
          stack.push(in, 1);
        }
@ -1281,7 +1283,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
      Node* n = skip_loop_predicates(entry);
      // Check if predicates were already added to the profile predicate
      // block
      if (n != entry->in(0)->in(0)) {
      if (n != entry->in(0)->in(0) || n->outcnt() != 1) {
        has_profile_predicates = true;
      }
      entry = n;

src/hotspot/share/opto/loopTransform.cpp
@ -861,7 +861,9 @@ bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {

  // Check for being too big
  if (body_size > (uint)_local_loop_unroll_limit) {
    if ((UseSubwordForMaxVector || xors_in_loop >= 4) && body_size < (uint)LoopUnrollLimit * 4) return true;
    if ((cl->is_subword_loop() || xors_in_loop >= 4) && body_size < (uint)LoopUnrollLimit * 4) {
      return true;
    }
    // Normal case: loop too big
    return false;
  }

src/hotspot/share/opto/loopnode.cpp
@ -616,6 +616,11 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop) {
  }

  IfNode* check_iff = limit_check_proj->in(0)->as_If();

  if (!is_dominator(get_ctrl(limit), check_iff->in(0))) {
    return false;
  }

  Node* cmp_limit;
  Node* bol;

@ -4224,34 +4229,34 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
    // which can inhibit range check elimination.
    if (least != early) {
      Node* ctrl_out = least->unique_ctrl_out();
      if (ctrl_out && ctrl_out->is_CountedLoop() &&
      if (ctrl_out && ctrl_out->is_Loop() &&
          least == ctrl_out->in(LoopNode::EntryControl)) {
        // Move the node above predicates as far up as possible so a
        // following pass of loop predication doesn't hoist a predicate
        // that depends on it above that node.
        Node* new_ctrl = least;
        // Move the node above predicates so a following pass of loop
        // predication doesn't hoist a predicate that depends on it
        // above that node.
        if (find_predicate_insertion_point(new_ctrl, Deoptimization::Reason_loop_limit_check) != NULL) {
          new_ctrl = new_ctrl->in(0)->in(0);
          assert(is_dominator(early, new_ctrl), "least != early so we can move up the dominator tree");
        }
        if (find_predicate_insertion_point(new_ctrl, Deoptimization::Reason_profile_predicate) != NULL) {
          Node* c = new_ctrl->in(0)->in(0);
          assert(is_dominator(early, c), "least != early so we can move up the dominator tree");
          new_ctrl = c;
        }
        if (find_predicate_insertion_point(new_ctrl, Deoptimization::Reason_predicate) != NULL) {
          Node* c = new_ctrl->in(0)->in(0);
          assert(is_dominator(early, c), "least != early so we can move up the dominator tree");
          new_ctrl = c;
        }
        if (new_ctrl != ctrl_out) {
          least = new_ctrl;
        } else if (ctrl_out->is_CountedLoop() || ctrl_out->is_OuterStripMinedLoop()) {
          Node* least_dom = idom(least);
          if (get_loop(least_dom)->is_member(get_loop(least))) {
            least = least_dom;
        for (;;) {
          if (!new_ctrl->is_Proj()) {
            break;
          }
          CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
          if (call == NULL) {
            break;
          }
          int req = call->uncommon_trap_request();
          Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
          if (trap_reason != Deoptimization::Reason_loop_limit_check &&
              trap_reason != Deoptimization::Reason_predicate &&
              trap_reason != Deoptimization::Reason_profile_predicate) {
            break;
          }
          Node* c = new_ctrl->in(0)->in(0);
          if (is_dominator(c, early) && c != early) {
            break;
          }
          new_ctrl = c;
        }
        least = new_ctrl;
      }
    }

src/hotspot/share/opto/loopnode.hpp
@ -75,7 +75,8 @@ protected:
         HasRangeChecks=8192,
         IsMultiversioned=16384,
         StripMined=32768,
         ProfileTripFailed=65536};
         SubwordLoop=65536,
         ProfileTripFailed=131072};
  char _unswitch_count;
  enum { _unswitch_max=3 };
  char _postloop_flags;
@ -99,6 +100,7 @@ public:
  bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
  bool is_strip_mined() const { return _loop_flags & StripMined; }
  bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
  bool is_subword_loop() const { return _loop_flags & SubwordLoop; }

  void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
  void mark_has_reductions() { _loop_flags |= HasReductions; }
@ -112,6 +114,7 @@ public:
  void mark_strip_mined() { _loop_flags |= StripMined; }
  void clear_strip_mined() { _loop_flags &= ~StripMined; }
  void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
  void mark_subword_loop() { _loop_flags |= SubwordLoop; }

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }
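
// Sketch of the flag renumbering above: SubwordLoop takes over bit value
// 65536 and ProfileTripFailed moves to 131072; the new flag is used like
// the existing ones:
//
//   if (cl->is_subword_loop()) { /* ... */ }  // tests _loop_flags & SubwordLoop
//   cl->mark_subword_loop();                  // sets  _loop_flags |= SubwordLoop
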
src/hotspot/share/opto/loopopts.cpp
@ -257,6 +257,7 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc
  ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
  if (exclude_loop_predicate &&
      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != NULL)) {
    // If this is a range check (IfNode::is_range_check), do not
    // reorder because Compile::allow_range_check_smearing might have

src/hotspot/share/opto/memnode.cpp
@ -518,8 +518,7 @@ Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, N
    if (ac->is_clonebasic()) {
      intptr_t offset;
      AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase, offset);
      assert(alloc != NULL && (!ReduceBulkZeroing || alloc->initialization()->is_complete_with_arraycopy()), "broken allocation");
      if (alloc == ld_alloc) {
      if (alloc != NULL && alloc == ld_alloc) {
        return ac;
      }
    }

src/hotspot/share/opto/superword.cpp
@ -376,6 +376,7 @@ void SuperWord::unrolling_analysis(int &local_loop_unroll_factor) {
          if (same_type) {
            max_vector = cur_max_vector;
            flag_small_bt = true;
            cl->mark_subword_loop();
          }
        }
      }

src/hotspot/share/prims/jniCheck.cpp
@ -1995,9 +1995,6 @@ JNI_ENTRY_CHECKED(jobject,
  checked_jni_GetModule(JNIEnv *env,
                        jclass clazz))
    functionEnter(thr);
    IN_VM(
      jniCheck::validate_class(thr, clazz, false);
    )
    jobject result = UNCHECKED()->GetModule(env,clazz);
    functionExit(thr);
    return result;

src/hotspot/share/prims/jvm.cpp
@ -39,6 +39,7 @@
#include "interpreter/bytecode.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "memory/heapShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/referenceType.hpp"
#include "memory/resourceArea.hpp"
@ -3598,6 +3599,13 @@ JVM_LEAF(jboolean, JVM_SupportsCX8())
  return VM_Version::supports_cx8();
JVM_END

JVM_ENTRY(void, JVM_InitializeFromArchive(JNIEnv* env, jclass cls))
  JVMWrapper("JVM_InitializeFromArchive");
  Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
  assert(k->is_klass(), "just checking");
  HeapShared::initialize_from_archived_subgraph(k);
JVM_END

// Returns an array of all live Thread objects (VM internal JavaThreads,
// jvmti agent threads, and JNI attaching threads are skipped)
// See CR 6404306 regarding JNI attaching threads
Some files were not shown because too many files have changed in this diff.