Merge

commit d8cfaff123

.hgtags
@@ -483,3 +483,4 @@ f7363de371c9a1f668bd0a01b7df3d1ddb9cc58b jdk-11+7
6fa770f9f8ab296e1ce255ec17ccf6d4e1051886 jdk-10+46
69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10
e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11
3ab6ba9f94a9045a526d645af26c933235371d6f jdk-11+12
@@ -156,9 +156,6 @@ else
JRE_IMAGE_HOMEDIR := $(JRE_IMAGE_DIR)
JDK_BUNDLE_SUBDIR := jdk-$(VERSION_NUMBER)
JRE_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)
JRE_COMPACT1_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)-compact1
JRE_COMPACT2_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)-compact2
JRE_COMPACT3_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER)-compact3
ifneq ($(DEBUG_LEVEL), release)
JDK_BUNDLE_SUBDIR := $(JDK_BUNDLE_SUBDIR)/$(DEBUG_LEVEL)
JRE_BUNDLE_SUBDIR := $(JRE_BUNDLE_SUBDIR)/$(DEBUG_LEVEL)
@@ -281,35 +278,6 @@ endif

################################################################################

ifneq ($(filter profiles-bundles, $(MAKECMDGOALS)), )
ifeq ($(OPENJDK_TARGET_OS), macosx)
$(error Creating compact profiles bundles on macosx is unsupported)
endif

define GenerateCompactProfilesBundles
ALL_JRE_COMPACT$1_FILES := $$(call CacheFind, $$(JRE_COMPACT$1_IMAGE_DIR))

JRE_COMPACT$1_BUNDLE_FILES := $$(filter-out \
$$(SYMBOLS_EXCLUDE_PATTERN), \
$$(ALL_JRE_COMPACT$1_FILES))

$$(eval $$(call SetupBundleFile, BUILD_JRE_COMPACT$1_BUNDLE, \
BUNDLE_NAME := $$(JRE_COMPACT$1_BUNDLE_NAME), \
FILES := $$(JRE_COMPACT$1_BUNDLE_FILES), \
BASE_DIRS := $$(JRE_COMPACT$1_IMAGE_DIR), \
SUBDIR := $$(JRE_COMPACT$1_BUNDLE_SUBDIR), \
))

PROFILES_TARGETS += $$(BUILD_JRE_COMPACT$1_BUNDLE)
endef

$(eval $(call GenerateCompactProfilesBundles,1))
$(eval $(call GenerateCompactProfilesBundles,2))
$(eval $(call GenerateCompactProfilesBundles,3))
endif

################################################################################

ifneq ($(filter test-bundles, $(MAKECMDGOALS)), )
TEST_BUNDLE_FILES := $(call CacheFind, $(TEST_IMAGE_DIR))

@@ -345,8 +313,7 @@ $(eval $(call IncludeCustomExtension, Bundles.gmk))
################################################################################

product-bundles: $(PRODUCT_TARGETS)
profiles-bundles: $(PROFILES_TARGETS)
test-bundles: $(TEST_TARGETS)
docs-bundles: $(DOCS_TARGETS)

.PHONY: all default product-bundles profiles-bundles test-bundles docs-bundles
.PHONY: all default product-bundles test-bundles docs-bundles
@@ -43,7 +43,7 @@ help:
$(info $(_) make images # Create complete jdk and jre images)
$(info $(_) # (alias for product-images))
$(info $(_) make <name>-image # Build just the image for any of: )
$(info $(_) # jdk, jre, test, docs, symbols, profiles)
$(info $(_) # jdk, jre, test, docs, symbols)
$(info $(_) make <phase> # Build the specified phase and everything it depends on)
$(info $(_) # (gensrc, java, copy, libs, launchers, gendata, rmic))
$(info $(_) make *-only # Applies to most targets and disables building the)
@@ -51,7 +51,6 @@ help:
$(info $(_) # result in incorrect build results!)
$(info $(_) make docs # Create all docs)
$(info $(_) make docs-jdk-api # Create just JDK javadocs)
$(info $(_) make profiles # Create complete jre compact profile images)
$(info $(_) make bootcycle-images # Build images twice, second time with newly built JDK)
$(info $(_) make install # Install the generated images locally)
$(info $(_) make reconfigure # Rerun configure with the same arguments as last time)
@@ -47,50 +47,8 @@ JRE_MODULES += $(filter $(ALL_MODULES), $(BOOT_MODULES) \
$(PLATFORM_MODULES) $(JRE_TOOL_MODULES))
JDK_MODULES += $(ALL_MODULES)

# Modules list for compact builds
JRE_COMPACT1_MODULES := \
java.logging \
java.scripting \
jdk.localedata \
jdk.crypto.cryptoki \
jdk.crypto.ec \
jdk.unsupported \
#

JRE_COMPACT2_MODULES := \
$(JRE_COMPACT1_MODULES) \
java.rmi \
java.sql \
java.xml \
jdk.xml.dom \
jdk.httpserver \
#

JRE_COMPACT3_MODULES := \
$(JRE_COMPACT2_MODULES) \
java.smartcardio \
java.compiler \
java.instrument \
java.management \
java.management.rmi \
java.naming \
java.prefs \
java.security.jgss \
java.security.sasl \
java.sql.rowset \
java.xml.crypto \
jdk.management \
jdk.naming.dns \
jdk.naming.rmi \
jdk.sctp \
jdk.security.auth \
#

JRE_MODULES_LIST := $(call CommaList, $(JRE_MODULES))
JDK_MODULES_LIST := $(call CommaList, $(JDK_MODULES))
JRE_COMPACT1_MODULES_LIST := $(call CommaList, $(JRE_COMPACT1_MODULES))
JRE_COMPACT2_MODULES_LIST := $(call CommaList, $(JRE_COMPACT2_MODULES))
JRE_COMPACT3_MODULES_LIST := $(call CommaList, $(JRE_COMPACT3_MODULES))

################################################################################

@@ -152,45 +110,8 @@ $(JRE_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
)
$(TOUCH) $@


$(JRE_COMPACT1_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
$(call DependOnVariable, JRE_COMPACT1_MODULES_LIST) $(BASE_RELEASE_FILE)
$(ECHO) Creating jre compact1 jimage
$(RM) -r $(JRE_COMPACT1_IMAGE_DIR)
$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/images/jre_compact1, \
$(JLINK_TOOL) --add-modules $(JRE_COMPACT1_MODULES_LIST) \
$(JLINK_JRE_EXTRA_OPTS) \
--output $(JRE_COMPACT1_IMAGE_DIR) \
)
$(TOUCH) $@

$(JRE_COMPACT2_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
$(call DependOnVariable, JRE_COMPACT2_MODULES_LIST) $(BASE_RELEASE_FILE)
$(ECHO) Creating jre compact2 jimage
$(RM) -r $(JRE_COMPACT2_IMAGE_DIR)
$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/images/jre_compact2, \
$(JLINK_TOOL) --add-modules $(JRE_COMPACT2_MODULES_LIST) \
$(JLINK_JRE_EXTRA_OPTS) \
--output $(JRE_COMPACT2_IMAGE_DIR) \
)
$(TOUCH) $@

$(JRE_COMPACT3_IMAGE_DIR)/$(JIMAGE_TARGET_FILE): $(JMODS) \
$(call DependOnVariable, JRE_COMPACT3_MODULES_LIST) $(BASE_RELEASE_FILE)
$(ECHO) Creating jre compact3 jimage
$(RM) -r $(JRE_COMPACT3_IMAGE_DIR)
$(call ExecuteWithLog, $(SUPPORT_OUTPUTDIR)/images/jre_compact3, \
$(JLINK_TOOL) --add-modules $(JRE_COMPACT3_MODULES_LIST) \
$(JLINK_JRE_EXTRA_OPTS) \
--output $(JRE_COMPACT3_IMAGE_DIR) \
)
$(TOUCH) $@

TOOL_JRE_TARGETS := $(JRE_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
TOOL_JDK_TARGETS := $(JDK_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
TOOL_JRE_COMPACT1_TARGETS := $(JRE_COMPACT1_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
TOOL_JRE_COMPACT2_TARGETS := $(JRE_COMPACT2_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)
TOOL_JRE_COMPACT3_TARGETS := $(JRE_COMPACT3_IMAGE_DIR)/$(JIMAGE_TARGET_FILE)

################################################################################
# /man dir
@@ -441,15 +362,4 @@ symbols: $(SYMBOLS_TARGETS)

all: jdk jre symbols

$(JRE_COMPACT1_TARGETS): $(TOOL_JRE_COMPACT1_TARGETS)
$(JRE_COMPACT2_TARGETS): $(TOOL_JRE_COMPACT2_TARGETS)
$(JRE_COMPACT3_TARGETS): $(TOOL_JRE_COMPACT3_TARGETS)

profiles: $(TOOL_JRE_COMPACT1_TARGETS) \
$(TOOL_JRE_COMPACT2_TARGETS) \
$(TOOL_JRE_COMPACT3_TARGETS) \
$(JRE_COMPACT1_TARGETS) \
$(JRE_COMPACT2_TARGETS) \
$(JRE_COMPACT3_TARGETS)

.PHONY: default all jdk jre symbols profiles
.PHONY: default all jdk jre symbols
@@ -344,9 +344,6 @@ jre-image:
symbols-image:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Images.gmk symbols)

profiles-image:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Images.gmk profiles)

mac-bundles-jdk:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f MacBundles.gmk)

@@ -358,7 +355,7 @@ exploded-image-optimize:

ALL_TARGETS += store-source-revision create-source-revision-tracker bootcycle-images zip-security \
zip-source jrtfs-jar jdk-image jre-image \
symbols-image profiles-image mac-bundles-jdk \
symbols-image mac-bundles-jdk \
release-file exploded-image-optimize

################################################################################
@@ -569,16 +566,13 @@ ALL_TARGETS += test test-hotspot-jtreg test-hotspot-jtreg-native \
product-bundles:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk product-bundles)

profiles-bundles:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk profiles-bundles)

test-bundles:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk test-bundles)

docs-bundles:
+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk docs-bundles)

ALL_TARGETS += product-bundles profiles-bundles test-bundles docs-bundles
ALL_TARGETS += product-bundles test-bundles docs-bundles

################################################################################
# Install targets
@@ -804,8 +798,6 @@ else
jre-image: jmods release-file
symbols-image: $(LIBS_TARGETS) $(LAUNCHER_TARGETS)

profiles-image: jmods release-file

mac-bundles-jdk: jdk-image jre-image

# The optimize target can run as soon as the modules dir has been completely
@@ -886,8 +878,6 @@ else

product-bundles: product-images

profiles-bundles: profiles-images

test-bundles: test-image

docs-bundles: docs-image
@@ -992,9 +982,6 @@ product-images: jdk-image jre-image symbols-image exploded-image
# an image until this can be cleaned up properly.
product-images: zip-security

# Declare these for backwards compatiblity and convenience.
profiles profiles-images: profiles-image

# The module summary cannot be run when:
# * Cross compiling and building a partial BUILDJDK for the build host
# * An external buildjdk has been supplied since it may not match the
@@ -1034,7 +1021,6 @@ ALL_TARGETS += buildtools hotspot hotspot-libs hotspot-gensrc gensrc gendata \
exploded-image-base exploded-image \
create-buildjdk docs-jdk-api docs-javase-api docs-reference-api docs-jdk \
docs-javase docs-reference docs-javadoc mac-bundles product-images \
profiles profiles-images \
docs-image test-image all-images \
all-bundles

@@ -25,7 +25,7 @@

# All valid JVM features, regardless of platform
VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
graal vm-structs jni-check services management all-gcs nmt cds \
graal vm-structs jni-check services management cmsgc g1gc parallelgc serialgc nmt cds \
static-build link-time-opt aot"

# All valid JVM variants
@@ -305,12 +305,8 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1'])
fi

if HOTSPOT_CHECK_JVM_FEATURE(compiler2) && ! HOTSPOT_CHECK_JVM_FEATURE(all-gcs); then
AC_MSG_ERROR([Specified JVM feature 'compiler2' requires feature 'all-gcs'])
fi

if HOTSPOT_CHECK_JVM_FEATURE(vm-structs) && ! HOTSPOT_CHECK_JVM_FEATURE(all-gcs); then
AC_MSG_ERROR([Specified JVM feature 'vm-structs' requires feature 'all-gcs'])
if HOTSPOT_CHECK_JVM_FEATURE(cmsgc) && ! HOTSPOT_CHECK_JVM_FEATURE(serialgc); then
AC_MSG_ERROR([Specified JVM feature 'cmsgc' requires feature 'serialgc'])
fi

# Turn on additional features based on other parts of configure
@@ -395,7 +391,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi

# All variants but minimal (and custom) get these features
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES jvmti vm-structs jni-check services management all-gcs nmt"
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc jni-check jvmti management nmt services vm-structs"
if test "x$ENABLE_CDS" = "xtrue"; then
NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cds"
fi
@@ -404,7 +400,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
JVM_FEATURES_server="compiler1 compiler2 $NON_MINIMAL_FEATURES $JVM_FEATURES $JVM_FEATURES_jvmci $JVM_FEATURES_aot $JVM_FEATURES_graal"
JVM_FEATURES_client="compiler1 $NON_MINIMAL_FEATURES $JVM_FEATURES $JVM_FEATURES_jvmci"
JVM_FEATURES_core="$NON_MINIMAL_FEATURES $JVM_FEATURES"
JVM_FEATURES_minimal="compiler1 minimal $JVM_FEATURES $JVM_FEATURES_link_time_opt"
JVM_FEATURES_minimal="compiler1 minimal serialgc $JVM_FEATURES $JVM_FEATURES_link_time_opt"
JVM_FEATURES_zero="zero $NON_MINIMAL_FEATURES $JVM_FEATURES"
JVM_FEATURES_custom="$JVM_FEATURES"

@@ -442,6 +438,12 @@ AC_DEFUN_ONCE([HOTSPOT_FINALIZE_JVM_FEATURES],
eval $features_var_name='"'$JVM_FEATURES_FOR_VARIANT'"'
AC_MSG_RESULT(["$JVM_FEATURES_FOR_VARIANT"])

# Verify that we have at least one gc selected
GC_FEATURES=`$ECHO $JVM_FEATURES_FOR_VARIANT | $GREP gc`
if test "x$GC_FEATURES" = x; then
AC_MSG_WARN([Invalid JVM features: No gc selected for variant $variant.])
fi

# Validate features (for configure script errors, not user errors)
BASIC_GET_NON_MATCHING_VALUES(INVALID_FEATURES, $JVM_FEATURES_FOR_VARIANT, $VALID_JVM_FEATURES)
if test "x$INVALID_FEATURES" != x; then
@@ -817,18 +817,11 @@ OS_VERSION_MICRO:=@OS_VERSION_MICRO@
# Images directory definitions
JDK_IMAGE_SUBDIR:=jdk
JRE_IMAGE_SUBDIR:=jre
JRE_COMPACT1_IMAGE_SUBDIR := jre-compact1
JRE_COMPACT2_IMAGE_SUBDIR := jre-compact2
JRE_COMPACT3_IMAGE_SUBDIR := jre-compact3

# Colon left out to be able to override output dir for bootcycle-images
JDK_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(JDK_IMAGE_SUBDIR)
JRE_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(JRE_IMAGE_SUBDIR)

JRE_COMPACT1_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(JRE_COMPACT1_IMAGE_SUBDIR)
JRE_COMPACT2_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(JRE_COMPACT2_IMAGE_SUBDIR)
JRE_COMPACT3_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(JRE_COMPACT3_IMAGE_SUBDIR)

# Test image, as above
TEST_IMAGE_SUBDIR:=test
TEST_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(TEST_IMAGE_SUBDIR)
@@ -866,12 +859,6 @@ else ifneq ($(DEBUG_LEVEL), release)
endif
JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).tar.gz
JRE_BUNDLE_NAME := jre-$(BASE_NAME)_bin$(DEBUG_PART).tar.gz
JRE_COMPACT1_BUNDLE_NAME := \
jre-$(VERSION_SHORT)+$(VERSION_BUILD)-compact1_$(OPENJDK_TARGET_BUNDLE_PLATFORM)_bin$(DEBUG_PART).tar.gz
JRE_COMPACT2_BUNDLE_NAME := \
jre-$(VERSION_SHORT)+$(VERSION_BUILD)-compact2_$(OPENJDK_TARGET_BUNDLE_PLATFORM)_bin$(DEBUG_PART).tar.gz
JRE_COMPACT3_BUNDLE_NAME := \
jre-$(VERSION_SHORT)+$(VERSION_BUILD)-compact3_$(OPENJDK_TARGET_BUNDLE_PLATFORM)_bin$(DEBUG_PART).tar.gz
JDK_SYMBOLS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz
JRE_SYMBOLS_BUNDLE_NAME := jre-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz
TEST_DEMOS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests-demos$(DEBUG_PART).tar.gz
@@ -32,7 +32,7 @@ DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_DATE=2018-09-25
DEFAULT_VERSION_CLASSFILE_MAJOR=55 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="9 10 11"
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="10 11"

LAUNCHER_NAME=openjdk
PRODUCT_NAME=OpenJDK
@@ -390,7 +390,7 @@ var getJibProfilesCommon = function (input, data) {
};
};

common.boot_jdk_version = "9";
common.boot_jdk_version = "10";
common.boot_jdk_home = input.get("boot_jdk", "home_path") + "/jdk-"
+ common.boot_jdk_version
+ (input.build_os == "macosx" ? ".jdk/Contents/Home" : "");
@@ -848,7 +848,7 @@ var getJibProfilesDependencies = function (input, common) {
server: "jpg",
product: "jdk",
version: common.boot_jdk_version,
build_number: "181",
build_number: "46",
file: "bundles/" + boot_jdk_platform + "/jdk-" + common.boot_jdk_version + "_"
+ boot_jdk_platform + "_bin.tar.gz",
configure_args: "--with-boot-jdk=" + common.boot_jdk_home,
@@ -77,9 +77,14 @@ ifeq ($(call check-jvm-feature, dtrace), true)
vmGCOperations.o \
)

ifeq ($(call check-jvm-feature, all-gcs), true)
ifeq ($(call check-jvm-feature, cmsgc), true)
DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
vmCMSOperations.o \
)
endif

ifeq ($(call check-jvm-feature, parallelgc), true)
DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
vmPSOperations.o \
)
endif
@@ -118,19 +118,6 @@ ifneq ($(call check-jvm-feature, cds), true)
#
endif

ifneq ($(call check-jvm-feature, all-gcs), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_ALL_GCS=0
JVM_EXCLUDE_PATTERNS += \
cms/ g1/ parallel/
JVM_EXCLUDE_FILES += \
concurrentGCThread.cpp \
suspendibleThreadSet.cpp \
plab.cpp
JVM_EXCLUDE_FILES += \
g1MemoryPool.cpp \
psMemoryPool.cpp
endif

ifneq ($(call check-jvm-feature, nmt), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_NMT=0
JVM_EXCLUDE_FILES += \
@@ -138,13 +125,34 @@ ifneq ($(call check-jvm-feature, nmt), true)
memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
endif

ifeq ($(call check-jvm-feature, aot), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_AOT
else
ifneq ($(call check-jvm-feature, aot), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_AOT=0
JVM_EXCLUDE_FILES += \
compiledIC_aot_x86_64.cpp compilerRuntime.cpp \
aotCodeHeap.cpp aotCompiledMethod.cpp aotLoader.cpp compiledIC_aot.cpp
endif

ifneq ($(call check-jvm-feature, cmsgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_CMSGC=0
JVM_EXCLUDE_PATTERNS += gc/cms
endif

ifneq ($(call check-jvm-feature, g1gc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
JVM_EXCLUDE_PATTERNS += gc/g1
endif

ifneq ($(call check-jvm-feature, parallelgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_PARALLELGC=0
JVM_EXCLUDE_PATTERNS += gc/parallel
endif

ifneq ($(call check-jvm-feature, serialgc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_SERIALGC=0
JVM_EXCLUDE_PATTERNS += gc/serial
# If serial is disabled, we cannot use serial as OldGC in parallel
JVM_EXCLUDE_FILES += psMarkSweep.cpp psMarkSweepDecorator.cpp
endif
################################################################################

ifeq ($(call check-jvm-feature, link-time-opt), true)
|
||||
-I$(VM_TESTBASE_DIR)/nsk/share/native \
|
||||
-I$(VM_TESTBASE_DIR)/nsk/share/jni
|
||||
|
||||
NSK_MONITORING_INCLUDES := \
|
||||
-I$(VM_TESTBASE_DIR)/nsk/share/native \
|
||||
-I$(VM_TESTBASE_DIR)/nsk/share/jni
|
||||
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libProcessUtils := $(VM_SHARE_INCLUDES)
|
||||
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libThreadController := $(NSK_MONITORING_INCLUDES)
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libDeadlock := $(NSK_MONITORING_INCLUDES)
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libRecursiveMonitoringThread := $(NSK_MONITORING_INCLUDES)
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libLockingThreads := $(NSK_MONITORING_INCLUDES)
|
||||
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libStackTraceController := $(NSK_MONITORING_INCLUDES)
|
||||
|
||||
################################################################################
|
||||
|
||||
# Platform specific setup
|
||||
|
@ -49,11 +49,6 @@
|
||||
#include "runtime/jniHandles.inline.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/thread.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc/g1/g1BarrierSet.hpp"
|
||||
#include "gc/g1/g1CardTable.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#endif
|
||||
|
||||
#ifdef PRODUCT
|
||||
#define BLOCK_COMMENT(str) /* nothing */
|
||||
@ -1951,6 +1946,11 @@ void MacroAssembler::decrement(Register reg, int value)
|
||||
void MacroAssembler::decrementw(Address dst, int value)
|
||||
{
|
||||
assert(!dst.uses(rscratch1), "invalid dst for address decrement");
|
||||
if (dst.getMode() == Address::literal) {
|
||||
assert(abs(value) < (1 << 12), "invalid value and address mode combination");
|
||||
lea(rscratch2, dst);
|
||||
dst = Address(rscratch2);
|
||||
}
|
||||
ldrw(rscratch1, dst);
|
||||
decrementw(rscratch1, value);
|
||||
strw(rscratch1, dst);
|
||||
@ -1959,6 +1959,11 @@ void MacroAssembler::decrementw(Address dst, int value)
|
||||
void MacroAssembler::decrement(Address dst, int value)
|
||||
{
|
||||
assert(!dst.uses(rscratch1), "invalid address for decrement");
|
||||
if (dst.getMode() == Address::literal) {
|
||||
assert(abs(value) < (1 << 12), "invalid value and address mode combination");
|
||||
lea(rscratch2, dst);
|
||||
dst = Address(rscratch2);
|
||||
}
|
||||
ldr(rscratch1, dst);
|
||||
decrement(rscratch1, value);
|
||||
str(rscratch1, dst);
|
||||
@ -1991,6 +1996,11 @@ void MacroAssembler::increment(Register reg, int value)
|
||||
void MacroAssembler::incrementw(Address dst, int value)
|
||||
{
|
||||
assert(!dst.uses(rscratch1), "invalid dst for address increment");
|
||||
if (dst.getMode() == Address::literal) {
|
||||
assert(abs(value) < (1 << 12), "invalid value and address mode combination");
|
||||
lea(rscratch2, dst);
|
||||
dst = Address(rscratch2);
|
||||
}
|
||||
ldrw(rscratch1, dst);
|
||||
incrementw(rscratch1, value);
|
||||
strw(rscratch1, dst);
|
||||
@ -1999,6 +2009,11 @@ void MacroAssembler::incrementw(Address dst, int value)
|
||||
void MacroAssembler::increment(Address dst, int value)
|
||||
{
|
||||
assert(!dst.uses(rscratch1), "invalid dst for address increment");
|
||||
if (dst.getMode() == Address::literal) {
|
||||
assert(abs(value) < (1 << 12), "invalid value and address mode combination");
|
||||
lea(rscratch2, dst);
|
||||
dst = Address(rscratch2);
|
||||
}
|
||||
ldr(rscratch1, dst);
|
||||
increment(rscratch1, value);
|
||||
str(rscratch1, dst);
|
||||
|
@ -781,23 +781,6 @@ public:
|
||||
|
||||
void resolve_jobject(Register value, Register thread, Register tmp);
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
|
||||
void g1_write_barrier_pre(Register obj,
|
||||
Register pre_val,
|
||||
Register thread,
|
||||
Register tmp,
|
||||
bool tosca_live,
|
||||
bool expand_call);
|
||||
|
||||
void g1_write_barrier_post(Register store_addr,
|
||||
Register new_val,
|
||||
Register thread,
|
||||
Register tmp,
|
||||
Register tmp2);
|
||||
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// oop manipulations
|
||||
void load_klass(Register dst, Register src);
|
||||
void store_klass(Register dst, Register src);
|
||||
|
@ -2339,7 +2339,7 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
__ b(cont);
|
||||
|
||||
int reexecute_offset = __ pc() - start;
|
||||
#if defined(INCLUDE_JVMCI) && !defined(COMPILER1)
|
||||
#if INCLUDE_JVMCI && !defined(COMPILER1)
|
||||
if (EnableJVMCI && UseJVMCICompiler) {
|
||||
// JVMCI does not use this kind of deoptimization
|
||||
__ should_not_reach_here();
|
||||
|
@ -42,10 +42,6 @@
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/hashtable.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc/g1/g1BarrierSet.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
int AbstractAssembler::code_fill_byte() {
|
||||
return 0xff; // illegal instruction 0xffffffff
|
||||
|
@ -42,10 +42,6 @@
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/hashtable.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc/g1/g1BarrierSet.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
#ifdef COMPILER2
|
||||
// Convert the raw encoding form into the form expected by the
|
||||
|
@ -42,10 +42,6 @@
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/hashtable.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc/g1/g1BarrierSet.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
// Returns whether given imm has equal bit fields <0:size-1> and <size:2*size-1>.
|
||||
inline bool Assembler::LogicalImmediate::has_equal_subpatterns(uintx imm, int size) {
|
||||
|
@ -486,6 +486,9 @@ void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
|
||||
void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
|
||||
assert(addr->is_register(), "must be a register at this point");
|
||||
|
||||
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
|
||||
CardTable* ct = ctbs->card_table();
|
||||
|
||||
LIR_Opr tmp = FrameMap::LR_ptr_opr;
|
||||
|
||||
// TODO-AARCH64: check performance
|
||||
@ -507,7 +510,7 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
|
||||
LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
|
||||
#endif
|
||||
if (UseCondCardMark) {
|
||||
if (UseConcMarkSweepGC) {
|
||||
if (ct->scanned_concurrently()) {
|
||||
__ membar_storeload();
|
||||
}
|
||||
LIR_Opr cur_value = new_register(T_INT);
|
||||
@ -519,11 +522,9 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
|
||||
set_card(tmp, card_addr);
|
||||
__ branch_destination(L_already_dirty->label());
|
||||
} else {
|
||||
#if INCLUDE_ALL_GCS
|
||||
if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
|
||||
if (ct->scanned_concurrently()) {
|
||||
__ membar_storestore();
|
||||
}
|
||||
#endif
|
||||
set_card(tmp, card_addr);
|
||||
}
|
||||
}
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include "gc/g1/g1BarrierSetAssembler.hpp"
|
||||
#include "gc/g1/g1ThreadLocalData.hpp"
|
||||
#include "gc/g1/g1CardTable.hpp"
|
||||
#include "gc/g1/g1ThreadLocalData.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#include "interpreter/interp_masm.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
@ -127,6 +128,239 @@ void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* mas
|
||||
#endif // !AARCH64
|
||||
}
|
||||
|
||||
// G1 pre-barrier.
|
||||
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
|
||||
// If store_addr != noreg, then previous value is loaded from [store_addr];
|
||||
// in such case store_addr and new_val registers are preserved;
|
||||
// otherwise pre_val register is preserved.
|
||||
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register pre_val,
|
||||
Register tmp1,
|
||||
Register tmp2) {
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
if (store_addr != noreg) {
|
||||
assert_different_registers(store_addr, new_val, pre_val, tmp1, tmp2, noreg);
|
||||
} else {
|
||||
assert (new_val == noreg, "should be");
|
||||
assert_different_registers(pre_val, tmp1, tmp2, noreg);
|
||||
}
|
||||
|
||||
Address in_progress(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
|
||||
Address index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
|
||||
Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
|
||||
|
||||
// Is marking active?
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "adjust this code");
|
||||
__ ldrb(tmp1, in_progress);
|
||||
__ cbz(tmp1, done);
|
||||
|
||||
// Do we need to load the previous value?
|
||||
if (store_addr != noreg) {
|
||||
__ load_heap_oop(pre_val, Address(store_addr, 0));
|
||||
}
|
||||
|
||||
// Is the previous value null?
|
||||
__ cbz(pre_val, done);
|
||||
|
||||
// Can we store original value in the thread's buffer?
|
||||
// Is index == 0?
|
||||
// (The index field is typed as size_t.)
|
||||
|
||||
__ ldr(tmp1, index); // tmp1 := *index_adr
|
||||
__ ldr(tmp2, buffer);
|
||||
|
||||
__ subs(tmp1, tmp1, wordSize); // tmp1 := tmp1 - wordSize
|
||||
__ b(runtime, lt); // If negative, goto runtime
|
||||
|
||||
__ str(tmp1, index); // *index_adr := tmp1
|
||||
|
||||
// Record the previous value
|
||||
__ str(pre_val, Address(tmp2, tmp1));
|
||||
__ b(done);
|
||||
|
||||
__ bind(runtime);
|
||||
|
||||
// save the live input values
|
||||
#ifdef AARCH64
|
||||
if (store_addr != noreg) {
|
||||
__ raw_push(store_addr, new_val);
|
||||
} else {
|
||||
__ raw_push(pre_val, ZR);
|
||||
}
|
||||
#else
|
||||
if (store_addr != noreg) {
|
||||
// avoid raw_push to support any ordering of store_addr and new_val
|
||||
__ push(RegisterSet(store_addr) | RegisterSet(new_val));
|
||||
} else {
|
||||
__ push(pre_val);
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
if (pre_val != R0) {
|
||||
__ mov(R0, pre_val);
|
||||
}
|
||||
__ mov(R1, Rthread);
|
||||
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), R0, R1);
|
||||
|
||||
#ifdef AARCH64
|
||||
if (store_addr != noreg) {
|
||||
__ raw_pop(store_addr, new_val);
|
||||
} else {
|
||||
__ raw_pop(pre_val, ZR);
|
||||
}
|
||||
#else
|
||||
if (store_addr != noreg) {
|
||||
__ pop(RegisterSet(store_addr) | RegisterSet(new_val));
|
||||
} else {
|
||||
__ pop(pre_val);
|
||||
}
|
||||
#endif // AARCH64
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
// G1 post-barrier.
|
||||
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
Register store_addr,
|
||||
Register new_val,
|
||||
Register tmp1,
|
||||
Register tmp2,
|
||||
Register tmp3) {
|
||||
|
||||
Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
|
||||
Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
|
||||
|
||||
BarrierSet* bs = BarrierSet::barrier_set();
|
||||
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
|
||||
CardTable* ct = ctbs->card_table();
|
||||
Label done;
|
||||
Label runtime;
|
||||
|
||||
// Does store cross heap regions?
|
||||
|
||||
__ eor(tmp1, store_addr, new_val);
|
||||
#ifdef AARCH64
|
||||
__ logical_shift_right(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
|
||||
__ cbz(tmp1, done);
|
||||
#else
|
||||
__ movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
|
||||
__ b(done, eq);
|
||||
#endif
|
||||
|
||||
// crosses regions, storing NULL?
|
||||
|
||||
__ cbz(new_val, done);
|
||||
|
||||
// storing region crossing non-NULL, is card already dirty?
|
||||
const Register card_addr = tmp1;
|
||||
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
|
||||
|
||||
__ mov_address(tmp2, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
|
||||
__ add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));
|
||||
|
||||
__ ldrb(tmp2, Address(card_addr));
|
||||
__ cmp(tmp2, (int)G1CardTable::g1_young_card_val());
|
||||
__ b(done, eq);
|
||||
|
||||
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);
|
||||
|
||||
assert(CardTable::dirty_card_val() == 0, "adjust this code");
|
||||
__ ldrb(tmp2, Address(card_addr));
|
||||
__ cbz(tmp2, done);
|
||||
|
||||
// storing a region crossing, non-NULL oop, card is clean.
|
||||
// dirty card and log.
|
||||
|
||||
__ strb(__ zero_register(tmp2), Address(card_addr));
|
||||
|
||||
__ ldr(tmp2, queue_index);
|
||||
__ ldr(tmp3, buffer);
|
||||
|
||||
__ subs(tmp2, tmp2, wordSize);
|
||||
__ b(runtime, lt); // go to runtime if now negative
|
||||
|
||||
__ str(tmp2, queue_index);
|
||||
|
||||
__ str(card_addr, Address(tmp3, tmp2));
|
||||
__ b(done);
|
||||
|
||||
__ bind(runtime);
|
||||
|
||||
if (card_addr != R0) {
|
||||
__ mov(R0, card_addr);
|
||||
}
|
||||
__ mov(R1, Rthread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), R0, R1);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
|
||||
bool on_oop = type == T_OBJECT || type == T_ARRAY;
|
||||
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
|
||||
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
|
||||
bool on_reference = on_weak || on_phantom;
|
||||
|
||||
ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);
|
||||
if (on_oop && on_reference) {
|
||||
// Generate the G1 pre-barrier code to log the value of
|
||||
// the referent field in an SATB buffer.
|
||||
g1_write_barrier_pre(masm, noreg, noreg, dst, tmp1, tmp2);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
|
||||
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
|
||||
bool in_heap = (decorators & IN_HEAP) != 0;
|
||||
bool in_concurrent_root = (decorators & IN_CONCURRENT_ROOT) != 0;
|
||||
|
||||
bool needs_pre_barrier = in_heap || in_concurrent_root;
|
||||
bool needs_post_barrier = (new_val != noreg) && in_heap;
|
||||
|
||||
// flatten object address if needed
|
||||
assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here");
|
||||
|
||||
const Register store_addr = obj.base();
|
||||
if (obj.index() != noreg) {
|
||||
assert (obj.disp() == 0, "index or displacement, not both");
|
||||
#ifdef AARCH64
|
||||
__ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm());
|
||||
#else
|
||||
assert(obj.offset_op() == add_offset, "addition is expected");
|
||||
__ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
|
||||
#endif // AARCH64
|
||||
} else if (obj.disp() != 0) {
|
||||
__ add(store_addr, obj.base(), obj.disp());
|
||||
}
|
||||
|
||||
if (needs_pre_barrier) {
|
||||
g1_write_barrier_pre(masm, store_addr, new_val, tmp1, tmp2, tmp3);
|
||||
}
|
||||
|
||||
if (is_null) {
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, Address(store_addr), new_val, tmp1, tmp2, tmp3, true);
|
||||
} else {
|
||||
// G1 barrier needs uncompressed oop for region cross check.
|
||||
Register val_to_store = new_val;
|
||||
if (UseCompressedOops) {
|
||||
val_to_store = tmp1;
|
||||
__ mov(val_to_store, new_val);
|
||||
}
|
||||
BarrierSetAssembler::store_at(masm, decorators, type, Address(store_addr), val_to_store, tmp1, tmp2, tmp3, false);
|
||||
if (needs_post_barrier) {
|
||||
g1_write_barrier_post(masm, store_addr, new_val, tmp1, tmp2, tmp3);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#ifdef COMPILER1
|
||||
|
||||
#undef __
|
||||
|
@@ -41,6 +41,27 @@ protected:
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);

void g1_write_barrier_pre(MacroAssembler* masm,
Register store_addr,
Register new_val,
Register pre_val,
Register tmp1,
Register tmp2);

void g1_write_barrier_post(MacroAssembler* masm,
Register store_addr,
Register new_val,
Register tmp1,
Register tmp2,
Register tmp3);

virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);

public:
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp2, Register tmp3);

#ifdef COMPILER1
public:
void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"

#define __ masm->

void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
bool on_heap = (decorators & IN_HEAP) != 0;
bool on_root = (decorators & IN_ROOT) != 0;
switch (type) {
case T_OBJECT:
case T_ARRAY: {
if (on_heap) {
#ifdef AARCH64
if (UseCompressedOops) {
__ ldr_w(dst, src);
__ decode_heap_oop(dst);
} else
#endif // AARCH64
{
__ ldr(dst, src);
}
} else {
assert(on_root, "why else?");
__ ldr(dst, src);
}
break;
}
default: Unimplemented();
}

}

void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
bool on_heap = (decorators & IN_HEAP) != 0;
bool on_root = (decorators & IN_ROOT) != 0;
switch (type) {
case T_OBJECT:
case T_ARRAY: {
if (on_heap) {
#ifdef AARCH64
if (UseCompressedOops) {
assert(!dst.uses(src), "not enough registers");
if (!is_null) {
__ encode_heap_oop(src);
}
__ str_w(val, obj);
} else
#endif // AARCH64
{
__ str(val, obj);
}
} else {
assert(on_root, "why else?");
__ str(val, obj);
}
break;
}
default: Unimplemented();
}
}
@@ -36,6 +36,11 @@ public:
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, Register tmp) {}

virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp2, Register tmp3);
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);

virtual void barrier_stubs_init() {}
};

@@ -72,3 +72,111 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ b(L_cardtable_loop, ge);
__ BIND(L_done);
}

void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
bool precise = on_array || on_anonymous;

if (is_null) {
BarrierSetAssembler::store_at(masm, decorators, type, obj, new_val, tmp1, tmp2, tmp3, true);
} else {
assert (!precise || (obj.index() == noreg && obj.disp() == 0),
"store check address should be calculated beforehand");

store_check_part1(masm, tmp1);
BarrierSetAssembler::store_at(masm, decorators, type, obj, new_val, tmp1, tmp2, tmp3, false);
new_val = noreg;
store_check_part2(masm, obj.base(), tmp1, tmp2);
}
}

// The 1st part of the store check.
// Sets card_table_base register.
void CardTableBarrierSetAssembler::store_check_part1(MacroAssembler* masm, Register card_table_base) {
// Check barrier set type (should be card table) and element size
BarrierSet* bs = BarrierSet::barrier_set();
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");

CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");

// Load card table base address.

/* Performance note.

There is an alternative way of loading card table base address
from thread descriptor, which may look more efficient:

ldr(card_table_base, Address(Rthread, JavaThread::card_table_base_offset()));

However, performance measurements of micro benchmarks and specJVM98
showed that loading of card table base from thread descriptor is
7-18% slower compared to loading of literal embedded into the code.
Possible cause is a cache miss (card table base address resides in a
rarely accessed area of thread descriptor).
*/
// TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
__ mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
}

// The 2nd part of the store check.
void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Register obj, Register card_table_base, Register tmp) {
assert_different_registers(obj, card_table_base, tmp);

BarrierSet* bs = BarrierSet::barrier_set();
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");

CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");

assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
#ifdef AARCH64
add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift));
Address card_table_addr(card_table_base);
#else
Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
#endif

if (UseCondCardMark) {
if (ct->scanned_concurrently()) {
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), noreg);
}
Label already_dirty;

__ ldrb(tmp, card_table_addr);
__ cbz(tmp, already_dirty);

set_card(masm, card_table_base, card_table_addr, tmp);
__ bind(already_dirty);

} else {
if (ct->scanned_concurrently()) {
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
}
set_card(masm, card_table_base, card_table_addr, tmp);
}
}

void CardTableBarrierSetAssembler::set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp) {
#ifdef AARCH64
strb(ZR, card_table_addr);
#else
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.
__ strb(card_table_base, card_table_addr);
} else {
__ mov(tmp, 0);
__ strb(tmp, card_table_addr);
}
#endif // AARCH64
}
@@ -29,9 +29,18 @@
#include "gc/shared/modRefBarrierSetAssembler.hpp"

class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
private:
void store_check(MacroAssembler* masm, Register obj, Address dst);
void store_check_part1(MacroAssembler* masm, Register card_table_base);
void store_check_part2(MacroAssembler* masm, Register obj, Register card_table_base, Register tmp);

void set_card(MacroAssembler* masm, Register card_table_base, Address card_table_addr, Register tmp);

protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);
};

#endif // #ifndef CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP
@@ -42,3 +42,12 @@ void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Decorat
gen_write_ref_array_post_barrier(masm, decorators, addr, count, tmp);
}
}

void ModRefBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
if (type == T_OBJECT || type == T_ARRAY) {
oop_store_at(masm, decorators, type, obj, new_val, tmp1, tmp2, tmp3, is_null);
} else {
BarrierSetAssembler::store_at(masm, decorators, type, obj, new_val, tmp1, tmp2, tmp3, is_null);
}
}
@@ -28,6 +28,10 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"

// The ModRefBarrierSetAssembler filters away accesses on BasicTypes other
// than T_OBJECT/T_ARRAY (oops). The oop accesses call one of the protected
// accesses, which are overridden in the concrete BarrierSetAssembler.

class ModRefBarrierSetAssembler: public BarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
@@ -35,11 +39,16 @@ protected:
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp) {}

virtual void oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register val, Register tmp1, Register tmp2, Register tmp3, bool is_null) = 0;

public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, int callee_saved_regs);
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register addr, Register count, Register tmp);
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address obj, Register val, Register tmp1, Register tmp2, Register tmp3, bool is_null);
};

#endif // CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
@@ -43,11 +43,6 @@
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"

#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

//--------------------------------------------------------------------
// Implementation of InterpreterMacroAssembler

@@ -406,91 +401,6 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
}


// The 1st part of the store check.
// Sets card_table_base register.
void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
// Check barrier set type (should be card table) and element size
BarrierSet* bs = BarrierSet::barrier_set();
assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");

CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");

// Load card table base address.

/* Performance note.

There is an alternative way of loading card table base address
from thread descriptor, which may look more efficient:

ldr(card_table_base, Address(Rthread, JavaThread::card_table_base_offset()));

However, performance measurements of micro benchmarks and specJVM98
showed that loading of card table base from thread descriptor is
7-18% slower compared to loading of literal embedded into the code.
Possible cause is a cache miss (card table base address resides in a
rarely accessed area of thread descriptor).
*/
// TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
}

// The 2nd part of the store check.
void InterpreterMacroAssembler::store_check_part2(Register obj, Register card_table_base, Register tmp) {
assert_different_registers(obj, card_table_base, tmp);

assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
#ifdef AARCH64
add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift));
Address card_table_addr(card_table_base);
#else
Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
#endif

if (UseCondCardMark) {
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC) {
membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), noreg);
}
#endif
Label already_dirty;

ldrb(tmp, card_table_addr);
cbz(tmp, already_dirty);

set_card(card_table_base, card_table_addr, tmp);
bind(already_dirty);

} else {
#if INCLUDE_ALL_GCS
if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
}
#endif
set_card(card_table_base, card_table_addr, tmp);
}
}

void InterpreterMacroAssembler::set_card(Register card_table_base, Address card_table_addr, Register tmp) {
#ifdef AARCH64
strb(ZR, card_table_addr);
#else
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
CardTable* ct = ctbs->card_table();
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.
strb(card_table_base, card_table_addr);
} else {
mov(tmp, 0);
strb(tmp, card_table_addr);
}
#endif // AARCH64
}

//////////////////////////////////////////////////////////////////////////////////

@@ -144,11 +144,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// load cpool->resolved_klass_at(index); Rtemp is corrupted upon return
void load_resolved_klass_at_offset(Register Rcpool, Register Rindex, Register Rklass);

void store_check_part1(Register card_table_base); // Sets card_table_base register.
void store_check_part2(Register obj, Register card_table_base, Register tmp);

void set_card(Register card_table_base, Address card_table_addr, Register tmp);

void pop_ptr(Register r);
void pop_i(Register r = R0_tos);
#ifdef AARCH64
@ -31,6 +31,7 @@
|
||||
#include "compiler/disassembler.hpp"
|
||||
#include "gc/shared/barrierSet.hpp"
|
||||
#include "gc/shared/cardTable.hpp"
|
||||
#include "gc/shared/barrierSetAssembler.hpp"
|
||||
#include "gc/shared/cardTableBarrierSet.hpp"
|
||||
#include "gc/shared/collectedHeap.inline.hpp"
|
||||
#include "interpreter/interpreter.hpp"
|
||||
@ -44,12 +45,6 @@
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc/g1/g1BarrierSet.hpp"
|
||||
#include "gc/g1/g1CardTable.hpp"
|
||||
#include "gc/g1/g1ThreadLocalData.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#endif
|
||||
|
||||
// Implementation of AddressLiteral
|
||||
|
||||
@ -2131,204 +2126,20 @@ void MacroAssembler::resolve_jobject(Register value,
|
||||
cbz(value, done); // Use NULL as-is.
|
||||
STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
|
||||
tbz(value, 0, not_weak); // Test for jweak tag.
|
||||
|
||||
// Resolve jweak.
|
||||
ldr(value, Address(value, -JNIHandles::weak_tag_value));
|
||||
verify_oop(value);
|
||||
#if INCLUDE_ALL_GCS
|
||||
if (UseG1GC) {
|
||||
g1_write_barrier_pre(noreg, // store_addr
|
||||
noreg, // new_val
|
||||
value, // pre_val
|
||||
tmp1, // tmp1
|
||||
tmp2); // tmp2
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
access_load_at(T_OBJECT, IN_ROOT | ON_PHANTOM_OOP_REF,
|
||||
Address(value, -JNIHandles::weak_tag_value), value, tmp1, tmp2, noreg);
|
||||
b(done);
|
||||
bind(not_weak);
|
||||
// Resolve (untagged) jobject.
|
||||
ldr(value, Address(value));
|
||||
access_load_at(T_OBJECT, IN_ROOT | IN_CONCURRENT_ROOT,
|
||||
Address(value, 0), value, tmp1, tmp2, noreg);
|
||||
verify_oop(value);
|
||||
bind(done);
|
||||
}

//////////////////////////////////////////////////////////////////////////////////

#if INCLUDE_ALL_GCS

// G1 pre-barrier.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
// If store_addr != noreg, then previous value is loaded from [store_addr];
// in such case store_addr and new_val registers are preserved;
// otherwise pre_val register is preserved.
void MacroAssembler::g1_write_barrier_pre(Register store_addr,
Register new_val,
Register pre_val,
Register tmp1,
Register tmp2) {
Label done;
Label runtime;

if (store_addr != noreg) {
assert_different_registers(store_addr, new_val, pre_val, tmp1, tmp2, noreg);
} else {
assert (new_val == noreg, "should be");
assert_different_registers(pre_val, tmp1, tmp2, noreg);
}

Address in_progress(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
Address index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

// Is marking active?
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "adjust this code");
ldrb(tmp1, in_progress);
cbz(tmp1, done);

// Do we need to load the previous value?
if (store_addr != noreg) {
load_heap_oop(pre_val, Address(store_addr, 0));
}

// Is the previous value null?
cbz(pre_val, done);

// Can we store original value in the thread's buffer?
// Is index == 0?
// (The index field is typed as size_t.)

ldr(tmp1, index); // tmp1 := *index_adr
ldr(tmp2, buffer);

subs(tmp1, tmp1, wordSize); // tmp1 := tmp1 - wordSize
b(runtime, lt); // If negative, goto runtime

str(tmp1, index); // *index_adr := tmp1

// Record the previous value
str(pre_val, Address(tmp2, tmp1));
b(done);

bind(runtime);

// save the live input values
#ifdef AARCH64
if (store_addr != noreg) {
raw_push(store_addr, new_val);
} else {
raw_push(pre_val, ZR);
}
#else
if (store_addr != noreg) {
// avoid raw_push to support any ordering of store_addr and new_val
push(RegisterSet(store_addr) | RegisterSet(new_val));
} else {
push(pre_val);
}
#endif // AARCH64

if (pre_val != R0) {
mov(R0, pre_val);
}
mov(R1, Rthread);

call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), R0, R1);

#ifdef AARCH64
if (store_addr != noreg) {
raw_pop(store_addr, new_val);
} else {
raw_pop(pre_val, ZR);
}
#else
if (store_addr != noreg) {
pop(RegisterSet(store_addr) | RegisterSet(new_val));
} else {
pop(pre_val);
}
#endif // AARCH64

bind(done);
}
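
The assembler above emits the standard SATB (snapshot-at-the-beginning) pre-barrier fast path. A compact C++ model of the logic it generates; names and the runtime_slow_path callback are illustrative, not HotSpot API:

struct SATBQueue { bool active; size_t index; void** buffer; };  // index counts down in bytes
void satb_pre_barrier_sketch(SATBQueue& q, void* pre_val,
                             void (*runtime_slow_path)(void*)) {
  if (!q.active) return;              // marking not active: nothing to log
  if (pre_val == nullptr) return;     // previous value null: nothing to log
  if (q.index >= sizeof(void*)) {     // room left in the thread-local buffer
    q.index -= sizeof(void*);
    q.buffer[q.index / sizeof(void*)] = pre_val;
  } else {
    runtime_slow_path(pre_val);       // buffer full: hand off to the runtime
  }
}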

// G1 post-barrier.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
void MacroAssembler::g1_write_barrier_post(Register store_addr,
Register new_val,
Register tmp1,
Register tmp2,
Register tmp3) {

Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

BarrierSet* bs = BarrierSet::barrier_set();
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
Label done;
Label runtime;

// Does store cross heap regions?

eor(tmp1, store_addr, new_val);
#ifdef AARCH64
logical_shift_right(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
cbz(tmp1, done);
#else
movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
b(done, eq);
#endif

// crosses regions, storing NULL?

cbz(new_val, done);

// storing region crossing non-NULL, is card already dirty?
const Register card_addr = tmp1;
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");

mov_address(tmp2, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));

ldrb(tmp2, Address(card_addr));
cmp(tmp2, (int)G1CardTable::g1_young_card_val());
b(done, eq);

membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);

assert(CardTable::dirty_card_val() == 0, "adjust this code");
ldrb(tmp2, Address(card_addr));
cbz(tmp2, done);

// storing a region crossing, non-NULL oop, card is clean.
// dirty card and log.

strb(zero_register(tmp2), Address(card_addr));

ldr(tmp2, queue_index);
ldr(tmp3, buffer);

subs(tmp2, tmp2, wordSize);
b(runtime, lt); // go to runtime if now negative

str(tmp2, queue_index);

str(card_addr, Address(tmp3, tmp2));
b(done);

bind(runtime);

if (card_addr != R0) {
mov(R0, card_addr);
}
mov(R1, Rthread);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), R0, R1);

bind(done);
}
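
A C++ model of the G1 post-barrier filtering that the assembler above generates; the constants (region size, card shift, card values) are illustrative assumptions, not the real HotSpot configuration:

#include <atomic>
#include <cstdint>
const int log_region_bytes = 20;      // assumption: 1 MB heap regions
const int card_shift       = 9;       // assumption: 512-byte cards
const uint8_t young_card   = 2;       // assumption: g1_young_card_val()
const uint8_t dirty_card   = 0;       // matches the dirty_card_val() == 0 assert
void g1_post_barrier_sketch(uintptr_t store_addr, uintptr_t new_val,
                            uint8_t* card_table_base,
                            void (*enqueue_card)(uint8_t*)) {
  if (((store_addr ^ new_val) >> log_region_bytes) == 0) return; // same region
  if (new_val == 0) return;                                      // storing NULL
  uint8_t* card = card_table_base + (store_addr >> card_shift);
  if (*card == young_card) return;                               // young card, skip
  std::atomic_thread_fence(std::memory_order_seq_cst);           // StoreLoad fence
  if (*card == dirty_card) return;                               // already dirty
  *card = dirty_card;                                            // dirty card and log it
  enqueue_card(card);
}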

#endif // INCLUDE_ALL_GCS

//////////////////////////////////////////////////////////////////////////////////

#ifdef AARCH64

@ -2873,38 +2684,39 @@ void MacroAssembler::store_klass_gap(Register dst) {
#endif // AARCH64

void MacroAssembler::load_heap_oop(Register dst, Address src) {
#ifdef AARCH64
if (UseCompressedOops) {
ldr_w(dst, src);
decode_heap_oop(dst);
return;
}
#endif // AARCH64
ldr(dst, src);
void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, Register tmp2, Register tmp3, DecoratorSet decorators) {
access_load_at(T_OBJECT, IN_HEAP | decorators, src, dst, tmp1, tmp2, tmp3);
}

// Blows src and flags.
void MacroAssembler::store_heap_oop(Register src, Address dst) {
#ifdef AARCH64
if (UseCompressedOops) {
assert(!dst.uses(src), "not enough registers");
encode_heap_oop(src);
str_w(src, dst);
return;
}
#endif // AARCH64
str(src, dst);
void MacroAssembler::store_heap_oop(Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, DecoratorSet decorators) {
access_store_at(T_OBJECT, IN_HEAP | decorators, obj, new_val, tmp1, tmp2, tmp3, false);
}

void MacroAssembler::store_heap_oop_null(Register src, Address dst) {
#ifdef AARCH64
if (UseCompressedOops) {
str_w(src, dst);
return;
void MacroAssembler::store_heap_oop_null(Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, DecoratorSet decorators) {
access_store_at(T_OBJECT, IN_HEAP, obj, new_val, tmp1, tmp2, tmp3, true);
}

void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
Address src, Register dst, Register tmp1, Register tmp2, Register tmp3) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
} else {
bs->load_at(this, decorators, type, dst, src, tmp1, tmp2, tmp3);
}
}

void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bool as_raw = (decorators & AS_RAW) != 0;
if (as_raw) {
bs->BarrierSetAssembler::store_at(this, decorators, type, obj, new_val, tmp1, tmp2, tmp3, is_null);
} else {
bs->store_at(this, decorators, type, obj, new_val, tmp1, tmp2, tmp3, is_null);
}
#endif // AARCH64
str(src, dst);
}
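
access_load_at()/access_store_at() above dispatch on the AS_RAW decorator bit: raw accesses call the base BarrierSetAssembler directly, everything else goes through the virtual call so the active GC selects the barrier. A simplified standalone model of that dispatch; the type and flag names are illustrative, not the real DecoratorSet:

#include <cstdint>
typedef uint64_t DecoratorSetBits;
const DecoratorSetBits AS_RAW_BIT = 1u << 0;   // illustrative bit value
struct BarrierAssembler {
  virtual void load_at(DecoratorSetBits d, void** src, void** dst) {
    *dst = *src;                               // raw load, no GC barrier
  }
  virtual ~BarrierAssembler() {}
};
struct G1BarrierAssembler : BarrierAssembler {
  void load_at(DecoratorSetBits d, void** src, void** dst) override {
    BarrierAssembler::load_at(d, src, dst);    // raw access plus (elided) barriers
  }
};
void access_load_at_sketch(BarrierAssembler* bs, DecoratorSetBits d,
                           void** src, void** dst) {
  if (d & AS_RAW_BIT) {
    bs->BarrierAssembler::load_at(d, src, dst); // force the non-virtual base version
  } else {
    bs->load_at(d, src, dst);                   // virtual: the GC picks the barrier
  }
}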

@ -401,27 +401,6 @@ public:

void resolve_jobject(Register value, Register tmp1, Register tmp2);

#if INCLUDE_ALL_GCS
// G1 pre-barrier.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
// If store_addr != noreg, then previous value is loaded from [store_addr];
// in such case store_addr and new_val registers are preserved;
// otherwise pre_val register is preserved.
void g1_write_barrier_pre(Register store_addr,
Register new_val,
Register pre_val,
Register tmp1,
Register tmp2);

// G1 post-barrier.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
void g1_write_barrier_post(Register store_addr,
Register new_val,
Register tmp1,
Register tmp2,
Register tmp3);
#endif // INCLUDE_ALL_GCS

#ifndef AARCH64
void nop() {
mov(R0, R0);
@ -1072,12 +1051,12 @@ public:

// oop manipulations

void load_heap_oop(Register dst, Address src);
void store_heap_oop(Register src, Address dst);
void store_heap_oop(Address dst, Register src) {
store_heap_oop(src, dst);
}
void store_heap_oop_null(Register src, Address dst);
void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
void store_heap_oop(Address obj, Register new_val, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
void store_heap_oop_null(Address obj, Register new_val, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

void access_load_at(BasicType type, DecoratorSet decorators, Address src, Register dst, Register tmp1, Register tmp2, Register tmp3);
void access_store_at(BasicType type, DecoratorSet decorators, Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);

#ifdef AARCH64
void encode_heap_oop(Register dst, Register src);

@ -3260,7 +3260,7 @@ class StubGenerator: public StubCodeGenerator {
__ align(OptoLoopAlignment);
__ BIND(store_element);
if (UseCompressedOops) {
__ store_heap_oop(R5, Address(to, BytesPerHeapOop, post_indexed)); // store the oop, changes flags
__ store_heap_oop(Address(to, BytesPerHeapOop, post_indexed), R5); // store the oop, changes flags
__ subs_32(count,count,1);
} else {
__ subs_32(count,count,1);
@ -852,80 +852,53 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//

address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. The "intrinsified" code for G1 (or any SATB based GC),
// 2. The slow path - which is an expansion of the regular method entry.
//
// Notes:-
// * In the G1 code we do not check whether we need to block for
// a safepoint. If G1 is enabled then we must execute the specialized
// code for Reference.get (except when the Reference object is null)
// so that we can log the value in the referent field with an SATB
// update buffer.
// If the code for the getfield template is modified so that the
// G1 pre-barrier code is executed when the current method is
// Reference.get() then going through the normal method entry
// will be fine.
// * The G1 code can, however, check the receiver object (the instance
// of java.lang.Reference) and jump to the slow path if null. If the
// Reference object is null then we obviously cannot fetch the referent
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_enty.
//
// Rmethod: Method*
// Rthread: thread
// Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
// Rparams: parameters
// Code: _aload_0, _getfield, _areturn
// parameter size = 1
//
// The code that gets generated by this routine is split into 2 parts:
// 1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
// 2. The slow path - which is an expansion of the regular method entry.
//
// Notes:-
// * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
// * We may jump to the slow path iff the receiver is null. If the
// Reference object is null then we no longer perform an ON_WEAK_OOP_REF load
// Thus we can use the regular method entry code to generate the NPE.
//
// Rmethod: Method*
// Rthread: thread
// Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
// Rparams: parameters

address entry = __ pc();
Label slow_path;
const Register Rthis = R0;
const Register Rret_addr = Rtmp_save1;
assert_different_registers(Rthis, Rret_addr, Rsender_sp);
address entry = __ pc();
Label slow_path;
const Register Rthis = R0;
const Register Rret_addr = Rtmp_save1;
assert_different_registers(Rthis, Rret_addr, Rsender_sp);

const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");
const int referent_offset = java_lang_ref_Reference::referent_offset;
guarantee(referent_offset > 0, "referent offset not initialized");

// Check if local 0 != NULL
// If the receiver is null then it is OK to jump to the slow path.
__ ldr(Rthis, Address(Rparams));
__ cbz(Rthis, slow_path);
// Check if local 0 != NULL
// If the receiver is null then it is OK to jump to the slow path.
__ ldr(Rthis, Address(Rparams));
__ cbz(Rthis, slow_path);

// Generate the G1 pre-barrier code to log the value of
// the referent field in an SATB buffer.
// Preserve LR
__ mov(Rret_addr, LR);

// Load the value of the referent field.
__ load_heap_oop(R0, Address(Rthis, referent_offset));
// Load the value of the referent field.
const Address field_address(Rthis, referent_offset);
__ load_heap_oop(R0, field_address, Rtemp, R1_tmp, R2_tmp, ON_WEAK_OOP_REF);

// Preserve LR
__ mov(Rret_addr, LR);
// _areturn
__ mov(SP, Rsender_sp);
__ ret(Rret_addr);

__ g1_write_barrier_pre(noreg, // store_addr
noreg, // new_val
R0, // pre_val
Rtemp, // tmp1
R1_tmp); // tmp2

// _areturn
__ mov(SP, Rsender_sp);
__ ret(Rret_addr);

// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry;
}
#endif // INCLUDE_ALL_GCS

// If G1 is not enabled then attempt to go through the normal entry point
return NULL;
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
__ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
return entry;
}
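
The rewritten entry above always emits the intrinsic and only falls back to the regular method entry when the receiver is null. Its control flow, modelled in plain C++; load_referent_with_weak_barrier and slow_path_entry are illustrative stand-ins for the ON_WEAK_OOP_REF load and the zerolocals entry:

void* reference_get_entry_sketch(void** receiver_slot,
                                 void* (*load_referent_with_weak_barrier)(void*),
                                 void* (*slow_path_entry)(void**)) {
  void* receiver = *receiver_slot;
  if (receiver == nullptr) {
    return slow_path_entry(receiver_slot);          // regular entry raises the NPE
  }
  return load_referent_with_weak_barrier(receiver); // fast path: barriered referent load
}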

// Not supported

@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
@ -187,72 +188,24 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
Register tmp1,
Register tmp2,
Register tmp3,
BarrierSet::Name barrier,
bool precise,
bool is_null) {
bool is_null,
DecoratorSet decorators = 0) {

assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
switch (barrier) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1BarrierSet:
{
// flatten object address if needed
assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here");

const Register store_addr = obj.base();
if (obj.index() != noreg) {
assert (obj.disp() == 0, "index or displacement, not both");
#ifdef AARCH64
__ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm());
#else
assert(obj.offset_op() == add_offset, "addition is expected");
__ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
#endif // AARCH64
} else if (obj.disp() != 0) {
__ add(store_addr, obj.base(), obj.disp());
}

__ g1_write_barrier_pre(store_addr, new_val, tmp1, tmp2, tmp3);
if (is_null) {
__ store_heap_oop_null(new_val, Address(store_addr));
} else {
// G1 barrier needs uncompressed oop for region cross check.
Register val_to_store = new_val;
if (UseCompressedOops) {
val_to_store = tmp1;
__ mov(val_to_store, new_val);
}
__ store_heap_oop(val_to_store, Address(store_addr)); // blows val_to_store:
val_to_store = noreg;
__ g1_write_barrier_post(store_addr, new_val, tmp1, tmp2, tmp3);
}
}
break;
#endif // INCLUDE_ALL_GCS
case BarrierSet::CardTableBarrierSet:
{
if (is_null) {
__ store_heap_oop_null(new_val, obj);
} else {
assert (!precise || (obj.index() == noreg && obj.disp() == 0),
"store check address should be calculated beforehand");

__ store_check_part1(tmp1);
__ store_heap_oop(new_val, obj); // blows new_val:
new_val = noreg;
__ store_check_part2(obj.base(), tmp1, tmp2);
}
}
break;
case BarrierSet::ModRef:
ShouldNotReachHere();
break;
default:
ShouldNotReachHere();
break;
if (is_null) {
__ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
} else {
__ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
}
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
Register dst,
Address obj,
DecoratorSet decorators = 0) {
__ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
}
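
After this hunk the interpreter no longer switches on the barrier set; it forwards to the MacroAssembler with a DecoratorSet and lets the barrier-set assembler pick the barrier. A minimal model of that calling convention, with free functions standing in for the assembler methods and an illustrative decorator bit:

#include <cstdint>
typedef uint64_t Decorators;
const Decorators IN_HEAP_ARRAY_BIT = 1u << 3;   // illustrative bit value, not the real one
void store_heap_oop_model(void** slot, void* val, Decorators d)  { *slot = val; }
void store_heap_oop_null_model(void** slot, Decorators d)        { *slot = nullptr; }
void do_oop_store_model(void** slot, void* new_val, bool is_null, Decorators d = 0) {
  if (is_null) {
    store_heap_oop_null_model(slot, d);   // barrier code is selected by the decorators
  } else {
    store_heap_oop_model(slot, new_val, d);
  }
}
// Usage mirroring the aastore hunk below: an array element store passes the
// array decorator so the GC can apply array-specific barriers.
// do_oop_store_model(element_slot, value, /*is_null=*/false, IN_HEAP_ARRAY_BIT);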

Address TemplateTable::at_bcp(int offset) {
assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
return Address(Rbcp, offset);
@ -863,7 +816,7 @@ void TemplateTable::aaload() {
const Register Rindex = R0_tos;

index_check(Rarray, Rindex);
__ load_heap_oop(R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp));
do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IN_HEAP_ARRAY);
}

@ -1248,7 +1201,7 @@ void TemplateTable::aastore() {
__ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));

// Now store using the appropriate barrier
do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, false);
do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IN_HEAP_ARRAY);
__ b(done);

__ bind(throw_array_store);
@ -1264,7 +1217,7 @@ void TemplateTable::aastore() {
__ profile_null_seen(R0_tmp);

// Store a NULL
do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, _bs->kind(), true, true);
do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IN_HEAP_ARRAY);

// Pop stack arguments
__ bind(done);
@ -3286,7 +3239,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
// atos case for AArch64 and slow version on 32-bit ARM
if(!atos_merged_with_itos) {
__ bind(Latos);
__ load_heap_oop(R0_tos, Address(Robj, Roffset));
do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
__ push(atos);
// Rewrite bytecode to be faster
if (!is_static && rc == may_rewrite) {
@ -3638,7 +3591,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
__ pop(atos);
if (!is_static) pop_and_check_object(Robj);
// Store into the field
do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, _bs->kind(), false, false);
do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
if (!is_static && rc == may_rewrite) {
patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
}
@ -3816,7 +3769,7 @@ void TemplateTable::fast_storefield(TosState state) {
#endif // AARCH64

case Bytecodes::_fast_aputfield:
do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, _bs->kind(), false, false);
do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, false);
break;

default:
@ -3912,7 +3865,7 @@ void TemplateTable::fast_accessfield(TosState state) {
case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
#endif // __SOFTFP__
#endif // AARCH64
case Bytecodes::_fast_agetfield: __ load_heap_oop(R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
default:
ShouldNotReachHere();
}
@ -3992,7 +3945,7 @@ void TemplateTable::fast_xaccess(TosState state) {
if (state == itos) {
__ ldr_s32(R0_tos, Address(Robj, Roffset));
} else if (state == atos) {
__ load_heap_oop(R0_tos, Address(Robj, Roffset));
do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
__ verify_oop(R0_tos);
} else if (state == ftos) {
#ifdef AARCH64

@ -37,10 +37,6 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing

@ -38,10 +38,6 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

// Convention: Use Z_R0 and Z_R1 instead of Z_scratch_* in all
// assembler_s390.* files.

@ -36,10 +36,6 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */

@ -37,10 +37,6 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

int AbstractAssembler::code_fill_byte() {
return 0;

@ -65,7 +65,7 @@ address CppInterpreterGenerator::generate_accessor_entry() {
}

address CppInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
#if INCLUDE_G1GC
if (UseG1GC) {
// We need to generate have a routine that generates code to:
// * load the value in the referent field
@ -77,7 +77,7 @@ address CppInterpreterGenerator::generate_Reference_get_entry(void) {
// field as live.
Unimplemented();
}
#endif // INCLUDE_ALL_GCS
#endif // INCLUDE_G1GC

// If G1 is not enabled then attempt to go through the normal entry point
// Reference.get could be instrumented by jvmti
@ -3111,6 +3111,68 @@ static address get_stack_commited_bottom(address bottom, size_t size) {
return nbot;
}

bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
int mincore_return_value;
const size_t stripe = 1024; // query this many pages each time
unsigned char vec[stripe];
const size_t page_sz = os::vm_page_size();
size_t pages = size / page_sz;

assert(is_aligned(start, page_sz), "Start address must be page aligned");
assert(is_aligned(size, page_sz), "Size must be page aligned");

committed_start = NULL;

int loops = (pages + stripe - 1) / stripe;
int committed_pages = 0;
address loop_base = start;
for (int index = 0; index < loops; index ++) {
assert(pages > 0, "Nothing to do");
int pages_to_query = (pages >= stripe) ? stripe : pages;
pages -= pages_to_query;

// Get stable read
while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN);

// During shutdown, some memory goes away without properly notifying NMT,
// E.g. ConcurrentGCThread/WatcherThread can exit without deleting thread object.
// Bailout and return as not committed for now.
if (mincore_return_value == -1 && errno == ENOMEM) {
return false;
}

assert(mincore_return_value == 0, "Range must be valid");
// Process this stripe
for (int vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
if ((vec[vecIdx] & 0x01) == 0) { // not committed
// End of current contiguous region
if (committed_start != NULL) {
break;
}
} else { // committed
// Start of region
if (committed_start == NULL) {
committed_start = loop_base + page_sz * vecIdx;
}
committed_pages ++;
}
}

loop_base += pages_to_query * page_sz;
}

if (committed_start != NULL) {
assert(committed_pages > 0, "Must have committed region");
assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");
assert(committed_start >= start && committed_start < start + size, "Out of range");
committed_size = page_sz * committed_pages;
return true;
} else {
assert(committed_pages == 0, "Should not have committed region");
return false;
}
}
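
The new Linux implementation above probes commit state in stripes of 1024 pages so the mincore() result vector stays small. A minimal standalone example of the same system call on a single page (Linux-specific; the helper name is illustrative, and mincore strictly reports residency, which this code treats as committed):

#include <sys/mman.h>
#include <unistd.h>
#include <cerrno>
bool page_is_resident(void* page_aligned_addr) {
  unsigned char vec[1];
  long page_sz = sysconf(_SC_PAGESIZE);
  int rc;
  // Retry on EAGAIN, mirroring the "get stable read" loop above.
  while ((rc = mincore(page_aligned_addr, (size_t)page_sz, vec)) == -1 &&
         errno == EAGAIN) {
  }
  return rc == 0 && (vec[0] & 0x01) != 0;  // bit 0 set: page is resident
}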

// Linux uses a growable mapping for the stack, and if the mapping for
// the stack guard pages is not removed when we detach a thread the

@ -98,26 +98,8 @@ inline int os::ftruncate(int fd, jlong length) {

inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
{
// readdir_r has been deprecated since glibc 2.24.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19056 for more details.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

dirent* p;
int status;
assert(dirp != NULL, "just checking");

// NOTE: Linux readdir_r (on RH 6.2 and 7.2 at least) is NOT like the POSIX
// version. Here is the doc for this function:
// http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_262.html

if((status = ::readdir_r(dirp, dbuf, &p)) != 0) {
errno = status;
return NULL;
} else
return p;

#pragma GCC diagnostic pop
return ::readdir(dirp);
}
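
With readdir_r deprecated since glibc 2.24, the wrapper above now simply forwards to ::readdir and the caller-supplied buffer goes unused. A minimal standalone usage sketch of the plain readdir loop this relies on:

#include <dirent.h>
#include <cstdio>
void list_directory(const char* path) {
  DIR* dirp = opendir(path);
  if (dirp == nullptr) return;
  // In modern glibc, concurrent readdir() calls on different DIR streams are
  // safe; only sharing a single stream across threads needs external locking.
  for (struct dirent* e = readdir(dirp); e != nullptr; e = readdir(dirp)) {
    std::printf("%s\n", e->d_name);
  }
  closedir(dirp);
}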

inline int os::closedir(DIR *dirp) {

@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.hpp"
#include "services/attachListener.hpp"

@ -365,6 +365,39 @@ size_t os::current_stack_size() {
return sz;
}

bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
MEMORY_BASIC_INFORMATION minfo;
committed_start = NULL;
committed_size = 0;
address top = start + size;
const address start_addr = start;
while (start < top) {
VirtualQuery(start, &minfo, sizeof(minfo));
if ((minfo.State & MEM_COMMIT) == 0) { // not committed
if (committed_start != NULL) {
break;
}
} else { // committed
if (committed_start == NULL) {
committed_start = start;
}
size_t offset = start - (address)minfo.BaseAddress;
committed_size += minfo.RegionSize - offset;
}
start = (address)minfo.BaseAddress + minfo.RegionSize;
}

if (committed_start == NULL) {
assert(committed_size == 0, "Sanity");
return false;
} else {
assert(committed_start >= start_addr && committed_start < top, "Out of range");
// current region may go beyond the limit, trim to the limit
committed_size = MIN2(committed_size, size_t(top - committed_start));
return true;
}
}
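
The Windows implementation above walks the address space region by region with VirtualQuery. A minimal standalone check of a single address using the same call (Windows-specific, illustrative helper name):

#include <windows.h>
bool address_is_committed(const void* addr) {
  MEMORY_BASIC_INFORMATION minfo;
  if (VirtualQuery(addr, &minfo, sizeof(minfo)) == 0) {
    return false;                        // query failed: treat as not committed
  }
  return (minfo.State & MEM_COMMIT) != 0;
}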

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
const struct tm* time_struct_ptr = localtime(clock);
if (time_struct_ptr != NULL) {

@ -421,9 +421,11 @@ void AOTCodeHeap::link_graal_runtime_symbols() {
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_multi_array", address, JVMCIRuntime::new_multi_array);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_array", address, JVMCIRuntime::dynamic_new_array);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_validate_object", address, JVMCIRuntime::validate_object);
#if INCLUDE_G1GC
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_pre", address, JVMCIRuntime::write_barrier_pre);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_identity_hash_code", address, JVMCIRuntime::identity_hash_code);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_post", address, JVMCIRuntime::write_barrier_post);
#endif
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_identity_hash_code", address, JVMCIRuntime::identity_hash_code);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_instance", address, JVMCIRuntime::dynamic_new_instance);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_thread_is_interrupted", address, JVMCIRuntime::thread_is_interrupted);
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_exception_handler_for_pc", address, JVMCIRuntime::exception_handler_for_pc);
@ -552,7 +554,9 @@ void AOTCodeHeap::link_global_lib_symbols() {
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_klass_base_address", address, Universe::narrow_klass_base());
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_oop_base_address", address, Universe::narrow_oop_base());
#if INCLUDE_G1GC
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_log_of_heap_region_grain_bytes", int, HeapRegion::LogOfHRGrainBytes);
#endif
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_inline_contiguous_allocation_supported", bool, heap->supports_inline_contig_alloc());
link_shared_runtime_symbols();
link_stub_routines_symbols();

@ -3592,8 +3592,13 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_methods(_methods);
this_klass->set_inner_classes(_inner_classes);
this_klass->set_local_interfaces(_local_interfaces);
this_klass->set_transitive_interfaces(_transitive_interfaces);
this_klass->set_annotations(_combined_annotations);
// Delay the setting of _transitive_interfaces until after initialize_supers() in
// fill_instance_klass(). It is because the _transitive_interfaces may be shared with
// its _super. If an OOM occurs while loading the current klass, its _super field
// may not have been set. When GC tries to free the klass, the _transitive_interfaces
// may be deallocated mistakenly in InstanceKlass::deallocate_interfaces(). Subsequent
// dereferences to the deallocated _transitive_interfaces will result in a crash.

// Clear out these fields so they don't get deallocated by the destructor
clear_class_metadata();
@ -5462,7 +5467,6 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
assert(NULL == _methods, "invariant");
assert(NULL == _inner_classes, "invariant");
assert(NULL == _local_interfaces, "invariant");
assert(NULL == _transitive_interfaces, "invariant");
assert(NULL == _combined_annotations, "invariant");

if (_has_final_method) {
@ -5529,7 +5533,9 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
}

// Fill in information needed to compute superclasses.
ik->initialize_supers(const_cast<InstanceKlass*>(_super_klass), CHECK);
ik->initialize_supers(const_cast<InstanceKlass*>(_super_klass), _transitive_interfaces, CHECK);
ik->set_transitive_interfaces(_transitive_interfaces);
_transitive_interfaces = NULL;

// Initialize itable offset tables
klassItable::setup_itable_offset_table(ik);
@ -5834,7 +5840,6 @@ void ClassFileParser::clear_class_metadata() {
_methods = NULL;
_inner_classes = NULL;
_local_interfaces = NULL;
_transitive_interfaces = NULL;
_combined_annotations = NULL;
_annotations = _type_annotations = NULL;
_fields_annotations = _fields_type_annotations = NULL;
@ -5886,6 +5891,7 @@ ClassFileParser::~ClassFileParser() {
}

clear_class_metadata();
_transitive_interfaces = NULL;

// deallocate the klass if already created. Don't directly deallocate, but add
// to the deallocate list so that the klass is removed from the CLD::_klasses list

@ -37,8 +37,6 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/generation.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"

@ -44,7 +44,7 @@
#include "services/diagnosticCommand.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#if INCLUDE_G1GC
#include "gc/g1/g1StringDedup.hpp"
#endif

@ -260,7 +260,7 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
string = java_lang_String::create_from_unicode(name, len, CHECK_NULL);
}

#if INCLUDE_ALL_GCS
#if INCLUDE_G1GC
if (G1StringDedup::is_enabled()) {
// Deduplicate the string before it is interned. Note that we should never
// deduplicate a string after it has been interned. Doing so will counteract

@ -29,7 +29,6 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspaceClosure.hpp"
@ -389,22 +389,24 @@ static void check_class(Metadata* md) {

void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// The only exception is compiledICHolder metdata which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
CompiledICHolder* cichk_metdata = ic->cached_icholder();

if (cichk_oop->is_loader_alive()) {
if (cichk_metdata->is_loader_alive()) {
return;
}
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive()) {
Metadata* ic_metdata = ic->cached_metadata();
if (ic_metdata != NULL) {
if (ic_metdata->is_klass()) {
if (((Klass*)ic_metdata)->is_loader_alive()) {
return;
}
} else if (ic_oop->is_method()) {
if (((Method*)ic_oop)->method_holder()->is_loader_alive()) {
} else if (ic_metdata->is_method()) {
Method* method = (Method*)ic_metdata;
assert(!method->is_old(), "old method should have been cleaned");
if (method->method_holder()->is_loader_alive()) {
return;
}
} else {
@ -493,16 +495,6 @@ void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_oc
// (See comment above.)
}

// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
if (JvmtiExport::has_redefined_a_class()) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}

// Exception cache
clean_exception_cache();

@ -581,16 +573,6 @@ bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unl
// (See comment above.)
}

// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
if (JvmtiExport::has_redefined_a_class()) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}

// Exception cache
clean_exception_cache();

@ -21,7 +21,6 @@
// questions.
//

#include "precompiled.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"

@ -266,9 +266,9 @@ OopMap* OopMapSet::find_map_at_offset(int pc_offset) const {
}

static void add_derived_oop(oop* base, oop* derived) {
#if !defined(TIERED) && !defined(INCLUDE_JVMCI)
#if !defined(TIERED) && !INCLUDE_JVMCI
COMPILER1_PRESENT(ShouldNotReachHere();)
#endif // !defined(TIERED) && !defined(INCLUDE_JVMCI)
#endif // !defined(TIERED) && !INCLUDE_JVMCI
#if COMPILER2_OR_JVMCI
DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
@ -459,7 +459,7 @@ void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
#ifndef PRODUCT

bool ImmutableOopMap::has_derived_pointer() const {
#if !defined(TIERED) && !defined(INCLUDE_JVMCI)
#if !defined(TIERED) && !INCLUDE_JVMCI
COMPILER1_PRESENT(return false);
#endif // !TIERED
#if COMPILER2_OR_JVMCI
@ -290,13 +290,13 @@ void CMSCollector::ref_processor_init() {
if (_ref_processor == NULL) {
// Allocate and initialize a reference processor
_ref_processor =
new ReferenceProcessor(_span, // span
new ReferenceProcessor(&_span_based_discoverer,
(ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
ParallelGCThreads, // mt processing degree
_cmsGen->refs_discovery_is_mt(), // mt discovery
ParallelGCThreads, // mt processing degree
_cmsGen->refs_discovery_is_mt(), // mt discovery
MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
_cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
&_is_alive_closure); // closure for liveness info
_cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
&_is_alive_closure); // closure for liveness info
// Initialize the _ref_processor field of CMSGen
_cmsGen->set_ref_processor(_ref_processor);

@ -445,7 +445,10 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
CardTableRS* ct,
ConcurrentMarkSweepPolicy* cp):
_cmsGen(cmsGen),
// Adjust span to cover old (cms) gen
_span(cmsGen->reserved()),
_ct(ct),
_span_based_discoverer(_span),
_ref_processor(NULL), // will be set later
_conc_workers(NULL), // may be set later
_abort_preclean(false),
@ -455,8 +458,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_modUnionTable((CardTable::card_shift - LogHeapWordSize),
-1 /* lock-free */, "No_lock" /* dummy */),
_modUnionClosurePar(&_modUnionTable),
// Adjust my span to cover old (cms) gen
_span(cmsGen->reserved()),
// Construct the is_alive_closure with _span & markBitMap
_is_alive_closure(_span, &_markBitMap),
_restart_addr(NULL),
@ -3744,7 +3745,6 @@ void CMSCollector::sample_eden() {
}
}

size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
assert(_collectorState == Precleaning ||
_collectorState == AbortablePreclean, "incorrect state");
@ -3761,7 +3761,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
// referents.
if (clean_refs) {
CMSPrecleanRefsYieldClosure yield_cl(this);
assert(rp->span().equals(_span), "Spans should be equal");
assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
&_markStack, true /* preclean */);
CMSDrainMarkingStackClosure complete_trace(this,
@ -5153,7 +5153,7 @@ void CMSRefProcTaskExecutor::execute(ProcessTask& task)
WorkGang* workers = heap->workers();
assert(workers != NULL, "Need parallel worker threads.");
CMSRefProcTaskProxy rp_task(task, &_collector,
_collector.ref_processor()->span(),
_collector.ref_processor_span(),
_collector.markBitMap(),
workers, _collector.task_queues());
workers->run_task(&rp_task);
@ -5174,13 +5174,13 @@ void CMSCollector::refProcessingWork() {
HandleMark hm;

ReferenceProcessor* rp = ref_processor();
assert(rp->span().equals(_span), "Spans should be equal");
assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
// Process weak references.
rp->setup_policy(false);
verify_work_stacks_empty();

ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
{
GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);

@ -5245,7 +5245,7 @@ void CMSCollector::refProcessingWork() {
CodeCache::do_unloading(&_is_alive_closure, purged_class);

// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links();
Klass::clean_weak_klass_links(purged_class);
}

{

@ -69,7 +69,6 @@ class FreeChunk;
class ParNewGeneration;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
class SerialOldTracer;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
@ -617,7 +616,7 @@ class CMSCollector: public CHeapObj<mtGC> {

protected:
ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
MemRegion _span; // Span covering above two
MemRegion _span; // Span covering above
CardTableRS* _ct; // Card table

// CMS marking support structures
@ -641,8 +640,9 @@ class CMSCollector: public CHeapObj<mtGC> {
NOT_PRODUCT(ssize_t _num_par_pushes;)

// ("Weak") Reference processing support.
ReferenceProcessor* _ref_processor;
CMSIsAliveClosure _is_alive_closure;
SpanSubjectToDiscoveryClosure _span_based_discoverer;
ReferenceProcessor* _ref_processor;
CMSIsAliveClosure _is_alive_closure;
// Keep this textually after _markBitMap and _span; c'tor dependency.

ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
@ -841,6 +841,7 @@ class CMSCollector: public CHeapObj<mtGC> {
ConcurrentMarkSweepPolicy* cp);
ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

MemRegion ref_processor_span() const { return _span_based_discoverer.span(); }
ReferenceProcessor* ref_processor() { return _ref_processor; }
void ref_processor_init();

@ -983,7 +983,7 @@ void ParNewGeneration::collect(bool full,
// Can the mt_degree be set later (at run_task() time would be best)?
rp->set_active_mt_degree(active_workers);
ReferenceProcessorStats stats;
ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
stats = rp->process_discovered_references(&is_alive, &keep_alive,
@ -1471,8 +1471,9 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
void ParNewGeneration::ref_processor_init() {
if (_ref_processor == NULL) {
// Allocate and initialize a reference processor
_span_based_discoverer.set_span(_reserved);
_ref_processor =
new ReferenceProcessor(_reserved, // span
new ReferenceProcessor(&_span_based_discoverer, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
ParallelGCThreads, // mt processing degree
refs_discovery_is_mt(), // mt discovery
@ -81,11 +81,9 @@ void G1Arguments::initialize() {
vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
}

#if INCLUDE_ALL_GCS
if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
}
#endif

// MarkStackSize will be set (if it hasn't been set by the user)
// when concurrent marking is initialized.

@ -988,9 +988,9 @@ void G1CollectedHeap::abort_concurrent_cycle() {

// Disable discovery and empty the discovered lists
// for the CM ref processor.
ref_processor_cm()->disable_discovery();
ref_processor_cm()->abandon_partial_discovery();
ref_processor_cm()->verify_no_references_recorded();
_ref_processor_cm->disable_discovery();
_ref_processor_cm->abandon_partial_discovery();
_ref_processor_cm->verify_no_references_recorded();

// Abandon current iterations of concurrent marking and concurrent
// refinement, if any are in progress.
@ -1080,10 +1080,10 @@ void G1CollectedHeap::verify_after_full_collection() {
// That will be done at the start of the next marking cycle.
// We also know that the STW processor should no longer
// discover any new references.
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
ref_processor_cm()->verify_no_references_recorded();
assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
_ref_processor_stw->verify_no_references_recorded();
_ref_processor_cm->verify_no_references_recorded();
}

void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
@ -1410,10 +1410,12 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_g1_policy(new G1Policy(_gc_timer_stw)),
_collection_set(this, _g1_policy),
_dirty_card_queue_set(false),
_is_alive_closure_cm(this),
_is_alive_closure_stw(this),
_ref_processor_cm(NULL),
_ref_processor_stw(NULL),
_is_alive_closure_stw(this),
_is_subject_to_discovery_stw(this),
_ref_processor_cm(NULL),
_is_alive_closure_cm(this),
_is_subject_to_discovery_cm(this),
_bot(NULL),
_hot_card_cache(NULL),
_g1_rem_set(NULL),
@ -1786,43 +1788,27 @@ void G1CollectedHeap::ref_processing_init() {
// * Discovery is atomic - i.e. not concurrent.
// * Reference discovery will not need a barrier.

MemRegion mr = reserved_region();

bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);

// Concurrent Mark ref processor
_ref_processor_cm =
new ReferenceProcessor(mr, // span
mt_processing,
// mt processing
ParallelGCThreads,
// degree of mt processing
(ParallelGCThreads > 1) || (ConcGCThreads > 1),
// mt discovery
MAX2(ParallelGCThreads, ConcGCThreads),
// degree of mt discovery
false,
// Reference discovery is not atomic
&_is_alive_closure_cm);
// is alive closure
// (for efficiency/performance)
new ReferenceProcessor(&_is_subject_to_discovery_cm,
mt_processing, // mt processing
ParallelGCThreads, // degree of mt processing
(ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
false, // Reference discovery is not atomic
&_is_alive_closure_cm); // is alive closure

// STW ref processor
_ref_processor_stw =
new ReferenceProcessor(mr, // span
mt_processing,
// mt processing
ParallelGCThreads,
// degree of mt processing
(ParallelGCThreads > 1),
// mt discovery
ParallelGCThreads,
// degree of mt discovery
true,
// Reference discovery is atomic
&_is_alive_closure_stw);
// is alive closure
// (for efficiency/performance)
new ReferenceProcessor(&_is_subject_to_discovery_stw,
mt_processing, // mt processing
ParallelGCThreads, // degree of mt processing
(ParallelGCThreads > 1), // mt discovery
ParallelGCThreads, // degree of mt discovery
true, // Reference discovery is atomic
&_is_alive_closure_stw); // is alive closure
}

CollectorPolicy* G1CollectedHeap::collector_policy() const {
@ -2853,14 +2839,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// reference processing currently works in G1.

// Enable discovery in the STW reference processor
ref_processor_stw()->enable_discovery();
_ref_processor_stw->enable_discovery();

{
// We want to temporarily turn off discovery by the
// CM ref processor, if necessary, and turn it back on
// on again later if we do. Using a scoped
// NoRefDiscovery object will do this.
NoRefDiscovery no_cm_discovery(ref_processor_cm());
NoRefDiscovery no_cm_discovery(_ref_processor_cm);

// Forget the current alloc region (we might even choose it to be part
// of the collection set!).
@ -2998,8 +2984,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_verifier->verify_after_gc(verify_type);
_verifier->check_bitmaps("GC End");

assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
_ref_processor_stw->verify_no_references_recorded();

// CM reference discovery will be re-enabled if necessary.
}
@ -3543,6 +3529,7 @@ public:
// To minimize the remark pause times, the tasks below are done in parallel.
class G1ParallelCleaningTask : public AbstractGangTask {
private:
bool _unloading_occurred;
G1StringAndSymbolCleaningTask _string_symbol_task;
G1CodeCacheUnloadingTask _code_cache_task;
G1KlassCleaningTask _klass_cleaning_task;
@ -3555,6 +3542,7 @@ public:
_string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()),
_code_cache_task(num_workers, is_alive, unloading_occurred),
_klass_cleaning_task(),
_unloading_occurred(unloading_occurred),
_resolved_method_cleaning_task() {
}

@ -3580,7 +3568,11 @@ public:
_code_cache_task.work_second_pass(worker_id);

// Clean all klasses that were not unloaded.
_klass_cleaning_task.work();
// The weak metadata in klass doesn't need to be
// processed if there was no unloading.
if (_unloading_occurred) {
_klass_cleaning_task.work();
}
}
};

@ -3642,26 +3634,21 @@ void G1CollectedHeap::redirty_logged_cards() {

// Weak Reference Processing support

// An always "is_alive" closure that is used to preserve referents.
// If the object is non-null then it's alive. Used in the preservation
// of referent objects that are pointed to by reference objects
// discovered by the CM ref processor.
class G1AlwaysAliveClosure: public BoolObjectClosure {
public:
bool do_object_b(oop p) {
if (p != NULL) {
return true;
}
return false;
}
};

bool G1STWIsAliveClosure::do_object_b(oop p) {
// An object is reachable if it is outside the collection set,
// or is inside and copied.
return !_g1h->is_in_cset(p) || p->is_forwarded();
}

bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
assert(obj != NULL, "must not be NULL");
assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
// The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
// may falsely indicate that this is not the case here: however the collection set only
// contains old regions when concurrent mark is not running.
return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
}
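
This commit replaces the MemRegion "span" that ReferenceProcessor used to receive with a pluggable predicate (the SpanSubjectToDiscoveryClosure and G1...SubjectToDiscoveryClosure types seen in the hunks above). A simplified C++ model of the two styles; the names and the in_cset_or_survivor callback are illustrative, not the HotSpot classes:

struct SubjectToDiscoveryClosure {
  virtual bool do_object_b(void* obj) = 0;     // may this Reference be discovered?
  virtual ~SubjectToDiscoveryClosure() {}
};
struct SpanBasedDiscoverer : SubjectToDiscoveryClosure {
  char* lo; char* hi;                          // old behaviour: fixed address-range test
  bool do_object_b(void* obj) override {
    return (char*)obj >= lo && (char*)obj < hi;
  }
};
struct G1STWDiscoverer : SubjectToDiscoveryClosure {
  bool (*in_cset_or_survivor)(void*);          // new behaviour: heap-policy test
  bool do_object_b(void* obj) override { return in_cset_or_survivor(obj); }
};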

// Non Copying Keep Alive closure
class G1KeepAliveClosure: public OopClosure {
G1CollectedHeap*_g1h;
@ -3892,126 +3879,6 @@ void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {

// End of weak reference support closures

// Abstract task used to preserve (i.e. copy) any referent objects
// that are in the collection set and are pointed to by reference
// objects discovered by the CM ref processor.

class G1ParPreserveCMReferentsTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
G1ParScanThreadStateSet* _pss;
RefToScanQueueSet* _queues;
ParallelTaskTerminator _terminator;
uint _n_workers;

public:
G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, int workers, RefToScanQueueSet *task_queues) :
AbstractGangTask("ParPreserveCMReferents"),
_g1h(g1h),
_pss(per_thread_states),
_queues(task_queues),
_terminator(workers, _queues),
_n_workers(workers)
{
g1h->ref_processor_cm()->set_active_mt_degree(workers);
}

void work(uint worker_id) {
G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);

ResourceMark rm;
HandleMark hm;

G1ParScanThreadState* pss = _pss->state_for_worker(worker_id);
pss->set_ref_discoverer(NULL);
assert(pss->queue_is_empty(), "both queue and overflow should be empty");

// Is alive closure
G1AlwaysAliveClosure always_alive;

// Copying keep alive closure. Applied to referent objects that need
// to be copied.
G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);

ReferenceProcessor* rp = _g1h->ref_processor_cm();

uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
uint stride = MIN2(MAX2(_n_workers, 1U), limit);

// limit is set using max_num_q() - which was set using ParallelGCThreads.
// So this must be true - but assert just in case someone decides to
// change the worker ids.
assert(worker_id < limit, "sanity");
assert(!rp->discovery_is_atomic(), "check this code");

// Select discovered lists [i, i+stride, i+2*stride,...,limit)
for (uint idx = worker_id; idx < limit; idx += stride) {
DiscoveredList& ref_list = rp->discovered_refs()[idx];

DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
while (iter.has_next()) {
// Since discovery is not atomic for the CM ref processor, we
// can see some null referent objects.
iter.load_ptrs(DEBUG_ONLY(true));
oop ref = iter.obj();

// This will filter nulls.
if (iter.is_referent_alive()) {
iter.make_referent_alive();
}
iter.move_to_next();
}
}

// Drain the queue - which may cause stealing
G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
drain_queue.do_void();
// Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
assert(pss->queue_is_empty(), "should be");
}
};

void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
// Any reference objects, in the collection set, that were 'discovered'
// by the CM ref processor should have already been copied (either by
// applying the external root copy closure to the discovered lists, or
// by following an RSet entry).
//
// But some of the referents, that are in the collection set, that these
// reference objects point to may not have been copied: the STW ref
// processor would have seen that the reference object had already
// been 'discovered' and would have skipped discovering the reference,
// but would not have treated the reference object as a regular oop.
// As a result the copy closure would not have been applied to the
// referent object.
//
// We need to explicitly copy these referent objects - the references
// will be processed at the end of remarking.
//
// We also need to do this copying before we process the reference
// objects discovered by the STW ref processor in case one of these
// referents points to another object which is also referenced by an
// object discovered by the STW ref processor.
double preserve_cm_referents_time = 0.0;
|
||||
|
||||
// To avoid spawning task when there is no work to do, check that
|
||||
// a concurrent cycle is active and that some references have been
|
||||
// discovered.
|
||||
if (concurrent_mark()->cm_thread()->during_cycle() &&
|
||||
ref_processor_cm()->has_discovered_references()) {
|
||||
double preserve_cm_referents_start = os::elapsedTime();
|
||||
uint no_of_gc_workers = workers()->active_workers();
|
||||
G1ParPreserveCMReferentsTask keep_cm_referents(this,
|
||||
per_thread_states,
|
||||
no_of_gc_workers,
|
||||
_task_queues);
|
||||
workers()->run_task(&keep_cm_referents);
|
||||
preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
|
||||
}
|
||||
|
||||
g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
|
||||
}
|
||||
|
||||
// Weak Reference processing during an evacuation pause (part 1).
void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
  double ref_proc_start = os::elapsedTime();
@ -4055,9 +3922,9 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
    uint no_of_gc_workers = workers()->active_workers();

    // Parallel reference processing
    assert(no_of_gc_workers <= rp->max_num_q(),
    assert(no_of_gc_workers <= rp->max_num_queues(),
           "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
           no_of_gc_workers, rp->max_num_q());
           no_of_gc_workers, rp->max_num_queues());

    G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
    stats = rp->process_discovered_references(&is_alive,
@ -4095,9 +3962,9 @@ void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per

  uint n_workers = workers()->active_workers();

  assert(n_workers <= rp->max_num_q(),
  assert(n_workers <= rp->max_num_queues(),
         "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
         n_workers, rp->max_num_q());
         n_workers, rp->max_num_queues());

  G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
  rp->enqueue_discovered_references(&par_task_executor, pt);
@ -4192,13 +4059,17 @@ void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_threa
|
||||
}
|
||||
|
||||
void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
|
||||
// Also cleans the card table from temporary duplicate detection information used
|
||||
// during UpdateRS/ScanRS.
|
||||
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
|
||||
|
||||
// Process any discovered reference objects - we have
|
||||
// to do this _before_ we retire the GC alloc regions
|
||||
// as we may have to copy some 'reachable' referent
|
||||
// objects (and their reachable sub-graphs) that were
|
||||
// not copied during the pause.
|
||||
preserve_cm_referents(per_thread_states);
|
||||
process_discovered_references(per_thread_states);
|
||||
enqueue_discovered_references(per_thread_states);
|
||||
|
||||
G1STWIsAliveClosure is_alive(this);
|
||||
G1KeepAliveClosure keep_alive(this);
|
||||
@ -4221,8 +4092,6 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
|
||||
g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
|
||||
}
|
||||
|
||||
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
|
||||
|
||||
if (evacuation_failed()) {
|
||||
restore_after_evac_failure();
|
||||
|
||||
@ -4234,15 +4103,6 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
|
||||
|
||||
_preserved_marks_set.assert_empty();
|
||||
|
||||
// Enqueue any remaining references remaining on the STW
|
||||
// reference processor's discovered lists. We need to do
|
||||
// this after the card table is cleaned (and verified) as
|
||||
// the act of enqueueing entries on to the pending list
|
||||
// will log these updates (and dirty their associated
|
||||
// cards). We need these updates logged to update any
|
||||
// RSets.
|
||||
enqueue_discovered_references(per_thread_states);
|
||||
|
||||
_allocator->release_gc_alloc_regions(evacuation_info);
|
||||
|
||||
merge_per_thread_state_info(per_thread_states);
|
||||
|
@ -107,13 +107,20 @@ typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing during STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {
class G1STWIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p);
};

class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p);
};

class G1RegionMappingChangedListener : public G1MappingChangedListener {
private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
@ -506,9 +513,6 @@ private:
|
||||
// allocated block, or else "NULL".
|
||||
HeapWord* expand_and_allocate(size_t word_size);
|
||||
|
||||
// Preserve any referents discovered by concurrent marking that have not yet been
|
||||
// copied by the STW pause.
|
||||
void preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states);
|
||||
// Process any reference objects discovered during
|
||||
// an incremental evacuation pause.
|
||||
void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
|
||||
@ -897,6 +901,8 @@ private:
|
||||
// the discovered lists during reference discovery.
|
||||
G1STWIsAliveClosure _is_alive_closure_stw;
|
||||
|
||||
G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
|
||||
|
||||
// The (concurrent marking) reference processor...
|
||||
ReferenceProcessor* _ref_processor_cm;
|
||||
|
||||
@ -908,6 +914,7 @@ private:
|
||||
// discovery.
|
||||
G1CMIsAliveClosure _is_alive_closure_cm;
|
||||
|
||||
G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
|
||||
public:
|
||||
|
||||
RefToScanQueue *task_queue(uint i) const;
|
||||
|
@ -1389,15 +1389,6 @@ void G1ConcurrentMark::cleanup() {
|
||||
}
|
||||
}
|
||||
|
||||
// Supporting Object and Oop closures for reference discovery
|
||||
// and processing in during marking
|
||||
|
||||
bool G1CMIsAliveClosure::do_object_b(oop obj) {
|
||||
HeapWord* addr = (HeapWord*)obj;
|
||||
return addr != NULL &&
|
||||
(!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_ill(obj));
|
||||
}
|
||||
|
||||
// 'Keep Alive' oop closure used by both serial parallel reference processing.
|
||||
// Uses the G1CMTask associated with a worker thread (for serial reference
|
||||
// processing the G1CMTask for worker 0 is used) to preserve (mark) and
|
||||
@ -1665,7 +1656,7 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
|
||||
// Reference lists are balanced (see balance_all_queues() and balance_queues()).
|
||||
rp->set_active_mt_degree(active_workers);
|
||||
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
|
||||
|
||||
// Process the weak references.
|
||||
const ReferenceProcessorStats& stats =
|
||||
@ -1684,7 +1675,7 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
|
||||
assert(has_overflown() || _global_mark_stack.is_empty(),
|
||||
"Mark stack should be empty (unless it has overflown)");
|
||||
|
||||
assert(rp->num_q() == active_workers, "why not");
|
||||
assert(rp->num_queues() == active_workers, "why not");
|
||||
|
||||
rp->enqueue_discovered_references(executor, &pt);
|
||||
|
||||
|
@ -36,6 +36,7 @@
|
||||
class ConcurrentGCTimer;
|
||||
class G1ConcurrentMarkThread;
|
||||
class G1CollectedHeap;
|
||||
class G1CMOopClosure;
|
||||
class G1CMTask;
|
||||
class G1ConcurrentMark;
|
||||
class G1OldTracer;
|
||||
@ -109,7 +110,13 @@ class G1CMIsAliveClosure : public BoolObjectClosure {
|
||||
G1CollectedHeap* _g1h;
|
||||
public:
|
||||
G1CMIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
|
||||
bool do_object_b(oop obj);
|
||||
};
|
||||
|
||||
class G1CMSubjectToDiscoveryClosure : public BoolObjectClosure {
|
||||
G1CollectedHeap* _g1h;
|
||||
public:
|
||||
G1CMSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
|
||||
bool do_object_b(oop obj);
|
||||
};
|
||||
|
||||
|
@ -38,6 +38,22 @@
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"

inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
  return !_g1h->is_obj_ill(obj);
}

inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
  // Re-check whether the passed object is null. With ReferentBasedDiscovery the
  // mutator may have changed the referent's value (i.e. cleared it) between the
  // time the referent was determined to be potentially alive and calling this
  // method.
  if (obj == NULL) {
    return false;
  }
  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
  return _g1h->heap_region_containing(obj)->is_old_or_humongous();
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) {
  HeapRegion* const hr = _g1h->heap_region_containing(obj);
  return mark_in_next_bitmap(worker_id, hr, obj, obj_size);
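The inline closures above are the per-collector "subject to discovery" predicates that this change introduces: the STW processor tests collection-set and survivor membership, while the concurrent-mark processor tests for old or humongous regions. Below is a minimal, stand-alone sketch of that general pattern, not HotSpot code; every type and function name in it (SubjectToDiscoveryClosure, MiniRefProcessor, mock_old_or_humongous, and so on) is an illustrative stand-in. It only shows the shape of the idea: a reference processor consults an injected predicate instead of a hard-wired memory span.

#include <iostream>

// Illustrative stand-in for an object address; not a HotSpot type.
typedef unsigned long long Address;

// Predicate interface: "is this object subject to reference discovery?"
struct SubjectToDiscoveryClosure {
  virtual bool do_object_b(Address obj) = 0;
  virtual ~SubjectToDiscoveryClosure() {}
};

// Span-based rule: eligible if the object lies inside a fixed address range
// (roughly what a MemRegion-based setup would encode).
struct SpanSubjectToDiscovery : SubjectToDiscoveryClosure {
  Address _lo, _hi;
  SpanSubjectToDiscovery(Address lo, Address hi) : _lo(lo), _hi(hi) {}
  bool do_object_b(Address obj) { return obj >= _lo && obj < _hi; }
};

// Collector-specific rule: delegate to some notion of "region kind".
struct RegionSubjectToDiscovery : SubjectToDiscoveryClosure {
  bool (*_region_predicate)(Address);
  explicit RegionSubjectToDiscovery(bool (*pred)(Address)) : _region_predicate(pred) {}
  bool do_object_b(Address obj) { return _region_predicate(obj); }
};

// The "reference processor" only ever sees the predicate interface.
struct MiniRefProcessor {
  SubjectToDiscoveryClosure* _subject_to_discovery;
  explicit MiniRefProcessor(SubjectToDiscoveryClosure* c) : _subject_to_discovery(c) {}
  bool discover_reference(Address referent) {
    return _subject_to_discovery->do_object_b(referent);
  }
};

// Toy stand-in for "the region containing obj is old or humongous".
static bool mock_old_or_humongous(Address a) { return (a & 1) == 0; }

int main() {
  SpanSubjectToDiscovery span(0x1000, 0x2000);
  RegionSubjectToDiscovery region(&mock_old_or_humongous);
  MiniRefProcessor young(&span);
  MiniRefProcessor concurrent(&region);
  std::cout << young.discover_reference(0x1800) << " "
            << concurrent.discover_reference(0x1801) << "\n";  // prints: 1 0
  return 0;
}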
@ -112,7 +112,9 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_
|
||||
_preserved_marks_set(true),
|
||||
_serial_compaction_point(),
|
||||
_is_alive(heap->concurrent_mark()->next_mark_bitmap()),
|
||||
_is_alive_mutator(heap->ref_processor_stw(), &_is_alive) {
|
||||
_is_alive_mutator(heap->ref_processor_stw(), &_is_alive),
|
||||
_always_subject_to_discovery(),
|
||||
_is_subject_mutator(heap->ref_processor_stw(), &_always_subject_to_discovery) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
|
||||
|
||||
_preserved_marks_set.init(_num_workers);
|
||||
|
@ -42,6 +42,16 @@ class G1FullGCCompactionPoint;
|
||||
class GCMemoryManager;
|
||||
class ReferenceProcessor;
|
||||
|
||||
// Subject-to-discovery closure for reference processing during Full GC. During
|
||||
// Full GC the whole heap is subject to discovery.
|
||||
class G1FullGCSubjectToDiscoveryClosure: public BoolObjectClosure {
|
||||
public:
|
||||
bool do_object_b(oop p) {
|
||||
assert(p != NULL, "must be");
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
// The G1FullCollector holds data associated with the current Full GC.
|
||||
class G1FullCollector : StackObj {
|
||||
G1CollectedHeap* _heap;
|
||||
@ -58,6 +68,9 @@ class G1FullCollector : StackObj {
|
||||
|
||||
static uint calc_active_workers();
|
||||
|
||||
G1FullGCSubjectToDiscoveryClosure _always_subject_to_discovery;
|
||||
ReferenceProcessorSubjectToDiscoveryMutator _is_subject_mutator;
|
||||
|
||||
public:
|
||||
G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs);
|
||||
~G1FullCollector();
|
||||
|
@ -24,6 +24,7 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1FullGCMarker.inline.hpp"
|
||||
#include "gc/shared/referenceProcessor.hpp"
|
||||
|
||||
G1FullGCMarker::G1FullGCMarker(uint worker_id, PreservedMarks* preserved_stack, G1CMBitMap* bitmap) :
|
||||
_worker_id(worker_id),
|
||||
|
@ -34,7 +34,7 @@
|
||||
G1FullGCReferenceProcessingExecutor::G1FullGCReferenceProcessingExecutor(G1FullCollector* collector) :
|
||||
_collector(collector),
|
||||
_reference_processor(collector->reference_processor()),
|
||||
_old_mt_degree(_reference_processor->num_q()) {
|
||||
_old_mt_degree(_reference_processor->num_queues()) {
|
||||
if (_reference_processor->processing_is_mt()) {
|
||||
_reference_processor->set_active_mt_degree(_collector->workers());
|
||||
}
|
||||
@ -92,7 +92,7 @@ void G1FullGCReferenceProcessingExecutor::execute(STWGCTimer* timer, G1FullGCTra
|
||||
G1FullGCMarker* marker = _collector->marker(0);
|
||||
G1IsAliveClosure is_alive(_collector->mark_bitmap());
|
||||
G1FullKeepAliveClosure keep_alive(marker);
|
||||
ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_queues());
|
||||
AbstractRefProcTaskExecutor* executor = _reference_processor->processing_is_mt() ? this : NULL;
|
||||
|
||||
// Process discovered references, use this executor if multi-threaded
|
||||
|
@ -113,8 +113,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
|
||||
_gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Young Free Collection Set (ms):");
|
||||
_gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Non-Young Free Collection Set (ms):");
|
||||
|
||||
_gc_par_phases[PreserveCMReferents] = new WorkerDataArray<double>(max_gc_threads, "Parallel Preserve CM Refs (ms):");
|
||||
|
||||
reset();
|
||||
}
|
||||
|
||||
@ -399,8 +397,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
|
||||
|
||||
debug_time("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);
|
||||
|
||||
debug_time("Preserve CM Refs", _recorded_preserve_cm_referents_time_ms);
|
||||
trace_phase(_gc_par_phases[PreserveCMReferents]);
|
||||
debug_time("Clear Card Table", _cur_clear_ct_time_ms);
|
||||
|
||||
debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
|
||||
_ref_phase_times.print_all_references(2, false);
|
||||
@ -413,8 +410,6 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
|
||||
debug_phase(_gc_par_phases[StringDedupTableFixup]);
|
||||
}
|
||||
|
||||
debug_time("Clear Card Table", _cur_clear_ct_time_ms);
|
||||
|
||||
if (G1CollectedHeap::heap()->evacuation_failed()) {
|
||||
debug_time("Evacuation Failure", evac_fail_handling);
|
||||
trace_time("Recalculate Used", _cur_evac_fail_recalc_used);
|
||||
|
@ -73,7 +73,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
StringDedupQueueFixup,
|
||||
StringDedupTableFixup,
|
||||
RedirtyCards,
|
||||
PreserveCMReferents,
|
||||
YoungFreeCSet,
|
||||
NonYoungFreeCSet,
|
||||
GCParPhasesSentinel
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1MonitoringSupport.hpp"
|
||||
#include "gc/g1/g1Policy.hpp"
|
||||
#include "gc/shared/collectorCounters.hpp"
|
||||
#include "gc/shared/hSpaceCounters.hpp"
|
||||
#include "memory/metaspaceCounters.hpp"
|
||||
|
||||
|
@ -38,6 +38,7 @@
|
||||
#include "gc/g1/g1RootClosures.hpp"
|
||||
#include "gc/g1/g1RootProcessor.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "gc/shared/referenceProcessor.hpp"
|
||||
#include "gc/shared/weakProcessor.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "runtime/mutex.hpp"
|
||||
|
@ -496,7 +496,7 @@ void ASPSYoungGen::reset_after_change() {
|
||||
|
||||
_reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
|
||||
(HeapWord*)virtual_space()->high_boundary());
|
||||
PSScavenge::reference_processor()->set_span(_reserved);
|
||||
PSScavenge::set_subject_to_discovery_span(_reserved);
|
||||
|
||||
HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
|
||||
HeapWord* eden_bottom = eden_space()->bottom();
|
||||
|
@ -31,7 +31,7 @@
|
||||
#include "gc/parallel/objectStartArray.inline.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
|
||||
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
|
||||
#include "gc/parallel/psMarkSweep.hpp"
|
||||
#include "gc/parallel/psMarkSweepProxy.hpp"
|
||||
#include "gc/parallel/psMemoryPool.hpp"
|
||||
#include "gc/parallel/psParallelCompact.inline.hpp"
|
||||
#include "gc/parallel/psPromotionManager.hpp"
|
||||
@ -48,6 +48,7 @@
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "services/memoryManager.hpp"
|
||||
#include "services/memTracker.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/vmError.hpp"
|
||||
|
||||
PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
|
||||
@ -155,7 +156,7 @@ void ParallelScavengeHeap::post_initialize() {
|
||||
if (UseParallelOldGC) {
|
||||
PSParallelCompact::post_initialize();
|
||||
} else {
|
||||
PSMarkSweep::initialize();
|
||||
PSMarkSweepProxy::initialize();
|
||||
}
|
||||
PSPromotionManager::initialize();
|
||||
}
|
||||
@ -406,7 +407,7 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
|
||||
bool maximum_compaction = clear_all_soft_refs;
|
||||
PSParallelCompact::invoke(maximum_compaction);
|
||||
} else {
|
||||
PSMarkSweep::invoke(clear_all_soft_refs);
|
||||
PSMarkSweepProxy::invoke(clear_all_soft_refs);
|
||||
}
|
||||
}
|
||||
|
||||
@ -545,7 +546,7 @@ bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
|
||||
jlong ParallelScavengeHeap::millis_since_last_gc() {
|
||||
return UseParallelOldGC ?
|
||||
PSParallelCompact::millis_since_last_gc() :
|
||||
PSMarkSweep::millis_since_last_gc();
|
||||
PSMarkSweepProxy::millis_since_last_gc();
|
||||
}
|
||||
|
||||
void ParallelScavengeHeap::prepare_for_verify() {
|
||||
@ -602,7 +603,7 @@ void ParallelScavengeHeap::print_tracing_info() const {
|
||||
AdaptiveSizePolicyOutput::print();
|
||||
log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
|
||||
log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
|
||||
UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds());
|
||||
UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
|
||||
}
|
||||
|
||||
|
||||
|
@ -26,13 +26,13 @@
|
||||
#define SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_INLINE_HPP
|
||||
|
||||
#include "gc/parallel/parallelScavengeHeap.hpp"
|
||||
#include "gc/parallel/psMarkSweep.hpp"
|
||||
#include "gc/parallel/psMarkSweepProxy.hpp"
|
||||
#include "gc/parallel/psParallelCompact.inline.hpp"
|
||||
#include "gc/parallel/psScavenge.hpp"
|
||||
|
||||
inline size_t ParallelScavengeHeap::total_invocations() {
|
||||
return UseParallelOldGC ? PSParallelCompact::total_invocations() :
|
||||
PSMarkSweep::total_invocations();
|
||||
PSMarkSweepProxy::total_invocations();
|
||||
}
|
||||
|
||||
inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
|
||||
|
@ -65,9 +65,11 @@ elapsedTimer PSMarkSweep::_accumulated_time;
|
||||
jlong PSMarkSweep::_time_of_last_gc = 0;
|
||||
CollectorCounters* PSMarkSweep::_counters = NULL;
|
||||
|
||||
SpanSubjectToDiscoveryClosure PSMarkSweep::_span_based_discoverer;
|
||||
|
||||
void PSMarkSweep::initialize() {
|
||||
MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
|
||||
set_ref_processor(new ReferenceProcessor(mr)); // a vanilla ref proc
|
||||
_span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region());
|
||||
set_ref_processor(new ReferenceProcessor(&_span_based_discoverer)); // a vanilla ref proc
|
||||
_counters = new CollectorCounters("PSMarkSweep", 1);
|
||||
}
|
||||
|
||||
@ -258,7 +260,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
|
||||
DerivedPointerTable::update_pointers();
|
||||
#endif
|
||||
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
|
||||
|
||||
ref_processor()->enqueue_discovered_references(NULL, &pt);
|
||||
|
||||
@ -537,7 +539,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
|
||||
GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
|
||||
|
||||
ref_processor()->setup_policy(clear_all_softrefs);
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
|
||||
const ReferenceProcessorStats& stats =
|
||||
ref_processor()->process_discovered_references(
|
||||
is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
|
||||
@ -563,7 +565,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
|
||||
CodeCache::do_unloading(is_alive_closure(), purged_class);
|
||||
|
||||
// Prune dead klasses from subklass/sibling/implementor lists.
|
||||
Klass::clean_weak_klass_links();
|
||||
Klass::clean_weak_klass_links(purged_class);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -27,6 +27,7 @@
|
||||
|
||||
#include "gc/serial/markSweep.hpp"
|
||||
#include "gc/shared/collectorCounters.hpp"
|
||||
#include "gc/shared/referenceProcessor.hpp"
|
||||
#include "utilities/stack.hpp"
|
||||
|
||||
class PSAdaptiveSizePolicy;
|
||||
@ -39,6 +40,8 @@ class PSMarkSweep : public MarkSweep {
|
||||
static jlong _time_of_last_gc; // ms
|
||||
static CollectorCounters* _counters;
|
||||
|
||||
static SpanSubjectToDiscoveryClosure _span_based_discoverer;
|
||||
|
||||
// Closure accessors
|
||||
static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
|
||||
static VoidClosure* follow_stack_closure() { return &MarkSweep::follow_stack_closure; }
|
||||
|
53
src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp
Normal file
@ -0,0 +1,53 @@
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
#define SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP

#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
#include "gc/parallel/psMarkSweep.hpp"
#endif

#if INCLUDE_SERIALGC
namespace PSMarkSweepProxy {
  inline void initialize() { PSMarkSweep::initialize(); }
  inline void invoke(bool maximum_heap_compaction) { PSMarkSweep::invoke(maximum_heap_compaction); }
  inline bool invoke_no_policy(bool clear_all_softrefs) { return PSMarkSweep::invoke_no_policy(clear_all_softrefs); }
  inline jlong millis_since_last_gc() { return PSMarkSweep::millis_since_last_gc(); }
  inline elapsedTimer* accumulated_time() { return PSMarkSweep::accumulated_time(); }
  inline uint total_invocations() { return PSMarkSweep::total_invocations(); }
};
#else
namespace PSMarkSweepProxy {
  inline void initialize() { fatal("Serial GC excluded from build"); }
  inline void invoke(bool) { fatal("Serial GC excluded from build"); }
  inline bool invoke_no_policy(bool) { fatal("Serial GC excluded from build"); return false;}
  inline jlong millis_since_last_gc() { fatal("Serial GC excluded from build"); return 0L; }
  inline elapsedTimer* accumulated_time() { fatal("Serial GC excluded from build"); return NULL; }
  inline uint total_invocations() { fatal("Serial GC excluded from build"); return 0u; }
};
#endif

#endif // SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
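The new psMarkSweepProxy.hpp above wraps every PSMarkSweep entry point in inline forwarders so that Parallel GC callers no longer depend on the Serial GC being part of the build. The following is a minimal, self-contained sketch of the same pattern under assumed names (FEATURE_X_INCLUDED, FeatureX, FeatureXProxy are illustrative, not HotSpot's): when the optional subsystem is compiled in, the proxy forwards to the real implementation; when it is excluded, the proxy still compiles and links but fails fast if reached at run time.

#include <cstdio>
#include <cstdlib>

// Illustrative feature switch; HotSpot derives INCLUDE_SERIALGC from the build system.
#define FEATURE_X_INCLUDED 1

#if FEATURE_X_INCLUDED
// Real implementation, only present when the optional feature is in the build.
namespace FeatureX {
  inline void initialize()        { std::puts("FeatureX initialized"); }
  inline int  total_invocations() { return 42; }
}
// The proxy simply forwards to the real implementation.
namespace FeatureXProxy {
  inline void initialize()        { FeatureX::initialize(); }
  inline int  total_invocations() { return FeatureX::total_invocations(); }
}
#else
// Stand-in for HotSpot's fatal(): report and abort.
[[noreturn]] inline void fatal(const char* msg) {
  std::fprintf(stderr, "fatal: %s\n", msg);
  std::abort();
}
// Stubs keep callers compiling and linking when the feature is excluded,
// but fail fast if they are ever reached at run time.
namespace FeatureXProxy {
  inline void initialize()        { fatal("FeatureX excluded from build"); }
  inline int  total_invocations() { fatal("FeatureX excluded from build"); return 0; }
}
#endif

int main() {
  // Callers only ever name the proxy, so they build identically either way.
  FeatureXProxy::initialize();
  std::printf("invocations: %d\n", FeatureXProxy::total_invocations());
  return 0;
}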
@ -139,10 +139,13 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
|
||||
SpaceDecorator::Clear,
|
||||
SpaceDecorator::Mangle);
|
||||
|
||||
#if INCLUDE_SERIALGC
|
||||
_object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
|
||||
|
||||
if (_object_mark_sweep == NULL)
|
||||
if (_object_mark_sweep == NULL) {
|
||||
vm_exit_during_initialization("Could not complete allocation of old generation");
|
||||
}
|
||||
#endif // INCLUDE_SERIALGC
|
||||
|
||||
// Update the start_array
|
||||
start_array()->set_covered_region(cmr);
|
||||
@ -163,6 +166,8 @@ bool PSOldGen::is_allocated() {
|
||||
return virtual_space()->reserved_size() != 0;
|
||||
}
|
||||
|
||||
#if INCLUDE_SERIALGC
|
||||
|
||||
void PSOldGen::precompact() {
|
||||
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
|
||||
|
||||
@ -183,6 +188,8 @@ void PSOldGen::compact() {
|
||||
object_mark_sweep()->compact(ZapUnusedHeapArea);
|
||||
}
|
||||
|
||||
#endif // INCLUDE_SERIALGC
|
||||
|
||||
size_t PSOldGen::contiguous_available() const {
|
||||
return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
|
||||
}
|
||||
|
@ -45,7 +45,9 @@ class PSOldGen : public CHeapObj<mtGC> {
|
||||
PSVirtualSpace* _virtual_space; // Controls mapping and unmapping of virtual mem
|
||||
ObjectStartArray _start_array; // Keeps track of where objects start in a 512b block
|
||||
MutableSpace* _object_space; // Where all the objects live
|
||||
#if INCLUDE_SERIALGC
|
||||
PSMarkSweepDecorator* _object_mark_sweep; // The mark sweep view of _object_space
|
||||
#endif
|
||||
const char* const _name; // Name of this generation.
|
||||
|
||||
// Performance Counters
|
||||
@ -150,17 +152,21 @@ class PSOldGen : public CHeapObj<mtGC> {
|
||||
}
|
||||
|
||||
MutableSpace* object_space() const { return _object_space; }
|
||||
#if INCLUDE_SERIALGC
|
||||
PSMarkSweepDecorator* object_mark_sweep() const { return _object_mark_sweep; }
|
||||
#endif
|
||||
ObjectStartArray* start_array() { return &_start_array; }
|
||||
PSVirtualSpace* virtual_space() const { return _virtual_space;}
|
||||
|
||||
// Has the generation been successfully allocated?
|
||||
bool is_allocated();
|
||||
|
||||
#if INCLUDE_SERIALGC
|
||||
// MarkSweep methods
|
||||
virtual void precompact();
|
||||
void adjust_pointers();
|
||||
void compact();
|
||||
#endif
|
||||
|
||||
// Size info
|
||||
size_t capacity_in_bytes() const { return object_space()->capacity_in_bytes(); }
|
||||
|
@ -34,8 +34,6 @@
|
||||
#include "gc/parallel/pcTasks.hpp"
|
||||
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
|
||||
#include "gc/parallel/psCompactionManager.inline.hpp"
|
||||
#include "gc/parallel/psMarkSweep.hpp"
|
||||
#include "gc/parallel/psMarkSweepDecorator.hpp"
|
||||
#include "gc/parallel/psOldGen.hpp"
|
||||
#include "gc/parallel/psParallelCompact.inline.hpp"
|
||||
#include "gc/parallel/psPromotionManager.inline.hpp"
|
||||
@ -72,6 +70,7 @@
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/events.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
|
||||
#include <math.h>
|
||||
@ -117,6 +116,7 @@ ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
|
||||
|
||||
SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];
|
||||
|
||||
SpanSubjectToDiscoveryClosure PSParallelCompact::_span_based_discoverer;
|
||||
ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;
|
||||
|
||||
double PSParallelCompact::_dwl_mean;
|
||||
@ -843,14 +843,14 @@ bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap(
|
||||
|
||||
void PSParallelCompact::post_initialize() {
|
||||
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
|
||||
MemRegion mr = heap->reserved_region();
|
||||
_span_based_discoverer.set_span(heap->reserved_region());
|
||||
_ref_processor =
|
||||
new ReferenceProcessor(mr, // span
|
||||
new ReferenceProcessor(&_span_based_discoverer,
|
||||
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
|
||||
ParallelGCThreads, // mt processing degree
|
||||
true, // mt discovery
|
||||
ParallelGCThreads, // mt discovery degree
|
||||
true, // atomic_discovery
|
||||
ParallelGCThreads, // mt processing degree
|
||||
true, // mt discovery
|
||||
ParallelGCThreads, // mt discovery degree
|
||||
true, // atomic_discovery
|
||||
&_is_alive_closure); // non-header is alive closure
|
||||
_counters = new CollectorCounters("PSParallelCompact", 1);
|
||||
|
||||
@ -1038,7 +1038,7 @@ void PSParallelCompact::post_compact()
|
||||
DerivedPointerTable::update_pointers();
|
||||
#endif
|
||||
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_queues());
|
||||
|
||||
ref_processor()->enqueue_discovered_references(NULL, &pt);
|
||||
|
||||
@ -2105,7 +2105,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
|
||||
GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
|
||||
|
||||
ReferenceProcessorStats stats;
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_queues());
|
||||
if (ref_processor()->processing_is_mt()) {
|
||||
RefProcTaskExecutor task_executor;
|
||||
stats = ref_processor()->process_discovered_references(
|
||||
@ -2139,7 +2139,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
|
||||
CodeCache::do_unloading(is_alive_closure(), purged_class);
|
||||
|
||||
// Prune dead klasses from subklass/sibling/implementor lists.
|
||||
Klass::clean_weak_klass_links();
|
||||
Klass::clean_weak_klass_links(purged_class);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -968,6 +968,7 @@ class PSParallelCompact : AllStatic {
|
||||
static SpaceInfo _space_info[last_space_id];
|
||||
|
||||
// Reference processing (used in ...follow_contents)
|
||||
static SpanSubjectToDiscoveryClosure _span_based_discoverer;
|
||||
static ReferenceProcessor* _ref_processor;
|
||||
|
||||
// Values computed at initialization and used by dead_wood_limiter().
|
||||
|
@ -28,7 +28,7 @@
|
||||
#include "gc/parallel/gcTaskManager.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.hpp"
|
||||
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
|
||||
#include "gc/parallel/psMarkSweep.hpp"
|
||||
#include "gc/parallel/psMarkSweepProxy.hpp"
|
||||
#include "gc/parallel/psParallelCompact.inline.hpp"
|
||||
#include "gc/parallel/psScavenge.inline.hpp"
|
||||
#include "gc/parallel/psTasks.hpp"
|
||||
@ -58,18 +58,19 @@
|
||||
#include "services/memoryService.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
|
||||
HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
|
||||
int PSScavenge::_consecutive_skipped_scavenges = 0;
|
||||
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
|
||||
PSCardTable* PSScavenge::_card_table = NULL;
|
||||
bool PSScavenge::_survivor_overflow = false;
|
||||
uint PSScavenge::_tenuring_threshold = 0;
|
||||
HeapWord* PSScavenge::_young_generation_boundary = NULL;
|
||||
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
|
||||
elapsedTimer PSScavenge::_accumulated_time;
|
||||
STWGCTimer PSScavenge::_gc_timer;
|
||||
ParallelScavengeTracer PSScavenge::_gc_tracer;
|
||||
CollectorCounters* PSScavenge::_counters = NULL;
|
||||
HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
|
||||
int PSScavenge::_consecutive_skipped_scavenges = 0;
|
||||
SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
|
||||
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
|
||||
PSCardTable* PSScavenge::_card_table = NULL;
|
||||
bool PSScavenge::_survivor_overflow = false;
|
||||
uint PSScavenge::_tenuring_threshold = 0;
|
||||
HeapWord* PSScavenge::_young_generation_boundary = NULL;
|
||||
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
|
||||
elapsedTimer PSScavenge::_accumulated_time;
|
||||
STWGCTimer PSScavenge::_gc_timer;
|
||||
ParallelScavengeTracer PSScavenge::_gc_tracer;
|
||||
CollectorCounters* PSScavenge::_counters = NULL;
|
||||
|
||||
// Define before use
|
||||
class PSIsAliveClosure: public BoolObjectClosure {
|
||||
@ -234,7 +235,7 @@ bool PSScavenge::invoke() {
|
||||
if (UseParallelOldGC) {
|
||||
full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
|
||||
} else {
|
||||
full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
|
||||
full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
|
||||
}
|
||||
}
|
||||
|
||||
@ -416,7 +417,7 @@ bool PSScavenge::invoke_no_policy() {
|
||||
PSKeepAliveClosure keep_alive(promotion_manager);
|
||||
PSEvacuateFollowersClosure evac_followers(promotion_manager);
|
||||
ReferenceProcessorStats stats;
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_queues());
|
||||
if (reference_processor()->processing_is_mt()) {
|
||||
PSRefProcTaskExecutor task_executor;
|
||||
stats = reference_processor()->process_discovered_references(
|
||||
@ -766,10 +767,9 @@ void PSScavenge::initialize() {
|
||||
set_young_generation_boundary(young_gen->eden_space()->bottom());
|
||||
|
||||
// Initialize ref handling object for scavenging.
|
||||
MemRegion mr = young_gen->reserved();
|
||||
|
||||
_span_based_discoverer.set_span(young_gen->reserved());
|
||||
_ref_processor =
|
||||
new ReferenceProcessor(mr, // span
|
||||
new ReferenceProcessor(&_span_based_discoverer,
|
||||
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
|
||||
ParallelGCThreads, // mt processing degree
|
||||
true, // mt discovery
|
||||
|
@ -65,14 +65,15 @@ class PSScavenge: AllStatic {
|
||||
|
||||
protected:
|
||||
// Flags/counters
|
||||
static ReferenceProcessor* _ref_processor; // Reference processor for scavenging.
|
||||
static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing
|
||||
static PSCardTable* _card_table; // We cache the card table for fast access.
|
||||
static bool _survivor_overflow; // Overflow this collection
|
||||
static uint _tenuring_threshold; // tenuring threshold for next scavenge
|
||||
static elapsedTimer _accumulated_time; // total time spent on scavenge
|
||||
static STWGCTimer _gc_timer; // GC time book keeper
|
||||
static ParallelScavengeTracer _gc_tracer; // GC tracing
|
||||
static SpanSubjectToDiscoveryClosure _span_based_discoverer;
|
||||
static ReferenceProcessor* _ref_processor; // Reference processor for scavenging.
|
||||
static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing
|
||||
static PSCardTable* _card_table; // We cache the card table for fast access.
|
||||
static bool _survivor_overflow; // Overflow this collection
|
||||
static uint _tenuring_threshold; // tenuring threshold for next scavenge
|
||||
static elapsedTimer _accumulated_time; // total time spent on scavenge
|
||||
static STWGCTimer _gc_timer; // GC time book keeper
|
||||
static ParallelScavengeTracer _gc_tracer; // GC tracing
|
||||
// The lowest address possible for the young_gen.
|
||||
// This is used to decide if an oop should be scavenged,
|
||||
// cards should be marked, etc.
|
||||
@ -102,6 +103,9 @@ class PSScavenge: AllStatic {
|
||||
// Performance Counters
|
||||
static CollectorCounters* counters() { return _counters; }
|
||||
|
||||
static void set_subject_to_discovery_span(MemRegion mr) {
|
||||
_span_based_discoverer.set_span(mr);
|
||||
}
|
||||
// Used by scavenge_contents && psMarkSweep
|
||||
static ReferenceProcessor* const reference_processor() {
|
||||
assert(_ref_processor != NULL, "Sanity");
|
||||
|
@ -27,7 +27,6 @@
|
||||
#include "classfile/systemDictionary.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "gc/parallel/gcTaskManager.hpp"
|
||||
#include "gc/parallel/psMarkSweep.hpp"
|
||||
#include "gc/parallel/psCardTable.hpp"
|
||||
#include "gc/parallel/psPromotionManager.hpp"
|
||||
#include "gc/parallel/psPromotionManager.inline.hpp"
|
||||
|
@ -730,6 +730,8 @@ void PSYoungGen::object_iterate(ObjectClosure* blk) {
|
||||
to_space()->object_iterate(blk);
|
||||
}
|
||||
|
||||
#if INCLUDE_SERIALGC
|
||||
|
||||
void PSYoungGen::precompact() {
|
||||
eden_mark_sweep()->precompact();
|
||||
from_mark_sweep()->precompact();
|
||||
@ -749,6 +751,8 @@ void PSYoungGen::compact() {
|
||||
to_mark_sweep()->compact(false);
|
||||
}
|
||||
|
||||
#endif // INCLUDE_SERIALGC
|
||||
|
||||
void PSYoungGen::print() const { print_on(tty); }
|
||||
void PSYoungGen::print_on(outputStream* st) const {
|
||||
st->print(" %-15s", "PSYoungGen");
|
||||
@ -839,7 +843,7 @@ void PSYoungGen::reset_after_change() {
|
||||
void PSYoungGen::reset_survivors_after_shrink() {
|
||||
_reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
|
||||
(HeapWord*)virtual_space()->high_boundary());
|
||||
PSScavenge::reference_processor()->set_span(_reserved);
|
||||
PSScavenge::set_subject_to_discovery_span(_reserved);
|
||||
|
||||
MutableSpace* space_shrinking = NULL;
|
||||
if (from_space()->end() > to_space()->end()) {
|
||||
|
@ -123,9 +123,11 @@ class PSYoungGen : public CHeapObj<mtGC> {
|
||||
PSMarkSweepDecorator* from_mark_sweep() const { return _from_mark_sweep; }
|
||||
PSMarkSweepDecorator* to_mark_sweep() const { return _to_mark_sweep; }
|
||||
|
||||
#if INCLUDE_SERIALGC
|
||||
void precompact();
|
||||
void adjust_pointers();
|
||||
void compact();
|
||||
#endif
|
||||
|
||||
// Called during/after GC
|
||||
void swap_spaces();
|
||||
|
@ -24,7 +24,6 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
|
||||
#include "gc/parallel/psMarkSweep.hpp"
|
||||
#include "gc/parallel/psScavenge.hpp"
|
||||
#include "gc/parallel/vmPSOperations.hpp"
|
||||
#include "gc/shared/gcLocker.hpp"
|
||||
|
@ -56,7 +56,7 @@
|
||||
#include "utilities/copy.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/stack.inline.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_CMSGC
|
||||
#include "gc/cms/parOopClosures.hpp"
|
||||
#endif
|
||||
|
||||
@ -646,7 +646,7 @@ void DefNewGeneration::collect(bool full,
|
||||
FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
|
||||
ReferenceProcessor* rp = ref_processor();
|
||||
rp->setup_policy(clear_all_soft_refs);
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
|
||||
const ReferenceProcessorStats& stats =
|
||||
rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
|
||||
NULL, &pt);
|
||||
@ -1006,7 +1006,7 @@ HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
|
||||
// have to use it here, as well.
|
||||
HeapWord* result = eden()->par_allocate(word_size);
|
||||
if (result != NULL) {
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_CMSGC
|
||||
if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
|
||||
_old_gen->sample_eden_chunk();
|
||||
}
|
||||
@ -1024,7 +1024,7 @@ HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
|
||||
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
|
||||
bool is_tlab) {
|
||||
HeapWord* res = eden()->par_allocate(word_size);
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_CMSGC
|
||||
if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
|
||||
_old_gen->sample_eden_chunk();
|
||||
}
|
||||
|
@ -208,7 +208,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
|
||||
GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());
|
||||
|
||||
ref_processor()->setup_policy(clear_all_softrefs);
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
|
||||
ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
|
||||
const ReferenceProcessorStats& stats =
|
||||
ref_processor()->process_discovered_references(
|
||||
&is_alive, &keep_alive, &follow_stack_closure, NULL, &pt);
|
||||
@ -234,7 +234,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
|
||||
CodeCache::do_unloading(&is_alive, purged_class);
|
||||
|
||||
// Prune dead klasses from subklass/sibling/implementor lists.
|
||||
Klass::clean_weak_klass_links();
|
||||
Klass::clean_weak_klass_links(purged_class);
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -39,7 +39,7 @@
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_CMSGC
|
||||
#include "gc/cms/parOopClosures.hpp"
|
||||
#endif
|
||||
|
||||
|
@ -32,7 +32,14 @@
|
||||
volatile_nonstatic_field, \
|
||||
static_field) \
|
||||
nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \
|
||||
nonstatic_field(TenuredGeneration, _the_space, ContiguousSpace*)
|
||||
nonstatic_field(TenuredGeneration, _the_space, ContiguousSpace*) \
|
||||
\
|
||||
nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
|
||||
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
|
||||
nonstatic_field(DefNewGeneration, _age_table, AgeTable) \
|
||||
nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
|
||||
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
|
||||
nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*)
|
||||
|
||||
#define VM_TYPES_SERIALGC(declare_type, \
|
||||
declare_toplevel_type, \
|
||||
@ -41,6 +48,8 @@
|
||||
declare_type(TenuredGeneration, CardGeneration) \
|
||||
declare_type(TenuredSpace, OffsetTableContigSpace) \
|
||||
\
|
||||
declare_type(DefNewGeneration, Generation) \
|
||||
\
|
||||
declare_toplevel_type(TenuredGeneration*)
|
||||
|
||||
#define VM_INT_CONSTANTS_SERIALGC(declare_constant, \
|
||||
|
@ -27,17 +27,10 @@

#include "utilities/macros.hpp"

#if INCLUDE_ALL_GCS
#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
  f(G1BarrierSet)
#else
#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
#endif

// Do something for each concrete barrier set part of the build.
#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \
  f(CardTableBarrierSet) \
  FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
  G1GC_ONLY(f(G1BarrierSet))

#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
  f(ModRef)
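The hunk above drops the blanket INCLUDE_ALL_GCS guard in favour of splicing G1BarrierSet in through the per-GC G1GC_ONLY(...) macro. Below is a small, self-contained sketch of how this family of macros typically works; the macro and backend names are illustrative stand-ins, not the ones from utilities/macros.hpp. Each optional subsystem gets a FOO_ONLY(code) macro that expands to its argument when the subsystem is compiled in and to nothing otherwise, so an X-macro list can include entries conditionally.

#include <cstdio>

// Illustrative per-feature switches; HotSpot derives these from the build system.
#define INCLUDE_FEATURE_A 1
#define INCLUDE_FEATURE_B 0

#if INCLUDE_FEATURE_A
#define FEATURE_A_ONLY(code) code
#else
#define FEATURE_A_ONLY(code)
#endif

#if INCLUDE_FEATURE_B
#define FEATURE_B_ONLY(code) code
#else
#define FEATURE_B_ONLY(code)
#endif

// An X-macro list whose optional entries appear only when compiled in,
// in the same spirit as FOR_EACH_CONCRETE_BARRIER_SET_DO(f).
#define FOR_EACH_BACKEND(f) \
  f(Core)                   \
  FEATURE_A_ONLY(f(BackendA)) \
  FEATURE_B_ONLY(f(BackendB))

#define PRINT_BACKEND(name) std::puts(#name);

int main() {
  FOR_EACH_BACKEND(PRINT_BACKEND)  // prints Core and BackendA only
  return 0;
}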
@ -30,7 +30,7 @@
|
||||
#include "gc/shared/modRefBarrierSet.inline.hpp"
|
||||
#include "gc/shared/cardTableBarrierSet.inline.hpp"
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_G1GC
|
||||
#include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
|
||||
#endif
|
||||
|
||||
|
@ -153,14 +153,14 @@ class BlockOffsetSharedArray: public CHeapObj<mtGC> {
|
||||
|
||||
void fill_range(size_t start, size_t num_cards, u_char offset) {
|
||||
void* start_ptr = &_offset_array[start];
|
||||
#if INCLUDE_ALL_GCS
|
||||
// If collector is concurrent, special handling may be needed.
|
||||
assert(!UseG1GC, "Shouldn't be here when using G1");
|
||||
G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
|
||||
#if INCLUDE_CMSGC
|
||||
if (UseConcMarkSweepGC) {
|
||||
memset_with_concurrent_readers(start_ptr, offset, num_cards);
|
||||
return;
|
||||
}
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
#endif // INCLUDE_CMSGC
|
||||
memset(start_ptr, offset, num_cards);
|
||||
}
|
||||
|
||||
|
@ -208,7 +208,7 @@ void CardGeneration::compute_new_size() {
|
||||
|
||||
const size_t free_after_gc = free();
|
||||
const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
|
||||
log_trace(gc, heap)("TenuredGeneration::compute_new_size:");
|
||||
log_trace(gc, heap)("CardGeneration::compute_new_size:");
|
||||
log_trace(gc, heap)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
|
||||
minimum_free_percentage,
|
||||
maximum_used_percentage);
|
||||
|
@ -126,7 +126,7 @@ void CardTableBarrierSet::print_on(outputStream* st) const {
|
||||
// that specific collector in mind, and the documentation above suitably
|
||||
// extended and updated.
|
||||
void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
|
||||
#if defined(COMPILER2) || INCLUDE_JVMCI
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if (!ReduceInitialCardMarks) {
|
||||
return;
|
||||
}
|
||||
@ -148,13 +148,13 @@ void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop ne
|
||||
invalidate(mr);
|
||||
}
|
||||
}
|
||||
#endif // COMPILER2 || JVMCI
|
||||
#endif // COMPILER2_OR_JVMCI
|
||||
}
|
||||
|
||||
void CardTableBarrierSet::initialize_deferred_card_mark_barriers() {
|
||||
// Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
|
||||
// otherwise remains unused.
|
||||
#if defined(COMPILER2) || INCLUDE_JVMCI
|
||||
#if COMPILER2_OR_JVMCI
|
||||
_defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
|
||||
&& (DeferInitialCardMark || card_mark_must_follow_store());
|
||||
#else
|
||||
@ -163,7 +163,7 @@ void CardTableBarrierSet::initialize_deferred_card_mark_barriers() {
|
||||
}
|
||||
|
||||
void CardTableBarrierSet::flush_deferred_card_mark_barrier(JavaThread* thread) {
|
||||
#if defined(COMPILER2) || INCLUDE_JVMCI
|
||||
#if COMPILER2_OR_JVMCI
|
||||
MemRegion deferred = thread->deferred_card_mark();
|
||||
if (!deferred.is_empty()) {
|
||||
assert(_defer_initial_card_mark, "Otherwise should be empty");
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/shared/cardTableRS.hpp"
|
||||
#include "gc/shared/genCollectedHeap.hpp"
|
||||
#include "gc/shared/genOopClosures.hpp"
|
||||
#include "gc/shared/generation.hpp"
|
||||
#include "gc/shared/space.inline.hpp"
|
||||
#include "memory/allocation.inline.hpp"
|
||||
|
@ -45,13 +45,13 @@ void CollectedHeap::post_allocation_setup_common(Klass* klass,
|
||||
HeapWord* obj_ptr) {
|
||||
post_allocation_setup_no_klass_install(klass, obj_ptr);
|
||||
oop obj = (oop)obj_ptr;
|
||||
#if ! INCLUDE_ALL_GCS
|
||||
obj->set_klass(klass);
|
||||
#else
|
||||
#if (INCLUDE_G1GC || INCLUDE_CMSGC)
|
||||
// Need a release store to ensure array/class length, mark word, and
|
||||
// object zeroing are visible before setting the klass non-NULL, for
|
||||
// concurrent collectors.
|
||||
obj->release_set_klass(klass);
|
||||
#else
|
||||
obj->set_klass(klass);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -48,11 +48,8 @@
|
||||
// Forward declarations.
|
||||
class GenCollectorPolicy;
|
||||
class AdaptiveSizePolicy;
|
||||
#if INCLUDE_ALL_GCS
|
||||
class ConcurrentMarkSweepPolicy;
|
||||
class G1CollectorPolicy;
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
class MarkSweepPolicy;
|
||||
|
||||
class CollectorPolicy : public CHeapObj<mtGC> {
|
||||
|
@ -23,16 +23,23 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/serial/serialArguments.hpp"
|
||||
#include "gc/shared/gcConfig.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#include "gc/parallel/parallelArguments.hpp"
|
||||
#if INCLUDE_CMSGC
|
||||
#include "gc/cms/cmsArguments.hpp"
|
||||
#endif
|
||||
#if INCLUDE_G1GC
|
||||
#include "gc/g1/g1Arguments.hpp"
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
#endif
|
||||
#if INCLUDE_PARALLELGC
|
||||
#include "gc/parallel/parallelArguments.hpp"
|
||||
#endif
|
||||
#if INCLUDE_SERIALGC
|
||||
#include "gc/serial/serialArguments.hpp"
|
||||
#endif
|
||||
|
||||
struct SupportedGC {
|
||||
bool& _flag;
|
||||
@ -44,23 +51,19 @@ struct SupportedGC {
|
||||
_flag(flag), _name(name), _arguments(arguments), _hs_err_name(hs_err_name) {}
|
||||
};
|
||||
|
||||
static SerialArguments serialArguments;
|
||||
#if INCLUDE_ALL_GCS
|
||||
static ParallelArguments parallelArguments;
|
||||
static CMSArguments cmsArguments;
|
||||
static G1Arguments g1Arguments;
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
CMSGC_ONLY(static CMSArguments cmsArguments;)
|
||||
G1GC_ONLY(static G1Arguments g1Arguments;)
|
||||
PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
|
||||
SERIALGC_ONLY(static SerialArguments serialArguments;)
|
||||
|
||||
// Table of supported GCs, for translating between command
|
||||
// line flag, CollectedHeap::Name and GCArguments instance.
|
||||
static const SupportedGC SupportedGCs[] = {
|
||||
SupportedGC(UseSerialGC, CollectedHeap::Serial, serialArguments, "serial gc"),
|
||||
#if INCLUDE_ALL_GCS
|
||||
SupportedGC(UseParallelGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"),
|
||||
SupportedGC(UseParallelOldGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"),
|
||||
SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS, cmsArguments, "concurrent mark sweep gc"),
|
||||
SupportedGC(UseG1GC, CollectedHeap::G1, g1Arguments, "g1 gc"),
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS, cmsArguments, "concurrent mark sweep gc"))
|
||||
G1GC_ONLY_ARG(SupportedGC(UseG1GC, CollectedHeap::G1, g1Arguments, "g1 gc"))
|
||||
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
|
||||
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelOldGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
|
||||
SERIALGC_ONLY_ARG(SupportedGC(UseSerialGC, CollectedHeap::Serial, serialArguments, "serial gc"))
|
||||
};
|
||||
|
||||
#define FOR_EACH_SUPPORTED_GC(var) \
|
||||
@ -70,19 +73,25 @@ GCArguments* GCConfig::_arguments = NULL;
|
||||
bool GCConfig::_gc_selected_ergonomically = false;
|
||||
|
||||
void GCConfig::select_gc_ergonomically() {
|
||||
#if INCLUDE_ALL_GCS
|
||||
if (os::is_server_class_machine()) {
|
||||
#if INCLUDE_G1GC
|
||||
FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
|
||||
} else {
|
||||
#elif INCLUDE_PARALLELGC
|
||||
FLAG_SET_ERGO_IF_DEFAULT(bool, UseParallelGC, true);
|
||||
#elif INCLUDE_SERIALGC
|
||||
FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
|
||||
#endif
|
||||
} else {
|
||||
#if INCLUDE_SERIALGC
|
||||
FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
|
||||
#endif
|
||||
}
|
||||
#else
|
||||
UNSUPPORTED_OPTION(UseG1GC);
|
||||
UNSUPPORTED_OPTION(UseParallelGC);
|
||||
UNSUPPORTED_OPTION(UseParallelOldGC);
|
||||
UNSUPPORTED_OPTION(UseConcMarkSweepGC);
|
||||
FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
|
||||
NOT_CMSGC( UNSUPPORTED_OPTION(UseConcMarkSweepGC));
|
||||
NOT_G1GC( UNSUPPORTED_OPTION(UseG1GC);)
|
||||
NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelGC);)
|
||||
NOT_PARALLELGC(UNSUPPORTED_OPTION(UseParallelOldGC));
|
||||
NOT_SERIALGC( UNSUPPORTED_OPTION(UseSerialGC);)
|
||||
}
|
||||
|
||||
bool GCConfig::is_no_gc_selected() {
|
||||
@ -128,17 +137,25 @@ GCArguments* GCConfig::select_gc() {
|
||||
_gc_selected_ergonomically = true;
|
||||
}
|
||||
|
||||
if (is_exactly_one_gc_selected()) {
|
||||
// Exacly one GC selected
|
||||
FOR_EACH_SUPPORTED_GC(gc) {
|
||||
if (gc->_flag) {
|
||||
return &gc->_arguments;
|
||||
}
|
||||
if (!is_exactly_one_gc_selected()) {
|
||||
// More than one GC selected
|
||||
vm_exit_during_initialization("Multiple garbage collectors selected", NULL);
|
||||
}
|
||||
|
||||
#if INCLUDE_PARALLELGC && !INCLUDE_SERIALGC
|
||||
if (FLAG_IS_CMDLINE(UseParallelOldGC) && !UseParallelOldGC) {
|
||||
vm_exit_during_initialization("This JVM build only supports UseParallelOldGC as the full GC");
|
||||
}
|
||||
#endif
|
||||
|
||||
// Exactly one GC selected
|
||||
FOR_EACH_SUPPORTED_GC(gc) {
|
||||
if (gc->_flag) {
|
||||
return &gc->_arguments;
|
||||
}
|
||||
}
|
||||
|
||||
// More than one GC selected
|
||||
vm_exit_during_initialization("Multiple garbage collectors selected", NULL);
|
||||
fatal("Should have found the selected GC");
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -36,7 +36,7 @@
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/ticks.inline.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_G1GC
|
||||
#include "gc/g1/evacuationInfo.hpp"
|
||||
#endif
|
||||
|
||||
@ -184,7 +184,7 @@ void OldGCTracer::report_concurrent_mode_failure() {
|
||||
send_concurrent_mode_failure_event();
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_G1GC
|
||||
void G1MMUTracer::report_mmu(double time_slice_sec, double gc_time_sec, double max_time_sec) {
|
||||
send_g1_mmu_event(time_slice_sec * MILLIUNITS,
|
||||
gc_time_sec * MILLIUNITS,
|
||||
@ -252,4 +252,4 @@ void G1OldTracer::set_gc_cause(GCCause::Cause cause) {
|
||||
_shared_gc_info.set_cause(cause);
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif // INCLUDE_G1GC
|
||||
|
@ -34,7 +34,7 @@
|
||||
#include "memory/referenceType.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/ticks.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_G1GC
|
||||
#include "gc/g1/g1YCTypes.hpp"
|
||||
#endif
|
||||
|
||||
@ -97,7 +97,7 @@ class ParallelOldGCInfo {
|
||||
void* dense_prefix() const { return _dense_prefix; }
|
||||
};
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_G1GC
|
||||
|
||||
class G1YoungGCInfo {
|
||||
G1YCType _type;
|
||||
@ -109,7 +109,7 @@ class G1YoungGCInfo {
|
||||
G1YCType type() const { return _type; }
|
||||
};
|
||||
|
||||
#endif // INCLUDE_ALL_GCS
|
||||
#endif // INCLUDE_G1GC
|
||||
|
||||
class GCTracer : public ResourceObj {
|
||||
protected:
|
||||
@ -232,7 +232,7 @@ class ParNewTracer : public YoungGCTracer {
|
||||
ParNewTracer() : YoungGCTracer(ParNew) {}
|
||||
};
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_G1GC
|
||||
class G1MMUTracer : public AllStatic {
|
||||
static void send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms);
|
||||
|
||||
@ -294,7 +294,7 @@ class G1FullGCTracer : public OldGCTracer {
|
||||
G1FullGCTracer() : OldGCTracer(G1Full) {}
|
||||
};
|
||||
|
||||
#endif
|
||||
#endif // INCLUDE_G1GC
|
||||
|
||||
class CMSTracer : public OldGCTracer {
|
||||
public:
|
||||
|
@ -31,11 +31,11 @@
|
||||
#include "runtime/os.hpp"
|
||||
#include "trace/traceBackend.hpp"
|
||||
#include "trace/tracing.hpp"
|
||||
#include "tracefiles/traceEventClasses.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_G1GC
|
||||
#include "gc/g1/evacuationInfo.hpp"
|
||||
#include "gc/g1/g1YCTypes.hpp"
|
||||
#include "tracefiles/traceEventClasses.hpp"
|
||||
#endif
|
||||
|
||||
// All GC dependencies against the trace framework is contained within this file.
|
||||
@ -188,7 +188,7 @@ void OldGCTracer::send_concurrent_mode_failure_event() {
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_ALL_GCS
|
||||
#if INCLUDE_G1GC
|
||||
void G1NewTracer::send_g1_young_gc_event() {
|
||||
EventG1GarbageCollection e(UNTIMED);
|
||||
if (e.should_commit()) {
|
||||
@ -311,7 +311,7 @@ void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
#endif // INCLUDE_G1GC
|
||||
|
||||
static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
|
||||
TraceStructVirtualSpace space;
|
||||
|
Some files were not shown because too many files have changed in this diff.