Lana Steuck 2011-12-15 19:49:58 -08:00
commit fad054567f
534 changed files with 14585 additions and 20872 deletions

View File

@ -136,3 +136,6 @@ cc1f5ce8e504d350e0b0c28c5f84333f8d540132 jdk8-b11
86db042b3385c338e17f7664447fdc7d406dd19e jdk8-b12
4cc0ef72c812943743ef4765f1100e2fbe2b1a08 jdk8-b13
9ffaa48dbfb0f5936c2b789867d0785faec7071d jdk8-b14
b5060eae3b32fd9f884a09774338cd8186d7fafa jdk8-b15
736a63b854f321c7824b7e47890135f80aee05e3 jdk8-b16
f0eccb2946986fb9626efde7d8ed9c8192623f5c jdk8-b17

View File

@ -136,3 +136,6 @@ a6c4c248e8fa350c35014fa94bab5ac1a1ac3299 jdk8-b10
8e2104d565baee473895d5eba20e39f85ab4bf9f jdk8-b12
26fb81a1e9ceb9baffba216acd9ded62e9e9d5ab jdk8-b13
23aa7f2c80a2fa354c80decf03e7c2018177ef4e jdk8-b14
a4f28069d44a379cda99dd1d921d19f819726d22 jdk8-b15
4e06ae613e99549835896720c7a68c29ad5543f5 jdk8-b17
4e06ae613e99549835896720c7a68c29ad5543f5 jdk8-b16

View File

@ -136,3 +136,6 @@ cda87f7fefcee3b89742a57ce5ad9b03a54c210d jdk8-b10
31d70911b712c6b4e580a3110363d5f044cfed7a jdk8-b12
5b9d9b839d3d7fe02347827221c97c6d242a6f96 jdk8-b13
e59c47de1ad8982ff3b0e843773a6902b36c2337 jdk8-b14
7da69e7175a7c7564ee6d0e52255cbb8a57ef577 jdk8-b15
82dc033975bb9b553b4ef97b6d483eda8de32e0f jdk8-b17
82dc033975bb9b553b4ef97b6d483eda8de32e0f jdk8-b16

View File

@ -200,3 +200,8 @@ d815de2e85e511b7deab2a83cf80c0224d011da9 jdk8-b10
b92ca8e229d29004f840c67e620833d23a346761 jdk8-b13
088d09a130ff02d8f5f05e92256baabe412f0439 jdk8-b14
6c2a55d4902f202e1c2de1df17b7da083a2c31e8 hs23-b06
fde2a39ed7f39233b287fbc278f437aac06c275b jdk8-b15
d1f29d4e0bc60e8bd7ae961f1306d8ab33290212 jdk8-b17
d1f29d4e0bc60e8bd7ae961f1306d8ab33290212 jdk8-b16
6de8c9ba5907e4c5ca05ac4b8d84a8e2cbd92399 hs23-b07
a2fef924d8e6f37dac2a887315e3502876cc8e24 hs23-b08

View File

@ -55,6 +55,9 @@
# The makefiles are split this way so that "make foo" will run faster by not
# having to read the dependency files for the vm.
# needs to be set here since this Makefile doesn't include defs.make
OS_VENDOR:=$(shell uname -s)
include $(GAMMADIR)/make/scm.make
include $(GAMMADIR)/make/altsrc.make
@ -159,8 +162,15 @@ ifndef HOTSPOT_VM_DISTRO
endif
endif
# MACOSX FIXME: we should be able to run test_gamma (see MACOSX_PORT-214)
ifdef ALWAYS_PASS_TEST_GAMMA
ifeq ($(OS_VENDOR), Darwin)
# MACOSX FIXME: we should be able to run test_gamma (see MACOSX_PORT-214)
ifeq ($(ALWAYS_PASS_TEST_GAMMA),)
# ALWAYS_PASS_TEST_GAMMA wasn't set so we default to true on MacOS X
# until MACOSX_PORT-214 is fixed
ALWAYS_PASS_TEST_GAMMA=true
endif
endif
ifeq ($(ALWAYS_PASS_TEST_GAMMA), true)
TEST_GAMMA_STATUS= echo 'exit 0';
else
TEST_GAMMA_STATUS=

View File

@ -86,7 +86,6 @@ CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
# Allow the user to turn off precompiled headers from the command line.
ifneq ($(USE_PRECOMPILED_HEADER),0)
USE_PRECOMPILED_HEADER=1
PRECOMPILED_HEADER_DIR=.
PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
@ -216,7 +215,7 @@ DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
endif
# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
ifneq ($(USE_PRECOMPILED_HEADER),1)
ifeq ($(USE_PRECOMPILED_HEADER),0)
CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
endif
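
For context: passing -DDONT_USE_PRECOMPILED_HEADER works because precompiled.hpp wraps its whole include list in a guard, which turns the header into a no-op. A minimal sketch of that pattern (the include shown is illustrative, not the actual list):

// precompiled.hpp (sketch of the guard pattern)
#ifndef DONT_USE_PRECOMPILED_HEADER
#include "utilities/globalDefinitions.hpp"
// ... the rest of the VM headers ...
#endif // DONT_USE_PRECOMPILED_HEADER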

View File

@ -37,11 +37,24 @@ include $(GAMMADIR)/make/sa.files
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
# tools.jar is needed by the JDI - SA binding
ifeq ($(SA_APPLE_BOOT_JAVA),true)
SA_CLASSPATH = $(BOOT_JAVA_HOME)/bundle/Classes/classes.jar
# SA-JDI depends on the standard JDI classes.
# Default SA_CLASSPATH location:
DEF_SA_CLASSPATH=$(BOOT_JAVA_HOME)/lib/tools.jar
ifeq ($(ALT_SA_CLASSPATH),)
# no alternate specified; see if default exists
SA_CLASSPATH=$(shell test -f $(DEF_SA_CLASSPATH) && echo $(DEF_SA_CLASSPATH))
ifeq ($(SA_CLASSPATH),)
# the default doesn't exist
ifeq ($(OS_VENDOR), Darwin)
# A JDK from Apple doesn't have tools.jar; the JDI classes
# are in the regular classes.jar file.
APPLE_JAR=$(BOOT_JAVA_HOME)/bundle/Classes/classes.jar
SA_CLASSPATH=$(shell test -f $(APPLE_JAR) && echo $(APPLE_JAR))
endif
endif
else
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
_JUNK_ := $(shell echo >&2 "INFO: ALT_SA_CLASSPATH=$(ALT_SA_CLASSPATH)")
SA_CLASSPATH=$(shell test -f $(ALT_SA_CLASSPATH) && echo $(ALT_SA_CLASSPATH))
endif
# TODO: if it's a modules image, check if SA module is installed.
@ -72,8 +85,8 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
exit 1; \
fi
$(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
$(QUIETLY) if [ ! -f "$(SA_CLASSPATH)" -a ! -d $(MODULELIB_PATH) ] ; then \
echo "Cannot find JDI classes. Use 1.6.0 or later version of JDK."; \
echo ""; \
exit 1; \
fi

View File

@ -47,12 +47,10 @@ VM = $(GAMMADIR)/src/share/vm
Plat_File = $(Platform_file)
CDG = cd $(GENERATED);
ifdef USE_PRECOMPILED_HEADER
PrecompiledOption = -DUSE_PRECOMPILED_HEADER
UpdatePCH = $(MAKE) -f vm.make $(PRECOMPILED_HEADER) $(MFLAGS)
ifneq ($(USE_PRECOMPILED_HEADER),0)
UpdatePCH = $(MAKE) -f vm.make $(PRECOMPILED_HEADER) $(MFLAGS)
else
UpdatePCH = \# precompiled header is not used
PrecompiledOption =
UpdatePCH = \# precompiled header is not used
endif
Cached_plat = $(GENERATED)/platform.current

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=23
HS_MINOR_VER=0
HS_BUILD_NUMBER=06
HS_BUILD_NUMBER=08
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

View File

@ -248,7 +248,7 @@ jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
jprt.my.solaris.sparc.test.targets= \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \
${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \
@ -267,7 +267,7 @@ jprt.my.solaris.sparc.test.targets= \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_default, \
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_nontiered, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_CMS, \
@ -276,7 +276,7 @@ jprt.my.solaris.sparc.test.targets= \
jprt.my.solaris.sparcv9.test.targets= \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
${jprt.my.solaris.sparcv9}-product-c2-runThese, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_default, \
@ -294,7 +294,7 @@ jprt.my.solaris.sparcv9.test.targets= \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \
@ -303,7 +303,7 @@ jprt.my.solaris.sparcv9.test.targets= \
jprt.my.solaris.x64.test.targets= \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \
${jprt.my.solaris.x64}-product-c2-runThese, \
${jprt.my.solaris.x64}-product-c2-runThese_Xcomp, \
@ -322,7 +322,7 @@ jprt.my.solaris.x64.test.targets= \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
@ -331,7 +331,7 @@ jprt.my.solaris.x64.test.targets= \
jprt.my.solaris.i586.test.targets= \
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.solaris.i586}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.solaris.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \
${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \
@ -358,7 +358,7 @@ jprt.my.solaris.i586.test.targets= \
${jprt.my.solaris.i586}-product-c1-GCOld_G1, \
${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_default, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_tiered, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_nontiered, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_G1, \
@ -366,7 +366,7 @@ jprt.my.solaris.i586.test.targets= \
jprt.my.linux.i586.test.targets = \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \
${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \
@ -386,7 +386,7 @@ jprt.my.linux.i586.test.targets = \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_default, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \
@ -394,7 +394,7 @@ jprt.my.linux.i586.test.targets = \
jprt.my.linux.x64.test.targets = \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_default, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
@ -411,14 +411,14 @@ jprt.my.linux.x64.test.targets = \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC
jprt.my.windows.i586.test.targets = \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.windows.i586}-product-{c1|c2}-runThese, \
${jprt.my.windows.i586}-product-{c1|c2}-runThese_Xcomp, \
@ -438,7 +438,7 @@ jprt.my.windows.i586.test.targets = \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \
${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \
@ -446,7 +446,7 @@ jprt.my.windows.i586.test.targets = \
jprt.my.windows.x64.test.targets = \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \
${jprt.my.windows.x64}-product-c2-runThese, \
${jprt.my.windows.x64}-product-c2-runThese_Xcomp, \
@ -465,7 +465,7 @@ jprt.my.windows.x64.test.targets = \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
${jprt.my.windows.x64}-product-c2-jbb_CMS, \
${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \
${jprt.my.windows.x64}-product-c2-jbb_G1, \
@ -473,9 +473,9 @@ jprt.my.windows.x64.test.targets = \
# Some basic "smoke" tests for OpenJDK builds
jprt.test.targets.open = \
${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98_tiered, \
${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98_tiered, \
${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98_tiered
${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98, \
${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98, \
${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
# Testing for actual embedded builds is different from standard
jprt.my.linux.i586.test.targets.embedded = \

View File

@ -50,7 +50,6 @@ CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
# Allow the user to turn off precompiled headers from the command line.
ifneq ($(USE_PRECOMPILED_HEADER),0)
USE_PRECOMPILED_HEADER=1
PRECOMPILED_HEADER_DIR=.
PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
@ -165,7 +164,7 @@ DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
endif
# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
ifneq ($(USE_PRECOMPILED_HEADER),1)
ifeq ($(USE_PRECOMPILED_HEADER),0)
CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
endif

View File

@ -1,7 +1,3 @@
#
# @(#)mapfile-vers-debug 1.18 07/10/25 16:47:35
#
#
# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -221,6 +217,7 @@ SUNWprivate_1.1 {
JVM_SetArrayElement;
JVM_SetClassSigners;
JVM_SetLength;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetProtectionDomain;
JVM_SetSockOpt;

View File

@ -1,7 +1,3 @@
#
# @(#)mapfile-vers-product 1.19 08/02/12 10:56:37
#
#
# Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -221,6 +217,7 @@ SUNWprivate_1.1 {
JVM_SetArrayElement;
JVM_SetClassSigners;
JVM_SetLength;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetProtectionDomain;
JVM_SetSockOpt;

View File

@ -47,12 +47,10 @@ VM = $(GAMMADIR)/src/share/vm
Plat_File = $(Platform_file)
CDG = cd $(GENERATED);
ifdef USE_PRECOMPILED_HEADER
PrecompiledOption = -DUSE_PRECOMPILED_HEADER
UpdatePCH = $(MAKE) -f vm.make $(PRECOMPILED_HEADER) $(MFLAGS)
ifneq ($(USE_PRECOMPILED_HEADER),0)
UpdatePCH = $(MAKE) -f vm.make $(PRECOMPILED_HEADER) $(MFLAGS)
else
UpdatePCH = \# precompiled header is not used
PrecompiledOption =
UpdatePCH = \# precompiled header is not used
endif
Cached_plat = $(GENERATED)/platform.current

View File

@ -49,7 +49,6 @@ $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2)
ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 4 \) \))" "0"
# Allow the user to turn off precompiled headers from the command line.
ifneq ($(USE_PRECOMPILED_HEADER),0)
USE_PRECOMPILED_HEADER=1
PRECOMPILED_HEADER_DIR=.
PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
@ -142,7 +141,7 @@ DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d)
endif
# -DDONT_USE_PRECOMPILED_HEADER will exclude all includes in precompiled.hpp.
ifneq ($(USE_PRECOMPILED_HEADER),1)
ifeq ($(USE_PRECOMPILED_HEADER),0)
CFLAGS += -DDONT_USE_PRECOMPILED_HEADER
endif

View File

@ -1,7 +1,3 @@
#
# @(#)mapfile-vers 1.32 07/10/25 16:47:36
#
#
# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -221,6 +217,7 @@ SUNWprivate_1.1 {
JVM_SetArrayElement;
JVM_SetClassSigners;
JVM_SetLength;
JVM_SetNativeThreadName;
JVM_SetPrimitiveArrayElement;
JVM_SetProtectionDomain;
JVM_SetSockOpt;

View File

@ -50,6 +50,7 @@ ProjectCreatorIncludesPRIVATE=\
-relativeInclude src\closed\os_cpu\windows_$(Platform_arch)\vm \
-relativeInclude src\closed\cpu\$(Platform_arch)\vm \
-relativeInclude src\share\vm \
-relativeInclude src\share\vm\precompiled \
-relativeInclude src\share\vm\prims \
-relativeInclude src\os\windows\vm \
-relativeInclude src\os_cpu\windows_$(Platform_arch)\vm \

View File

@ -855,12 +855,6 @@ class Assembler : public AbstractAssembler {
Lookaside = 1 << 4
};
// test if x is within signed immediate range for nbits
static bool is_simm(intptr_t x, int nbits) { return -( intptr_t(1) << nbits-1 ) <= x && x < ( intptr_t(1) << nbits-1 ); }
// test if -4096 <= x <= 4095
static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
static bool is_in_wdisp_range(address a, address b, int nbits) {
intptr_t d = intptr_t(b) - intptr_t(a);
return is_simm(d, nbits + 2);
@ -1203,7 +1197,7 @@ public:
if (!UseCBCond || cbcond_before()) return false;
intptr_t x = intptr_t(target_distance(L)) - intptr_t(pc());
assert( (x & 3) == 0, "not word aligned");
return is_simm(x, 12);
return is_simm12(x);
}
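
For reference, the range check being consolidated here in standalone form: is_simm(x, 13) accepts exactly -4096..4095, which is what the new named helpers (is_simm12, is_simm13, ...) wrap. A minimal self-contained sketch:

#include <cassert>
#include <cstdint>

static bool is_simm(intptr_t x, int nbits) {
  return -(intptr_t(1) << (nbits - 1)) <= x && x < (intptr_t(1) << (nbits - 1));
}
static bool is_simm13(intptr_t x) { return is_simm(x, 13); }  // -4096..4095
static bool is_simm12(intptr_t x) { return is_simm(x, 12); }  // -2048..2047

int main() {
  assert(is_simm13(4095) && !is_simm13(4096));
  assert(is_simm12(-2048) && !is_simm12(-2049));
  return 0;
}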
// Tells assembler you know that next instruction is delayed

View File

@ -765,7 +765,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
add_debug_info_for_null_check_here(op->info());
__ load_klass(O0, G3_scratch);
if (__ is_simm13(op->vtable_offset())) {
if (Assembler::is_simm13(op->vtable_offset())) {
__ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
} else {
// This will generate 2 instructions

View File

@ -42,7 +42,7 @@ define_pd_global(bool, ProfileInterpreter, false);
#else
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
define_pd_global(bool, TieredCompilation, false);
define_pd_global(bool, TieredCompilation, true);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, BackEdgeThreshold, 140000);

View File

@ -315,7 +315,7 @@ void MethodHandles::RicochetFrame::verify_clean(MacroAssembler* _masm) {
__ cmp_and_br_short(O7_temp, T_VOID, Assembler::equal, Assembler::pt, L_ok_4);
extract_conversion_vminfo(_masm, L5_conversion, O5_temp);
__ ld_ptr(L4_saved_args_base, __ argument_offset(O5_temp, O5_temp), O7_temp);
assert(__ is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
assert(Assembler::is_simm13(RETURN_VALUE_PLACEHOLDER), "must be simm13");
__ cmp_and_brx_short(O7_temp, (int32_t) RETURN_VALUE_PLACEHOLDER, Assembler::equal, Assembler::pt, L_ok_4);
__ stop("damaged ricochet frame: RETURN_VALUE_PLACEHOLDER not found");
__ BIND(L_ok_4);

View File

@ -767,7 +767,7 @@ void AdapterGenerator::gen_c2i_adapter(
// In the 64bit build because of wider slots and STACKBIAS we can run
// out of bits in the displacement to do loads and stores. Use g3 as
// temporary displacement.
if (! __ is_simm13(extraspace)) {
if (!Assembler::is_simm13(extraspace)) {
__ set(extraspace, G3_scratch);
__ sub(SP, G3_scratch, SP);
} else {

View File

@ -566,7 +566,7 @@ int MachCallDynamicJavaNode::ret_addr_offset() {
} else {
klass_load_size = 1*BytesPerInstWord;
}
if( Assembler::is_simm13(v_off) ) {
if (Assembler::is_simm13(v_off)) {
return klass_load_size +
(2*BytesPerInstWord + // ld_ptr, ld_ptr
NativeCall::instruction_size); // call; delay slot
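
A worked instance of that offset, under assumed SPARC values (4-byte instructions, call plus delay slot = 8 bytes, single-instruction klass load):

#include <cassert>
int main() {
  const int BytesPerInstWord = 4;            // assumed: SPARC instruction size
  const int NativeCall_instruction_size = 8; // assumed: call + delay slot
  int klass_load_size = 1 * BytesPerInstWord;
  // ld_ptr, ld_ptr, call, delay slot when v_off fits in simm13:
  assert(klass_load_size + (2 * BytesPerInstWord + NativeCall_instruction_size) == 20);
  return 0;
}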
@ -1019,8 +1019,21 @@ void emit_hi(CodeBuffer &cbuf, int val) { }
//=============================================================================
const bool Matcher::constant_table_absolute_addressing = false;
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask;
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
int Compile::ConstantTable::calculate_table_base_offset() const {
if (UseRDPCForConstantTableBase) {
// The table base offset might be less but then it fits into
// simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
return Assembler::min_simm13();
} else {
int offset = -(size() / 2);
if (!Assembler::is_simm13(offset)) {
offset = Assembler::min_simm13();
}
return offset;
}
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
Compile* C = ra_->C;
@ -1028,8 +1041,9 @@ void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
MacroAssembler _masm(&cbuf);
Register r = as_Register(ra_->get_encode(this));
CodeSection* cs = __ code()->consts();
int consts_size = cs->align_at_start(cs->size());
CodeSection* consts_section = __ code()->consts();
int consts_size = consts_section->align_at_start(consts_section->size());
assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));
if (UseRDPCForConstantTableBase) {
// For the following RDPC logic to work correctly the consts
@ -1037,30 +1051,37 @@ void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
// assert checks for that. The layout and the SECT_* constants
// are defined in src/share/vm/asm/codeBuffer.hpp.
assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
int offset = __ offset();
int insts_offset = __ offset();
// Layout:
//
// |----------- consts section ------------|----------- insts section -----------...
// |------ constant table -----|- padding -|------------------x----
// \ current PC (RDPC instruction)
// |<------------- consts_size ----------->|<- insts_offset ->|
// \ table base
// The table base offset is later added to the load displacement
// so it has to be negative.
int table_base_offset = -(consts_size + insts_offset);
int disp;
// If the displacement from the current PC to the constant table
// base fits into simm13 we set the constant table base to the
// current PC.
if (__ is_simm13(-(consts_size + offset))) {
constant_table.set_table_base_offset(-(consts_size + offset));
if (Assembler::is_simm13(table_base_offset)) {
constant_table.set_table_base_offset(table_base_offset);
disp = 0;
} else {
// If the offset of the top constant (last entry in the table)
// fits into simm13 we set the constant table base to the actual
// table base.
if (__ is_simm13(constant_table.top_offset())) {
constant_table.set_table_base_offset(0);
disp = consts_size + offset;
} else {
// Otherwise we set the constant table base in the middle of the
// constant table.
int half_consts_size = consts_size / 2;
assert(half_consts_size * 2 == consts_size, "sanity");
constant_table.set_table_base_offset(-half_consts_size); // table base offset gets added to the load displacement.
disp = half_consts_size + offset;
}
// Otherwise we set the constant table base offset to the
// maximum negative displacement of load instructions to keep
// the disp as small as possible:
//
// |<------------- consts_size ----------->|<- insts_offset ->|
// |<--------- min_simm13 --------->|<-------- disp --------->|
// \ table base
table_base_offset = Assembler::min_simm13();
constant_table.set_table_base_offset(table_base_offset);
disp = (consts_size + insts_offset) + table_base_offset;
}
__ rdpc(r);
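
A worked instance of the RDPC path above, assuming a 16 KB constant table so that the full displacement does not fit in simm13 (table size and RDPC position are made-up values):

#include <cassert>

static bool is_simm13(long x) { return -4096 <= x && x <= 4095; }
static int  min_simm13()      { return -4096; }

int main() {
  int consts_size  = 16384;  // assumed constant table size (padded)
  int insts_offset = 8;      // assumed RDPC position in the insts section
  int table_base_offset = -(consts_size + insts_offset);   // -16392: too wide
  int disp;
  if (is_simm13(table_base_offset)) {
    disp = 0;                           // table base == current PC
  } else {
    table_base_offset = min_simm13();   // clamp to -4096
    disp = (consts_size + insts_offset) + table_base_offset;
  }
  assert(table_base_offset == -4096 && disp == 12296);
  return 0;
}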
@ -1072,8 +1093,7 @@ void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
}
else {
// Materialize the constant table base.
assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));
address baseaddr = cs->start() + -(constant_table.table_base_offset());
address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
AddressLiteral base(baseaddr, rspec);
__ set(base, r);
@ -1169,6 +1189,13 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ save(SP, G3, SP);
}
C->set_frame_complete( __ offset() );
if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
// emitted before MachConstantBaseNode.
Compile::ConstantTable& constant_table = C->constant_table();
constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
}
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
@ -1843,7 +1870,7 @@ const bool Matcher::convL2FSupported(void) {
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
// The passed offset is relative to address of the branch.
// Don't need to adjust the offset.
return UseCBCond && Assembler::is_simm(offset, 12);
return UseCBCond && Assembler::is_simm12(offset);
}
const bool Matcher::isSimpleConstant64(jlong value) {
@ -1997,7 +2024,7 @@ RegMask Matcher::modL_proj_mask() {
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return L7_REGP_mask;
return L7_REGP_mask();
}
%}
@ -2072,8 +2099,8 @@ encode %{
%}
enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
assert( Assembler::is_simm13($mem$$disp ), "need disp and disp+4" );
assert( Assembler::is_simm13($mem$$disp+4), "need disp and disp+4" );
assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
guarantee($mem$$index == R_G0_enc, "double index?");
emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg );
@ -2082,8 +2109,8 @@ encode %{
%}
enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
assert( Assembler::is_simm13($mem$$disp ), "need disp and disp+4" );
assert( Assembler::is_simm13($mem$$disp+4), "need disp and disp+4" );
assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
guarantee($mem$$index == R_G0_enc, "double index?");
// Load long with 2 instructions
emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 );
@ -2563,7 +2590,7 @@ encode %{
}
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
if( __ is_simm13(v_off) ) {
if (Assembler::is_simm13(v_off)) {
__ ld_ptr(G3, v_off, G5_method);
} else {
// Generate 2 instructions
@ -3336,7 +3363,7 @@ operand immI() %{
// Integer Immediate: 8-bit
operand immI8() %{
predicate(Assembler::is_simm(n->get_int(), 8));
predicate(Assembler::is_simm8(n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
@ -3365,7 +3392,7 @@ operand immI13m7() %{
// Integer Immediate: 16-bit
operand immI16() %{
predicate(Assembler::is_simm(n->get_int(), 16));
predicate(Assembler::is_simm16(n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
@ -3393,7 +3420,7 @@ operand immU6() %{
// Integer Immediate: 11-bit
operand immI11() %{
predicate(Assembler::is_simm(n->get_int(),11));
predicate(Assembler::is_simm11(n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
@ -3402,7 +3429,7 @@ operand immI11() %{
// Integer Immediate: 5-bit
operand immI5() %{
predicate(Assembler::is_simm(n->get_int(), 5));
predicate(Assembler::is_simm5(n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
@ -3634,7 +3661,7 @@ operand immL0() %{
// Integer Immediate: 5-bit
operand immL5() %{
predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm((int)n->get_long(), 5));
predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long()));
match(ConL);
op_cost(0);
format %{ %}
@ -9251,13 +9278,16 @@ instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
format %{ "ADD $constanttablebase, $constantoffset, O7\n\t"
"LD [O7 + $switch_val], O7\n\t"
"JUMP O7"
%}
"JUMP O7" %}
ins_encode %{
// Calculate table address into a register.
Register table_reg;
Register label_reg = O7;
if (constant_offset() == 0) {
// If we are calculating the size of this instruction don't trust
// zero offsets because they might change when
// MachConstantBaseNode decides to optimize the constant table
// base.
if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) {
table_reg = $constanttablebase;
} else {
table_reg = O7;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,7 +83,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
}
#endif
int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
if( __ is_simm13(v_off) ) {
if (Assembler::is_simm13(v_off)) {
__ ld_ptr(G3, v_off, G5_method);
} else {
__ set(v_off,G5);

View File

@ -3535,7 +3535,8 @@ bool Assembler::reachable(AddressLiteral adr) {
// addressing.
bool Assembler::is_polling_page_far() {
intptr_t addr = (intptr_t)os::get_polling_page();
return !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
return ForceUnreachable ||
!is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
!is_simm32(addr - (intptr_t)CodeCache::high_bound());
}
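
A sketch of the extended test in isolation: the polling page is "far" when it is forced unreachable or lies outside a signed 32-bit displacement from either end of the code cache (addresses below are made up):

#include <cassert>
#include <cstdint>

static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; }

static bool polling_page_far(int64_t page, int64_t cc_low, int64_t cc_high,
                             bool force_unreachable) {
  return force_unreachable ||
         !is_simm32(page - cc_low) ||
         !is_simm32(page - cc_high);
}

int main() {
  int64_t lo = 0x100000000LL, hi = 0x100800000LL;  // assumed code cache bounds
  assert( polling_page_far(0x300000000LL, lo, hi, false));  // 8 GB away: far
  assert(!polling_page_far(0x100900000LL, lo, hi, false));  // 1 MB away: near
  assert( polling_page_far(0x100900000LL, lo, hi, true));   // ForceUnreachable
  return 0;
}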

View File

@ -693,17 +693,6 @@ private:
static address locate_next_instruction(address inst);
// Utilities
#ifdef _LP64
static bool is_simm(int64_t x, int nbits) { return -(CONST64(1) << (nbits-1)) <= x &&
x < (CONST64(1) << (nbits-1)); }
static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; }
#else
static bool is_simm(int32_t x, int nbits) { return -(1 << (nbits-1)) <= x &&
x < (1 << (nbits-1)); }
static bool is_simm32(int32_t x) { return true; }
#endif // _LP64
static bool is_polling_page_far() NOT_LP64({ return false;});
// Generic instructions

View File

@ -44,7 +44,7 @@ define_pd_global(bool, ProfileInterpreter, false);
#else
define_pd_global(bool, ProfileInterpreter, true);
#endif // CC_INTERP
define_pd_global(bool, TieredCompilation, false);
define_pd_global(bool, TieredCompilation, true);
define_pd_global(intx, CompileThreshold, 10000);
define_pd_global(intx, BackEdgeThreshold, 100000);

View File

@ -27,7 +27,7 @@
// Adapters
enum /* platform_dependent_constants */ {
adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 80000))
adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 15000)) LP64_ONLY(32000 DEBUG_ONLY(+ 120000))
};
public:

View File

@ -95,6 +95,7 @@ class StubGenerator: public StubCodeGenerator {
#define inc_counter_np(counter) (0)
#else
void inc_counter_np_(int& counter) {
// This can destroy rscratch1 if counter is far from the code cache
__ incrementl(ExternalAddress((address)&counter));
}
#define inc_counter_np(counter) \
@ -1268,7 +1269,7 @@ class StubGenerator: public StubCodeGenerator {
__ subptr(end, start); // number of bytes to copy
intptr_t disp = (intptr_t) ct->byte_map_base;
if (__ is_simm32(disp)) {
if (Assembler::is_simm32(disp)) {
Address cardtable(noreg, noreg, Address::no_scale, disp);
__ lea(scratch, cardtable);
} else {
@ -1466,8 +1467,8 @@ class StubGenerator: public StubCodeGenerator {
__ movb(Address(end_to, 8), rax);
__ BIND(L_exit);
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -1555,8 +1556,8 @@ class StubGenerator: public StubCodeGenerator {
__ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -1564,8 +1565,8 @@ class StubGenerator: public StubCodeGenerator {
// Copy in 32-bytes chunks
copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -1658,8 +1659,8 @@ class StubGenerator: public StubCodeGenerator {
__ movw(Address(end_to, 8), rax);
__ BIND(L_exit);
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -1759,8 +1760,8 @@ class StubGenerator: public StubCodeGenerator {
__ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -1768,8 +1769,8 @@ class StubGenerator: public StubCodeGenerator {
// Copy in 32-bytes chunks
copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -1859,8 +1860,8 @@ class StubGenerator: public StubCodeGenerator {
__ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
gen_write_ref_array_post_barrier(saved_to, end_to, rax);
}
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -1940,11 +1941,11 @@ class StubGenerator: public StubCodeGenerator {
__ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
if (is_oop) {
__ jmp(L_exit);
}
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -1952,7 +1953,6 @@ class StubGenerator: public StubCodeGenerator {
// Copy in 32-bytes chunks
copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
__ bind(L_exit);
if (is_oop) {
Register end_to = rdx;
@ -1960,6 +1960,7 @@ class StubGenerator: public StubCodeGenerator {
gen_write_ref_array_post_barrier(to, end_to, rax);
}
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -2032,8 +2033,8 @@ class StubGenerator: public StubCodeGenerator {
if (is_oop) {
__ jmp(L_exit);
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -2045,11 +2046,13 @@ class StubGenerator: public StubCodeGenerator {
if (is_oop) {
__ BIND(L_exit);
gen_write_ref_array_post_barrier(saved_to, end_to, rax);
inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
}
restore_arg_regs();
if (is_oop) {
inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
}
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -2113,8 +2116,8 @@ class StubGenerator: public StubCodeGenerator {
if (is_oop) {
__ jmp(L_exit);
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -2127,11 +2130,13 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_exit);
__ lea(rcx, Address(to, saved_count, Address::times_8, -8));
gen_write_ref_array_post_barrier(to, rcx, rax);
inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
}
restore_arg_regs();
if (is_oop) {
inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
}
__ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@ -2331,8 +2336,8 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_done);
__ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
__ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
restore_arg_regs();
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
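
The repeated reordering in this file follows from the inc_counter_np comment above: incrementing a far counter may clobber rscratch1, while restore_arg_regs() still needs it. A toy model of the hazard (all names and the clobber behavior here are assumptions for illustration):

#include <cassert>
#include <cstdint>

static intptr_t rscratch1;  // simulated scratch register

static void inc_counter_far(int* counter) {
  rscratch1 = (intptr_t)counter;  // lea rscratch1, &counter (clobbers it)
  ++*counter;                     // incl [rscratch1]
}

int main() {
  int copy_ctr = 0;
  rscratch1 = 42;                 // stub parked a live value here
  intptr_t restored = rscratch1;  // restore_arg_regs() must read it first
  inc_counter_far(&copy_ctr);     // only now is rscratch1 free to clobber
  assert(restored == 42 && copy_ctr == 1);
  return 0;
}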

View File

@ -507,9 +507,12 @@ void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
//=============================================================================
const bool Matcher::constant_table_absolute_addressing = true;
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
int Compile::ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@ -639,6 +642,12 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
}
#endif
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
// emitted before MachConstantBaseNode.
Compile::ConstantTable& constant_table = C->constant_table();
constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
}
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
@ -1515,12 +1524,12 @@ bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
return EAX_REG_mask;
return EAX_REG_mask();
}
// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
return EDX_REG_mask;
return EDX_REG_mask();
}
// Register for DIVL projection of divmodL
@ -1536,7 +1545,7 @@ RegMask Matcher::modL_proj_mask() {
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return EBP_REG_mask;
return EBP_REG_mask();
}
// Returns true if the high 32 bits of the value is known to be zero.

View File

@ -843,9 +843,12 @@ void emit_cmpfp_fixup(MacroAssembler& _masm) {
//=============================================================================
const bool Matcher::constant_table_absolute_addressing = true;
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
int Compile::ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
}
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@ -977,6 +980,13 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
masm.bind(L);
}
#endif
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
// emitted before MachConstantBaseNode.
Compile::ConstantTable& constant_table = C->constant_table();
constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
}
}
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
@ -2079,26 +2089,26 @@ bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
return INT_RAX_REG_mask;
return INT_RAX_REG_mask();
}
// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
return INT_RDX_REG_mask;
return INT_RDX_REG_mask();
}
// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
return LONG_RAX_REG_mask;
return LONG_RAX_REG_mask();
}
// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
return LONG_RDX_REG_mask;
return LONG_RDX_REG_mask();
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return PTR_RBP_REG_mask;
return PTR_RBP_REG_mask();
}
static Address build_address(int b, int i, int s, int d) {

View File

@ -5778,15 +5778,18 @@ int os::fork_and_exec(char* cmd) {
// is_headless_jre()
//
// Test for the existence of libmawt in motif21 or xawt directories
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre
//
// Since JDK8 xawt/libmawt.so was moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
//
bool os::is_headless_jre() {
struct stat statbuf;
char buf[MAXPATHLEN];
char libmawtpath[MAXPATHLEN];
const char *xawtstr = "/xawt/libmawt" JNI_LIB_SUFFIX;
const char *motifstr = "/motif21/libmawt" JNI_LIB_SUFFIX;
const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX;
char *p;
// Get path to libjvm.so
@ -5807,9 +5810,9 @@ bool os::is_headless_jre() {
strcat(libmawtpath, xawtstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
// check motif21/libmawt.so
// check libawt_xawt.so
strcpy(libmawtpath, buf);
strcat(libmawtpath, motifstr);
strcat(libmawtpath, new_xawtstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
return true;
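
A minimal standalone sketch of the probe order after this change: the old xawt/libmawt location is checked first, then the JDK8-style libawt_xawt next to libawt (the directory passed in main is hypothetical):

#include <string>
#include <sys/stat.h>

static bool is_headless_jre_sketch(const std::string& jvm_dir) {
  struct stat statbuf;
  if (::stat((jvm_dir + "/xawt/libmawt.so").c_str(), &statbuf) == 0) return false;
  if (::stat((jvm_dir + "/libawt_xawt.so").c_str(), &statbuf) == 0) return false;
  return true;  // neither AWT library found: headless
}

int main() { return is_headless_jre_sketch("/opt/jdk/lib/amd64") ? 0 : 1; }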

View File

@ -5425,15 +5425,18 @@ int os::fork_and_exec(char* cmd) {
// is_headless_jre()
//
// Test for the existence of libmawt in motif21 or xawt directories
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre
//
// Since JDK8 xawt/libmawt.so was moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
//
bool os::is_headless_jre() {
struct stat statbuf;
char buf[MAXPATHLEN];
char libmawtpath[MAXPATHLEN];
const char *xawtstr = "/xawt/libmawt.so";
const char *motifstr = "/motif21/libmawt.so";
const char *new_xawtstr = "/libawt_xawt.so";
char *p;
// Get path to libjvm.so
@ -5454,9 +5457,9 @@ bool os::is_headless_jre() {
strcat(libmawtpath, xawtstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
// check motif21/libmawt.so
// check libawt_xawt.so
strcpy(libmawtpath, buf);
strcat(libmawtpath, motifstr);
strcat(libmawtpath, new_xawtstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
return true;

View File

@ -6311,15 +6311,18 @@ int os::fork_and_exec(char* cmd) {
// is_headless_jre()
//
// Test for the existence of libmawt in motif21 or xawt directories
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre
//
// Since JDK8 xawt/libmawt.so was moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
//
bool os::is_headless_jre() {
struct stat statbuf;
char buf[MAXPATHLEN];
char libmawtpath[MAXPATHLEN];
const char *xawtstr = "/xawt/libmawt.so";
const char *motifstr = "/motif21/libmawt.so";
const char *new_xawtstr = "/libawt_xawt.so";
char *p;
// Get path to libjvm.so
@ -6340,9 +6343,9 @@ bool os::is_headless_jre() {
strcat(libmawtpath, xawtstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
// check motif21/libmawt.so
// check libawt_xawt.so
strcpy(libmawtpath, buf);
strcat(libmawtpath, motifstr);
strcat(libmawtpath, new_xawtstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
return true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -982,27 +982,9 @@ void ADLParser::frame_parse(void) {
}
if (strcmp(token,"interpreter_frame_pointer")==0) {
interpreter_frame_pointer_parse(frame, false);
// Add reg_class interpreter_frame_pointer_reg
if( _AD._register != NULL ) {
RegClass *reg_class = _AD._register->addRegClass("interpreter_frame_pointer_reg");
char *interpreter_frame_pointer_reg = frame->_interpreter_frame_pointer_reg;
if( interpreter_frame_pointer_reg != NULL ) {
RegDef *regDef = _AD._register->getRegDef(interpreter_frame_pointer_reg);
reg_class->addReg(regDef); // add regDef to regClass
}
}
}
if (strcmp(token,"inline_cache_reg")==0) {
inline_cache_parse(frame, false);
// Add reg_class inline_cache_reg
if( _AD._register != NULL ) {
RegClass *reg_class = _AD._register->addRegClass("inline_cache_reg");
char *inline_cache_reg = frame->_inline_cache_reg;
if( inline_cache_reg != NULL ) {
RegDef *regDef = _AD._register->getRegDef(inline_cache_reg);
reg_class->addReg(regDef); // add regDef to regClass
}
}
}
if (strcmp(token,"compiler_method_oop_reg")==0) {
parse_err(WARN, "Using obsolete Token, compiler_method_oop_reg");
@ -1010,15 +992,6 @@ void ADLParser::frame_parse(void) {
}
if (strcmp(token,"interpreter_method_oop_reg")==0) {
interpreter_method_oop_parse(frame, false);
// Add reg_class interpreter_method_oop_reg
if( _AD._register != NULL ) {
RegClass *reg_class = _AD._register->addRegClass("interpreter_method_oop_reg");
char *method_oop_reg = frame->_interpreter_method_oop_reg;
if( method_oop_reg != NULL ) {
RegDef *regDef = _AD._register->getRegDef(method_oop_reg);
reg_class->addReg(regDef); // add regDef to regClass
}
}
}
if (strcmp(token,"cisc_spilling_operand_name")==0) {
cisc_spilling_operand_name_parse(frame, false);
@ -2363,6 +2336,14 @@ void ADLParser::reg_class_parse(void) {
}
}
next_char(); // Skip closing ')'
} else if (_curchar == '%') {
char *code = find_cpp_block("reg class");
if (code == NULL) {
parse_err(SYNERR, "missing code declaration for reg class.\n");
return;
}
reg_class->_user_defined = code;
return;
}
// Check for terminating ';'
@ -3115,7 +3096,7 @@ void ADLParser::constant_parse_expression(EncClass* encoding, char* ec_name) {
encoding->add_code(" _constant = C->constant_table().add");
// Parse everything in ( ) expression.
encoding->add_code("(");
encoding->add_code("(this, ");
next_char(); // Skip '('
int parens_depth = 1;
@ -3130,7 +3111,8 @@ void ADLParser::constant_parse_expression(EncClass* encoding, char* ec_name) {
}
else if (_curchar == ')') {
parens_depth--;
encoding->add_code(")");
if (parens_depth > 0)
encoding->add_code(")");
next_char();
}
else {
@ -3157,7 +3139,7 @@ void ADLParser::constant_parse_expression(EncClass* encoding, char* ec_name) {
}
// Finish code line.
encoding->add_code(";");
encoding->add_code(");");
if (_AD._adlocation_debug) {
encoding->add_code(end_line_marker());
@ -3817,7 +3799,7 @@ void ADLParser::effect_parse(InstructForm *instr) {
return;
}
// Get list of effect-operand pairs and insert into dictionary
else get_effectlist(instr->_effects, instr->_localNames);
else get_effectlist(instr->_effects, instr->_localNames, instr->_has_call);
// Debug Stuff
if (_AD._adl_debug > 1) fprintf(stderr,"Effect description: %s\n", desc);
@ -4595,7 +4577,7 @@ void ADLParser::get_oplist(NameList &parameters, FormDict &operands) {
// effect, and the second must be the name of an operand defined in the
// operand list of this instruction. Stores the names with a pointer to the
// effect form in a local effects table.
void ADLParser::get_effectlist(FormDict &effects, FormDict &operands) {
void ADLParser::get_effectlist(FormDict &effects, FormDict &operands, bool& has_call) {
OperandForm *opForm;
Effect *eForm;
char *ident;
@ -4628,26 +4610,31 @@ void ADLParser::get_effectlist(FormDict &effects, FormDict &operands) {
// Debugging Stuff
if (_AD._adl_debug > 1) fprintf(stderr, "\tEffect Type: %s\t", ident);
skipws();
// Get name of operand and check that it is in the local name table
if( (ident = get_unique_ident(effects, "effect")) == NULL) {
parse_err(SYNERR, "missing operand identifier in effect list\n");
return;
}
const Form *form = operands[ident];
opForm = form ? form->is_operand() : NULL;
if( opForm == NULL ) {
if( form && form->is_opclass() ) {
const char* cname = form->is_opclass()->_ident;
parse_err(SYNERR, "operand classes are illegal in effect lists (found %s %s)\n", cname, ident);
} else {
parse_err(SYNERR, "undefined operand %s in effect list\n", ident);
if (eForm->is(Component::CALL)) {
if (_AD._adl_debug > 1) fprintf(stderr, "\n");
has_call = true;
} else {
// Get name of operand and check that it is in the local name table
if( (ident = get_unique_ident(effects, "effect")) == NULL) {
parse_err(SYNERR, "missing operand identifier in effect list\n");
return;
}
return;
const Form *form = operands[ident];
opForm = form ? form->is_operand() : NULL;
if( opForm == NULL ) {
if( form && form->is_opclass() ) {
const char* cname = form->is_opclass()->_ident;
parse_err(SYNERR, "operand classes are illegal in effect lists (found %s %s)\n", cname, ident);
} else {
parse_err(SYNERR, "undefined operand %s in effect list\n", ident);
}
return;
}
// Add the pair to the effects table
effects.Insert(ident, eForm);
// Debugging Stuff
if (_AD._adl_debug > 1) fprintf(stderr, "\tOperand Name: %s\n", ident);
}
// Add the pair to the effects table
effects.Insert(ident, eForm);
// Debugging Stuff
if (_AD._adl_debug > 1) fprintf(stderr, "\tOperand Name: %s\n", ident);
skipws();
} while(_curchar == ',');

View File

@ -232,7 +232,7 @@ protected:
char *get_relation_dup(void);
void get_oplist(NameList &parameters, FormDict &operands);// Parse type-operand pairs
void get_effectlist(FormDict &effects, FormDict &operands); // Parse effect-operand pairs
void get_effectlist(FormDict &effects, FormDict &operands, bool& has_call); // Parse effect-operand pairs
// Return the contents of a parenthesized expression.
// Requires initial '(' and consumes final ')', which is replaced by '\0'.
char *get_paren_expr(const char *description, bool include_location = false);

View File

@ -823,9 +823,9 @@ static const char *getRegMask(const char *reg_class_name) {
} else {
char *rc_name = toUpper(reg_class_name);
const char *mask = "_mask";
int length = (int)strlen(rc_name) + (int)strlen(mask) + 3;
int length = (int)strlen(rc_name) + (int)strlen(mask) + 5;
char *regMask = new char[length];
sprintf(regMask,"%s%s", rc_name, mask);
sprintf(regMask,"%s%s()", rc_name, mask);
return regMask;
}
}
@ -1018,6 +1018,9 @@ void ArchDesc::initBaseOpTypes() {
ident = "TEMP";
eForm = new Effect(ident);
_globalNames.Insert(ident, eForm);
ident = "CALL";
eForm = new Effect(ident);
_globalNames.Insert(ident, eForm);
}
//

View File

@ -219,7 +219,9 @@ void RegDef::output(FILE *fp) { // Write info to output files
//------------------------------RegClass---------------------------------------
// Construct a register class into which registers will be inserted
RegClass::RegClass(const char *classid) : _stack_or_reg(false), _classid(classid), _regDef(cmpstr,hashstr, Form::arena) {
RegClass::RegClass(const char *classid) : _stack_or_reg(false), _classid(classid), _regDef(cmpstr,hashstr, Form::arena),
_user_defined(NULL)
{
}
// record a register in this class

View File

@ -161,6 +161,7 @@ public:
NameList _regDefs; // List of registers in class
Dict _regDef; // Dictionary of registers in class
bool _stack_or_reg; // Allowed on any stack slot
char* _user_defined;
// Public Methods
RegClass(const char *classid);// Constructor

View File

@ -31,7 +31,8 @@ InstructForm::InstructForm(const char *id, bool ideal_only)
: _ident(id), _ideal_only(ideal_only),
_localNames(cmpstr, hashstr, Form::arena),
_effects(cmpstr, hashstr, Form::arena),
_is_mach_constant(false)
_is_mach_constant(false),
_has_call(false)
{
_ftype = Form::INS;
@ -62,7 +63,8 @@ InstructForm::InstructForm(const char *id, InstructForm *instr, MatchRule *rule)
: _ident(id), _ideal_only(false),
_localNames(instr->_localNames),
_effects(instr->_effects),
_is_mach_constant(false)
_is_mach_constant(false),
_has_call(false)
{
_ftype = Form::INS;
@ -1754,6 +1756,7 @@ static int effect_lookup(const char *name) {
if(!strcmp(name, "USE_KILL")) return Component::USE_KILL;
if(!strcmp(name, "TEMP")) return Component::TEMP;
if(!strcmp(name, "INVALID")) return Component::INVALID;
if(!strcmp(name, "CALL")) return Component::CALL;
assert( false,"Invalid effect name specified\n");
return Component::INVALID;
}

View File

@ -111,6 +111,8 @@ public:
ComponentList _components; // List of Components matches MachNode's
// operand structure
bool _has_call; // contain a call and caller save registers should be saved?
// Public Methods
InstructForm(const char *id, bool ideal_only = false);
InstructForm(const char *id, InstructForm *instr, MatchRule *rule);
@ -895,7 +897,8 @@ public:
DEF = 0x2, USE_DEF = 0x3,
KILL = 0x4, USE_KILL = 0x5,
SYNTHETIC = 0x8,
TEMP = USE | SYNTHETIC
TEMP = USE | SYNTHETIC,
CALL = 0x10
};
};
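
Effects are bit flags, and the new CALL effect (0x10) marks instructions that contain a call so that caller-saved registers get saved around them. A self-contained sketch using the values from the Component enum above (USE = 0x1 and INVALID = 0 are assumptions, as they fall outside this hunk):

#include <cassert>
#include <cstring>

enum { USE = 0x1, DEF = 0x2, USE_DEF = 0x3, KILL = 0x4, USE_KILL = 0x5,
       SYNTHETIC = 0x8, TEMP = USE | SYNTHETIC, CALL = 0x10, INVALID = 0 };

static int effect_lookup(const char* name) {  // trimmed version of the lookup
  if (!strcmp(name, "TEMP")) return TEMP;
  if (!strcmp(name, "CALL")) return CALL;     // new in this commit
  return INVALID;
}

int main() {
  assert(effect_lookup("CALL") == 0x10);
  assert((TEMP & ~SYNTHETIC) == USE);  // TEMP is a synthetic USE
  return 0;
}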

View File

@ -162,11 +162,17 @@ void ArchDesc::declare_register_masks(FILE *fp_hpp) {
RegClass *reg_class = _register->getRegClass(rc_name);
assert( reg_class, "Using an undefined register class");
int len = RegisterForm::RegMask_Size();
fprintf(fp_hpp, "extern const RegMask %s%s_mask;\n", prefix, toUpper( rc_name ) );
if (reg_class->_user_defined == NULL) {
fprintf(fp_hpp, "extern const RegMask _%s%s_mask;\n", prefix, toUpper( rc_name ) );
fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { return _%s%s_mask; }\n", prefix, toUpper( rc_name ), prefix, toUpper( rc_name ));
} else {
fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { %s }\n", prefix, toUpper( rc_name ), reg_class->_user_defined);
}
if( reg_class->_stack_or_reg ) {
fprintf(fp_hpp, "extern const RegMask %sSTACK_OR_%s_mask;\n", prefix, toUpper( rc_name ) );
assert(reg_class->_user_defined == NULL, "no user defined reg class here");
fprintf(fp_hpp, "extern const RegMask _%sSTACK_OR_%s_mask;\n", prefix, toUpper( rc_name ) );
fprintf(fp_hpp, "inline const RegMask &%sSTACK_OR_%s_mask() { return _%sSTACK_OR_%s_mask; }\n", prefix, toUpper( rc_name ), prefix, toUpper( rc_name ) );
}
}
}
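
The net effect of this hunk on generated headers, sketched for hypothetical register classes FOO_REG and BAR_REG (RegMask is stubbed so the sketch stands alone): ordinary classes get a real mask behind an inline accessor, while a user-defined %{ ... %} body from the .ad file becomes the accessor's code verbatim.

#include <cassert>

struct RegMask { int bits; };  // stub standing in for HotSpot's RegMask

// Shape emitted for an ordinary register class:
const RegMask _FOO_REG_mask = { 0x3 };
inline const RegMask &FOO_REG_mask() { return _FOO_REG_mask; }

// Shape for a user-defined class: the body below is an assumed example of
// what an .ad file might supply inside %{ ... %}.
inline const RegMask &BAR_REG_mask() { return _FOO_REG_mask; }

int main() {
  assert(&FOO_REG_mask() == &_FOO_REG_mask);
  assert(FOO_REG_mask().bits == 0x3 && BAR_REG_mask().bits == 0x3);
  return 0;
}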
@ -188,8 +194,10 @@ void ArchDesc::build_register_masks(FILE *fp_cpp) {
RegClass *reg_class = _register->getRegClass(rc_name);
assert( reg_class, "Using an undefined register class");
if (reg_class->_user_defined != NULL) continue;
int len = RegisterForm::RegMask_Size();
fprintf(fp_cpp, "const RegMask %s%s_mask(", prefix, toUpper( rc_name ) );
fprintf(fp_cpp, "const RegMask _%s%s_mask(", prefix, toUpper( rc_name ) );
{ int i;
for( i = 0; i < len-1; i++ )
fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i,false));
@ -198,7 +206,7 @@ void ArchDesc::build_register_masks(FILE *fp_cpp) {
if( reg_class->_stack_or_reg ) {
int i;
fprintf(fp_cpp, "const RegMask %sSTACK_OR_%s_mask(", prefix, toUpper( rc_name ) );
fprintf(fp_cpp, "const RegMask _%sSTACK_OR_%s_mask(", prefix, toUpper( rc_name ) );
for( i = 0; i < len-1; i++ )
fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i,true));
fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i,true));
@ -2585,9 +2593,9 @@ void ArchDesc::defineEvalConstant(FILE* fp, InstructForm& inst) {
// Output instruction's emit prototype
fprintf(fp, "void %sNode::eval_constant(Compile* C) {\n", inst._ident);
// For ideal jump nodes, allocate a jump table.
// For ideal jump nodes, add a jump-table entry.
if (inst.is_ideal_jump()) {
fprintf(fp, " _constant = C->constant_table().allocate_jump_table(this);\n");
fprintf(fp, " _constant = C->constant_table().add_jump_table(this);\n");
}
// If user did not define an encode section,
@ -2690,7 +2698,7 @@ static void defineIn_RegMask(FILE *fp, FormDict &globals, OperandForm &oper) {
if (strcmp(first_reg_class, "stack_slots") == 0) {
fprintf(fp," return &(Compile::current()->FIRST_STACK_mask());\n");
} else {
fprintf(fp," return &%s_mask;\n", toUpper(first_reg_class));
fprintf(fp," return &%s_mask();\n", toUpper(first_reg_class));
}
} else {
// Build a switch statement to return the desired mask.
@ -2702,7 +2710,7 @@ static void defineIn_RegMask(FILE *fp, FormDict &globals, OperandForm &oper) {
if( !strcmp(reg_class, "stack_slots") ) {
fprintf(fp, " case %d: return &(Compile::current()->FIRST_STACK_mask());\n", index);
} else {
fprintf(fp, " case %d: return &%s_mask;\n", index, toUpper(reg_class));
fprintf(fp, " case %d: return &%s_mask();\n", index, toUpper(reg_class));
}
}
fprintf(fp," }\n");
@ -4080,8 +4088,6 @@ void ArchDesc::buildFrameMethods(FILE *fp_cpp) {
fprintf(fp_cpp,"OptoReg::Name Matcher::inline_cache_reg() {");
fprintf(fp_cpp," return OptoReg::Name(%s_num); }\n\n",
_frame->_inline_cache_reg);
fprintf(fp_cpp,"const RegMask &Matcher::inline_cache_reg_mask() {");
fprintf(fp_cpp," return INLINE_CACHE_REG_mask; }\n\n");
fprintf(fp_cpp,"int Matcher::inline_cache_reg_encode() {");
fprintf(fp_cpp," return _regEncode[inline_cache_reg()]; }\n\n");
@ -4089,8 +4095,6 @@ void ArchDesc::buildFrameMethods(FILE *fp_cpp) {
fprintf(fp_cpp,"OptoReg::Name Matcher::interpreter_method_oop_reg() {");
fprintf(fp_cpp," return OptoReg::Name(%s_num); }\n\n",
_frame->_interpreter_method_oop_reg);
fprintf(fp_cpp,"const RegMask &Matcher::interpreter_method_oop_reg_mask() {");
fprintf(fp_cpp," return INTERPRETER_METHOD_OOP_REG_mask; }\n\n");
fprintf(fp_cpp,"int Matcher::interpreter_method_oop_reg_encode() {");
fprintf(fp_cpp," return _regEncode[interpreter_method_oop_reg()]; }\n\n");
@ -4101,11 +4105,6 @@ void ArchDesc::buildFrameMethods(FILE *fp_cpp) {
else
fprintf(fp_cpp," return OptoReg::Name(%s_num); }\n\n",
_frame->_interpreter_frame_pointer_reg);
fprintf(fp_cpp,"const RegMask &Matcher::interpreter_frame_pointer_reg_mask() {");
if (_frame->_interpreter_frame_pointer_reg == NULL)
fprintf(fp_cpp," static RegMask dummy; return dummy; }\n\n");
else
fprintf(fp_cpp," return INTERPRETER_FRAME_POINTER_REG_mask; }\n\n");
// Frame Pointer definition
/* CNC - I can not contemplate having a different frame pointer between

View File

@ -1720,6 +1720,16 @@ void ArchDesc::declareClasses(FILE *fp) {
}
}
// flag: if this instruction is implemented with a call
if ( instr->_has_call ) {
if ( node_flags_set ) {
fprintf(fp," | Flag_has_call");
} else {
fprintf(fp,"init_flags(Flag_has_call");
node_flags_set = true;
}
}
if ( node_flags_set ) {
fprintf(fp,"); ");
}

View File

@ -257,6 +257,29 @@ class AbstractAssembler : public ResourceObj {
// ensure buf contains all code (call this before using/copying the code)
void flush();
// min and max values for signed immediate ranges
static int min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1)) ; }
static int max_simm(int nbits) { return (intptr_t(1) << (nbits - 1)) - 1; }
// Define some:
static int min_simm10() { return min_simm(10); }
static int min_simm13() { return min_simm(13); }
static int min_simm16() { return min_simm(16); }
// Test if x is within signed immediate range for nbits
static bool is_simm(intptr_t x, int nbits) { return min_simm(nbits) <= x && x <= max_simm(nbits); }
// Define some:
static bool is_simm5( intptr_t x) { return is_simm(x, 5 ); }
static bool is_simm8( intptr_t x) { return is_simm(x, 8 ); }
static bool is_simm10(intptr_t x) { return is_simm(x, 10); }
static bool is_simm11(intptr_t x) { return is_simm(x, 11); }
static bool is_simm12(intptr_t x) { return is_simm(x, 12); }
static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
static bool is_simm16(intptr_t x) { return is_simm(x, 16); }
static bool is_simm26(intptr_t x) { return is_simm(x, 26); }
static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
// Accessors
CodeBuffer* code() const; // _code_section->outer()
CodeSection* code_section() const { return _code_section; }
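Each helper describes an nbits-wide two's-complement immediate: the range is [-2^(nbits-1), 2^(nbits-1) - 1], so for example is_simm13() accepts [-4096, 4095]. A standalone check of the same arithmetic (widened to intptr_t throughout to sidestep the narrowing in the sketch):

    #include <cassert>
    #include <cstdint>

    static intptr_t min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1)); }
    static intptr_t max_simm(int nbits) { return  (intptr_t(1) << (nbits - 1)) - 1; }
    static bool is_simm(intptr_t x, int nbits) {
      return min_simm(nbits) <= x && x <= max_simm(nbits);
    }

    int main() {
      assert(min_simm(13) == -4096 && max_simm(13) == 4095);
      assert( is_simm( 4095, 13) && !is_simm( 4096, 13));  // upper boundary
      assert( is_simm(-4096, 13) && !is_simm(-4097, 13));  // lower boundary
      return 0;
    }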

View File

@ -3495,9 +3495,6 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
if (profile_calls()) {
profile_call(recv, holder_known ? callee->holder() : NULL);
}
if (profile_inlined_calls()) {
profile_invocation(callee, copy_state_before());
}
}
// Introduce a new callee continuation point - if the callee has
@ -3571,6 +3568,10 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
}
if (profile_inlined_calls()) {
profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI));
}
BlockBegin* callee_start_block = block_at(0);
if (callee_start_block != NULL) {
assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");

View File

@ -501,6 +501,7 @@ class Instruction: public CompilationResourceObj {
virtual RoundFP* as_RoundFP() { return NULL; }
virtual ExceptionObject* as_ExceptionObject() { return NULL; }
virtual UnsafeOp* as_UnsafeOp() { return NULL; }
virtual ProfileInvoke* as_ProfileInvoke() { return NULL; }
virtual void visit(InstructionVisitor* v) = 0;

View File

@ -429,7 +429,7 @@ CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ig
// all locals are dead on exit from the synthetic unlocker
liveness.clear();
} else {
assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
}
}
if (!liveness.is_valid()) {

View File

@ -150,11 +150,23 @@ void BCEscapeAnalyzer::set_method_escape(ArgumentMap vars) {
clear_bits(vars, _arg_local);
}
void BCEscapeAnalyzer::set_global_escape(ArgumentMap vars) {
void BCEscapeAnalyzer::set_global_escape(ArgumentMap vars, bool merge) {
clear_bits(vars, _arg_local);
clear_bits(vars, _arg_stack);
if (vars.contains_allocated())
_allocated_escapes = true;
if (merge && !vars.is_empty()) {
// Merge the new state into the already-processed block.
// The new state is not taken into account and
// may invalidate the set_returned() result.
if (vars.contains_unknown() || vars.contains_allocated()) {
_return_local = false;
}
if (vars.contains_unknown() || vars.contains_vars()) {
_return_allocated = false;
}
}
}
void BCEscapeAnalyzer::set_dirty(ArgumentMap vars) {
@ -999,7 +1011,7 @@ void BCEscapeAnalyzer::merge_block_states(StateInfo *blockstates, ciBlock *dest,
t.set_difference(d_state->_stack[i]);
extra_vars.set_union(t);
}
set_global_escape(extra_vars);
set_global_escape(extra_vars, true);
}
}

View File

@ -81,7 +81,7 @@ class BCEscapeAnalyzer : public ResourceObj {
bool is_arg_stack(ArgumentMap vars);
void clear_bits(ArgumentMap vars, VectorSet &bs);
void set_method_escape(ArgumentMap vars);
void set_global_escape(ArgumentMap vars);
void set_global_escape(ArgumentMap vars, bool merge = false);
void set_dirty(ArgumentMap vars);
void set_modified(ArgumentMap vars, int offs, int size);
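Defaulting merge to false keeps every existing caller source-compatible; only merge_block_states() opts into the new behavior. The two call shapes, as they appear in this change:

    set_global_escape(vars);              // unchanged call sites: merge == false
    set_global_escape(extra_vars, true);  // merge path in merge_block_states()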

View File

@ -295,12 +295,6 @@ class ciMethod : public ciObject {
// Print the name of this method in various incarnations.
void print_name(outputStream* st = tty);
void print_short_name(outputStream* st = tty);
methodOop get_method_handle_target() {
KlassHandle receiver_limit; int flags = 0;
methodHandle m = MethodHandles::decode_method(get_oop(), receiver_limit, flags);
return m();
}
};
#endif // SHARE_VM_CI_CIMETHOD_HPP

View File

@ -1748,7 +1748,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
tty->print("%4d ", compile_id); // print compilation number
tty->print("%s ", (is_osr ? "%" : " "));
int code_size = (task->code() == NULL) ? 0 : task->code()->total_size();
tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, time.milliseconds(), task->num_inlined_bytecodes());
tty->print_cr("size: %d time: %d inlined: %d bytes", code_size, (int)time.milliseconds(), task->num_inlined_bytecodes());
}
if (compilable == ciEnv::MethodCompilable_never) {
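The added (int) cast matters because the timer's milliseconds() value is 64-bit (a jlong, if memory serves) while %d consumes an int; pushing a 64-bit value through varargs for %d is undefined on LP64 platforms. A standalone illustration of the fix:

    #include <cstdio>
    #include <cstdint>

    int main() {
      int64_t time_ms = 42;                      // stand-in for the jlong timer value
      // std::printf("time: %d\n", time_ms);     // mismatched varargs: undefined
      std::printf("time: %d\n", (int)time_ms);   // cast restores a well-defined call
      return 0;
    }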

View File

@ -668,12 +668,16 @@ public:
// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.
#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \
HeapWord* bottom, \
HeapWord* top, \
ClosureType* cl) { \
if (SharedHeap::heap()->n_par_threads() > 0) { \
bool is_par = SharedHeap::heap()->n_par_threads() > 0; \
if (is_par) { \
assert(SharedHeap::heap()->n_par_threads() == \
SharedHeap::heap()->workers()->active_workers(), "Mismatch"); \
walk_mem_region_with_cl_par(mr, bottom, top, cl); \
} else { \
walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
@ -1925,6 +1929,9 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
if (rem_size < SmallForDictionary) {
bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
if (is_par) _indexedFreeListParLocks[rem_size]->lock();
assert(!is_par ||
(SharedHeap::heap()->n_par_threads() ==
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
returnChunkToFreeList(ffc);
split(size, rem_size);
if (is_par) _indexedFreeListParLocks[rem_size]->unlock();

View File

@ -3582,16 +3582,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
" or no bits are set in the gc_prologue before the start of the next "
"subsequent marking phase.");
// Temporarily disabled, since pre/post-consumption closures don't
// care about precleaned cards
#if 0
{
MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
_ct->ct_bs()->preclean_dirty_cards(mr);
}
#endif
// Save the end of the used_region of the constituent generations
// to be used to limit the extent of sweep in each generation.
save_sweep_limits();
@ -4062,7 +4052,7 @@ class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
CMSMarkStack* revisit_stack):
Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
Par_KlassRememberingOopClosure(collector, collector->ref_processor(), revisit_stack),
_task(task),
_span(collector->_span),
_work_queue(work_queue),
@ -4244,9 +4234,11 @@ void CMSConcMarkingTask::coordinator_yield() {
bool CMSCollector::do_marking_mt(bool asynch) {
assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
// In the future this would be determined ergonomically, based
// on #cpu's, # active mutator threads (and load), and mutation rate.
int num_workers = ConcGCThreads;
int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
conc_workers()->total_workers(),
conc_workers()->active_workers(),
Threads::number_of_non_daemon_threads());
conc_workers()->set_active_workers(num_workers);
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
@ -5062,6 +5054,8 @@ class CMSParRemarkTask: public AbstractGangTask {
ParallelTaskTerminator _term;
public:
// A value of 0 passed to n_workers will cause the number of
// workers to be taken from the active workers in the work gang.
CMSParRemarkTask(CMSCollector* collector,
CompactibleFreeListSpace* cms_space,
CompactibleFreeListSpace* perm_space,
@ -5544,7 +5538,15 @@ void CMSCollector::do_remark_parallel() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
int n_workers = workers->total_workers();
// Choose to use the number of GC workers most recently set
// into "active_workers". If active_workers is not set, set it
// to ParallelGCThreads.
int n_workers = workers->active_workers();
if (n_workers == 0) {
assert(n_workers > 0, "Should have been set during scavenge");
n_workers = ParallelGCThreads;
workers->set_active_workers(n_workers);
}
CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
@ -5884,8 +5886,17 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
// and a different number of discovered lists may have Ref objects.
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
rp->set_active_mt_degree(ParallelGCThreads);
GenCollectedHeap* gch = GenCollectedHeap::heap();
int active_workers = ParallelGCThreads;
FlexibleWorkGang* workers = gch->workers();
if (workers != NULL) {
active_workers = workers->active_workers();
// The expectation is that active_workers will have already
// been set to a reasonable value. If it has not been set,
// investigate.
assert(active_workers > 0, "Should have been set during scavenge");
}
rp->set_active_mt_degree(active_workers);
CMSRefProcTaskExecutor task_executor(*this);
rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,

View File

@ -255,7 +255,18 @@ void
CollectionSetChooser::
prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
_first_par_unreserved_idx = 0;
size_t max_waste = ParallelGCThreads * chunkSize;
int n_threads = ParallelGCThreads;
if (UseDynamicNumberOfGCThreads) {
assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
"Should have been set earlier");
// This is defensive code. As the assertion above says, the number
// of active threads should be > 0, but in case there is some path
// or some improperly initialized variable that leads to no
// active threads, protect against that in a product build.
n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
1);
}
size_t max_waste = n_threads * chunkSize;
// it should be aligned with respect to chunkSize
size_t aligned_n_regions =
(n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
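The aligned_n_regions expression is the standard round-up-to-a-multiple idiom: with n_regions = 10 and chunkSize = 4, it yields (10 + 3) / 4 * 4 = 12, padding the final chunk instead of truncating it; max_waste = n_threads * chunkSize then bounds how many padded regions the per-thread chunking can leave unclaimed in the worst case.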
@ -265,6 +276,11 @@ prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
jint
CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
// Don't do this assert because this can be called at a point
// where the upstream loop will not execute again but might
// try to claim more chunks (loop test has not been done yet).
// assert(_markedRegions.length() > _first_par_unreserved_idx,
// "Striding beyond the marked regions");
jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
assert(_markedRegions.length() > res + n_regions - 1,
"Should already have been expanded");

View File

@ -44,7 +44,7 @@
//
// CMS Bit Map Wrapper
CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter):
CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
_bm((uintptr_t*)NULL,0),
_shifter(shifter) {
_bmStartWord = (HeapWord*)(rs.base());
@ -458,12 +458,17 @@ bool ConcurrentMark::not_yet_marked(oop obj) const {
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
size_t ConcurrentMark::scale_parallel_threads(size_t n_par_threads) {
return MAX2((n_par_threads + 2) / 4, (size_t)1);
}
ConcurrentMark::ConcurrentMark(ReservedSpace rs,
int max_regions) :
_markBitMap1(rs, MinObjAlignment - 1),
_markBitMap2(rs, MinObjAlignment - 1),
_parallel_marking_threads(0),
_max_parallel_marking_threads(0),
_sleep_factor(0.0),
_marking_task_overhead(1.0),
_cleanup_sleep_factor(0.0),
@ -554,15 +559,17 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
if (ParallelGCThreads == 0) {
// if we are not running with any parallel GC threads we will not
// spawn any marking threads either
_parallel_marking_threads = 0;
_sleep_factor = 0.0;
_marking_task_overhead = 1.0;
_parallel_marking_threads = 0;
_max_parallel_marking_threads = 0;
_sleep_factor = 0.0;
_marking_task_overhead = 1.0;
} else {
if (ConcGCThreads > 0) {
// notice that ConcGCThreads overwrites G1MarkingOverheadPercent
// if both are set
_parallel_marking_threads = ConcGCThreads;
_max_parallel_marking_threads = _parallel_marking_threads;
_sleep_factor = 0.0;
_marking_task_overhead = 1.0;
} else if (G1MarkingOverheadPercent > 0) {
@ -583,10 +590,12 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
(1.0 - marking_task_overhead) / marking_task_overhead;
_parallel_marking_threads = (size_t) marking_thread_num;
_max_parallel_marking_threads = _parallel_marking_threads;
_sleep_factor = sleep_factor;
_marking_task_overhead = marking_task_overhead;
} else {
_parallel_marking_threads = MAX2((ParallelGCThreads + 2) / 4, (size_t)1);
_parallel_marking_threads = scale_parallel_threads(ParallelGCThreads);
_max_parallel_marking_threads = _parallel_marking_threads;
_sleep_factor = 0.0;
_marking_task_overhead = 1.0;
}
@ -609,7 +618,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
guarantee(parallel_marking_threads() > 0, "peace of mind");
_parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
(int) _parallel_marking_threads, false, true);
(int) _max_parallel_marking_threads, false, true);
if (_parallel_workers == NULL) {
vm_exit_during_initialization("Failed necessary allocation.");
} else {
@ -1106,6 +1115,33 @@ public:
~CMConcurrentMarkingTask() { }
};
// Calculates the number of active workers for a concurrent
// phase.
int ConcurrentMark::calc_parallel_marking_threads() {
size_t n_conc_workers;
if (!G1CollectedHeap::use_parallel_gc_threads()) {
n_conc_workers = 1;
} else {
if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ConcGCThreads) &&
!ForceDynamicNumberOfGCThreads)) {
n_conc_workers = max_parallel_marking_threads();
} else {
n_conc_workers =
AdaptiveSizePolicy::calc_default_active_workers(
max_parallel_marking_threads(),
1, /* Minimum workers */
parallel_marking_threads(),
Threads::number_of_non_daemon_threads());
// Don't scale down "n_conc_workers" by scale_parallel_threads() because
// that scaling has already gone into "_max_parallel_marking_threads".
}
}
assert(n_conc_workers > 0, "Always need at least 1");
return (int) MAX2(n_conc_workers, (size_t) 1);
}
void ConcurrentMark::markFromRoots() {
// we might be tempted to assert that:
// assert(asynch == !SafepointSynchronize::is_at_safepoint(),
@ -1116,9 +1152,20 @@ void ConcurrentMark::markFromRoots() {
_restart_for_overflow = false;
size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
// Parallel task terminator is set in "set_phase()".
force_overflow_conc()->init();
set_phase(active_workers, true /* concurrent */);
// _g1h has _n_par_threads
_parallel_marking_threads = calc_parallel_marking_threads();
assert(parallel_marking_threads() <= max_parallel_marking_threads(),
"Maximum number of marking threads exceeded");
_parallel_workers->set_active_workers((int)_parallel_marking_threads);
// Don't set _n_par_threads because it affects MT in process_strong_roots()
// and the decisions on that MT processing are made elsewhere.
assert( _parallel_workers->active_workers() > 0, "Should have been set");
set_phase(_parallel_workers->active_workers(), true /* concurrent */);
CMConcurrentMarkingTask markingTask(this, cmThread());
if (parallel_marking_threads() > 0) {
@ -1181,6 +1228,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
true /* expected_active */);
if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
@ -1463,12 +1511,20 @@ public:
G1ParFinalCountTask(G1CollectedHeap* g1h, CMBitMap* bm,
BitMap* region_bm, BitMap* card_bm)
: AbstractGangTask("G1 final counting"), _g1h(g1h),
_bm(bm), _region_bm(region_bm), _card_bm(card_bm) {
if (ParallelGCThreads > 0) {
_n_workers = _g1h->workers()->total_workers();
_bm(bm), _region_bm(region_bm), _card_bm(card_bm),
_n_workers(0)
{
// Use the value already set as the number of active threads
// in the call to run_task(). Needed for the allocation of
// _live_bytes and _used_bytes.
if (G1CollectedHeap::use_parallel_gc_threads()) {
assert( _g1h->workers()->active_workers() > 0,
"Should have been previously set");
_n_workers = _g1h->workers()->active_workers();
} else {
_n_workers = 1;
}
_live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
_used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
}
@ -1485,6 +1541,7 @@ public:
calccl.no_yield();
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&calccl, i,
(int) _n_workers,
HeapRegion::FinalCountClaimValue);
} else {
_g1h->heap_region_iterate(&calccl);
@ -1530,10 +1587,42 @@ public:
FreeRegionList* local_cleanup_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
HRRSCleanupTask* hrrs_cleanup_task);
HRRSCleanupTask* hrrs_cleanup_task) :
_g1(g1), _worker_num(worker_num),
_max_live_bytes(0), _regions_claimed(0),
_freed_bytes(0),
_claimed_region_time(0.0), _max_region_time(0.0),
_local_cleanup_list(local_cleanup_list),
_old_proxy_set(old_proxy_set),
_humongous_proxy_set(humongous_proxy_set),
_hrrs_cleanup_task(hrrs_cleanup_task) { }
size_t freed_bytes() { return _freed_bytes; }
bool doHeapRegion(HeapRegion *r);
bool doHeapRegion(HeapRegion *hr) {
// We use a claim value of zero here because all regions
// were claimed with value 1 in the FinalCount task.
hr->reset_gc_time_stamp();
if (!hr->continuesHumongous()) {
double start = os::elapsedTime();
_regions_claimed++;
hr->note_end_of_marking();
_max_live_bytes += hr->max_live_bytes();
_g1->free_region_if_empty(hr,
&_freed_bytes,
_local_cleanup_list,
_old_proxy_set,
_humongous_proxy_set,
_hrrs_cleanup_task,
true /* par */);
double region_time = (os::elapsedTime() - start);
_claimed_region_time += region_time;
if (region_time > _max_region_time) {
_max_region_time = region_time;
}
}
return false;
}
size_t max_live_bytes() { return _max_live_bytes; }
size_t regions_claimed() { return _regions_claimed; }
@ -1568,6 +1657,7 @@ public:
&hrrs_cleanup_task);
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
_g1h->workers()->active_workers(),
HeapRegion::NoteEndClaimValue);
} else {
_g1h->heap_region_iterate(&g1_note_end);
@ -1644,47 +1734,6 @@ public:
};
G1NoteEndOfConcMarkClosure::
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
int worker_num,
FreeRegionList* local_cleanup_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
HRRSCleanupTask* hrrs_cleanup_task)
: _g1(g1), _worker_num(worker_num),
_max_live_bytes(0), _regions_claimed(0),
_freed_bytes(0),
_claimed_region_time(0.0), _max_region_time(0.0),
_local_cleanup_list(local_cleanup_list),
_old_proxy_set(old_proxy_set),
_humongous_proxy_set(humongous_proxy_set),
_hrrs_cleanup_task(hrrs_cleanup_task) { }
bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
// We use a claim value of zero here because all regions
// were claimed with value 1 in the FinalCount task.
hr->reset_gc_time_stamp();
if (!hr->continuesHumongous()) {
double start = os::elapsedTime();
_regions_claimed++;
hr->note_end_of_marking();
_max_live_bytes += hr->max_live_bytes();
_g1->free_region_if_empty(hr,
&_freed_bytes,
_local_cleanup_list,
_old_proxy_set,
_humongous_proxy_set,
_hrrs_cleanup_task,
true /* par */);
double region_time = (os::elapsedTime() - start);
_claimed_region_time += region_time;
if (region_time > _max_region_time) {
_max_region_time = region_time;
}
}
return false;
}
void ConcurrentMark::cleanup() {
// world is stopped at this checkpoint
assert(SafepointSynchronize::is_at_safepoint(),
@ -1716,6 +1765,9 @@ void ConcurrentMark::cleanup() {
HeapRegionRemSet::reset_for_cleanup_tasks();
g1h->set_par_threads();
size_t n_workers = g1h->n_par_threads();
// Do counting once more with the world stopped for good measure.
G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
&_region_bm, &_card_bm);
@ -1724,9 +1776,10 @@ void ConcurrentMark::cleanup() {
HeapRegion::InitialClaimValue),
"sanity check");
int n_workers = g1h->workers()->total_workers();
g1h->set_par_threads(n_workers);
assert(g1h->n_par_threads() == (int) n_workers,
"Should not have been reset");
g1h->workers()->run_task(&g1_par_count_task);
// Done with the parallel phase so reset to 0.
g1h->set_par_threads(0);
assert(g1h->check_heap_region_claim_values(
@ -1776,8 +1829,7 @@ void ConcurrentMark::cleanup() {
double note_end_start = os::elapsedTime();
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
if (G1CollectedHeap::use_parallel_gc_threads()) {
int n_workers = g1h->workers()->total_workers();
g1h->set_par_threads(n_workers);
g1h->set_par_threads((int)n_workers);
g1h->workers()->run_task(&g1_par_note_end_task);
g1h->set_par_threads(0);
@ -1806,8 +1858,7 @@ void ConcurrentMark::cleanup() {
double rs_scrub_start = os::elapsedTime();
G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
if (G1CollectedHeap::use_parallel_gc_threads()) {
int n_workers = g1h->workers()->total_workers();
g1h->set_par_threads(n_workers);
g1h->set_par_threads((int)n_workers);
g1h->workers()->run_task(&g1_par_scrub_rs_task);
g1h->set_par_threads(0);
@ -1825,7 +1876,7 @@ void ConcurrentMark::cleanup() {
// this will also free any regions totally full of garbage objects,
// and sort the regions.
g1h->g1_policy()->record_concurrent_mark_cleanup_end();
g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
// Statistics.
double end = os::elapsedTime();
@ -1991,16 +2042,12 @@ class G1CMDrainMarkingStackClosure: public VoidClosure {
class G1CMParKeepAliveAndDrainClosure: public OopClosure {
ConcurrentMark* _cm;
CMTask* _task;
CMBitMap* _bitMap;
int _ref_counter_limit;
int _ref_counter;
public:
G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm,
CMTask* task,
CMBitMap* bitMap) :
_cm(cm), _task(task), _bitMap(bitMap),
_ref_counter_limit(G1RefProcDrainInterval)
{
G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
_cm(cm), _task(task),
_ref_counter_limit(G1RefProcDrainInterval) {
assert(_ref_counter_limit > 0, "sanity");
_ref_counter = _ref_counter_limit;
}
@ -2091,19 +2138,16 @@ class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
CMBitMap* _bitmap;
WorkGang* _workers;
int _active_workers;
public:
G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMBitMap* bitmap,
WorkGang* workers,
int n_workers) :
_g1h(g1h), _cm(cm), _bitmap(bitmap),
_workers(workers), _active_workers(n_workers)
{ }
_g1h(g1h), _cm(cm),
_workers(workers), _active_workers(n_workers) { }
// Executes the given task using concurrent marking worker threads.
virtual void execute(ProcessTask& task);
@ -2115,21 +2159,18 @@ class G1CMRefProcTaskProxy: public AbstractGangTask {
ProcessTask& _proc_task;
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
CMBitMap* _bitmap;
public:
G1CMRefProcTaskProxy(ProcessTask& proc_task,
G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMBitMap* bitmap) :
ConcurrentMark* cm) :
AbstractGangTask("Process reference objects in parallel"),
_proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
{}
_proc_task(proc_task), _g1h(g1h), _cm(cm) { }
virtual void work(int i) {
CMTask* marking_task = _cm->task(i);
G1CMIsAliveClosure g1_is_alive(_g1h);
G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
_proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
@ -2139,7 +2180,7 @@ public:
void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
assert(_workers != NULL, "Need parallel worker threads.");
G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
// We need to reset the phase for each task execution so that
// the termination protocol of CMTask::do_marking_step works.
@ -2156,8 +2197,7 @@ class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
public:
G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
AbstractGangTask("Enqueue reference objects in parallel"),
_enq_task(enq_task)
{ }
_enq_task(enq_task) { }
virtual void work(int i) {
_enq_task.work(i);
@ -2207,10 +2247,10 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// We use the work gang from the G1CollectedHeap and we utilize all
// the worker threads.
int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
int active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1;
active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
G1CMRefProcTaskExecutor par_task_executor(g1h, this,
g1h->workers(), active_workers);
if (rp->processing_is_mt()) {
@ -2290,7 +2330,9 @@ public:
}
CMRemarkTask(ConcurrentMark* cm) :
AbstractGangTask("Par Remark"), _cm(cm) { }
AbstractGangTask("Par Remark"), _cm(cm) {
_cm->terminator()->reset_for_reuse(cm->_g1h->workers()->active_workers());
}
};
void ConcurrentMark::checkpointRootsFinalWork() {
@ -2302,16 +2344,21 @@ void ConcurrentMark::checkpointRootsFinalWork() {
if (G1CollectedHeap::use_parallel_gc_threads()) {
G1CollectedHeap::StrongRootsScope srs(g1h);
// this is remark, so we'll use up all available threads
int active_workers = ParallelGCThreads;
// this is remark, so we'll use up all active threads
int active_workers = g1h->workers()->active_workers();
if (active_workers == 0) {
assert(active_workers > 0, "Should have been set earlier");
active_workers = ParallelGCThreads;
g1h->workers()->set_active_workers(active_workers);
}
set_phase(active_workers, false /* concurrent */);
// Leave _parallel_marking_threads at its
// value originally calculated in the ConcurrentMark
// constructor and pass values of the active workers
// through the gang in the task.
CMRemarkTask remarkTask(this);
// We will start all available threads, even if we decide that the
// active_workers will be fewer. The extra ones will just bail out
// immediately.
int n_workers = g1h->workers()->total_workers();
g1h->set_par_threads(n_workers);
g1h->set_par_threads(active_workers);
g1h->workers()->run_task(&remarkTask);
g1h->set_par_threads(0);
} else {
@ -2859,8 +2906,10 @@ void ConcurrentMark::print_stats() {
}
}
class CSMarkOopClosure: public OopClosure {
friend class CSMarkBitMapClosure;
// Closures used by ConcurrentMark::complete_marking_in_collection_set().
class CSetMarkOopClosure: public OopClosure {
friend class CSetMarkBitMapClosure;
G1CollectedHeap* _g1h;
CMBitMap* _bm;
@ -2870,6 +2919,7 @@ class CSMarkOopClosure: public OopClosure {
int _ms_size;
int _ms_ind;
int _array_increment;
int _worker_i;
bool push(oop obj, int arr_ind = 0) {
if (_ms_ind == _ms_size) {
@ -2910,7 +2960,6 @@ class CSMarkOopClosure: public OopClosure {
for (int j = arr_ind; j < lim; j++) {
do_oop(aobj->objArrayOopDesc::obj_at_addr<T>(j));
}
} else {
obj->oop_iterate(this);
}
@ -2920,17 +2969,17 @@ class CSMarkOopClosure: public OopClosure {
}
public:
CSMarkOopClosure(ConcurrentMark* cm, int ms_size) :
CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
_g1h(G1CollectedHeap::heap()),
_cm(cm),
_bm(cm->nextMarkBitMap()),
_ms_size(ms_size), _ms_ind(0),
_ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
_array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
_array_increment(MAX2(ms_size/8, 16))
{}
_array_increment(MAX2(ms_size/8, 16)),
_worker_i(worker_i) { }
~CSMarkOopClosure() {
~CSetMarkOopClosure() {
FREE_C_HEAP_ARRAY(oop, _ms);
FREE_C_HEAP_ARRAY(jint, _array_ind_stack);
}
@ -2953,10 +3002,11 @@ public:
if (hr != NULL) {
if (hr->in_collection_set()) {
if (_g1h->is_obj_ill(obj)) {
_bm->mark((HeapWord*)obj);
if (!push(obj)) {
gclog_or_tty->print_cr("Setting abort in CSMarkOopClosure because push failed.");
set_abort();
if (_bm->parMark((HeapWord*)obj)) {
if (!push(obj)) {
gclog_or_tty->print_cr("Setting abort in CSetMarkOopClosure because push failed.");
set_abort();
}
}
}
} else {
@ -2967,19 +3017,19 @@ public:
}
};
class CSMarkBitMapClosure: public BitMapClosure {
G1CollectedHeap* _g1h;
CMBitMap* _bitMap;
ConcurrentMark* _cm;
CSMarkOopClosure _oop_cl;
class CSetMarkBitMapClosure: public BitMapClosure {
G1CollectedHeap* _g1h;
CMBitMap* _bitMap;
ConcurrentMark* _cm;
CSetMarkOopClosure _oop_cl;
int _worker_i;
public:
CSMarkBitMapClosure(ConcurrentMark* cm, int ms_size) :
CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
_g1h(G1CollectedHeap::heap()),
_bitMap(cm->nextMarkBitMap()),
_oop_cl(cm, ms_size)
{}
~CSMarkBitMapClosure() {}
_oop_cl(cm, ms_size, worker_i),
_worker_i(worker_i) { }
bool do_bit(size_t offset) {
// convert offset into a HeapWord*
@ -3001,53 +3051,69 @@ public:
}
};
class CompleteMarkingInCSetHRClosure: public HeapRegionClosure {
CMBitMap* _bm;
CSetMarkBitMapClosure _bit_cl;
int _worker_i;
class CompleteMarkingInCSHRClosure: public HeapRegionClosure {
CMBitMap* _bm;
CSMarkBitMapClosure _bit_cl;
enum SomePrivateConstants {
MSSize = 1000
};
bool _completed;
public:
CompleteMarkingInCSHRClosure(ConcurrentMark* cm) :
CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_i) :
_bm(cm->nextMarkBitMap()),
_bit_cl(cm, MSSize),
_completed(true)
{}
_bit_cl(cm, MSSize, worker_i),
_worker_i(worker_i) { }
~CompleteMarkingInCSHRClosure() {}
bool doHeapRegion(HeapRegion* r) {
if (!r->evacuation_failed()) {
MemRegion mr = MemRegion(r->bottom(), r->next_top_at_mark_start());
if (!mr.is_empty()) {
if (!_bm->iterate(&_bit_cl, mr)) {
_completed = false;
return true;
bool doHeapRegion(HeapRegion* hr) {
if (hr->claimHeapRegion(HeapRegion::CompleteMarkCSetClaimValue)) {
// The current worker has successfully claimed the region.
if (!hr->evacuation_failed()) {
MemRegion mr = MemRegion(hr->bottom(), hr->next_top_at_mark_start());
if (!mr.is_empty()) {
bool done = false;
while (!done) {
done = _bm->iterate(&_bit_cl, mr);
}
}
}
}
return false;
}
bool completed() { return _completed; }
};
class ClearMarksInHRClosure: public HeapRegionClosure {
CMBitMap* _bm;
public:
ClearMarksInHRClosure(CMBitMap* bm): _bm(bm) { }
class SetClaimValuesInCSetHRClosure: public HeapRegionClosure {
jint _claim_value;
bool doHeapRegion(HeapRegion* r) {
if (!r->used_region().is_empty() && !r->evacuation_failed()) {
MemRegion usedMR = r->used_region();
_bm->clearRange(r->used_region());
}
public:
SetClaimValuesInCSetHRClosure(jint claim_value) :
_claim_value(claim_value) { }
bool doHeapRegion(HeapRegion* hr) {
hr->set_claim_value(_claim_value);
return false;
}
};
class G1ParCompleteMarkInCSetTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
public:
G1ParCompleteMarkInCSetTask(G1CollectedHeap* g1h,
ConcurrentMark* cm) :
AbstractGangTask("Complete Mark in CSet"),
_g1h(g1h), _cm(cm) { }
void work(int worker_i) {
CompleteMarkingInCSetHRClosure cmplt(_cm, worker_i);
HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_i);
_g1h->collection_set_iterate_from(hr, &cmplt);
}
};
void ConcurrentMark::complete_marking_in_collection_set() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -3056,20 +3122,32 @@ void ConcurrentMark::complete_marking_in_collection_set() {
return;
}
int i = 1;
double start = os::elapsedTime();
while (true) {
i++;
CompleteMarkingInCSHRClosure cmplt(this);
g1h->collection_set_iterate(&cmplt);
if (cmplt.completed()) break;
int n_workers = g1h->workers()->total_workers();
G1ParCompleteMarkInCSetTask complete_mark_task(g1h, this);
assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
if (G1CollectedHeap::use_parallel_gc_threads()) {
g1h->set_par_threads(n_workers);
g1h->workers()->run_task(&complete_mark_task);
g1h->set_par_threads(0);
} else {
complete_mark_task.work(0);
}
assert(g1h->check_cset_heap_region_claim_values(HeapRegion::CompleteMarkCSetClaimValue), "sanity");
// Now reset the claim values in the regions in the collection set.
SetClaimValuesInCSetHRClosure set_cv_cl(HeapRegion::InitialClaimValue);
g1h->collection_set_iterate(&set_cv_cl);
assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
double end_time = os::elapsedTime();
double elapsed_time_ms = (end_time - start) * 1000.0;
g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
ClearMarksInHRClosure clr(nextMarkBitMap());
g1h->collection_set_iterate(&clr);
}
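The claim values implement a simple one-winner-per-region protocol for the parallel phase above: every CSet region starts at InitialClaimValue, each worker that reaches a region tries to claim it with CompleteMarkCSetClaimValue, and SetClaimValuesInCSetHRClosure restores InitialClaimValue afterwards. A standalone model of the claim step (the CAS semantics of claimHeapRegion() are assumed from its use here, and the constants are hypothetical):

    #include <atomic>
    #include <cassert>

    struct Region {
      std::atomic<int> claim{0};                 // assume InitialClaimValue == 0
      bool try_claim(int v) {                    // models HeapRegion::claimHeapRegion()
        int expected = 0;
        return claim.compare_exchange_strong(expected, v);
      }
    };

    int main() {
      const int kCompleteMarkCSet = 9;           // hypothetical claim value
      Region r;
      assert( r.try_claim(kCompleteMarkCSet));   // first worker wins the region
      assert(!r.try_claim(kCompleteMarkCSet));   // later workers skip it
      r.claim = 0;                               // post-phase reset to InitialClaimValue
      return 0;
    }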
// The next two methods deal with the following optimisation. Some

View File

@ -360,7 +360,7 @@ class ConcurrentMark: public CHeapObj {
friend class ConcurrentMarkThread;
friend class CMTask;
friend class CMBitMapClosure;
friend class CSMarkOopClosure;
friend class CSetMarkOopClosure;
friend class CMGlobalObjectClosure;
friend class CMRemarkTask;
friend class CMConcurrentMarkingTask;
@ -375,7 +375,9 @@ protected:
ConcurrentMarkThread* _cmThread; // the thread doing the work
G1CollectedHeap* _g1h; // the heap.
size_t _parallel_marking_threads; // the number of marking
// threads we'll use
// threads we're using
size_t _max_parallel_marking_threads; // max number of marking
// threads we'll ever use
double _sleep_factor; // how much we have to sleep, with
// respect to the work we just did, to
// meet the marking overhead goal
@ -473,7 +475,7 @@ protected:
double* _accum_task_vtime; // accumulated task vtime
WorkGang* _parallel_workers;
FlexibleWorkGang* _parallel_workers;
ForceOverflowSettings _force_overflow_conc;
ForceOverflowSettings _force_overflow_stw;
@ -504,6 +506,7 @@ protected:
// accessor methods
size_t parallel_marking_threads() { return _parallel_marking_threads; }
size_t max_parallel_marking_threads() { return _max_parallel_marking_threads;}
double sleep_factor() { return _sleep_factor; }
double marking_task_overhead() { return _marking_task_overhead;}
double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
@ -709,6 +712,14 @@ public:
CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
// Returns the number of GC threads to be used in a concurrent
// phase based on the number of GC threads being used in a STW
// phase.
size_t scale_parallel_threads(size_t n_par_threads);
// Calculates the number of GC threads to be used in a concurrent phase.
int calc_parallel_marking_threads();
// The following three are interaction between CM and
// G1CollectedHeap

View File

@ -191,7 +191,11 @@ void ConcurrentMarkThread::run() {
VM_CGC_Operation op(&cl_cl, verbose_str);
VMThread::execute(&op);
} else {
// We don't want to update the marking status if a GC pause
// is already underway.
_sts.join();
g1h->set_marking_complete();
_sts.leave();
}
// Check if cleanup set the free_regions_coming flag. If it

View File

@ -66,6 +66,18 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
// Notes on implementation of parallelism in different tasks.
//
// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// It does use run_task() which sets _n_workers in the task.
// G1ParTask executes g1_process_strong_roots() ->
// SharedHeap::process_strong_roots() which eventually calls into
// CardTableModRefBS::par_non_clean_card_iterate_work() which uses
// SequentialSubTasksDone. SharedHeap::process_strong_roots() also
// directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
//
// Local to this file.
class RefineCardTableEntryClosure: public CardTableEntryClosure {
@ -176,8 +188,7 @@ void YoungList::push_region(HeapRegion *hr) {
hr->set_next_young_region(_head);
_head = hr;
hr->set_young();
double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
_g1h->g1_policy()->set_region_eden(hr, (int) _length);
++_length;
}
@ -190,7 +201,6 @@ void YoungList::add_survivor_region(HeapRegion* hr) {
_survivor_tail = hr;
}
_survivor_head = hr;
++_survivor_length;
}
@ -315,16 +325,20 @@ YoungList::reset_auxilary_lists() {
_g1h->g1_policy()->note_start_adding_survivor_regions();
_g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
int young_index_in_cset = 0;
for (HeapRegion* curr = _survivor_head;
curr != NULL;
curr = curr->get_next_young_region()) {
_g1h->g1_policy()->set_region_survivors(curr);
_g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
// The region is a non-empty survivor so let's add it to
// the incremental collection set for the next evacuation
// pause.
_g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
young_index_in_cset += 1;
}
assert((size_t) young_index_in_cset == _survivor_length,
"post-condition");
_g1h->g1_policy()->note_stop_adding_survivor_regions();
_head = _survivor_head;
@ -1154,6 +1168,7 @@ public:
void work(int i) {
RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
_g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
_g1->workers()->active_workers(),
HeapRegion::RebuildRSClaimValue);
}
};
@ -1358,12 +1373,32 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
}
// Rebuild remembered sets of all regions.
if (G1CollectedHeap::use_parallel_gc_threads()) {
int n_workers =
AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(),
"If not dynamic should be using all the workers");
workers()->set_active_workers(n_workers);
// Set parallel threads in the heap (_n_par_threads) only
// before a parallel phase and always reset it to 0 after
// the phase so that the number of parallel threads does
// not get carried forward to a serial phase where there
// may be code that is "possibly_parallel".
set_par_threads(n_workers);
ParRebuildRSTask rebuild_rs_task(this);
assert(check_heap_region_claim_values(
HeapRegion::InitialClaimValue), "sanity check");
set_par_threads(workers()->total_workers());
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"Unless dynamic should use total workers");
// Use the most recent number of active workers
assert(workers()->active_workers() > 0,
"Active workers not properly set");
set_par_threads(workers()->active_workers());
workers()->run_task(&rebuild_rs_task);
set_par_threads(0);
assert(check_heap_region_claim_values(
@ -2475,11 +2510,17 @@ void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
int worker,
int no_of_par_workers,
jint claim_value) {
const size_t regions = n_regions();
const size_t worker_num = (G1CollectedHeap::use_parallel_gc_threads() ? ParallelGCThreads : 1);
const size_t max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
no_of_par_workers :
1);
assert(UseDynamicNumberOfGCThreads ||
no_of_par_workers == workers()->total_workers(),
"Non dynamic should use fixed number of workers");
// try to spread out the starting points of the workers
const size_t start_index = regions / worker_num * (size_t) worker;
const size_t start_index = regions / max_workers * (size_t) worker;
// each worker will actually look at all regions
for (size_t count = 0; count < regions; ++count) {
@ -2576,10 +2617,10 @@ public:
_claim_value(claim_value), _failures(0), _sh_region(NULL) { }
bool doHeapRegion(HeapRegion* r) {
if (r->claim_value() != _claim_value) {
gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
gclog_or_tty->print_cr("Region " HR_FORMAT ", "
"claim value = %d, should be %d",
r->bottom(), r->end(), r->claim_value(),
_claim_value);
HR_FORMAT_PARAMS(r),
r->claim_value(), _claim_value);
++_failures;
}
if (!r->isHumongous()) {
@ -2588,9 +2629,9 @@ public:
_sh_region = r;
} else if (r->continuesHumongous()) {
if (r->humongous_start_region() != _sh_region) {
gclog_or_tty->print_cr("Region ["PTR_FORMAT","PTR_FORMAT"), "
gclog_or_tty->print_cr("Region " HR_FORMAT ", "
"HS = "PTR_FORMAT", should be "PTR_FORMAT,
r->bottom(), r->end(),
HR_FORMAT_PARAMS(r),
r->humongous_start_region(),
_sh_region);
++_failures;
@ -2608,8 +2649,63 @@ bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
heap_region_iterate(&cl);
return cl.failures() == 0;
}
class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
jint _claim_value;
size_t _failures;
public:
CheckClaimValuesInCSetHRClosure(jint claim_value) :
_claim_value(claim_value),
_failures(0) { }
size_t failures() {
return _failures;
}
bool doHeapRegion(HeapRegion* hr) {
assert(hr->in_collection_set(), "how?");
assert(!hr->isHumongous(), "H-region in CSet");
if (hr->claim_value() != _claim_value) {
gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
"claim value = %d, should be %d",
HR_FORMAT_PARAMS(hr),
hr->claim_value(), _claim_value);
_failures += 1;
}
return false;
}
};
bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
CheckClaimValuesInCSetHRClosure cl(claim_value);
collection_set_iterate(&cl);
return cl.failures() == 0;
}
#endif // ASSERT
// We want the parallel threads to start their collection
// set iteration at different collection set regions to
// avoid contention.
// If we have:
// n collection set regions
// p threads
// Then thread t will start at region t * floor (n/p)
HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
HeapRegion* result = g1_policy()->collection_set();
if (G1CollectedHeap::use_parallel_gc_threads()) {
size_t cs_size = g1_policy()->cset_region_length();
int n_workers = workers()->total_workers();
size_t cs_spans = cs_size / n_workers;
size_t ind = cs_spans * worker_i;
for (size_t i = 0; i < ind; i++) {
result = result->next_in_collection_set();
}
}
return result;
}
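Worked example of the striding above: with cs_size = 10 and n_workers = 4, cs_spans = 10 / 4 = 2 (integer division), so workers 0 through 3 start at regions 0, 2, 4, and 6 and then walk the rest of the collection set from there, spreading out the initial contention points.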
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
HeapRegion* r = g1_policy()->collection_set();
while (r != NULL) {
@ -2918,6 +3014,7 @@ public:
HandleMark hm;
VerifyRegionClosure blk(_allow_dirty, true, _vo);
_g1h->heap_region_par_iterate_chunked(&blk, worker_i,
_g1h->workers()->active_workers(),
HeapRegion::ParVerifyClaimValue);
if (blk.failures()) {
_failures = true;
@ -2935,6 +3032,10 @@ void G1CollectedHeap::verify(bool allow_dirty,
if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
VerifyRootsClosure rootsCl(vo);
assert(Thread::current()->is_VM_thread(),
"Expected to be executed serially by the VM thread at this point");
CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
// We apply the relevant closures to all the oops in the
@ -2979,7 +3080,10 @@ void G1CollectedHeap::verify(bool allow_dirty,
"sanity check");
G1ParVerifyTask task(this, allow_dirty, vo);
int n_workers = workers()->total_workers();
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"If not dynamic should be using all the workers");
int n_workers = workers()->active_workers();
set_par_threads(n_workers);
workers()->run_task(&task);
set_par_threads(0);
@ -2987,6 +3091,8 @@ void G1CollectedHeap::verify(bool allow_dirty,
failures = true;
}
// Checks that the expected amount of parallel work was done.
// The implication is that n_workers is > 0.
assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
"sanity check");
@ -3210,8 +3316,6 @@ G1CollectedHeap::doConcurrentMark() {
}
}
// <NEW PREDICTION>
double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
bool young) {
return _g1_policy->predict_region_elapsed_time_ms(hr, young);
@ -3251,7 +3355,7 @@ size_t G1CollectedHeap::cards_scanned() {
void
G1CollectedHeap::setup_surviving_young_words() {
guarantee( _surviving_young_words == NULL, "pre-condition" );
size_t array_length = g1_policy()->young_cset_length();
size_t array_length = g1_policy()->young_cset_region_length();
_surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
if (_surviving_young_words == NULL) {
vm_exit_out_of_memory(sizeof(size_t) * array_length,
@ -3268,7 +3372,7 @@ G1CollectedHeap::setup_surviving_young_words() {
void
G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
size_t array_length = g1_policy()->young_cset_length();
size_t array_length = g1_policy()->young_cset_region_length();
for (size_t i = 0; i < array_length; ++i)
_surviving_young_words[i] += surv_young_words[i];
}
@ -3280,8 +3384,6 @@ G1CollectedHeap::cleanup_surviving_young_words() {
_surviving_young_words = NULL;
}
// </NEW PREDICTION>
#ifdef ASSERT
class VerifyCSetClosure: public HeapRegionClosure {
public:
@ -3404,6 +3506,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert(check_young_list_well_formed(),
"young list should be well formed");
// Don't dynamically change the number of GC threads this early. A value of
// 0 is used to indicate serial work. When parallel work is done,
// it will be set.
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC
IsGCActiveMark x;
@ -3617,7 +3723,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
double end_time_sec = os::elapsedTime();
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
g1_policy()->record_pause_time_ms(pause_time_ms);
g1_policy()->record_collection_pause_end();
int active_gc_threads = workers()->active_workers();
g1_policy()->record_collection_pause_end(active_gc_threads);
MemoryService::track_memory_usage();
@ -4158,7 +4265,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
// non-young regions (where the age is -1)
// We also add a few elements at the beginning and at the end in
// an attempt to eliminate cache contention
size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
size_t array_length = PADDING_ELEM_NUM +
real_length +
PADDING_ELEM_NUM;
@ -4564,13 +4671,13 @@ protected:
}
public:
G1ParTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues)
G1ParTask(G1CollectedHeap* g1h,
RefToScanQueueSet *task_queues)
: AbstractGangTask("G1 collection"),
_g1h(g1h),
_queues(task_queues),
_terminator(workers, _queues),
_stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
_n_workers(workers)
_terminator(0, _queues),
_stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
{}
RefToScanQueueSet* queues() { return _queues; }
@ -4579,6 +4686,20 @@ public:
return queues()->queue(i);
}
ParallelTaskTerminator* terminator() { return &_terminator; }
virtual void set_for_termination(int active_workers) {
// This task calls set_n_termination() in par_non_clean_card_iterate_work()
// in the young space (_par_seq_tasks) in the G1 heap
// for SequentialSubTasksDone.
// This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
// both of which need setting by set_n_termination().
_g1h->SharedHeap::set_n_termination(active_workers);
_g1h->set_n_termination(active_workers);
terminator()->reset_for_reuse(active_workers);
_n_workers = active_workers;
}
void work(int i) {
if (i >= _n_workers) return; // no work needed this round
@ -4863,12 +4984,12 @@ class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
G1CollectedHeap* _g1h;
RefToScanQueueSet* _queues;
WorkGang* _workers;
FlexibleWorkGang* _workers;
int _active_workers;
public:
G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
WorkGang* workers,
FlexibleWorkGang* workers,
RefToScanQueueSet *task_queues,
int n_workers) :
_g1h(g1h),
@ -5124,11 +5245,13 @@ void G1CollectedHeap::process_discovered_references() {
// referents points to another object which is also referenced by an
// object discovered by the STW ref processor.
int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->total_workers() : 1);
int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->active_workers() : 1);
set_par_threads(n_workers);
G1ParPreserveCMReferentsTask keep_cm_referents(this, n_workers, _task_queues);
assert(active_workers == workers()->active_workers(),
"Need to reset active_workers");
set_par_threads(active_workers);
G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
if (G1CollectedHeap::use_parallel_gc_threads()) {
workers()->run_task(&keep_cm_referents);
@ -5194,7 +5317,6 @@ void G1CollectedHeap::process_discovered_references() {
NULL);
} else {
// Parallel reference processing
int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
assert(rp->num_q() == active_workers, "sanity");
assert(active_workers <= rp->max_num_q(), "sanity");
@ -5227,7 +5349,9 @@ void G1CollectedHeap::enqueue_discovered_references() {
} else {
// Parallel reference enqueuing
int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
int active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
assert(active_workers == workers()->active_workers(),
"Need to reset active_workers");
assert(rp->num_q() == active_workers, "sanity");
assert(active_workers <= rp->max_num_q(), "sanity");
@ -5254,9 +5378,24 @@ void G1CollectedHeap::evacuate_collection_set() {
concurrent_g1_refine()->set_use_cache(false);
concurrent_g1_refine()->clear_hot_cache_claimed_index();
int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
set_par_threads(n_workers);
G1ParTask g1_par_task(this, n_workers, _task_queues);
int n_workers;
if (G1CollectedHeap::use_parallel_gc_threads()) {
n_workers =
AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
workers()->active_workers(),
Threads::number_of_non_daemon_threads());
assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(),
"If not dynamic should be using all the workers");
set_par_threads(n_workers);
} else {
assert(n_par_threads() == 0,
"Should be the original non-parallel value");
n_workers = 1;
}
workers()->set_active_workers(n_workers);
G1ParTask g1_par_task(this, _task_queues);
init_for_evac_failure(NULL);
@ -5269,6 +5408,10 @@ void G1CollectedHeap::evacuate_collection_set() {
// The individual threads will set their evac-failure closures.
StrongRootsScope srs(this);
if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
// These tasks use SharedHeap::_process_strong_tasks
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"If not dynamic should be using all the workers");
workers()->run_task(&g1_par_task);
} else {
StrongRootsScope srs(this);
@ -5277,6 +5420,7 @@ void G1CollectedHeap::evacuate_collection_set() {
double par_time = (os::elapsedTime() - start_par) * 1000.0;
g1_policy()->record_par_time(par_time);
set_par_threads(0);
// Process any discovered reference objects - we have
@ -5304,8 +5448,11 @@ void G1CollectedHeap::evacuate_collection_set() {
finalize_for_evac_failure();
// Must do this before removing self-forwarding pointers, which clears
// the per-region evac-failure flags.
// Must do this before clearing the per-region evac-failure flags
// (which is currently done when we free the collection set).
// We also only do this while marking is actually in progress, and so
// it has to happen before we set the mark_in_progress flag at the
// end of an initial mark pause.
concurrent_mark()->complete_marking_in_collection_set();
if (evacuation_failed()) {
@ -5567,7 +5714,6 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
while (cur != NULL) {
assert(!is_on_master_free_list(cur), "sanity");
if (non_young) {
if (cur->is_young()) {
double end_sec = os::elapsedTime();
@ -5578,12 +5724,14 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
non_young = false;
}
} else {
double end_sec = os::elapsedTime();
double elapsed_ms = (end_sec - start_sec) * 1000.0;
young_time_ms += elapsed_ms;
if (!cur->is_young()) {
double end_sec = os::elapsedTime();
double elapsed_ms = (end_sec - start_sec) * 1000.0;
young_time_ms += elapsed_ms;
start_sec = os::elapsedTime();
non_young = true;
start_sec = os::elapsedTime();
non_young = true;
}
}
rs_lengths += cur->rem_set()->occupied();
@ -5595,8 +5743,8 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
if (cur->is_young()) {
int index = cur->young_index_in_cset();
guarantee( index != -1, "invariant" );
guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
assert(index != -1, "invariant");
assert((size_t) index < policy->young_cset_region_length(), "invariant");
size_t words_survived = _surviving_young_words[index];
cur->record_surv_words_in_group(words_survived);
@ -5607,7 +5755,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
cur->set_next_young_region(NULL);
} else {
int index = cur->young_index_in_cset();
guarantee( index == -1, "invariant" );
assert(index == -1, "invariant");
}
assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
@ -5615,13 +5763,26 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
"invariant" );
if (!cur->evacuation_failed()) {
MemRegion used_mr = cur->used_region();
// And the region is empty.
assert(!cur->is_empty(), "Should not have empty regions in a CS.");
assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
// If marking is in progress then clear any objects marked in
// the current region. Note mark_in_progress() returns false,
// even during an initial mark pause, until the set_marking_started()
// call which takes place later in the pause.
if (mark_in_progress()) {
assert(!g1_policy()->during_initial_mark_pause(), "sanity");
_cm->nextMarkBitMap()->clearRange(used_mr);
}
free_region(cur, &pre_used, &local_free_list, false /* par */);
} else {
cur->uninstall_surv_rate_group();
if (cur->is_young())
if (cur->is_young()) {
cur->set_young_index_in_cset(-1);
}
cur->set_not_young();
cur->set_evacuation_failed(false);
// The region is now considered to be old.
@ -5635,10 +5796,12 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
double end_sec = os::elapsedTime();
double elapsed_ms = (end_sec - start_sec) * 1000.0;
if (non_young)
if (non_young) {
non_young_time_ms += elapsed_ms;
else
} else {
young_time_ms += elapsed_ms;
}
update_sets_after_freeing_regions(pre_used, &local_free_list,
NULL /* old_proxy_set */,
@ -5722,7 +5885,6 @@ void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
assert(heap_lock_held_for_gc(),
"the heap lock should already be held by or for this thread");
_young_list->push_region(hr);
g1_policy()->set_region_short_lived(hr);
}
class NoYoungRegionsClosure: public HeapRegionClosure {
@ -5880,7 +6042,6 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
HeapRegion* new_alloc_region = new_region(word_size,
false /* do_expand */);
if (new_alloc_region != NULL) {
g1_policy()->update_region_num(true /* next_is_young */);
set_region_short_lived_locked(new_alloc_region);
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
return new_alloc_region;
@ -5908,6 +6069,21 @@ HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
return _g1h->new_mutator_alloc_region(word_size, force);
}
void G1CollectedHeap::set_par_threads() {
// Don't change the number of workers. Use the value previously set
// in the workgroup.
int n_workers = workers()->active_workers();
assert(UseDynamicNumberOfGCThreads ||
n_workers == workers()->total_workers(),
"Otherwise should be using the total number of workers");
if (n_workers == 0) {
assert(false, "Should have been set in prior evacuation pause.");
n_workers = ParallelGCThreads;
workers()->set_active_workers(n_workers);
}
set_par_threads(n_workers);
}
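
The clamping pattern in set_par_threads() above — never let the active count reach zero, never exceed the gang — is the core of the dynamic-GC-threads change. Below is a minimal standalone sketch of that logic, assuming only the bounds visible in this diff; the function name and the use of the non-daemon thread count as a load proxy mirror the call sites here but are not the actual AdaptiveSizePolicy::calc_active_workers implementation.

    #include <algorithm>
    #include <cstdio>

    // Hypothetical stand-in for AdaptiveSizePolicy::calc_active_workers():
    // bound the worker count by the gang size and by application load
    // (approximated here by the non-daemon thread count), and never
    // return 0 -- callers in this diff assume at least one active worker.
    static int calc_active_workers_sketch(int total_workers,
                                          int prev_active_workers,
                                          int non_daemon_threads) {
      (void) prev_active_workers;  // a real policy would damp changes
      int wanted = std::min(total_workers, non_daemon_threads);
      return std::max(1, wanted);
    }

    int main() {
      // A gang of 8 workers with 3 mutator threads yields 3 active workers.
      printf("%d\n", calc_active_workers_sketch(8, 8, 3));
      return 0;
    }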
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);

View File

@ -987,6 +987,16 @@ public:
void set_par_threads(int t) {
SharedHeap::set_par_threads(t);
// Done in SharedHeap but oddly there are
// two _process_strong_tasks's in a G1CollectedHeap
// so do it here too.
_process_strong_tasks->set_n_threads(t);
}
// Set _n_par_threads according to a policy TBD.
void set_par_threads();
void set_n_termination(int t) {
_process_strong_tasks->set_n_threads(t);
}
@ -1276,6 +1286,7 @@ public:
// i.e., that a closure never attempt to abort a traversal.
void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
int worker,
int no_of_par_workers,
jint claim_value);
// It resets all the region claim values to the default.
@ -1283,8 +1294,17 @@ public:
#ifdef ASSERT
bool check_heap_region_claim_values(jint claim_value);
// Same as the routine above but only checks regions in the
// current collection set.
bool check_cset_heap_region_claim_values(jint claim_value);
#endif // ASSERT
// Given the id of a worker, calculate a suitable
// starting region for iterating over the current
// collection set.
HeapRegion* start_cset_region_for_worker(int worker_i);
// Iterate over the regions (if any) in the current collection set.
void collection_set_iterate(HeapRegionClosure* blk);
@ -1610,16 +1630,12 @@ public:
public:
void stop_conc_gc_threads();
// <NEW PREDICTION>
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
void check_if_region_is_too_expensive(double predicted_time_ms);
size_t pending_card_num();
size_t max_pending_card_num();
size_t cards_scanned();
// </NEW PREDICTION>
protected:
size_t _max_heap_capacity;
};

View File

@ -85,13 +85,13 @@ public:
class G1CollectorPolicy: public CollectorPolicy {
private:
// The number of pauses during the execution.
long _n_pauses;
// either equal to the number of parallel threads, if ParallelGCThreads
// has been set, or 1 otherwise
int _parallel_gc_threads;
// The number of GC threads currently active.
uintx _no_of_gc_threads;
enum SomePrivateConstants {
NumPrevPausesForHeuristics = 10
};
@ -127,18 +127,9 @@ private:
jlong _num_cc_clears; // number of times the card count cache has been cleared
#endif
// Statistics for recent GC pauses. See below for how indexed.
TruncatedSeq* _recent_rs_scan_times_ms;
// These exclude marking times.
TruncatedSeq* _recent_pause_times_ms;
TruncatedSeq* _recent_gc_times_ms;
TruncatedSeq* _recent_CS_bytes_used_before;
TruncatedSeq* _recent_CS_bytes_surviving;
TruncatedSeq* _recent_rs_sizes;
TruncatedSeq* _concurrent_mark_remark_times_ms;
TruncatedSeq* _concurrent_mark_cleanup_times_ms;
@ -150,13 +141,6 @@ private:
NumberSeq* _all_stop_world_times_ms;
NumberSeq* _all_yield_times_ms;
size_t _region_num_young;
size_t _region_num_tenured;
size_t _prev_region_num_young;
size_t _prev_region_num_tenured;
NumberSeq* _all_mod_union_times_ms;
int _aux_num;
NumberSeq* _all_aux_times_ms;
double* _cur_aux_start_times_ms;
@ -194,7 +178,6 @@ private:
// locker is active. This should be >= _young_list_target_length;
size_t _young_list_max_length;
size_t _young_cset_length;
bool _last_young_gc_full;
unsigned _full_young_pause_num;
@ -217,8 +200,6 @@ private:
return _during_marking;
}
// <NEW PREDICTION>
private:
enum PredictionConstants {
TruncatedSeqLength = 10
@ -240,47 +221,32 @@ private:
TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
TruncatedSeq* _pending_cards_seq;
TruncatedSeq* _scanned_cards_seq;
TruncatedSeq* _rs_lengths_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
TruncatedSeq* _young_gc_eff_seq;
TruncatedSeq* _max_conc_overhead_seq;
bool _using_new_ratio_calculations;
size_t _min_desired_young_length; // as set on the command line or default calculations
size_t _max_desired_young_length; // as set on the command line or default calculations
size_t _recorded_young_regions;
size_t _recorded_non_young_regions;
size_t _recorded_region_num;
size_t _eden_cset_region_length;
size_t _survivor_cset_region_length;
size_t _old_cset_region_length;
void init_cset_region_lengths(size_t eden_cset_region_length,
size_t survivor_cset_region_length);
size_t eden_cset_region_length() { return _eden_cset_region_length; }
size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
size_t old_cset_region_length() { return _old_cset_region_length; }
size_t _free_regions_at_end_of_collection;
size_t _recorded_rs_lengths;
size_t _max_rs_lengths;
size_t _recorded_marked_bytes;
size_t _recorded_young_bytes;
size_t _predicted_pending_cards;
size_t _predicted_cards_scanned;
size_t _predicted_rs_lengths;
size_t _predicted_bytes_to_copy;
double _predicted_survival_ratio;
double _predicted_rs_update_time_ms;
double _predicted_rs_scan_time_ms;
double _predicted_object_copy_time_ms;
double _predicted_constant_other_time_ms;
double _predicted_young_other_time_ms;
double _predicted_non_young_other_time_ms;
double _predicted_pause_time_ms;
double _vtime_diff_ms;
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
@ -317,21 +283,28 @@ private:
double update_rs_processed_buffers,
double goal_ms);
uintx no_of_gc_threads() { return _no_of_gc_threads; }
void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }
double _pause_time_target_ms;
double _recorded_young_cset_choice_time_ms;
double _recorded_non_young_cset_choice_time_ms;
bool _within_target;
size_t _pending_cards;
size_t _max_pending_cards;
public:
// Accessors
void set_region_short_lived(HeapRegion* hr) {
void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
hr->set_young();
hr->install_surv_rate_group(_short_lived_surv_rate_group);
hr->set_young_index_in_cset(young_index_in_cset);
}
void set_region_survivors(HeapRegion* hr) {
void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
assert(hr->is_young() && hr->is_survivor(), "pre-condition");
hr->install_surv_rate_group(_survivor_surv_rate_group);
hr->set_young_index_in_cset(young_index_in_cset);
}
#ifndef PRODUCT
@ -343,10 +316,6 @@ public:
seq->davg() * confidence_factor(seq->num()));
}
size_t young_cset_length() {
return _young_cset_length;
}
void record_max_rs_lengths(size_t rs_lengths) {
_max_rs_lengths = rs_lengths;
}
@ -465,20 +434,12 @@ public:
size_t predict_bytes_to_copy(HeapRegion* hr);
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
void start_recording_regions();
void record_cset_region_info(HeapRegion* hr, bool young);
void record_non_young_cset_region(HeapRegion* hr);
void set_recorded_young_regions(size_t n_regions);
void set_recorded_young_bytes(size_t bytes);
void set_recorded_rs_lengths(size_t rs_lengths);
void set_predicted_bytes_to_copy(size_t bytes);
void end_recording_regions();
void record_vtime_diff_ms(double vtime_diff_ms) {
_vtime_diff_ms = vtime_diff_ms;
}
size_t cset_region_length() { return young_cset_region_length() +
old_cset_region_length(); }
size_t young_cset_region_length() { return eden_cset_region_length() +
survivor_cset_region_length(); }
void record_young_free_cset_time_ms(double time_ms) {
_recorded_young_free_cset_time_ms = time_ms;
@ -494,8 +455,6 @@ public:
double predict_survivor_regions_evac_time();
// </NEW PREDICTION>
void cset_regions_freed() {
bool propagate = _last_young_gc_full && !_in_marking_window;
_short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
@ -575,8 +534,6 @@ private:
double sum_of_values (double* data);
double max_sum (double* data1, double* data2);
int _last_satb_drain_processed_buffers;
int _last_update_rs_processed_buffers;
double _last_pause_time_ms;
size_t _bytes_in_collection_set_before_gc;
@ -596,10 +553,6 @@ private:
// set at the start of the pause.
HeapRegion* _collection_set;
// The number of regions in the collection set. Set from the incrementally
// built collection set at the start of an evacuation pause.
size_t _collection_set_size;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
// pause.
@ -622,16 +575,6 @@ private:
// The tail of the incrementally built collection set.
HeapRegion* _inc_cset_tail;
// The number of regions in the incrementally built collection set.
// Used to set _collection_set_size at the start of an evacuation
// pause.
size_t _inc_cset_size;
// Used as the index in the surving young words structure
// which tracks the amount of space, for each young region,
// that survives the pause.
size_t _inc_cset_young_index;
// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
// an evacuation pause.
@ -640,11 +583,6 @@ private:
// Used to record the highest end of heap region in collection set
HeapWord* _inc_cset_max_finger;
// The number of recorded used bytes in the young regions
// of the collection set. This is the sum of the used() bytes
// of retired young regions in the collection set.
size_t _inc_cset_recorded_young_bytes;
// The RSet lengths recorded for regions in the collection set
// (updated by the periodic sampling of the regions in the
// young list/collection set).
@ -655,68 +593,9 @@ private:
// regions in the young list/collection set).
double _inc_cset_predicted_elapsed_time_ms;
// The predicted bytes to copy for the regions in the collection
// set (updated by the periodic sampling of the regions in the
// young list/collection set).
size_t _inc_cset_predicted_bytes_to_copy;
// Stash a pointer to the g1 heap.
G1CollectedHeap* _g1;
// The average time in ms per collection pause, averaged over recent pauses.
double recent_avg_time_for_pauses_ms();
// The average time in ms for RS scanning, per pause, averaged
// over recent pauses. (Note the RS scanning time for a pause
// is itself an average of the RS scanning time for each worker
// thread.)
double recent_avg_time_for_rs_scan_ms();
// The number of "recent" GCs recorded in the number sequences
int number_of_recent_gcs();
// The average survival ratio, computed by the total number of bytes
// surviving / total number of bytes before collection over the last
// several recent pauses.
double recent_avg_survival_fraction();
// The survival fraction of the most recent pause; if there have been no
// pauses, returns 1.0.
double last_survival_fraction();
// Returns a "conservative" estimate of the recent survival rate, i.e.,
// one that may be higher than "recent_avg_survival_fraction".
// This is conservative in several ways:
// If there have been few pauses, it will assume a potentially high
// variance, and err on the side of caution.
// It puts a lower bound (currently 0.1) on the value it will return.
// To try to detect phase changes, if the most recent pause ("latest") has a
// higher-than average ("avg") survival rate, it returns that rate.
// "work" version is a utility function; young is restricted to young regions.
double conservative_avg_survival_fraction_work(double avg,
double latest);
// The arguments are the two sequences that keep track of the number of bytes
// surviving and the total number of bytes before collection, resp.,
// over the last several recent pauses
// Returns the survival rate for the category in the most recent pause.
// If there have been no pauses, returns 1.0.
double last_survival_fraction_work(TruncatedSeq* surviving,
TruncatedSeq* before);
// The arguments are the two sequences that keep track of the number of bytes
// surviving and the total number of bytes before collection, resp.,
// over the last several recent pauses
// Returns the average survival ratio over the last several recent pauses
// If there have been no pauses, returns 1.0
double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
TruncatedSeq* before);
double conservative_avg_survival_fraction() {
double avg = recent_avg_survival_fraction();
double latest = last_survival_fraction();
return conservative_avg_survival_fraction_work(avg, latest);
}
// The ratio of gc time to elapsed time, computed over recent pauses.
double _recent_avg_pause_time_ratio;
@ -724,9 +603,6 @@ private:
return _recent_avg_pause_time_ratio;
}
// Number of pauses between concurrent marking.
size_t _pauses_btwn_concurrent_mark;
// At the end of a pause we check the heap occupancy and we decide
// whether we will start a marking cycle during the next pause. If
// we decide that we want to do that, we will set this parameter to
@ -849,9 +725,6 @@ public:
GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
// The number of collection pauses so far.
long n_pauses() const { return _n_pauses; }
// Update the heuristic info to record a collection pause of the given
// start time, where the given number of bytes were used at the start.
// This may involve changing the desired size of a collection set.
@ -864,19 +737,21 @@ public:
void record_concurrent_mark_init_end(double
mark_init_elapsed_time_ms);
void record_mark_closure_time(double mark_closure_time_ms);
void record_mark_closure_time(double mark_closure_time_ms) {
_mark_closure_time_ms = mark_closure_time_ms;
}
void record_concurrent_mark_remark_start();
void record_concurrent_mark_remark_end();
void record_concurrent_mark_cleanup_start();
void record_concurrent_mark_cleanup_end();
void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
void record_concurrent_mark_cleanup_completed();
void record_concurrent_pause();
void record_concurrent_pause_end();
void record_collection_pause_end();
void record_collection_pause_end(int no_of_gc_threads);
void print_heap_transition();
// Record the fact that a full collection occurred.
@ -900,15 +775,6 @@ public:
_cur_satb_drain_time_ms = ms;
}
void record_satb_drain_processed_buffers(int processed_buffers) {
assert(_g1->mark_in_progress(), "shouldn't be here otherwise");
_last_satb_drain_processed_buffers = processed_buffers;
}
void record_mod_union_time(double ms) {
_all_mod_union_times_ms->add(ms);
}
void record_update_rs_time(int thread, double ms) {
_par_last_update_rs_times_ms[thread] = ms;
}
@ -1009,11 +875,8 @@ public:
void clear_collection_set() { _collection_set = NULL; }
// The number of elements in the current collection set.
size_t collection_set_size() { return _collection_set_size; }
// Add "hr" to the CS.
void add_to_collection_set(HeapRegion* hr);
// Add old region "hr" to the CSet.
void add_old_region_to_cset(HeapRegion* hr);
// Incremental CSet Support
@ -1023,9 +886,6 @@ public:
// The tail of the incrementally built collection set.
HeapRegion* inc_set_tail() { return _inc_cset_tail; }
// The number of elements in the incrementally built collection set.
size_t inc_cset_size() { return _inc_cset_size; }
// Initialize incremental collection set info.
void start_incremental_cset_building();
@ -1125,8 +985,6 @@ public:
return _young_list_max_length;
}
void update_region_num(bool young);
bool full_young_gcs() {
return _full_young_gcs;
}

View File

@ -84,8 +84,11 @@ template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
// slightly paranoid test; I'm trying to catch potential
// problems before we go into push_on_queue to know where the
// problem is coming from
assert(obj == oopDesc::load_decode_heap_oop(p),
"p should still be pointing to obj");
assert((obj == oopDesc::load_decode_heap_oop(p)) ||
(obj->is_forwarded() &&
obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
"p should still be pointing to obj or to its forwardee");
_par_scan_state->push_on_queue(p);
} else {
_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());

View File

@ -209,29 +209,9 @@ public:
size_t cards_looked_up() { return _cards;}
};
// We want the parallel threads to start their scanning at
// different collection set regions to avoid contention.
// If we have:
// n collection set regions
// p threads
// Then thread t will start at region t * floor (n/p)
HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
HeapRegion* result = _g1p->collection_set();
if (ParallelGCThreads > 0) {
size_t cs_size = _g1p->collection_set_size();
int n_workers = _g1->workers()->total_workers();
size_t cs_spans = cs_size / n_workers;
size_t ind = cs_spans * worker_i;
for (size_t i = 0; i < ind; i++)
result = result->next_in_collection_set();
}
return result;
}
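A worked example of the partitioning formula in the removed calculateStartRegion(), with assumed numbers (the replacement start_cset_region_for_worker() serves the same purpose once the worker count is dynamic):

    // n = 10 collection set regions, p = 4 worker threads
    // floor(n/p) = 2, so thread t starts at region t * 2:
    //   thread 0 -> region 0, thread 1 -> region 2,
    //   thread 2 -> region 4, thread 3 -> region 6
    // Each thread then walks forward from its start region via
    // next_in_collection_set(), so the starting points are spread out
    // to reduce contention.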
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = calculateStartRegion(worker_i);
HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
ScanRSClosure scanRScl(oc, worker_i);
@ -430,8 +410,10 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
dcqs.concatenate_logs();
if (ParallelGCThreads > 0) {
_seq_task->set_n_threads((int)n_workers());
if (G1CollectedHeap::use_parallel_gc_threads()) {
// Don't set the number of workers here. It will be set
// when the task is run
// _seq_task->set_n_termination((int)n_workers());
}
guarantee( _cards_scanned == NULL, "invariant" );
_cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
@ -578,7 +560,10 @@ void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val) {
ScrubRSClosure scrub_cl(region_bm, card_bm);
_g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
_g1->heap_region_par_iterate_chunked(&scrub_cl,
worker_num,
(int) n_workers(),
claim_val);
}

View File

@ -104,8 +104,6 @@ public:
void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
HeapRegion* calculateStartRegion(int i);
CardTableModRefBS* ct_bs() { return _ct_bs; }
size_t cardsScanned() { return _total_cards_scanned; }

View File

@ -39,10 +39,6 @@
develop(intx, G1MarkingOverheadPercent, 0, \
"Overhead of concurrent marking") \
\
\
develop(intx, G1PolicyVerbose, 0, \
"The verbosity level on G1 policy decisions") \
\
develop(intx, G1MarkingVerboseLevel, 0, \
"Level (0-4) of verboseness of the marking code") \
\
@ -58,9 +54,6 @@
develop(bool, G1TraceMarkStackOverflow, false, \
"If true, extra debugging code for CM restart for ovflw.") \
\
develop(intx, G1PausesBtwnConcMark, -1, \
"If positive, fixed number of pauses between conc markings") \
\
diagnostic(bool, G1SummarizeConcMark, false, \
"Summarize concurrent mark info") \
\

View File

@ -367,12 +367,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
static void setup_heap_region_size(uintx min_heap_size);
enum ClaimValues {
InitialClaimValue = 0,
FinalCountClaimValue = 1,
NoteEndClaimValue = 2,
ScrubRemSetClaimValue = 3,
ParVerifyClaimValue = 4,
RebuildRSClaimValue = 5
InitialClaimValue = 0,
FinalCountClaimValue = 1,
NoteEndClaimValue = 2,
ScrubRemSetClaimValue = 3,
ParVerifyClaimValue = 4,
RebuildRSClaimValue = 5,
CompleteMarkCSetClaimValue = 6
};
inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
@ -416,7 +417,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
void add_to_marked_bytes(size_t incr_bytes) {
_next_marked_bytes = _next_marked_bytes + incr_bytes;
guarantee( _next_marked_bytes <= used(), "invariant" );
assert(_next_marked_bytes <= used(), "invariant" );
}
void zero_marked_bytes() {

View File

@ -33,6 +33,7 @@
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "runtime/vmThread.hpp"
void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl,
@ -42,6 +43,11 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio
assert((n_threads == 1 && ParallelGCThreads == 0) ||
n_threads <= (int)ParallelGCThreads,
"# worker threads != # requested!");
assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
assert(UseDynamicNumberOfGCThreads ||
!FLAG_IS_DEFAULT(ParallelGCThreads) ||
n_threads == (int)ParallelGCThreads,
"# worker threads != # requested!");
// Make sure the LNC array is valid for the space.
jbyte** lowest_non_clean;
uintptr_t lowest_non_clean_base_chunk_index;
@ -52,6 +58,8 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio
int n_strides = n_threads * ParGCStridesPerThread;
SequentialSubTasksDone* pst = sp->par_seq_tasks();
// Sets the condition for completion of the subtask (how many threads
// need to finish in order to be done).
pst->set_n_threads(n_threads);
pst->set_n_tasks(n_strides);

View File

@ -305,7 +305,7 @@ public:
inline ParScanThreadState& thread_state(int i);
void reset(bool promotion_failed);
void reset(int active_workers, bool promotion_failed);
void flush();
#if TASKQUEUE_STATS
@ -322,6 +322,9 @@ private:
ParallelTaskTerminator& _term;
ParNewGeneration& _gen;
Generation& _next_gen;
public:
bool is_valid(int id) const { return id < length(); }
ParallelTaskTerminator* terminator() { return &_term; }
};
@ -351,9 +354,9 @@ inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
}
void ParScanThreadStateSet::reset(bool promotion_failed)
void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
_term.reset_for_reuse();
_term.reset_for_reuse(active_threads);
if (promotion_failed) {
for (int i = 0; i < length(); ++i) {
thread_state(i).print_and_clear_promotion_failure_size();
@ -569,6 +572,24 @@ ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
_state_set(state_set)
{}
// Reset the terminator for the given number of
// active threads.
void ParNewGenTask::set_for_termination(int active_workers) {
_state_set->reset(active_workers, _gen->promotion_failed());
// Should the heap be passed in? There's only 1 for now so
// grab it instead.
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->set_n_termination(active_workers);
}
// The "i" passed to this method is the part of the work for
// this thread. It is not the worker ID. The "i" is derived
// from _started_workers which is incremented in internal_note_start()
// called in GangWorker loop(), which runs under the
// protection of the gang monitor and is
// called after a task is started. So "i" is based on
// first-come-first-served.
void ParNewGenTask::work(int i) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Since this is being done in a separate thread, need new resource
@ -581,6 +602,8 @@ void ParNewGenTask::work(int i) {
Generation* old_gen = gch->next_gen(_gen);
ParScanThreadState& par_scan_state = _state_set->thread_state(i);
assert(_state_set->is_valid(i), "Should not have been called");
par_scan_state.set_young_old_boundary(_young_old_boundary);
par_scan_state.start_strong_roots();
@ -733,7 +756,9 @@ public:
private:
virtual void work(int i);
virtual void set_for_termination(int active_workers) {
_state_set.terminator()->reset_for_reuse(active_workers);
}
private:
ParNewGeneration& _gen;
ProcessTask& _task;
@ -789,18 +814,20 @@ void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"not a generational heap");
WorkGang* workers = gch->workers();
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
_state_set.reset(workers->active_workers(), _generation.promotion_failed());
ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
_generation.reserved().end(), _state_set);
workers->run_task(&rp_task);
_state_set.reset(_generation.promotion_failed());
_state_set.reset(0 /* bad value in debug if not reset */,
_generation.promotion_failed());
}
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
GenCollectedHeap* gch = GenCollectedHeap::heap();
WorkGang* workers = gch->workers();
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads.");
ParNewRefEnqueueTaskProxy enq_task(task);
workers->run_task(&enq_task);
@ -856,7 +883,13 @@ void ParNewGeneration::collect(bool full,
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"not a CMS generational heap");
AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
WorkGang* workers = gch->workers();
FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need workgang for parallel work");
int active_workers =
AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
workers->active_workers(),
Threads::number_of_non_daemon_threads());
workers->set_active_workers(active_workers);
_next_gen = gch->next_gen(this);
assert(_next_gen != NULL,
"This must be the youngest gen, and not the only gen");
@ -894,13 +927,19 @@ void ParNewGeneration::collect(bool full,
gch->save_marks();
assert(workers != NULL, "Need parallel worker threads.");
ParallelTaskTerminator _term(workers->total_workers(), task_queues());
ParScanThreadStateSet thread_state_set(workers->total_workers(),
int n_workers = active_workers;
// Set the correct parallelism (number of queues) in the reference processor
ref_processor()->set_active_mt_degree(n_workers);
// Always set the terminator for the active number of workers
// because only those workers go through the termination protocol.
ParallelTaskTerminator _term(n_workers, task_queues());
ParScanThreadStateSet thread_state_set(workers->active_workers(),
*to(), *this, *_next_gen, *task_queues(),
_overflow_stacks, desired_plab_sz(), _term);
ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
int n_workers = workers->total_workers();
gch->set_par_threads(n_workers);
gch->rem_set()->prepare_for_younger_refs_iterate(true);
// It turns out that even when we're using 1 thread, doing the work in a
@ -914,7 +953,8 @@ void ParNewGeneration::collect(bool full,
GenCollectedHeap::StrongRootsScope srs(gch);
tsk.work(0);
}
thread_state_set.reset(promotion_failed());
thread_state_set.reset(0 /* Bad value in debug if not reset */,
promotion_failed());
// Process (weak) reference objects found during scavenge.
ReferenceProcessor* rp = ref_processor();
@ -927,6 +967,8 @@ void ParNewGeneration::collect(bool full,
EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
&scan_without_gc_barrier, &scan_with_gc_barrier);
rp->setup_policy(clear_all_soft_refs);
// Can the mt_degree be set later (at run_task() time would be best)?
rp->set_active_mt_degree(active_workers);
if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
rp->process_discovered_references(&is_alive, &keep_alive,

View File

@ -240,6 +240,10 @@ public:
HeapWord* young_old_boundary() { return _young_old_boundary; }
void work(int i);
// Reset the terminator in ParScanThreadStateSet for
// "active_workers" threads.
virtual void set_for_termination(int active_workers);
};
class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {

View File

@ -223,7 +223,8 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
MutableSpace* sp,
HeapWord* space_top,
PSPromotionManager* pm,
uint stripe_number) {
uint stripe_number,
uint stripe_total) {
int ssize = 128; // Naked constant! Work unit = 64k.
int dirty_card_count = 0;
@ -231,7 +232,11 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
jbyte* start_card = byte_for(sp->bottom());
jbyte* end_card = byte_for(sp_top - 1) + 1;
oop* last_scanned = NULL; // Prevent scanning objects more than once
for (jbyte* slice = start_card; slice < end_card; slice += ssize*ParallelGCThreads) {
// The width of the stripe ssize*stripe_total must be
// consistent with the number of stripes so that the complete slice
// is covered.
size_t slice_width = ssize * stripe_total;
for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
jbyte* worker_start_card = slice + stripe_number * ssize;
if (worker_start_card >= end_card)
return; // We're done.
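
A standalone model of the striping arithmetic above, with hypothetical sizes; it only demonstrates that stripes of width ssize, stepped by ssize * stripe_total, tile the card table with no gaps or overlap:

    #include <cstdio>

    int main() {
      const int ssize = 128;        // cards per stripe window (from the diff)
      const int stripe_total = 4;   // assumed number of parallel workers
      const int n_cards = 1024;     // hypothetical card-table size
      for (int stripe = 0; stripe < stripe_total; stripe++) {
        for (int slice = 0; slice < n_cards; slice += ssize * stripe_total) {
          int start = slice + stripe * ssize;
          if (start >= n_cards) break;  // mirrors "We're done." above
          printf("worker %d scans cards [%d, %d)\n",
                 stripe, start, start + ssize);
        }
      }
      return 0;
    }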

View File

@ -69,7 +69,8 @@ class CardTableExtension : public CardTableModRefBS {
MutableSpace* sp,
HeapWord* space_top,
PSPromotionManager* pm,
uint stripe_number);
uint stripe_number,
uint stripe_total);
// Verification
static void verify_all_young_refs_imprecise();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/gcTaskThread.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
@ -52,6 +53,9 @@ const char* GCTask::Kind::to_string(kind value) {
case noop_task:
result = "noop task";
break;
case idle_task:
result = "idle task";
break;
}
return result;
};
@ -181,6 +185,7 @@ void GCTaskQueue::enqueue(GCTask* task) {
}
set_insert_end(task);
increment_length();
verify_length();
if (TraceGCTaskQueue) {
print("after:");
}
@ -192,7 +197,7 @@ void GCTaskQueue::enqueue(GCTaskQueue* list) {
tty->print_cr("[" INTPTR_FORMAT "]"
" GCTaskQueue::enqueue(list: "
INTPTR_FORMAT ")",
this);
this, list);
print("before:");
list->print("list:");
}
@ -211,14 +216,15 @@ void GCTaskQueue::enqueue(GCTaskQueue* list) {
list->remove_end()->set_older(insert_end());
insert_end()->set_newer(list->remove_end());
set_insert_end(list->insert_end());
set_length(length() + list_length);
// empty the argument list.
}
set_length(length() + list_length);
list->initialize();
if (TraceGCTaskQueue) {
print("after:");
list->print("list:");
}
verify_length();
}
// Dequeue one task.
@ -288,6 +294,7 @@ GCTask* GCTaskQueue::remove() {
decrement_length();
assert(result->newer() == NULL, "shouldn't be on queue");
assert(result->older() == NULL, "shouldn't be on queue");
verify_length();
return result;
}
@ -311,22 +318,40 @@ GCTask* GCTaskQueue::remove(GCTask* task) {
result->set_newer(NULL);
result->set_older(NULL);
decrement_length();
verify_length();
return result;
}
NOT_PRODUCT(
// Count the elements in the queue and verify the length against
// that count.
void GCTaskQueue::verify_length() const {
uint count = 0;
for (GCTask* element = insert_end();
element != NULL;
element = element->older()) {
count++;
}
assert(count == length(), "Length does not match queue");
}
void GCTaskQueue::print(const char* message) const {
tty->print_cr("[" INTPTR_FORMAT "] GCTaskQueue:"
" insert_end: " INTPTR_FORMAT
" remove_end: " INTPTR_FORMAT
" length: %d"
" %s",
this, insert_end(), remove_end(), message);
this, insert_end(), remove_end(), length(), message);
uint count = 0;
for (GCTask* element = insert_end();
element != NULL;
element = element->older()) {
element->print(" ");
count++;
tty->cr();
}
tty->print("Total tasks: %d", count);
}
)
@ -351,12 +376,16 @@ SynchronizedGCTaskQueue::~SynchronizedGCTaskQueue() {
//
GCTaskManager::GCTaskManager(uint workers) :
_workers(workers),
_active_workers(0),
_idle_workers(0),
_ndc(NULL) {
initialize();
}
GCTaskManager::GCTaskManager(uint workers, NotifyDoneClosure* ndc) :
_workers(workers),
_active_workers(0),
_idle_workers(0),
_ndc(ndc) {
initialize();
}
@ -373,6 +402,7 @@ void GCTaskManager::initialize() {
GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
_queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
_noop_task = NoopGCTask::create_on_c_heap();
_idle_inactive_task = WaitForBarrierGCTask::create_on_c_heap();
_resource_flag = NEW_C_HEAP_ARRAY(bool, workers());
{
// Set up worker threads.
@ -418,6 +448,8 @@ GCTaskManager::~GCTaskManager() {
assert(queue()->is_empty(), "still have queued work");
NoopGCTask::destroy(_noop_task);
_noop_task = NULL;
WaitForBarrierGCTask::destroy(_idle_inactive_task);
_idle_inactive_task = NULL;
if (_thread != NULL) {
for (uint i = 0; i < workers(); i += 1) {
GCTaskThread::destroy(thread(i));
@ -442,6 +474,86 @@ GCTaskManager::~GCTaskManager() {
}
}
void GCTaskManager::set_active_gang() {
_active_workers =
AdaptiveSizePolicy::calc_active_workers(workers(),
active_workers(),
Threads::number_of_non_daemon_threads());
assert(!all_workers_active() || active_workers() == ParallelGCThreads,
err_msg("all_workers_active() is incorrect: "
"active %d ParallelGCThreads %d", active_workers(),
ParallelGCThreads));
if (TraceDynamicGCThreads) {
gclog_or_tty->print_cr("GCTaskManager::set_active_gang(): "
"all_workers_active() %d workers %d "
"active %d ParallelGCThreads %d ",
all_workers_active(), workers(), active_workers(),
ParallelGCThreads);
}
}
// Create IdleGCTasks for inactive workers.
// Creates tasks in a ResourceArea and assumes
// an appropriate ResourceMark.
void GCTaskManager::task_idle_workers() {
{
int more_inactive_workers = 0;
{
// Stop any idle tasks from exiting their IdleGCTasks
// and get the count for additional IdleGCTasks under
// the GCTaskManager's monitor so that the "more_inactive_workers"
// count is correct.
MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
_idle_inactive_task->set_should_wait(true);
// active_workers is the number being requested. idle_workers
// is the number currently idle. If all the workers are being
// requested to be active but some are already idle, reduce
// the number of active_workers to be consistent with the
// number of idle_workers. The idle_workers are stuck in
// idle tasks and will no longer be released (since a new GC
// is starting). Try later to release enough idle_workers
// to allow the desired number of active_workers.
more_inactive_workers =
workers() - active_workers() - idle_workers();
if (more_inactive_workers < 0) {
int reduced_active_workers = active_workers() + more_inactive_workers;
set_active_workers(reduced_active_workers);
more_inactive_workers = 0;
}
if (TraceDynamicGCThreads) {
gclog_or_tty->print_cr("JT: %d workers %d active %d "
"idle %d more %d",
Threads::number_of_non_daemon_threads(),
workers(),
active_workers(),
idle_workers(),
more_inactive_workers);
}
}
GCTaskQueue* q = GCTaskQueue::create();
for(uint i = 0; i < (uint) more_inactive_workers; i++) {
q->enqueue(IdleGCTask::create_on_c_heap());
increment_idle_workers();
}
assert(workers() == active_workers() + idle_workers(),
"total workers should equal active + inactive");
add_list(q);
// GCTaskQueue* q was created in a ResourceArea so a
// destroy() call is not needed.
}
}
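A worked example of the negative-count adjustment in task_idle_workers(), using assumed counts:

    #include <cstdio>

    int main() {
      // Assumed counts chosen to hit the negative branch described above.
      int workers = 10, active = 8, idle = 4;
      int more_inactive = workers - active - idle;  // 10 - 8 - 4 = -2
      if (more_inactive < 0) {
        active += more_inactive;   // shrink the request: 8 -> 6
        more_inactive = 0;         // enqueue no new IdleGCTasks
      }
      // Invariant restored: workers == active + idle (10 == 6 + 4).
      printf("active=%d idle=%d new idle tasks=%d\n",
             active, idle, more_inactive);
      return 0;
    }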
void GCTaskManager::release_idle_workers() {
{
MutexLockerEx ml(monitor(),
Mutex::_no_safepoint_check_flag);
_idle_inactive_task->set_should_wait(false);
monitor()->notify_all();
// Release monitor
}
}
void GCTaskManager::print_task_time_stamps() {
for(uint i=0; i<ParallelGCThreads; i++) {
GCTaskThread* t = thread(i);
@ -510,6 +622,13 @@ void GCTaskManager::add_list(GCTaskQueue* list) {
// Release monitor().
}
// GC workers wait in get_task() for new work to be added
// to the GCTaskManager's queue. When new work is added,
// a notify is sent to the waiting GC workers which then
// compete to get tasks. If a GC worker wakes up and there
// is no work on the queue, it is given a noop_task to execute
// and then loops to find more work.
GCTask* GCTaskManager::get_task(uint which) {
GCTask* result = NULL;
// Grab the queue lock.
@ -558,8 +677,10 @@ GCTask* GCTaskManager::get_task(uint which) {
which, result, GCTask::Kind::to_string(result->kind()));
tty->print_cr(" %s", result->name());
}
increment_busy_workers();
increment_delivered_tasks();
if (!result->is_idle_task()) {
increment_busy_workers();
increment_delivered_tasks();
}
return result;
// Release monitor().
}
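
Below is a standalone model (std::mutex/std::condition_variable, not HotSpot code) of the get_task() protocol the comment above describes: workers block until work is enqueued, and a worker that wakes to an already-emptied queue is handed a noop so it can loop around and wait again.

    #include <condition_variable>
    #include <deque>
    #include <mutex>
    #include <string>

    struct TaskQueueModel {
      std::mutex lock;
      std::condition_variable cv;
      std::deque<std::string> tasks;

      std::string get_task() {
        std::unique_lock<std::mutex> ml(lock);
        if (tasks.empty()) {
          cv.wait(ml);                       // parked until work arrives
          if (tasks.empty()) return "noop";  // lost the race: loop via noop
        }
        std::string t = tasks.front();
        tasks.pop_front();
        return t;
      }

      void add_task(std::string t) {
        std::lock_guard<std::mutex> ml(lock);
        tasks.push_back(std::move(t));
        cv.notify_all();                     // workers compete for the task
      }
    };

    int main() {
      TaskQueueModel q;
      q.add_task("root scan");
      return q.get_task() == "root scan" ? 0 : 1;
    }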
@ -622,6 +743,7 @@ uint GCTaskManager::increment_busy_workers() {
uint GCTaskManager::decrement_busy_workers() {
assert(queue()->own_lock(), "don't own the lock");
assert(_busy_workers > 0, "About to make a mistake");
_busy_workers -= 1;
return _busy_workers;
}
@ -643,11 +765,34 @@ void GCTaskManager::note_release(uint which) {
set_resource_flag(which, false);
}
// "list" contains tasks that are ready to execute. Those
// tasks are added to the GCTaskManager's queue of tasks and
// then the GC workers are notified that there is new work to
// do.
//
// Typically different types of tasks can be added to the "list".
// For example in PSScavenge OldToYoungRootsTask, SerialOldToYoungRootsTask,
// ScavengeRootsTask, and StealTask tasks are all added to the list
// and then the GC workers are notified of new work. The tasks are
// handed out in the order in which they are added to the list
// (although execution is not necessarily in that order). As long
// as any tasks are running the GCTaskManager will wait for execution
// to complete. GC workers that execute a stealing task remain in
// the stealing task until all stealing tasks have completed. The load
// balancing afforded by the stealing tasks works best if the stealing
// tasks are added last to the list.
void GCTaskManager::execute_and_wait(GCTaskQueue* list) {
WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
list->enqueue(fin);
// The barrier task will be read by one of the GC
// workers once it is added to the list of tasks.
// Be sure that is globally visible before the
// GC worker reads it (which is after the task is added
// to the list of tasks below).
OrderAccess::storestore();
add_list(list);
fin->wait_for();
fin->wait_for(true /* reset */);
// We have to release the barrier tasks!
WaitForBarrierGCTask::destroy(fin);
}
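
A runnable toy illustrating the enqueue ordering the comment above prescribes — ordinary tasks first, one stealing task per active worker last, then the barrier that execute_and_wait() appends. The counts and task names are assumptions for the example, not HotSpot classes.

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      const unsigned active_workers = 4;    // assumed gang size
      std::vector<std::string> queue;
      for (unsigned i = 0; i < active_workers; i++) {
        queue.push_back("roots task");      // ordinary work first
      }
      for (unsigned i = 0; i < active_workers; i++) {
        queue.push_back("steal task");      // stealing tasks last
      }
      queue.push_back("wait-for-barrier");  // appended by execute_and_wait()
      for (const std::string& t : queue) {
        printf("%s\n", t.c_str());
      }
      return 0;
    }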
@ -691,6 +836,76 @@ void NoopGCTask::destruct() {
// Nothing else to do.
}
//
// IdleGCTask
//
IdleGCTask* IdleGCTask::create() {
IdleGCTask* result = new IdleGCTask(false);
assert(UseDynamicNumberOfGCThreads,
"Should only be used with dynamic GC thread");
return result;
}
IdleGCTask* IdleGCTask::create_on_c_heap() {
IdleGCTask* result = new(ResourceObj::C_HEAP) IdleGCTask(true);
assert(UseDynamicNumberOfGCThreads,
"Should only be used with dynamic GC thread");
return result;
}
void IdleGCTask::do_it(GCTaskManager* manager, uint which) {
WaitForBarrierGCTask* wait_for_task = manager->idle_inactive_task();
if (TraceGCTaskManager) {
tty->print_cr("[" INTPTR_FORMAT "]"
" IdleGCTask:::do_it()"
" should_wait: %s",
this, wait_for_task->should_wait() ? "true" : "false");
}
MutexLockerEx ml(manager->monitor(), Mutex::_no_safepoint_check_flag);
if (TraceDynamicGCThreads) {
gclog_or_tty->print_cr("--- idle %d", which);
}
// Increment has to be done when the idle tasks are created.
// manager->increment_idle_workers();
manager->monitor()->notify_all();
while (wait_for_task->should_wait()) {
if (TraceGCTaskManager) {
tty->print_cr("[" INTPTR_FORMAT "]"
" IdleGCTask::do_it()"
" [" INTPTR_FORMAT "] (%s)->wait()",
this, manager->monitor(), manager->monitor()->name());
}
manager->monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
}
manager->decrement_idle_workers();
if (TraceDynamicGCThreads) {
gclog_or_tty->print_cr("--- release %d", which);
}
if (TraceGCTaskManager) {
tty->print_cr("[" INTPTR_FORMAT "]"
" IdleGCTask::do_it() returns"
" should_wait: %s",
this, wait_for_task->should_wait() ? "true" : "false");
}
// Release monitor().
}
void IdleGCTask::destroy(IdleGCTask* that) {
if (that != NULL) {
that->destruct();
if (that->is_c_heap_obj()) {
FreeHeap(that);
}
}
}
void IdleGCTask::destruct() {
// This has to know it's superclass structure, just like the constructor.
this->GCTask::destruct();
// Nothing else to do.
}
//
// BarrierGCTask
//
@ -768,7 +983,8 @@ WaitForBarrierGCTask* WaitForBarrierGCTask::create() {
}
WaitForBarrierGCTask* WaitForBarrierGCTask::create_on_c_heap() {
WaitForBarrierGCTask* result = new WaitForBarrierGCTask(true);
WaitForBarrierGCTask* result =
new (ResourceObj::C_HEAP) WaitForBarrierGCTask(true);
return result;
}
@ -849,7 +1065,7 @@ void WaitForBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
}
}
void WaitForBarrierGCTask::wait_for() {
void WaitForBarrierGCTask::wait_for(bool reset) {
if (TraceGCTaskManager) {
tty->print_cr("[" INTPTR_FORMAT "]"
" WaitForBarrierGCTask::wait_for()"
@ -869,7 +1085,9 @@ void WaitForBarrierGCTask::wait_for() {
monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
}
// Reset the flag in case someone reuses this task.
set_should_wait(true);
if (reset) {
set_should_wait(true);
}
if (TraceGCTaskManager) {
tty->print_cr("[" INTPTR_FORMAT "]"
" WaitForBarrierGCTask::wait_for() returns"

View File

@ -45,6 +45,7 @@ class BarrierGCTask;
class ReleasingBarrierGCTask;
class NotifyingBarrierGCTask;
class WaitForBarrierGCTask;
class IdleGCTask;
// A free list of Monitor*'s.
class MonitorSupply;
@ -64,7 +65,8 @@ public:
unknown_task,
ordinary_task,
barrier_task,
noop_task
noop_task,
idle_task
};
static const char* to_string(kind value);
};
@ -108,6 +110,9 @@ public:
bool is_noop_task() const {
return kind()==Kind::noop_task;
}
bool is_idle_task() const {
return kind()==Kind::idle_task;
}
void print(const char* message) const PRODUCT_RETURN;
protected:
// Constructors: Only create subclasses.
@ -153,6 +158,7 @@ public:
assert(((insert_end() == NULL && remove_end() == NULL) ||
(insert_end() != NULL && remove_end() != NULL)),
"insert_end and remove_end don't match");
assert((insert_end() != NULL) || (_length == 0), "Not empty");
return insert_end() == NULL;
}
uint length() const {
@ -204,6 +210,8 @@ protected:
GCTask* remove(); // Remove from remove end.
GCTask* remove(GCTask* task); // Remove from the middle.
void print(const char* message) const PRODUCT_RETURN;
// Debug support
void verify_length() const PRODUCT_RETURN;
};
// A GCTaskQueue that can be synchronized.
@ -285,12 +293,76 @@ protected:
}
};
// Dynamic number of GC threads
//
// GC threads wait in get_task() for work (i.e., a task) to perform.
// When the number of GC threads was static, the number of tasks
// created to do a job was equal to or greater than the maximum
// number of GC threads (ParallelGCThreads). The job might be divided
// into a number of tasks greater than the number of GC threads for
// load balancing (i.e., over partitioning). The last task to be
// executed by a GC thread in a job is a work stealing task. A
// GC thread that gets a work stealing task continues to execute
// that task until the job is done. In the static number of GC threads
// case, tasks are added to a queue (FIFO). The work stealing tasks are
// the last to be added. Once the tasks are added, the GC threads grab
// a task and go. A single thread can do all the non-work stealing tasks
// and then execute a work stealing and wait for all the other GC threads
// to execute their work stealing task.
// In the dynamic number of GC threads implementation, idle-tasks are
// created to occupy the non-participating or "inactive" threads. An
// idle-task makes the GC thread wait on a barrier that is part of the
// GCTaskManager. The GC threads that have been "idled" in an IdleGCTask
// are released once all the active GC threads have finished their work
// stealing tasks. The GCTaskManager does not wait for all the "idled"
// GC threads to resume execution. When those GC threads do resume
// execution in the course of the thread scheduling, they call get_task()
// as all the other GC threads do. Because all the "idled" threads are
// not required to execute in order to finish a job, it is possible for
// a GC thread to still be "idled" when the next job is started. Such
// a thread stays "idled" for the next job. This can result in a new
// job not having all the expected active workers. For example if one
// job requests 4 active workers out of a total of 10 workers so the
// remaining 6 are "idled", and the next job requests 6 active workers
// but all 6 of the "idled" workers are still idle, then the next job
// will only get 4 active workers.
// The implementation for the parallel old compaction phase has an
// added complication. In the static case parold partitions the chunks
// ready to be filled into stacks, one for each GC thread. A GC thread
// executing a draining task (drains the stack of ready chunks)
// claims a stack according to its id (the unique ordinal value assigned
// to each GC thread). In the dynamic case not all GC threads will
// actively participate so stacks with ready to fill chunks can only be
// given to the active threads. An initial implementation chose stacks
// numbered 1-n to get the ready chunks and required that GC threads
// 1-n be the active workers. This was undesirable because it required
// certain threads to participate. In the final implementation a
// list of stacks equal in number to the active workers is filled
// with ready chunks. GC threads that participate get a stack from
// the task (DrainStacksCompactionTask), empty the stack, and then add it to a
// recycling list at the end of the task. If the same GC thread gets
// a second task, it gets a second stack to drain and returns it. The
// stacks are added to a recycling list so that later stealing tasks
// for this job can get a stack from the recycling list. Stealing tasks
// use the stacks in their work in a way similar to the draining tasks.
// A thread is not guaranteed to get anything but a stealing task and
// a thread that only gets a stealing task has to get a stack. A failed
// implementation tried to have the GC threads keep the stack they used
// during a draining task for later use in the stealing task but that didn't
// work because as noted a thread is not guaranteed to get a draining task.
//
// For PSScavenge and ParCompactionManager the GC threads are
// held in the GCTaskThread** _thread array in GCTaskManager.
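
Below is a standalone model of the idle-task barrier described above (std::condition_variable instead of the HotSpot Monitor; a sketch of the protocol, not the IdleGCTask implementation): "idled" workers park until release_idle_workers() flips the flag and notifies.

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct IdleBarrierModel {
      std::mutex lock;
      std::condition_variable cv;
      bool should_wait = true;

      // Body of an idle task: park until the manager releases idle workers.
      void idle_worker() {
        std::unique_lock<std::mutex> ml(lock);
        while (should_wait) {
          cv.wait(ml);
        }
      }

      // Equivalent of release_idle_workers(): flip the flag, wake everyone.
      void release_idle_workers() {
        std::lock_guard<std::mutex> ml(lock);
        should_wait = false;
        cv.notify_all();
      }

      // Equivalent of set_should_wait(true) at the start of the next job.
      void rearm() {
        std::lock_guard<std::mutex> ml(lock);
        should_wait = true;
      }
    };

    int main() {
      IdleBarrierModel barrier;
      std::thread idler([&] { barrier.idle_worker(); });  // an "idled" worker
      barrier.release_idle_workers();                     // job is complete
      idler.join();
      return 0;
    }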
class GCTaskManager : public CHeapObj {
friend class ParCompactionManager;
friend class PSParallelCompact;
friend class PSScavenge;
friend class PSRefProcTaskExecutor;
friend class RefProcTaskExecutor;
friend class GCTaskThread;
friend class IdleGCTask;
private:
// Instance state.
NotifyDoneClosure* _ndc; // Notify on completion.
@ -298,6 +370,7 @@ private:
Monitor* _monitor; // Notification of changes.
SynchronizedGCTaskQueue* _queue; // Queue of tasks.
GCTaskThread** _thread; // Array of worker threads.
uint _active_workers; // Number of active workers.
uint _busy_workers; // Number of busy workers.
uint _blocking_worker; // The worker that's blocking.
bool* _resource_flag; // Array of flag per threads.
@ -307,6 +380,8 @@ private:
uint _emptied_queue; // Times we emptied the queue.
NoopGCTask* _noop_task; // The NoopGCTask instance.
uint _noop_tasks; // Count of noop tasks.
WaitForBarrierGCTask* _idle_inactive_task;// Task for inactive workers
volatile uint _idle_workers; // Number of idled workers
public:
// Factory create and destroy methods.
static GCTaskManager* create(uint workers) {
@ -324,6 +399,9 @@ public:
uint busy_workers() const {
return _busy_workers;
}
volatile uint idle_workers() const {
return _idle_workers;
}
// Pun between Monitor* and Mutex*
Monitor* monitor() const {
return _monitor;
@ -331,6 +409,9 @@ public:
Monitor * lock() const {
return _monitor;
}
WaitForBarrierGCTask* idle_inactive_task() {
return _idle_inactive_task;
}
// Methods.
// Add the argument task to be run.
void add_task(GCTask* task);
@ -350,6 +431,10 @@ public:
bool should_release_resources(uint which); // Predicate.
// Note the release of resources by the argument worker.
void note_release(uint which);
// Create IdleGCTasks for inactive workers and start workers
void task_idle_workers();
// Release the workers in IdleGCTasks
void release_idle_workers();
// Constants.
// A sentinel worker identifier.
static uint sentinel_worker() {
@ -375,6 +460,15 @@ protected:
uint workers() const {
return _workers;
}
void set_active_workers(uint v) {
assert(v <= _workers, "Trying to set more workers active than there are");
_active_workers = MIN2(v, _workers);
assert(v != 0, "Trying to set active workers to 0");
_active_workers = MAX2(1U, _active_workers);
}
// Sets the number of threads that will be used in a collection
void set_active_gang();
NotifyDoneClosure* notify_done_closure() const {
return _ndc;
}
@ -457,8 +551,21 @@ protected:
void reset_noop_tasks() {
_noop_tasks = 0;
}
void increment_idle_workers() {
_idle_workers++;
}
void decrement_idle_workers() {
_idle_workers--;
}
// Other methods.
void initialize();
public:
// Return true if all workers are currently active.
bool all_workers_active() { return workers() == active_workers(); }
uint active_workers() const {
return _active_workers;
}
};
//
@ -475,6 +582,8 @@ public:
static NoopGCTask* create();
static NoopGCTask* create_on_c_heap();
static void destroy(NoopGCTask* that);
virtual char* name() { return (char *)"noop task"; }
// Methods from GCTask.
void do_it(GCTaskManager* manager, uint which) {
// Nothing to do.
@ -518,6 +627,8 @@ protected:
}
// Destructor-like method.
void destruct();
virtual char* name() { return (char *)"barrier task"; }
// Methods.
// Wait for this to be the only task running.
void do_it_internal(GCTaskManager* manager, uint which);
@ -586,11 +697,13 @@ protected:
// the BarrierGCTask is done.
// This may cover many of the uses of NotifyingBarrierGCTasks.
class WaitForBarrierGCTask : public BarrierGCTask {
friend class GCTaskManager;
friend class IdleGCTask;
private:
// Instance state.
Monitor* _monitor; // Guard and notify changes.
bool _should_wait; // true=>wait, false=>proceed.
const bool _is_c_heap_obj; // Was allocated on the heap.
Monitor* _monitor; // Guard and notify changes.
volatile bool _should_wait; // true=>wait, false=>proceed.
const bool _is_c_heap_obj; // Was allocated on the heap.
public:
virtual char* name() { return (char *) "waitfor-barrier-task"; }
@ -600,7 +713,10 @@ public:
static void destroy(WaitForBarrierGCTask* that);
// Methods.
void do_it(GCTaskManager* manager, uint which);
void wait_for();
void wait_for(bool reset);
void set_should_wait(bool value) {
_should_wait = value;
}
protected:
// Constructor. Clients use factory, but there might be subclasses.
WaitForBarrierGCTask(bool on_c_heap);
@ -613,14 +729,38 @@ protected:
bool should_wait() const {
return _should_wait;
}
void set_should_wait(bool value) {
_should_wait = value;
}
bool is_c_heap_obj() {
return _is_c_heap_obj;
}
};
// Task that is used to idle a GC task when fewer than
// the maximum workers are wanted.
class IdleGCTask : public GCTask {
const bool _is_c_heap_obj; // Was allocated on the heap.
public:
bool is_c_heap_obj() {
return _is_c_heap_obj;
}
// Factory create and destroy methods.
static IdleGCTask* create();
static IdleGCTask* create_on_c_heap();
static void destroy(IdleGCTask* that);
virtual char* name() { return (char *)"idle task"; }
// Methods from GCTask.
virtual void do_it(GCTaskManager* manager, uint which);
protected:
// Constructor.
IdleGCTask(bool on_c_heap) :
GCTask(GCTask::Kind::idle_task),
_is_c_heap_obj(on_c_heap) {
// Nothing to do.
}
// Destructor-like method.
void destruct();
};
class MonitorSupply : public AllStatic {
private:
// State.

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -93,6 +93,11 @@ void GCTaskThread::print_on(outputStream* st) const {
st->cr();
}
// GC workers get tasks from the GCTaskManager and execute
// them in this method. If there are no tasks to execute,
// the GC workers wait in the GCTaskManager's get_task()
// for tasks to be enqueued for execution.
void GCTaskThread::run() {
// Set up the thread for stack overflow support
this->record_stack_base_and_size();
@ -124,7 +129,8 @@ void GCTaskThread::run() {
for (; /* break */; ) {
// This will block until there is a task to be gotten.
GCTask* task = manager()->get_task(which());
// Record if this is an idle task for later use.
bool is_idle_task = task->is_idle_task();
// In case the update is costly
if (PrintGCTaskTimeStamps) {
timer.update();
@ -133,19 +139,33 @@ void GCTaskThread::run() {
jlong entry_time = timer.ticks();
char* name = task->name();
// If this is the barrier task, it can be destroyed
// by the GC task manager once the do_it() executes.
task->do_it(manager(), which());
manager()->note_completion(which());
if (PrintGCTaskTimeStamps) {
assert(_time_stamps != NULL, "Sanity (PrintGCTaskTimeStamps set late?)");
// Use the saved value of is_idle_task because references
// using "task" are not reliable for the barrier task.
if (!is_idle_task) {
manager()->note_completion(which());
timer.update();
if (PrintGCTaskTimeStamps) {
assert(_time_stamps != NULL,
"Sanity (PrintGCTaskTimeStamps set late?)");
GCTaskTimeStamp* time_stamp = time_stamp_at(_time_stamp_index++);
timer.update();
time_stamp->set_name(name);
time_stamp->set_entry_time(entry_time);
time_stamp->set_exit_time(timer.ticks());
GCTaskTimeStamp* time_stamp = time_stamp_at(_time_stamp_index++);
time_stamp->set_name(name);
time_stamp->set_entry_time(entry_time);
time_stamp->set_exit_time(timer.ticks());
}
} else {
// idle tasks complete outside the normal accounting
// so that a task can complete without waiting for idle tasks.
// They have to be terminated separately.
IdleGCTask::destroy((IdleGCTask*)task);
set_is_working(true);
}
// Check if we should release our inner resources.

View File

@ -35,6 +35,7 @@ class GCTaskTimeStamp;
class GCTaskManager;
class GCTaskThread : public WorkerThread {
friend class GCTaskManager;
private:
// Instance state.
GCTaskManager* _manager; // Manager for worker.
@ -45,6 +46,8 @@ private:
GCTaskTimeStamp* time_stamp_at(uint index);
bool _is_working; // True if participating in GC tasks
public:
// Factory create and destroy methods.
static GCTaskThread* create(GCTaskManager* manager,
@ -84,6 +87,7 @@ protected:
uint processor_id() const {
return _processor_id;
}
void set_is_working(bool v) { _is_working = v; }
};
class GCTaskTimeStamp : public CHeapObj

View File

@ -152,15 +152,16 @@ void RefProcTaskExecutor::execute(ProcessTask& task)
{
ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
uint active_gc_threads = heap->gc_task_manager()->active_workers();
RegionTaskQueueSet* qset = ParCompactionManager::region_array();
ParallelTaskTerminator terminator(parallel_gc_threads, qset);
ParallelTaskTerminator terminator(active_gc_threads, qset);
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<parallel_gc_threads; i++) {
q->enqueue(new RefProcTaskProxy(task, i));
}
if (task.marks_oops_alive()) {
if (parallel_gc_threads>1) {
for (uint j=0; j<parallel_gc_threads; j++) {
for (uint j=0; j<active_gc_threads; j++) {
q->enqueue(new StealMarkingTask(&terminator));
}
}
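Note that the ParallelTaskTerminator above is now sized with active_gc_threads, the number of workers that will actually call offer_termination(). A toy model (not HotSpot's implementation, which also lets a worker retract its offer and resume stealing) shows why the count has to match:

#include <atomic>

class TerminatorModel {
  std::atomic<unsigned> _offered;
  const unsigned _n_threads;
 public:
  explicit TerminatorModel(unsigned n_threads)
    : _offered(0), _n_threads(n_threads) {}
  // Each stealing worker calls this when its queues are empty; the
  // phase ends only once all _n_threads workers have offered. Sizing
  // the terminator with the number of *created* rather than *active*
  // workers would make the count unreachable and hang the phase.
  void offer_termination() {
    _offered.fetch_add(1);
    while (_offered.load() < _n_threads) {
      // Spin. The real terminator would let the worker retract the
      // offer and go back to stealing if new work appears.
    }
  }
};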
@ -216,7 +217,6 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
// StealRegionCompactionTask
//
StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
_terminator(t) {}
@ -229,6 +229,32 @@ void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
// If not all threads are active, get a draining stack
// from the list. Else, just use this thread's draining stack.
uint which_stack_index;
bool use_all_workers = manager->all_workers_active();
if (use_all_workers) {
which_stack_index = which;
assert(manager->active_workers() == ParallelGCThreads,
err_msg("all_workers_active has been incorrectly set: "
" active %d ParallelGCThreads %d", manager->active_workers(),
ParallelGCThreads));
} else {
which_stack_index = ParCompactionManager::pop_recycled_stack_index();
}
cm->set_region_stack_index(which_stack_index);
cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
if (TraceDynamicGCThreads) {
gclog_or_tty->print_cr("StealRegionCompactionTask::do_it "
"region_stack_index %d region_stack = 0x%x "
" empty (%d) use all workers %d",
which_stack_index, ParCompactionManager::region_list(which_stack_index),
cm->region_stack()->is_empty(),
use_all_workers);
}
// Has to drain stacks first because there may be regions
// preloaded onto the stack and this thread may never have
// done a draining task. Are the draining tasks needed?
@ -285,6 +311,50 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
uint which_stack_index;
bool use_all_workers = manager->all_workers_active();
if (use_all_workers) {
which_stack_index = which;
assert(manager->active_workers() == ParallelGCThreads,
err_msg("all_workers_active has been incorrectly set: "
" active %d ParallelGCThreads %d", manager->active_workers(),
ParallelGCThreads));
} else {
which_stack_index = stack_index();
}
cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
if (TraceDynamicGCThreads) {
gclog_or_tty->print_cr("DrainStacksCompactionTask::do_it which = %d "
"which_stack_index = %d/empty(%d) "
"use all workers %d",
which, which_stack_index,
cm->region_stack()->is_empty(),
use_all_workers);
}
cm->set_region_stack_index(which_stack_index);
// Process any regions already in the compaction manager's stacks.
cm->drain_region_stacks();
assert(cm->region_stack()->is_empty(), "Not empty");
if (!use_all_workers) {
// Always give up the region stack.
assert(cm->region_stack() ==
ParCompactionManager::region_list(cm->region_stack_index()),
"region_stack and region_stack_index are inconsistent");
ParCompactionManager::push_recycled_stack_index(cm->region_stack_index());
if (TraceDynamicGCThreads) {
void* old_region_stack = (void*) cm->region_stack();
int old_region_stack_index = cm->region_stack_index();
gclog_or_tty->print_cr("Pushing region stack 0x%x/%d",
old_region_stack, old_region_stack_index);
}
cm->set_region_stack(NULL);
cm->set_region_stack_index((uint)max_uintx);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,6 +39,9 @@
PSOldGen* ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL;
RegionTaskQueue** ParCompactionManager::_region_list = NULL;
OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
ParCompactionManager::_objarray_queues = NULL;
@ -46,8 +49,14 @@ ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
uint* ParCompactionManager::_recycled_stack_index = NULL;
int ParCompactionManager::_recycled_top = -1;
int ParCompactionManager::_recycled_bottom = -1;
ParCompactionManager::ParCompactionManager() :
_action(CopyAndUpdate) {
_action(CopyAndUpdate),
_region_stack(NULL),
_region_stack_index((uint)max_uintx) {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@ -57,7 +66,10 @@ ParCompactionManager::ParCompactionManager() :
marking_stack()->initialize();
_objarray_stack.initialize();
region_stack()->initialize();
}
ParCompactionManager::~ParCompactionManager() {
delete _recycled_stack_index;
}
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
@ -72,6 +84,19 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
_manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
guarantee(_manager_array != NULL, "Could not allocate manager_array");
_region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*,
parallel_gc_threads+1);
guarantee(_region_list != NULL, "Could not initialize promotion manager");
_recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads);
// parallel_gc_threads + 1 to be consistent with the number of
// compaction managers.
for(uint i=0; i<parallel_gc_threads + 1; i++) {
_region_list[i] = new RegionTaskQueue();
region_list(i)->initialize();
}
_stack_array = new OopTaskQueueSet(parallel_gc_threads);
guarantee(_stack_array != NULL, "Could not allocate stack_array");
_objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
@ -85,7 +110,7 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
stack_array()->register_queue(i, _manager_array[i]->marking_stack());
_objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
region_array()->register_queue(i, _manager_array[i]->region_stack());
region_array()->register_queue(i, region_list(i));
}
// The VMThread gets its own ParCompactionManager, which is not available
@ -97,6 +122,29 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
"Not initialized?");
}
int ParCompactionManager::pop_recycled_stack_index() {
assert(_recycled_bottom <= _recycled_top, "list is empty");
// Get the next available index
if (_recycled_bottom < _recycled_top) {
uint cur, next, last;
do {
cur = _recycled_bottom;
next = cur + 1;
last = Atomic::cmpxchg(next, &_recycled_bottom, cur);
} while (cur != last);
return _recycled_stack_index[next];
} else {
return -1;
}
}
void ParCompactionManager::push_recycled_stack_index(uint v) {
// Get the next available index
int cur = Atomic::add(1, &_recycled_top);
_recycled_stack_index[cur] = v;
assert(_recycled_bottom <= _recycled_top, "list top and bottom are wrong");
}
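The recycled-index list above is a bounded array claimed with atomic arithmetic: push is a fetch-and-add on the top cursor, pop advances the bottom cursor with compare-and-swap. A stand-alone sketch of the same pattern, with std::atomic standing in for HotSpot's Atomic::add/Atomic::cmpxchg (names and capacity are illustrative; the real array is sized to the number of GC threads):

#include <atomic>

static unsigned g_recycled[64];
static std::atomic<int> g_top(-1);     // index of the last entry pushed
static std::atomic<int> g_bottom(-1);  // index of the last entry popped

void push_recycled(unsigned v) {
  int slot = g_top.fetch_add(1) + 1;   // claim the next free slot
  g_recycled[slot] = v;
}

int pop_recycled() {
  int cur = g_bottom.load();
  while (cur < g_top.load()) {
    // Advance bottom from cur to cur + 1; the winner owns entry cur + 1.
    if (g_bottom.compare_exchange_weak(cur, cur + 1)) {
      return (int) g_recycled[cur + 1];
    }
    // On failure cur was reloaded; retry against the new bottom.
  }
  return -1;  // empty: the caller keeps the stack from its own task
}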
bool ParCompactionManager::should_update() {
assert(action() != NotValid, "Action is not set");
return (action() == ParCompactionManager::Update) ||
@ -111,14 +159,13 @@ bool ParCompactionManager::should_copy() {
(action() == ParCompactionManager::UpdateAndCopy);
}
bool ParCompactionManager::should_verify_only() {
assert(action() != NotValid, "Action is not set");
return action() == ParCompactionManager::VerifyUpdate;
void ParCompactionManager::region_list_push(uint list_index,
size_t region_index) {
region_list(list_index)->push(region_index);
}
bool ParCompactionManager::should_reset_only() {
assert(action() != NotValid, "Action is not set");
return action() == ParCompactionManager::ResetObjects;
void ParCompactionManager::verify_region_list_empty(uint list_index) {
assert(region_list(list_index)->is_empty(), "Not empty");
}
ParCompactionManager*

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,6 +48,7 @@ class ParCompactionManager : public CHeapObj {
friend class StealRegionCompactionTask;
friend class UpdateAndFillClosure;
friend class RefProcTaskExecutor;
friend class IdleGCTask;
public:
@ -58,8 +59,6 @@ class ParCompactionManager : public CHeapObj {
Copy,
UpdateAndCopy,
CopyAndUpdate,
VerifyUpdate,
ResetObjects,
NotValid
};
// ------------------------ End don't putback if not needed
@ -85,7 +84,31 @@ private:
// Is there a way to reuse the _marking_stack for
// saving the empty regions? For now just create a different
// type of TaskQueue.
RegionTaskQueue _region_stack;
RegionTaskQueue* _region_stack;
static RegionTaskQueue** _region_list;
// Index in _region_list for current _region_stack.
uint _region_stack_index;
// Indexes of recycled region stacks/overflow stacks
// Stacks of regions to be compacted are embedded in the tasks doing
// the compaction. A thread that executes the task extracts the
// region stack and drains it. These threads keep these region
// stacks for use during compaction task stealing. If a thread
// gets a second draining task, it pushes its current region stack
// index into the array _recycled_stack_index and gets a new
// region stack from the task. A thread that is executing a
// compaction stealing task without ever having executed a
// draining task will get a region stack from _recycled_stack_index.
//
// Array of indexes into the array of region stacks.
static uint* _recycled_stack_index;
// The index into _recycled_stack_index of the last region stack index
// pushed. If -1, there are no entries in _recycled_stack_index.
static int _recycled_top;
// The index into _recycled_stack_index of the last region stack index
// popped. If -1, there has not been any entry popped.
static int _recycled_bottom;
Stack<Klass*> _revisit_klass_stack;
Stack<DataLayout*> _revisit_mdo_stack;
@ -104,7 +127,6 @@ private:
// Array of tasks. Needed by the ParallelTaskTerminator.
static RegionTaskQueueSet* region_array() { return _region_array; }
OverflowTaskQueue<oop>* marking_stack() { return &_marking_stack; }
RegionTaskQueue* region_stack() { return &_region_stack; }
// Pushes onto the marking stack. If the marking stack is full,
// pushes onto the overflow stack.
@ -116,10 +138,33 @@ private:
Action action() { return _action; }
void set_action(Action v) { _action = v; }
RegionTaskQueue* region_stack() { return _region_stack; }
void set_region_stack(RegionTaskQueue* v) { _region_stack = v; }
inline static ParCompactionManager* manager_array(int index);
ParCompactionManager();
inline static RegionTaskQueue* region_list(int index) {
return _region_list[index];
}
uint region_stack_index() { return _region_stack_index; }
void set_region_stack_index(uint v) { _region_stack_index = v; }
// Pop and push unique reusable stack index
static int pop_recycled_stack_index();
static void push_recycled_stack_index(uint v);
static void reset_recycled_stack_index() {
_recycled_bottom = _recycled_top = -1;
}
ParCompactionManager();
~ParCompactionManager();
// Pushes onto the region stack at the given index. If the
// region stack is full,
// pushes onto the region overflow stack.
static void region_list_push(uint stack_index, size_t region_index);
static void verify_region_list_empty(uint stack_index);
ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }
// Take actions in preparation for a compaction.
@ -129,8 +174,6 @@ private:
bool should_update();
bool should_copy();
bool should_verify_only();
bool should_reset_only();
Stack<Klass*>* revisit_klass_stack() { return &_revisit_klass_stack; }
Stack<DataLayout*>* revisit_mdo_stack() { return &_revisit_mdo_stack; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,7 +96,8 @@ void PSMarkSweepDecorator::precompact() {
* by the MarkSweepAlwaysCompactCount parameter. This is a significant
* performance improvement!
*/
bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
bool skip_dead = (MarkSweepAlwaysCompactCount < 1)
|| ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
size_t allowed_deadspace = 0;
if (skip_dead) {

View File

@ -2045,6 +2045,11 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ResourceMark rm;
HandleMark hm;
// Set the number of GC threads to be used in this collection
gc_task_manager()->set_active_gang();
gc_task_manager()->task_idle_workers();
heap->set_par_threads(gc_task_manager()->active_workers());
const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
// This is useful for debugging but don't change the output the
@ -2197,6 +2202,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
// Track memory usage and detect low memory
MemoryService::track_memory_usage();
heap->update_counters();
gc_task_manager()->release_idle_workers();
}
#ifdef ASSERT
@ -2204,7 +2210,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ParCompactionManager* const cm =
ParCompactionManager::manager_array(int(i));
assert(cm->marking_stack()->is_empty(), "should be empty");
assert(cm->region_stack()->is_empty(), "should be empty");
assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
assert(cm->revisit_klass_stack()->is_empty(), "should be empty");
}
#endif // ASSERT
@ -2351,8 +2357,9 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
ParallelScavengeHeap* heap = gc_heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
uint active_gc_threads = heap->gc_task_manager()->active_workers();
TaskQueueSetSuper* qset = ParCompactionManager::region_array();
ParallelTaskTerminator terminator(parallel_gc_threads, qset);
ParallelTaskTerminator terminator(active_gc_threads, qset);
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
@ -2374,21 +2381,13 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
if (parallel_gc_threads > 1) {
for (uint j = 0; j < parallel_gc_threads; j++) {
if (active_gc_threads > 1) {
for (uint j = 0; j < active_gc_threads; j++) {
q->enqueue(new StealMarkingTask(&terminator));
}
}
WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
q->enqueue(fin);
gc_task_manager()->add_list(q);
fin->wait_for();
// We have to release the barrier tasks!
WaitForBarrierGCTask::destroy(fin);
gc_task_manager()->execute_and_wait(q);
}
// Process reference objects found during marking
@ -2483,10 +2482,22 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
{
TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
for (unsigned int j = 0; j < task_count; j++) {
// Find the threads that are active
unsigned int which = 0;
const uint task_count = MAX2(parallel_gc_threads, 1U);
for (uint j = 0; j < task_count; j++) {
q->enqueue(new DrainStacksCompactionTask(j));
ParCompactionManager::verify_region_list_empty(j);
// Set the region stack variables to "no" region stack values
// so that they will be recognized as needing a region stack
// in the stealing tasks if they do not get one by executing
// a draining task.
ParCompactionManager* cm = ParCompactionManager::manager_array(j);
cm->set_region_stack(NULL);
cm->set_region_stack_index((uint)max_uintx);
}
ParCompactionManager::reset_recycled_stack_index();
// Find all regions that are available (can be filled immediately) and
// distribute them to the thread stacks. The iteration is done in reverse
@ -2495,8 +2506,10 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
const ParallelCompactData& sd = PSParallelCompact::summary_data();
size_t fillable_regions = 0; // A count for diagnostic purposes.
unsigned int which = 0; // The worker thread number.
// A region index which corresponds to the tasks created above.
// "which" must be 0 <= which < task_count
which = 0;
for (unsigned int id = to_space_id; id > perm_space_id; --id) {
SpaceInfo* const space_info = _space_info + id;
MutableSpace* const space = space_info->space();
@ -2509,8 +2522,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
if (sd.region(cur)->claim_unsafe()) {
ParCompactionManager* cm = ParCompactionManager::manager_array(which);
cm->push_region(cur);
ParCompactionManager::region_list_push(which, cur);
if (TraceParallelOldGCCompactionPhase && Verbose) {
const size_t count_mod_8 = fillable_regions & 7;
@ -2521,8 +2533,10 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
NOT_PRODUCT(++fillable_regions;)
// Assign regions to threads in round-robin fashion.
// Assign regions to tasks in round-robin fashion.
if (++which == task_count) {
assert(which <= parallel_gc_threads,
"Inconsistent number of workers");
which = 0;
}
}
@ -2642,26 +2656,19 @@ void PSParallelCompact::compact() {
PSOldGen* old_gen = heap->old_gen();
old_gen->start_array()->reset();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
uint active_gc_threads = heap->gc_task_manager()->active_workers();
TaskQueueSetSuper* qset = ParCompactionManager::region_array();
ParallelTaskTerminator terminator(parallel_gc_threads, qset);
ParallelTaskTerminator terminator(active_gc_threads, qset);
GCTaskQueue* q = GCTaskQueue::create();
enqueue_region_draining_tasks(q, parallel_gc_threads);
enqueue_dense_prefix_tasks(q, parallel_gc_threads);
enqueue_region_stealing_tasks(q, &terminator, parallel_gc_threads);
enqueue_region_draining_tasks(q, active_gc_threads);
enqueue_dense_prefix_tasks(q, active_gc_threads);
enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
{
TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
q->enqueue(fin);
gc_task_manager()->add_list(q);
fin->wait_for();
// We have to release the barrier tasks!
WaitForBarrierGCTask::destroy(fin);
gc_task_manager()->execute_and_wait(q);
#ifdef ASSERT
// Verify that all regions have been processed before the deferred updates.
@ -2729,6 +2736,9 @@ void
PSParallelCompact::follow_weak_klass_links() {
// All klasses on the revisit stack are marked at this point.
// Update and follow all subklass, sibling and implementor links.
// Check all the stacks here even if not all the workers are active.
// There is no accounting which indicates which stacks might have
// contents to be followed.
if (PrintRevisitStats) {
gclog_or_tty->print_cr("#classes in system dictionary = %d",
SystemDictionary::number_of_classes());
@ -3360,20 +3370,7 @@ PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
HeapWord* beg_addr = sp->bottom();
HeapWord* end_addr = sp->top();
#ifdef ASSERT
assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
if (cm->should_verify_only()) {
VerifyUpdateClosure verify_update(cm, sp);
bitmap->iterate(&verify_update, beg_addr, end_addr);
return;
}
if (cm->should_reset_only()) {
ResetObjectsClosure reset_objects(cm);
bitmap->iterate(&reset_objects, beg_addr, end_addr);
return;
}
#endif
const size_t beg_region = sd.addr_to_region_idx(beg_addr);
const size_t dp_region = sd.addr_to_region_idx(dp_addr);
@ -3492,35 +3489,6 @@ UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
return ParMarkBitMap::incomplete;
}
// Verify the new location using the forwarding pointer
// from MarkSweep::mark_sweep_phase2(). Set the mark_word
// to the initial value.
ParMarkBitMapClosure::IterationStatus
PSParallelCompact::VerifyUpdateClosure::do_addr(HeapWord* addr, size_t words) {
// The second arg (words) is not used.
oop obj = (oop) addr;
HeapWord* forwarding_ptr = (HeapWord*) obj->mark()->decode_pointer();
HeapWord* new_pointer = summary_data().calc_new_pointer(obj);
if (forwarding_ptr == NULL) {
// The object is dead or not moving.
assert(bitmap()->is_unmarked(obj) || (new_pointer == (HeapWord*) obj),
"Object liveness is wrong.");
return ParMarkBitMap::incomplete;
}
assert(HeapMaximumCompactionInterval > 1 || MarkSweepAlwaysCompactCount > 1 ||
forwarding_ptr == new_pointer, "new location is incorrect");
return ParMarkBitMap::incomplete;
}
// Reset objects modified for debug checking.
ParMarkBitMapClosure::IterationStatus
PSParallelCompact::ResetObjectsClosure::do_addr(HeapWord* addr, size_t words) {
// The second arg (words) is not used.
oop obj = (oop) addr;
obj->init_mark();
return ParMarkBitMap::incomplete;
}
// Prepare for compaction. This method is executed once
// (i.e., by a single thread) before compaction.
// Save the updated location of the intArrayKlassObj for

View File

@ -832,31 +832,6 @@ class PSParallelCompact : AllStatic {
virtual void do_code_blob(CodeBlob* cb) const { }
};
// Closure for verifying update of pointers. Does not
// have any side effects.
class VerifyUpdateClosure: public ParMarkBitMapClosure {
const MutableSpace* _space; // Is this ever used?
public:
VerifyUpdateClosure(ParCompactionManager* cm, const MutableSpace* sp) :
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), _space(sp)
{ }
virtual IterationStatus do_addr(HeapWord* addr, size_t words);
const MutableSpace* space() { return _space; }
};
// Closure for updating objects altered for debug checking
class ResetObjectsClosure: public ParMarkBitMapClosure {
public:
ResetObjectsClosure(ParCompactionManager* cm):
ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm)
{ }
virtual IterationStatus do_addr(HeapWord* addr, size_t words);
};
friend class KeepAliveClosure;
friend class FollowStackClosure;
friend class AdjustPointerClosure;
@ -1183,10 +1158,6 @@ class PSParallelCompact : AllStatic {
// Update the deferred objects in the space.
static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
// Mark pointer and follow contents.
template <class T>
static inline void mark_and_follow(ParCompactionManager* cm, T* p);
static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
static ParallelCompactData& summary_data() { return _summary_data; }
@ -1282,20 +1253,6 @@ inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
cm->follow_marking_stacks();
}
template <class T>
inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (mark_bitmap()->is_unmarked(obj)) {
if (mark_obj(obj)) {
obj->follow_contents(cm);
}
}
}
}
template <class T>
inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
T heap_oop = oopDesc::load_heap_oop(p);

View File

@ -181,28 +181,29 @@ class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<ParallelGCThreads; i++) {
GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
for(uint i=0; i < manager->active_workers(); i++) {
q->enqueue(new PSRefProcTaskProxy(task, i));
}
ParallelTaskTerminator terminator(
ParallelScavengeHeap::gc_task_manager()->workers(),
ParallelTaskTerminator terminator(manager->active_workers(),
(TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
if (task.marks_oops_alive() && ParallelGCThreads > 1) {
for (uint j=0; j<ParallelGCThreads; j++) {
if (task.marks_oops_alive() && manager->active_workers() > 1) {
for (uint j = 0; j < manager->active_workers(); j++) {
q->enqueue(new StealTask(&terminator));
}
}
ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
manager->execute_and_wait(q);
}
void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<ParallelGCThreads; i++) {
GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
for(uint i=0; i < manager->active_workers(); i++) {
q->enqueue(new PSRefEnqueueTaskProxy(task, i));
}
ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
manager->execute_and_wait(q);
}
// This method contains all heap specific policy for invoking scavenge.
@ -375,6 +376,14 @@ bool PSScavenge::invoke_no_policy() {
// Release all previously held resources
gc_task_manager()->release_all_resources();
// Set the number of GC threads to be used in this collection
gc_task_manager()->set_active_gang();
gc_task_manager()->task_idle_workers();
// Get the active number of workers here and use that value
// throughout the methods.
uint active_workers = gc_task_manager()->active_workers();
heap->set_par_threads(active_workers);
PSPromotionManager::pre_scavenge();
// We'll use the promotion manager again later.
@ -385,8 +394,9 @@ bool PSScavenge::invoke_no_policy() {
GCTaskQueue* q = GCTaskQueue::create();
for(uint i=0; i<ParallelGCThreads; i++) {
q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
uint stripe_total = active_workers;
for(uint i=0; i < stripe_total; i++) {
q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
}
q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));
@ -403,10 +413,10 @@ bool PSScavenge::invoke_no_policy() {
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
ParallelTaskTerminator terminator(
gc_task_manager()->workers(),
active_workers,
(TaskQueueSetSuper*) promotion_manager->stack_array_depth());
if (ParallelGCThreads>1) {
for (uint j=0; j<ParallelGCThreads; j++) {
if (active_workers > 1) {
for (uint j = 0; j < active_workers; j++) {
q->enqueue(new StealTask(&terminator));
}
}
@ -419,6 +429,7 @@ bool PSScavenge::invoke_no_policy() {
// Process reference objects discovered during scavenge
{
reference_processor()->setup_policy(false); // not always_clear
reference_processor()->set_active_mt_degree(active_workers);
PSKeepAliveClosure keep_alive(promotion_manager);
PSEvacuateFollowersClosure evac_followers(promotion_manager);
if (reference_processor()->processing_is_mt()) {
@ -622,6 +633,8 @@ bool PSScavenge::invoke_no_policy() {
// Track memory usage and detect low memory
MemoryService::track_memory_usage();
heap->update_counters();
gc_task_manager()->release_idle_workers();
}
if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
@ -804,6 +817,7 @@ void PSScavenge::initialize() {
// Initialize ref handling object for scavenging.
MemRegion mr = young_gen->reserved();
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing

View File

@ -202,7 +202,8 @@ void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
_gen->object_space(),
_gen_top,
pm,
_stripe_number);
_stripe_number,
_stripe_total);
// Do the real work
pm->drain_stacks(false);

View File

@ -135,16 +135,63 @@ class SerialOldToYoungRootsTask : public GCTask {
// OldToYoungRootsTask
//
// This task is used to scan old to young roots in parallel
//
// A GC thread executing this task divides the generation (old gen)
// into slices and takes a stripe in the slice as its part of the
// work.
//
// +===============+ slice 0
// | stripe 0 |
// +---------------+
// | stripe 1 |
// +---------------+
// | stripe 2 |
// +---------------+
// | stripe 3 |
// +===============+ slice 1
// | stripe 0 |
// +---------------+
// | stripe 1 |
// +---------------+
// | stripe 2 |
// +---------------+
// | stripe 3 |
// +===============+ slice 2
// ...
//
// A task is created for each stripe. In this case there are 4 tasks
// created. A GC thread first works on its stripe within slice 0
// and then moves to its stripe in the next slice until all stripes
// exceed the top of the generation. Note that having fewer GC threads
// than stripes works because all the tasks are executed so all stripes
// will be covered. In this example if 4 tasks have been created to cover
// all the stripes and there are only 3 threads, one of the threads will
// get the task with the 4th stripe. However, there is a dependence in
// CardTableExtension::scavenge_contents_parallel() on the number
// of tasks created. In scavenge_contents_parallel the distance
// to the next stripe is calculated based on the number of tasks.
// If the stripe width is ssize, a task's next stripe is at
// ssize * number_of_tasks (= slice_stride). In this case after
// finishing stripe 0 in slice 0, the thread finds stripe 0 in slice 1
// by adding slice_stride to the start of stripe 0 in slice 0 to get
// to the start of stripe 0 in slice 1.
class OldToYoungRootsTask : public GCTask {
private:
PSOldGen* _gen;
HeapWord* _gen_top;
uint _stripe_number;
uint _stripe_total;
public:
OldToYoungRootsTask(PSOldGen *gen, HeapWord* gen_top, uint stripe_number) :
_gen(gen), _gen_top(gen_top), _stripe_number(stripe_number) { }
OldToYoungRootsTask(PSOldGen *gen,
HeapWord* gen_top,
uint stripe_number,
uint stripe_total) :
_gen(gen),
_gen_top(gen_top),
_stripe_number(stripe_number),
_stripe_total(stripe_total) { }
char* name() { return (char *)"old-to-young-roots-task"; }
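The stripe walk described in the comment can be checked with a few lines of arithmetic. This toy program (all values illustrative) prints the address ranges each task would scan, stepping by slice_stride = ssize * stripe_total:

#include <cstdio>
#include <cstddef>

int main() {
  const size_t gen_bottom = 0;        // start of old gen (word index)
  const size_t gen_top = 100;         // top of old gen
  const size_t ssize = 16;            // stripe width
  const unsigned stripe_total = 4;    // tasks created == stripes per slice
  const size_t slice_stride = ssize * stripe_total;

  for (unsigned stripe = 0; stripe < stripe_total; ++stripe) {
    printf("task %u scans:", stripe);
    // Start at this task's stripe in slice 0, then step a whole slice.
    for (size_t s = gen_bottom + stripe * ssize; s < gen_top; s += slice_stride) {
      size_t e = (s + ssize < gen_top) ? s + ssize : gen_top;
      printf(" [%zu, %zu)", s, e);
    }
    printf("\n");
  }
  return 0;
}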

View File

@ -28,8 +28,10 @@
#include "memory/collectorPolicy.hpp"
#include "runtime/timer.hpp"
#include "utilities/ostream.hpp"
#include "utilities/workgroup.hpp"
elapsedTimer AdaptiveSizePolicy::_minor_timer;
elapsedTimer AdaptiveSizePolicy::_major_timer;
bool AdaptiveSizePolicy::_debug_perturbation = false;
// The throughput goal is implemented as
// _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
@ -88,6 +90,134 @@ AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
_young_gen_policy_is_ready = false;
}
// If the number of GC threads was set on the command line,
// use it.
// Else
// Calculate the number of GC threads based on the number of Java threads.
// Calculate the number of GC threads based on the size of the heap.
// Use the larger.
int AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
const uintx min_workers,
uintx active_workers,
uintx application_workers) {
// If the user has specifically set the number of
// GC threads, use them.
// If the user has turned off using a dynamic number of GC threads
// or the user has requested a specific number, set the active
// number of workers to all the workers.
uintx new_active_workers = total_workers;
uintx prev_active_workers = active_workers;
uintx active_workers_by_JT = 0;
uintx active_workers_by_heap_size = 0;
// Always use at least min_workers but use up to
// GCWorkersPerJavaThread * application threads.
active_workers_by_JT =
MAX2((uintx) GCWorkersPerJavaThread * application_workers,
min_workers);
// Choose a number of GC threads based on the current size
// of the heap. This may be complicated because the size of
// the heap depends on factors such as the thoughput goal.
// Still a large heap should be collected by more GC threads.
active_workers_by_heap_size =
MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
uintx max_active_workers =
MAX2(active_workers_by_JT, active_workers_by_heap_size);
// Limit the number of workers to the number created,
// (workers()).
new_active_workers = MIN2(max_active_workers,
(uintx) total_workers);
// Increase GC workers instantly but decrease them more
// slowly.
if (new_active_workers < prev_active_workers) {
new_active_workers =
MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
}
// Check once more that the number of workers is within the limits.
assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
assert(new_active_workers >= min_workers, "Minimum workers not observed");
assert(new_active_workers <= total_workers, "Total workers not observed");
if (ForceDynamicNumberOfGCThreads) {
// Assume this is debugging and jiggle the number of GC threads.
if (new_active_workers == prev_active_workers) {
if (new_active_workers < total_workers) {
new_active_workers++;
} else if (new_active_workers > min_workers) {
new_active_workers--;
}
}
if (new_active_workers == total_workers) {
if (_debug_perturbation) {
new_active_workers = min_workers;
}
_debug_perturbation = !_debug_perturbation;
}
assert((new_active_workers <= (uintx) ParallelGCThreads) &&
(new_active_workers >= min_workers),
"Jiggled active workers too much");
}
if (TraceDynamicGCThreads) {
gclog_or_tty->print_cr("GCTaskManager::calc_default_active_workers() : "
"active_workers(): %d new_acitve_workers: %d "
"prev_active_workers: %d\n"
" active_workers_by_JT: %d active_workers_by_heap_size: %d",
active_workers, new_active_workers, prev_active_workers,
active_workers_by_JT, active_workers_by_heap_size);
}
assert(new_active_workers > 0, "Always need at least 1");
return new_active_workers;
}
int AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
uintx active_workers,
uintx application_workers) {
// If the user has specifically set the number of
// GC threads, use them.
// If the user has turned off using a dynamic number of GC threads
// or the user has requested a specific number, set the active
// number of workers to all the workers.
int new_active_workers;
if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
new_active_workers = total_workers;
} else {
new_active_workers = calc_default_active_workers(total_workers,
2, /* Minimum number of workers */
active_workers,
application_workers);
}
assert(new_active_workers > 0, "Always need at least 1");
return new_active_workers;
}
int AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
uintx active_workers,
uintx application_workers) {
if (!UseDynamicNumberOfGCThreads ||
(!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
return ConcGCThreads;
} else {
int no_of_gc_threads = calc_default_active_workers(
total_workers,
1, /* Minimum number of workers */
active_workers,
application_workers);
return no_of_gc_threads;
}
}
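Distilled, the sizing heuristic above is max(workers-by-Java-threads, workers-by-heap-size), capped at the number of created workers, growing at once but shrinking by halves. A stand-alone sketch with the HotSpot globals replaced by plain parameters (all names and values illustrative):

#include <algorithm>
#include <cstdio>
#include <cstddef>

unsigned calc_default_active(unsigned total, unsigned min_w,
                             unsigned prev_active, unsigned app_threads,
                             size_t heap_bytes, size_t heap_per_gc_thread,
                             unsigned gc_per_java_thread = 2) {
  unsigned by_jt = std::max(gc_per_java_thread * app_threads, min_w);
  unsigned by_heap =
    (unsigned) std::max<size_t>(2, heap_bytes / heap_per_gc_thread);
  unsigned wanted = std::min(std::max(by_jt, by_heap), total);
  if (wanted < prev_active) {            // shrink slowly, grow at once
    wanted = std::max(min_w, (prev_active + wanted) / 2);
  }
  return wanted;
}

int main() {
  // 8 created workers, 3 Java threads, 1 GB heap, 64 MB per GC thread.
  printf("%u\n", calc_default_active(8, 2, 8, 3, 1u << 30, 64u << 20));
  return 0;
}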
bool AdaptiveSizePolicy::tenuring_threshold_change() const {
return decrement_tenuring_threshold_for_gc_cost() ||
increment_tenuring_threshold_for_gc_cost() ||

View File

@ -187,6 +187,8 @@ class AdaptiveSizePolicy : public CHeapObj {
julong _young_gen_change_for_minor_throughput;
julong _old_gen_change_for_major_throughput;
static const uint GCWorkersPerJavaThread = 2;
// Accessors
double gc_pause_goal_sec() const { return _gc_pause_goal_sec; }
@ -331,6 +333,8 @@ class AdaptiveSizePolicy : public CHeapObj {
// Return true if the policy suggested a change.
bool tenuring_threshold_change() const;
static bool _debug_perturbation;
public:
AdaptiveSizePolicy(size_t init_eden_size,
size_t init_promo_size,
@ -338,6 +342,31 @@ class AdaptiveSizePolicy : public CHeapObj {
double gc_pause_goal_sec,
uint gc_cost_ratio);
// Return the default number of GC threads to use in the next GC.
static int calc_default_active_workers(uintx total_workers,
const uintx min_workers,
uintx active_workers,
uintx application_workers);
// Return number of GC threads to use in the next GC.
// This is called sparingly so as not to change the
// number of GC workers gratuitously.
// For ParNew collections
// For PS scavenge and ParOld collections
// For G1 evacuation pauses (subject to update)
// Other collection phases inherit the number of
// GC workers from the calls above. For example,
// a CMS parallel remark uses the same number of GC
// workers as the most recent ParNew collection.
static int calc_active_workers(uintx total_workers,
uintx active_workers,
uintx application_workers);
// Return number of GC threads to use in the next concurrent GC phase.
static int calc_active_conc_workers(uintx total_workers,
uintx active_workers,
uintx application_workers);
bool is_gc_cms_adaptive_size_policy() {
return kind() == _gc_cms_adaptive_size_policy;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -196,8 +196,6 @@ class MarkSweep : AllStatic {
static void mark_object(oop obj);
// Mark pointer and follow contents. Empty marking stack afterwards.
template <class T> static inline void follow_root(T* p);
// Mark pointer and follow contents.
template <class T> static inline void mark_and_follow(T* p);
// Check mark and maybe push on marking stack
template <class T> static inline void mark_and_push(T* p);
static inline void push_objarray(oop obj, size_t index);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,18 +63,6 @@ template <class T> inline void MarkSweep::follow_root(T* p) {
follow_stack();
}
template <class T> inline void MarkSweep::mark_and_follow(T* p) {
// assert(Universe::heap()->is_in_reserved(p), "should be in object space");
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
mark_object(obj);
obj->follow_contents();
}
}
}
template <class T> inline void MarkSweep::mark_and_push(T* p) {
// assert(Universe::heap()->is_in_reserved(p), "should be in object space");
T heap_oop = oopDesc::load_heap_oop(p);

View File

@ -460,9 +460,43 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
OopsInGenClosure* cl,
CardTableRS* ct) {
if (!mr.is_empty()) {
int n_threads = SharedHeap::heap()->n_par_threads();
if (n_threads > 0) {
// Caller (process_strong_roots()) claims that all GC threads
// execute this call. With UseDynamicNumberOfGCThreads now all
// active GC threads execute this call. The number of active GC
// threads needs to be passed to par_non_clean_card_iterate_work()
// to get proper partitioning and termination.
//
// This is an example of where n_par_threads() is used instead
// of workers()->active_workers(). n_par_threads can be set to 0 to
// turn off parallelism. For example when this code is called as
// part of verification and SharedHeap::process_strong_roots() is being
// used, then n_par_threads() may have been set to 0. active_workers
// is not overloaded with the meaning that it is a switch to disable
// parallelism and so keeps the meaning of the number of
// active gc workers. If parallelism has not been shut off by
// setting n_par_threads to 0, then n_par_threads should be
// equal to active_workers. When a different mechanism for shutting
// off parallelism is used, then active_workers can be used in
// place of n_par_threads.
// This is an example of a path where n_par_threads is
// set to 0 to turn off parallelism.
// [7] CardTableModRefBS::non_clean_card_iterate()
// [8] CardTableRS::younger_refs_in_space_iterate()
// [9] Generation::younger_refs_in_space_iterate()
// [10] OneContigSpaceCardGeneration::younger_refs_iterate()
// [11] CompactingPermGenGen::younger_refs_iterate()
// [12] CardTableRS::younger_refs_iterate()
// [13] SharedHeap::process_strong_roots()
// [14] G1CollectedHeap::verify()
// [15] Universe::verify()
// [16] G1CollectedHeap::do_collection_pause_at_safepoint()
//
int n_threads = SharedHeap::heap()->n_par_threads();
bool is_par = n_threads > 0;
if (is_par) {
#ifndef SERIALGC
assert(SharedHeap::heap()->n_par_threads() ==
SharedHeap::heap()->workers()->active_workers(), "Mismatch");
non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else // SERIALGC
fatal("Parallel gc not supported here.");
@ -489,6 +523,10 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
MemRegionClosure* cl) {
bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
assert(!is_par ||
(SharedHeap::heap()->n_par_threads() ==
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (mri.word_size() > 0) {
@ -624,23 +662,6 @@ MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
return MemRegion(mr.end(), mr.end());
}
// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) {
jbyte *cur_entry, *limit;
for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
cur_entry <= limit;
cur_entry++) {
if (*cur_entry == dirty_card) {
*cur_entry = precleaned_card;
}
}
}
}
}
uintx CardTableModRefBS::ct_max_alignment_constraint() {
return card_size * os::vm_page_size();
}

View File

@ -435,9 +435,6 @@ public:
MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
int reset_val);
// Set all the dirty cards in the given region to precleaned state.
void preclean_dirty_cards(MemRegion mr);
// Provide read-only access to the card table array.
const jbyte* byte_for_const(const void* p) const {
return byte_for(p);

View File

@ -164,7 +164,13 @@ inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
_dirty_card_closure(dirty_card_closure), _ct(ct) {
// Cannot yet substitute active_workers for n_par_threads
// in the case where parallelism is being turned off by
// setting n_par_threads to 0.
_is_par = (SharedHeap::heap()->n_par_threads() > 0);
assert(!_is_par ||
(SharedHeap::heap()->n_par_threads() ==
SharedHeap::heap()->workers()->active_workers()), "Mismatch");
}
void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {

View File

@ -58,7 +58,6 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
_perm_gen(NULL), _rem_set(NULL),
_strong_roots_parity(0),
_process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
_n_par_threads(0),
_workers(NULL)
{
if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
@ -80,6 +79,14 @@ SharedHeap::SharedHeap(CollectorPolicy* policy_) :
}
}
int SharedHeap::n_termination() {
return _process_strong_tasks->n_threads();
}
void SharedHeap::set_n_termination(int t) {
_process_strong_tasks->set_n_threads(t);
}
bool SharedHeap::heap_lock_held_for_gc() {
Thread* t = Thread::current();
return Heap_lock->owned_by_self()
@ -144,6 +151,10 @@ void SharedHeap::process_strong_roots(bool activate_scope,
StrongRootsScope srs(this, activate_scope);
// General strong roots.
assert(_strong_roots_parity != 0, "must have called prologue code");
// _n_termination for _process_strong_tasks should be set upstream
// in a method not running in a GC worker. Otherwise the GC worker
// could be trying to change the termination condition while the task
// is executing in another GC worker.
if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
Universe::oops_do(roots);
// Consider perm-gen discovered lists to be strong.

View File

@ -49,6 +49,62 @@ class FlexibleWorkGang;
class CollectorPolicy;
class KlassHandle;
// Note on use of FlexibleWorkGang's for GC.
// There are three places where task completion is determined.
// In
// 1) ParallelTaskTerminator::offer_termination() where _n_threads
// must be set to the correct value so that count of workers that
// have offered termination will exactly match the number
// working on the task. Tasks such as those derived from GCTask
// use ParallelTaskTerminator's. Tasks that want load balancing
// by work stealing use this method to gauge completion.
// 2) SubTasksDone has a variable _n_threads that is used in
// all_tasks_completed() to determine completion. all_tasks_completed()
// counts the number of tasks that have been done and then resets
// the SubTasksDone so that it can be used again. When the number of
// tasks is set to the number of GC workers, then _n_threads must
// be set to the number of active GC workers. G1CollectedHeap,
// HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone.
// This seems like too many.
// 3) SequentialSubTasksDone has an _n_threads that is used in
// a way similar to SubTasksDone and has the same dependency on the
// number of active GC workers. CompactibleFreeListSpace and Space
// have SequentialSubTasksDone's.
// Example of using SubTasksDone and SequentialSubTasksDone
// G1CollectedHeap::g1_process_strong_roots() calls
// process_strong_roots(false, // no scoping; this is parallel code
// collecting_perm_gen, so,
// &buf_scan_non_heap_roots,
// &eager_scan_code_roots,
// &buf_scan_perm);
// which delegates to SharedHeap::process_strong_roots() and uses
// SubTasksDone* _process_strong_tasks to claim tasks.
// process_strong_roots() calls
// rem_set()->younger_refs_iterate(perm_gen(), perm_blk);
// to scan the card table and which eventually calls down into
// CardTableModRefBS::par_non_clean_card_iterate_work(). This method
// uses SequentialSubTasksDone* _pst to claim tasks.
// Both SubTasksDone and SequentialSubTasksDone call their method
// all_tasks_completed() to count the number of GC workers that have
// finished their work. That logic is "when all the workers are
// finished the tasks are finished".
//
// The pattern that appears in the code is to set _n_threads
// to a value > 1 before a task that you would like executed in parallel
// and then to set it to 0 after that task has completed. A value of
// 0 is a "special" value in set_n_threads() which translates to
// setting _n_threads to 1.
//
// Some code uses _n_termination to decide if work should be done in
// parallel. The notorious possibly_parallel_oops_do() in threads.cpp
// is an example of such code. Look for variable "is_par" for other
// examples.
//
// The active_workers value is not reset to 0 after a parallel phase. Its
// value may be used in later phases and in one instance at least
// (the parallel remark) it has to be used (the parallel remark depends
// on the partitioning done in the previous parallel scavenge).
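A minimal model of the SubTasksDone completion protocol described in point 2 above (an illustration, not HotSpot's implementation): n_threads workers each report completion once, the last one in resets the object for reuse, and 0 is treated as the "special" value meaning one worker:

#include <atomic>

class SubTasksDoneModel {
  std::atomic<unsigned> _threads_completed;
  unsigned _n_threads;
 public:
  explicit SubTasksDoneModel(unsigned n)
    : _threads_completed(0), _n_threads(n == 0 ? 1 : n) {}
  void set_n_threads(unsigned n) { _n_threads = (n == 0 ? 1 : n); }
  // Called once by every worker when it has finished its share; the
  // last caller observes the full count and resets for the next phase.
  void all_tasks_completed() {
    unsigned observed = _threads_completed.fetch_add(1) + 1;
    if (observed == _n_threads) {
      _threads_completed.store(0);
    }
  }
};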
class SharedHeap : public CollectedHeap {
friend class VMStructs;
@ -84,11 +140,6 @@ protected:
// If we're doing parallel GC, use this gang of threads.
FlexibleWorkGang* _workers;
// Number of parallel threads currently working on GC tasks.
// 0 indicates use sequential code; 1 means use parallel code even with
// only one thread, for performance testing purposes.
int _n_par_threads;
// Full initialization is done in a concrete subtype's "initialize"
// function.
SharedHeap(CollectorPolicy* policy_);
@ -107,6 +158,7 @@ public:
CollectorPolicy *collector_policy() const { return _collector_policy; }
void set_barrier_set(BarrierSet* bs);
SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
// Does operations required after initialization has been done.
virtual void post_initialize();
@ -198,13 +250,6 @@ public:
FlexibleWorkGang* workers() const { return _workers; }
// Sets the number of parallel threads that will be doing tasks
// (such as process strong roots) subsequently.
virtual void set_par_threads(int t);
// Number of threads currently working on GC tasks.
int n_par_threads() { return _n_par_threads; }
// Invoke the "do_oop" method the closure "roots" on all root locations.
// If "collecting_perm_gen" is false, then roots that may only contain
// references to permGen objects are not scanned; instead, in that case,
@ -240,6 +285,13 @@ public:
virtual void gc_prologue(bool full) = 0;
virtual void gc_epilogue(bool full) = 0;
// Sets the number of parallel threads that will be doing tasks
// (such as process strong roots) subsequently.
virtual void set_par_threads(int t);
int n_termination();
void set_n_termination(int t);
//
// New methods from CollectedHeap
//

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -533,7 +533,8 @@ protected:
* by the MarkSweepAlwaysCompactCount parameter. \
*/ \
int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
bool skip_dead = (MarkSweepAlwaysCompactCount < 1) \
||((invocations % MarkSweepAlwaysCompactCount) != 0); \
\
size_t allowed_deadspace = 0; \
if (skip_dead) { \

View File

@ -34,7 +34,7 @@ class objArrayOopDesc : public arrayOopDesc {
friend class objArrayKlass;
friend class Runtime1;
friend class psPromotionManager;
friend class CSMarkOopClosure;
friend class CSetMarkOopClosure;
friend class G1ParScanPartialArrayClosure;
template <class T> T* obj_at_addr(int index) const {

View File

@ -898,45 +898,41 @@ void PhaseCFG::dump_headers() {
void PhaseCFG::verify( ) const {
#ifdef ASSERT
// Verify sane CFG
for( uint i = 0; i < _num_blocks; i++ ) {
for (uint i = 0; i < _num_blocks; i++) {
Block *b = _blocks[i];
uint cnt = b->_nodes.size();
uint j;
for( j = 0; j < cnt; j++ ) {
for (j = 0; j < cnt; j++) {
Node *n = b->_nodes[j];
assert( _bbs[n->_idx] == b, "" );
if( j >= 1 && n->is_Mach() &&
n->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
assert( j == 1 || b->_nodes[j-1]->is_Phi(),
"CreateEx must be first instruction in block" );
if (j >= 1 && n->is_Mach() &&
n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
assert(j == 1 || b->_nodes[j-1]->is_Phi(),
"CreateEx must be first instruction in block");
}
for( uint k = 0; k < n->req(); k++ ) {
for (uint k = 0; k < n->req(); k++) {
Node *def = n->in(k);
if( def && def != n ) {
assert( _bbs[def->_idx] || def->is_Con(),
"must have block; constants for debug info ok" );
if (def && def != n) {
assert(_bbs[def->_idx] || def->is_Con(),
"must have block; constants for debug info ok");
// Verify that instructions in the block is in correct order.
// Uses must follow their definition if they are at the same block.
// Mostly done to check that MachSpillCopy nodes are placed correctly
// when CreateEx node is moved in build_ifg_physical().
if( _bbs[def->_idx] == b &&
if (_bbs[def->_idx] == b &&
!(b->head()->is_Loop() && n->is_Phi()) &&
// See (+++) comment in reg_split.cpp
!(n->jvms() != NULL && n->jvms()->is_monitor_use(k)) ) {
!(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
bool is_loop = false;
if (n->is_Phi()) {
for( uint l = 1; l < def->req(); l++ ) {
for (uint l = 1; l < def->req(); l++) {
if (n == def->in(l)) {
is_loop = true;
break; // Some kind of loop
}
}
}
assert( is_loop || b->find_node(def) < j, "uses must follow definitions" );
}
if( def->is_SafePointScalarObject() ) {
assert(_bbs[def->_idx] == b, "SafePointScalarObject Node should be at the same block as its SafePoint node");
assert(_bbs[def->_idx] == _bbs[def->in(0)->_idx], "SafePointScalarObject Node should be at the same block as its control edge");
assert(is_loop || b->find_node(def) < j, "uses must follow definitions");
}
}
}
@ -946,12 +942,11 @@ void PhaseCFG::verify( ) const {
Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();
assert( bp, "last instruction must be a block proj" );
assert( bp == b->_nodes[j], "wrong number of successors for this block" );
if( bp->is_Catch() ) {
while( b->_nodes[--j]->is_MachProj() ) ;
assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
}
else if( bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If ) {
assert( b->_num_succs == 2, "Conditional branch must have two targets");
if (bp->is_Catch()) {
while (b->_nodes[--j]->is_MachProj()) ;
assert(b->_nodes[j]->is_MachCall(), "CatchProj must follow call");
} else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
assert(b->_num_succs == 2, "Conditional branch must have two targets");
}
}
#endif

View File

@ -281,6 +281,8 @@ class Block : public CFGElement {
// Find and remove n from block list
void find_remove( const Node *n );
// helper function that adds caller save registers to MachProjNode
void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
// Schedule a call next in the block
uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);

View File

@ -456,6 +456,12 @@
product(intx, EliminateAllocationArraySizeLimit, 64, \
"Array size (number of elements) limit for scalar replacement") \
\
product(bool, OptimizePtrCompare, true, \
"Use escape analysis to optimize pointers compare") \
\
notproduct(bool, PrintOptimizePtrCompare, false, \
"Print information about optimized pointers compare") \
\
product(bool, UseOptoBiasInlining, true, \
"Generate biased locking code in C2 ideal graph") \
\

View File

@ -318,17 +318,17 @@ CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj
return new DirectCallGenerator(m, separate_io_proj);
}
CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
return new DynamicCallGenerator(m);
}
CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
assert(!m->is_static(), "for_virtual_call mismatch");
assert(!m->is_method_handle_invoke(), "should be a direct call");
return new VirtualCallGenerator(m, vtable_index);
}
CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
return new DynamicCallGenerator(m);
}
// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
CallGenerator* _inline_cg;
@ -576,7 +576,9 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
kit.set_control(slow_ctl);
if (!kit.stopped()) {
slow_jvms = _if_missed->generate(kit.sync_jvms());
assert(slow_jvms != NULL, "miss path must not fail to generate");
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != NULL, "must be");
kit.add_exception_states_from(slow_jvms);
kit.set_map(slow_jvms->map());
if (!kit.stopped())
@ -682,6 +684,15 @@ CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predict
}
CallGenerator* CallGenerator::for_method_handle_call(Node* method_handle, JVMState* jvms,
ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_method_handle_call mismatch");
CallGenerator* cg = CallGenerator::for_method_handle_inline(method_handle, jvms, caller, callee, profile);
if (cg != NULL)
return cg;
return CallGenerator::for_direct_call(callee);
}
CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms,
ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
if (method_handle->Opcode() == Op_ConP) {
@ -721,8 +732,8 @@ CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMS
// Generate a guard so that each can be inlined. We might want to
// do more inputs at later point but this gets the most common
// case.
CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
CallGenerator* cg1 = for_method_handle_call(method_handle->in(1), jvms, caller, callee, profile.rescale(1.0 - prob));
CallGenerator* cg2 = for_method_handle_call(method_handle->in(2), jvms, caller, callee, profile.rescale(prob));
if (cg1 != NULL && cg2 != NULL) {
const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
ciObject* const_oop = oop_ptr->const_oop();
@ -733,6 +744,17 @@ CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMS
return NULL;
}
CallGenerator* CallGenerator::for_invokedynamic_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
assert(callee->is_method_handle_invoke() || callee->is_method_handle_adapter(), "for_invokedynamic_call mismatch");
// Get the CallSite object.
ciBytecodeStream str(caller);
str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
ciCallSite* call_site = str.get_call_site();
CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, callee, profile);
if (cg != NULL)
return cg;
return CallGenerator::for_dynamic_call(callee);
}
CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
@ -819,7 +841,9 @@ JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
kit.set_control(slow_ctl);
if (!kit.stopped()) {
slow_jvms = _if_missed->generate(kit.sync_jvms());
assert(slow_jvms != NULL, "miss path must not fail to generate");
if (kit.failing())
return NULL; // might happen because of NodeCountInliningCutoff
assert(slow_jvms != NULL, "must be");
kit.add_exception_states_from(slow_jvms);
kit.set_map(slow_jvms->map());
if (!kit.stopped())

View File

@ -108,8 +108,11 @@ class CallGenerator : public ResourceObj {
// How to generate vanilla out-of-line call sites:
static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false); // static, special
static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic
static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface
static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic
static CallGenerator* for_method_handle_call(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
static CallGenerator* for_invokedynamic_call( JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
static CallGenerator* for_method_handle_inline(Node* method_handle, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);
static CallGenerator* for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms, ciMethod* caller, ciMethod* callee, ciCallProfile profile);

Some files were not shown because too many files have changed in this diff.