Coleen Phillimore 2016-03-18 22:27:19 +00:00
commit 539958f76c
193 changed files with 4541 additions and 3600 deletions

View File

@ -9,12 +9,15 @@
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
# and add an override of __FILE__ with just the basename on the
# first line of the file.
awk < $1 > $1+ -v F2=$2 '
BEGIN { print "#line 1 \"" F2 "\""; }
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
'
mv $1+ $1
}
fix_lines $2/$1 $3/$1
fix_lines $2/$1 $1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )
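
A minimal sketch of the reworked transform, using hypothetical file names (generated.cpp and foo.cpp are placeholders, not from the commit). Passing the basename with -v F2=... makes the variable visible inside BEGIN, so the __FILE__ override can be printed before the first input line; the old trailing F2=$2 assignment was only processed after BEGIN had already run.

    # Input (generated.cpp):      After fix_lines generated.cpp foo.cpp:
    #   int x;                      #line 1 "foo.cpp"
    #   #line 999999                int x;
    #   int y;                      #line 3 "foo.cpp"
    #                               int y;
    awk < generated.cpp > generated.cpp+ -v F2=foo.cpp '
    BEGIN { print "#line 1 \"" F2 "\""; }
    /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
    {print}
    '
    mv generated.cpp+ generated.cpp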

View File

@ -27,14 +27,17 @@
#
# It knows how to build and run the tools to generate trace files.
include $(GAMMADIR)/make/linux/makefiles/rules.make
include $(GAMMADIR)/make/aix/makefiles/rules.make
include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
HAS_ALT_SRC := false
ifndef OPENJDK
ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), )
HAS_ALT_SRC := true
endif
endif
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -50,23 +53,30 @@ VPATH += $(Src_Dirs_V:%=%:)
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
traceEventIds.hpp \
traceTypes.hpp
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
TraceXml = $(TraceAltSrcDir)/trace.xml
endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
endif
.PHONY: all clean cleanall
@ -79,26 +89,26 @@ GENERATE_CODE= \
$(QUIETLY) echo $(LOG_INFO) Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
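
Why the directory probe changed: GNU make's $(wildcard) expands to the empty string when the path is absent, so the test no longer forks a $(shell), and the enclosing ifndef OPENJDK guard forces the open-source path even when closed sources happen to be present on disk. The same condition expressed as plain shell, for illustration only (variable names follow the makefile):

    if [ -z "${OPENJDK}" ] && [ -d "${HS_ALT_SRC}/share/vm/trace" ]; then
        HAS_ALT_SRC=true     # closed trace sources exist and may be used
    else
        HAS_ALT_SRC=false    # OPENJDK build, or no closed sources found
    fi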

View File

@ -9,12 +9,15 @@
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
# and add an override of __FILE__ with just the basename on the
# first line of the file.
awk < $1 > $1+ -v F2=$2 '
BEGIN { print "#line 1 \"" F2 "\""; }
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
'
mv $1+ $1
}
fix_lines $2/$1 $3/$1
fix_lines $2/$1 $1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )

View File

@ -32,9 +32,12 @@ include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
HAS_ALT_SRC := false
ifndef OPENJDK
ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), )
HAS_ALT_SRC := true
endif
endif
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -50,24 +53,30 @@ VPATH += $(Src_Dirs_V:%=%:)
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
traceEventIds.hpp \
traceTypes.hpp
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
TraceXml = $(TraceAltSrcDir)/trace.xml
endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
endif
.PHONY: all clean cleanall
@ -80,32 +89,31 @@ GENERATE_CODE= \
$(QUIETLY) echo $(LOG_INFO) Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
# #########################################################################
clean cleanall:
rm $(TraceGeneratedFiles)

View File

@ -9,12 +9,15 @@
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
# and add an override of __FILE__ with just the basename on the
# first line of the file.
awk < $1 > $1+ -v F2=$2 '
BEGIN { print "#line 1 \"" F2 "\""; }
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
'
mv $1+ $1
}
fix_lines $2/$1 $3/$1
fix_lines $2/$1 $1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )

View File

@ -32,9 +32,12 @@ include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
HAS_ALT_SRC := false
ifndef OPENJDK
ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), )
HAS_ALT_SRC := true
endif
endif
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -50,23 +53,30 @@ VPATH += $(Src_Dirs_V:%=%:)
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
traceEventIds.hpp \
traceTypes.hpp
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
TraceXml = $(TraceAltSrcDir)/trace.xml
endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
endif
.PHONY: all clean cleanall
@ -79,26 +89,26 @@ GENERATE_CODE= \
$(QUIETLY) echo $(LOG_INFO) Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
@ -107,5 +117,3 @@ endif
clean cleanall:
rm $(TraceGeneratedFiles)

View File

@ -109,8 +109,7 @@
JVM_GetPrimitiveArrayElement;
JVM_GetProtectionDomain;
JVM_GetStackAccessControlContext;
JVM_GetStackTraceDepth;
JVM_GetStackTraceElement;
JVM_GetStackTraceElements;
JVM_GetSystemPackage;
JVM_GetSystemPackages;
JVM_GetTemporaryDirectory;

View File

@ -9,12 +9,15 @@
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
# and add an override of __FILE__ with just the basename on the
# first line of the file.
nawk < $1 > $1+ -v F2=$2 '
BEGIN { print "#line 1 \"" F2 "\""; }
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
'
mv $1+ $1
}
fix_lines $2/$1 $3/$1
fix_lines $2/$1 $1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )

View File

@ -32,9 +32,12 @@ include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
HAS_ALT_SRC := false
ifndef OPENJDK
ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), )
HAS_ALT_SRC := true
endif
endif
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -50,23 +53,30 @@ VPATH += $(Src_Dirs_V:%=%:)
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
traceEventIds.hpp \
traceTypes.hpp
ifeq ($(HAS_ALT_SRC), true)
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
TraceGeneratedNames += \
traceRequestables.hpp \
traceEventControl.hpp
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
TraceXml = $(TraceAltSrcDir)/trace.xml
endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
endif
.PHONY: all clean cleanall
@ -79,26 +89,26 @@ GENERATE_CODE= \
$(QUIETLY) echo $(LOG_INFO) Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif

View File

@ -114,11 +114,15 @@ VARIANT_TEXT=Tiered
# Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro
# or make/hotspot_distro.
!ifndef HOTSPOT_VM_DISTRO
!ifndef OPENJDK
!if exists($(WorkSpace)\src\closed)
!include $(WorkSpace)\make\hotspot_distro
!else
!include $(WorkSpace)\make\openjdk_distro
!endif
!else
!include $(WorkSpace)\make\openjdk_distro
!endif
!endif
HS_FILEDESC=$(HOTSPOT_VM_DISTRO) $(ARCH_TEXT) $(VARIANT_TEXT) VM

View File

@ -55,7 +55,11 @@ COMMONSRC_REL=src
ALTSRC_REL=src/closed # Change this to pick up alt sources from somewhere else
COMMONSRC=${WorkSpace}/${COMMONSRC_REL}
ALTSRC=${WorkSpace}/${ALTSRC_REL}
if [ "x$OPENJDK" != "xtrue" ]; then
ALTSRC=${WorkSpace}/${ALTSRC_REL}
else
ALTSRC=PATH_THAT_DOES_NOT_EXIST
fi
BASE_PATHS="`if [ -d ${ALTSRC}/share/vm ]; then $FIND ${ALTSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc -o -name opto -o -name shark -o -name libadt \); fi`"
BASE_PATHS="${BASE_PATHS} ` $FIND ${COMMONSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc -o -name opto -o -name shark -o -name libadt \)`"
@ -158,6 +162,6 @@ for e in ${Src_Files}; do
fi
Obj_Files="${Obj_Files}$o "
done
Obj_Files=`echo ${Obj_Files} | tr ' ' '\n' | sort`
Obj_Files=`echo ${Obj_Files} | tr ' ' '\n' | LC_ALL=C sort`
echo Obj_Files=${Obj_Files}
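
The LC_ALL=C prefix pins sort to bytewise collation, making the object file list reproducible across build machines; locale-aware collation may interleave upper- and lower-case names differently. A quick illustration with hypothetical file names:

    $ printf 'b.o\nA.o\na.o\n' | sort            # order depends on the active locale
    $ printf 'b.o\nA.o\na.o\n' | LC_ALL=C sort   # always: A.o a.o b.o (byte order)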

View File

@ -276,3 +276,7 @@ ifneq ($(SPEC),)
MAKE_ARGS += MT="$(subst /,\\,$(MT))"
endif
endif
ifdef OPENJDK
MAKE_ARGS += OPENJDK="$(OPENJDK)"
endif

View File

@ -32,15 +32,21 @@
# #########################################################################
TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace
TraceSrcDir = $(WorkSpace)/src/share/vm/trace
TraceAltSrcDir = $(WorkSpace)\src\closed\share\vm\trace
TraceSrcDir = $(WorkSpace)\src\share\vm\trace
!ifndef OPENJDK
!if EXISTS($(TraceAltSrcDir))
HAS_ALT_SRC = true
!endif
!endif
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
!if EXISTS($(TraceAltSrcDir))
!ifdef HAS_ALT_SRC
TraceGeneratedNames = $(TraceGeneratedNames) \
traceRequestables.hpp \
traceEventControl.hpp
@ -51,22 +57,30 @@ TraceGeneratedNames = $(TraceGeneratedNames) \
#Should be equivalent to "TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)"
TraceGeneratedFiles = \
$(TraceOutDir)/traceEventClasses.hpp \
$(TraceOutDir)/traceEventIds.hpp \
$(TraceOutDir)/traceTypes.hpp
$(TraceOutDir)/traceEventIds.hpp \
$(TraceOutDir)/traceTypes.hpp
!if EXISTS($(TraceAltSrcDir))
!ifdef HAS_ALT_SRC
TraceGeneratedFiles = $(TraceGeneratedFiles) \
$(TraceOutDir)/traceRequestables.hpp \
$(TraceOutDir)/traceRequestables.hpp \
$(TraceOutDir)/traceEventControl.hpp
!endif
XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
!if EXISTS($(TraceAltSrcDir))
XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
!ifdef HAS_ALT_SRC
TraceXml = $(TraceAltSrcDir)/trace.xml
!endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
!ifdef HAS_ALT_SRC
XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
!endif
.PHONY: all clean cleanall
@ -76,33 +90,33 @@ XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
default::
@if not exist $(TraceOutDir) mkdir $(TraceOutDir)
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
!if !EXISTS($(TraceAltSrcDir))
!ifndef HAS_ALT_SRC
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
@echo Generating OpenJDK $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
!else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
@echo Generating AltSrc $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
@echo Generating AltSrc $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
@echo Generating AltSrc $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
!endif
@ -110,5 +124,3 @@ $(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)
cleanall :
rm $(TraceGeneratedFiles)

View File

@ -118,6 +118,7 @@ LD_FLAGS=$(LD_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 $(EXP
CXX_INCLUDE_DIRS=/I "..\generated"
!ifndef OPENJDK
!if exists($(ALTSRC)\share\vm)
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\share\vm"
!endif
@ -133,6 +134,7 @@ CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\os_cpu\windows_$(Platform_arc
!if exists($(ALTSRC)\cpu\$(Platform_arch)\vm)
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\cpu\$(Platform_arch)\vm"
!endif
!endif # OPENJDK
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) \
/I "$(COMMONSRC)\share\vm" \
@ -187,10 +189,12 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm
VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto
!ifndef OPENJDK
!if exists($(ALTSRC)\share\vm\jfr)
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers
!endif
!endif # OPENJDK
VM_PATH={$(VM_PATH)}
@ -310,6 +314,7 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{$(COMMONSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
!ifndef OPENJDK
{$(ALTSRC)\share\vm\c1}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
@ -392,6 +397,13 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
!endif
{..\generated\incls}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
@ -404,12 +416,6 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{..\generated\tracefiles}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
default::
_build_pch_file.obj:

View File

@ -84,7 +84,11 @@ public class SAGetopt {
}
else {
// Mixed style options --file name
extractOptarg(ca[0]);
try {
extractOptarg(ca[0]);
} catch (ArrayIndexOutOfBoundsException e) {
throw new RuntimeException("Argument is expected for '" + ca[0] + "'");
}
}
return ca[0];

View File

@ -30,6 +30,7 @@ import java.util.Arrays;
import sun.jvm.hotspot.tools.JStack;
import sun.jvm.hotspot.tools.JMap;
import sun.jvm.hotspot.tools.JInfo;
import sun.jvm.hotspot.tools.JSnap;
public class SALauncher {
@ -39,6 +40,7 @@ public class SALauncher {
System.out.println(" jstack --help\tto get more information");
System.out.println(" jmap --help\tto get more information");
System.out.println(" jinfo --help\tto get more information");
System.out.println(" jsnap --help\tto get more information");
return false;
}
@ -85,6 +87,11 @@ public class SALauncher {
return commonHelp();
}
private static boolean jsnapHelp() {
System.out.println(" <no option>\tdump performance counters");
return commonHelp();
}
private static boolean toolHelp(String toolName) {
if (toolName.equals("jstack")) {
return jstackHelp();
@ -95,24 +102,62 @@ public class SALauncher {
if (toolName.equals("jmap")) {
return jmapHelp();
}
if (toolName.equals("jsnap")) {
return jsnapHelp();
}
if (toolName.equals("hsdb") || toolName.equals("clhsdb")) {
return commonHelp();
}
return launcherHelp();
}
private static void buildAttachArgs(ArrayList<String> newArgs,
String pid, String exe, String core) {
if ((pid == null) && (exe == null)) {
throw new IllegalArgumentException(
"You have to set --pid or --exe.");
}
if (pid != null) { // Attach to live process
if (exe != null) {
throw new IllegalArgumentException(
"Unnecessary argument: --exe");
} else if (core != null) {
throw new IllegalArgumentException(
"Unnecessary argument: --core");
} else if (!pid.matches("^\\d+$")) {
throw new IllegalArgumentException("Invalid pid: " + pid);
}
newArgs.add(pid);
} else {
if (exe.length() == 0) {
throw new IllegalArgumentException("You have to set --exe.");
}
newArgs.add(exe);
if ((core == null) || (core.length() == 0)) {
throw new IllegalArgumentException("You have to set --core.");
}
newArgs.add(core);
}
}
private static void runCLHSDB(String[] oldArgs) {
SAGetopt sg = new SAGetopt(oldArgs);
String[] longOpts = {"exe=", "core=", "pid="};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String pid = null;
String exe = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -120,17 +165,12 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
buildAttachArgs(newArgs, pid, exe, core);
CLHSDB.main(newArgs.toArray(new String[newArgs.size()]));
}
@ -139,13 +179,14 @@ public class SALauncher {
String[] longOpts = {"exe=", "core=", "pid="};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String pid = null;
String exe = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -153,17 +194,12 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
buildAttachArgs(newArgs, pid, exe, core);
HSDB.main(newArgs.toArray(new String[newArgs.size()]));
}
@ -173,13 +209,14 @@ public class SALauncher {
"mixed", "locks"};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String pid = null;
String exe = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -187,7 +224,7 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
if (s.equals("mixed")) {
@ -200,13 +237,7 @@ public class SALauncher {
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
buildAttachArgs(newArgs, pid, exe, core);
JStack.main(newArgs.toArray(new String[newArgs.size()]));
}
@ -216,13 +247,14 @@ public class SALauncher {
"heap", "binaryheap", "histo", "clstats", "finalizerinfo"};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String pid = null;
String exe = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -230,7 +262,7 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
if (s.equals("heap")) {
@ -255,13 +287,7 @@ public class SALauncher {
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
buildAttachArgs(newArgs, pid, exe, core);
JMap.main(newArgs.toArray(new String[newArgs.size()]));
}
@ -271,13 +297,14 @@ public class SALauncher {
"flags", "sysprops"};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String exe = null;
String pid = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -285,7 +312,7 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
if (s.equals("flags")) {
@ -298,14 +325,37 @@ public class SALauncher {
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
buildAttachArgs(newArgs, pid, exe, core);
JInfo.main(newArgs.toArray(new String[newArgs.size()]));
}
private static void runJSNAP(String[] oldArgs) {
SAGetopt sg = new SAGetopt(oldArgs);
String[] longOpts = {"exe=", "core=", "pid="};
ArrayList<String> newArgs = new ArrayList();
String exe = null;
String pid = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
core = sg.getOptarg();
continue;
}
if (s.equals("pid")) {
pid = sg.getOptarg();
continue;
}
}
JInfo.main(newArgs.toArray(new String[newArgs.size()]));
buildAttachArgs(newArgs, pid, exe, core);
JSnap.main(newArgs.toArray(new String[newArgs.size()]));
}
public static void main(String[] args) {
@ -329,31 +379,43 @@ public class SALauncher {
String[] oldArgs = Arrays.copyOfRange(args, 1, args.length);
// Run SA interactive mode
if (args[0].equals("clhsdb")) {
runCLHSDB(oldArgs);
return;
}
try {
// Run SA interactive mode
if (args[0].equals("clhsdb")) {
runCLHSDB(oldArgs);
return;
}
if (args[0].equals("hsdb")) {
runHSDB(oldArgs);
return;
}
if (args[0].equals("hsdb")) {
runHSDB(oldArgs);
return;
}
// Run SA tmtools mode
if (args[0].equals("jstack")) {
runJSTACK(oldArgs);
return;
}
// Run SA tmtools mode
if (args[0].equals("jstack")) {
runJSTACK(oldArgs);
return;
}
if (args[0].equals("jmap")) {
runJMAP(oldArgs);
return;
}
if (args[0].equals("jmap")) {
runJMAP(oldArgs);
return;
}
if (args[0].equals("jinfo")) {
runJINFO(oldArgs);
return;
if (args[0].equals("jinfo")) {
runJINFO(oldArgs);
return;
}
if (args[0].equals("jsnap")) {
runJSNAP(oldArgs);
return;
}
throw new IllegalArgumentException("Unknown tool: " + args[0]);
} catch (Exception e) {
System.err.println(e.getMessage());
toolHelp(args[0]);
}
}
}
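
Taken together, buildAttachArgs and the try/catch in main give every tool the same attach-target validation. Hypothetical invocations (the entry point shown is an assumption; only the tool and option names come from this patch):

    $ java sun.jvm.hotspot.SALauncher jstack --pid 1234               # attach to a live process
    $ java sun.jvm.hotspot.SALauncher jsnap --exe ./java --core core  # post-mortem counter dump
    $ java sun.jvm.hotspot.SALauncher jmap --pid 1234 --exe ./java    # rejected: "Unnecessary argument: --exe"
    $ java sun.jvm.hotspot.SALauncher jinfo                           # rejected: "You have to set --pid or --exe."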

View File

@ -35,6 +35,11 @@ public enum GCCause {
_gc_locker ("GCLocker Initiated GC"),
_heap_inspection ("Heap Inspection Initiated GC"),
_heap_dump ("Heap Dump Initiated GC"),
_wb_young_gc ("WhiteBox Initiated Young GC"),
_wb_conc_mark ("WhiteBox Initiated Concurrent Mark"),
_wb_full_gc ("WhiteBox Initiated Full GC"),
_update_allocation_context_stats_inc ("Update Allocation Context Stats"),
_update_allocation_context_stats_full ("Update Allocation Context Stats"),
_no_gc ("No GC"),
_no_cause_specified ("Unknown GCCause"),
@ -42,6 +47,7 @@ public enum GCCause {
_tenured_generation_full ("Tenured Generation Full"),
_metadata_GC_threshold ("Metadata GC Threshold"),
_metadata_GC_clear_soft_refs ("Metadata GC Clear Soft References"),
_cms_generation_full ("CMS Generation Full"),
_cms_initial_mark ("CMS Initial Mark"),
@ -55,7 +61,8 @@ public enum GCCause {
_g1_inc_collection_pause ("G1 Evacuation Pause"),
_g1_humongous_allocation ("G1 Humongous Allocation"),
_last_ditch_collection ("Last ditch collection"),
_dcmd_gc_run ("Diagnostic Command"),
_last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE");
private final String value;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -130,7 +130,7 @@ public class Threads {
virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class);
}
// for now, use JavaThread itself. fix it later with appropriate class if needed
virtualConstructor.addMapping("SurrogateLockerThread", JavaThread.class);
virtualConstructor.addMapping("ReferencePendingListLockerThread", JavaThread.class);
virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class);
virtualConstructor.addMapping("ServiceThread", ServiceThread.class);
}
@ -172,7 +172,7 @@ public class Threads {
return thread;
} catch (Exception e) {
throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, SurrogateLockerThread, or CodeCacheSweeperThread)", e);
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, ReferencePendingListLockerThread, or CodeCacheSweeperThread)", e);
}
}

View File

@ -81,6 +81,12 @@ public class CompactHashTable extends VMObject {
}
public Symbol probe(byte[] name, long hash) {
if (bucketCount() == 0) {
// The table is invalid, so don't try to lookup
return null;
}
long symOffset;
Symbol sym;
Address baseAddress = baseAddressField.getValue(addr);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -837,7 +837,7 @@ vmType2Class["InterpreterCodelet"] = sapkg.interpreter.InterpreterCodelet;
vmType2Class["JavaThread"] = sapkg.runtime.JavaThread;
vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread;
vmType2Class["CodeCacheSweeperThread"] = sapkg.runtime.CodeCacheSweeperThread;
vmType2Class["SurrogateLockerThread"] = sapkg.runtime.JavaThread;
vmType2Class["ReferencePendingListLockerThread"] = sapkg.runtime.JavaThread;
vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread;
// gc

View File

@ -910,8 +910,8 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
@ -1178,7 +1178,7 @@ void os::die() {
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
const char *s = ::strerror(errno);
const char *s = os::strerror(errno);
size_t n = ::strlen(s);
if (n >= len) {
n = len - 1;
@ -1714,14 +1714,14 @@ static void local_sem_post() {
if (os::Aix::on_aix()) {
int rc = ::sem_post(&sig_sem);
if (rc == -1 && !warn_only_once) {
trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
warn_only_once = true;
}
} else {
guarantee0(p_sig_msem != NULL);
int rc = ::msem_unlock(p_sig_msem, 0);
if (rc == -1 && !warn_only_once) {
trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
warn_only_once = true;
}
}
@ -1732,14 +1732,14 @@ static void local_sem_wait() {
if (os::Aix::on_aix()) {
int rc = ::sem_wait(&sig_sem);
if (rc == -1 && !warn_only_once) {
trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
warn_only_once = true;
}
} else {
guarantee0(p_sig_msem != NULL); // must init before use
int rc = ::msem_lock(p_sig_msem, 0);
if (rc == -1 && !warn_only_once) {
trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
warn_only_once = true;
}
}
@ -2203,7 +2203,7 @@ static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
os::errno_name(err), err);
}
#endif
@ -2412,7 +2412,7 @@ static bool checked_mprotect(char* addr, size_t size, int prot) {
bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
if (!rc) {
const char* const s_errno = strerror(errno);
const char* const s_errno = os::errno_name(errno);
warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
return false;
}
@ -2634,7 +2634,7 @@ OSReturn os::set_native_priority(Thread* thread, int newpri) {
if (ret != 0) {
trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
(int)thr, newpri, ret, strerror(ret));
(int)thr, newpri, ret, os::errno_name(ret));
}
return (ret == 0) ? OS_OK : OS_ERR;
}

View File

@ -30,6 +30,7 @@
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
@ -101,7 +102,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
int fd = result;
@ -112,7 +113,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -124,7 +125,7 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -397,7 +398,7 @@ static DIR *open_directory_secure(const char* dirname) {
if (errno == ELOOP) {
warning("directory %s is a symlink and is not secure\n", dirname);
} else {
warning("could not open directory %s: %s\n", dirname, strerror(errno));
warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@ -507,7 +508,7 @@ static bool is_file_secure(int fd, const char *filename) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed on %s: %s\n", filename, strerror(errno));
warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
}
return false;
}
@ -543,7 +544,7 @@ static char* get_user_name(uid_t uid) {
if (PrintMiscellaneous && Verbose) {
if (result != 0) {
warning("Could not retrieve passwd entry: %s\n",
strerror(result));
os::strerror(result));
}
else if (p == NULL) {
// this check is added to protect against an observed problem
@ -557,7 +558,7 @@ static char* get_user_name(uid_t uid) {
// Bug Id 89052 was opened with RedHat.
//
warning("Could not retrieve passwd entry: %s\n",
strerror(errno));
os::strerror(errno));
}
else {
warning("Could not determine user name: %s\n",
@ -593,7 +594,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
"Process not found");
}
else /* EPERM */ {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
@ -746,7 +747,7 @@ static void remove_file(const char* path) {
if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -849,7 +850,7 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (PrintMiscellaneous && Verbose) {
warning("could not create directory %s: %s\n",
dirname, strerror(errno));
dirname, os::strerror(errno));
}
return false;
}
@ -900,7 +901,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (errno == ELOOP) {
warning("file %s is a symlink and is not secure\n", filename);
} else {
warning("could not create file %s: %s\n", filename, strerror(errno));
warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// Close the directory and reset the current working directory.
@ -924,7 +925,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not truncate shared memory file: %s\n", strerror(errno));
warning("could not truncate shared memory file: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -933,7 +934,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
warning("could not set shared memory file size: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -968,7 +969,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
"Permission denied");
}
else {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
int fd = result;
@ -1041,7 +1042,7 @@ static char* mmap_create_shared(size_t size) {
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed - %s\n", strerror(errno));
warning("mmap failed - %s\n", os::strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
@ -1109,7 +1110,7 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed: %s\n", strerror(errno));
warning("fstat failed: %s\n", os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
@ -1231,7 +1232,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed: %s\n", strerror(errno));
warning("mmap failed: %s\n", os::strerror(errno));
}
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");

View File

@ -789,7 +789,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
@ -1122,7 +1122,7 @@ void os::die() {
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
const char *s = ::strerror(errno);
const char *s = os::strerror(errno);
size_t n = ::strlen(s);
if (n >= len) {
n = len - 1;
@ -2141,7 +2141,7 @@ static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
os::errno_name(err), err);
}
// NOTE: Bsd kernel does not really reserve the pages for us.
@ -3422,7 +3422,7 @@ void os::init(void) {
Bsd::set_page_size(getpagesize());
if (Bsd::page_size() == -1) {
fatal("os_bsd.cpp: os::init: sysconf failed (%s)", strerror(errno));
fatal("os_bsd.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
}
init_page_sizes((size_t) Bsd::page_size());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "os_bsd.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
@ -100,7 +101,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
int fd = result;
@ -111,7 +112,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -123,7 +124,7 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -309,7 +310,7 @@ static DIR *open_directory_secure(const char* dirname) {
if (errno == ELOOP) {
warning("directory %s is a symlink and is not secure\n", dirname);
} else {
warning("could not open directory %s: %s\n", dirname, strerror(errno));
warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@ -420,7 +421,7 @@ static bool is_file_secure(int fd, const char *filename) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed on %s: %s\n", filename, strerror(errno));
warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
}
return false;
}
@ -459,7 +460,7 @@ static char* get_user_name(uid_t uid) {
if (PrintMiscellaneous && Verbose) {
if (result != 0) {
warning("Could not retrieve passwd entry: %s\n",
strerror(result));
os::strerror(result));
}
else if (p == NULL) {
// this check is added to protect against an observed problem
@ -473,7 +474,7 @@ static char* get_user_name(uid_t uid) {
// Bug Id 89052 was opened with RedHat.
//
warning("Could not retrieve passwd entry: %s\n",
strerror(errno));
os::strerror(errno));
}
else {
warning("Could not determine user name: %s\n",
@ -509,7 +510,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
"Process not found");
}
else /* EPERM */ {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
@ -652,7 +653,7 @@ static void remove_file(const char* path) {
if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -762,7 +763,7 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (PrintMiscellaneous && Verbose) {
warning("could not create directory %s: %s\n",
dirname, strerror(errno));
dirname, os::strerror(errno));
}
return false;
}
@ -804,7 +805,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (errno == ELOOP) {
warning("file %s is a symlink and is not secure\n", filename);
} else {
warning("could not create file %s: %s\n", filename, strerror(errno));
warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// close the directory and reset the current working directory
@ -828,7 +829,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not truncate shared memory file: %s\n", strerror(errno));
warning("could not truncate shared memory file: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -837,7 +838,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
warning("could not set shared memory file size: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -887,7 +888,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
"Permission denied", OS_ERR);
}
else {
THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR);
}
}
int fd = result;
@ -961,7 +962,7 @@ static char* mmap_create_shared(size_t size) {
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed - %s\n", strerror(errno));
warning("mmap failed - %s\n", os::strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
@ -1025,7 +1026,7 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed: %s\n", strerror(errno));
warning("fstat failed: %s\n", os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
@ -1136,7 +1137,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed: %s\n", strerror(errno));
warning("mmap failed: %s\n", os::strerror(errno));
}
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");

View File

@ -593,15 +593,7 @@ void os::Linux::libpthread_init() {
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.
#if __GNUC__ < 3 // gcc 2.x does not support noinline attribute
#define NOINLINE
#else
#define NOINLINE __attribute__ ((noinline))
#endif
static void _expand_stack_to(address bottom) NOINLINE;
static void _expand_stack_to(address bottom) {
static void NOINLINE _expand_stack_to(address bottom) {
address sp;
size_t size;
volatile char *p;
@ -768,7 +760,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
@ -889,6 +881,13 @@ void os::free_thread(OSThread* osthread) {
assert(osthread != NULL, "osthread not set");
if (Thread::current()->osthread() == osthread) {
#ifdef ASSERT
sigset_t current;
sigemptyset(&current);
pthread_sigmask(SIG_SETMASK, NULL, &current);
assert(!sigismember(&current, SR_signum), "SR signal should not be blocked!");
#endif
// Restore caller's signal mask
sigset_t sigmask = osthread->caller_sigmask();
pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
@ -1394,7 +1393,7 @@ void os::die() {
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
const char *s = ::strerror(errno);
const char *s = os::strerror(errno);
size_t n = ::strlen(s);
if (n >= len) {
n = len - 1;
@ -2600,7 +2599,7 @@ static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,
strerror(err), err);
os::strerror(err), err);
}
static void warn_fail_commit_memory(char* addr, size_t size,
@ -2608,7 +2607,7 @@ static void warn_fail_commit_memory(char* addr, size_t size,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size,
alignment_hint, exec, strerror(err), err);
alignment_hint, exec, os::strerror(err), err);
}
// NOTE: Linux kernel does not really reserve the pages for us.
@ -3911,7 +3910,8 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
// after sigsuspend.
int old_errno = errno;
Thread* thread = Thread::current();
Thread* thread = Thread::current_or_null_safe();
assert(thread != NULL, "Missing current thread in SR_handler");
OSThread* osthread = thread->osthread();
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
@ -3923,7 +3923,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
os::SuspendResume::State state = osthread->sr.suspended();
if (state == os::SuspendResume::SR_SUSPENDED) {
sigset_t suspend_set; // signals for sigsuspend()
sigemptyset(&suspend_set);
// get current set of blocked signals and unblock resume signal
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, SR_signum);
@ -4177,6 +4177,7 @@ static bool call_chained_handler(struct sigaction *actp, int sig,
// try to honor the signal mask
sigset_t oset;
sigemptyset(&oset);
pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
// call into the chained handler
@ -4187,7 +4188,7 @@ static bool call_chained_handler(struct sigaction *actp, int sig,
}
// restore the signal mask
pthread_sigmask(SIG_SETMASK, &oset, 0);
pthread_sigmask(SIG_SETMASK, &oset, NULL);
}
// Tell jvm's signal handler the signal is taken care of.
return true;
@ -4572,7 +4573,7 @@ void os::init(void) {
Linux::set_page_size(sysconf(_SC_PAGESIZE));
if (Linux::page_size() == -1) {
fatal("os_linux.cpp: os::init: sysconf failed (%s)",
strerror(errno));
os::strerror(errno));
}
init_page_sizes((size_t) Linux::page_size());
@ -4588,7 +4589,7 @@ void os::init(void) {
int status;
pthread_condattr_t* _condattr = os::Linux::condAttr();
if ((status = pthread_condattr_init(_condattr)) != 0) {
fatal("pthread_condattr_init: %s", strerror(status));
fatal("pthread_condattr_init: %s", os::strerror(status));
}
// Only set the clock if CLOCK_MONOTONIC is available
if (os::supports_monotonic_clock()) {
@ -4597,7 +4598,7 @@ void os::init(void) {
warning("Unable to use monotonic clock with relative timed-waits" \
" - changes to the time-of-day clock may have adverse affects");
} else {
fatal("pthread_condattr_setclock: %s", strerror(status));
fatal("pthread_condattr_setclock: %s", os::strerror(status));
}
}
}
@ -4843,7 +4844,7 @@ int os::active_processor_count() {
log_trace(os)("active_processor_count: "
"CPU_ALLOC failed (%s) - using "
"online processor count: %d",
strerror(errno), online_cpus);
os::strerror(errno), online_cpus);
return online_cpus;
}
}
@ -4873,7 +4874,7 @@ int os::active_processor_count() {
else {
cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
warning("sched_getaffinity failed (%s)- using online processor count (%d) "
"which may exceed available processors", strerror(errno), cpu_count);
"which may exceed available processors", os::strerror(errno), cpu_count);
}
if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used
@ -5724,6 +5725,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// Don't catch signals while blocked; let the running threads have the signals.
// (This allows a debugger to break into the running thread.)
sigset_t oldsigs;
sigemptyset(&oldsigs);
sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
@ -100,7 +101,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
int fd = result;
@ -111,7 +112,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -123,7 +124,7 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -308,7 +309,7 @@ static DIR *open_directory_secure(const char* dirname) {
if (errno == ELOOP) {
warning("directory %s is a symlink and is not secure\n", dirname);
} else {
warning("could not open directory %s: %s\n", dirname, strerror(errno));
warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@ -419,7 +420,7 @@ static bool is_file_secure(int fd, const char *filename) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed on %s: %s\n", filename, strerror(errno));
warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
}
return false;
}
@ -459,7 +460,7 @@ static char* get_user_name(uid_t uid) {
if (PrintMiscellaneous && Verbose) {
if (result != 0) {
warning("Could not retrieve passwd entry: %s\n",
strerror(result));
os::strerror(result));
}
else if (p == NULL) {
// this check is added to protect against an observed problem
@ -473,7 +474,7 @@ static char* get_user_name(uid_t uid) {
// Bug Id 89052 was opened with RedHat.
//
warning("Could not retrieve passwd entry: %s\n",
strerror(errno));
os::strerror(errno));
}
else {
warning("Could not determine user name: %s\n",
@ -509,7 +510,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
"Process not found");
}
else /* EPERM */ {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
@ -664,7 +665,7 @@ static void remove_file(const char* path) {
if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -772,7 +773,7 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (PrintMiscellaneous && Verbose) {
warning("could not create directory %s: %s\n",
dirname, strerror(errno));
dirname, os::strerror(errno));
}
return false;
}
@ -814,7 +815,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (errno == ELOOP) {
warning("file %s is a symlink and is not secure\n", filename);
} else {
warning("could not create file %s: %s\n", filename, strerror(errno));
warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// close the directory and reset the current working directory
@ -838,7 +839,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not truncate shared memory file: %s\n", strerror(errno));
warning("could not truncate shared memory file: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -847,7 +848,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
warning("could not set shared memory file size: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -897,7 +898,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
"Permission denied", OS_ERR);
}
else {
THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR);
}
}
int fd = result;
@ -970,7 +971,7 @@ static char* mmap_create_shared(size_t size) {
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed - %s\n", strerror(errno));
warning("mmap failed - %s\n", os::strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
@ -1034,7 +1035,7 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed: %s\n", strerror(errno));
warning("fstat failed: %s\n", os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
@ -1151,7 +1152,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed: %s\n", strerror(errno));
warning("mmap failed: %s\n", os::strerror(errno));
}
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");

View File

@ -1144,7 +1144,8 @@ void os::WatcherThreadCrashProtection::check_crash_protection(int sig,
#define check_with_errno(check_type, cond, msg) \
do { \
int err = errno; \
check_type(cond, "%s; error='%s' (errno=%d)", msg, strerror(err), err); \
check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \
os::errno_name(err)); \
} while (false)
#define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg)
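With the new form, a failed check reports both the message text and the symbolic errno name. Hypothetical usage:

  int ret = ::close(fd);
  assert_with_errno(ret == 0, "close failed");
  // on failure prints e.g.: "close failed; error='Bad file descriptor' (errno=EBADF)"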

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -461,7 +461,7 @@ SolarisAttachOperation* SolarisAttachListener::dequeue() {
while ((res = ::sema_wait(wakeup())) == EINTR)
;
if (res) {
warning("sema_wait failed: %s", strerror(res));
warning("sema_wait failed: %s", os::strerror(res));
return NULL;
}

View File

@ -1009,7 +1009,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
(uintx) tid, describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
} else {
log_warning(os, thread)("Failed to start thread - thr_create failed (%s) for attributes: %s.",
strerror(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
os::errno_name(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
}
if (status != 0) {
@ -1354,7 +1354,7 @@ jlong getTimeMillis() {
jlong os::javaTimeMillis() {
timeval t;
if (gettimeofday(&t, NULL) == -1) {
fatal("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
fatal("os::javaTimeMillis: gettimeofday (%s)", os::strerror(errno));
}
return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
}
@ -1362,7 +1362,7 @@ jlong os::javaTimeMillis() {
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
timeval t;
if (gettimeofday(&t, NULL) == -1) {
fatal("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno));
fatal("os::javaTimeSystemUTC: gettimeofday (%s)", os::strerror(errno));
}
seconds = jlong(t.tv_sec);
nanos = jlong(t.tv_usec) * 1000;
@ -1898,7 +1898,7 @@ static bool check_addr0(outputStream* st) {
int fd = ::open("/proc/self/map",O_RDONLY);
if (fd >= 0) {
prmap_t *p = NULL;
char *mbuff = (char *) calloc(read_chunk, sizeof(prmap_t) + 1);
char *mbuff = (char *) calloc(read_chunk, sizeof(prmap_t));
if (NULL == mbuff) {
::close(fd);
return status;
@ -1912,7 +1912,7 @@ static bool check_addr0(outputStream* st) {
p = (prmap_t *)mbuff;
for(int i = 0; i < nmap; i++){
if (p->pr_vaddr == 0x0) {
st->print("Warning: Address: " PTR_FORMAT ", Size: %dK, ",p->pr_vaddr, p->pr_size/1024);
st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ",p->pr_vaddr, p->pr_size/1024);
st->print("Mapped file: %s, ", p->pr_mapname[0] == '\0' ? "None" : p->pr_mapname);
st->print("Access: ");
st->print("%s",(p->pr_mflags & MA_READ) ? "r" : "-");
@ -1921,13 +1921,12 @@ static bool check_addr0(outputStream* st) {
st->cr();
status = true;
}
p = (prmap_t *)(mbuff + sizeof(prmap_t));
p++;
}
memset(mbuff, 0, read_chunk*sizeof(prmap_t)+1);
}
free(mbuff);
::close(fd);
}
::close(fd);
return status;
}
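Three fixes land in check_addr0: the calloc size drops a stray +1 per element, ::close(fd) moves outside the buffer branch so the descriptor is closed once on all paths, and the pointer advance becomes p++ (the old recomputation from mbuff always yielded the second element, so the loop kept re-reading one mapping). The corrected walk, in sketch form:

  prmap_t* p = (prmap_t*)mbuff;
  for (int i = 0; i < nmap; i++, p++) {  // advance one entry per iteration
    // inspect *p (hypothetical per-entry check)
  }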
@ -2161,7 +2160,7 @@ void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
const char *s = ::strerror(errno);
const char *s = os::strerror(errno);
size_t n = ::strlen(s);
if (n >= len) {
n = len - 1;
@ -2370,7 +2369,7 @@ static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
strerror(err), err);
os::strerror(err), err);
}
static void warn_fail_commit_memory(char* addr, size_t bytes,
@ -2378,7 +2377,7 @@ static void warn_fail_commit_memory(char* addr, size_t bytes,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
alignment_hint, exec, strerror(err), err);
alignment_hint, exec, os::strerror(err), err);
}
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
@ -2759,7 +2758,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
char buf[256];
buf[0] = '\0';
if (addr == NULL) {
jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
jio_snprintf(buf, sizeof(buf), ": %s", os::strerror(err));
}
warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
@ -4373,7 +4372,7 @@ void os::init(void) {
page_size = sysconf(_SC_PAGESIZE);
if (page_size == -1) {
fatal("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
fatal("os_solaris.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
}
init_page_sizes((size_t) page_size);
@ -4385,7 +4384,7 @@ void os::init(void) {
int fd = ::open("/dev/zero", O_RDWR);
if (fd < 0) {
fatal("os::init: cannot open /dev/zero (%s)", strerror(errno));
fatal("os::init: cannot open /dev/zero (%s)", os::strerror(errno));
} else {
Solaris::set_dev_zero_fd(fd);
@ -5626,7 +5625,7 @@ int os::fork_and_exec(char* cmd) {
if (pid < 0) {
// fork failed
warning("fork failed: %s", strerror(errno));
warning("fork failed: %s", os::strerror(errno));
return -1;
} else if (pid == 0) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,7 +102,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
@ -114,7 +114,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -125,7 +125,7 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -311,7 +311,7 @@ static DIR *open_directory_secure(const char* dirname) {
if (errno == ELOOP) {
warning("directory %s is a symlink and is not secure\n", dirname);
} else {
warning("could not open directory %s: %s\n", dirname, strerror(errno));
warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@ -422,7 +422,7 @@ static bool is_file_secure(int fd, const char *filename) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed on %s: %s\n", filename, strerror(errno));
warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
}
return false;
}
@ -464,7 +464,7 @@ static char* get_user_name(uid_t uid) {
if (PrintMiscellaneous && Verbose) {
if (p == NULL) {
warning("Could not retrieve passwd entry: %s\n",
strerror(errno));
os::strerror(errno));
}
else {
warning("Could not determine user name: %s\n",
@ -500,7 +500,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
"Process not found");
}
else /* EPERM */ {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
@ -657,7 +657,7 @@ static char* get_user_name(int vmid, TRAPS) {
// In this case, the psinfo file for the process id existed,
// but we didn't have permission to access it.
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
strerror(errno));
os::strerror(errno));
}
// at this point, we don't know if the process id itself doesn't
@ -703,7 +703,7 @@ static void remove_file(const char* path) {
if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -813,7 +813,7 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (PrintMiscellaneous && Verbose) {
warning("could not create directory %s: %s\n",
dirname, strerror(errno));
dirname, os::strerror(errno));
}
return false;
}
@ -855,7 +855,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (errno == ELOOP) {
warning("file %s is a symlink and is not secure\n", filename);
} else {
warning("could not create file %s: %s\n", filename, strerror(errno));
warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// close the directory and reset the current working directory
@ -879,7 +879,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not truncate shared memory file: %s\n", strerror(errno));
warning("could not truncate shared memory file: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -888,7 +888,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
warning("could not set shared memory file size: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -916,7 +916,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
"Permission denied", OS_ERR);
}
else {
THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR);
}
}
int fd = result;
@ -990,7 +990,7 @@ static char* mmap_create_shared(size_t size) {
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed - %s\n", strerror(errno));
warning("mmap failed - %s\n", os::strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
@ -1055,7 +1055,7 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed: %s\n", strerror(errno));
warning("fstat failed: %s\n", os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
@ -1172,7 +1172,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed: %s\n", strerror(errno));
warning("mmap failed: %s\n", os::strerror(errno));
}
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
@ -49,7 +50,7 @@ ThreadCritical::ThreadCritical() {
if (global_mut_owner != owner) {
if (os::Solaris::mutex_lock(&global_mut))
fatal("ThreadCritical::ThreadCritical: mutex_lock failed (%s)",
strerror(errno));
os::strerror(errno));
assert(global_mut_count == 0, "must have clean count");
assert(global_mut_owner == -1, "must have clean owner");
}
@ -68,7 +69,7 @@ ThreadCritical::~ThreadCritical() {
if (global_mut_count == 0) {
global_mut_owner = -1;
if (os::Solaris::mutex_unlock(&global_mut))
fatal("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", strerror(errno));
fatal("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", os::strerror(errno));
}
} else {
assert (Threads::number_of_threads() == 0, "valid only during initialization");

View File

@ -642,7 +642,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
} else {
log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
strerror(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
}
if (thread_handle == NULL) {
@ -1898,7 +1898,7 @@ size_t os::lasterror(char* buf, size_t len) {
if (errno != 0) {
// C runtime error that has no corresponding DOS error code
const char* s = strerror(errno);
const char* s = os::strerror(errno);
size_t n = strlen(s);
if (n >= len) n = len - 1;
strncpy(buf, s, n);
@ -2441,7 +2441,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
jio_snprintf(buf, sizeof(buf), "Execution protection violation "
"at " INTPTR_FORMAT
", unguarding " INTPTR_FORMAT ": %s", addr,
page_start, (res ? "success" : strerror(errno)));
page_start, (res ? "success" : os::strerror(errno)));
tty->print_raw_cr(buf);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,7 +96,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (fd == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
for (size_t remaining = size; remaining > 0;) {
@ -105,7 +105,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (nbytes == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -117,7 +117,7 @@ static void save_memory_to_file(char* addr, size_t size) {
int result = ::_close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -497,7 +497,7 @@ static void remove_file(const char* dirname, const char* filename) {
if (PrintMiscellaneous && Verbose) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -1358,7 +1358,7 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
if (ret_code == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not get status information from file %s: %s\n",
filename, strerror(errno));
filename, os::strerror(errno));
}
CloseHandle(fmh);
CloseHandle(fh);
@ -1553,7 +1553,7 @@ static size_t sharedmem_filesize(const char* filename, TRAPS) {
//
if (::stat(filename, &statbuf) == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("stat %s failed: %s\n", filename, strerror(errno));
warning("stat %s failed: %s\n", filename, os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");

View File

@ -1,4 +1,5 @@
/* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
/*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +29,7 @@
#include "ci/ciKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@ -574,7 +576,7 @@ class CompileReplay : public StackObj {
Method* method = parse_method(CHECK);
if (had_error()) return;
/* just copied from Method, to build interpret data*/
if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
if (ReferencePendingListLocker::is_locked_by_self()) {
return;
}
// To be properly initialized, some profiling in the MDO needs the

View File

@ -5358,12 +5358,12 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, TRAPS) {
ik->print_loading_log(LogLevel::Debug, _loader_data, _stream);
}
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
ResourceMark rm;
// print out the superclass.
const char * from = ik->external_name();
if (ik->java_super() != NULL) {
log_info(classresolve)("%s %s (super)",
log_debug(classresolve)("%s %s (super)",
from,
ik->java_super()->external_name());
}
@ -5374,7 +5374,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, TRAPS) {
for (int i = 0; i < length; i++) {
const Klass* const k = local_interfaces->at(i);
const char * to = k->external_name();
log_info(classresolve)("%s %s (interface)", from, to);
log_debug(classresolve)("%s %s (interface)", from, to);
}
}
}
@ -5684,15 +5684,16 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
}
if (!is_internal()) {
if (TraceClassLoadingPreorder) {
tty->print("[Loading %s",
_class_name->as_klass_external_name());
if (log_is_enabled(Debug, classload, preorder)){
ResourceMark rm(THREAD);
outputStream* log = LogHandle(classload, preorder)::debug_stream();
log->print("%s", _class_name->as_klass_external_name());
if (stream->source() != NULL) {
tty->print(" from %s", stream->source());
log->print(" source: %s", stream->source());
}
tty->print_cr("]");
log->cr();
}
#if INCLUDE_CDS
if (DumpLoadedClassList != NULL && stream->source() != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into CDS archive

View File

@ -1439,6 +1439,12 @@ void java_lang_ThreadGroup::compute_offsets() {
compute_offset(_ngroups_offset, k, vmSymbols::ngroups_name(), vmSymbols::int_signature());
}
void java_lang_Throwable::compute_offsets() {
Klass* k = SystemDictionary::Throwable_klass();
compute_offset(depth_offset, k, vmSymbols::depth_name(), vmSymbols::int_signature());
}
oop java_lang_Throwable::unassigned_stacktrace() {
InstanceKlass* ik = SystemDictionary::Throwable_klass();
address addr = ik->static_field_addr(static_unassigned_stacktrace_offset);
@ -1458,11 +1464,13 @@ void java_lang_Throwable::set_backtrace(oop throwable, oop value) {
throwable->release_obj_field_put(backtrace_offset, value);
}
oop java_lang_Throwable::message(oop throwable) {
return throwable->obj_field(detailMessage_offset);
int java_lang_Throwable::depth(oop throwable) {
return throwable->int_field(depth_offset);
}
void java_lang_Throwable::set_depth(oop throwable, int value) {
throwable->int_field_put(depth_offset, value);
}
oop java_lang_Throwable::message(Handle throwable) {
return throwable->obj_field(detailMessage_offset);
@ -1512,10 +1520,12 @@ static inline bool version_matches(Method* method, int version) {
return method != NULL && (method->constants()->version() == version);
}
// This class provides a simple wrapper over the internal structure of
// exception backtrace to insulate users of the backtrace from needing
// to know what it looks like.
class BacktraceBuilder: public StackObj {
friend class BacktraceIterator;
private:
Handle _backtrace;
objArrayOop _head;
@ -1526,8 +1536,6 @@ class BacktraceBuilder: public StackObj {
int _index;
NoSafepointVerifier _nsv;
public:
enum {
trace_methods_offset = java_lang_Throwable::trace_methods_offset,
trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
@ -1560,6 +1568,8 @@ class BacktraceBuilder: public StackObj {
return cprefs;
}
public:
// constructor for new backtrace
BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL), _cprefs(NULL) {
expand(CHECK);
@ -1645,9 +1655,68 @@ class BacktraceBuilder: public StackObj {
};
struct BacktraceElement : public StackObj {
int _method_id;
int _bci;
int _version;
int _cpref;
Handle _mirror;
BacktraceElement(Handle mirror, int mid, int version, int bci, int cpref) :
_mirror(mirror), _method_id(mid), _version(version), _bci(bci), _cpref(cpref) {}
};
class BacktraceIterator : public StackObj {
int _index;
objArrayHandle _result;
objArrayHandle _mirrors;
typeArrayHandle _methods;
typeArrayHandle _bcis;
typeArrayHandle _cprefs;
void init(objArrayHandle result, Thread* thread) {
// Get method id, bci, version and mirror from chunk
_result = result;
if (_result.not_null()) {
_methods = typeArrayHandle(thread, BacktraceBuilder::get_methods(_result));
_bcis = typeArrayHandle(thread, BacktraceBuilder::get_bcis(_result));
_mirrors = objArrayHandle(thread, BacktraceBuilder::get_mirrors(_result));
_cprefs = typeArrayHandle(thread, BacktraceBuilder::get_cprefs(_result));
_index = 0;
}
}
public:
BacktraceIterator(objArrayHandle result, Thread* thread) {
init(result, thread);
assert(_methods.is_null() || _methods->length() == java_lang_Throwable::trace_chunk_size, "lengths don't match");
}
BacktraceElement next(Thread* thread) {
BacktraceElement e (Handle(thread, _mirrors->obj_at(_index)),
_methods->short_at(_index),
Backtrace::version_at(_bcis->int_at(_index)),
Backtrace::bci_at(_bcis->int_at(_index)),
_cprefs->short_at(_index));
_index++;
if (_index >= java_lang_Throwable::trace_chunk_size) {
int next_offset = java_lang_Throwable::trace_next_offset;
// Get next chunk
objArrayHandle result (thread, objArrayOop(_result->obj_at(next_offset)));
init(result, thread);
}
return e;
}
bool repeat() {
return _result.not_null() && _mirrors->obj_at(_index) != NULL;
}
};
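The iterator hides the chunked backtrace layout behind a repeat()/next() protocol; the call sites later in this file use it like so:

  BacktraceIterator iter(result, THREAD);
  while (iter.repeat()) {                    // more saved frames remain
    BacktraceElement bte = iter.next(THREAD);
    // consume bte._mirror, bte._method_id, bte._version, bte._bci, bte._cpref
  }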
// Print stack trace element to resource allocated buffer
char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
int method_id, int version, int bci, int cpref) {
static void print_stack_element_to_stream(outputStream* st, Handle mirror, int method_id,
int version, int bci, int cpref) {
ResourceMark rm;
// Get strings and string lengths
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
@ -1698,26 +1767,16 @@ char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
}
}
return buf;
}
void java_lang_Throwable::print_stack_element(outputStream *st, Handle mirror,
int method_id, int version, int bci, int cpref) {
ResourceMark rm;
char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci, cpref);
st->print_cr("%s", buf);
}
void java_lang_Throwable::print_stack_element(outputStream *st, const methodHandle& method, int bci) {
Handle mirror = method->method_holder()->java_mirror();
int method_id = method->orig_method_idnum();
int version = method->constants()->version();
int cpref = method->name_index();
print_stack_element(st, mirror, method_id, version, bci, cpref);
}
const char* java_lang_Throwable::no_stack_trace_message() {
return "\t<<no stack trace available>>";
print_stack_element_to_stream(st, mirror, method_id, version, bci, cpref);
}
/**
@ -1734,32 +1793,17 @@ void java_lang_Throwable::print_stack_trace(Handle throwable, outputStream* st)
while (throwable.not_null()) {
objArrayHandle result (THREAD, objArrayOop(backtrace(throwable())));
if (result.is_null()) {
st->print_raw_cr(no_stack_trace_message());
st->print_raw_cr("\t<<no stack trace available>>");
return;
}
BacktraceIterator iter(result, THREAD);
while (result.not_null()) {
// Get method id, bci, version and mirror from chunk
typeArrayHandle methods (THREAD, BacktraceBuilder::get_methods(result));
typeArrayHandle bcis (THREAD, BacktraceBuilder::get_bcis(result));
objArrayHandle mirrors (THREAD, BacktraceBuilder::get_mirrors(result));
typeArrayHandle cprefs (THREAD, BacktraceBuilder::get_cprefs(result));
int length = methods()->length();
for (int index = 0; index < length; index++) {
Handle mirror(THREAD, mirrors->obj_at(index));
// NULL mirror means end of stack trace
if (mirror.is_null()) goto handle_cause;
int method = methods->short_at(index);
int version = Backtrace::version_at(bcis->int_at(index));
int bci = Backtrace::bci_at(bcis->int_at(index));
int cpref = cprefs->short_at(index);
print_stack_element(st, mirror, method, version, bci, cpref);
}
result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
while (iter.repeat()) {
BacktraceElement bte = iter.next(THREAD);
print_stack_element_to_stream(st, bte._mirror, bte._method_id, bte._version, bte._bci, bte._cpref);
}
handle_cause:
{
// Call getCause() which doesn't necessarily return the _cause field.
EXCEPTION_MARK;
JavaValue cause(T_OBJECT);
JavaCalls::call_virtual(&cause,
@ -1811,6 +1855,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
int max_depth = MaxJavaStackTraceDepth;
JavaThread* thread = (JavaThread*)THREAD;
BacktraceBuilder bt(CHECK);
// If there is no Java frame just return the method that was being called
@ -1818,6 +1863,8 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
if (!thread->has_last_Java_frame()) {
if (max_depth >= 1 && method() != NULL) {
bt.push(method(), 0, CHECK);
log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), 1);
set_depth(throwable(), 1);
set_backtrace(throwable(), bt.backtrace());
}
return;
@ -1925,8 +1972,11 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
total_count++;
}
log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), total_count);
// Put completed stack trace into throwable object
set_backtrace(throwable(), bt.backtrace());
set_depth(throwable(), total_count);
}
void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHandle& method) {
@ -1980,94 +2030,60 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
// methods as preallocated errors aren't created by "java" code.
// fill in as much stack trace as possible
typeArrayOop methods = BacktraceBuilder::get_methods(backtrace);
int max_chunks = MIN2(methods->length(), (int)MaxJavaStackTraceDepth);
int chunk_count = 0;
for (;!st.at_end(); st.next()) {
bt.push(st.method(), st.bci(), CHECK);
chunk_count++;
// Bail-out for deep stacks
if (chunk_count >= max_chunks) break;
if (chunk_count >= trace_chunk_size) break;
}
set_depth(throwable(), chunk_count);
log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), chunk_count);
// We support the Throwable immutability protocol defined for Java 7.
java_lang_Throwable::set_stacktrace(throwable(), java_lang_Throwable::unassigned_stacktrace());
assert(java_lang_Throwable::unassigned_stacktrace() != NULL, "not initialized");
}
void java_lang_Throwable::get_stack_trace_elements(Handle throwable,
objArrayHandle stack_trace_array_h, TRAPS) {
int java_lang_Throwable::get_stack_trace_depth(oop throwable, TRAPS) {
if (throwable == NULL) {
THROW_0(vmSymbols::java_lang_NullPointerException());
if (throwable.is_null() || stack_trace_array_h.is_null()) {
THROW(vmSymbols::java_lang_NullPointerException());
}
objArrayOop chunk = objArrayOop(backtrace(throwable));
int depth = 0;
if (chunk != NULL) {
// Iterate over chunks and count full ones
while (true) {
objArrayOop next = objArrayOop(chunk->obj_at(trace_next_offset));
if (next == NULL) break;
depth += trace_chunk_size;
chunk = next;
}
assert(chunk != NULL && chunk->obj_at(trace_next_offset) == NULL, "sanity check");
// Count element in remaining partial chunk. NULL value for mirror
// marks the end of the stack trace elements that are saved.
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
assert(mirrors != NULL, "sanity check");
for (int i = 0; i < mirrors->length(); i++) {
if (mirrors->obj_at(i) == NULL) break;
depth++;
}
assert(stack_trace_array_h->is_objArray(), "Stack trace array should be an array of StackTraceElement");
if (stack_trace_array_h->length() != depth(throwable())) {
THROW(vmSymbols::java_lang_IndexOutOfBoundsException());
}
objArrayHandle result(THREAD, objArrayOop(backtrace(throwable())));
BacktraceIterator iter(result, THREAD);
int index = 0;
while (iter.repeat()) {
BacktraceElement bte = iter.next(THREAD);
Handle stack_trace_element(THREAD, stack_trace_array_h->obj_at(index++));
if (stack_trace_element.is_null()) {
THROW(vmSymbols::java_lang_NullPointerException());
}
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(bte._mirror()));
methodHandle method (THREAD, holder->method_with_orig_idnum(bte._method_id, bte._version));
java_lang_StackTraceElement::fill_in(stack_trace_element, holder,
method,
bte._version,
bte._bci,
bte._cpref, CHECK);
}
return depth;
}
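This replaces the old one-frame-at-a-time accessors below with a single bulk fill: the caller sizes a StackTraceElement[] from the cached depth and the VM populates every slot in one pass. A hypothetical caller:

  // 'elements' is a preallocated StackTraceElement[depth(throwable())]
  java_lang_Throwable::get_stack_trace_elements(throwable, elements, CHECK);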
oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS) {
if (throwable == NULL) {
THROW_0(vmSymbols::java_lang_NullPointerException());
}
if (index < 0) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
// Compute how many chunks to skip and index into actual chunk
objArrayOop chunk = objArrayOop(backtrace(throwable));
int skip_chunks = index / trace_chunk_size;
int chunk_index = index % trace_chunk_size;
while (chunk != NULL && skip_chunks > 0) {
chunk = objArrayOop(chunk->obj_at(trace_next_offset));
skip_chunks--;
}
if (chunk == NULL) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
// Get method id, bci, version, mirror and cpref from chunk
typeArrayOop methods = BacktraceBuilder::get_methods(chunk);
typeArrayOop bcis = BacktraceBuilder::get_bcis(chunk);
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
typeArrayOop cprefs = BacktraceBuilder::get_cprefs(chunk);
assert(methods != NULL && bcis != NULL && mirrors != NULL, "sanity check");
int method = methods->short_at(chunk_index);
int version = Backtrace::version_at(bcis->int_at(chunk_index));
int bci = Backtrace::bci_at(bcis->int_at(chunk_index));
int cpref = cprefs->short_at(chunk_index);
Handle mirror(THREAD, mirrors->obj_at(chunk_index));
// Chunk can be partially full
if (mirror.is_null()) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, cpref, CHECK_0);
return element;
}
oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
int version, int bci, int cpref, TRAPS) {
oop java_lang_StackTraceElement::create(const methodHandle& method, int bci, TRAPS) {
// Allocate java.lang.StackTraceElement instance
Klass* k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
@ -2078,23 +2094,31 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
Handle element = ik->allocate_instance_handle(CHECK_0);
int cpref = method->name_index();
int version = method->constants()->version();
fill_in(element, method->method_holder(), method, version, bci, cpref, CHECK_0);
return element();
}
void java_lang_StackTraceElement::fill_in(Handle element,
InstanceKlass* holder, const methodHandle& method,
int version, int bci, int cpref, TRAPS) {
assert(element->is_a(SystemDictionary::StackTraceElement_klass()), "sanity check");
// Fill in class name
ResourceMark rm(THREAD);
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
const char* str = holder->external_name();
oop classname = StringTable::intern((char*) str, CHECK_0);
oop classname = StringTable::intern((char*) str, CHECK);
java_lang_StackTraceElement::set_declaringClass(element(), classname);
Method* method = holder->method_with_orig_idnum(method_id, version);
// The method can be NULL if the requested class version is gone
Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
Symbol* sym = !method.is_null() ? method->name() : holder->constants()->symbol_at(cpref);
// Fill in method name
oop methodname = StringTable::intern(sym, CHECK_0);
oop methodname = StringTable::intern(sym, CHECK);
java_lang_StackTraceElement::set_methodName(element(), methodname);
if (!version_matches(method, version)) {
if (!version_matches(method(), version)) {
// The method was redefined, accurate line number information isn't available
java_lang_StackTraceElement::set_fileName(element(), NULL);
java_lang_StackTraceElement::set_lineNumber(element(), -1);
@ -2103,20 +2127,12 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
Symbol* source = Backtrace::get_source_file_name(holder, version);
if (ShowHiddenFrames && source == NULL)
source = vmSymbols::unknown_class_name();
oop filename = StringTable::intern(source, CHECK_0);
oop filename = StringTable::intern(source, CHECK);
java_lang_StackTraceElement::set_fileName(element(), filename);
int line_number = Backtrace::get_line_number(method, bci);
java_lang_StackTraceElement::set_lineNumber(element(), line_number);
}
return element();
}
oop java_lang_StackTraceElement::create(const methodHandle& method, int bci, TRAPS) {
Handle mirror (THREAD, method->method_holder()->java_mirror());
int method_id = method->orig_method_idnum();
int cpref = method->name_index();
return create(mirror, method_id, method->constants()->version(), bci, cpref, THREAD);
}
Method* java_lang_StackFrameInfo::get_method(Handle stackFrame, InstanceKlass* holder, TRAPS) {
@ -3477,8 +3493,8 @@ int java_lang_Class::_signers_offset;
GrowableArray<Klass*>* java_lang_Class::_fixup_mirror_list = NULL;
int java_lang_Throwable::backtrace_offset;
int java_lang_Throwable::detailMessage_offset;
int java_lang_Throwable::cause_offset;
int java_lang_Throwable::stackTrace_offset;
int java_lang_Throwable::depth_offset;
int java_lang_Throwable::static_unassigned_stacktrace_offset;
int java_lang_reflect_AccessibleObject::override_offset;
int java_lang_reflect_Method::clazz_offset;
@ -3679,7 +3695,6 @@ void JavaClasses::compute_hard_coded_offsets() {
// Throwable Class
java_lang_Throwable::backtrace_offset = java_lang_Throwable::hc_backtrace_offset * x + header;
java_lang_Throwable::detailMessage_offset = java_lang_Throwable::hc_detailMessage_offset * x + header;
java_lang_Throwable::cause_offset = java_lang_Throwable::hc_cause_offset * x + header;
java_lang_Throwable::stackTrace_offset = java_lang_Throwable::hc_stackTrace_offset * x + header;
java_lang_Throwable::static_unassigned_stacktrace_offset = java_lang_Throwable::hc_static_unassigned_stacktrace_offset * x;
@ -3730,6 +3745,7 @@ void JavaClasses::compute_hard_coded_offsets() {
void JavaClasses::compute_offsets() {
// java_lang_Class::compute_offsets was called earlier in bootstrap
java_lang_ClassLoader::compute_offsets();
java_lang_Throwable::compute_offsets();
java_lang_Thread::compute_offsets();
java_lang_ThreadGroup::compute_offsets();
java_lang_invoke_MethodHandle::compute_offsets();
@ -3883,8 +3899,8 @@ void JavaClasses::check_offsets() {
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, backtrace, "Ljava/lang/Object;");
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, detailMessage, "Ljava/lang/String;");
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, cause, "Ljava/lang/Throwable;");
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, stackTrace, "[Ljava/lang/StackTraceElement;");
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, depth, "I");
// Boxed primitive objects (java_lang_boxing_object)

View File

@ -440,6 +440,7 @@ class java_lang_ThreadGroup : AllStatic {
class java_lang_Throwable: AllStatic {
friend class BacktraceBuilder;
friend class BacktraceIterator;
private:
// Offsets
@ -465,16 +466,12 @@ class java_lang_Throwable: AllStatic {
static int backtrace_offset;
static int detailMessage_offset;
static int cause_offset;
static int stackTrace_offset;
static int depth_offset;
static int static_unassigned_stacktrace_offset;
// Printing
static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci, int cpref);
// StackTrace (programmatic access, new since 1.4)
static void clear_stacktrace(oop throwable);
// No stack trace available
static const char* no_stack_trace_message();
// Stacktrace (post JDK 1.7.0 to allow immutability protocol to be followed)
static void set_stacktrace(oop throwable, oop st_element_array);
static oop unassigned_stacktrace();
@ -483,19 +480,20 @@ class java_lang_Throwable: AllStatic {
// Backtrace
static oop backtrace(oop throwable);
static void set_backtrace(oop throwable, oop value);
static int depth(oop throwable);
static void set_depth(oop throwable, int value);
// Needed by JVMTI to filter out this internal field.
static int get_backtrace_offset() { return backtrace_offset;}
static int get_detailMessage_offset() { return detailMessage_offset;}
// Message
static oop message(oop throwable);
static oop message(Handle throwable);
static void set_message(oop throwable, oop value);
static Symbol* detail_message(oop throwable);
static void print_stack_element(outputStream *st, Handle mirror, int method,
int version, int bci, int cpref);
static void print_stack_element(outputStream *st, const methodHandle& method, int bci);
static void print_stack_usage(Handle stream);
static void compute_offsets();
// Allocate space for backtrace (created but stack trace not filled in)
static void allocate_backtrace(Handle throwable, TRAPS);
// Fill in current stack trace for throwable with preallocated backtrace (no GC)
@ -504,8 +502,7 @@ class java_lang_Throwable: AllStatic {
static void fill_in_stack_trace(Handle throwable, const methodHandle& method, TRAPS);
static void fill_in_stack_trace(Handle throwable, const methodHandle& method = methodHandle());
// Programmatic access to stack trace
static oop get_stack_trace_element(oop throwable, int index, TRAPS);
static int get_stack_trace_depth(oop throwable, TRAPS);
static void get_stack_trace_elements(Handle throwable, objArrayHandle stack_trace, TRAPS);
// Printing
static void print(Handle throwable, outputStream* st);
static void print_stack_trace(Handle throwable, outputStream* st);
@ -1277,17 +1274,19 @@ class java_lang_StackTraceElement: AllStatic {
static int fileName_offset;
static int lineNumber_offset;
public:
// Setters
static void set_declaringClass(oop element, oop value);
static void set_methodName(oop element, oop value);
static void set_fileName(oop element, oop value);
static void set_lineNumber(oop element, int value);
public:
// Create an instance of StackTraceElement
static oop create(Handle mirror, int method, int version, int bci, int cpref, TRAPS);
static oop create(const methodHandle& method, int bci, TRAPS);
static void fill_in(Handle element, InstanceKlass* holder, const methodHandle& method,
int version, int bci, int cpref, TRAPS);
// Debugging
friend class JavaClasses;
};

View File

@ -61,7 +61,7 @@ bool VerificationType::is_reference_assignable_from(
Klass* obj = SystemDictionary::resolve_or_fail(
name(), Handle(THREAD, klass->class_loader()),
Handle(THREAD, klass->protection_domain()), true, CHECK_false);
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
Verifier::trace_class_resolution(obj, klass());
}
@ -80,7 +80,7 @@ bool VerificationType::is_reference_assignable_from(
Klass* from_class = SystemDictionary::resolve_or_fail(
from.name(), Handle(THREAD, klass->class_loader()),
Handle(THREAD, klass->protection_domain()), true, CHECK_false);
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
Verifier::trace_class_resolution(from_class, klass());
}
return InstanceKlass::cast(from_class)->is_subclass_of(this_class());

View File

@ -107,9 +107,9 @@ void Verifier::trace_class_resolution(Klass* resolve_class, InstanceKlass* verif
const char* resolve = resolve_class->external_name();
// print in a single call to reduce interleaving between threads
if (source_file != NULL) {
log_info(classresolve)("%s %s %s (verification)", verify, resolve, source_file);
log_debug(classresolve)("%s %s %s (verification)", verify, resolve, source_file);
} else {
log_info(classresolve)("%s %s (verification)", verify, resolve);
log_debug(classresolve)("%s %s (verification)", verify, resolve);
}
}
@ -205,7 +205,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
ResourceMark rm(THREAD);
instanceKlassHandle kls =
SystemDictionary::resolve_or_fail(exception_name, true, CHECK_false);
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
Verifier::trace_class_resolution(kls(), klass());
}
@ -1994,7 +1994,7 @@ Klass* ClassVerifier::load_class(Symbol* name, TRAPS) {
name, Handle(THREAD, loader), Handle(THREAD, protection_domain),
true, THREAD);
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
instanceKlassHandle cur_class = current_class();
Verifier::trace_class_resolution(kls, cur_class());
}

View File

@ -376,6 +376,7 @@
template(fillInStackTrace_name, "fillInStackTrace") \
template(getCause_name, "getCause") \
template(initCause_name, "initCause") \
template(depth_name, "depth") \
template(setProperty_name, "setProperty") \
template(getProperty_name, "getProperty") \
template(context_name, "context") \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,6 +32,7 @@
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/methodData.hpp"
@ -904,7 +905,7 @@ void CompileBroker::compile_method_base(const methodHandle& method,
// the pending list lock or a 3-way deadlock may occur
// between the reference handler thread, a GC (instigated
// by a compiler thread), and compiled method registration.
if (InstanceRefKlass::owns_pending_list_lock(JavaThread::current())) {
if (ReferencePendingListLocker::is_locked_by_self()) {
return;
}
@ -1309,7 +1310,7 @@ uint CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandl
* has been fulfilled?
*/
bool CompileBroker::is_compile_blocking() {
assert(!InstanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
assert(!ReferencePendingListLocker::is_locked_by_self(), "possible deadlock");
return !BackgroundCompilation;
}
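Same substitution as in ciReplay: ownership of the reference pending-list lock is now queried through the dedicated ReferencePendingListLocker class rather than InstanceRefKlass. The guard pattern at both call sites:

  if (ReferencePendingListLocker::is_locked_by_self()) {
    return;  // avoid the 3-way deadlock described above
  }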

View File

@ -1931,11 +1931,6 @@ CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk)
if (blk->_ptr == NULL) {
refillLinearAllocBlock(blk);
}
if (PrintMiscellaneous && Verbose) {
if (blk->_word_size == 0) {
warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
}
}
}
void

View File

@ -502,7 +502,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
{
MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
if (!_markBitMap.allocate(_span)) {
warning("Failed to allocate CMS Bit Map");
log_warning(gc)("Failed to allocate CMS Bit Map");
return;
}
assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
@ -513,7 +513,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
}
if (!_markStack.allocate(MarkStackSize)) {
warning("Failed to allocate CMS Marking Stack");
log_warning(gc)("Failed to allocate CMS Marking Stack");
return;
}
@ -527,8 +527,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
ConcGCThreads, true);
if (_conc_workers == NULL) {
warning("GC/CMS: _conc_workers allocation failure: "
"forcing -CMSConcurrentMTEnabled");
log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
CMSConcurrentMTEnabled = false;
} else {
_conc_workers->initialize_workers();
@ -559,7 +558,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
&& num_queues > 0) {
_task_queues = new OopTaskQueueSet(num_queues);
if (_task_queues == NULL) {
warning("task_queues allocation failure.");
log_warning(gc)("task_queues allocation failure.");
return;
}
_hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
@ -567,7 +566,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
for (i = 0; i < num_queues; i++) {
PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
if (q == NULL) {
warning("work_queue allocation failure.");
log_warning(gc)("work_queue allocation failure.");
return;
}
_task_queues->register_queue(i, q);
@ -1413,7 +1412,7 @@ void CMSCollector::acquire_control_and_collect(bool full,
if (_foregroundGCShouldWait) {
// We are going to be waiting for action for the CMS thread;
// it had better not be gone (for instance at shutdown)!
assert(ConcurrentMarkSweepThread::cmst() != NULL,
assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
"CMS thread must be running");
// Wait here until the background collector gives us the go-ahead
ConcurrentMarkSweepThread::clear_CMS_flag(
@ -1519,7 +1518,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
gch->pre_full_gc_dump(gc_timer);
GCTraceTime(Trace, gc) t("CMS:MSC");
GCTraceTime(Trace, gc, phases) t("CMS:MSC");
// Temporarily widen the span of the weak reference processing to
// the entire heap.
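Adding phases to these GCTraceTime tag sets files the sub-phase timings under gc+phases instead of plain gc, so they can be selected independently, e.g.:

  java -Xlog:gc+phases=debug ...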
@ -2235,7 +2234,7 @@ class VerifyMarkedClosure: public BitMapClosure {
};
bool CMSCollector::verify_after_remark() {
GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking.");
GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
static bool init = false;
@ -2287,17 +2286,16 @@ bool CMSCollector::verify_after_remark() {
// all marking, then check if the new marks-vector is
// a subset of the CMS marks-vector.
verify_after_remark_work_1();
} else if (CMSRemarkVerifyVariant == 2) {
} else {
guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
// In this second variant of verification, we flag an error
// (i.e. an object reachable in the new marks-vector not reachable
// in the CMS marks-vector) immediately, also indicating the
// identity of an object (A) that references the unmarked object (B) --
// presumably, a mutation to A failed to be picked up by preclean/remark?
verify_after_remark_work_2();
} else {
warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
CMSRemarkVerifyVariant);
}
return true;
}
@ -2820,7 +2818,7 @@ void CMSCollector::checkpointRootsInitialWork() {
// CMS collection cycle.
setup_cms_unloading_and_verification_state();
GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
// Reset all the PLAB chunk arrays if necessary.
if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@ -3650,7 +3648,7 @@ void CMSCollector::abortable_preclean() {
// XXX FIX ME!!! YSR
size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
while (!(should_abort_preclean() ||
ConcurrentMarkSweepThread::should_terminate())) {
ConcurrentMarkSweepThread::cmst()->should_terminate())) {
workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
cumworkdone += workdone;
loops++;
@ -4104,8 +4102,6 @@ void CMSCollector::checkpointRootsFinal() {
// expect it to be false and set to true
FlagSetting fl(gch->_is_gc_active, false);
GCTraceTime(Trace, gc) tm("Pause Scavenge Before Remark", _gc_timer_cm);
gch->do_collection(true, // full (i.e. force, see below)
false, // !clear_all_soft_refs
0, // size
@ -4123,7 +4119,7 @@ void CMSCollector::checkpointRootsFinal() {
}
void CMSCollector::checkpointRootsFinalWork() {
GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
@ -4173,10 +4169,10 @@ void CMSCollector::checkpointRootsFinalWork() {
// the most recent young generation GC, minus those cleaned up by the
// concurrent precleaning.
if (CMSParallelRemarkEnabled) {
GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
do_remark_parallel();
} else {
GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
do_remark_non_parallel();
}
}
@ -4184,7 +4180,7 @@ void CMSCollector::checkpointRootsFinalWork() {
verify_overflow_empty();
{
GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
refProcessingWork();
}
verify_work_stacks_empty();
@ -4907,7 +4903,7 @@ void CMSCollector::do_remark_non_parallel() {
NULL, // space is set further below
&_markBitMap, &_markStack, &mrias_cl);
{
GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
// Iterate over the dirty cards, setting the corresponding bits in the
// mod union table.
{
@ -4941,7 +4937,7 @@ void CMSCollector::do_remark_non_parallel() {
Universe::verify();
}
{
GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
verify_work_stacks_empty();
@ -4963,7 +4959,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
verify_work_stacks_empty();
@ -4982,7 +4978,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
verify_work_stacks_empty();
@ -5186,7 +5182,7 @@ void CMSCollector::refProcessingWork() {
_span, &_markBitMap, &_markStack,
&cmsKeepAliveClosure, false /* !preclean */);
{
GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
ReferenceProcessorStats stats;
if (rp->processing_is_mt()) {
@ -5228,7 +5224,7 @@ void CMSCollector::refProcessingWork() {
if (should_unload_classes()) {
{
GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@ -5241,13 +5237,13 @@ void CMSCollector::refProcessingWork() {
}
{
GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm);
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
{
GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm);
// Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure);
}
@ -5657,13 +5653,13 @@ bool CMSBitMap::allocate(MemRegion mr) {
ReservedSpace brs(ReservedSpace::allocation_align_size_up(
(_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
if (!brs.is_reserved()) {
warning("CMS bit map allocation failure");
log_warning(gc)("CMS bit map allocation failure");
return false;
}
// For now we'll just commit all of the bit map up front.
// Later on we'll try to be more parsimonious with swap.
if (!_virtual_space.initialize(brs, brs.size())) {
warning("CMS bit map backing store failure");
log_warning(gc)("CMS bit map backing store failure");
return false;
}
assert(_virtual_space.committed_size() == brs.size(),
@ -5749,11 +5745,11 @@ bool CMSMarkStack::allocate(size_t size) {
ReservedSpace rs(ReservedSpace::allocation_align_size_up(
size * sizeof(oop)));
if (!rs.is_reserved()) {
warning("CMSMarkStack allocation failure");
log_warning(gc)("CMSMarkStack allocation failure");
return false;
}
if (!_virtual_space.initialize(rs, rs.size())) {
warning("CMSMarkStack backing store failure");
log_warning(gc)("CMSMarkStack backing store failure");
return false;
}
assert(_virtual_space.committed_size() == rs.size(),
@ -7047,13 +7043,13 @@ SweepClosure::SweepClosure(CMSCollector* collector,
}
void SweepClosure::print_on(outputStream* st) const {
tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(_sp->bottom()), p2i(_sp->end()));
tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
_inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(_sp->bottom()), p2i(_sp->end()));
st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
_inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
}
#ifndef PRODUCT
@ -7066,8 +7062,10 @@ SweepClosure::~SweepClosure() {
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds");
if (inFreeRange()) {
warning("inFreeRange() should have been reset; dumping state of SweepClosure");
print();
LogHandle(gc, sweep) log;
log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
ResourceMark rm;
print_on(log.error_stream());
ShouldNotReachHere();
}
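This hunk shows the Unified Logging idiom used throughout the commit: construct a LogHandle for a tag set, emit at the chosen level, and route existing print_on()-style dumps through the handle's stream. A minimal sketch of the idiom follows; LogHandle, error(), error_stream() and the ResourceMark all appear in the hunk above, while the log_is_enabled guard is an assumption about the UL API of this era and 'state' is a placeholder object:

LogHandle(gc, sweep) log;
if (log_is_enabled(Error, gc, sweep)) { // assumed guard; skips formatting when the level is off
  ResourceMark rm;                      // error_stream() may allocate resources
  log.error("dumping collector state");
  state->print_on(log.error_stream());  // placeholder object reusing its print_on()
}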

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "oops/instanceRefKlass.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
@ -42,16 +42,10 @@
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
bool ConcurrentMarkSweepThread::_should_terminate = false;
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL;
SurrogateLockerThread::SLT_msg_type
ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
Monitor* ConcurrentMarkSweepThread::_sltMonitor = NULL;
ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
: ConcurrentGCThread() {
assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
@ -62,88 +56,58 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
set_name("CMS Main Thread");
if (os::create_thread(this, os::cgc_thread)) {
// An old comment here said: "Priority should be just less
// than that of VMThread". Since the VMThread runs at
// NearMaxPriority, the old comment was inaccurate, but
// changing the default priority to NearMaxPriority-1
// could change current behavior, so the default of
// NearMaxPriority stays in place.
//
// Note that there's a possibility of the VMThread
// starving if UseCriticalCMSThreadPriority is on.
// That won't happen on Solaris for various reasons,
// but may well happen on non-Solaris platforms.
int native_prio;
if (UseCriticalCMSThreadPriority) {
native_prio = os::java_to_os_priority[CriticalPriority];
} else {
native_prio = os::java_to_os_priority[NearMaxPriority];
}
os::set_native_priority(this, native_prio);
if (!DisableStartThread) {
os::start_thread(this);
}
}
_sltMonitor = SLT_lock;
// An old comment here said: "Priority should be just less
// than that of VMThread". Since the VMThread runs at
// NearMaxPriority, the old comment was inaccurate, but
// changing the default priority to NearMaxPriority-1
// could change current behavior, so the default of
// NearMaxPriority stays in place.
//
// Note that there's a possibility of the VMThread
// starving if UseCriticalCMSThreadPriority is on.
// That won't happen on Solaris for various reasons,
// but may well happen on non-Solaris platforms.
create_and_start(UseCriticalCMSThreadPriority ? CriticalPriority : NearMaxPriority);
}
void ConcurrentMarkSweepThread::run() {
void ConcurrentMarkSweepThread::run_service() {
assert(this == cmst(), "just checking");
initialize_in_thread();
// From this time Thread::current() should be working.
assert(this == Thread::current(), "just checking");
if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
warning("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
}
// Wait until Universe::is_fully_initialized()
{
CMSLoopCountWarn loopX("CMS::run", "waiting for "
"Universe::is_fully_initialized()", 2);
MutexLockerEx x(CGC_lock, true);
set_CMS_flag(CMS_cms_wants_token);
// Wait until Universe is initialized and all initialization is completed.
while (!is_init_completed() && !Universe::is_fully_initialized() &&
!_should_terminate) {
CGC_lock->wait(true, 200);
loopX.tick();
}
assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this.");
// Wait until the surrogate locker thread that will do
// pending list locking on our behalf has been created.
// We cannot start the SLT thread ourselves since we need
// to be a JavaThread to do so.
CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
while (_slt == NULL && !_should_terminate) {
while (!ReferencePendingListLocker::is_initialized() && !should_terminate()) {
CGC_lock->wait(true, 200);
loopY.tick();
}
clear_CMS_flag(CMS_cms_wants_token);
}
while (!_should_terminate) {
while (!should_terminate()) {
sleepBeforeNextCycle();
if (_should_terminate) break;
if (should_terminate()) break;
GCIdMark gc_id_mark;
GCCause::Cause cause = _collector->_full_gc_requested ?
_collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
_collector->collect_in_background(cause);
}
assert(_should_terminate, "just checking");
// Check that the state of any protocol for synchronization
// between background (CMS) and foreground collector is "clean"
// (i.e. will not potentially block the foreground collector,
// requiring action by us).
verify_ok_to_terminate();
// Signal that it is terminated
{
MutexLockerEx mu(Terminator_lock,
Mutex::_no_safepoint_check_flag);
assert(_cmst == this, "Weird!");
_cmst = NULL;
Terminator_lock->notify();
}
}
#ifndef PRODUCT
@ -157,39 +121,24 @@ void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
// create and start a new ConcurrentMarkSweep Thread for given CMS generation
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
if (!_should_terminate) {
assert(cmst() == NULL, "start() called twice?");
ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
assert(cmst() == th, "Where did the just-created CMS thread go?");
return th;
}
return NULL;
guarantee(_cmst == NULL, "start() called twice!");
ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
assert(_cmst == th, "Where did the just-created CMS thread go?");
return th;
}
void ConcurrentMarkSweepThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx x(Terminator_lock);
_should_terminate = true;
}
{ // Now post a notify on CGC_lock so as to nudge
// CMS thread(s) that might be slumbering in
// sleepBeforeNextCycle.
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
{ // Now wait until (all) CMS thread(s) have exited
MutexLockerEx x(Terminator_lock);
while(cmst() != NULL) {
Terminator_lock->wait();
}
}
void ConcurrentMarkSweepThread::stop_service() {
// Now post a notify on CGC_lock so as to nudge
// CMS thread(s) that might be slumbering in
// sleepBeforeNextCycle.
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
assert(tc != NULL, "Null ThreadClosure");
if (_cmst != NULL) {
tc->do_thread(_cmst);
if (cmst() != NULL && !cmst()->has_terminated()) {
tc->do_thread(cmst());
}
assert(Universe::is_fully_initialized(),
"Called too early, make sure heap is fully initialized");
@ -202,8 +151,8 @@ void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
}
void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
if (_cmst != NULL) {
_cmst->print_on(st);
if (cmst() != NULL && !cmst()->has_terminated()) {
cmst()->print_on(st);
st->cr();
}
if (_collector != NULL) {
@ -278,7 +227,7 @@ void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
MutexLockerEx x(CGC_lock,
Mutex::_no_safepoint_check_flag);
if (_should_terminate || _collector->_full_gc_requested) {
if (should_terminate() || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
@ -307,7 +256,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
unsigned int loop_count = 0;
while(!_should_terminate) {
while(!should_terminate()) {
double now_time = os::elapsedTime();
long wait_time_millis;
@ -327,7 +276,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
{
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (_should_terminate || _collector->_full_gc_requested) {
if (should_terminate() || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
@ -358,13 +307,13 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
// Too many loops warning
if(++loop_count == 0) {
warning("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
log_warning(gc)("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
}
}
}
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
while (!_should_terminate) {
while (!should_terminate()) {
if(CMSWaitDuration >= 0) {
// Wait until the next synchronous GC, a concurrent full gc
// request or a timeout, whichever is earlier.
@ -381,15 +330,3 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
// and wait some more
}
}
// Note: this method, although exported by the ConcurrentMarkSweepThread,
// which is a non-JavaThread, can only be called by a JavaThread.
// Currently this is done at vm creation time (post-vm-init) by the
// main/Primordial (Java)Thread.
// XXX Consider changing this in the future to allow the CMS thread
// itself to create this thread?
void ConcurrentMarkSweepThread::makeSurrogateLockerThread(TRAPS) {
assert(UseConcMarkSweepGC, "SLT thread needed only for CMS GC");
assert(_slt == NULL, "SLT already created");
_slt = SurrogateLockerThread::make(THREAD);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,17 +37,10 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
friend class VMStructs;
friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship
friend class CMSCollector;
public:
virtual void run();
private:
static ConcurrentMarkSweepThread* _cmst;
static CMSCollector* _collector;
static SurrogateLockerThread* _slt;
static SurrogateLockerThread::SLT_msg_type _sltBuffer;
static Monitor* _sltMonitor;
static bool _should_terminate;
static ConcurrentMarkSweepThread* _cmst;
static CMSCollector* _collector;
enum CMS_flag_type {
CMS_nil = NoBits,
@ -72,13 +65,13 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// debugging
void verify_ok_to_terminate() const PRODUCT_RETURN;
void run_service();
void stop_service();
public:
// Constructor
ConcurrentMarkSweepThread(CMSCollector* collector);
static void makeSurrogateLockerThread(TRAPS);
static SurrogateLockerThread* slt() { return _slt; }
static void threads_do(ThreadClosure* tc);
// Printing
@ -91,8 +84,6 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// Create and start the CMS Thread, or stop it on shutdown
static ConcurrentMarkSweepThread* start(CMSCollector* collector);
static void stop();
static bool should_terminate() { return _should_terminate; }
// Synchronization using CMS token
static void synchronize(bool is_cms_thread);
@ -170,7 +161,7 @@ class CMSLoopCountWarn: public StackObj {
inline void tick() {
_ticks++;
if (CMSLoopWarn && _ticks % _threshold == 0) {
warning("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg);
log_warning(gc)("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg);
}
}
};

View File

@ -161,15 +161,6 @@ process_stride(Space* sp,
}
}
// If you want a talkative process_chunk_boundaries,
// then #define NOISY(x) x
#ifdef NOISY
#error "Encountered a global preprocessor flag, NOISY, which might clash with local definition to follow"
#else
#define NOISY(x)
#endif
void
CardTableModRefBSForCTRS::
process_chunk_boundaries(Space* sp,
@ -197,10 +188,6 @@ process_chunk_boundaries(Space* sp,
assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index;
NOISY(tty->print_cr("===========================================================================");)
NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")",
chunk_mr.start(), chunk_mr.end());)
// First, set "our" lowest_non_clean entry, which would be
// used by the thread scanning an adjoining left chunk with
// a non-array object straddling the mutual boundary.
@ -239,36 +226,18 @@ process_chunk_boundaries(Space* sp,
}
}
if (first_dirty_card != NULL) {
NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk",
first_dirty_card);)
assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
assert(lowest_non_clean[cur_chunk_index] == NULL,
"Write exactly once : value should be stable hereafter for this round");
lowest_non_clean[cur_chunk_index] = first_dirty_card;
} NOISY(else {
tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");
// In the future, we could have this thread look for a non-NULL value to copy from its
// right neighbor (up to the end of the first object).
if (last_card_of_cur_chunk < last_card_of_first_obj) {
tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
" might be efficient to get value from right neighbor?");
}
})
}
} else {
// In this case we can help our neighbor by just asking them
// to stop at our first card (even though it may not be dirty).
NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)
assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
}
NOISY(tty->print_cr(" process_chunk_boundary: lowest_non_clean[" INTPTR_FORMAT "] = " PTR_FORMAT
" which corresponds to the heap address " PTR_FORMAT,
cur_chunk_index, lowest_non_clean[cur_chunk_index],
(lowest_non_clean[cur_chunk_index] != NULL)
? addr_for(lowest_non_clean[cur_chunk_index])
: NULL);)
NOISY(tty->print_cr("---------------------------------------------------------------------------");)
// Next, set our own max_to_do, which will strictly/exclusively bound
// the highest address that we will scan past the right end of our chunk.
@ -285,8 +254,6 @@ process_chunk_boundaries(Space* sp,
|| oop(last_block)->is_objArray() // last_block is an array (precisely marked)
|| oop(last_block)->is_typeArray()) {
max_to_do = chunk_mr.end();
NOISY(tty->print_cr(" process_chunk_boundary: Last block on this card is not a non-array object;\n"
" max_to_do left at " PTR_FORMAT, max_to_do);)
} else {
assert(last_block < chunk_mr.end(), "Tautology");
// It is a non-array object that straddles the right boundary of this chunk.
@ -301,9 +268,6 @@ process_chunk_boundaries(Space* sp,
// subsequent cards still in this chunk must have been made
// precisely; we can cap processing at the end of our chunk.
max_to_do = chunk_mr.end();
NOISY(tty->print_cr(" process_chunk_boundary: Head of last object on this card is not dirty;\n"
" max_to_do left at " PTR_FORMAT,
max_to_do);)
} else {
// The last object must be considered dirty, and extends onto the
// following chunk. Look for a dirty card in that chunk that will
@ -323,8 +287,6 @@ process_chunk_boundaries(Space* sp,
cur <= last_card_of_last_obj; cur++) {
const jbyte val = *cur;
if (card_will_be_scanned(val)) {
NOISY(tty->print_cr(" Found a non-clean card " PTR_FORMAT " with value 0x%x",
cur, (int)val);)
limit_card = cur; break;
} else {
assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
@ -333,10 +295,6 @@ process_chunk_boundaries(Space* sp,
if (limit_card != NULL) {
max_to_do = addr_for(limit_card);
assert(limit_card != NULL && max_to_do != NULL, "Error");
NOISY(tty->print_cr(" process_chunk_boundary: Found a dirty card at " PTR_FORMAT
" max_to_do set at " PTR_FORMAT " which is before end of last block in chunk: "
PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,
limit_card, max_to_do, last_block, last_block_size, (last_block+last_block_size));)
} else {
// The following is a pessimistic value, because it's possible
// that a dirty card on a subsequent chunk has been cleared by
@ -346,10 +304,6 @@ process_chunk_boundaries(Space* sp,
limit_card = last_card_of_last_obj;
max_to_do = last_block + last_block_size;
assert(limit_card != NULL && max_to_do != NULL, "Error");
NOISY(tty->print_cr(" process_chunk_boundary: Found no dirty card before end of last block in chunk\n"
" Setting limit_card to " PTR_FORMAT
" and max_to_do " PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,
limit_card, last_block, last_block_size, max_to_do);)
}
assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
"Bounds error.");
@ -382,7 +336,6 @@ process_chunk_boundaries(Space* sp,
"[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(sp->used_region().start()), p2i(sp->used_region().end()),
p2i(used.start()), p2i(used.end()));
NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");)
last_chunk_index_to_check = last_chunk_index;
}
for (uintptr_t lnc_index = cur_chunk_index + 1;
@ -392,9 +345,6 @@ process_chunk_boundaries(Space* sp,
if (lnc_card != NULL) {
// we can stop at the first non-NULL entry we find
if (lnc_card <= limit_card) {
NOISY(tty->print_cr(" process_chunk_boundary: LNC card " PTR_FORMAT " is lower than limit_card " PTR_FORMAT,
" max_to_do will be lowered to " PTR_FORMAT " from " PTR_FORMAT,
lnc_card, limit_card, addr_for(lnc_card), max_to_do);)
limit_card = lnc_card;
max_to_do = addr_for(limit_card);
assert(limit_card != NULL && max_to_do != NULL, "Error");
@ -410,9 +360,6 @@ process_chunk_boundaries(Space* sp,
assert(max_to_do != NULL, "OOPS 2!");
} else {
max_to_do = used.end();
NOISY(tty->print_cr(" process_chunk_boundary: Last chunk of this space;\n"
" max_to_do left at " PTR_FORMAT,
max_to_do);)
}
assert(max_to_do != NULL, "OOPS 3!");
// Now we can set the closure we're using so it doesn't go beyond
@ -421,11 +368,8 @@ process_chunk_boundaries(Space* sp,
#ifndef PRODUCT
dcto_cl->set_last_bottom(max_to_do);
#endif
NOISY(tty->print_cr("===========================================================================\n");)
}
#undef NOISY
void
CardTableModRefBSForCTRS::
get_LNC_array_for_space(Space* sp,

View File

@ -901,7 +901,7 @@ void ParNewGeneration::collect(bool full,
size_policy->minor_collection_begin();
}
GCTraceTime(Trace, gc) t1("ParNew", NULL, gch->gc_cause());
GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());
age_table()->clear();
to()->clear(SpaceDecorator::Mangle);

View File

@ -82,18 +82,19 @@ inline void ParScanClosure::do_oop_work(T* p,
if ((HeapWord*)obj < _boundary) {
#ifndef PRODUCT
if (_g->to()->is_in_reserved(obj)) {
tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
LogHandle(gc) log;
log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
GenCollectedHeap* gch = GenCollectedHeap::heap();
Space* sp = gch->space_containing(p);
oop obj = oop(sp->block_start(p));
assert((HeapWord*)obj < (HeapWord*)p, "Error");
tty->print_cr("Object: " PTR_FORMAT, p2i((void *)obj));
tty->print_cr("-------");
obj->print();
tty->print_cr("-----");
tty->print_cr("Heap:");
tty->print_cr("-----");
gch->print();
log.error("Object: " PTR_FORMAT, p2i((void *)obj));
log.error("-------");
obj->print_on(log.error_stream());
log.error("-----");
log.error("Heap:");
log.error("-----");
gch->print_on(log.error_stream());
ShouldNotReachHere();
}
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,27 +38,17 @@
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
void VM_CMS_Operation::acquire_pending_list_lock() {
// The caller may block while communicating
// with the SLT thread in order to acquire/release the PLL.
SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
if (slt != NULL) {
slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
} else {
SurrogateLockerThread::report_missing_slt();
}
_pending_list_locker.lock();
}
void VM_CMS_Operation::release_and_notify_pending_list_lock() {
// The caller may block while communicating
// with the SLT thread in order to acquire/release the PLL.
ConcurrentMarkSweepThread::slt()->
manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
_pending_list_locker.unlock();
}
void VM_CMS_Operation::verify_before_gc() {
if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm);
GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@ -70,7 +60,7 @@ void VM_CMS_Operation::verify_before_gc() {
void VM_CMS_Operation::verify_after_gc() {
if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime(Info, gc, verify) tm("Verify After", _collector->_gc_timer_cm);
GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@ -95,7 +85,7 @@ bool VM_CMS_Operation::doit_prologue() {
assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"Possible deadlock");
if (needs_pll()) {
if (needs_pending_list_lock()) {
acquire_pending_list_lock();
}
// Get the Heap_lock after the pending_list_lock.
@ -103,7 +93,7 @@ bool VM_CMS_Operation::doit_prologue() {
if (lost_race()) {
assert(_prologue_succeeded == false, "Initialized in c'tor");
Heap_lock->unlock();
if (needs_pll()) {
if (needs_pending_list_lock()) {
release_and_notify_pending_list_lock();
}
} else {
@ -120,7 +110,7 @@ void VM_CMS_Operation::doit_epilogue() {
// Release the Heap_lock first.
Heap_lock->unlock();
if (needs_pll()) {
if (needs_pending_list_lock()) {
release_and_notify_pending_list_lock();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "runtime/vm_operations.hpp"
@ -51,6 +52,9 @@
class CMSCollector;
class VM_CMS_Operation: public VM_Operation {
private:
ReferencePendingListLocker _pending_list_locker;
protected:
CMSCollector* _collector; // associated collector
bool _prologue_succeeded; // whether doit_prologue succeeded
@ -73,7 +77,7 @@ class VM_CMS_Operation: public VM_Operation {
virtual const CMSCollector::CollectorState legal_state() const = 0;
// Whether the pending list lock needs to be held
virtual const bool needs_pll() const = 0;
virtual const bool needs_pending_list_lock() const = 0;
// Execute operations in the context of the caller,
// prior to execution of the vm operation itself.
@ -105,7 +109,7 @@ class VM_CMS_Initial_Mark: public VM_CMS_Operation {
return CMSCollector::InitialMarking;
}
virtual const bool needs_pll() const {
virtual const bool needs_pending_list_lock() const {
return false;
}
};
@ -122,7 +126,7 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
return CMSCollector::FinalMarking;
}
virtual const bool needs_pll() const {
virtual const bool needs_pending_list_lock() const {
return true;
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,14 +51,12 @@
declare_type(ConcurrentMarkSweepGeneration,CardGeneration) \
declare_type(CompactibleFreeListSpace, CompactibleSpace) \
declare_type(ConcurrentMarkSweepThread, NamedThread) \
declare_type(SurrogateLockerThread, JavaThread) \
declare_toplevel_type(CMSCollector) \
declare_toplevel_type(CMSBitMap) \
declare_toplevel_type(FreeChunk) \
declare_toplevel_type(Metablock) \
declare_toplevel_type(ConcurrentMarkSweepThread*) \
declare_toplevel_type(ConcurrentMarkSweepGeneration*) \
declare_toplevel_type(SurrogateLockerThread*) \
declare_toplevel_type(CompactibleFreeListSpace*) \
declare_toplevel_type(CMSCollector*) \
declare_toplevel_type(AFLBinaryTreeDictionary) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -145,7 +145,6 @@ void CollectionSetChooser::sort_regions() {
verify();
}
void CollectionSetChooser::add_region(HeapRegion* hr) {
assert(!hr->is_pinned(),
"Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
@ -210,4 +209,67 @@ void CollectionSetChooser::clear() {
_front = 0;
_end = 0;
_remaining_reclaimable_bytes = 0;
}
class ParKnownGarbageHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CSetChooserParUpdater _cset_updater;
public:
ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
uint chunk_size) :
_g1h(G1CollectedHeap::heap()),
_cset_updater(hrSorted, true /* parallel */, chunk_size) { }
bool doHeapRegion(HeapRegion* r) {
// Do we have any marking information for this region?
if (r->is_marked()) {
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
_cset_updater.add_region(r);
}
}
return false;
}
};
class ParKnownGarbageTask: public AbstractGangTask {
CollectionSetChooser* _hrSorted;
uint _chunk_size;
G1CollectedHeap* _g1;
HeapRegionClaimer _hrclaimer;
public:
ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
AbstractGangTask("ParKnownGarbageTask"),
_hrSorted(hrSorted), _chunk_size(chunk_size),
_g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
void work(uint worker_id) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
_g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
}
};
uint CollectionSetChooser::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
assert(n_workers > 0, "Active gc workers should be greater than 0");
const uint overpartition_factor = 4;
const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}
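A worked example of this sizing, with assumed inputs rather than values from the patch: for n_workers = 8 and n_regions = 2048, min_chunk_size = MAX2(2048 / 8, 1) = 256 and the overpartitioned share is 2048 / (8 * 4) = 64, so the function returns MAX2(64, 256) = 256 regions per chunk. Since n / w is never smaller than n / (4 * w), the per-worker share is the deciding operand whenever n_regions >= n_workers.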
void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
clear();
uint n_workers = workers->active_workers();
uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
ParKnownGarbageTask par_known_garbage_task(this, chunk_size, n_workers);
workers->run_task(&par_known_garbage_task);
sort_regions();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,6 +65,9 @@ class CollectionSetChooser: public CHeapObj<mtGC> {
// The sum of reclaimable bytes over all the regions in the CSet chooser.
size_t _remaining_reclaimable_bytes;
// Calculate and return chunk size (in number of regions) for parallel
// addition of regions
uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;
public:
// Return the current candidate region to be considered for
@ -132,6 +135,8 @@ public:
void clear();
void rebuild(WorkGang* workers, uint n_regions);
// Return the number of candidate regions that remain to be collected.
uint remaining_regions() { return _end - _front; }

View File

@ -78,7 +78,7 @@ void ConcurrentG1RefineThread::initialize() {
void ConcurrentG1RefineThread::wait_for_completed_buffers() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
while (!_should_terminate && !is_active()) {
while (!should_terminate() && !is_active()) {
_monitor->wait(Mutex::_no_safepoint_check_flag);
}
}
@ -109,22 +109,13 @@ void ConcurrentG1RefineThread::deactivate() {
}
}
void ConcurrentG1RefineThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void ConcurrentG1RefineThread::run_service() {
_vtime_start = os::elapsedVTime();
while (!_should_terminate) {
while (!should_terminate()) {
// Wait for work
wait_for_completed_buffers();
if (_should_terminate) {
if (should_terminate()) {
break;
}
@ -168,23 +159,6 @@ void ConcurrentG1RefineThread::run_service() {
log_debug(gc, refine)("Stopping %d", _worker_id);
}
void ConcurrentG1RefineThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx mu(Terminator_lock);
_should_terminate = true;
}
stop_service();
{
MutexLockerEx mu(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
}
}
}
void ConcurrentG1RefineThread::stop_service() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
_monitor->notify();

View File

@ -72,7 +72,6 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
void stop_service();
public:
virtual void run();
// Constructor
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
CardTableEntryClosure* refine_closure,
@ -84,9 +83,6 @@ public:
double vtime_accum() { return _vtime_accum; }
ConcurrentG1Refine* cg1r() { return _cg1r; }
// shutdown
void stop();
};
#endif // SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MMUTracker.hpp"
@ -41,9 +42,6 @@
// The CM thread is created when the G1 garbage collector is used
SurrogateLockerThread*
ConcurrentMarkThread::_slt = NULL;
ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
ConcurrentGCThread(),
_cm(cm),
@ -82,60 +80,59 @@ public:
// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
const G1Analytics* analytics = g1_policy->analytics();
if (g1_policy->adaptive_young_list_length()) {
double now = os::elapsedTime();
double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
: g1_policy->predict_cleanup_time_ms();
double prediction_ms = remark ? analytics->predict_remark_time_ms()
: analytics->predict_cleanup_time_ms();
G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
os::sleep(this, sleep_time_ms, false);
}
}
class GCConcPhaseTimer : StackObj {
class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
G1ConcurrentMark* _cm;
public:
GCConcPhaseTimer(G1ConcurrentMark* cm, const char* title) : _cm(cm) {
_cm->register_concurrent_phase_start(title);
G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
_cm(cm) {
_cm->gc_timer_cm()->register_gc_concurrent_start(title);
}
~GCConcPhaseTimer() {
_cm->register_concurrent_phase_end();
~G1ConcPhaseTimer() {
_cm->gc_timer_cm()->register_gc_concurrent_end();
}
};
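One C++ detail worth noting about the call sites below (an inference from the renamed uses, not stated in the patch): the old statements of the form

  GCConcPhaseTimer(_cm, "Concurrent Mark");   // unnamed temporary

construct and immediately destroy the timer, so its start/end registrations bracket nothing, whereas the replacements name the object,

  G1ConcPhaseTimer t(_cm, "Concurrent Mark"); // lives until the end of the scope

which keeps the timer alive across the work it is meant to measure.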
void ConcurrentMarkThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void ConcurrentMarkThread::run_service() {
_vtime_start = os::elapsedVTime();
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1_policy = g1h->g1_policy();
while (!_should_terminate) {
while (!should_terminate()) {
// wait until started is set.
sleepBeforeNextCycle();
if (_should_terminate) {
_cm->root_regions()->cancel_scan();
if (should_terminate()) {
break;
}
GCIdMark gc_id_mark;
cm()->concurrent_cycle_start();
assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
{
ResourceMark rm;
HandleMark hm;
double cycle_start = os::elapsedVTime();
{
GCConcPhaseTimer(_cm, "Concurrent Clearing of Claimed Marks");
G1ConcPhaseTimer t(_cm, "Concurrent Clear Claimed Marks");
ClassLoaderDataGraph::clear_claimed_marks();
}
@ -148,22 +145,22 @@ void ConcurrentMarkThread::run_service() {
// correctness issue.
{
GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning");
_cm->scanRootRegions();
G1ConcPhaseTimer t(_cm, "Concurrent Scan Root Regions");
_cm->scan_root_regions();
}
// It would be nice to use the GCTraceConcTime class here but
// the "end" logging is inside the loop and not at the end of
// a scope. Mimicking the same log output as GCTraceConcTime instead.
jlong mark_start = os::elapsed_counter();
log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
log_info(gc, marking)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
int iter = 0;
do {
iter++;
if (!cm()->has_aborted()) {
GCConcPhaseTimer(_cm, "Concurrent Mark");
_cm->markFromRoots();
G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
_cm->mark_from_roots();
}
double mark_end_time = os::elapsedVTime();
@ -171,18 +168,18 @@ void ConcurrentMarkThread::run_service() {
_vtime_mark_accum += (mark_end_time - cycle_start);
if (!cm()->has_aborted()) {
delay_to_keep_mmu(g1_policy, true /* remark */);
log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
TimeHelper::counter_to_seconds(mark_start),
TimeHelper::counter_to_seconds(mark_end),
TimeHelper::counter_to_millis(mark_end - mark_start));
log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
TimeHelper::counter_to_seconds(mark_start),
TimeHelper::counter_to_seconds(mark_end),
TimeHelper::counter_to_millis(mark_end - mark_start));
CMCheckpointRootsFinalClosure final_cl(_cm);
VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */);
VMThread::execute(&op);
}
if (cm()->restart_for_overflow()) {
log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
log_info(gc)("Concurrent Mark restart for overflow");
log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter);
log_info(gc, marking)("Concurrent Mark Restart due to overflow");
}
} while (cm()->restart_for_overflow());
@ -216,11 +213,9 @@ void ConcurrentMarkThread::run_service() {
// place, it would wait for us to process the regions
// reclaimed by cleanup.
GCTraceConcTime(Info, gc) tt("Concurrent Cleanup");
GCConcPhaseTimer(_cm, "Concurrent Cleanup");
G1ConcPhaseTimer t(_cm, "Concurrent Complete Cleanup");
// Now do the concurrent cleanup operation.
_cm->completeCleanup();
_cm->complete_cleanup();
// Notify anyone who's waiting that there are no more free
// regions coming. We have to do this before we join the STS
@ -265,7 +260,7 @@ void ConcurrentMarkThread::run_service() {
if (!cm()->has_aborted()) {
g1_policy->record_concurrent_mark_cleanup_completed();
} else {
log_info(gc)("Concurrent Mark abort");
log_info(gc, marking)("Concurrent Mark Abort");
}
}
@ -274,8 +269,8 @@ void ConcurrentMarkThread::run_service() {
// We may have aborted just before the remark. Do not bother clearing the
// bitmap then, as it has been done during mark abort.
if (!cm()->has_aborted()) {
GCConcPhaseTimer(_cm, "Concurrent Bitmap Clearing");
_cm->clearNextBitmap();
G1ConcPhaseTimer t(_cm, "Concurrent Cleanup for Next Mark");
_cm->cleanup_for_next_mark();
} else {
assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
}
@ -288,25 +283,11 @@ void ConcurrentMarkThread::run_service() {
{
SuspendibleThreadSetJoiner sts_join;
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
g1h->register_concurrent_cycle_end();
}
}
}
void ConcurrentMarkThread::stop() {
{
MutexLockerEx ml(Terminator_lock);
_should_terminate = true;
}
stop_service();
{
MutexLockerEx ml(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
cm()->concurrent_cycle_end();
}
}
_cm->root_regions()->cancel_scan();
}
void ConcurrentMarkThread::stop_service() {
@ -320,7 +301,7 @@ void ConcurrentMarkThread::sleepBeforeNextCycle() {
assert(!in_progress(), "should have been cleared");
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
while (!started() && !_should_terminate) {
while (!started() && !should_terminate()) {
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
}
@ -328,16 +309,3 @@ void ConcurrentMarkThread::sleepBeforeNextCycle() {
set_in_progress();
}
}
// Note: As is the case with CMS - this method, although exported
// by the ConcurrentMarkThread, which is a non-JavaThread, can only
// be called by a JavaThread. Currently this is done at vm creation
// time (post-vm-init) by the main/Primordial (Java)Thread.
// XXX Consider changing this in the future to allow the CM thread
// itself to create this thread?
void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) {
assert(UseG1GC, "SLT thread needed only for concurrent GC");
assert(THREAD->is_Java_thread(), "must be a Java thread");
assert(_slt == NULL, "SLT already created");
_slt = SurrogateLockerThread::make(THREAD);
}

View File

@ -38,13 +38,8 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
double _vtime_start; // Initial virtual time.
double _vtime_accum; // Accumulated virtual time.
double _vtime_mark_accum;
public:
virtual void run();
private:
G1ConcurrentMark* _cm;
enum State {
@ -61,15 +56,10 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
void run_service();
void stop_service();
static SurrogateLockerThread* _slt;
public:
// Constructor
ConcurrentMarkThread(G1ConcurrentMark* cm);
static void makeSurrogateLockerThread(TRAPS);
static SurrogateLockerThread* slt() { return _slt; }
// Total virtual time so far for this thread and concurrent marking tasks.
double vtime_accum();
// Marking virtual time so far this thread and concurrent marking tasks.
@ -93,9 +83,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
// as the CM thread might take some time to wake up before noticing
// that started() is set and set in_progress().
bool during_cycle() { return !idle(); }
// shutdown
void stop();
};
#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP

View File

@ -110,44 +110,6 @@ DirtyCardQueue::~DirtyCardQueue() {
}
}
bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
bool consume,
uint worker_i) {
bool res = true;
if (_buf != NULL) {
res = apply_closure_to_buffer(cl, _buf, _index, _sz,
consume,
worker_i);
if (res && consume) {
_index = _sz;
}
}
return res;
}
bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
void** buf,
size_t index, size_t sz,
bool consume,
uint worker_i) {
if (cl == NULL) return true;
size_t limit = byte_index_to_index(sz);
for (size_t i = byte_index_to_index(index); i < limit; ++i) {
jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
if (card_ptr != NULL) {
// Set the entry to null, so we don't do it again (via the test
// above) if we reconsider this buffer.
if (consume) {
buf[i] = NULL;
}
if (!cl->do_card_ptr(card_ptr, worker_i)) {
return false;
}
}
}
return true;
}
DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
PtrQueueSet(notify_when_complete),
_mut_process_closure(NULL),
@ -188,14 +150,39 @@ void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
t->dirty_card_queue().handle_zero_index();
}
bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
BufferNode* node,
bool consume,
uint worker_i) {
if (cl == NULL) return true;
void** buf = BufferNode::make_buffer_from_node(node);
size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
size_t start = DirtyCardQueue::byte_index_to_index(node->index());
for (size_t i = start; i < limit; ++i) {
jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
assert(card_ptr != NULL, "invariant");
if (!cl->do_card_ptr(card_ptr, worker_i)) {
if (consume) {
size_t new_index = DirtyCardQueue::index_to_byte_index(i + 1);
assert(new_index <= buffer_size(), "invariant");
node->set_index(new_index);
}
return false;
}
}
if (consume) {
node->set_index(buffer_size());
}
return true;
}
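To illustrate the do_card_ptr contract this loop depends on, here is a hypothetical closure, not part of the patch, that stops after a fixed budget of cards:

class CardBudgetClosure : public CardTableEntryClosure {
  size_t _seen;
  const size_t _budget;
public:
  CardBudgetClosure(size_t budget) : _seen(0), _budget(budget) { }
  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    _seen++;
    // Returning false stops apply_closure_to_buffer at this card; with
    // consume == true the node's index is set just past it, so a later
    // pass resumes with the next card.
    return _seen < _budget;
  }
  size_t seen() const { return _seen; }
};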
bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
guarantee(_free_ids != NULL, "must be");
// claim a par id
uint worker_i = _free_ids->claim_par_id();
bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
_sz, true, worker_i);
bool b = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
if (b) {
Atomic::inc(&_processed_buffers_mut);
}
@ -239,49 +226,30 @@ bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure*
if (nd == NULL) {
return false;
} else {
void** buf = BufferNode::make_buffer_from_node(nd);
size_t index = nd->index();
if (DirtyCardQueue::apply_closure_to_buffer(cl,
buf, index, _sz,
true, worker_i)) {
if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
// Done with fully processed buffer.
deallocate_buffer(buf);
deallocate_buffer(nd);
Atomic::inc(&_processed_buffers_rs_thread);
return true;
} else {
// Return partially processed buffer to the queue.
enqueue_complete_buffer(buf, index);
enqueue_complete_buffer(nd);
return false;
}
}
}
void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
BufferNode* nd = _completed_buffers_head;
while (nd != NULL) {
bool b =
DirtyCardQueue::apply_closure_to_buffer(cl,
BufferNode::make_buffer_from_node(nd),
0, _sz, false);
guarantee(b, "Should not stop early.");
nd = nd->next();
}
}
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
BufferNode* nd = _cur_par_buffer_node;
while (nd != NULL) {
BufferNode* next = (BufferNode*)nd->next();
BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd);
BufferNode* next = nd->next();
void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
if (actual == nd) {
bool b =
DirtyCardQueue::apply_closure_to_buffer(cl,
BufferNode::make_buffer_from_node(actual),
0, _sz, false);
bool b = apply_closure_to_buffer(cl, nd, false);
guarantee(b, "Should not stop early.");
nd = next;
} else {
nd = actual;
nd = static_cast<BufferNode*>(actual);
}
}
}
@ -304,7 +272,7 @@ void DirtyCardQueueSet::clear() {
while (buffers_to_delete != NULL) {
BufferNode* nd = buffers_to_delete;
buffers_to_delete = nd->next();
deallocate_buffer(BufferNode::make_buffer_from_node(nd));
deallocate_buffer(nd);
}
}
@ -320,6 +288,13 @@ void DirtyCardQueueSet::abandon_logs() {
shared_dirty_card_queue()->reset();
}
void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
if (!dcq.is_empty()) {
enqueue_complete_buffer(
BufferNode::make_node_from_buffer(dcq.get_buf(), dcq.get_index()));
dcq.reinitialize();
}
}
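The helper relies on the buffer convention that can be inferred from the iteration in apply_closure_to_buffer above: entries below the node's index are inactive (either never filled or already consumed), and the active elements occupy [index, buffer_size):

  buf: [ inactive ........... | active ................. ]
       0                      node->index()              buffer_size()

A partially processed buffer is therefore re-queued by advancing the index, instead of NULLing out entries the way the loop deleted from concatenate_logs below used to.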
void DirtyCardQueueSet::concatenate_logs() {
// Iterate over all the threads, if we find a partial log add it to
@ -329,23 +304,9 @@ void DirtyCardQueueSet::concatenate_logs() {
_max_completed_queue = max_jint;
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
for (JavaThread* t = Threads::first(); t; t = t->next()) {
DirtyCardQueue& dcq = t->dirty_card_queue();
if (dcq.size() != 0) {
void** buf = dcq.get_buf();
// We must NULL out the unused entries, then enqueue.
size_t limit = dcq.byte_index_to_index(dcq.get_index());
for (size_t i = 0; i < limit; ++i) {
buf[i] = NULL;
}
enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
dcq.reinitialize();
}
}
if (_shared_dirty_card_queue.size() != 0) {
enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(),
_shared_dirty_card_queue.get_index());
_shared_dirty_card_queue.reinitialize();
concatenate_log(t->dirty_card_queue());
}
concatenate_log(_shared_dirty_card_queue);
// Restore the completed buffer queue limit.
_max_completed_queue = save_max_completed_queue;
}

View File

@ -37,7 +37,7 @@ class CardTableEntryClosure: public CHeapObj<mtGC> {
public:
// Process the card whose card table entry is "card_ptr". Returning
// "false" terminates the iteration early.
virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
};
// A ptrQueue whose elements are "oops", pointers to object heads.
@ -52,23 +52,6 @@ public:
// Process queue entries and release resources.
void flush() { flush_impl(); }
// Apply the closure to all elements, and reset the index to make the
// buffer empty. If a closure application returns "false", return
// "false" immediately, halting the iteration. If "consume" is true,
// deletes processed entries from logs.
bool apply_closure(CardTableEntryClosure* cl,
bool consume = true,
uint worker_i = 0);
// Apply the closure to all elements of "buf", down to "index"
// (inclusive.) If returns "false", then a closure application returned
// "false", and we return immediately. If "consume" is true, entries are
// set to NULL as they are processed, so they will not be processed again
// later.
static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
void** buf, size_t index, size_t sz,
bool consume = true,
uint worker_i = 0);
void **get_buf() { return _buf;}
size_t get_index() { return _index;}
void reinitialize() { _buf = 0; _sz = 0; _index = 0;}
@ -94,8 +77,18 @@ class DirtyCardQueueSet: public PtrQueueSet {
DirtyCardQueue _shared_dirty_card_queue;
// Override.
bool mut_process_buffer(void** buf);
// Apply the closure to the elements of "node" from its index to
// buffer_size. If all closure applications return true, then
// returns true. Stops processing after the first closure
// application that returns false, and returns false from this
// function. If "consume" is true, the node's index is updated to
// follow the last processed element.
bool apply_closure_to_buffer(CardTableEntryClosure* cl,
BufferNode* node,
bool consume,
uint worker_i = 0);
bool mut_process_buffer(BufferNode* node);
// Protected by the _cbl_mon.
FreeIdSet* _free_ids;
@ -107,6 +100,9 @@ class DirtyCardQueueSet: public PtrQueueSet {
// Current buffer node used for parallel iteration.
BufferNode* volatile _cur_par_buffer_node;
void concatenate_log(DirtyCardQueue& dcq);
public:
DirtyCardQueueSet(bool notify_when_complete = true);
@ -126,12 +122,13 @@ public:
static void handle_zero_index_for_thread(JavaThread* t);
// If there exists some completed buffer, pop it, then apply the
// specified closure to all its elements, nulling out those elements
// processed. If all elements are processed, returns "true". If no
// completed buffers exist, returns false. If a completed buffer exists,
// but is only partially completed before a "yield" happens, the
// partially completed buffer (with its processed elements set to NULL)
// is returned to the completed buffer set, and this call returns false.
// specified closure to its active elements. If all active elements
// are processed, returns "true". If no completed buffers exist,
// returns false. If a completed buffer exists, but is only
// partially completed before a "yield" happens, the partially
// completed buffer (with its index updated to exclude the processed
// elements) is returned to the completed buffer set, and this call
// returns false.
bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
uint worker_i,
size_t stop_at,
@ -139,13 +136,10 @@ public:
BufferNode* get_completed_buffer(size_t stop_at);
// Applies the current closure to all completed buffers,
// non-consumptively.
void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
// Applies the current closure to all completed buffers, non-consumptively.
// Parallel version.
// Can be used in parallel, all callers using the iteration state initialized
// by reset_for_par_iteration.
void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
DirtyCardQueue* shared_dirty_card_queue() {

View File

@ -0,0 +1,329 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/numberSeq.hpp"
// Different defaults for different numbers of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results
// all the same
static double rs_length_diff_defaults[] = {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};
static double cost_per_card_ms_defaults[] = {
0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};
// all the same
static double young_cards_per_entry_ratio_defaults[] = {
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};
static double cost_per_entry_ms_defaults[] = {
0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};
static double cost_per_byte_ms_defaults[] = {
0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};
// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};
static double young_other_cost_per_region_ms_defaults[] = {
0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};
static double non_young_other_cost_per_region_ms_defaults[] = {
1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
G1Analytics::G1Analytics(const G1Predictions* predictor) :
_predictor(predictor),
_recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_prev_collection_pause_end_ms(0.0),
_rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
_mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
_constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
_rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
_recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)) {
// Seed sequences with initial values.
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
int index = MIN2(ParallelGCThreads - 1, 7u);
_rs_length_diff_seq->add(rs_length_diff_defaults[index]);
_cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
_cost_scan_hcc_seq->add(0.0);
_young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
_cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
_cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
_constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
_young_other_cost_per_region_ms_seq->add(young_other_cost_per_region_ms_defaults[index]);
_non_young_other_cost_per_region_ms_seq->add(non_young_other_cost_per_region_ms_defaults[index]);
// start conservatively (around 50ms is about right)
_concurrent_mark_remark_times_ms->add(0.05);
_concurrent_mark_cleanup_times_ms->add(0.20);
}
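The seeding picks one column of the defaults tables per GC thread count: index = MIN2(ParallelGCThreads - 1, 7u). Reading the arrays above, ParallelGCThreads = 4 gives index 3 and seeds the cost per card with 0.003 ms and the cost per entry with 0.008 ms, while any thread count of eight or more saturates at index 7 and uses the rightmost column.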
double G1Analytics::get_new_prediction(TruncatedSeq const* seq) const {
return _predictor->get_new_prediction(seq);
}
size_t G1Analytics::get_new_size_prediction(TruncatedSeq const* seq) const {
return (size_t)get_new_prediction(seq);
}
int G1Analytics::num_alloc_rate_ms() const {
return _alloc_rate_ms_seq->num();
}
void G1Analytics::report_concurrent_mark_remark_times_ms(double ms) {
_concurrent_mark_remark_times_ms->add(ms);
}
void G1Analytics::report_alloc_rate_ms(double alloc_rate) {
_alloc_rate_ms_seq->add(alloc_rate);
}
void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
_recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
if (_recent_avg_pause_time_ratio < 0.0 ||
(_recent_avg_pause_time_ratio - 1.0 > 0.0)) {
// Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
// CR 6902692 by redoing the manner in which the ratio is incrementally computed.
if (_recent_avg_pause_time_ratio < 0.0) {
_recent_avg_pause_time_ratio = 0.0;
} else {
assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
_recent_avg_pause_time_ratio = 1.0;
}
}
// Compute the ratio of just this last pause time to the entire time range stored
// in the vectors. Comparing this pause to the entire range, rather than only the
// most recent interval, has the effect of smoothing over a possible transient 'burst'
// of more frequent pauses that don't really reflect a change in heap occupancy.
// This reduces the likelihood of a needless heap expansion being triggered.
_last_pause_time_ratio =
(pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
}
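
As a worked example of the two ratios (numbers chosen for illustration): ten recorded pauses summing to 400 ms over a 10,000 ms interval give a _recent_avg_pause_time_ratio of 400 / 10,000 = 0.04, while a single 50 ms pause with ten end times in the window gives a _last_pause_time_ratio of (50 * 10) / 10,000 = 0.05, i.e. the last pause is weighted as if every pause in the window had been that long.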
void G1Analytics::report_cost_per_card_ms(double cost_per_card_ms) {
_cost_per_card_ms_seq->add(cost_per_card_ms);
}
void G1Analytics::report_cost_scan_hcc(double cost_scan_hcc) {
_cost_scan_hcc_seq->add(cost_scan_hcc);
}
void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
if (last_gc_was_young) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
_mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
}
}
void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
if (last_gc_was_young) {
_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
} else {
_mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
}
}
void G1Analytics::report_rs_length_diff(double rs_length_diff) {
_rs_length_diff_seq->add(rs_length_diff);
}
void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) {
if (in_marking_window) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
_cost_per_byte_ms_seq->add(cost_per_byte_ms);
}
}
void G1Analytics::report_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
_young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms);
}
void G1Analytics::report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
_non_young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms);
}
void G1Analytics::report_constant_other_time_ms(double constant_other_time_ms) {
_constant_other_time_ms_seq->add(constant_other_time_ms);
}
void G1Analytics::report_pending_cards(double pending_cards) {
_pending_cards_seq->add(pending_cards);
}
void G1Analytics::report_rs_lengths(double rs_lengths) {
_rs_lengths_seq->add(rs_lengths);
}
size_t G1Analytics::predict_rs_length_diff() const {
return get_new_size_prediction(_rs_length_diff_seq);
}
double G1Analytics::predict_alloc_rate_ms() const {
return get_new_prediction(_alloc_rate_ms_seq);
}
double G1Analytics::predict_cost_per_card_ms() const {
return get_new_prediction(_cost_per_card_ms_seq);
}
double G1Analytics::predict_scan_hcc_ms() const {
return get_new_prediction(_cost_scan_hcc_seq);
}
double G1Analytics::predict_rs_update_time_ms(size_t pending_cards) const {
return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}
double G1Analytics::predict_young_cards_per_entry_ratio() const {
return get_new_prediction(_young_cards_per_entry_ratio_seq);
}
double G1Analytics::predict_mixed_cards_per_entry_ratio() const {
if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
return predict_young_cards_per_entry_ratio();
} else {
return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
}
}
size_t G1Analytics::predict_card_num(size_t rs_length, bool gcs_are_young) const {
if (gcs_are_young) {
return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
} else {
return (size_t) (rs_length * predict_mixed_cards_per_entry_ratio());
}
}
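
For concreteness (illustrative numbers): an rs_length of 10,000 entries combined with the young default ratio of 1.0 from young_cards_per_entry_ratio_defaults predicts 10,000 cards, whereas a learned mixed ratio of 0.25 would predict 2,500; as predict_mixed_cards_per_entry_ratio() above shows, fewer than two mixed samples fall back to the young ratio.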
double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const {
if (gcs_are_young) {
return card_num * get_new_prediction(_cost_per_entry_ms_seq);
} else {
return predict_mixed_rs_scan_time_ms(card_num);
}
}
double G1Analytics::predict_mixed_rs_scan_time_ms(size_t card_num) const {
if (_mixed_cost_per_entry_ms_seq->num() < 3) {
return card_num * get_new_prediction(_cost_per_entry_ms_seq);
} else {
return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
}
}
double G1Analytics::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
} else {
return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
}
}
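
The < 3 guard above is a cold-start fallback: until three object-copy samples have been taken during marking, the non-marking cost is reused with a 10% premium. A self-contained sketch of that policy, with hypothetical names; only the 1.1 factor and the three-sample threshold come from the code above:

#include <cstddef>
#include <cstdio>

// Stand-ins for the two TruncatedSeq predictions; illustrative only.
static double predict_copy_ms_sketch(size_t bytes, size_t cm_samples,
                                     double cm_cost_per_byte,
                                     double plain_cost_per_byte) {
  if (cm_samples < 3) {
    // Too few marking-time samples: inflate the plain cost by 10% as a
    // conservative stand-in.
    return 1.1 * bytes * plain_cost_per_byte;
  }
  return bytes * cm_cost_per_byte;
}

int main() {
  // 1 MB copied; 0.00003 ms/byte is taken from cost_per_byte_ms_defaults.
  printf("cold start: %.2f ms\n", predict_copy_ms_sketch(1u << 20, 1, 0.00005, 0.00003));
  printf("warmed up:  %.2f ms\n", predict_copy_ms_sketch(1u << 20, 5, 0.00005, 0.00003));
  return 0;
}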
double G1Analytics::predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const {
if (during_concurrent_mark) {
return predict_object_copy_time_ms_during_cm(bytes_to_copy);
} else {
return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
}
}
double G1Analytics::predict_constant_other_time_ms() const {
return get_new_prediction(_constant_other_time_ms_seq);
}
double G1Analytics::predict_young_other_time_ms(size_t young_num) const {
return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
}
double G1Analytics::predict_non_young_other_time_ms(size_t non_young_num) const {
return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}
double G1Analytics::predict_remark_time_ms() const {
return get_new_prediction(_concurrent_mark_remark_times_ms);
}
double G1Analytics::predict_cleanup_time_ms() const {
return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}
size_t G1Analytics::predict_rs_lengths() const {
return get_new_size_prediction(_rs_lengths_seq);
}
size_t G1Analytics::predict_pending_cards() const {
return get_new_size_prediction(_pending_cards_seq);
}
double G1Analytics::last_known_gc_end_time_sec() const {
return _recent_prev_end_times_for_all_gcs_sec->oldest();
}
void G1Analytics::update_recent_gc_times(double end_time_sec,
double pause_time_ms) {
_recent_gc_times_ms->add(pause_time_ms);
_recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
_prev_collection_pause_end_ms = end_time_sec * 1000.0;
}
void G1Analytics::report_concurrent_mark_cleanup_times_ms(double ms) {
_concurrent_mark_cleanup_times_ms->add(ms);
}

View File

@ -0,0 +1,159 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
#define SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
class TruncatedSeq;
class G1Predictions;
class G1Analytics: public CHeapObj<mtGC> {
const static int TruncatedSeqLength = 10;
const static int NumPrevPausesForHeuristics = 10;
const G1Predictions* _predictor;
// These exclude marking times.
TruncatedSeq* _recent_gc_times_ms;
TruncatedSeq* _concurrent_mark_remark_times_ms;
TruncatedSeq* _concurrent_mark_cleanup_times_ms;
TruncatedSeq* _alloc_rate_ms_seq;
double _prev_collection_pause_end_ms;
TruncatedSeq* _rs_length_diff_seq;
TruncatedSeq* _cost_per_card_ms_seq;
TruncatedSeq* _cost_scan_hcc_seq;
TruncatedSeq* _young_cards_per_entry_ratio_seq;
TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
TruncatedSeq* _cost_per_entry_ms_seq;
TruncatedSeq* _mixed_cost_per_entry_ms_seq;
TruncatedSeq* _cost_per_byte_ms_seq;
TruncatedSeq* _constant_other_time_ms_seq;
TruncatedSeq* _young_other_cost_per_region_ms_seq;
TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
TruncatedSeq* _pending_cards_seq;
TruncatedSeq* _rs_lengths_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
// Statistics kept per GC stoppage, pause or full.
TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
// The ratio of gc time to elapsed time, computed over recent pauses,
// and the ratio for just the last pause.
double _recent_avg_pause_time_ratio;
double _last_pause_time_ratio;
double get_new_prediction(TruncatedSeq const* seq) const;
size_t get_new_size_prediction(TruncatedSeq const* seq) const;
public:
G1Analytics(const G1Predictions* predictor);
double prev_collection_pause_end_ms() const {
return _prev_collection_pause_end_ms;
}
double recent_avg_pause_time_ratio() const {
return _recent_avg_pause_time_ratio;
}
double last_pause_time_ratio() const {
return _last_pause_time_ratio;
}
uint number_of_recorded_pause_times() const {
return NumPrevPausesForHeuristics;
}
void append_prev_collection_pause_end_ms(double ms) {
_prev_collection_pause_end_ms += ms;
}
void report_concurrent_mark_remark_times_ms(double ms);
void report_concurrent_mark_cleanup_times_ms(double ms);
void report_alloc_rate_ms(double alloc_rate);
void report_cost_per_card_ms(double cost_per_card_ms);
void report_cost_scan_hcc(double cost_scan_hcc);
void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young);
void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young);
void report_rs_length_diff(double rs_length_diff);
void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window);
void report_young_other_cost_per_region_ms(double other_cost_per_region_ms);
void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms);
void report_constant_other_time_ms(double constant_other_time_ms);
void report_pending_cards(double pending_cards);
void report_rs_lengths(double rs_lengths);
size_t predict_rs_length_diff() const;
double predict_alloc_rate_ms() const;
int num_alloc_rate_ms() const;
double predict_cost_per_card_ms() const;
double predict_scan_hcc_ms() const;
double predict_rs_update_time_ms(size_t pending_cards) const;
double predict_young_cards_per_entry_ratio() const;
double predict_mixed_cards_per_entry_ratio() const;
size_t predict_card_num(size_t rs_length, bool gcs_are_young) const;
double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const;
double predict_mixed_rs_scan_time_ms(size_t card_num) const;
double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;
double predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const;
double predict_constant_other_time_ms() const;
double predict_young_other_time_ms(size_t young_num) const;
double predict_non_young_other_time_ms(size_t non_young_num) const;
double predict_remark_time_ms() const;
double predict_cleanup_time_ms() const;
size_t predict_rs_lengths() const;
size_t predict_pending_cards() const;
// Add a new GC of the given duration and end time to the record.
void update_recent_gc_times(double end_time_sec, double elapsed_ms);
void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
double last_known_gc_end_time_sec() const;
};
#endif // SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
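
A hedged sketch of how the report/predict pairing in this header is driven: the policy reports raw samples after each pause and later asks for a smoothed prediction. The class below is a toy stand-in for TruncatedSeq that only averages; the real G1Predictions additionally pads the estimate with a confidence-scaled margin:

#include <cstddef>
#include <cstdio>
#include <deque>
#include <numeric>

// Toy bounded sequence; not HotSpot's TruncatedSeq.
class ToySeq {
  std::deque<double> _v;
  size_t _cap;
public:
  explicit ToySeq(size_t cap) : _cap(cap) {}
  void add(double x) {
    _v.push_back(x);
    if (_v.size() > _cap) _v.pop_front();  // keep only the newest _cap samples
  }
  double predict() const {
    return _v.empty() ? 0.0
                      : std::accumulate(_v.begin(), _v.end(), 0.0) / _v.size();
  }
};

int main() {
  ToySeq cost_per_card_ms(10);   // mirrors TruncatedSeqLength = 10
  cost_per_card_ms.add(0.003);   // cf. report_cost_per_card_ms(...)
  cost_per_card_ms.add(0.004);
  printf("predicted cost/card: %g ms\n", cost_per_card_ms.predict());  // cf. predict_cost_per_card_ms()
  return 0;
}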

View File

@ -39,6 +39,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1MarkSweep.hpp"
@ -567,7 +568,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
log_warning(gc)("G1CollectedHeap::mem_allocate retries %d times", try_count);
}
}
@ -676,8 +677,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("G1CollectedHeap::attempt_allocation_slow() "
"retries %d times", try_count);
log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
"retries %d times", try_count);
}
}
@ -1092,8 +1093,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("G1CollectedHeap::attempt_allocation_humongous() "
"retries %d times", try_count);
log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() "
"retries %d times", try_count);
}
}
@ -1229,6 +1230,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
ResourceMark rm;
print_heap_before_gc();
print_heap_regions();
trace_heap_before_gc(gc_tracer);
size_t metadata_prev_used = MetaspaceAux::used_bytes();
@ -1422,7 +1424,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
// the full GC has compacted objects and updated TAMS but not updated
// the prev bitmap.
if (G1VerifyBitmaps) {
((G1CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
_cm->clear_prev_bitmap(workers());
}
_verifier->check_bitmaps("Full GC End");
@ -1447,6 +1449,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
heap_transition.print();
print_heap_after_gc();
print_heap_regions();
trace_heap_after_gc(gc_tracer);
post_full_gc_dump(gc_timer);
@ -1767,15 +1770,12 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_expand_heap_after_alloc_failure(true),
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_heap_summary_sent(false),
_in_cset_fast_test(),
_dirty_cards_region_list(NULL),
_worker_cset_start_region(NULL),
_worker_cset_start_region_time_stamp(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
@ -1784,6 +1784,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_verifier = new G1HeapVerifier(this);
_allocator = G1Allocator::create_allocator(this);
_heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
// Override the default _filler_array_max_size so that no humongous filler
@ -2316,52 +2319,6 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
FullGCCount_lock->notify_all();
}
void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
GCIdMarkAndRestore conc_gc_id_mark;
collector_state()->set_concurrent_cycle_started(true);
_gc_timer_cm->register_gc_start(start_time);
_gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
trace_heap_before_gc(_gc_tracer_cm);
_cmThread->set_gc_id(GCId::current());
}
void G1CollectedHeap::register_concurrent_cycle_end() {
if (collector_state()->concurrent_cycle_started()) {
GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
// ConcurrentGCTimer will be ended as well.
_cm->register_concurrent_gc_end_and_stop_timer();
} else {
_gc_timer_cm->register_gc_end();
}
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
// Clear state variables to prepare for the next concurrent cycle.
collector_state()->set_concurrent_cycle_started(false);
_heap_summary_sent = false;
}
}
void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
if (collector_state()->concurrent_cycle_started()) {
// This function can be called when:
//  * the cleanup pause is run
//  * the concurrent cycle is aborted before the cleanup pause.
//  * the concurrent cycle is aborted after the cleanup pause,
//    but before the concurrent cycle end has been registered.
// Make sure that we only send the heap information once.
if (!_heap_summary_sent) {
GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
trace_heap_after_gc(_gc_tracer_cm);
_heap_summary_sent = true;
}
}
}
void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked();
@ -2718,6 +2675,14 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
return false; // keep some compilers happy
}
void G1CollectedHeap::print_heap_regions() const {
LogHandle(gc, heap, region) log;
if (log.is_trace()) {
ResourceMark rm;
print_regions_on(log.trace_stream());
}
}
void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" %-20s", "garbage-first heap");
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
@ -2731,18 +2696,14 @@ void G1CollectedHeap::print_on(outputStream* st) const {
uint young_regions = _young_list->length();
st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
(size_t) young_regions * HeapRegion::GrainBytes / K);
uint survivor_regions = g1_policy()->recorded_survivor_regions();
uint survivor_regions = _young_list->survivor_length();
st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
(size_t) survivor_regions * HeapRegion::GrainBytes / K);
st->cr();
MetaspaceAux::print_on(st);
}
void G1CollectedHeap::print_extended_on(outputStream* st) const {
print_on(st);
// Print the per-region information.
st->cr();
void G1CollectedHeap::print_regions_on(outputStream* st) const {
st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, A=archive, TS=gc time stamp, "
@ -2752,6 +2713,13 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
heap_region_iterate(&blk);
}
void G1CollectedHeap::print_extended_on(outputStream* st) const {
print_on(st);
// Print the per-region information.
print_regions_on(st);
}
void G1CollectedHeap::print_on_error(outputStream* st) const {
this->CollectedHeap::print_on_error(st);
@ -2841,12 +2809,14 @@ G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
size_t eden_used_bytes = young_list->eden_used_bytes();
size_t survivor_used_bytes = young_list->survivor_used_bytes();
size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
size_t eden_capacity_bytes =
(g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
VirtualSpaceSummary heap_summary = create_heap_space_summary();
return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes, num_regions());
return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
eden_capacity_bytes, survivor_used_bytes, num_regions());
}
G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
@ -2864,7 +2834,6 @@ void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
gc_tracer->report_metaspace_summary(when, metaspace_summary);
}
G1CollectedHeap* G1CollectedHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
@ -3203,15 +3172,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
wait_for_root_region_scanning();
print_heap_before_gc();
print_heap_regions();
trace_heap_before_gc(_gc_tracer_stw);
_verifier->verify_region_sets_optional();
_verifier->verify_dirty_young_regions();
// This call will decide whether this pause is an initial-mark
// pause. If it is, during_initial_mark_pause() will return true
// for the duration of this pause.
g1_policy()->decide_on_conc_mark_initiation();
// We should not be doing initial mark unless the conc mark thread is running
if (!_cmThread->should_terminate()) {
// This call will decide whether this pause is an initial-mark
// pause. If it is, during_initial_mark_pause() will return true
// for the duration of this pause.
g1_policy()->decide_on_conc_mark_initiation();
}
// We do not allow initial-mark to be piggy-backed on a mixed GC.
assert(!collector_state()->during_initial_mark_pause() ||
@ -3233,7 +3206,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_old_marking_cycles_started();
register_concurrent_cycle_start(_gc_timer_stw->gc_start());
_cm->gc_tracer_cm()->set_gc_cause(gc_cause());
}
_gc_tracer_stw->report_yc_type(collector_state()->yc_type());
@ -3405,10 +3378,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert(check_young_list_empty(false /* check_heap */),
"young list should be empty");
g1_policy()->record_survivor_regions(_young_list->survivor_length(),
_young_list->first_survivor_region(),
_young_list->last_survivor_region());
_young_list->reset_auxilary_lists();
if (evacuation_failed()) {
@ -3443,7 +3412,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_allocator->init_mutator_alloc_region();
{
size_t expand_bytes = g1_policy()->expansion_amount();
size_t expand_bytes = _heap_sizing_policy->expansion_amount();
if (expand_bytes > 0) {
size_t bytes_before = capacity();
// No need for an ergo logging here,
@ -3539,6 +3508,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
print_heap_after_gc();
print_heap_regions();
trace_heap_after_gc(_gc_tracer_stw);
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
@ -3777,11 +3747,12 @@ public:
"claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
log_debug(gc, stringdedup)("Cleaned string and symbol table, "
"strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
"symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
strings_processed(), strings_removed(),
symbols_processed(), symbols_removed());
log_info(gc, stringtable)(
"Cleaned string and symbol table, "
"strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
"symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
strings_processed(), strings_removed(),
symbols_processed(), symbols_removed());
}
void work(uint worker_id) {
@ -4084,14 +4055,10 @@ void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
bool process_strings, bool process_symbols) {
{
{ // Timing scope
G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
workers()->run_task(&g1_unlink_task);
}
if (G1StringDedup::is_enabled()) {
G1StringDedup::unlink(is_alive);
}
}
class G1RedirtyLoggedCardsTask : public AbstractGangTask {

View File

@ -73,11 +73,9 @@ class HeapRegionRemSetIterator;
class G1ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentGCTimer;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
@ -85,6 +83,7 @@ class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1HeapVerifier;
class G1HeapSizingPolicy;
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@ -270,8 +269,6 @@ private:
// concurrent cycles) we have completed.
volatile uint _old_marking_cycles_completed;
bool _heap_summary_sent;
// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
// allocating a number of dead regions. This way we can induce very
@ -364,6 +361,7 @@ protected:
// The current policy object for the collector.
G1CollectorPolicy* _g1_policy;
G1HeapSizingPolicy* _heap_sizing_policy;
G1CollectionSet _collection_set;
@ -622,10 +620,6 @@ public:
return _old_marking_cycles_completed;
}
void register_concurrent_cycle_start(const Ticks& start_time);
void register_concurrent_cycle_end();
void trace_heap_after_concurrent_cycle();
G1HRPrinter* hr_printer() { return &_hr_printer; }
// Allocates a new heap region instance.
@ -900,9 +894,7 @@ protected:
ReferenceProcessor* _ref_processor_stw;
STWGCTimer* _gc_timer_stw;
ConcurrentGCTimer* _gc_timer_cm;
G1OldTracer* _gc_tracer_cm;
G1NewTracer* _gc_tracer_stw;
// During reference object discovery, the _is_alive_non_header
@ -1036,9 +1028,6 @@ public:
// The Concurrent Marking reference processor...
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
virtual size_t capacity() const;
virtual size_t used() const;
// This should be called when we're not holding the heap lock. The
@ -1292,6 +1281,12 @@ public:
return true;
}
// The reference pending list lock is acquired from the
// ConcurrentMarkThread.
virtual bool needs_reference_pending_list_locker_thread() const {
return true;
}
inline bool is_in_young(const oop obj);
virtual bool is_scavengable(const void* addr);
@ -1470,7 +1465,11 @@ public:
G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
// Printing
private:
void print_heap_regions() const;
void print_regions_on(outputStream* st) const;
public:
virtual void print_on(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
@ -41,90 +42,14 @@
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"
// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results
// all the same
static double rs_length_diff_defaults[] = {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};
static double cost_per_card_ms_defaults[] = {
0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};
// all the same
static double young_cards_per_entry_ratio_defaults[] = {
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};
static double cost_per_entry_ms_defaults[] = {
0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};
static double cost_per_byte_ms_defaults[] = {
0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};
// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};
static double young_other_cost_per_region_ms_defaults[] = {
0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};
static double non_young_other_cost_per_region_ms_defaults[] = {
1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
G1CollectorPolicy::G1CollectorPolicy() :
_predictor(G1ConfidencePercent / 100.0),
_recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_prev_collection_pause_end_ms(0.0),
_rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
_mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
_constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_non_young_other_cost_per_region_ms_seq(
new TruncatedSeq(TruncatedSeqLength)),
_pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
_rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
_analytics(new G1Analytics(&_predictor)),
_pause_time_target_ms((double) MaxGCPauseMillis),
_recent_prev_end_times_for_all_gcs_sec(
new TruncatedSeq(NumPrevPausesForHeuristics)),
_recent_avg_pause_time_ratio(0.0),
_rs_lengths_prediction(0),
_max_survivor_regions(0),
// add here any more surv rate groups
_recorded_survivor_regions(0),
_recorded_survivor_head(NULL),
_recorded_survivor_tail(NULL),
_survivors_age_table(true),
_gc_overhead_perc(0.0),
_bytes_allocated_in_old_since_last_gc(0),
_ihop_control(NULL),
_initial_mark_to_mixed() {
@ -150,27 +75,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
HeapRegionRemSet::setup_remset_size();
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
clear_ratio_check_data();
_phase_times = new G1GCPhaseTimes(ParallelGCThreads);
int index = MIN2(ParallelGCThreads - 1, 7u);
_rs_length_diff_seq->add(rs_length_diff_defaults[index]);
_cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
_cost_scan_hcc_seq->add(0.0);
_young_cards_per_entry_ratio_seq->add(
young_cards_per_entry_ratio_defaults[index]);
_cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
_cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
_constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
_young_other_cost_per_region_ms_seq->add(
young_other_cost_per_region_ms_defaults[index]);
_non_young_other_cost_per_region_ms_seq->add(
non_young_other_cost_per_region_ms_defaults[index]);
// Below, we might need to calculate the pause time target based on
// the pause interval. When we do so we are going to give G1 maximum
// flexibility and allow it to do pauses when it needs to. So, we'll
@ -183,18 +89,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
// First make sure that, if either parameter is set, its value is
// reasonable.
if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
if (MaxGCPauseMillis < 1) {
vm_exit_during_initialization("MaxGCPauseMillis should be "
"greater than 0");
}
}
if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
if (GCPauseIntervalMillis < 1) {
vm_exit_during_initialization("GCPauseIntervalMillis should be "
"greater than 0");
}
}
guarantee(MaxGCPauseMillis >= 1, "Range checking for MaxGCPauseMillis should guarantee that value is >= 1");
// Then, if the pause time target parameter was not set, set it to
// the default value.
@ -216,39 +111,18 @@ G1CollectorPolicy::G1CollectorPolicy() :
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
}
// Finally, make sure that the two parameters are consistent.
if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
char buffer[256];
jio_snprintf(buffer, 256,
"MaxGCPauseMillis (%u) should be less than "
"GCPauseIntervalMillis (%u)",
MaxGCPauseMillis, GCPauseIntervalMillis);
vm_exit_during_initialization(buffer);
}
guarantee(GCPauseIntervalMillis >= 1, "Constraint for GCPauseIntervalMillis should guarantee that value is >= 1");
guarantee(GCPauseIntervalMillis > MaxGCPauseMillis, "Constraint for GCPauseIntervalMillis should guarantee that GCPauseIntervalMillis > MaxGCPauseMillis");
double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
double time_slice = (double) GCPauseIntervalMillis / 1000.0;
_mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
// start conservatively (around 50ms is about right)
_concurrent_mark_remark_times_ms->add(0.05);
_concurrent_mark_cleanup_times_ms->add(0.20);
_tenuring_threshold = MaxTenuringThreshold;
assert(GCTimeRatio > 0,
"we should have set it to a default value set_g1_gc_flags() "
"if a user set it to 0");
_gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
uintx reserve_perc = G1ReservePercent;
// Put an artificial ceiling on this so that it's not set to a silly value.
if (reserve_perc > 50) {
reserve_perc = 50;
warning("G1ReservePercent is set to a value that is too large, "
"it's been updated to " UINTX_FORMAT, reserve_perc);
}
_reserve_factor = (double) reserve_perc / 100.0;
guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50.");
_reserve_factor = (double) G1ReservePercent / 100.0;
// This will be set when the heap is expanded
// for the first time during initialization.
_reserve_regions = 0;
@ -260,14 +134,6 @@ G1CollectorPolicy::~G1CollectorPolicy() {
delete _ihop_control;
}
double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
return _predictor.get_new_prediction(seq);
}
size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const {
return (size_t)get_new_prediction(seq);
}
void G1CollectorPolicy::initialize_alignments() {
_space_alignment = HeapRegion::GrainBytes;
size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
@ -290,9 +156,8 @@ void G1CollectorPolicy::initialize_flags() {
FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
}
if (SurvivorRatio < 1) {
vm_exit_during_initialization("Invalid survivor ratio specified");
}
guarantee(SurvivorRatio >= 1, "Range checking for SurvivorRatio should guarantee that value is >= 1");
CollectorPolicy::initialize_flags();
_young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
@ -342,8 +207,9 @@ bool G1CollectorPolicy::predict_will_fit(uint young_length,
double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
size_t bytes_to_copy =
(size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
double young_other_time_ms = predict_young_other_time_ms(young_length);
double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
collector_state()->during_concurrent_mark());
double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
if (pause_time_ms > target_pause_time_ms) {
// end condition 2: prediction is over the target pause time
@ -387,10 +253,10 @@ uint G1CollectorPolicy::calculate_young_list_desired_min_length(
uint base_min_length) const {
uint desired_min_length = 0;
if (adaptive_young_list_length()) {
if (_alloc_rate_ms_seq->num() > 3) {
if (_analytics->num_alloc_rate_ms() > 3) {
double now_sec = os::elapsedTime();
double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
double alloc_rate_ms = predict_alloc_rate_ms();
double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
} else {
// otherwise we don't have enough info to make the prediction
@ -409,7 +275,7 @@ uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
}
uint G1CollectorPolicy::update_young_list_max_and_target_length() {
return update_young_list_max_and_target_length(predict_rs_lengths());
return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
}
uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
@ -430,7 +296,7 @@ G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengt
// Calculate the absolute and desired min bounds first.
// This is how many young regions we already have (currently: the survivors).
uint base_min_length = recorded_survivor_regions();
const uint base_min_length = _g1->young_list()->survivor_length();
uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
// This is the absolute minimum young length. Ensure that we
// will at least have one eden region available for allocation.
@ -481,7 +347,7 @@ G1CollectorPolicy::YoungTargetLengths G1CollectorPolicy::young_list_target_lengt
young_list_target_length = desired_min_length;
}
assert(young_list_target_length > recorded_survivor_regions(),
assert(young_list_target_length > base_min_length,
"we should be able to allocate at least one eden region");
assert(young_list_target_length >= absolute_min_length, "post-condition");
@ -514,9 +380,9 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
double survivor_regions_evac_time = predict_survivor_regions_evac_time();
size_t pending_cards = get_new_size_prediction(_pending_cards_seq);
size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
size_t pending_cards = _analytics->predict_pending_cards();
size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
double base_time_ms =
predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
survivor_regions_evac_time;
@ -595,8 +461,8 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
double G1CollectorPolicy::predict_survivor_regions_evac_time() const {
double survivor_regions_evac_time = 0.0;
for (HeapRegion * r = _recorded_survivor_head;
r != NULL && r != _recorded_survivor_tail->get_next_young_region();
for (HeapRegion * r = _g1->young_list()->first_survivor_region();
r != NULL && r != _g1->young_list()->last_survivor_region()->get_next_young_region();
r = r->get_next_young_region()) {
survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
}
@ -616,7 +482,7 @@ void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_l
}
void G1CollectorPolicy::update_rs_lengths_prediction() {
update_rs_lengths_prediction(predict_rs_lengths());
update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}
void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
@ -684,7 +550,7 @@ void G1CollectorPolicy::record_full_collection_end() {
double full_gc_time_sec = end_sec - _full_collection_start_sec;
double full_gc_time_ms = full_gc_time_sec * 1000.0;
update_recent_gc_times(end_sec, full_gc_time_ms);
_analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
collector_state()->set_full_collection(false);
@ -700,8 +566,6 @@ void G1CollectorPolicy::record_full_collection_end() {
_short_lived_surv_rate_group->start_adding_regions();
// also call this on any additional surv rate groups
record_survivor_regions(0, NULL, NULL);
_free_regions_at_end_of_collection = _g1->num_free_regions();
// Reset survivors SurvRateGroup.
_survivor_surv_rate_group->reset();
@ -754,8 +618,8 @@ void G1CollectorPolicy::record_concurrent_mark_remark_start() {
void G1CollectorPolicy::record_concurrent_mark_remark_end() {
double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
_concurrent_mark_remark_times_ms->add(elapsed_time_ms);
_prev_collection_pause_end_ms += elapsed_time_ms;
_analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
_analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}
@ -854,7 +718,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
maybe_start_marking();
}
double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
if (app_time_ms < MIN_TIMER_GRANULARITY) {
// This usually happens due to the timer not having the required
// granularity. Some Linuxes are the usual culprits.
@ -873,31 +737,12 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
// place we can safely ignore them here.
uint regions_allocated = _collection_set->eden_region_length();
double alloc_rate_ms = (double) regions_allocated / app_time_ms;
_alloc_rate_ms_seq->add(alloc_rate_ms);
_analytics->report_alloc_rate_ms(alloc_rate_ms);
double interval_ms =
(end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
update_recent_gc_times(end_time_sec, pause_time_ms);
_recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
if (recent_avg_pause_time_ratio() < 0.0 ||
(recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
// Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
// CR 6902692 by redoing the manner in which the ratio is incrementally computed.
if (_recent_avg_pause_time_ratio < 0.0) {
_recent_avg_pause_time_ratio = 0.0;
} else {
assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
_recent_avg_pause_time_ratio = 1.0;
}
}
// Compute the ratio of just this last pause time to the entire time range stored
// in the vectors. Comparing this pause to the entire range, rather than only the
// most recent interval, has the effect of smoothing over a possible transient 'burst'
// of more frequent pauses that don't really reflect a change in heap occupancy.
// This reduces the likelihood of a needless heap expansion being triggered.
_last_pause_time_ratio =
(pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
(end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
_analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
_analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
}
bool new_in_marking_window = collector_state()->in_marking_window();
@ -943,28 +788,20 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
double cost_per_card_ms = 0.0;
if (_pending_cards > 0) {
cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
_cost_per_card_ms_seq->add(cost_per_card_ms);
_analytics->report_cost_per_card_ms(cost_per_card_ms);
}
_cost_scan_hcc_seq->add(scan_hcc_time_ms);
_analytics->report_cost_scan_hcc(scan_hcc_time_ms);
double cost_per_entry_ms = 0.0;
if (cards_scanned > 10) {
cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
if (collector_state()->last_gc_was_young()) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
_mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
}
_analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
}
if (_max_rs_lengths > 0) {
double cards_per_entry_ratio =
(double) cards_scanned / (double) _max_rs_lengths;
if (collector_state()->last_gc_was_young()) {
_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
} else {
_mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
}
_analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
}
// This is defensive. For a while _max_rs_lengths could get
@ -985,7 +822,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
if (_max_rs_lengths > recorded_rs_lengths) {
rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
}
_rs_length_diff_seq->add((double) rs_length_diff);
_analytics->report_rs_length_diff((double) rs_length_diff);
size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
@ -993,27 +830,23 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
if (copied_bytes > 0) {
cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
if (collector_state()->in_marking_window()) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
_cost_per_byte_ms_seq->add(cost_per_byte_ms);
}
_analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
}
if (_collection_set->young_region_length() > 0) {
_young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
_collection_set->young_region_length());
_analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
_collection_set->young_region_length());
}
if (_collection_set->old_region_length() > 0) {
_non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
_collection_set->old_region_length());
_analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
_collection_set->old_region_length());
}
_constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
_analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
_pending_cards_seq->add((double) _pending_cards);
_rs_lengths_seq->add((double) _max_rs_lengths);
_analytics->report_pending_cards((double) _pending_cards);
_analytics->report_rs_lengths((double) _max_rs_lengths);
}
collector_state()->set_in_marking_window(new_in_marking_window);
@ -1150,106 +983,10 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
dcqs.notify_if_necessary();
}
size_t G1CollectorPolicy::predict_rs_lengths() const {
return get_new_size_prediction(_rs_lengths_seq);
}
size_t G1CollectorPolicy::predict_rs_length_diff() const {
return get_new_size_prediction(_rs_length_diff_seq);
}
double G1CollectorPolicy::predict_alloc_rate_ms() const {
return get_new_prediction(_alloc_rate_ms_seq);
}
double G1CollectorPolicy::predict_cost_per_card_ms() const {
return get_new_prediction(_cost_per_card_ms_seq);
}
double G1CollectorPolicy::predict_scan_hcc_ms() const {
return get_new_prediction(_cost_scan_hcc_seq);
}
double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}
double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
return get_new_prediction(_young_cards_per_entry_ratio_seq);
}
double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
return predict_young_cards_per_entry_ratio();
} else {
return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
}
}
size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
}
size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
}
double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
if (collector_state()->gcs_are_young()) {
return card_num * get_new_prediction(_cost_per_entry_ms_seq);
} else {
return predict_mixed_rs_scan_time_ms(card_num);
}
}
double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
if (_mixed_cost_per_entry_ms_seq->num() < 3) {
return card_num * get_new_prediction(_cost_per_entry_ms_seq);
} else {
return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
}
}
double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
} else {
return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
}
}
double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
if (collector_state()->during_concurrent_mark()) {
return predict_object_copy_time_ms_during_cm(bytes_to_copy);
} else {
return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
}
}
double G1CollectorPolicy::predict_constant_other_time_ms() const {
return get_new_prediction(_constant_other_time_ms_seq);
}
double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
}
double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}
double G1CollectorPolicy::predict_remark_time_ms() const {
return get_new_prediction(_concurrent_mark_remark_times_ms);
}
double G1CollectorPolicy::predict_cleanup_time_ms() const {
return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}
double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
TruncatedSeq* seq = surv_rate_group->get_seq(age);
guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
double pred = get_new_prediction(seq);
double pred = _predictor.get_new_prediction(seq);
if (pred > 1.0) {
pred = 1.0;
}
@ -1267,19 +1004,14 @@ double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
size_t scanned_cards) const {
return
predict_rs_update_time_ms(pending_cards) +
predict_rs_scan_time_ms(scanned_cards) +
predict_constant_other_time_ms();
_analytics->predict_rs_update_time_ms(pending_cards) +
_analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
_analytics->predict_constant_other_time_ms();
}
double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
size_t rs_length = predict_rs_lengths() + predict_rs_length_diff();
size_t card_num;
if (collector_state()->gcs_are_young()) {
card_num = predict_young_card_num(rs_length);
} else {
card_num = predict_non_young_card_num(rs_length);
}
size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
return predict_base_elapsed_time_ms(pending_cards, card_num);
}
@ -1299,149 +1031,25 @@ size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
bool for_young_gc) const {
size_t rs_length = hr->rem_set()->occupied();
size_t card_num;
// Predicting the number of cards is based on which type of GC
// we're predicting for.
if (for_young_gc) {
card_num = predict_young_card_num(rs_length);
} else {
card_num = predict_non_young_card_num(rs_length);
}
size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
size_t bytes_to_copy = predict_bytes_to_copy(hr);
double region_elapsed_time_ms =
predict_rs_scan_time_ms(card_num) +
predict_object_copy_time_ms(bytes_to_copy);
_analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
_analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
// The prediction of the "other" time for this region is based
// upon the region type and NOT the GC type.
if (hr->is_young()) {
region_elapsed_time_ms += predict_young_other_time_ms(1);
region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
} else {
region_elapsed_time_ms += predict_non_young_other_time_ms(1);
region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
}
return region_elapsed_time_ms;
}
void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
double elapsed_ms) {
_recent_gc_times_ms->add(elapsed_ms);
_recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
_prev_collection_pause_end_ms = end_time_sec * 1000.0;
}
void G1CollectorPolicy::clear_ratio_check_data() {
_ratio_over_threshold_count = 0;
_ratio_over_threshold_sum = 0.0;
_pauses_since_start = 0;
}
size_t G1CollectorPolicy::expansion_amount() {
double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
double last_gc_overhead = _last_pause_time_ratio * 100.0;
double threshold = _gc_overhead_perc;
size_t expand_bytes = 0;
// If the heap is at less than half its maximum size, scale the threshold down,
// to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
// though the scaling code will likely keep the increase small.
if (_g1->capacity() <= _g1->max_capacity() / 2) {
threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
threshold = MAX2(threshold, 1.0);
}
// If the last GC time ratio is over the threshold, increment the count of
// times it has been exceeded, and add this ratio to the sum of exceeded
// ratios.
if (last_gc_overhead > threshold) {
_ratio_over_threshold_count++;
_ratio_over_threshold_sum += last_gc_overhead;
}
// Check if we've had enough GC time ratio checks that were over the
// threshold to trigger an expansion. We'll also expand if we've
// reached the end of the history buffer and the average of all entries
// is still over the threshold. This indicates a smaller number of GCs were
// long enough to make the average exceed the threshold.
bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics;
if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
(filled_history_buffer && (recent_gc_overhead > threshold))) {
size_t min_expand_bytes = HeapRegion::GrainBytes;
size_t reserved_bytes = _g1->max_capacity();
size_t committed_bytes = _g1->capacity();
size_t uncommitted_bytes = reserved_bytes - committed_bytes;
size_t expand_bytes_via_pct =
uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
double scale_factor = 1.0;
// If the current size is less than 1/4 of the Initial heap size, expand
// by half of the delta between the current and Initial sizes. That is, grow
// back quickly.
//
// Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
// the available expansion space, whichever is smaller, as the base
// expansion size. Then possibly scale this size according to how much the
// threshold has (on average) been exceeded by. If the delta is small
// (less than the StartScaleDownAt value), scale the size down linearly, but
// not by less than MinScaleDownFactor. If the delta is large (greater than
// the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
// times the base size. The scaling will be linear in the range from
// StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
// ScaleUpRange sets the rate of scaling up.
if (committed_bytes < InitialHeapSize / 4) {
expand_bytes = (InitialHeapSize - committed_bytes) / 2;
} else {
double const MinScaleDownFactor = 0.2;
double const MaxScaleUpFactor = 2;
double const StartScaleDownAt = _gc_overhead_perc;
double const StartScaleUpAt = _gc_overhead_perc * 1.5;
double const ScaleUpRange = _gc_overhead_perc * 2.0;
double ratio_delta;
if (filled_history_buffer) {
ratio_delta = recent_gc_overhead - threshold;
} else {
ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
}
expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
if (ratio_delta < StartScaleDownAt) {
scale_factor = ratio_delta / StartScaleDownAt;
scale_factor = MAX2(scale_factor, MinScaleDownFactor);
} else if (ratio_delta > StartScaleUpAt) {
scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
}
}
log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
"recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
// Ensure the expansion size is at least the minimum growth amount
// and at most the remaining uncommitted byte size.
expand_bytes = MAX2(expand_bytes, min_expand_bytes);
expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
clear_ratio_check_data();
} else {
// An expansion was not triggered. If we've started counting, increment
// the number of checks we've made in the current window. If we've
// reached the end of the window without resizing, clear the counters to
// start again the next time we see a ratio above the threshold.
if (_ratio_over_threshold_count > 0) {
_pauses_since_start++;
if (_pauses_since_start > NumPrevPausesForHeuristics) {
clear_ratio_check_data();
}
}
}
return expand_bytes;
}
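
To make the scaling in the heuristic above concrete (a worked example assuming GCTimeRatio = 10, so _gc_overhead_perc = 100 * (1 / (1 + 10)) ≈ 9.1): StartScaleDownAt ≈ 9.1, StartScaleUpAt ≈ 13.6 and ScaleUpRange ≈ 18.2. A ratio_delta of 4.5 scales the base expansion by MAX2(4.5 / 9.1, 0.2) ≈ 0.5, a ratio_delta of 22.7 scales it by MIN2(1 + (22.7 - 13.6) / 18.2, 2.0) = 1.5, and deltas between the two thresholds leave the base amount unchanged.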
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
@ -1559,73 +1167,13 @@ void G1CollectorPolicy::decide_on_conc_mark_initiation() {
}
}
class ParKnownGarbageHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CSetChooserParUpdater _cset_updater;
public:
ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
uint chunk_size) :
_g1h(G1CollectedHeap::heap()),
_cset_updater(hrSorted, true /* parallel */, chunk_size) { }
bool doHeapRegion(HeapRegion* r) {
// Do we have any marking information for this region?
if (r->is_marked()) {
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
_cset_updater.add_region(r);
}
}
return false;
}
};
class ParKnownGarbageTask: public AbstractGangTask {
CollectionSetChooser* _hrSorted;
uint _chunk_size;
G1CollectedHeap* _g1;
HeapRegionClaimer _hrclaimer;
public:
ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
AbstractGangTask("ParKnownGarbageTask"),
_hrSorted(hrSorted), _chunk_size(chunk_size),
_g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
void work(uint worker_id) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
_g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
}
};
uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
assert(n_workers > 0, "Active gc workers should be greater than 0");
const uint overpartition_factor = 4;
const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}
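Worked numbers for the chunk-size calculation, as a standalone sketch (plain C++). Note that, as written above, the n_regions / n_workers floor always dominates the overpartitioned value under integer division, so the factor of 4 has no effect:

// chunk_model.cpp - standalone arithmetic for calculate_parallel_work_chunk_size.
#include <algorithm>
#include <cstdio>

static unsigned chunk_size(unsigned n_workers, unsigned n_regions) {
  const unsigned overpartition_factor = 4;
  const unsigned min_chunk_size = std::max(n_regions / n_workers, 1u);
  // min_chunk_size >= n_regions / (n_workers * 4) under integer division,
  // so the floor always wins and the overpartitioning is effectively a no-op.
  return std::max(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}

int main() {
  std::printf("%u\n", chunk_size(8, 2048));  // 2048/8 = 256
  return 0;
}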
void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
cset_chooser()->clear();
WorkGang* workers = _g1->workers();
uint n_workers = workers->active_workers();
uint n_regions = _g1->num_regions();
uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
cset_chooser()->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
ParKnownGarbageTask par_known_garbage_task(cset_chooser(), chunk_size, n_workers);
workers->run_task(&par_known_garbage_task);
cset_chooser()->sort_regions();
cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
double end_sec = os::elapsedTime();
double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
_concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
_prev_collection_pause_end_ms += elapsed_time_ms;
_analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
_analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}
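record_concurrent_mark_cleanup_end() uses the usual WorkGang idiom: construct a task, run it once on all active workers, then post-process on the calling thread. A minimal sketch of that idiom, with std::thread and std::atomic standing in for WorkGang and HeapRegionClaimer (the HotSpot types are not reproduced):

// gang_model.cpp - sketch of the run-task-on-all-workers idiom.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct ParTask {
  std::atomic<unsigned> _next_chunk{0};  // plays the role of the claimer
  unsigned _n_chunks;
  explicit ParTask(unsigned n_chunks) : _n_chunks(n_chunks) {}
  void work(unsigned worker_id) {
    for (unsigned c; (c = _next_chunk.fetch_add(1)) < _n_chunks; ) {
      std::printf("worker %u claims chunk %u\n", worker_id, c);
    }
  }
};

int main() {
  ParTask task(8);
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < 4; i++) {
    gang.emplace_back([&task, i] { task.work(i); });
  }
  for (std::thread& t : gang) t.join();  // run_task() is synchronous too
  return 0;
}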
@ -1763,4 +1311,3 @@ void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
_collection_set->finalize_old_part(time_remaining_ms);
}

View File

@ -43,6 +43,7 @@ class HeapRegion;
class G1CollectionSet;
class CollectionSetChooser;
class G1IHOPControl;
class G1Analytics;
class G1YoungGenSizer;
class G1CollectorPolicy: public CollectorPolicy {
@ -57,10 +58,7 @@ class G1CollectorPolicy: public CollectorPolicy {
void report_ihop_statistics();
G1Predictions _predictor;
double get_new_prediction(TruncatedSeq const* seq) const;
size_t get_new_size_prediction(TruncatedSeq const* seq) const;
G1Analytics* _analytics;
G1MMUTracker* _mmu_tracker;
void initialize_alignments();
@ -68,17 +66,6 @@ class G1CollectorPolicy: public CollectorPolicy {
double _full_collection_start_sec;
// These exclude marking times.
TruncatedSeq* _recent_gc_times_ms;
TruncatedSeq* _concurrent_mark_remark_times_ms;
TruncatedSeq* _concurrent_mark_cleanup_times_ms;
// Ratio check data for determining if heap growth is necessary.
uint _ratio_over_threshold_count;
double _ratio_over_threshold_sum;
uint _pauses_since_start;
uint _young_list_target_length;
uint _young_list_fixed_length;
@ -88,42 +75,10 @@ class G1CollectorPolicy: public CollectorPolicy {
SurvRateGroup* _short_lived_surv_rate_group;
SurvRateGroup* _survivor_surv_rate_group;
// add here any more surv rate groups
double _gc_overhead_perc;
double _reserve_factor;
uint _reserve_regions;
enum PredictionConstants {
TruncatedSeqLength = 10,
NumPrevPausesForHeuristics = 10,
// MinOverThresholdForGrowth is the minimum number of pause time ratios that
// must exceed GCTimeRatio before a heap expansion is triggered. It must be
// less than NumPrevPausesForHeuristics.
MinOverThresholdForGrowth = 4
};
TruncatedSeq* _alloc_rate_ms_seq;
double _prev_collection_pause_end_ms;
TruncatedSeq* _rs_length_diff_seq;
TruncatedSeq* _cost_per_card_ms_seq;
TruncatedSeq* _cost_scan_hcc_seq;
TruncatedSeq* _young_cards_per_entry_ratio_seq;
TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
TruncatedSeq* _cost_per_entry_ms_seq;
TruncatedSeq* _mixed_cost_per_entry_ms_seq;
TruncatedSeq* _cost_per_byte_ms_seq;
TruncatedSeq* _constant_other_time_ms_seq;
TruncatedSeq* _young_other_cost_per_region_ms_seq;
TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
TruncatedSeq* _pending_cards_seq;
TruncatedSeq* _rs_lengths_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
G1YoungGenSizer* _young_gen_sizer;
uint _free_regions_at_end_of_collection;
@ -151,6 +106,7 @@ class G1CollectorPolicy: public CollectorPolicy {
G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
public:
const G1Predictions& predictor() const { return _predictor; }
const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }
// Add the given number of bytes to the total number of allocated bytes in the old gen.
void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
@ -177,39 +133,6 @@ public:
_max_rs_lengths = rs_lengths;
}
size_t predict_rs_lengths() const;
size_t predict_rs_length_diff() const;
double predict_alloc_rate_ms() const;
double predict_cost_per_card_ms() const;
double predict_scan_hcc_ms() const;
double predict_rs_update_time_ms(size_t pending_cards) const;
double predict_young_cards_per_entry_ratio() const;
double predict_mixed_cards_per_entry_ratio() const;
size_t predict_young_card_num(size_t rs_length) const;
size_t predict_non_young_card_num(size_t rs_length) const;
double predict_rs_scan_time_ms(size_t card_num) const;
double predict_mixed_rs_scan_time_ms(size_t card_num) const;
double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;
double predict_object_copy_time_ms(size_t bytes_to_copy) const;
double predict_constant_other_time_ms() const;
double predict_young_other_time_ms(size_t young_num) const;
double predict_non_young_other_time_ms(size_t non_young_num) const;
double predict_base_elapsed_time_ms(size_t pending_cards) const;
double predict_base_elapsed_time_ms(size_t pending_cards,
@ -242,10 +165,6 @@ public:
return _mmu_tracker->max_gc_time() * 1000.0;
}
double predict_remark_time_ms() const;
double predict_cleanup_time_ms() const;
// Returns an estimate of the survival rate of the region at yg-age
// "yg_age".
double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
@ -265,11 +184,6 @@ protected:
CollectionSetChooser* cset_chooser() const;
private:
// Statistics kept per GC stoppage, pause or full.
TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
// Add a new GC of the given duration and end time to the record.
void update_recent_gc_times(double end_time_sec, double elapsed_ms);
// The number of bytes copied during the GC.
size_t _bytes_copied_during_gc;
@ -279,15 +193,6 @@ private:
G1GCPhaseTimes* _phase_times;
// The ratio of gc time to elapsed time, computed over recent pauses,
// and the ratio for just the last pause.
double _recent_avg_pause_time_ratio;
double _last_pause_time_ratio;
double recent_avg_pause_time_ratio() const {
return _recent_avg_pause_time_ratio;
}
// This set of variables tracks the collector efficiency, in order to
// determine whether we should initiate a new marking.
double _mark_remark_start_sec;
@ -335,10 +240,6 @@ private:
void update_rs_lengths_prediction();
void update_rs_lengths_prediction(size_t prediction);
// Calculate and return chunk size (in number of regions) for parallel
// concurrent mark cleanup.
uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;
// Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount
// of objects to be copied for the given length will fit into the
@ -475,13 +376,6 @@ public:
// the initial-mark work and start a marking cycle.
void decide_on_conc_mark_initiation();
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();
// Print stats on young survival ratio
void print_yg_surv_rate_info() const;
@ -491,7 +385,6 @@ public:
} else {
_short_lived_surv_rate_group->finished_recalculating_age_indexes();
}
// do that for any other surv rate groups
}
size_t young_list_target_length() const { return _young_list_target_length; }
@ -522,16 +415,6 @@ private:
// The limit on the number of regions allocated for survivors.
uint _max_survivor_regions;
// For reporting purposes.
// The value of _heap_bytes_before_gc is also used to calculate
// the cost of copying.
// The amount of survivor regions after a collection.
uint _recorded_survivor_regions;
// List of survivor regions.
HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail;
AgeTable _survivors_age_table;
public:
@ -565,18 +448,6 @@ public:
_survivor_surv_rate_group->stop_adding_regions();
}
void record_survivor_regions(uint regions,
HeapRegion* head,
HeapRegion* tail) {
_recorded_survivor_regions = regions;
_recorded_survivor_head = head;
_recorded_survivor_tail = tail;
}
uint recorded_survivor_regions() const {
return _recorded_survivor_regions;
}
void record_age_table(AgeTable* age_table) {
_survivors_age_table.merge(age_table);
}

View File

@ -72,7 +72,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
bool _in_marking_window;
bool _in_marking_window_im;
bool _concurrent_cycle_started;
bool _full_collection;
public:
@ -88,7 +87,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
_mark_in_progress(false),
_in_marking_window(false),
_in_marking_window_im(false),
_concurrent_cycle_started(false),
_full_collection(false) {}
// Setters
@ -101,7 +99,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
void set_mark_in_progress(bool v) { _mark_in_progress = v; }
void set_in_marking_window(bool v) { _in_marking_window = v; }
void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
void set_concurrent_cycle_started(bool v) { _concurrent_cycle_started = v; }
void set_full_collection(bool v) { _full_collection = v; }
// Getters
@ -114,7 +111,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
bool mark_in_progress() const { return _mark_in_progress; }
bool in_marking_window() const { return _in_marking_window; }
bool in_marking_window_im() const { return _in_marking_window_im; }
bool concurrent_cycle_started() const { return _concurrent_cycle_started; }
bool full_collection() const { return _full_collection; }
// Composite booleans (clients worry about flickering)

View File

@ -120,74 +120,10 @@ void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_r
}
// We need to clear the bitmap on commit, removing any existing information.
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
_bm->clearRange(mr);
_bm->clear_range(mr);
}
// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
private:
G1ConcurrentMark* _cm;
G1CMBitMap* _bitmap;
bool _may_yield; // The closure may yield during iteration. If it yields and marking aborts, the iteration is aborted.
public:
ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
}
virtual bool doHeapRegion(HeapRegion* r) {
size_t const chunk_size_in_words = M / HeapWordSize;
HeapWord* cur = r->bottom();
HeapWord* const end = r->end();
while (cur < end) {
MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
_bitmap->clearRange(mr);
cur += chunk_size_in_words;
// Abort iteration if after yielding the marking has been aborted.
if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
return true;
}
// Repeat the asserts from before the start of the closure. We do them
// as asserts here to minimize their overhead in product builds, but keep
// them as guarantees at the beginning / end of the bitmap clearing to
// retain some checking in product builds.
assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
}
return false;
}
};
class ParClearNextMarkBitmapTask : public AbstractGangTask {
ClearBitmapHRClosure* _cl;
HeapRegionClaimer _hrclaimer;
bool _suspendible; // If the task is suspendible, workers must join the STS.
public:
ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
_cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
void work(uint worker_id) {
SuspendibleThreadSetJoiner sts_join(_suspendible);
G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
}
};
void G1CMBitMap::clearAll() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
uint n_workers = g1h->workers()->active_workers();
ParClearNextMarkBitmapTask task(&cl, n_workers, false);
g1h->workers()->run_task(&task);
guarantee(cl.complete(), "Must have completed iteration.");
return;
}
void G1CMBitMap::clearRange(MemRegion mr) {
void G1CMBitMap::clear_range(MemRegion mr) {
mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
assert(!mr.is_empty(), "unexpected empty region");
// convert address range into offset range
@ -203,12 +139,12 @@ bool G1CMMarkStack::allocate(size_t capacity) {
// allocate a stack of the requisite depth
ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
if (!rs.is_reserved()) {
warning("ConcurrentMark MarkStack allocation failure");
log_warning(gc)("ConcurrentMark MarkStack allocation failure");
return false;
}
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
if (!_virtual_space.initialize(rs, rs.size())) {
warning("ConcurrentMark MarkStack backing store failure");
log_warning(gc)("ConcurrentMark MarkStack backing store failure");
// Release the virtual memory reserved for the marking stack
rs.release();
return false;
@ -441,7 +377,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
_has_aborted(false),
_restart_for_overflow(false),
_concurrent_marking_in_progress(false),
_concurrent_phase_status(ConcPhaseNotStarted),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
// _verbose_level set below
@ -478,9 +415,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
_root_regions.init(_g1h, this);
if (ConcGCThreads > ParallelGCThreads) {
warning("Can't have more ConcGCThreads (%u) "
"than ParallelGCThreads (%u).",
ConcGCThreads, ParallelGCThreads);
log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
ConcGCThreads, ParallelGCThreads);
return;
}
if (!FLAG_IS_DEFAULT(ConcGCThreads) && ConcGCThreads > 0) {
@ -534,9 +470,9 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
// Verify that the calculated value for MarkStackSize is in range.
// It would be nice to use the private utility routine from Arguments.
if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
"must be between 1 and " SIZE_FORMAT,
mark_stack_size, MarkStackSizeMax);
log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
"must be between 1 and " SIZE_FORMAT,
mark_stack_size, MarkStackSizeMax);
return;
}
FLAG_SET_ERGO(size_t, MarkStackSize, mark_stack_size);
@ -545,16 +481,16 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
if (FLAG_IS_CMDLINE(MarkStackSize)) {
if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
"must be between 1 and " SIZE_FORMAT,
MarkStackSize, MarkStackSizeMax);
log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
"must be between 1 and " SIZE_FORMAT,
MarkStackSize, MarkStackSizeMax);
return;
}
} else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
" or for MarkStackSizeMax (" SIZE_FORMAT ")",
MarkStackSize, MarkStackSizeMax);
log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
" or for MarkStackSizeMax (" SIZE_FORMAT ")",
MarkStackSize, MarkStackSizeMax);
return;
}
}
@ -562,7 +498,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
}
if (!_markStack.allocate(MarkStackSize)) {
warning("Failed to allocate CM marking stack");
log_warning(gc)("Failed to allocate CM marking stack");
return;
}
@ -698,9 +634,76 @@ G1ConcurrentMark::~G1ConcurrentMark() {
ShouldNotReachHere();
}
void G1ConcurrentMark::clearNextBitmap() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
class G1ClearBitMapTask : public AbstractGangTask {
// Heap region closure used for clearing the given mark bitmap.
class G1ClearBitmapHRClosure : public HeapRegionClosure {
private:
G1CMBitMap* _bitmap;
G1ConcurrentMark* _cm;
public:
G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
}
virtual bool doHeapRegion(HeapRegion* r) {
size_t const chunk_size_in_words = M / HeapWordSize;
HeapWord* cur = r->bottom();
HeapWord* const end = r->end();
while (cur < end) {
MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
_bitmap->clear_range(mr);
cur += chunk_size_in_words;
// Abort iteration if after yielding the marking has been aborted.
if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
return true;
}
// Repeat the asserts from before the start of the closure. We do them
// as asserts here to minimize their overhead in product builds, but keep
// them as guarantees at the beginning / end of the bitmap clearing to
// retain some checking in product builds.
assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
}
assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
return false;
}
};
G1ClearBitmapHRClosure _cl;
HeapRegionClaimer _hr_claimer;
bool _suspendible; // If the task is suspendible, workers must join the STS.
public:
G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
AbstractGangTask("Parallel Clear Bitmap Task"),
_cl(bitmap, suspendible ? cm : NULL),
_hr_claimer(n_workers),
_suspendible(suspendible)
{ }
void work(uint worker_id) {
SuspendibleThreadSetJoiner sts_join(_suspendible);
G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
}
bool is_complete() {
return _cl.complete();
}
};
void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
G1ClearBitMapTask task(bitmap, this, workers->active_workers(), may_yield);
workers->run_task(&task);
guarantee(!may_yield || task.is_complete(), "Must have completed iteration when not yielding.");
}
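The clearing pattern above - walk a large range in roughly 1M chunks and re-check an abort condition between chunks - is a general way to keep concurrent work interruptible at a bounded granularity. A minimal standalone sketch, assuming a simple atomic abort flag rather than the yield/safepoint machinery:

// clear_model.cpp - chunked clear with an abort check between chunks.
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstring>

static bool clear_range_chunked(char* base, size_t len,
                                const std::atomic<bool>* abort_flag) {
  const size_t chunk = (size_t)1 << 20;  // ~1M per chunk, as in the closure
  for (size_t cur = 0; cur < len; cur += chunk) {
    std::memset(base + cur, 0, std::min(chunk, len - cur));
    // Between chunks is the safe place to notice a request to stop.
    if (abort_flag != NULL && abort_flag->load()) {
      return false;  // aborted; the range is only partially cleared
    }
  }
  return true;
}

int main() {
  static char buf[4u << 20];
  std::atomic<bool> abort_flag(false);
  return clear_range_chunked(buf, sizeof(buf), &abort_flag) ? 0 : 1;
}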
void G1ConcurrentMark::cleanup_for_next_mark() {
// Make sure that the concurrent mark thread looks to still be in
// the current cycle.
guarantee(cmThread()->during_cycle(), "invariant");
@ -709,21 +712,24 @@ void G1ConcurrentMark::clearNextBitmap() {
// marking bitmap and getting it ready for the next cycle. During
// this time no other cycle can start. So, let's make sure that this
// is the case.
guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
_parallel_workers->run_task(&task);
clear_bitmap(_nextMarkBitMap, _parallel_workers, true);
// Clear the liveness counting data. If the marking has been aborted, the abort()
// call already did that.
if (cl.complete()) {
if (!has_aborted()) {
clear_all_count_data();
}
// Repeat the asserts from above.
guarantee(cmThread()->during_cycle(), "invariant");
guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}
void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}
class CheckBitmapClearHRClosure : public HeapRegionClosure {
@ -848,7 +854,7 @@ void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
// marking.
reset_marking_state(true /* clear_overflow */);
log_info(gc)("Concurrent Mark reset for overflow");
log_info(gc, marking)("Concurrent Mark reset for overflow");
}
}
@ -983,13 +989,12 @@ public:
}
};
void G1ConcurrentMark::scanRootRegions() {
void G1ConcurrentMark::scan_root_regions() {
// scan_in_progress() will have been set to true only if there was
// at least one root region to scan. So, if it's false, we
// should not attempt to do any further work.
if (root_regions()->scan_in_progress()) {
assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
_parallel_marking_threads = calc_parallel_marking_threads();
assert(parallel_marking_threads() <= max_parallel_marking_threads(),
@ -1007,47 +1012,27 @@ void G1ConcurrentMark::scanRootRegions() {
}
}
void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
uint old_val = 0;
do {
old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
} while (old_val != ConcPhaseNotStarted);
_g1h->gc_timer_cm()->register_gc_concurrent_start(title);
void G1ConcurrentMark::concurrent_cycle_start() {
_gc_timer_cm->register_gc_start();
_gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
_g1h->trace_heap_before_gc(_gc_tracer_cm);
}
void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
if (_concurrent_phase_status == ConcPhaseNotStarted) {
return;
void G1ConcurrentMark::concurrent_cycle_end() {
_g1h->trace_heap_after_gc(_gc_tracer_cm);
if (has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
}
uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
if (old_val == ConcPhaseStarted) {
_g1h->gc_timer_cm()->register_gc_concurrent_end();
// If 'end_timer' is true, we came here to end the timer, which requires that
// the concurrent phase has ended. We must end it before changing the status to
// 'ConcPhaseNotStarted' to prevent 'ConcurrentMarkThread' from starting a new
// concurrent phase.
if (end_timer) {
_g1h->gc_timer_cm()->register_gc_end();
}
old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
} else {
do {
// Let other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
os::naked_short_sleep(1);
} while (_concurrent_phase_status != ConcPhaseNotStarted);
}
_gc_timer_cm->register_gc_end();
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}
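concurrent_cycle_start()/concurrent_cycle_end() replace the cmpxchg state machine with a plain bracket around the cycle: start the timer and tracer up front, end them (reporting a concurrent mode failure on abort) at the close. A sketch of that bracket shape, where Timer and Cycle are stand-ins, not the HotSpot ConcurrentGCTimer/G1OldTracer:

// cycle_model.cpp - the start/end bracket replacing the cmpxchg registration.
#include <chrono>
#include <cstdio>

struct Timer {
  std::chrono::steady_clock::time_point _start;
  void register_gc_start() { _start = std::chrono::steady_clock::now(); }
  void register_gc_end() {
    long long ms = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("concurrent cycle: %lld ms\n", ms);
  }
};

struct Cycle {
  Timer _timer;
  void start() {
    _timer.register_gc_start();  // report_gc_start / trace heap would follow
  }
  void end(bool aborted) {
    if (aborted) {
      std::printf("concurrent mode failure\n");  // report_concurrent_mode_failure analogue
    }
    _timer.register_gc_end();    // then report_gc_end with time partitions
  }
};

int main() {
  Cycle c;
  c.start();
  c.end(false);
  return 0;
}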
void G1ConcurrentMark::register_concurrent_phase_end() {
register_concurrent_phase_end_common(false);
}
void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
register_concurrent_phase_end_common(true);
}
void G1ConcurrentMark::markFromRoots() {
void G1ConcurrentMark::mark_from_roots() {
// we might be tempted to assert that:
// assert(asynch == !SafepointSynchronize::is_at_safepoint(),
// "inconsistent argument?");
@ -1110,7 +1095,6 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
if (has_overflown()) {
// Oops. We overflowed. Restart concurrent marking.
_restart_for_overflow = true;
log_develop_trace(gc)("Remark led to restart for overflow.");
// Verify the heap w.r.t. the previous marking bitmap.
if (VerifyDuringGC) {
@ -1124,7 +1108,7 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
reset_marking_state();
} else {
{
GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("Aggregate Data", _gc_timer_cm);
// Aggregate the per-task counting data that we have accumulated
// while marking.
@ -1163,7 +1147,7 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1p->record_concurrent_mark_remark_end();
G1CMIsAliveClosure is_alive(g1h);
g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
_gc_tracer_cm->report_object_count_after_gc(&is_alive);
}
// Base class of the closures that finalize and verify the
@ -1752,11 +1736,9 @@ void G1ConcurrentMark::cleanup() {
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
g1h->allocation_context_stats().update_after_mark();
g1h->trace_heap_after_concurrent_cycle();
}
void G1ConcurrentMark::completeCleanup() {
void G1ConcurrentMark::complete_cleanup() {
if (has_aborted()) return;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -2045,7 +2027,7 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
{
GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
ReferenceProcessor* rp = g1h->ref_processor_cm();
@ -2102,8 +2084,8 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
&g1_keep_alive,
&g1_drain_mark_stack,
executor,
g1h->gc_timer_cm());
g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
_gc_timer_cm);
_gc_tracer_cm->report_gc_reference_stats(stats);
// The do_oop work routines of the keep_alive and drain_marking_stack
// oop closures will set the has_overflown flag if we overflow the
@ -2134,28 +2116,24 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
assert(_markStack.isEmpty(), "Marking should have completed");
// Unload Klasses, String, Symbols, Code Cache, etc.
{
GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());
if (ClassUnloadingWithConcurrentMark) {
bool purged_classes;
if (ClassUnloadingWithConcurrentMark) {
bool purged_classes;
{
GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
}
{
GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
{
GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
}
if (G1StringDedup::is_enabled()) {
GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
G1StringDedup::unlink(&g1_is_alive);
{
GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
}
if (G1StringDedup::is_enabled()) {
GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
G1StringDedup::unlink(&g1_is_alive);
}
}
void G1ConcurrentMark::swapMarkBitMaps() {
@ -2273,7 +2251,7 @@ void G1ConcurrentMark::checkpointRootsFinalWork() {
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
g1h->ensure_parsability(false);
@ -2308,7 +2286,7 @@ void G1ConcurrentMark::checkpointRootsFinalWork() {
void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((G1CMBitMap*)_prevMarkBitMap)->clearRange(mr);
((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
}
HeapRegion*
@ -2605,7 +2583,7 @@ void G1ConcurrentMark::abort() {
// Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
// concurrent bitmap clearing.
_nextMarkBitMap->clearAll();
clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
// Note we cannot clear the previous marking bitmap here
// since VerifyDuringGC verifies the objects marked during
@ -2629,10 +2607,6 @@ void G1ConcurrentMark::abort() {
satb_mq_set.set_active_all_threads(
false, /* new active value */
satb_mq_set.is_active() /* expected_active */);
_g1h->trace_heap_after_concurrent_cycle();
_g1h->register_concurrent_cycle_end();
}
static void print_ms_time_info(const char* prefix, const char* name,
@ -3554,8 +3528,6 @@ G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
: _total_used_bytes(0), _total_capacity_bytes(0),
_total_prev_live_bytes(0), _total_next_live_bytes(0),
_hum_used_bytes(0), _hum_capacity_bytes(0),
_hum_prev_live_bytes(0), _hum_next_live_bytes(0),
_total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
MemRegion g1_reserved = g1h->g1_reserved();
@ -3595,36 +3567,6 @@ G1PrintRegionLivenessInfoClosure(const char* phase_name)
"(bytes)", "(bytes)");
}
// It takes a pointer to one of the _hum_* fields, deduces the corresponding
// value for a region in a humongous region series (either the region size,
// or what's left of it if the _hum_* field is < the region size), and
// updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
size_t bytes = 0;
// The > 0 check is to deal with the prev and next live bytes which
// could be 0.
if (*hum_bytes > 0) {
bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
*hum_bytes -= bytes;
}
return bytes;
}
// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
size_t* capacity_bytes,
size_t* prev_live_bytes,
size_t* next_live_bytes) {
assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
*used_bytes = get_hum_bytes(&_hum_used_bytes);
*capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
*prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
*next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}
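A worked example of the distribution these (now removed) helpers performed: with 1M regions, a 2.5M humongous object reports 1M from the "starts humongous" region, then 1M and 0.5M from the two "continues humongous" regions, draining the accumulator to zero. A standalone sketch:

// hum_model.cpp - worked example of distributing humongous bytes over a series.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t take_hum_bytes(size_t* hum_bytes, size_t grain) {
  size_t bytes = 0;
  if (*hum_bytes > 0) {  // prev/next live bytes may legitimately be 0
    bytes = std::min(grain, *hum_bytes);
    *hum_bytes -= bytes;
  }
  return bytes;
}

int main() {
  const size_t grain = (size_t)1 << 20;        // 1M regions
  size_t hum_used = grain * 5 / 2;             // a 2.5M humongous object
  for (int region = 0; region < 3; region++) { // prints 1048576, 1048576, 524288
    std::printf("region %d reports %zu bytes\n",
                region, take_hum_bytes(&hum_used, grain));
  }
  return 0;                                    // hum_used has drained to 0
}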
bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
const char* type = r->get_type_str();
HeapWord* bottom = r->bottom();
@ -3637,24 +3579,6 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
size_t remset_bytes = r->rem_set()->mem_size();
size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
if (r->is_starts_humongous()) {
assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
_hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
"they should have been zeroed after the last time we used them");
// Set up the _hum_* fields.
_hum_capacity_bytes = capacity_bytes;
_hum_used_bytes = used_bytes;
_hum_prev_live_bytes = prev_live_bytes;
_hum_next_live_bytes = next_live_bytes;
get_hum_bytes(&used_bytes, &capacity_bytes,
&prev_live_bytes, &next_live_bytes);
end = bottom + HeapRegion::GrainWords;
} else if (r->is_continues_humongous()) {
get_hum_bytes(&used_bytes, &capacity_bytes,
&prev_live_bytes, &next_live_bytes);
assert(end == bottom + HeapRegion::GrainWords, "invariant");
}
_total_used_bytes += used_bytes;
_total_capacity_bytes += capacity_bytes;
_total_prev_live_bytes += prev_live_bytes;

View File

@ -34,6 +34,8 @@ class G1CollectedHeap;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
@ -139,10 +141,7 @@ class G1CMBitMap : public G1CMBitMapRO {
inline void clear(HeapWord* addr);
inline bool parMark(HeapWord* addr);
void clearRange(MemRegion mr);
// Clear the whole mark bitmap.
void clearAll();
void clear_range(MemRegion mr);
};
// Represents a marking stack used by ConcurrentMarking in the G1 collector.
@ -352,17 +351,9 @@ protected:
// time of remark.
volatile bool _concurrent_marking_in_progress;
// There is a potential race between ConcurrentMarkThread and the VMThread (via
// ConcurrentMark::abort()) calling ConcurrentGCTimer::register_gc_concurrent_end().
// This variable keeps track of the concurrent phase to resolve that race.
volatile uint _concurrent_phase_status;
// Concurrent phase is not yet started.
static const uint ConcPhaseNotStarted = 0;
// Concurrent phase is started.
static const uint ConcPhaseStarted = 1;
// The thread calling ConcurrentGCTimer::register_gc_concurrent_end() is ending the
// concurrent phase, so other threads should wait until the status changes back to
// ConcPhaseNotStarted.
static const uint ConcPhaseStopping = 2;
ConcurrentGCTimer* _gc_timer_cm;
G1OldTracer* _gc_tracer_cm;
// All of these times are in ms
NumberSeq _init_times;
@ -497,6 +488,9 @@ protected:
// end_timer, true to end gc timer after ending concurrent phase.
void register_concurrent_phase_end_common(bool end_timer);
// Clear the given bitmap in parallel using the given WorkGang. If may_yield is
// true, periodically insert checks to see if this method should exit prematurely.
void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
public:
// Manipulation of the global mark stack.
// The push and pop operations are used by tasks for transfers
@ -530,10 +524,8 @@ public:
_concurrent_marking_in_progress = false;
}
void register_concurrent_phase_start(const char* title);
void register_concurrent_phase_end();
// Ends both concurrent phase and timer.
void register_concurrent_gc_end_and_stop_timer();
void concurrent_cycle_start();
void concurrent_cycle_end();
void update_accum_task_vtime(int i, double vtime) {
_accum_task_vtime[i] += vtime;
@ -585,8 +577,13 @@ public:
uint worker_id,
HeapRegion* hr = NULL);
// Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap();
// Prepare internal data structures for the next mark cycle. This includes clearing
// the next mark bitmap and some internal data structures. This method is intended
// to be called concurrently to the mutator. It will yield to safepoint requests.
void cleanup_for_next_mark();
// Clear the previous marking bitmap during safepoint.
void clear_prev_bitmap(WorkGang* workers);
// Return whether the next mark bitmap has no marks set. To be used for assertions
// only. Will not yield to pause requests.
@ -603,18 +600,18 @@ public:
// Scan all the root regions and mark everything reachable from
// them.
void scanRootRegions();
void scan_root_regions();
// Scan a single root region and mark everything reachable from it.
void scanRootRegion(HeapRegion* hr, uint worker_id);
// Do concurrent phase of marking, to a tentative transitive closure.
void markFromRoots();
void mark_from_roots();
void checkpointRootsFinal(bool clear_all_soft_refs);
void checkpointRootsFinalWork();
void cleanup();
void completeCleanup();
void complete_cleanup();
// Mark in the previous bitmap. NB: this is usually read-only, so use
// this carefully!
@ -730,6 +727,9 @@ public:
return _completed_initialization;
}
ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
protected:
// Clear all the per-task bitmaps and arrays used to store the
// counting data.
@ -996,18 +996,6 @@ private:
size_t _total_prev_live_bytes;
size_t _total_next_live_bytes;
// These are set up when we come across a "starts humongous" region
// (as this is where most of this information is stored, not in the
// subsequent "continues humongous" regions). After that, for every
// region in a given humongous region series we deduce the right
// values for it by simply subtracting the appropriate amount from
// these fields. All these values should reach 0 after we've visited
// the last region in the series.
size_t _hum_used_bytes;
size_t _hum_capacity_bytes;
size_t _hum_prev_live_bytes;
size_t _hum_next_live_bytes;
// Accumulator for the remembered set size
size_t _total_remset_bytes;
@ -1026,11 +1014,6 @@ private:
return (double) val / (double) M;
}
// See the .cpp file.
size_t get_hum_bytes(size_t* hum_bytes);
void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
size_t* prev_live_bytes, size_t* next_live_bytes);
public:
// The header and footer are printed in the constructor and
// destructor respectively.

View File

@ -110,15 +110,9 @@ void G1EvacStats::adjust_desired_plab_sz() {
size_t const cur_plab_sz = (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy);
// Take historical weighted average
_filter.sample(cur_plab_sz);
// Clip from above and below, and align to object boundary
size_t plab_sz;
plab_sz = MAX2(min_size(), (size_t)_filter.average());
plab_sz = MIN2(max_size(), plab_sz);
plab_sz = align_object_size(plab_sz);
// Latch the result
_desired_net_plab_sz = plab_sz;
_desired_net_plab_sz = MAX2(min_size(), (size_t)_filter.average());
log_sizing(cur_plab_sz, plab_sz);
log_sizing(cur_plab_sz, _desired_net_plab_sz);
// Clear accumulators for next round.
reset();
}
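The sizing above latches a weighted average of per-round samples and now clips only from below. A minimal sketch of that filter-then-floor shape, with a simple exponentially weighted filter standing in for HotSpot's AdaptiveWeightedAverage (the 0.7 weight and 256-word floor are illustrative, not G1's values):

// plab_model.cpp - filter-then-floor shape of the PLAB sizing above.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <initializer_list>

struct Filter {
  double _avg;
  double _weight;  // fraction of history kept on each sample
  explicit Filter(double w) : _avg(0.0), _weight(w) {}
  void sample(double v) { _avg = _weight * _avg + (1.0 - _weight) * v; }
  double average() const { return _avg; }
};

int main() {
  Filter filter(0.7);
  const size_t min_plab_words = 256;  // stand-in for min_size()
  for (double sz : {1000.0, 4000.0, 2000.0}) {
    filter.sample(sz);
  }
  // Clip from below only, as in the new code; no upper clip, no alignment.
  size_t desired = std::max(min_plab_words, (size_t)filter.average());
  std::printf("desired PLAB size: %zu words\n", desired);
  return 0;
}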

View File

@ -0,0 +1,157 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics) :
_g1(g1),
_analytics(analytics),
_num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
clear_ratio_check_data();
}
void G1HeapSizingPolicy::clear_ratio_check_data() {
_ratio_over_threshold_count = 0;
_ratio_over_threshold_sum = 0.0;
_pauses_since_start = 0;
}
size_t G1HeapSizingPolicy::expansion_amount() {
double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
assert(GCTimeRatio > 0,
"we should have set it to a default value set_g1_gc_flags() "
"if a user set it to 0");
const double gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
double threshold = gc_overhead_perc;
size_t expand_bytes = 0;
// If the heap is at less than half its maximum size, scale the threshold down,
// but not below 1. Thus the smaller the heap is, the more likely it is to expand,
// though the scaling code will likely keep the increase small.
if (_g1->capacity() <= _g1->max_capacity() / 2) {
threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
threshold = MAX2(threshold, 1.0);
}
// If the last GC time ratio is over the threshold, increment the count of
// times it has been exceeded, and add this ratio to the sum of exceeded
// ratios.
if (last_gc_overhead > threshold) {
_ratio_over_threshold_count++;
_ratio_over_threshold_sum += last_gc_overhead;
}
// Check if we've had enough GC time ratio checks that were over the
// threshold to trigger an expansion. We'll also expand if we've
// reached the end of the history buffer and the average of all entries
// is still over the threshold. This indicates that a smaller number of GCs were
// long enough to make the average exceed the threshold.
bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
(filled_history_buffer && (recent_gc_overhead > threshold))) {
size_t min_expand_bytes = HeapRegion::GrainBytes;
size_t reserved_bytes = _g1->max_capacity();
size_t committed_bytes = _g1->capacity();
size_t uncommitted_bytes = reserved_bytes - committed_bytes;
size_t expand_bytes_via_pct =
uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
double scale_factor = 1.0;
// If the current size is less than 1/4 of the Initial heap size, expand
// by half of the delta between the current and Initial sizes, i.e. grow
// back quickly.
//
// Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
// the available expansion space, whichever is smaller, as the base
// expansion size. Then possibly scale this size according to how much the
// threshold has (on average) been exceeded by. If the delta is small
// (less than the StartScaleDownAt value), scale the size down linearly, but
// not by less than MinScaleDownFactor. If the delta is large (greater than
// the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
// times the base size. The scaling will be linear in the range from
// StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
// ScaleUpRange sets the rate of scaling up.
if (committed_bytes < InitialHeapSize / 4) {
expand_bytes = (InitialHeapSize - committed_bytes) / 2;
} else {
double const MinScaleDownFactor = 0.2;
double const MaxScaleUpFactor = 2;
double const StartScaleDownAt = gc_overhead_perc;
double const StartScaleUpAt = gc_overhead_perc * 1.5;
double const ScaleUpRange = gc_overhead_perc * 2.0;
double ratio_delta;
if (filled_history_buffer) {
ratio_delta = recent_gc_overhead - threshold;
} else {
ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
}
expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
if (ratio_delta < StartScaleDownAt) {
scale_factor = ratio_delta / StartScaleDownAt;
scale_factor = MAX2(scale_factor, MinScaleDownFactor);
} else if (ratio_delta > StartScaleUpAt) {
scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
}
}
log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
"recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
// Ensure the expansion size is at least the minimum growth amount
// and at most the remaining uncommitted byte size.
expand_bytes = MAX2(expand_bytes, min_expand_bytes);
expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
clear_ratio_check_data();
} else {
// An expansion was not triggered. If we've started counting, increment
// the number of checks we've made in the current window. If we've
// reached the end of the window without resizing, clear the counters to
// start again the next time we see a ratio above the threshold.
if (_ratio_over_threshold_count > 0) {
_pauses_since_start++;
if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
clear_ratio_check_data();
}
}
}
return expand_bytes;
}
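Worked numbers for the threshold computation at the top of expansion_amount(), assuming G1's default GCTimeRatio of 9 (overhead target 100/(1+9) = 10%) and a hypothetical 8192M reservation; small committed sizes pull the threshold down, making expansion more likely:

// threshold_model.cpp - worked numbers for the expansion threshold above.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <initializer_list>

static double threshold(size_t capacity, size_t max_capacity, unsigned gc_time_ratio) {
  double t = 100.0 * (1.0 / (1.0 + gc_time_ratio));  // 10.0 for GCTimeRatio=9
  if (capacity <= max_capacity / 2) {                // small heap: lower the bar
    t *= (double)capacity / (double)(max_capacity / 2);
    t = std::max(t, 1.0);
  }
  return t;
}

int main() {
  const size_t max_cap = 8192;  // hypothetical reservation, in megabytes
  for (size_t cap : {1024, 4096, 8192}) {
    // 1024M -> 2.50%, 4096M -> 10.00%, 8192M -> 10.00%
    std::printf("committed %zuM -> threshold %.2f%%\n",
                cap, threshold(cap, max_cap, 9));
  }
  return 0;
}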

View File

@ -0,0 +1,63 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP
#define SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP
#include "memory/allocation.hpp"
class G1Analytics;
class G1CollectedHeap;
class G1HeapSizingPolicy: public CHeapObj<mtGC> {
// MinOverThresholdForGrowth is the minimum number of pause time ratios that must
// exceed GCTimeRatio before a heap expansion is triggered. It must be less than
// the number of recorded pause times in G1Analytics.
const static uint MinOverThresholdForGrowth = 4;
const G1CollectedHeap* _g1;
const G1Analytics* _analytics;
const uint _num_prev_pauses_for_heuristics;
// Ratio check data for determining if heap growth is necessary.
uint _ratio_over_threshold_count;
double _ratio_over_threshold_sum;
uint _pauses_since_start;
protected:
G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics);
public:
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();
static G1HeapSizingPolicy* create(const G1CollectedHeap* g1, const G1Analytics* analytics);
};
#endif // SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP

View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1, const G1Analytics* analytics) {
return new G1HeapSizingPolicy(g1, analytics);
}
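The protected constructor plus static create() lets alternate builds substitute a G1HeapSizingPolicy subclass without touching call sites. A sketch of that factory idiom with stand-in names (SizingPolicy is hypothetical, not a HotSpot class):

// factory_model.cpp - the protected-constructor-plus-create() idiom.
#include <cstddef>
#include <cstdio>

class SizingPolicy {
protected:
  SizingPolicy() {}                 // only create() and subclasses may construct
public:
  virtual ~SizingPolicy() {}
  virtual size_t expansion_amount() { return 0; }
  static SizingPolicy* create();    // an alternate build can return a subclass
};

SizingPolicy* SizingPolicy::create() {
  return new SizingPolicy();
}

int main() {
  SizingPolicy* policy = SizingPolicy::create();
  std::printf("expand by %zu bytes\n", policy->expansion_amount());
  delete policy;
  return 0;
}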

View File

@ -82,8 +82,8 @@ public:
void G1HeapTransition::print() {
Data after(_g1_heap);
size_t eden_capacity_bytes_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
size_t survivor_capacity_bytes_after_gc = _g1_heap->g1_policy()->max_survivor_regions();
size_t eden_capacity_length_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
size_t survivor_capacity_length_after_gc = _g1_heap->g1_policy()->max_survivor_regions();
DetailedUsage usage;
if (log_is_enabled(Trace, gc, heap)) {
@ -100,11 +100,11 @@ void G1HeapTransition::print() {
}
log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._eden_length, after._eden_length, eden_capacity_bytes_after_gc);
_before._eden_length, after._eden_length, eden_capacity_length_after_gc);
log_trace(gc, heap)(" Used: 0K, Waste: 0K");
log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._survivor_length, after._survivor_length, survivor_capacity_bytes_after_gc);
_before._survivor_length, after._survivor_length, survivor_capacity_length_after_gc);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);

View File

@ -36,7 +36,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
_use_cache = true;
_hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
_hot_cache = _hot_cache_memory.allocate(_hot_cache_size);
_hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);
reset_hot_cache_internal();
@ -51,7 +51,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
G1HotCardCache::~G1HotCardCache() {
if (default_use_cache()) {
assert(_hot_cache != NULL, "Logic");
_hot_cache_memory.free();
ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
_hot_cache = NULL;
}
}
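The hot card cache change above swaps a member allocator object for static allocate/free calls, so the cache no longer carries allocator state between allocation and release. A sketch of an allocator with that stateless shape (a stand-in, not HotSpot's ArrayAllocator):

// alloc_model.cpp - stateless allocate/free shape, standing in for ArrayAllocator.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

template <typename E>
struct StaticArrayAllocator {
  static E* allocate(size_t length) {
    return static_cast<E*>(std::calloc(length, sizeof(E)));  // zero-initialized
  }
  static void free(E* addr, size_t /*length*/) {
    std::free(addr);
  }
};

int main() {
  const size_t n = (size_t)1 << 10;  // like (size_t)1 << G1ConcRSLogCacheSize
  int** cache = StaticArrayAllocator<int*>::allocate(n);
  std::printf("cache[0] = %p\n", (void*)cache[0]);  // NULL from calloc
  StaticArrayAllocator<int*>::free(cache, n);
  return 0;
}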

View File

@ -61,7 +61,6 @@ class G1HotCardCache: public CHeapObj<mtGC> {
G1CardCounts _card_counts;
ArrayAllocator<jbyte*, mtGC> _hot_cache_memory;
// The card cache table
jbyte** _hot_cache;

View File

@ -122,7 +122,7 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", gc_timer());
GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -137,34 +137,49 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
&follow_code_closure);
}
// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
assert(rp == g1h->ref_processor_stw(), "Sanity");
{
GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());
rp->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
rp->process_discovered_references(&GenMarkSweep::is_alive,
&GenMarkSweep::keep_alive,
&GenMarkSweep::follow_stack_closure,
NULL,
gc_timer());
gc_tracer()->report_gc_reference_stats(stats);
// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
assert(rp == g1h->ref_processor_stw(), "Sanity");
rp->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
rp->process_discovered_references(&GenMarkSweep::is_alive,
&GenMarkSweep::keep_alive,
&GenMarkSweep::follow_stack_closure,
NULL,
gc_timer());
gc_tracer()->report_gc_reference_stats(stats);
}
// This is the point where the entire marking should have completed.
assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
{
GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());
// Unload nmethods.
CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
// Unload nmethods.
CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);
// Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
}
{
GCTraceTime(Debug, gc, phases) trace("Scrub String and Symbol Tables", gc_timer());
// Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
}
if (G1StringDedup::is_enabled()) {
GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", gc_timer());
G1StringDedup::unlink(&GenMarkSweep::is_alive);
}
if (VerifyDuringGC) {
HandleMark hm; // handle scope
@ -197,7 +212,7 @@ void G1MarkSweep::mark_sweep_phase2() {
// phase2, phase3 and phase4, but the ValidateMarkSweep live oops
// tracking expects us to do so. See comment under phase4.
GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", gc_timer());
GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());
prepare_compaction();
}
@ -220,17 +235,11 @@ class G1AdjustPointersClosure: public HeapRegionClosure {
}
};
class G1AlwaysTrueClosure: public BoolObjectClosure {
public:
bool do_object_b(oop p) { return true; }
};
static G1AlwaysTrueClosure always_true;
void G1MarkSweep::mark_sweep_phase3() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// Adjust the pointers to reflect the new locations
GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", gc_timer());
GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
@ -248,7 +257,7 @@ void G1MarkSweep::mark_sweep_phase3() {
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
JNIHandles::weak_oops_do(&always_true, &GenMarkSweep::adjust_pointer_closure);
JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
if (G1StringDedup::is_enabled()) {
G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
@ -291,7 +300,7 @@ void G1MarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime(Trace, gc) tm("Phase 4: Move objects", gc_timer());
GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());
G1SpaceCompactClosure blk;
g1h->heap_region_iterate(&blk);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -178,7 +178,7 @@ void G1MonitoringSupport::recalculate_sizes() {
// of a GC).
uint young_list_length = g1->young_list()->length();
uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
uint survivor_list_length = g1->young_list()->survivor_length();
assert(young_list_length >= survivor_list_length, "invariant");
uint eden_list_length = young_list_length - survivor_list_length;
// Max length includes any potential extensions to the young gen

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ void G1StringDedup::initialize() {
void G1StringDedup::stop() {
assert(is_enabled(), "String deduplication not enabled");
G1StringDedupThread::stop();
G1StringDedupThread::thread()->stop();
}
bool G1StringDedup::is_candidate_from_mark(oop obj) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,11 +81,9 @@ void G1StringDedupThread::deduplicate_shared_strings(G1StringDedupStat& stat) {
StringTable::shared_oops_do(&sharedStringDedup);
}
- void G1StringDedupThread::run() {
+ void G1StringDedupThread::run_service() {
G1StringDedupStat total_stat;
- initialize_in_thread();
- wait_for_universe_init();
deduplicate_shared_strings(total_stat);
deduplicate_shared_strings(total_stat);
// Main loop
@@ -96,7 +94,7 @@ void G1StringDedupThread::run() {
// Wait for the queue to become non-empty
G1StringDedupQueue::wait();
- if (_should_terminate) {
+ if (should_terminate()) {
break;
}
@@ -133,23 +131,10 @@ void G1StringDedupThread::run() {
}
}
- terminate();
}
- void G1StringDedupThread::stop() {
- {
- MonitorLockerEx ml(Terminator_lock);
- _thread->_should_terminate = true;
- }
+ void G1StringDedupThread::stop_service() {
G1StringDedupQueue::cancel_wait();
- {
- MonitorLockerEx ml(Terminator_lock);
- while (!_thread->_has_terminated) {
- ml.wait();
- }
- }
}
void G1StringDedupThread::print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,14 +45,14 @@ private:
void print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
+ void run_service();
+ void stop_service();
public:
static void create();
- static void stop();
static G1StringDedupThread* thread();
- virtual void run();
void deduplicate_shared_strings(G1StringDedupStat& stat);
};


@@ -32,32 +32,6 @@
#include "gc/g1/suspendibleThreadSet.hpp"
#include "runtime/mutexLocker.hpp"
- void G1YoungRemSetSamplingThread::run() {
- initialize_in_thread();
- wait_for_universe_init();
- run_service();
- terminate();
- }
- void G1YoungRemSetSamplingThread::stop() {
- // it is ok to take late safepoints here, if needed
- {
- MutexLockerEx mu(Terminator_lock);
- _should_terminate = true;
- }
- stop_service();
- {
- MutexLockerEx mu(Terminator_lock);
- while (!_has_terminated) {
- Terminator_lock->wait();
- }
- }
- }
G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() :
ConcurrentGCThread(),
_monitor(Mutex::nonleaf,
@@ -70,7 +44,7 @@ G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() :
void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
- if (!_should_terminate) {
+ if (!should_terminate()) {
uintx waitms = G1ConcRefinementServiceIntervalMillis; // 300, really should be?
_monitor.wait(Mutex::_no_safepoint_check_flag, waitms);
}
@@ -79,7 +53,7 @@ void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
void G1YoungRemSetSamplingThread::run_service() {
double vtime_start = os::elapsedVTime();
- while (!_should_terminate) {
+ while (!should_terminate()) {
sample_young_list_rs_lengths();
if (os::supports_vtime()) {


@@ -55,9 +55,6 @@ private:
public:
G1YoungRemSetSamplingThread();
double vtime_accum() { return _vtime_accum; }
- virtual void run();
- void stop();
};
#endif // SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP
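Both the string-dedup and remembered-set-sampling threads lose their hand-rolled run()/stop() in this diff because the base class now owns the lifecycle and subclasses supply only run_service()/stop_service(). A hedged sketch of that template-method split, using std::atomic where HotSpot uses Terminator_lock and monitor waits (all names here are illustrative):

#include <atomic>

class ConcurrentThreadSketch {
  std::atomic<bool> _should_terminate{false};
  std::atomic<bool> _has_terminated{false};
protected:
  bool should_terminate() const { return _should_terminate.load(); }
  virtual void run_service() = 0;   // the thread's actual loop
  virtual void stop_service() = 0;  // wake the loop from any wait it is in
public:
  virtual ~ConcurrentThreadSketch() {}
  void run() {                      // fixed lifecycle, no longer per-subclass
    // (initialization handshake happens here, once, in the base class)
    run_service();
    _has_terminated.store(true);    // termination handshake, also shared
  }
  void stop() {
    _should_terminate.store(true);
    stop_service();
    while (!_has_terminated.load()) { /* spin; HotSpot waits on a monitor */ }
  }
};

With this shape, a subclass such as the sampling thread only needs a loop guarded by should_terminate() and a stop_service() that signals its private monitor, which is exactly what the surviving hunks show.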


@@ -233,10 +233,6 @@
"Raise a fatal VM exit out of memory failure in the event " \
" that heap expansion fails due to running out of swap.") \
\
- develop(uintx, G1ConcMarkForceOverflow, 0, \
- "The number of times we'll force an overflow during " \
- "concurrent marking") \
- \
experimental(uintx, G1MaxNewSizePercent, 60, \
"Percentage (0-100) of the heap size to use as default " \
" maximum young gen size.") \


@@ -43,16 +43,12 @@ PtrQueue::~PtrQueue() {
void PtrQueue::flush_impl() {
if (!_permanent && _buf != NULL) {
- if (_index == _sz) {
+ BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
+ if (is_empty()) {
// No work to do.
- qset()->deallocate_buffer(_buf);
+ qset()->deallocate_buffer(node);
} else {
- // We must NULL out the unused entries, then enqueue.
- size_t limit = byte_index_to_index(_index);
- for (size_t i = 0; i < limit; ++i) {
- _buf[i] = NULL;
- }
- qset()->enqueue_complete_buffer(_buf);
+ qset()->enqueue_complete_buffer(node);
}
_buf = NULL;
_index = 0;
@@ -74,7 +70,7 @@ void PtrQueue::enqueue_known_active(void* ptr) {
assert(_index <= _sz, "Invariant.");
}
- void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
+ void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
assert(_lock->owned_by_self(), "Required.");
// We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
@@ -82,7 +78,7 @@ void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
// have the same rank and we may get the "possible deadlock" message
_lock->unlock();
- qset()->enqueue_complete_buffer(buf);
+ qset()->enqueue_complete_buffer(node);
// We must relock only because the caller will unlock, for the normal
// case.
_lock->lock_without_safepoint_check();
@@ -157,10 +153,9 @@ void** PtrQueueSet::allocate_buffer() {
return BufferNode::make_buffer_from_node(node);
}
- void PtrQueueSet::deallocate_buffer(void** buf) {
+ void PtrQueueSet::deallocate_buffer(BufferNode* node) {
assert(_sz > 0, "Didn't set a buffer size.");
MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
- BufferNode *node = BufferNode::make_node_from_buffer(buf);
node->set_next(_fl_owner->_buf_free_list);
_fl_owner->_buf_free_list = node;
_fl_owner->_buf_free_list_sz++;
@@ -211,10 +206,10 @@ void PtrQueue::handle_zero_index() {
// preventing the subsequent the multiple enqueue, and
// install a newly allocated buffer below.
- void** buf = _buf; // local pointer to completed buffer
+ BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
_buf = NULL; // clear shared _buf field
- locking_enqueue_completed_buffer(buf); // enqueue completed buffer
+ locking_enqueue_completed_buffer(node); // enqueue completed buffer
// While the current thread was enqueueing the buffer another thread
// may have a allocated a new buffer and inserted it into this pointer
@@ -224,9 +219,11 @@ void PtrQueue::handle_zero_index() {
if (_buf != NULL) return;
} else {
- if (qset()->process_or_enqueue_complete_buffer(_buf)) {
+ BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
+ if (qset()->process_or_enqueue_complete_buffer(node)) {
// Recycle the buffer. No allocation.
- _sz = qset()->buffer_size();
+ assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
+ assert(_sz == qset()->buffer_size(), "invariant");
_index = _sz;
return;
_index = _sz;
return;
}
@@ -238,12 +235,12 @@
_index = _sz;
}
- bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
+ bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
if (Thread::current()->is_Java_thread()) {
// We don't lock. It is fine to be epsilon-precise here.
if (_max_completed_queue == 0 || _max_completed_queue > 0 &&
_n_completed_buffers >= _max_completed_queue + _completed_queue_padding) {
- bool b = mut_process_buffer(buf);
+ bool b = mut_process_buffer(node);
if (b) {
// True here means that the buffer hasn't been deallocated and the caller may reuse it.
return true;
@@ -251,14 +248,12 @@
}
}
// The buffer will be enqueued. The caller will have to get a new one.
- enqueue_complete_buffer(buf);
+ enqueue_complete_buffer(node);
return false;
}
- void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
+ void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
- BufferNode* cbn = BufferNode::make_node_from_buffer(buf);
- cbn->set_index(index);
cbn->set_next(NULL);
if (_completed_buffers_tail == NULL) {
assert(_completed_buffers_head == NULL, "Well-formedness");


@@ -33,6 +33,7 @@
// the addresses of modified old-generation objects. This type supports
// this operation.
+ class BufferNode;
class PtrQueueSet;
class PtrQueue VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
@@ -104,7 +105,7 @@ public:
// get into an infinite loop).
virtual bool should_enqueue_buffer() { return true; }
void handle_zero_index();
- void locking_enqueue_completed_buffer(void** buf);
+ void locking_enqueue_completed_buffer(BufferNode* node);
void enqueue_known_active(void* ptr);
@@ -136,6 +137,10 @@ public:
return ind / sizeof(void*);
}
+ static size_t index_to_byte_index(size_t ind) {
+ return ind * sizeof(void*);
+ }
// To support compiler.
protected:
@@ -186,10 +191,13 @@ public:
// Free a BufferNode.
static void deallocate(BufferNode* node);
- // Return the BufferNode containing the buffer.
- static BufferNode* make_node_from_buffer(void** buffer) {
- return reinterpret_cast<BufferNode*>(
- reinterpret_cast<char*>(buffer) - buffer_offset());
+ // Return the BufferNode containing the buffer, after setting its index.
+ static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
+ BufferNode* node =
+ reinterpret_cast<BufferNode*>(
+ reinterpret_cast<char*>(buffer) - buffer_offset());
+ node->set_index(index);
+ return node;
}
// Return the buffer for node.
@@ -243,7 +251,7 @@ protected:
// A mutator thread does the the work of processing a buffer.
// Returns "true" iff the work is complete (and the buffer may be
// deallocated).
- virtual bool mut_process_buffer(void** buf) {
+ virtual bool mut_process_buffer(BufferNode* node) {
ShouldNotReachHere();
return false;
}
@@ -267,13 +275,13 @@ public:
// Return an empty buffer to the free list. The "buf" argument is
// required to be a pointer to the head of an array of length "_sz".
- void deallocate_buffer(void** buf);
+ void deallocate_buffer(BufferNode* node);
// Declares that "buf" is a complete buffer.
- void enqueue_complete_buffer(void** buf, size_t index = 0);
+ void enqueue_complete_buffer(BufferNode* node);
// To be invoked by the mutator.
- bool process_or_enqueue_complete_buffer(void** buf);
+ bool process_or_enqueue_complete_buffer(BufferNode* node);
bool completed_buffers_exist_dirty() {
return _n_completed_buffers > 0;

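The new make_node_from_buffer(void**, size_t) above both recovers the node from its buffer pointer and records how much of the buffer is in use. A hedged sketch of the header-plus-array layout this pointer arithmetic assumes; NodeSketch and allocate_node are illustrative stand-ins, not HotSpot types:

#include <cstddef>
#include <cstdlib>
#include <new>

struct NodeSketch {
  NodeSketch* next;
  size_t index;   // byte index of the first live entry (the diff's addition)

  static size_t buffer_offset() { return sizeof(NodeSketch); }

  void** buffer() {   // node -> buffer
    return reinterpret_cast<void**>(reinterpret_cast<char*>(this) + buffer_offset());
  }
  static NodeSketch* from_buffer(void** buf, size_t idx) {   // buffer -> node
    NodeSketch* n = reinterpret_cast<NodeSketch*>(
        reinterpret_cast<char*>(buf) - buffer_offset());
    n->index = idx;   // mirrors the new set_index() call in the diff
    return n;
  }
};

inline NodeSketch* allocate_node(size_t slots) {
  // one allocation holds the header and its buffer array back-to-back
  void* mem = std::malloc(sizeof(NodeSketch) + slots * sizeof(void*));
  return new (mem) NodeSketch{nullptr, 0};
}

Carrying the index inside the node is what lets flush_impl drop its NULL-out loop and lets consumers of completed buffers jump straight to the live range instead of scanning for it.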

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,6 +100,10 @@ inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
return true;
}
+ inline bool retain_entry(const void* entry, G1CollectedHeap* heap) {
+ return requires_marking(entry, heap) && !heap->isMarkedNext((oop)entry);
+ }
// This method removes entries from a SATB buffer that will not be
// useful to the concurrent marking threads. Entries are retained if
// they require marking and are not already marked. Retained entries
@@ -114,43 +118,28 @@ void SATBMarkQueue::filter() {
return;
}
- // Used for sanity checking at the end of the loop.
- DEBUG_ONLY(size_t entries = 0; size_t retained = 0;)
assert(_index <= _sz, "invariant");
- void** limit = &buf[byte_index_to_index(_index)];
- void** src = &buf[byte_index_to_index(_sz)];
- void** dst = src;
- while (limit < src) {
- DEBUG_ONLY(entries += 1;)
- --src;
+ // Two-fingered compaction toward the end.
+ void** src = &buf[byte_index_to_index(_index)];
+ void** dst = &buf[byte_index_to_index(_sz)];
+ for ( ; src < dst; ++src) {
+ // Search low to high for an entry to keep.
void* entry = *src;
- // NULL the entry so that unused parts of the buffer contain NULLs
- // at the end. If we are going to retain it we will copy it to its
- // final place. If we have retained all entries we have visited so
- // far, we'll just end up copying it to the same place.
- *src = NULL;
- if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
- --dst;
- assert(*dst == NULL, "filtering destination should be clear");
- *dst = entry;
- DEBUG_ONLY(retained += 1;);
+ if (retain_entry(entry, g1h)) {
+ // Found keeper. Search high to low for an entry to discard.
+ while (src < --dst) {
+ if (!retain_entry(*dst, g1h)) {
+ *dst = entry; // Replace discard with keeper.
+ break;
+ }
+ }
+ // If discard search failed (src == dst), the outer loop will also end.
}
}
- size_t new_index = pointer_delta(dst, buf, 1);
- #ifdef ASSERT
- size_t entries_calc = (_sz - _index) / sizeof(void*);
- assert(entries == entries_calc, "the number of entries we counted "
- "should match the number of entries we calculated");
- size_t retained_calc = (_sz - new_index) / sizeof(void*);
- assert(retained == retained_calc, "the number of retained entries we counted "
- "should match the number of retained entries we calculated");
- #endif // ASSERT
- _index = new_index;
+ // dst points to the lowest retained entry, or the end of the buffer
+ // if all the entries were filtered out.
+ _index = pointer_delta(dst, buf, 1);
}
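The replacement loop is a classic two-fingered compaction. Restated as a standalone template over an arbitrary range, with keep() standing in for retain_entry() (names are illustrative):

// Keepers end up packed in [result, hi); their relative order is not
// preserved, which is fine for a buffer of SATB entries.
template <typename T, typename Pred>
T* two_finger_filter(T* lo, T* hi, Pred keep) {
  T* src = lo;
  T* dst = hi;
  for ( ; src < dst; ++src) {
    if (keep(*src)) {            // low-to-high finger found a keeper
      while (src < --dst) {
        if (!keep(*dst)) {       // high-to-low finger found a discard
          *dst = *src;           // replace the discard with the keeper
          break;
        }
      }
      // if the discard search failed (src == dst), the outer loop ends too
    }
  }
  return dst;   // lowest retained slot, or hi if nothing was kept
}

Compared with the deleted copy-down loop, this does no copying at all for a buffer that is entirely keepers or entirely discards, and it never writes NULLs into the vacated prefix; _index alone records where the live range starts.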
// This method will first apply the above filtering to the buffer. If
@@ -286,19 +275,11 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl)
}
if (nd != NULL) {
void **buf = BufferNode::make_buffer_from_node(nd);
- // Skip over NULL entries at beginning (e.g. push end) of buffer.
+ // Filtering can result in non-full completed buffers; see
+ // should_enqueue_buffer.
- assert(_sz % sizeof(void*) == 0, "invariant");
- size_t limit = SATBMarkQueue::byte_index_to_index(_sz);
- for (size_t i = 0; i < limit; ++i) {
- if (buf[i] != NULL) {
- // Found the end of the block of NULLs; process the remainder.
- cl->do_buffer(buf + i, limit - i);
- break;
- }
- }
- deallocate_buffer(buf);
+ size_t index = SATBMarkQueue::byte_index_to_index(nd->index());
+ size_t size = SATBMarkQueue::byte_index_to_index(_sz);
+ assert(index <= size, "invariant");
+ cl->do_buffer(buf + index, size - index);
+ deallocate_buffer(nd);
return true;
return true;
} else {
return false;
@@ -355,7 +336,7 @@ void SATBMarkQueueSet::abandon_partial_marking() {
while (buffers_to_delete != NULL) {
BufferNode* nd = buffers_to_delete;
buffers_to_delete = nd->next();
- deallocate_buffer(BufferNode::make_buffer_from_node(nd));
+ deallocate_buffer(nd);
}
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
// So we can safely manipulate these queues.


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,9 +115,8 @@ public:
// If there exists some completed buffer, pop and process it, and
// return true. Otherwise return false. Processing a buffer
- // consists of applying the closure to the buffer range starting
- // with the first non-NULL entry to the end of the buffer; the
- // leading entries may be NULL due to filtering.
+ // consists of applying the closure to the active range of the
+ // buffer; the leading entries may be excluded due to filtering.
bool apply_closure_to_completed_buffer(SATBBufferClosure* cl);
#ifndef PRODUCT


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -205,30 +205,18 @@ void VM_G1IncCollectionPause::doit_epilogue() {
}
void VM_CGC_Operation::acquire_pending_list_lock() {
- assert(_needs_pll, "don't call this otherwise");
- // The caller may block while communicating
- // with the SLT thread in order to acquire/release the PLL.
- SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
- if (slt != NULL) {
- slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
- } else {
- SurrogateLockerThread::report_missing_slt();
- }
+ _pending_list_locker.lock();
}
void VM_CGC_Operation::release_and_notify_pending_list_lock() {
- assert(_needs_pll, "don't call this otherwise");
- // The caller may block while communicating
- // with the SLT thread in order to acquire/release the PLL.
- ConcurrentMarkThread::slt()->
- manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
+ _pending_list_locker.unlock();
}
void VM_CGC_Operation::doit() {
GCIdMark gc_id_mark(_gc_id);
GCTraceCPUTime tcpu;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- GCTraceTime(Info, gc) t(_printGCMessage, g1h->gc_timer_cm(), GCCause::_no_gc, true);
+ GCTraceTime(Info, gc) t(_printGCMessage, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true);
IsGCActiveMark x;
_cl->do_void();
}
@@ -236,10 +224,9 @@ void VM_CGC_Operation::doit() {
bool VM_CGC_Operation::doit_prologue() {
// Note the relative order of the locks must match that in
// VM_GC_Operation::doit_prologue() or deadlocks can occur
- if (_needs_pll) {
+ if (_needs_pending_list_lock) {
acquire_pending_list_lock();
}
Heap_lock->lock();
return true;
}
@@ -248,7 +235,7 @@ void VM_CGC_Operation::doit_epilogue() {
// Note the relative order of the unlocks must match that in
// VM_GC_Operation::doit_epilogue()
Heap_lock->unlock();
- if (_needs_pll) {
+ if (_needs_pending_list_lock) {
release_and_notify_pending_list_lock();
}
}
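With the SurrogateLockerThread plumbing gone, the operation simply brackets Heap_lock with a ReferencePendingListLocker. A hedged sketch of the ordering contract the surviving comments insist on; LockSketch and CgcOperationSketch are stand-ins, not HotSpot types:

struct LockSketch {
  void lock()   { /* acquire */ }
  void unlock() { /* release */ }
};

struct CgcOperationSketch {
  bool       _needs_pending_list_lock;
  LockSketch _pending_list_locker;   // stands in for ReferencePendingListLocker
  LockSketch _heap_lock;             // stands in for Heap_lock

  bool doit_prologue() {
    if (_needs_pending_list_lock) {
      _pending_list_locker.lock();   // outermost, matching VM_GC_Operation
    }
    _heap_lock.lock();
    return true;
  }
  void doit_epilogue() {
    _heap_lock.unlock();             // strict reverse order on the way out
    if (_needs_pending_list_lock) {
      _pending_list_locker.unlock();
    }
  }
};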


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/shared/gcId.hpp"
+ #include "gc/shared/referencePendingListLocker.hpp"
#include "gc/shared/vmGCOperations.hpp"
// VM_operations for the G1 collector.
@@ -102,10 +103,11 @@ public:
// Concurrent GC stop-the-world operations such as remark and cleanup;
// consider sharing these with CMS's counterparts.
class VM_CGC_Operation: public VM_Operation {
- VoidClosure* _cl;
- const char* _printGCMessage;
- bool _needs_pll;
- uint _gc_id;
+ VoidClosure* _cl;
+ const char* _printGCMessage;
+ bool _needs_pending_list_lock;
+ ReferencePendingListLocker _pending_list_locker;
+ uint _gc_id;
protected:
// java.lang.ref.Reference support
@@ -113,8 +115,8 @@ protected:
void release_and_notify_pending_list_lock();
public:
- VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll)
- : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll), _gc_id(GCId::current()) { }
+ VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pending_list_lock)
+ : _cl(cl), _printGCMessage(printGCMsg), _needs_pending_list_lock(needs_pending_list_lock), _gc_id(GCId::current()) {}
virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
virtual void doit();
virtual bool doit_prologue();


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -113,8 +113,8 @@ void GCTaskThread::run() {
}
if (!os::bind_to_processor(processor_id())) {
DEBUG_ONLY(
- warning("Couldn't bind GCTaskThread %u to processor %u",
- which(), processor_id());
+ log_warning(gc)("Couldn't bind GCTaskThread %u to processor %u",
+ which(), processor_id());
)
}
}
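Throughout this diff, bare warning(...) calls become tagged, printf-style unified-logging sites. A hedged sketch of the call shape they share; LogSiteSketch and log_warning_sketch are illustrative, while the real log_warning(gc) macro expands to a compile-time tag-typed logger:

#include <cstdarg>
#include <cstdio>

struct LogSiteSketch {
  const char* tags;
  void operator()(const char* fmt, ...) const {
    va_list ap;
    va_start(ap, fmt);
    std::printf("[warning][%s] ", tags);   // real output also carries uptime
    std::vprintf(fmt, ap);
    std::printf("\n");
    va_end(ap);
  }
};

// log_warning(gc)("...", ...) call sites keep their printf shape:
#define log_warning_sketch(tag) LogSiteSketch{#tag}

int main() {
  log_warning_sketch(gc)("Couldn't bind GCTaskThread %u to processor %u", 3u, 7u);
  return 0;
}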


@@ -325,8 +325,8 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
loop_count++;
if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
(loop_count % QueuedAllocationWarningCount == 0)) {
- warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
- " size=" SIZE_FORMAT, loop_count, size);
+ log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
+ log_warning(gc)("\tsize=" SIZE_FORMAT, size);
}
}


@@ -493,7 +493,7 @@ void PSMarkSweep::deallocate_stacks() {
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -523,6 +523,8 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Process reference objects found during marking
{
+ GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
ref_processor()->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references(
@@ -533,26 +535,37 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// This is the point where the entire marking should have completed.
assert(_marking_stack.is_empty(), "Marking should have completed");
- // Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
+ {
+ GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);
- // Unload nmethods.
- CodeCache::do_unloading(is_alive_closure(), purged_class);
+ // Unload classes and purge the SystemDictionary.
+ bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
- // Prune dead klasses from subklass/sibling/implementor lists.
- Klass::clean_weak_klass_links(is_alive_closure());
+ // Unload nmethods.
+ CodeCache::do_unloading(is_alive_closure(), purged_class);
- // Delete entries for dead interned strings.
- StringTable::unlink(is_alive_closure());
+ // Prune dead klasses from subklass/sibling/implementor lists.
+ Klass::clean_weak_klass_links(is_alive_closure());
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer);
+ // Delete entries for dead interned strings.
+ StringTable::unlink(is_alive_closure());
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer);
+ // Clean up unreferenced symbols in symbol table.
+ SymbolTable::unlink();
+ }
- // Clean up unreferenced symbols in symbol table.
- SymbolTable::unlink();
_gc_tracer->report_object_count_after_gc(is_alive_closure());
}
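The restructured tail of phase1 wraps each cleanup step in its own brace scope so a Debug-level timer covers exactly that work. A hedged sketch of the idiom; ScopedTimerSketch is illustrative, not GCTraceTime itself:

#include <chrono>
#include <cstdio>

class ScopedTimerSketch {
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedTimerSketch(const char* name)
      : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedTimerSketch() {
    double ms = std::chrono::duration<double, std::milli>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("[gc,phases] %s %.3fms\n", _name, ms);  // logged on scope exit
  }
};

void phase1_tail_sketch() {
  {
    ScopedTimerSketch t("Class Unloading");
    // do_unloading(), CodeCache::do_unloading(), clean_weak_klass_links()
  }
  {
    ScopedTimerSketch t("Scrub String Table");
    // StringTable::unlink()
  }
  {
    ScopedTimerSketch t("Scrub Symbol Table");
    // SymbolTable::unlink()
  }
}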
void PSMarkSweep::mark_sweep_phase2() {
- GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
// Now all live objects are marked, compute the new object addresses.
@@ -570,16 +583,9 @@ void PSMarkSweep::mark_sweep_phase2() {
old_gen->precompact();
}
- // This should be moved to the shared markSweep code!
- class PSAlwaysTrueClosure: public BoolObjectClosure {
- public:
- bool do_object_b(oop p) { return true; }
- };
- static PSAlwaysTrueClosure always_true;
void PSMarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
- GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
@@ -603,7 +609,7 @@ void PSMarkSweep::mark_sweep_phase3() {
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
// Global (weak) JNI handles
- JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
+ JNIHandles::weak_oops_do(adjust_pointer_closure());
CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_from_blobs);
@@ -619,7 +625,7 @@
void PSMarkSweep::mark_sweep_phase4() {
EventMark m("4 compact heap");
- GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
// All pointers are now adjusted, move objects accordingly
@@ -638,7 +644,7 @@ jlong PSMarkSweep::millis_since_last_gc() {
jlong ret_val = now - _time_of_last_gc;
// XXX See note in genCollectedHeap::millis_since_last_gc().
if (ret_val < 0) {
- NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
+ NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
return 0;
}
return ret_val;


@@ -309,7 +309,7 @@ bool PSOldGen::expand_to_reserved() {
const size_t remaining_bytes = virtual_space()->uncommitted_size();
if (remaining_bytes > 0) {
result = expand_by(remaining_bytes);
- DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
+ DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
}
return result;
}


@@ -195,10 +195,10 @@ const char* PSParallelCompact::space_names[] = {
};
void PSParallelCompact::print_region_ranges() {
- if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (!log_develop_is_enabled(Trace, gc, compaction)) {
return;
}
- LogHandle(gc, compaction, phases) log;
+ LogHandle(gc, compaction) log;
ResourceMark rm;
Universe::print_on(log.trace_stream());
log.trace("space bottom top end new_top");
@@ -225,7 +225,7 @@ print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
ParallelCompactData& sd = PSParallelCompact::summary_data();
size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
REGION_IDX_FORMAT " " PTR_FORMAT " "
REGION_IDX_FORMAT " " PTR_FORMAT " "
REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
@@ -258,14 +258,14 @@ print_generic_summary_data(ParallelCompactData& summary_data,
++i;
}
- log_develop_trace(gc, compaction, phases)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
+ log_develop_trace(gc, compaction)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}
void
print_generic_summary_data(ParallelCompactData& summary_data,
SpaceInfo* space_info)
{
- if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (!log_develop_is_enabled(Trace, gc, compaction)) {
return;
}
@@ -296,7 +296,7 @@ print_initial_summary_data(ParallelCompactData& summary_data,
size_t i = summary_data.addr_to_region_idx(space->bottom());
while (i < end_region && summary_data.region(i)->data_size() == region_size) {
ParallelCompactData::RegionData* c = summary_data.region(i);
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
i, p2i(c->destination()),
c->partial_obj_size(), c->live_obj_size(),
@@ -330,7 +330,7 @@ print_initial_summary_data(ParallelCompactData& summary_data,
}
ParallelCompactData::RegionData* c = summary_data.region(i);
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d"
"%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
i, p2i(c->destination()),
@@ -346,21 +346,21 @@ print_initial_summary_data(ParallelCompactData& summary_data,
// Any remaining regions are empty. Print one more if there is one.
if (i < end_region) {
ParallelCompactData::RegionData* c = summary_data.region(i);
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
i, p2i(c->destination()),
c->partial_obj_size(), c->live_obj_size(),
c->data_size(), c->source_region(), c->destination_count());
}
- log_develop_trace(gc, compaction, phases)("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
- max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
+ log_develop_trace(gc, compaction)("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
+ max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
}
void
print_initial_summary_data(ParallelCompactData& summary_data,
SpaceInfo* space_info) {
- if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (!log_develop_is_enabled(Trace, gc, compaction)) {
return;
}
@@ -621,7 +621,7 @@ ParallelCompactData::summarize_split_space(size_t src_region,
sr->partial_obj_size()));
const size_t end_idx = addr_to_region_idx(target_end);
- log_develop_trace(gc, compaction, phases)("split: clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
+ log_develop_trace(gc, compaction)("split: clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
for (size_t idx = beg_idx; idx < end_idx; ++idx) {
_region_data[idx].set_source_region(0);
}
@@ -641,22 +641,22 @@ ParallelCompactData::summarize_split_space(size_t src_region,
*target_next = split_destination + partial_obj_size;
HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
- if (log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (log_develop_is_enabled(Trace, gc, compaction)) {
const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
- log_develop_trace(gc, compaction, phases)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
- split_type, p2i(source_next), split_region, partial_obj_size);
- log_develop_trace(gc, compaction, phases)("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
- split_type, p2i(split_destination),
- addr_to_region_idx(split_destination),
- p2i(*target_next));
+ log_develop_trace(gc, compaction)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
+ split_type, p2i(source_next), split_region, partial_obj_size);
+ log_develop_trace(gc, compaction)("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
+ split_type, p2i(split_destination),
+ addr_to_region_idx(split_destination),
+ p2i(*target_next));
if (partial_obj_size != 0) {
HeapWord* const po_beg = split_info.destination();
HeapWord* const po_end = po_beg + split_info.partial_obj_size();
- log_develop_trace(gc, compaction, phases)("%s split: po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
- split_type,
- p2i(po_beg), addr_to_region_idx(po_beg),
- p2i(po_end), addr_to_region_idx(po_end));
+ log_develop_trace(gc, compaction)("%s split: po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
+ split_type,
+ p2i(po_beg), addr_to_region_idx(po_beg),
+ p2i(po_end), addr_to_region_idx(po_end));
}
}
@@ -670,7 +670,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
HeapWord** target_next)
{
HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
"sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
"tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
p2i(source_beg), p2i(source_end), p2i(source_next_val),
@@ -938,7 +938,7 @@ void PSParallelCompact::pre_compact()
// at each young gen gc. Do the update unconditionally (even though a
// promotion failure does not swap spaces) because an unknown number of young
// collections will have swapped the spaces an unknown number of times.
- GCTraceTime(Trace, gc, phases) tm("Pre Compact", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_space_info[from_space_id].set_space(heap->young_gen()->from_space());
_space_info[to_space_id].set_space(heap->young_gen()->to_space());
@@ -981,7 +981,7 @@ void PSParallelCompact::pre_compact()
void PSParallelCompact::post_compact()
{
- GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
// Clear the marking bitmap, summary data and split info.
@@ -1524,7 +1524,7 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
}
}
- if (log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (log_develop_is_enabled(Trace, gc, compaction)) {
const size_t region_size = ParallelCompactData::RegionSize;
HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
@@ -1532,7 +1532,7 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
HeapWord* const new_top = _space_info[id].new_top();
const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
"id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
"dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
"cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
@@ -1548,7 +1548,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
SpaceId src_space_id,
HeapWord* src_beg, HeapWord* src_end)
{
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
"Summarizing %d [%s] into %d [%s]: "
"src=" PTR_FORMAT "-" PTR_FORMAT " "
SIZE_FORMAT "-" SIZE_FORMAT " "
@@ -1568,7 +1568,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
void PSParallelCompact::summary_phase(ParCompactionManager* cm,
bool maximum_compaction)
{
- GCTraceTime(Trace, gc, phases) tm("Summary Phase", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
#ifdef ASSERT
if (TraceParallelOldGCMarkingPhase) {
@@ -1584,7 +1584,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
// Quick summarization of each space into itself, to see how much is live.
summarize_spaces_quick();
- log_develop_trace(gc, compaction, phases)("summary phase: after summarizing each space to self");
+ log_develop_trace(gc, compaction)("summary phase: after summarizing each space to self");
NOT_PRODUCT(print_region_ranges());
NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
@@ -1660,7 +1660,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
}
}
- log_develop_trace(gc, compaction, phases)("Summary_phase: after final summarization");
+ log_develop_trace(gc, compaction)("Summary_phase: after final summarization");
NOT_PRODUCT(print_region_ranges());
NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
}
@@ -2042,7 +2042,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction,
ParallelOldTracer *gc_tracer) {
// Recursively traverse all live objects and mark them
- GCTraceTime(Trace, gc, phases) tm("Marking Phase", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2057,7 +2057,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
ClassLoaderDataGraph::clear_claimed_marks();
{
- GCTraceTime(Trace, gc, phases) tm("Par Mark", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
ParallelScavengeHeap::ParStrongRootsScope psrs;
@@ -2086,7 +2086,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Process reference objects found during marking
{
- GCTraceTime(Trace, gc, phases) tm("Reference Processing", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
ReferenceProcessorStats stats;
if (ref_processor()->processing_is_mt()) {
@@ -2103,38 +2103,40 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
gc_tracer->report_gc_reference_stats(stats);
}
- GCTraceTime(Trace, gc) tm_m("Class Unloading", &_gc_timer);
// This is the point where the entire marking should have completed.
assert(cm->marking_stacks_empty(), "Marking should have completed");
- // Follow system dictionary roots and unload classes.
- bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
+ {
+ GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
- // Unload nmethods.
- CodeCache::do_unloading(is_alive_closure(), purged_class);
+ // Follow system dictionary roots and unload classes.
+ bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
- // Prune dead klasses from subklass/sibling/implementor lists.
- Klass::clean_weak_klass_links(is_alive_closure());
+ // Unload nmethods.
+ CodeCache::do_unloading(is_alive_closure(), purged_class);
- // Delete entries for dead interned strings.
- StringTable::unlink(is_alive_closure());
+ // Prune dead klasses from subklass/sibling/implementor lists.
+ Klass::clean_weak_klass_links(is_alive_closure());
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Scrub String Table", &_gc_timer);
+ // Delete entries for dead interned strings.
+ StringTable::unlink(is_alive_closure());
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", &_gc_timer);
+ // Clean up unreferenced symbols in symbol table.
+ SymbolTable::unlink();
+ }
- // Clean up unreferenced symbols in symbol table.
- SymbolTable::unlink();
_gc_tracer.report_object_count_after_gc(is_alive_closure());
}
- // This should be moved to the shared markSweep code!
- class PSAlwaysTrueClosure: public BoolObjectClosure {
- public:
- bool do_object_b(oop p) { return true; }
- };
- static PSAlwaysTrueClosure always_true;
void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
// Adjust the pointers to reflect the new locations
- GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
// Need new claim bits when tracing through and adjusting pointers.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -2157,7 +2159,7 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
// Global (weak) JNI handles
- JNIHandles::weak_oops_do(&always_true, &oop_closure);
+ JNIHandles::weak_oops_do(&oop_closure);
CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_from_blobs);
@@ -2408,7 +2410,7 @@ void PSParallelCompact::write_block_fill_histogram()
#endif // #ifdef ASSERT
void PSParallelCompact::compact() {
- GCTraceTime(Trace, gc, phases) tm("Compaction Phase", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
@@ -2467,9 +2469,8 @@ void PSParallelCompact::verify_complete(SpaceId space_id) {
for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
const RegionData* const c = sd.region(cur_region);
if (!c->completed()) {
- warning("region " SIZE_FORMAT " not filled: "
- "destination_count=%u",
- cur_region, c->destination_count());
+ log_warning(gc)("region " SIZE_FORMAT " not filled: destination_count=%u",
+ cur_region, c->destination_count());
issued_a_warning = true;
}
}
@@ -2477,9 +2478,8 @@ void PSParallelCompact::verify_complete(SpaceId space_id) {
for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
const RegionData* const c = sd.region(cur_region);
if (!c->available()) {
- warning("region " SIZE_FORMAT " not empty: "
- "destination_count=%u",
- cur_region, c->destination_count());
+ log_warning(gc)("region " SIZE_FORMAT " not empty: destination_count=%u",
+ cur_region, c->destination_count());
issued_a_warning = true;
}
}
@@ -3013,7 +3013,7 @@ jlong PSParallelCompact::millis_since_last_gc() {
jlong ret_val = now - _time_of_last_gc;
// XXX See note in genCollectedHeap::millis_since_last_gc().
if (ret_val < 0) {
- NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
+ NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
return 0;
}
return ret_val;


@@ -404,7 +404,7 @@ bool PSScavenge::invoke_no_policy() {
// Process reference objects discovered during scavenge
{
- GCTraceTime(Debug, gc, phases) tm("References", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
reference_processor()->setup_policy(false); // not always_clear
reference_processor()->set_active_mt_degree(active_workers);
@@ -433,7 +433,7 @@ bool PSScavenge::invoke_no_policy() {
}
{
- GCTraceTime(Debug, gc, phases) tm("StringTable", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer);
// Unlink any dead interned Strings and process the remaining live ones.
PSScavengeRootsClosure root_closure(promotion_manager);
StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);


@@ -594,7 +594,7 @@ void DefNewGeneration::collect(bool full,
init_assuming_no_promotion_failure();
- GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());
+ GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());
gch->trace_heap_before_gc(&gc_tracer);


@@ -180,7 +180,7 @@ void GenMarkSweep::deallocate_stacks() {
void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -208,6 +208,8 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Process reference objects found during marking
{
+ GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());
ref_processor()->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references(
@@ -218,20 +220,30 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// This is the point where the entire marking should have completed.
assert(_marking_stack.is_empty(), "Marking should have completed");
- // Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(&is_alive);
+ {
+ GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());
- // Unload nmethods.
- CodeCache::do_unloading(&is_alive, purged_class);
+ // Unload classes and purge the SystemDictionary.
+ bool purged_class = SystemDictionary::do_unloading(&is_alive);
- // Prune dead klasses from subklass/sibling/implementor lists.
- Klass::clean_weak_klass_links(&is_alive);
+ // Unload nmethods.
+ CodeCache::do_unloading(&is_alive, purged_class);
- // Delete entries for dead interned strings.
- StringTable::unlink(&is_alive);
+ // Prune dead klasses from subklass/sibling/implementor lists.
+ Klass::clean_weak_klass_links(&is_alive);
+ }
- // Clean up unreferenced symbols in symbol table.
- SymbolTable::unlink();
+ {
+ GCTraceTime(Debug, gc, phases) t("Scrub String Table", gc_timer());
+ // Delete entries for dead interned strings.
+ StringTable::unlink(&is_alive);
+ }
+ {
+ GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", gc_timer());
+ // Clean up unreferenced symbols in symbol table.
+ SymbolTable::unlink();
+ }
gc_tracer()->report_object_count_after_gc(&is_alive);
}
@@ -253,7 +265,7 @@ void GenMarkSweep::mark_sweep_phase2() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
- GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
gch->prepare_for_compaction();
}
@@ -269,7 +281,7 @@ void GenMarkSweep::mark_sweep_phase3() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Adjust the pointers to reflect the new locations
- GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
// Need new claim bits for the pointer adjustment tracing.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -321,7 +333,7 @@ void GenMarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
GenCollectedHeap* gch = GenCollectedHeap::heap();
- GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
GenCompactClosure blk;
gch->generation_iterate(&blk, true);


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -144,7 +144,7 @@ bool CardGeneration::grow_to_reserved() {
const size_t remaining_bytes = _virtual_space.uncommitted_size();
if (remaining_bytes > 0) {
success = grow_by(remaining_bytes);
- DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
+ DEBUG_ONLY(if (!success) log_warning(gc)("grow to reserved failed");)
}
return success;
}


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -325,17 +325,17 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
// In the case of CMS+ParNew, issue a warning
if (!ur.contains(urasm)) {
assert(UseConcMarkSweepGC, "Tautology: see assert above");
- warning("CMS+ParNew: Did you forget to call save_marks()? "
- "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
- "[" PTR_FORMAT ", " PTR_FORMAT ")",
- p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
+ log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
+ "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
+ "[" PTR_FORMAT ", " PTR_FORMAT ")",
+ p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
MemRegion ur2 = sp->used_region();
MemRegion urasm2 = sp->used_region_at_save_marks();
if (!ur.equals(ur2)) {
- warning("CMS+ParNew: Flickering used_region()!!");
+ log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
}
if (!urasm.equals(urasm2)) {
- warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
+ log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
}
ShouldNotReachHere();
}


@@ -213,7 +213,7 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
do_full_collection(false); // don't clear all soft refs
break;
}
- case GCCause::_last_ditch_collection: {
+ case GCCause::_metadata_GC_clear_soft_refs: {
HandleMark hm;
do_full_collection(true); // do clear all soft refs
break;
