Jesper Wilhelmsson 2016-03-23 23:36:29 +01:00
commit b7ca1e57ee
255 changed files with 6692 additions and 5051 deletions

View File

@ -9,12 +9,15 @@
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
# and add an override of __FILE__ with just the basename on the
# first line of the file.
awk < $1 > $1+ -v F2=$2 '
BEGIN { print "#line 1 \"" F2 "\""; }
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
'
mv $1+ $1
}
fix_lines $2/$1 $3/$1
fix_lines $2/$1 $1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )
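Editor's note: the awk change above is the substance of this hunk. A trailing F2=$2 assignment is only applied when awk starts consuming input, after the BEGIN block has already run, so the newly added BEGIN-emitted #line directive would see an empty F2; passing the value with -v makes it visible in BEGIN. A minimal sketch of the difference (illustrative, any awk):

    $ awk 'BEGIN { print "F2=[" F2 "]" }' F2=name /dev/null
    F2=[]
    $ awk -v F2=name 'BEGIN { print "F2=[" F2 "]" }' /dev/null
    F2=[name]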

View File

@ -27,14 +27,17 @@
#
# It knows how to build and run the tools to generate trace files.
include $(GAMMADIR)/make/linux/makefiles/rules.make
include $(GAMMADIR)/make/aix/makefiles/rules.make
include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
HAS_ALT_SRC := false
ifndef OPENJDK
ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), )
HAS_ALT_SRC := true
endif
endif
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -63,10 +66,17 @@ TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
TraceXml = $(TraceAltSrcDir)/trace.xml
endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
endif
.PHONY: all clean cleanall
@ -79,26 +89,26 @@ GENERATE_CODE= \
$(QUIETLY) echo $(LOG_INFO) Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
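Editor's note: two things change in the HAS_ALT_SRC probe in this makefile (and its bsd/linux/solaris twins below). The $(shell ...) fork is replaced by make's built-in $(wildcard ...) existence test, and the whole probe is gated on OPENJDK so open builds never pick up closed trace sources even when the directory happens to exist. A minimal GNU make sketch of the wildcard idiom, with an illustrative path:

    CHECK_DIR := /tmp/somewhere
    EXISTS := false
    ifneq ($(wildcard $(CHECK_DIR)), )
      EXISTS := true
    endif
    $(info $(CHECK_DIR) exists: $(EXISTS))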

View File

@ -9,12 +9,15 @@
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
# and add an override of __FILE__ with just the basename on the
# first line of the file.
awk < $1 > $1+ -v F2=$2 '
BEGIN { print "#line 1 \"" F2 "\""; }
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
'
mv $1+ $1
}
fix_lines $2/$1 $3/$1
fix_lines $2/$1 $1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )

View File

@ -32,9 +32,12 @@ include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
HAS_ALT_SRC := false
ifndef OPENJDK
ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), )
HAS_ALT_SRC := true
endif
endif
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -59,15 +62,21 @@ TraceGeneratedNames += \
traceEventControl.hpp
endif
TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
TraceXml = $(TraceAltSrcDir)/trace.xml
endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
endif
.PHONY: all clean cleanall
@ -80,32 +89,31 @@ GENERATE_CODE= \
$(QUIETLY) echo $(LOG_INFO) Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
# #########################################################################
clean cleanall:
rm $(TraceGeneratedFiles)

View File

@ -9,12 +9,15 @@
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
# and add an override of __FILE__ with just the basename on the
# first line of the file.
awk < $1 > $1+ -v F2=$2 '
BEGIN { print "#line 1 \"" F2 "\""; }
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
'
mv $1+ $1
}
fix_lines $2/$1 $3/$1
fix_lines $2/$1 $1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )

View File

@ -32,9 +32,12 @@ include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
HAS_ALT_SRC := false
ifndef OPENJDK
ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), )
HAS_ALT_SRC := true
endif
endif
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -63,10 +66,17 @@ TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
TraceXml = $(TraceAltSrcDir)/trace.xml
endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
endif
.PHONY: all clean cleanall
@ -79,26 +89,26 @@ GENERATE_CODE= \
$(QUIETLY) echo $(LOG_INFO) Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif
@ -107,5 +117,3 @@ endif
clean cleanall:
rm $(TraceGeneratedFiles)

View File

@ -109,8 +109,7 @@
JVM_GetPrimitiveArrayElement;
JVM_GetProtectionDomain;
JVM_GetStackAccessControlContext;
JVM_GetStackTraceDepth;
JVM_GetStackTraceElement;
JVM_GetStackTraceElements;
JVM_GetSystemPackage;
JVM_GetSystemPackages;
JVM_GetTemporaryDirectory;
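Editor's note: this export-map hunk reflects an interface change in jvm.h — the two per-frame stack trace entry points are replaced by a single bulk call that fills a whole array of StackTraceElements in one VM transition. A hedged sketch of the before/after declarations (signatures as commonly published for JDK 9; verify against jvm.h):

    /* old: one VM call per frame */
    jint    JVM_GetStackTraceDepth(JNIEnv *env, jobject throwable);
    jobject JVM_GetStackTraceElement(JNIEnv *env, jobject throwable, jint index);
    /* new: one VM call for the whole trace */
    void    JVM_GetStackTraceElements(JNIEnv *env, jobject throwable, jobjectArray elements);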

View File

@ -9,12 +9,15 @@
#
fix_lines() {
# repair bare #line directives in $1 to refer to $2
awk < $1 > $1+ '
# and add an override of __FILE__ with just the basename on the
# first line of the file.
nawk < $1 > $1+ -v F2=$2 '
BEGIN { print "#line 1 \"" F2 "\""; }
/^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
{print}
' F2=$2
'
mv $1+ $1
}
fix_lines $2/$1 $3/$1
fix_lines $2/$1 $1
[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )

View File

@ -32,9 +32,12 @@ include $(GAMMADIR)/make/altsrc.make
# #########################################################################
HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
echo "true"; else echo "false";\
fi)
HAS_ALT_SRC := false
ifndef OPENJDK
ifneq ($(wildcard $(HS_ALT_SRC)/share/vm/trace), )
HAS_ALT_SRC := true
endif
endif
TOPDIR = $(shell echo `pwd`)
GENERATED = $(TOPDIR)/../generated
@ -63,10 +66,17 @@ TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
TraceXml = $(TraceAltSrcDir)/trace.xml
endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
ifeq ($(HAS_ALT_SRC), true)
XML_DEPS += $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
endif
.PHONY: all clean cleanall
@ -79,26 +89,26 @@ GENERATE_CODE= \
$(QUIETLY) echo $(LOG_INFO) Generating $@; \
$(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceXml) $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceXml) $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(GENERATE_CODE)
ifeq ($(HAS_ALT_SRC), false)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceXml) $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(GENERATE_CODE)
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceXml) $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(GENERATE_CODE)
endif

View File

@ -114,11 +114,15 @@ VARIANT_TEXT=Tiered
# Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro
# or make/hotspot_distro.
!ifndef HOTSPOT_VM_DISTRO
!ifndef OPENJDK
!if exists($(WorkSpace)\src\closed)
!include $(WorkSpace)\make\hotspot_distro
!else
!include $(WorkSpace)\make\openjdk_distro
!endif
!else
!include $(WorkSpace)\make\openjdk_distro
!endif
!endif
HS_FILEDESC=$(HOTSPOT_VM_DISTRO) $(ARCH_TEXT) $(VARIANT_TEXT) VM

View File

@ -55,7 +55,11 @@ COMMONSRC_REL=src
ALTSRC_REL=src/closed # Change this to pick up alt sources from somewhere else
COMMONSRC=${WorkSpace}/${COMMONSRC_REL}
if [ "x$OPENJDK" != "xtrue" ]; then
ALTSRC=${WorkSpace}/${ALTSRC_REL}
else
ALTSRC=PATH_THAT_DOES_NOT_EXIST
fi
BASE_PATHS="`if [ -d ${ALTSRC}/share/vm ]; then $FIND ${ALTSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc -o -name opto -o -name shark -o -name libadt \); fi`"
BASE_PATHS="${BASE_PATHS} ` $FIND ${COMMONSRC}/share/vm ! -name vm -prune -type d \! \( -name adlc -o -name c1 -o -name gc -o -name opto -o -name shark -o -name libadt \)`"
@ -158,6 +162,6 @@ for e in ${Src_Files}; do
fi
Obj_Files="${Obj_Files}$o "
done
Obj_Files=`echo ${Obj_Files} | tr ' ' '\n' | sort`
Obj_Files=`echo ${Obj_Files} | tr ' ' '\n' | LC_ALL=C sort`
echo Obj_Files=${Obj_Files}
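Editor's note: the LC_ALL=C addition pins sort to bytewise ASCII ordering, so the generated object-file list is identical regardless of the builder's locale (many UTF-8 locales collate case-insensitively and would reorder it). A quick illustration:

    $ printf 'B\na\n' | sort             # en_US.UTF-8 collation: a before B
    $ printf 'B\na\n' | LC_ALL=C sort    # bytewise ASCII: B before a, on every machine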

View File

@ -276,3 +276,7 @@ ifneq ($(SPEC),)
MAKE_ARGS += MT="$(subst /,\\,$(MT))"
endif
endif
ifdef OPENJDK
MAKE_ARGS += OPENJDK="$(OPENJDK)"
endif

View File

@ -32,15 +32,21 @@
# #########################################################################
TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace
TraceSrcDir = $(WorkSpace)/src/share/vm/trace
TraceAltSrcDir = $(WorkSpace)\src\closed\share\vm\trace
TraceSrcDir = $(WorkSpace)\src\share\vm\trace
!ifndef OPENJDK
!if EXISTS($(TraceAltSrcDir))
HAS_ALT_SRC = true
!endif
!endif
TraceGeneratedNames = \
traceEventClasses.hpp \
traceEventIds.hpp \
traceTypes.hpp
!if EXISTS($(TraceAltSrcDir))
!ifdef HAS_ALT_SRC
TraceGeneratedNames = $(TraceGeneratedNames) \
traceRequestables.hpp \
traceEventControl.hpp
@ -54,7 +60,7 @@ TraceGeneratedFiles = \
$(TraceOutDir)/traceEventIds.hpp \
$(TraceOutDir)/traceTypes.hpp
!if EXISTS($(TraceAltSrcDir))
!ifdef HAS_ALT_SRC
TraceGeneratedFiles = $(TraceGeneratedFiles) \
$(TraceOutDir)/traceRequestables.hpp \
$(TraceOutDir)/traceEventControl.hpp
@ -62,11 +68,19 @@ TraceGeneratedFiles = $(TraceGeneratedFiles) \
XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
TraceXml = $(TraceSrcDir)/trace.xml
!if EXISTS($(TraceAltSrcDir))
XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
!ifdef HAS_ALT_SRC
TraceXml = $(TraceAltSrcDir)/trace.xml
!endif
XML_DEPS = $(TraceXml) $(TraceSrcDir)/tracetypes.xml \
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod \
$(TraceSrcDir)/tracerelationdecls.xml $(TraceSrcDir)/traceevents.xml
!ifdef HAS_ALT_SRC
XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceeventscustom.xml \
$(TraceAltSrcDir)/traceeventtypes.xml
!endif
.PHONY: all clean cleanall
@ -76,33 +90,33 @@ XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
default::
@if not exist $(TraceOutDir) mkdir $(TraceOutDir)
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
@echo Generating $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
!if !EXISTS($(TraceAltSrcDir))
!ifndef HAS_ALT_SRC
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
@echo Generating OpenJDK $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
!else
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventClasses.hpp: $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
@echo Generating AltSrc $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
$(TraceOutDir)/traceRequestables.hpp: $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
@echo Generating AltSrc $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
$(TraceOutDir)/traceEventControl.hpp: $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
@echo Generating AltSrc $@
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
$(XSLT) -IN $(TraceXml) -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
!endif
@ -110,5 +124,3 @@ $(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)
cleanall :
rm $(TraceGeneratedFiles)
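Editor's note: the Windows variant switches from testing !if EXISTS($(TraceAltSrcDir)) at each use site to a single HAS_ALT_SRC macro tested with !ifdef. nmake's !ifdef checks whether the macro is defined at all, not its value, which is why the OPENJDK path simply never defines it rather than setting it to false. The pattern in isolation (nmake):

    !ifndef OPENJDK
    !if EXISTS($(TraceAltSrcDir))
    HAS_ALT_SRC = true    # any value works; only definedness is tested
    !endif
    !endif
    !ifdef HAS_ALT_SRC
    # closed-source trace files participate in the build
    !endif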

View File

@ -118,6 +118,7 @@ LD_FLAGS=$(LD_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 $(EXP
CXX_INCLUDE_DIRS=/I "..\generated"
!ifndef OPENJDK
!if exists($(ALTSRC)\share\vm)
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\share\vm"
!endif
@ -133,6 +134,7 @@ CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\os_cpu\windows_$(Platform_arc
!if exists($(ALTSRC)\cpu\$(Platform_arch)\vm)
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) /I "$(ALTSRC)\cpu\$(Platform_arch)\vm"
!endif
!endif # OPENJDK
CXX_INCLUDE_DIRS=$(CXX_INCLUDE_DIRS) \
/I "$(COMMONSRC)\share\vm" \
@ -187,10 +189,12 @@ VM_PATH=$(VM_PATH);$(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm
VM_PATH=$(VM_PATH);$(WorkSpace)/src/cpu/$(Platform_arch)/vm
VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto
!ifndef OPENJDK
!if exists($(ALTSRC)\share\vm\jfr)
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers
!endif
!endif # OPENJDK
VM_PATH={$(VM_PATH)}
@ -310,6 +314,7 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{$(COMMONSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
!ifndef OPENJDK
{$(ALTSRC)\share\vm\c1}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
@ -392,6 +397,13 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{$(ALTSRC)\os_cpu\windows_$(Platform_arch)\vm}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
!endif
{..\generated\incls}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
@ -404,12 +416,6 @@ bytecodeInterpreterWithChecks.obj: ..\generated\jvmtifiles\bytecodeInterpreterWi
{..\generated\tracefiles}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj::
$(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
default::
_build_pch_file.obj:

View File

@ -1967,7 +1967,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
__ push(RegSet::range(r0, r15), sp);
__ mov(c_rarg2, r0); // Pass itos
__ call_VM(noreg,
CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
c_rarg1, c_rarg2, c_rarg3);
__ pop(RegSet::range(r0, r15), sp);
__ pop(state);
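Editor's note: this hunk and the ppc, sparc, and x86 hunks below make the same substitution — the bytecode tracer entry point moves from SharedRuntime to InterpreterRuntime. A hedged sketch of the relocated declaration, with the shape inferred only from the call sites in these diffs:

    // assumed shape of the relocated entry point (derived from the call sites above):
    // static void InterpreterRuntime::trace_bytecode(JavaThread* thread,
    //                                                intptr_t preserve_this_value,
    //                                                intptr_t tos, intptr_t tos2);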

View File

@ -2211,7 +2211,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
__ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
__ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
__ mflr(R31);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
__ mtlr(R31);
__ pop(state);

View File

@ -1966,7 +1966,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
// Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
__ mov( Otos_l2, G3_scratch );
__ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
__ mov(Lscratch, O7); // restore return address
__ pop(state);
__ retl();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,44 +54,6 @@
// <- sender sp
// ------------------------------ Asm interpreter ----------------------------------------
// ------------------------------ C++ interpreter ----------------------------------------
//
// Layout of C++ interpreter frame: (While executing in BytecodeInterpreter::run)
//
// <- SP (current esp/rsp)
// [local variables ] BytecodeInterpreter::run local variables
// ... BytecodeInterpreter::run local variables
// [local variables ] BytecodeInterpreter::run local variables
// [old frame pointer ] fp [ BytecodeInterpreter::run's ebp/rbp ]
// [return pc ] (return to frame manager)
// [interpreter_state* ] (arg to BytecodeInterpreter::run) --------------
// [expression stack ] <- last_Java_sp |
// [... ] * <- interpreter_state.stack |
// [expression stack ] * <- interpreter_state.stack_base |
// [monitors ] \ |
// ... | monitor block size |
// [monitors ] / <- interpreter_state.monitor_base |
// [struct interpretState ] <-----------------------------------------|
// [return pc ] (return to callee of frame manager [1]
// [locals and parameters ]
// <- sender sp
// [1] When the C++ interpreter calls a new method it returns to the frame
// manager which allocates a new frame on the stack. In that case there
// is no real callee of this newly allocated frame. The frame manager is
// aware of the additional frame(s) and will pop them as nested calls
// complete. However, to make it look good in the debugger the frame
// manager actually installs a dummy pc pointing to RecursiveInterpreterActivation
// with a fake interpreter_state* parameter to make it easy to debug
// nested calls.
// Note that contrary to the layout for the assembly interpreter the
// expression stack allocated for the C++ interpreter is full sized.
// However this is not as bad as it seems as the interpreter frame_manager
// will truncate the unused space on successive method calls.
//
// ------------------------------ C++ interpreter ----------------------------------------
public:
enum {
pc_return_offset = 0,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -296,7 +296,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
Label L;
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
stop("InterpreterMacroAssembler::call_VM_base:"
" last_sp != NULL");
bind(L);
}

View File

@ -1830,7 +1830,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
__ push(state); // save tosca
// pass tosca registers as arguments & call tracer
__ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
__ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
__ pop(state); // restore tosca
@ -1847,7 +1847,7 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
__ movflt(xmm3, xmm0); // Pass ftos
#endif
__ call_VM(noreg,
CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
c_rarg1, c_rarg2, c_rarg3);
__ pop(c_rarg3);
__ pop(c_rarg2);

View File

@ -84,7 +84,11 @@ public class SAGetopt {
}
else {
// Mixed style options --file name
try {
extractOptarg(ca[0]);
} catch (ArrayIndexOutOfBoundsException e) {
throw new RuntimeException("Argument is expected for '" + ca[0] + "'");
}
}
return ca[0];
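Editor's note: with mixed-style options (--file name) the value is expected as the next argv element; when it is missing, extractOptarg runs off the end of the array, so the new try/catch converts the bare ArrayIndexOutOfBoundsException into a RuntimeException carrying an actionable message. Hypothetical before/after from the command line (assuming the launcher is invoked via the jhsdb wrapper):

    $ jhsdb jinfo --pid            # value omitted
    before: java.lang.ArrayIndexOutOfBoundsException
    after:  Argument is expected for 'pid'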

View File

@ -30,6 +30,7 @@ import java.util.Arrays;
import sun.jvm.hotspot.tools.JStack;
import sun.jvm.hotspot.tools.JMap;
import sun.jvm.hotspot.tools.JInfo;
import sun.jvm.hotspot.tools.JSnap;
public class SALauncher {
@ -39,6 +40,7 @@ public class SALauncher {
System.out.println(" jstack --help\tto get more information");
System.out.println(" jmap --help\tto get more information");
System.out.println(" jinfo --help\tto get more information");
System.out.println(" jsnap --help\tto get more information");
return false;
}
@ -85,6 +87,11 @@ public class SALauncher {
return commonHelp();
}
private static boolean jsnapHelp() {
System.out.println(" --all\tto print all performance counters");
return commonHelp();
}
private static boolean toolHelp(String toolName) {
if (toolName.equals("jstack")) {
return jstackHelp();
@ -95,24 +102,62 @@ public class SALauncher {
if (toolName.equals("jmap")) {
return jmapHelp();
}
if (toolName.equals("jsnap")) {
return jsnapHelp();
}
if (toolName.equals("hsdb") || toolName.equals("clhsdb")) {
return commonHelp();
}
return launcherHelp();
}
private static void buildAttachArgs(ArrayList<String> newArgs,
String pid, String exe, String core) {
if ((pid == null) && (exe == null)) {
throw new IllegalArgumentException(
"You have to set --pid or --exe.");
}
if (pid != null) { // Attach to live process
if (exe != null) {
throw new IllegalArgumentException(
"Unnecessary argument: --exe");
} else if (core != null) {
throw new IllegalArgumentException(
"Unnecessary argument: --core");
} else if (!pid.matches("^\\d+$")) {
throw new IllegalArgumentException("Invalid pid: " + pid);
}
newArgs.add(pid);
} else {
if (exe.length() == 0) {
throw new IllegalArgumentException("You have to set --exe.");
}
newArgs.add(exe);
if ((core == null) || (core.length() == 0)) {
throw new IllegalArgumentException("You have to set --core.");
}
newArgs.add(core);
}
}
private static void runCLHSDB(String[] oldArgs) {
SAGetopt sg = new SAGetopt(oldArgs);
String[] longOpts = {"exe=", "core=", "pid="};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String pid = null;
String exe = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -120,17 +165,12 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
buildAttachArgs(newArgs, pid, exe, core);
CLHSDB.main(newArgs.toArray(new String[newArgs.size()]));
}
@ -139,13 +179,14 @@ public class SALauncher {
String[] longOpts = {"exe=", "core=", "pid="};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String pid = null;
String exe = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -153,17 +194,12 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
buildAttachArgs(newArgs, pid, exe, core);
HSDB.main(newArgs.toArray(new String[newArgs.size()]));
}
@ -173,13 +209,14 @@ public class SALauncher {
"mixed", "locks"};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String pid = null;
String exe = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -187,7 +224,7 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
if (s.equals("mixed")) {
@ -200,13 +237,7 @@ public class SALauncher {
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
buildAttachArgs(newArgs, pid, exe, core);
JStack.main(newArgs.toArray(new String[newArgs.size()]));
}
@ -216,13 +247,14 @@ public class SALauncher {
"heap", "binaryheap", "histo", "clstats", "finalizerinfo"};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String pid = null;
String exe = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -230,7 +262,7 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
if (s.equals("heap")) {
@ -255,13 +287,7 @@ public class SALauncher {
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
}
}
buildAttachArgs(newArgs, pid, exe, core);
JMap.main(newArgs.toArray(new String[newArgs.size()]));
}
@ -271,13 +297,14 @@ public class SALauncher {
"flags", "sysprops"};
ArrayList<String> newArgs = new ArrayList();
String exeORpid = null;
String exe = null;
String pid = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exeORpid = sg.getOptarg();
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
@ -285,7 +312,7 @@ public class SALauncher {
continue;
}
if (s.equals("pid")) {
exeORpid = sg.getOptarg();
pid = sg.getOptarg();
continue;
}
if (s.equals("flags")) {
@ -298,14 +325,41 @@ public class SALauncher {
}
}
if (exeORpid != null) {
newArgs.add(exeORpid);
if (core != null) {
newArgs.add(core);
buildAttachArgs(newArgs, pid, exe, core);
JInfo.main(newArgs.toArray(new String[newArgs.size()]));
}
private static void runJSNAP(String[] oldArgs) {
SAGetopt sg = new SAGetopt(oldArgs);
String[] longOpts = {"exe=", "core=", "pid=", "all"};
ArrayList<String> newArgs = new ArrayList();
String exe = null;
String pid = null;
String core = null;
String s = null;
while((s = sg.next(null, longOpts)) != null) {
if (s.equals("exe")) {
exe = sg.getOptarg();
continue;
}
if (s.equals("core")) {
core = sg.getOptarg();
continue;
}
if (s.equals("pid")) {
pid = sg.getOptarg();
continue;
}
if (s.equals("all")) {
newArgs.add("-a");
continue;
}
}
JInfo.main(newArgs.toArray(new String[newArgs.size()]));
buildAttachArgs(newArgs, pid, exe, core);
JSnap.main(newArgs.toArray(new String[newArgs.size()]));
}
public static void main(String[] args) {
@ -329,6 +383,7 @@ public class SALauncher {
String[] oldArgs = Arrays.copyOfRange(args, 1, args.length);
try {
// Run SA interactive mode
if (args[0].equals("clhsdb")) {
runCLHSDB(oldArgs);
@ -355,5 +410,16 @@ public class SALauncher {
runJINFO(oldArgs);
return;
}
if (args[0].equals("jsnap")) {
runJSNAP(oldArgs);
return;
}
throw new IllegalArgumentException("Unknown tool: " + args[0]);
} catch (Exception e) {
System.err.println(e.getMessage());
toolHelp(args[0]);
}
}
}
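Editor's note: the net effect of the SALauncher rework above is that every subcommand funnels its --pid/--exe/--core options through buildAttachArgs, which validates the combination up front, and a jsnap subcommand is wired in. A hedged usage sketch, again assuming the jhsdb wrapper drives this launcher:

    $ jhsdb jsnap --all --pid 1234       # forwards "-a 1234" to JSnap.main
    $ jhsdb jmap --pid 12x4              # rejected: Invalid pid: 12x4
    $ jhsdb jstack --exe bin/java        # rejected: You have to set --core.
    $ jhsdb jinfo --pid 1234 --exe java  # rejected: Unnecessary argument: --exe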

View File

@ -35,6 +35,11 @@ public enum GCCause {
_gc_locker ("GCLocker Initiated GC"),
_heap_inspection ("Heap Inspection Initiated GC"),
_heap_dump ("Heap Dump Initiated GC"),
_wb_young_gc ("WhiteBox Initiated Young GC"),
_wb_conc_mark ("WhiteBox Initiated Concurrent Mark"),
_wb_full_gc ("WhiteBox Initiated Full GC"),
_update_allocation_context_stats_inc ("Update Allocation Context Stats"),
_update_allocation_context_stats_full ("Update Allocation Context Stats"),
_no_gc ("No GC"),
_no_cause_specified ("Unknown GCCause"),
@ -42,6 +47,7 @@ public enum GCCause {
_tenured_generation_full ("Tenured Generation Full"),
_metadata_GC_threshold ("Metadata GC Threshold"),
_metadata_GC_clear_soft_refs ("Metadata GC Clear Soft References"),
_cms_generation_full ("CMS Generation Full"),
_cms_initial_mark ("CMS Initial Mark"),
@ -55,7 +61,8 @@ public enum GCCause {
_g1_inc_collection_pause ("G1 Evacuation Pause"),
_g1_humongous_allocation ("G1 Humongous Allocation"),
_last_ditch_collection ("Last ditch collection"),
_dcmd_gc_run ("Diagnostic Command"),
_last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE");
private final String value;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -130,7 +130,7 @@ public class Threads {
virtualConstructor.addMapping("CodeCacheSweeperThread", CodeCacheSweeperThread.class);
}
// for now, use JavaThread itself. fix it later with appropriate class if needed
virtualConstructor.addMapping("SurrogateLockerThread", JavaThread.class);
virtualConstructor.addMapping("ReferencePendingListLockerThread", JavaThread.class);
virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class);
virtualConstructor.addMapping("ServiceThread", ServiceThread.class);
}
@ -172,7 +172,7 @@ public class Threads {
return thread;
} catch (Exception e) {
throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, SurrogateLockerThread, or CodeCacheSweeperThread)", e);
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, ReferencePendingListLockerThread, or CodeCacheSweeperThread)", e);
}
}

View File

@ -25,11 +25,15 @@
package sun.jvm.hotspot.tools;
import java.io.*;
import java.util.*;
import java.util.stream.*;
import sun.jvm.hotspot.debugger.JVMDebugger;
import sun.jvm.hotspot.runtime.*;
public class JSnap extends Tool {
private boolean all;
public JSnap() {
super();
}
@ -45,7 +49,7 @@ public class JSnap extends Tool {
if (prologue.accessible()) {
PerfMemory.iterate(new PerfMemory.PerfDataEntryVisitor() {
public boolean visit(PerfDataEntry pde) {
if (pde.supported()) {
if (all || pde.supported()) {
out.print(pde.name());
out.print('=');
out.println(pde.valueAsString());
@ -62,8 +66,24 @@ public class JSnap extends Tool {
}
}
@Override
protected void printFlagsUsage() {
System.out.println(" -a\tto print all performance counters");
super.printFlagsUsage();
}
public static void main(String[] args) {
JSnap js = new JSnap();
js.all = Arrays.stream(args)
.anyMatch(s -> s.equals("-a"));
if (js.all) {
args = Arrays.stream(args)
.filter(s -> !s.equals("-a"))
.collect(Collectors.toList())
.toArray(new String[0]);
}
js.execute(args);
}
}
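Editor's note: the -a handling in JSnap.main is a small detect-then-strip idiom — find the flag with anyMatch, then filter it out before handing the remaining arguments to execute. An equivalent standalone sketch (toArray(String[]::new) instead of the collect-to-list detour used above):

    String[] args = {"-a", "1234"};
    boolean all = Arrays.stream(args).anyMatch(s -> s.equals("-a"));
    if (all) {
        args = Arrays.stream(args)
                     .filter(s -> !s.equals("-a"))
                     .toArray(String[]::new);
    }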

View File

@ -81,6 +81,12 @@ public class CompactHashTable extends VMObject {
}
public Symbol probe(byte[] name, long hash) {
if (bucketCount() == 0) {
// The table is invalid, so don't try to lookup
return null;
}
long symOffset;
Symbol sym;
Address baseAddress = baseAddressField.getValue(addr);
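Editor's note: the early return guards the case where the SA attaches to a VM whose shared symbol table was never mapped (no CDS archive), leaving the table header zeroed. A hedged sketch of the failure mode the guard presumably prevents:

    // with bucketCount() == 0, any bucket selection of the usual form
    //   long index = hash % bucketCount();   // ArithmeticException: / by zero
    // would fail, hence: return null before attempting any lookup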

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -837,7 +837,7 @@ vmType2Class["InterpreterCodelet"] = sapkg.interpreter.InterpreterCodelet;
vmType2Class["JavaThread"] = sapkg.runtime.JavaThread;
vmType2Class["CompilerThread"] = sapkg.runtime.CompilerThread;
vmType2Class["CodeCacheSweeperThread"] = sapkg.runtime.CodeCacheSweeperThread;
vmType2Class["SurrogateLockerThread"] = sapkg.runtime.JavaThread;
vmType2Class["ReferencePendingListLockerThread"] = sapkg.runtime.JavaThread;
vmType2Class["DebuggerThread"] = sapkg.runtime.DebuggerThread;
// gc

View File

@ -910,8 +910,8 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
@ -1178,7 +1178,7 @@ void os::die() {
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
const char *s = ::strerror(errno);
const char *s = os::strerror(errno);
size_t n = ::strlen(s);
if (n >= len) {
n = len - 1;
@ -1714,14 +1714,14 @@ static void local_sem_post() {
if (os::Aix::on_aix()) {
int rc = ::sem_post(&sig_sem);
if (rc == -1 && !warn_only_once) {
trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
warn_only_once = true;
}
} else {
guarantee0(p_sig_msem != NULL);
int rc = ::msem_unlock(p_sig_msem, 0);
if (rc == -1 && !warn_only_once) {
trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
warn_only_once = true;
}
}
@ -1732,14 +1732,14 @@ static void local_sem_wait() {
if (os::Aix::on_aix()) {
int rc = ::sem_wait(&sig_sem);
if (rc == -1 && !warn_only_once) {
trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
warn_only_once = true;
}
} else {
guarantee0(p_sig_msem != NULL); // must init before use
int rc = ::msem_lock(p_sig_msem, 0);
if (rc == -1 && !warn_only_once) {
trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
warn_only_once = true;
}
}
@ -2203,7 +2203,7 @@ static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
os::errno_name(err), err);
}
#endif
@ -2412,7 +2412,7 @@ static bool checked_mprotect(char* addr, size_t size, int prot) {
bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
if (!rc) {
const char* const s_errno = strerror(errno);
const char* const s_errno = os::errno_name(errno);
warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
return false;
}
@ -2634,7 +2634,7 @@ OSReturn os::set_native_priority(Thread* thread, int newpri) {
if (ret != 0) {
trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
(int)thr, newpri, ret, strerror(ret));
(int)thr, newpri, ret, os::errno_name(ret));
}
return (ret == 0) ? OS_OK : OS_ERR;
}
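Editor's note: this file and the platform files that follow move error reporting from libc strerror to the os:: helpers used throughout this changeset — os::errno_name(e) returns the symbolic constant name from a static table, os::strerror(e) a thread-safe description; both avoid libc strerror's locale dependence and thread-safety caveats. A sketch of the intended split, using only calls that appear in these diffs (entry function name illustrative):

    int ret = pthread_create(&tid, &attr, entry_fn, arg);
    if (ret != 0) {
      // ret is an errno value returned directly; pthread_create does not set errno
      log_warning(os, thread)("pthread_create failed (%d=%s)", ret, os::errno_name(ret));  // e.g. "11=EAGAIN"
      warning("detail: %s", os::strerror(ret));  // e.g. "Resource temporarily unavailable"
    }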

View File

@ -30,6 +30,7 @@
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
@ -101,7 +102,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
int fd = result;
@ -112,7 +113,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -124,7 +125,7 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -397,7 +398,7 @@ static DIR *open_directory_secure(const char* dirname) {
if (errno == ELOOP) {
warning("directory %s is a symlink and is not secure\n", dirname);
} else {
warning("could not open directory %s: %s\n", dirname, strerror(errno));
warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@ -507,7 +508,7 @@ static bool is_file_secure(int fd, const char *filename) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed on %s: %s\n", filename, strerror(errno));
warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
}
return false;
}
@ -543,7 +544,7 @@ static char* get_user_name(uid_t uid) {
if (PrintMiscellaneous && Verbose) {
if (result != 0) {
warning("Could not retrieve passwd entry: %s\n",
strerror(result));
os::strerror(result));
}
else if (p == NULL) {
// this check is added to protect against an observed problem
@ -557,7 +558,7 @@ static char* get_user_name(uid_t uid) {
// Bug Id 89052 was opened with RedHat.
//
warning("Could not retrieve passwd entry: %s\n",
strerror(errno));
os::strerror(errno));
}
else {
warning("Could not determine user name: %s\n",
@ -593,7 +594,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
"Process not found");
}
else /* EPERM */ {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
@ -746,7 +747,7 @@ static void remove_file(const char* path) {
if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -849,7 +850,7 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (PrintMiscellaneous && Verbose) {
warning("could not create directory %s: %s\n",
dirname, strerror(errno));
dirname, os::strerror(errno));
}
return false;
}
@ -900,7 +901,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (errno == ELOOP) {
warning("file %s is a symlink and is not secure\n", filename);
} else {
warning("could not create file %s: %s\n", filename, strerror(errno));
warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// Close the directory and reset the current working directory.
@ -924,7 +925,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not truncate shared memory file: %s\n", strerror(errno));
warning("could not truncate shared memory file: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -933,7 +934,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
warning("could not set shared memory file size: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -968,7 +969,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
"Permission denied");
}
else {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
int fd = result;
@ -1041,7 +1042,7 @@ static char* mmap_create_shared(size_t size) {
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed - %s\n", strerror(errno));
warning("mmap failed - %s\n", os::strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
@ -1109,7 +1110,7 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed: %s\n", strerror(errno));
warning("fstat failed: %s\n", os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
@ -1231,7 +1232,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed: %s\n", strerror(errno));
warning("mmap failed: %s\n", os::strerror(errno));
}
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");

View File

@ -789,7 +789,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
@ -1122,7 +1122,7 @@ void os::die() {
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
const char *s = ::strerror(errno);
const char *s = os::strerror(errno);
size_t n = ::strlen(s);
if (n >= len) {
n = len - 1;
@ -2141,7 +2141,7 @@ static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, size, exec,
strerror(err), err);
os::errno_name(err), err);
}
// NOTE: Bsd kernel does not really reserve the pages for us.
@ -3422,7 +3422,7 @@ void os::init(void) {
Bsd::set_page_size(getpagesize());
if (Bsd::page_size() == -1) {
fatal("os_bsd.cpp: os::init: sysconf failed (%s)", strerror(errno));
fatal("os_bsd.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
}
init_page_sizes((size_t) Bsd::page_size());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "os_bsd.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
@ -100,7 +101,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
int fd = result;
@ -111,7 +112,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -123,7 +124,7 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -309,7 +310,7 @@ static DIR *open_directory_secure(const char* dirname) {
if (errno == ELOOP) {
warning("directory %s is a symlink and is not secure\n", dirname);
} else {
warning("could not open directory %s: %s\n", dirname, strerror(errno));
warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@ -420,7 +421,7 @@ static bool is_file_secure(int fd, const char *filename) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed on %s: %s\n", filename, strerror(errno));
warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
}
return false;
}
@ -459,7 +460,7 @@ static char* get_user_name(uid_t uid) {
if (PrintMiscellaneous && Verbose) {
if (result != 0) {
warning("Could not retrieve passwd entry: %s\n",
strerror(result));
os::strerror(result));
}
else if (p == NULL) {
// this check is added to protect against an observed problem
@ -473,7 +474,7 @@ static char* get_user_name(uid_t uid) {
// Bug Id 89052 was opened with RedHat.
//
warning("Could not retrieve passwd entry: %s\n",
strerror(errno));
os::strerror(errno));
}
else {
warning("Could not determine user name: %s\n",
@ -509,7 +510,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
"Process not found");
}
else /* EPERM */ {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
@ -652,7 +653,7 @@ static void remove_file(const char* path) {
if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -762,7 +763,7 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (PrintMiscellaneous && Verbose) {
warning("could not create directory %s: %s\n",
dirname, strerror(errno));
dirname, os::strerror(errno));
}
return false;
}
@ -804,7 +805,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (errno == ELOOP) {
warning("file %s is a symlink and is not secure\n", filename);
} else {
warning("could not create file %s: %s\n", filename, strerror(errno));
warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// close the directory and reset the current working directory
@ -828,7 +829,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not truncate shared memory file: %s\n", strerror(errno));
warning("could not truncate shared memory file: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -837,7 +838,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
warning("could not set shared memory file size: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -887,7 +888,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
"Permission denied", OS_ERR);
}
else {
THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR);
}
}
int fd = result;
@ -961,7 +962,7 @@ static char* mmap_create_shared(size_t size) {
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed - %s\n", strerror(errno));
warning("mmap failed - %s\n", os::strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
@ -1025,7 +1026,7 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed: %s\n", strerror(errno));
warning("fstat failed: %s\n", os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
@ -1136,7 +1137,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed: %s\n", strerror(errno));
warning("mmap failed: %s\n", os::strerror(errno));
}
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");

View File

@ -594,15 +594,7 @@ void os::Linux::libpthread_init() {
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.
#if __GNUC__ < 3 // gcc 2.x does not support noinline attribute
#define NOINLINE
#else
#define NOINLINE __attribute__ ((noinline))
#endif
static void _expand_stack_to(address bottom) NOINLINE;
static void _expand_stack_to(address bottom) {
static void NOINLINE _expand_stack_to(address bottom) {
address sp;
size_t size;
volatile char *p;
@ -769,7 +761,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
(uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
} else {
log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
strerror(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
}
pthread_attr_destroy(&attr);
@ -890,6 +882,13 @@ void os::free_thread(OSThread* osthread) {
assert(osthread != NULL, "osthread not set");
if (Thread::current()->osthread() == osthread) {
#ifdef ASSERT
sigset_t current;
sigemptyset(&current);
pthread_sigmask(SIG_SETMASK, NULL, &current);
assert(!sigismember(&current, SR_signum), "SR signal should not be blocked!");
#endif
// Restore caller's signal mask
sigset_t sigmask = osthread->caller_sigmask();
pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
@ -1395,7 +1394,7 @@ void os::die() {
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
const char *s = ::strerror(errno);
const char *s = os::strerror(errno);
size_t n = ::strlen(s);
if (n >= len) {
n = len - 1;
@ -2601,7 +2600,7 @@ static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,
strerror(err), err);
os::strerror(err), err);
}
static void warn_fail_commit_memory(char* addr, size_t size,
@ -2609,7 +2608,7 @@ static void warn_fail_commit_memory(char* addr, size_t size,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", p2i(addr), size,
alignment_hint, exec, strerror(err), err);
alignment_hint, exec, os::strerror(err), err);
}
// NOTE: Linux kernel does not really reserve the pages for us.
@ -3912,7 +3911,8 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
// after sigsuspend.
int old_errno = errno;
Thread* thread = Thread::current();
Thread* thread = Thread::current_or_null_safe();
assert(thread != NULL, "Missing current thread in SR_handler");
OSThread* osthread = thread->osthread();
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
@ -3924,7 +3924,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
os::SuspendResume::State state = osthread->sr.suspended();
if (state == os::SuspendResume::SR_SUSPENDED) {
sigset_t suspend_set; // signals for sigsuspend()
sigemptyset(&suspend_set);
// get current set of blocked signals and unblock resume signal
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, SR_signum);
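The added sigemptyset() gives suspend_set defined contents before it is used. The surrounding idiom deserves a note: query the currently blocked set, remove only the resume signal, and sigsuspend() on the result, so the handler sleeps until resumed while everything else stays blocked. A standalone sketch (wait_for_resume and resume_sig are invented names):

#include <signal.h>
#include <pthread.h>

// sigsuspend() atomically installs the given mask and blocks until a
// signal not in that mask is delivered and handled.
static void wait_for_resume(int resume_sig) {
  sigset_t suspend_set;
  sigemptyset(&suspend_set);                       // defined contents
  pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);  // query current mask
  sigdelset(&suspend_set, resume_sig);             // allow only resume
  sigsuspend(&suspend_set);
}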
@ -4178,6 +4178,7 @@ static bool call_chained_handler(struct sigaction *actp, int sig,
// try to honor the signal mask
sigset_t oset;
sigemptyset(&oset);
pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
// call into the chained handler
@ -4188,7 +4189,7 @@ static bool call_chained_handler(struct sigaction *actp, int sig,
}
// restore the signal mask
pthread_sigmask(SIG_SETMASK, &oset, 0);
pthread_sigmask(SIG_SETMASK, &oset, NULL);
}
// Tell jvm's signal handler the signal is taken care of.
return true;
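Same hygiene in call_chained_handler(): oset is initialized before a pthread_sigmask() call that could fail, and the literal 0 becomes NULL for a pointer argument. The save/run/restore pattern as a standalone sketch (with_mask is an invented name):

#include <signal.h>
#include <pthread.h>

// Run fn under a temporary signal mask, then restore the old mask.
static void with_mask(const sigset_t* mask, void (*fn)(void)) {
  sigset_t oset;
  sigemptyset(&oset);                        // defined even on failure
  pthread_sigmask(SIG_SETMASK, mask, &oset);
  fn();
  pthread_sigmask(SIG_SETMASK, &oset, NULL);
}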
@ -4615,7 +4616,7 @@ void os::init(void) {
Linux::set_page_size(sysconf(_SC_PAGESIZE));
if (Linux::page_size() == -1) {
fatal("os_linux.cpp: os::init: sysconf failed (%s)",
strerror(errno));
os::strerror(errno));
}
init_page_sizes((size_t) Linux::page_size());
@ -4633,7 +4634,7 @@ void os::init(void) {
int status;
pthread_condattr_t* _condattr = os::Linux::condAttr();
if ((status = pthread_condattr_init(_condattr)) != 0) {
fatal("pthread_condattr_init: %s", strerror(status));
fatal("pthread_condattr_init: %s", os::strerror(status));
}
// Only set the clock if CLOCK_MONOTONIC is available
if (os::supports_monotonic_clock()) {
@ -4642,7 +4643,7 @@ void os::init(void) {
warning("Unable to use monotonic clock with relative timed-waits" \
" - changes to the time-of-day clock may have adverse affects");
} else {
fatal("pthread_condattr_setclock: %s", strerror(status));
fatal("pthread_condattr_setclock: %s", os::strerror(status));
}
}
}
@ -4888,7 +4889,7 @@ int os::active_processor_count() {
log_trace(os)("active_processor_count: "
"CPU_ALLOC failed (%s) - using "
"online processor count: %d",
strerror(errno), online_cpus);
os::strerror(errno), online_cpus);
return online_cpus;
}
}
@ -4918,7 +4919,7 @@ int os::active_processor_count() {
else {
cpu_count = ::sysconf(_SC_NPROCESSORS_ONLN);
warning("sched_getaffinity failed (%s)- using online processor count (%d) "
"which may exceed available processors", strerror(errno), cpu_count);
"which may exceed available processors", os::strerror(errno), cpu_count);
}
if (cpus_p != &cpus) { // can only be true when CPU_ALLOC used
@ -5769,6 +5770,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// Don't catch signals while blocked; let the running threads have the signals.
// (This allows a debugger to break into the running thread.)
sigset_t oldsigs;
sigemptyset(&oldsigs);
sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "os_linux.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
@ -100,7 +101,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
int fd = result;
@ -111,7 +112,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -123,7 +124,7 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -308,7 +309,7 @@ static DIR *open_directory_secure(const char* dirname) {
if (errno == ELOOP) {
warning("directory %s is a symlink and is not secure\n", dirname);
} else {
warning("could not open directory %s: %s\n", dirname, strerror(errno));
warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@ -419,7 +420,7 @@ static bool is_file_secure(int fd, const char *filename) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed on %s: %s\n", filename, strerror(errno));
warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
}
return false;
}
@ -459,7 +460,7 @@ static char* get_user_name(uid_t uid) {
if (PrintMiscellaneous && Verbose) {
if (result != 0) {
warning("Could not retrieve passwd entry: %s\n",
strerror(result));
os::strerror(result));
}
else if (p == NULL) {
// this check is added to protect against an observed problem
@ -473,7 +474,7 @@ static char* get_user_name(uid_t uid) {
// Bug Id 89052 was opened with RedHat.
//
warning("Could not retrieve passwd entry: %s\n",
strerror(errno));
os::strerror(errno));
}
else {
warning("Could not determine user name: %s\n",
@ -509,7 +510,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
"Process not found");
}
else /* EPERM */ {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
@ -664,7 +665,7 @@ static void remove_file(const char* path) {
if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -772,7 +773,7 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (PrintMiscellaneous && Verbose) {
warning("could not create directory %s: %s\n",
dirname, strerror(errno));
dirname, os::strerror(errno));
}
return false;
}
@ -814,7 +815,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (errno == ELOOP) {
warning("file %s is a symlink and is not secure\n", filename);
} else {
warning("could not create file %s: %s\n", filename, strerror(errno));
warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// close the directory and reset the current working directory
@ -838,7 +839,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not truncate shared memory file: %s\n", strerror(errno));
warning("could not truncate shared memory file: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -847,7 +848,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
warning("could not set shared memory file size: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -897,7 +898,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
"Permission denied", OS_ERR);
}
else {
THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR);
}
}
int fd = result;
@ -970,7 +971,7 @@ static char* mmap_create_shared(size_t size) {
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed - %s\n", strerror(errno));
warning("mmap failed - %s\n", os::strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
@ -1034,7 +1035,7 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed: %s\n", strerror(errno));
warning("fstat failed: %s\n", os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
@ -1151,7 +1152,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed: %s\n", strerror(errno));
warning("mmap failed: %s\n", os::strerror(errno));
}
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");

View File

@ -1144,7 +1144,8 @@ void os::WatcherThreadCrashProtection::check_crash_protection(int sig,
#define check_with_errno(check_type, cond, msg) \
do { \
int err = errno; \
check_type(cond, "%s; error='%s' (errno=%d)", msg, strerror(err), err); \
check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err), \
os::errno_name(err)); \
} while (false)
#define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg)
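The macro reads errno into a local before anything else runs, since evaluating the condition or formatting the message could clobber it; the updated version also reports the symbolic name rather than only the raw number. A standalone sketch of the capture-first shape (plain strerror/fprintf stand in for the VM's os::strerror/check_type machinery):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define CHECK_WITH_ERRNO(cond, msg)                            \
  do {                                                         \
    int err = errno;  /* capture before it can be clobbered */ \
    if (!(cond)) {                                             \
      fprintf(stderr, "%s; error='%s' (errno=%d)\n",           \
              (msg), strerror(err), err);                      \
    }                                                          \
  } while (false)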

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -461,7 +461,7 @@ SolarisAttachOperation* SolarisAttachListener::dequeue() {
while ((res = ::sema_wait(wakeup())) == EINTR)
;
if (res) {
warning("sema_wait failed: %s", strerror(res));
warning("sema_wait failed: %s", os::strerror(res));
return NULL;
}

View File

@ -1009,7 +1009,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
(uintx) tid, describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
} else {
log_warning(os, thread)("Failed to start thread - thr_create failed (%s) for attributes: %s.",
strerror(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
os::errno_name(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
}
if (status != 0) {
@ -1354,7 +1354,7 @@ jlong getTimeMillis() {
jlong os::javaTimeMillis() {
timeval t;
if (gettimeofday(&t, NULL) == -1) {
fatal("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
fatal("os::javaTimeMillis: gettimeofday (%s)", os::strerror(errno));
}
return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
}
@ -1362,7 +1362,7 @@ jlong os::javaTimeMillis() {
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
timeval t;
if (gettimeofday(&t, NULL) == -1) {
fatal("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno));
fatal("os::javaTimeSystemUTC: gettimeofday (%s)", os::strerror(errno));
}
seconds = jlong(t.tv_sec);
nanos = jlong(t.tv_usec) * 1000;
@ -1892,21 +1892,39 @@ void os::Solaris::print_libversion_info(outputStream* st) {
static bool check_addr0(outputStream* st) {
jboolean status = false;
const int read_chunk = 200;
int ret = 0;
int nmap = 0;
int fd = ::open("/proc/self/map",O_RDONLY);
if (fd >= 0) {
prmap_t p;
while (::read(fd, &p, sizeof(p)) > 0) {
if (p.pr_vaddr == 0x0) {
st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
prmap_t *p = NULL;
char *mbuff = (char *) calloc(read_chunk, sizeof(prmap_t));
if (NULL == mbuff) {
::close(fd);
return status;
}
while ((ret = ::read(fd, mbuff, read_chunk*sizeof(prmap_t))) > 0) {
// Stop if read() returned a partial record.
if (0 != ret % sizeof(prmap_t)) {
break;
}
nmap = ret / sizeof(prmap_t);
p = (prmap_t *)mbuff;
for (int i = 0; i < nmap; i++) {
if (p->pr_vaddr == 0x0) {
st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ", p->pr_vaddr, p->pr_size/1024);
st->print("Mapped file: %s, ", p->pr_mapname[0] == '\0' ? "None" : p->pr_mapname);
st->print("Access: ");
st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-");
st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-");
st->print("%s",(p->pr_mflags & MA_READ) ? "r" : "-");
st->print("%s",(p->pr_mflags & MA_WRITE) ? "w" : "-");
st->print("%s",(p->pr_mflags & MA_EXEC) ? "x" : "-");
st->cr();
status = true;
}
p++;
}
}
free(mbuff);
::close(fd);
}
return status;
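The rewritten check_addr0() reads /proc/self/map in chunks of 200 prmap_t records rather than one record per read() call, bails out on a partial record, and fixes the old format string (which passed three arguments for two conversions). A standalone sketch of the chunked-read shape, with Rec standing in for prmap_t and invented names throughout:

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

struct Rec { unsigned long vaddr; unsigned long size; };

static bool scan_for_zero_page(const char* path) {
  const int chunk = 200;
  bool found = false;
  int fd = ::open(path, O_RDONLY);
  if (fd < 0) return false;
  Rec* buf = (Rec*) calloc(chunk, sizeof(Rec));
  if (buf == NULL) { ::close(fd); return false; }
  ssize_t n;
  while ((n = ::read(fd, buf, chunk * sizeof(Rec))) > 0) {
    if (n % (ssize_t)sizeof(Rec) != 0) break;   // partial record: stop
    for (ssize_t i = 0; i < n / (ssize_t)sizeof(Rec); i++) {
      if (buf[i].vaddr == 0) found = true;      // mapping at address 0
    }
  }
  free(buf);
  ::close(fd);
  return found;
}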
@ -2142,7 +2160,7 @@ void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
const char *s = ::strerror(errno);
const char *s = os::strerror(errno);
size_t n = ::strlen(s);
if (n >= len) {
n = len - 1;
@ -2351,7 +2369,7 @@ static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
strerror(err), err);
os::strerror(err), err);
}
static void warn_fail_commit_memory(char* addr, size_t bytes,
@ -2359,7 +2377,7 @@ static void warn_fail_commit_memory(char* addr, size_t bytes,
int err) {
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
alignment_hint, exec, strerror(err), err);
alignment_hint, exec, os::strerror(err), err);
}
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
@ -2740,7 +2758,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
char buf[256];
buf[0] = '\0';
if (addr == NULL) {
jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
jio_snprintf(buf, sizeof(buf), ": %s", os::strerror(err));
}
warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
@ -4354,7 +4372,7 @@ void os::init(void) {
page_size = sysconf(_SC_PAGESIZE);
if (page_size == -1) {
fatal("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
fatal("os_solaris.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
}
init_page_sizes((size_t) page_size);
@ -4366,7 +4384,7 @@ void os::init(void) {
int fd = ::open("/dev/zero", O_RDWR);
if (fd < 0) {
fatal("os::init: cannot open /dev/zero (%s)", strerror(errno));
fatal("os::init: cannot open /dev/zero (%s)", os::strerror(errno));
} else {
Solaris::set_dev_zero_fd(fd);
@ -5607,7 +5625,7 @@ int os::fork_and_exec(char* cmd) {
if (pid < 0) {
// fork failed
warning("fork failed: %s", strerror(errno));
warning("fork failed: %s", os::strerror(errno));
return -1;
} else if (pid == 0) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,7 +102,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
@ -114,7 +114,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -125,7 +125,7 @@ static void save_memory_to_file(char* addr, size_t size) {
result = ::close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -311,7 +311,7 @@ static DIR *open_directory_secure(const char* dirname) {
if (errno == ELOOP) {
warning("directory %s is a symlink and is not secure\n", dirname);
} else {
warning("could not open directory %s: %s\n", dirname, strerror(errno));
warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@ -422,7 +422,7 @@ static bool is_file_secure(int fd, const char *filename) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed on %s: %s\n", filename, strerror(errno));
warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
}
return false;
}
@ -464,7 +464,7 @@ static char* get_user_name(uid_t uid) {
if (PrintMiscellaneous && Verbose) {
if (p == NULL) {
warning("Could not retrieve passwd entry: %s\n",
strerror(errno));
os::strerror(errno));
}
else {
warning("Could not determine user name: %s\n",
@ -500,7 +500,7 @@ static char* get_user_name_slow(int vmid, TRAPS) {
"Process not found");
}
else /* EPERM */ {
THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
THROW_MSG_0(vmSymbols::java_io_IOException(), os::strerror(errno));
}
}
@ -657,7 +657,7 @@ static char* get_user_name(int vmid, TRAPS) {
// In this case, the psinfo file for the process id existed,
// but we didn't have permission to access it.
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
strerror(errno));
os::strerror(errno));
}
// at this point, we don't know if the process id itself doesn't
@ -703,7 +703,7 @@ static void remove_file(const char* path) {
if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -813,7 +813,7 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (PrintMiscellaneous && Verbose) {
warning("could not create directory %s: %s\n",
dirname, strerror(errno));
dirname, os::strerror(errno));
}
return false;
}
@ -855,7 +855,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
if (errno == ELOOP) {
warning("file %s is a symlink and is not secure\n", filename);
} else {
warning("could not create file %s: %s\n", filename, strerror(errno));
warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// close the directory and reset the current working directory
@ -879,7 +879,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not truncate shared memory file: %s\n", strerror(errno));
warning("could not truncate shared memory file: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -888,7 +888,7 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("could not set shared memory file size: %s\n", strerror(errno));
warning("could not set shared memory file size: %s\n", os::strerror(errno));
}
::close(fd);
return -1;
@ -916,7 +916,7 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
"Permission denied", OS_ERR);
}
else {
THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
THROW_MSG_(vmSymbols::java_io_IOException(), os::strerror(errno), OS_ERR);
}
}
int fd = result;
@ -990,7 +990,7 @@ static char* mmap_create_shared(size_t size) {
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed - %s\n", strerror(errno));
warning("mmap failed - %s\n", os::strerror(errno));
}
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
@ -1055,7 +1055,7 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("fstat failed: %s\n", strerror(errno));
warning("fstat failed: %s\n", os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
@ -1172,7 +1172,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
if (mapAddress == MAP_FAILED) {
if (PrintMiscellaneous && Verbose) {
warning("mmap failed: %s\n", strerror(errno));
warning("mmap failed: %s\n", os::strerror(errno));
}
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
@ -49,7 +50,7 @@ ThreadCritical::ThreadCritical() {
if (global_mut_owner != owner) {
if (os::Solaris::mutex_lock(&global_mut))
fatal("ThreadCritical::ThreadCritical: mutex_lock failed (%s)",
strerror(errno));
os::strerror(errno));
assert(global_mut_count == 0, "must have clean count");
assert(global_mut_owner == -1, "must have clean owner");
}
@ -68,7 +69,7 @@ ThreadCritical::~ThreadCritical() {
if (global_mut_count == 0) {
global_mut_owner = -1;
if (os::Solaris::mutex_unlock(&global_mut))
fatal("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", strerror(errno));
fatal("ThreadCritical::~ThreadCritical: mutex_unlock failed (%s)", os::strerror(errno));
}
} else {
assert (Threads::number_of_threads() == 0, "valid only during initialization");

View File

@ -642,7 +642,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
} else {
log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
strerror(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
}
if (thread_handle == NULL) {
@ -1898,7 +1898,7 @@ size_t os::lasterror(char* buf, size_t len) {
if (errno != 0) {
// C runtime error that has no corresponding DOS error code
const char* s = strerror(errno);
const char* s = os::strerror(errno);
size_t n = strlen(s);
if (n >= len) n = len - 1;
strncpy(buf, s, n);
@ -2186,13 +2186,6 @@ extern "C" void events();
// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374
#define def_excpt(val) #val, val
struct siglabel {
char *name;
int number;
};
// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
@ -2202,8 +2195,9 @@ struct siglabel {
#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
#define def_excpt(val) { #val, (val) }
struct siglabel exceptlabels[] = {
static const struct { char* name; uint number; } exceptlabels[] = {
def_excpt(EXCEPTION_ACCESS_VIOLATION),
def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
def_excpt(EXCEPTION_BREAKPOINT),
@ -2228,16 +2222,18 @@ struct siglabel exceptlabels[] = {
def_excpt(EXCEPTION_GUARD_PAGE),
def_excpt(EXCEPTION_INVALID_HANDLE),
def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
def_excpt(EXCEPTION_HEAP_CORRUPTION),
def_excpt(EXCEPTION_HEAP_CORRUPTION)
#ifdef _M_IA64
def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
, def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
#endif
NULL, 0
};
#undef def_excpt
const char* os::exception_name(int exception_code, char *buf, size_t size) {
for (int i = 0; exceptlabels[i].name != NULL; i++) {
if (exceptlabels[i].number == exception_code) {
uint code = static_cast<uint>(exception_code);
for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
if (exceptlabels[i].number == code) {
jio_snprintf(buf, size, "%s", exceptlabels[i].name);
return buf;
}
@ -2445,7 +2441,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
jio_snprintf(buf, sizeof(buf), "Execution protection violation "
"at " INTPTR_FORMAT
", unguarding " INTPTR_FORMAT ": %s", addr,
page_start, (res ? "success" : strerror(errno)));
page_start, (res ? "success" : os::strerror(errno)));
tty->print_raw_cr(buf);
}
@ -5638,9 +5634,11 @@ int os::get_signal_number(const char* name) {
"TERM", SIGTERM, // software term signal from kill
"BREAK", SIGBREAK, // Ctrl-Break sequence
"ILL", SIGILL}; // illegal instruction
for(int i=0;i<sizeof(siglabels)/sizeof(struct siglabel);i++)
if(!strcmp(name, siglabels[i].name))
for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
if (strcmp(name, siglabels[i].name) == 0) {
return siglabels[i].number;
}
}
return -1;
}
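Both tables in this file move from a NULL-sentinel terminator to ARRAY_SIZE-bounded iteration, which removes the dummy last entry and the comma juggling around the #ifdef'd element. A sketch of the lookup shape (ARRAY_SIZE is assumed to be HotSpot's usual utility macro):

#include <csignal>
#include <cstring>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const struct { const char* name; int number; } sigs[] = {
  { "TERM", SIGTERM },
  { "ILL",  SIGILL }
};

// No sentinel entry: the loop bound comes from the array size itself.
static int signal_number(const char* name) {
  for (unsigned i = 0; i < ARRAY_SIZE(sigs); ++i) {
    if (strcmp(name, sigs[i].name) == 0) {
      return sigs[i].number;
    }
  }
  return -1;
}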

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,7 +96,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (fd == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not create Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
} else {
for (size_t remaining = size; remaining > 0;) {
@ -105,7 +105,7 @@ static void save_memory_to_file(char* addr, size_t size) {
if (nbytes == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not write Perfdata save file: %s: %s\n",
destfile, strerror(errno));
destfile, os::strerror(errno));
}
break;
}
@ -117,7 +117,7 @@ static void save_memory_to_file(char* addr, size_t size) {
int result = ::_close(fd);
if (PrintMiscellaneous && Verbose) {
if (result == OS_ERR) {
warning("Could not close %s: %s\n", destfile, strerror(errno));
warning("Could not close %s: %s\n", destfile, os::strerror(errno));
}
}
}
@ -497,7 +497,7 @@ static void remove_file(const char* dirname, const char* filename) {
if (PrintMiscellaneous && Verbose) {
if (errno != ENOENT) {
warning("Could not unlink shared memory backing"
" store file %s : %s\n", path, strerror(errno));
" store file %s : %s\n", path, os::strerror(errno));
}
}
}
@ -1358,7 +1358,7 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
if (ret_code == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("Could not get status information from file %s: %s\n",
filename, strerror(errno));
filename, os::strerror(errno));
}
CloseHandle(fmh);
CloseHandle(fh);
@ -1553,7 +1553,7 @@ static size_t sharedmem_filesize(const char* filename, TRAPS) {
//
if (::stat(filename, &statbuf) == OS_ERR) {
if (PrintMiscellaneous && Verbose) {
warning("stat %s failed: %s\n", filename, strerror(errno));
warning("stat %s failed: %s\n", filename, os::strerror(errno));
}
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,7 @@
#include "compiler/compileLog.hpp"
#include "compiler/compilerDirectives.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/timerTrace.hpp"
typedef enum {
_t_compile,

View File

@ -32,6 +32,7 @@
#include "c1/c1_LinearScan.hpp"
#include "c1/c1_ValueStack.hpp"
#include "code/vmreg.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef PRODUCT

View File

@ -1,4 +1,5 @@
/* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
/*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +29,7 @@
#include "ci/ciKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
@ -574,7 +576,7 @@ class CompileReplay : public StackObj {
Method* method = parse_method(CHECK);
if (had_error()) return;
/* just copied from Method, to build interpret data*/
if (InstanceRefKlass::owns_pending_list_lock((JavaThread*)THREAD)) {
if (ReferencePendingListLocker::is_locked_by_self()) {
return;
}
// To be properly initialized, some profiling in the MDO needs the

View File

@ -5372,12 +5372,12 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
}
}
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
ResourceMark rm;
// print out the superclass.
const char * from = ik->external_name();
if (ik->java_super() != NULL) {
log_info(classresolve)("%s %s (super)",
log_debug(classresolve)("%s %s (super)",
from,
ik->java_super()->external_name());
}
@ -5388,7 +5388,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
for (int i = 0; i < length; i++) {
const Klass* const k = local_interfaces->at(i);
const char * to = k->external_name();
log_info(classresolve)("%s %s (interface)", from, to);
log_debug(classresolve)("%s %s (interface)", from, to);
}
}
}
@ -5698,15 +5698,16 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
}
if (!is_internal()) {
if (TraceClassLoadingPreorder) {
tty->print("[Loading %s",
_class_name->as_klass_external_name());
if (log_is_enabled(Debug, classload, preorder)) {
ResourceMark rm(THREAD);
outputStream* log = LogHandle(classload, preorder)::debug_stream();
log->print("%s", _class_name->as_klass_external_name());
if (stream->source() != NULL) {
tty->print(" from %s", stream->source());
log->print(" source: %s", stream->source());
}
tty->print_cr("]");
log->cr();
}
#if INCLUDE_CDS
if (DumpLoadedClassList != NULL && stream->source() != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into CDS archive

View File

@ -515,22 +515,6 @@ char* java_lang_String::as_quoted_ascii(oop java_string) {
return result;
}
unsigned int java_lang_String::hash_string(oop java_string) {
int length = java_lang_String::length(java_string);
// Zero length string doesn't necessarily hash to zero.
if (length == 0) {
return StringTable::hash_string((jchar*) NULL, 0);
}
typeArrayOop value = java_lang_String::value(java_string);
bool is_latin1 = java_lang_String::is_latin1(java_string);
if (is_latin1) {
return StringTable::hash_string(value->byte_at_addr(0), length);
} else {
return StringTable::hash_string(value->char_at_addr(0), length);
}
}
Symbol* java_lang_String::as_symbol(Handle java_string, TRAPS) {
oop obj = java_string();
typeArrayOop value = java_lang_String::value(obj);
@ -1473,6 +1457,12 @@ void java_lang_ThreadGroup::compute_offsets() {
compute_offset(_ngroups_offset, k, vmSymbols::ngroups_name(), vmSymbols::int_signature());
}
void java_lang_Throwable::compute_offsets() {
Klass* k = SystemDictionary::Throwable_klass();
compute_offset(depth_offset, k, vmSymbols::depth_name(), vmSymbols::int_signature());
}
oop java_lang_Throwable::unassigned_stacktrace() {
InstanceKlass* ik = SystemDictionary::Throwable_klass();
address addr = ik->static_field_addr(static_unassigned_stacktrace_offset);
@ -1492,11 +1482,13 @@ void java_lang_Throwable::set_backtrace(oop throwable, oop value) {
throwable->release_obj_field_put(backtrace_offset, value);
}
oop java_lang_Throwable::message(oop throwable) {
return throwable->obj_field(detailMessage_offset);
int java_lang_Throwable::depth(oop throwable) {
return throwable->int_field(depth_offset);
}
void java_lang_Throwable::set_depth(oop throwable, int value) {
throwable->int_field_put(depth_offset, value);
}
oop java_lang_Throwable::message(Handle throwable) {
return throwable->obj_field(detailMessage_offset);
@ -1546,10 +1538,12 @@ static inline bool version_matches(Method* method, int version) {
return method != NULL && (method->constants()->version() == version);
}
// This class provides a simple wrapper over the internal structure of
// exception backtrace to insulate users of the backtrace from needing
// to know what it looks like.
class BacktraceBuilder: public StackObj {
friend class BacktraceIterator;
private:
Handle _backtrace;
objArrayOop _head;
@ -1560,8 +1554,6 @@ class BacktraceBuilder: public StackObj {
int _index;
NoSafepointVerifier _nsv;
public:
enum {
trace_methods_offset = java_lang_Throwable::trace_methods_offset,
trace_bcis_offset = java_lang_Throwable::trace_bcis_offset,
@ -1594,6 +1586,8 @@ class BacktraceBuilder: public StackObj {
return cprefs;
}
public:
// constructor for new backtrace
BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL), _cprefs(NULL) {
expand(CHECK);
@ -1679,9 +1673,68 @@ class BacktraceBuilder: public StackObj {
};
struct BacktraceElement : public StackObj {
int _method_id;
int _bci;
int _version;
int _cpref;
Handle _mirror;
BacktraceElement(Handle mirror, int mid, int version, int bci, int cpref) :
_mirror(mirror), _method_id(mid), _version(version), _bci(bci), _cpref(cpref) {}
};
class BacktraceIterator : public StackObj {
int _index;
objArrayHandle _result;
objArrayHandle _mirrors;
typeArrayHandle _methods;
typeArrayHandle _bcis;
typeArrayHandle _cprefs;
void init(objArrayHandle result, Thread* thread) {
// Get method id, bci, version and mirror from chunk
_result = result;
if (_result.not_null()) {
_methods = typeArrayHandle(thread, BacktraceBuilder::get_methods(_result));
_bcis = typeArrayHandle(thread, BacktraceBuilder::get_bcis(_result));
_mirrors = objArrayHandle(thread, BacktraceBuilder::get_mirrors(_result));
_cprefs = typeArrayHandle(thread, BacktraceBuilder::get_cprefs(_result));
_index = 0;
}
}
public:
BacktraceIterator(objArrayHandle result, Thread* thread) {
init(result, thread);
assert(_methods.is_null() || _methods->length() == java_lang_Throwable::trace_chunk_size, "lengths don't match");
}
BacktraceElement next(Thread* thread) {
BacktraceElement e (Handle(thread, _mirrors->obj_at(_index)),
_methods->short_at(_index),
Backtrace::version_at(_bcis->int_at(_index)),
Backtrace::bci_at(_bcis->int_at(_index)),
_cprefs->short_at(_index));
_index++;
if (_index >= java_lang_Throwable::trace_chunk_size) {
int next_offset = java_lang_Throwable::trace_next_offset;
// Get next chunk
objArrayHandle result (thread, objArrayOop(_result->obj_at(next_offset)));
init(result, thread);
}
return e;
}
bool repeat() {
return _result.not_null() && _mirrors->obj_at(_index) != NULL;
}
};
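For orientation: a backtrace is stored as a linked list of fixed-size chunks of parallel arrays (method ids, bcis, mirrors, cprefs), and BacktraceIterator hides the chunk-hopping that print_stack_trace and the stack-trace accessors previously each reimplemented. A simplified standalone model of the same walk, with invented names and two parallel arrays instead of four:

#include <cstddef>
#include <utility>
#include <vector>

struct Chunk {
  std::vector<int> method_ids;   // parallel with bcis
  std::vector<int> bcis;
  const Chunk* next = nullptr;   // link to the following chunk
};

struct BtIter {
  const Chunk* c = nullptr;
  size_t i = 0;
  bool repeat() const { return c != nullptr && i < c->method_ids.size(); }
  std::pair<int, int> next() {
    std::pair<int, int> e(c->method_ids[i], c->bcis[i]);
    if (++i == c->method_ids.size()) { c = c->next; i = 0; }  // hop chunks
    return e;
  }
};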
// Print stack trace element to resource allocated buffer
char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
int method_id, int version, int bci, int cpref) {
static void print_stack_element_to_stream(outputStream* st, Handle mirror, int method_id,
int version, int bci, int cpref) {
ResourceMark rm;
// Get strings and string lengths
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
@ -1752,13 +1805,6 @@ char* java_lang_Throwable::print_stack_element_to_buffer(Handle mirror,
}
}
return buf;
}
void java_lang_Throwable::print_stack_element(outputStream *st, Handle mirror,
int method_id, int version, int bci, int cpref) {
ResourceMark rm;
char* buf = print_stack_element_to_buffer(mirror, method_id, version, bci, cpref);
st->print_cr("%s", buf);
}
@ -1767,11 +1813,7 @@ void java_lang_Throwable::print_stack_element(outputStream *st, const methodHand
int method_id = method->orig_method_idnum();
int version = method->constants()->version();
int cpref = method->name_index();
print_stack_element(st, mirror, method_id, version, bci, cpref);
}
const char* java_lang_Throwable::no_stack_trace_message() {
return "\t<<no stack trace available>>";
print_stack_element_to_stream(st, mirror, method_id, version, bci, cpref);
}
/**
@ -1788,32 +1830,17 @@ void java_lang_Throwable::print_stack_trace(Handle throwable, outputStream* st)
while (throwable.not_null()) {
objArrayHandle result (THREAD, objArrayOop(backtrace(throwable())));
if (result.is_null()) {
st->print_raw_cr(no_stack_trace_message());
st->print_raw_cr("\t<<no stack trace available>>");
return;
}
BacktraceIterator iter(result, THREAD);
while (result.not_null()) {
// Get method id, bci, version and mirror from chunk
typeArrayHandle methods (THREAD, BacktraceBuilder::get_methods(result));
typeArrayHandle bcis (THREAD, BacktraceBuilder::get_bcis(result));
objArrayHandle mirrors (THREAD, BacktraceBuilder::get_mirrors(result));
typeArrayHandle cprefs (THREAD, BacktraceBuilder::get_cprefs(result));
int length = methods()->length();
for (int index = 0; index < length; index++) {
Handle mirror(THREAD, mirrors->obj_at(index));
// NULL mirror means end of stack trace
if (mirror.is_null()) goto handle_cause;
int method = methods->short_at(index);
int version = Backtrace::version_at(bcis->int_at(index));
int bci = Backtrace::bci_at(bcis->int_at(index));
int cpref = cprefs->short_at(index);
print_stack_element(st, mirror, method, version, bci, cpref);
while (iter.repeat()) {
BacktraceElement bte = iter.next(THREAD);
print_stack_element_to_stream(st, bte._mirror, bte._method_id, bte._version, bte._bci, bte._cpref);
}
result = objArrayHandle(THREAD, objArrayOop(result->obj_at(trace_next_offset)));
}
handle_cause:
{
// Call getCause() which doesn't necessarily return the _cause field.
EXCEPTION_MARK;
JavaValue cause(T_OBJECT);
JavaCalls::call_virtual(&cause,
@ -1865,6 +1892,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
int max_depth = MaxJavaStackTraceDepth;
JavaThread* thread = (JavaThread*)THREAD;
BacktraceBuilder bt(CHECK);
// If there is no Java frame just return the method that was being called
@ -1872,6 +1900,8 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
if (!thread->has_last_Java_frame()) {
if (max_depth >= 1 && method() != NULL) {
bt.push(method(), 0, CHECK);
log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), 1);
set_depth(throwable(), 1);
set_backtrace(throwable(), bt.backtrace());
}
return;
@ -1979,8 +2009,11 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
total_count++;
}
log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), total_count);
// Put completed stack trace into throwable object
set_backtrace(throwable(), bt.backtrace());
set_depth(throwable(), total_count);
}
void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHandle& method) {
@ -2034,94 +2067,60 @@ void java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(Handle t
// methods as preallocated errors aren't created by "java" code.
// fill in as much stack trace as possible
typeArrayOop methods = BacktraceBuilder::get_methods(backtrace);
int max_chunks = MIN2(methods->length(), (int)MaxJavaStackTraceDepth);
int chunk_count = 0;
for (;!st.at_end(); st.next()) {
bt.push(st.method(), st.bci(), CHECK);
chunk_count++;
// Bail-out for deep stacks
if (chunk_count >= max_chunks) break;
if (chunk_count >= trace_chunk_size) break;
}
set_depth(throwable(), chunk_count);
log_info(stacktrace)("%s, %d", throwable->klass()->external_name(), chunk_count);
// We support the Throwable immutability protocol defined for Java 7.
java_lang_Throwable::set_stacktrace(throwable(), java_lang_Throwable::unassigned_stacktrace());
assert(java_lang_Throwable::unassigned_stacktrace() != NULL, "not initialized");
}
void java_lang_Throwable::get_stack_trace_elements(Handle throwable,
objArrayHandle stack_trace_array_h, TRAPS) {
int java_lang_Throwable::get_stack_trace_depth(oop throwable, TRAPS) {
if (throwable == NULL) {
THROW_0(vmSymbols::java_lang_NullPointerException());
}
objArrayOop chunk = objArrayOop(backtrace(throwable));
int depth = 0;
if (chunk != NULL) {
// Iterate over chunks and count full ones
while (true) {
objArrayOop next = objArrayOop(chunk->obj_at(trace_next_offset));
if (next == NULL) break;
depth += trace_chunk_size;
chunk = next;
}
assert(chunk != NULL && chunk->obj_at(trace_next_offset) == NULL, "sanity check");
// Count element in remaining partial chunk. NULL value for mirror
// marks the end of the stack trace elements that are saved.
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
assert(mirrors != NULL, "sanity check");
for (int i = 0; i < mirrors->length(); i++) {
if (mirrors->obj_at(i) == NULL) break;
depth++;
}
}
return depth;
if (throwable.is_null() || stack_trace_array_h.is_null()) {
THROW(vmSymbols::java_lang_NullPointerException());
}
assert(stack_trace_array_h->is_objArray(), "Stack trace array should be an array of StackTraceElement");
oop java_lang_Throwable::get_stack_trace_element(oop throwable, int index, TRAPS) {
if (throwable == NULL) {
THROW_0(vmSymbols::java_lang_NullPointerException());
}
if (index < 0) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
// Compute how many chunks to skip and index into actual chunk
objArrayOop chunk = objArrayOop(backtrace(throwable));
int skip_chunks = index / trace_chunk_size;
int chunk_index = index % trace_chunk_size;
while (chunk != NULL && skip_chunks > 0) {
chunk = objArrayOop(chunk->obj_at(trace_next_offset));
skip_chunks--;
}
if (chunk == NULL) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
// Get method id, bci, version, mirror and cpref from chunk
typeArrayOop methods = BacktraceBuilder::get_methods(chunk);
typeArrayOop bcis = BacktraceBuilder::get_bcis(chunk);
objArrayOop mirrors = BacktraceBuilder::get_mirrors(chunk);
typeArrayOop cprefs = BacktraceBuilder::get_cprefs(chunk);
assert(methods != NULL && bcis != NULL && mirrors != NULL, "sanity check");
int method = methods->short_at(chunk_index);
int version = Backtrace::version_at(bcis->int_at(chunk_index));
int bci = Backtrace::bci_at(bcis->int_at(chunk_index));
int cpref = cprefs->short_at(chunk_index);
Handle mirror(THREAD, mirrors->obj_at(chunk_index));
// Chunk may be only partially full
if (mirror.is_null()) {
THROW_(vmSymbols::java_lang_IndexOutOfBoundsException(), NULL);
}
oop element = java_lang_StackTraceElement::create(mirror, method, version, bci, cpref, CHECK_0);
return element;
if (stack_trace_array_h->length() != depth(throwable())) {
THROW(vmSymbols::java_lang_IndexOutOfBoundsException());
}
oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
int version, int bci, int cpref, TRAPS) {
objArrayHandle result(THREAD, objArrayOop(backtrace(throwable())));
BacktraceIterator iter(result, THREAD);
int index = 0;
while (iter.repeat()) {
BacktraceElement bte = iter.next(THREAD);
Handle stack_trace_element(THREAD, stack_trace_array_h->obj_at(index++));
if (stack_trace_element.is_null()) {
THROW(vmSymbols::java_lang_NullPointerException());
}
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(bte._mirror()));
methodHandle method (THREAD, holder->method_with_orig_idnum(bte._method_id, bte._version));
java_lang_StackTraceElement::fill_in(stack_trace_element, holder,
method,
bte._version,
bte._bci,
bte._cpref, CHECK);
}
}
oop java_lang_StackTraceElement::create(const methodHandle& method, int bci, TRAPS) {
// Allocate java.lang.StackTraceElement instance
Klass* k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
@ -2132,37 +2131,45 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
Handle element = ik->allocate_instance_handle(CHECK_0);
int cpref = method->name_index();
int version = method->constants()->version();
fill_in(element, method->method_holder(), method, version, bci, cpref, CHECK_0);
return element();
}
void java_lang_StackTraceElement::fill_in(Handle element,
InstanceKlass* holder, const methodHandle& method,
int version, int bci, int cpref, TRAPS) {
assert(element->is_a(SystemDictionary::StackTraceElement_klass()), "sanity check");
// Fill in class name
ResourceMark rm(THREAD);
InstanceKlass* holder = InstanceKlass::cast(java_lang_Class::as_Klass(mirror()));
const char* str = holder->external_name();
oop classname = StringTable::intern((char*) str, CHECK_0);
oop classname = StringTable::intern((char*) str, CHECK);
java_lang_StackTraceElement::set_declaringClass(element(), classname);
Method* method = holder->method_with_orig_idnum(method_id, version);
// The method can be NULL if the requested class version is gone
Symbol* sym = (method != NULL) ? method->name() : holder->constants()->symbol_at(cpref);
Symbol* sym = !method.is_null() ? method->name() : holder->constants()->symbol_at(cpref);
// Fill in method name
oop methodname = StringTable::intern(sym, CHECK_0);
oop methodname = StringTable::intern(sym, CHECK);
java_lang_StackTraceElement::set_methodName(element(), methodname);
// Fill in module name and version
ModuleEntry* module = holder->module();
if (module->is_named()) {
oop module_name = StringTable::intern(module->name(), CHECK_0);
oop module_name = StringTable::intern(module->name(), CHECK);
java_lang_StackTraceElement::set_moduleName(element(), module_name);
oop module_version;
if (module->version() != NULL) {
module_version = StringTable::intern(module->version(), CHECK_0);
module_version = StringTable::intern(module->version(), CHECK);
} else {
module_version = NULL;
}
java_lang_StackTraceElement::set_moduleVersion(element(), module_version);
}
if (!version_matches(method, version)) {
if (!version_matches(method(), version)) {
// The method was redefined, accurate line number information isn't available
java_lang_StackTraceElement::set_fileName(element(), NULL);
java_lang_StackTraceElement::set_lineNumber(element(), -1);
@ -2171,20 +2178,12 @@ oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
Symbol* source = Backtrace::get_source_file_name(holder, version);
if (ShowHiddenFrames && source == NULL)
source = vmSymbols::unknown_class_name();
oop filename = StringTable::intern(source, CHECK_0);
oop filename = StringTable::intern(source, CHECK);
java_lang_StackTraceElement::set_fileName(element(), filename);
int line_number = Backtrace::get_line_number(method, bci);
java_lang_StackTraceElement::set_lineNumber(element(), line_number);
}
return element();
}
oop java_lang_StackTraceElement::create(const methodHandle& method, int bci, TRAPS) {
Handle mirror (THREAD, method->method_holder()->java_mirror());
int method_id = method->orig_method_idnum();
int cpref = method->name_index();
return create(mirror, method_id, method->constants()->version(), bci, cpref, THREAD);
}
Method* java_lang_StackFrameInfo::get_method(Handle stackFrame, InstanceKlass* holder, TRAPS) {
@ -3629,8 +3628,8 @@ GrowableArray<Klass*>* java_lang_Class::_fixup_mirror_list = NULL;
GrowableArray<Klass*>* java_lang_Class::_fixup_module_field_list = NULL;
int java_lang_Throwable::backtrace_offset;
int java_lang_Throwable::detailMessage_offset;
int java_lang_Throwable::cause_offset;
int java_lang_Throwable::stackTrace_offset;
int java_lang_Throwable::depth_offset;
int java_lang_Throwable::static_unassigned_stacktrace_offset;
int java_lang_reflect_AccessibleObject::override_offset;
int java_lang_reflect_Method::clazz_offset;
@ -3841,7 +3840,6 @@ void JavaClasses::compute_hard_coded_offsets() {
// Throwable Class
java_lang_Throwable::backtrace_offset = java_lang_Throwable::hc_backtrace_offset * x + header;
java_lang_Throwable::detailMessage_offset = java_lang_Throwable::hc_detailMessage_offset * x + header;
java_lang_Throwable::cause_offset = java_lang_Throwable::hc_cause_offset * x + header;
java_lang_Throwable::stackTrace_offset = java_lang_Throwable::hc_stackTrace_offset * x + header;
java_lang_Throwable::static_unassigned_stacktrace_offset = java_lang_Throwable::hc_static_unassigned_stacktrace_offset * x;
@ -3894,6 +3892,7 @@ void JavaClasses::compute_hard_coded_offsets() {
void JavaClasses::compute_offsets() {
// java_lang_Class::compute_offsets was called earlier in bootstrap
java_lang_ClassLoader::compute_offsets();
java_lang_Throwable::compute_offsets();
java_lang_Thread::compute_offsets();
java_lang_ThreadGroup::compute_offsets();
java_lang_invoke_MethodHandle::compute_offsets();
@ -4048,8 +4047,8 @@ void JavaClasses::check_offsets() {
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, backtrace, "Ljava/lang/Object;");
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, detailMessage, "Ljava/lang/String;");
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, cause, "Ljava/lang/Throwable;");
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, stackTrace, "[Ljava/lang/StackTraceElement;");
CHECK_OFFSET("java/lang/Throwable", java_lang_Throwable, depth, "I");
// Boxed primitive objects (java_lang_boxing_object)

View File

@ -155,11 +155,6 @@ class java_lang_String : AllStatic {
}
static unsigned int hash_code(oop java_string);
static unsigned int latin1_hash_code(typeArrayOop value, int len);
// This is the string hash code used by the StringTable, which may be
// the same as String.hashCode or an alternate hash code.
static unsigned int hash_string(oop java_string);
static bool equals(oop java_string, jchar* chars, int len);
static bool equals(oop str1, oop str2);
@ -456,6 +451,7 @@ class java_lang_ThreadGroup : AllStatic {
class java_lang_Throwable: AllStatic {
friend class BacktraceBuilder;
friend class BacktraceIterator;
private:
// Offsets
@ -481,16 +477,12 @@ class java_lang_Throwable: AllStatic {
static int backtrace_offset;
static int detailMessage_offset;
static int cause_offset;
static int stackTrace_offset;
static int depth_offset;
static int static_unassigned_stacktrace_offset;
// Printing
static char* print_stack_element_to_buffer(Handle mirror, int method, int version, int bci, int cpref);
// StackTrace (programmatic access, new since 1.4)
static void clear_stacktrace(oop throwable);
// No stack trace available
static const char* no_stack_trace_message();
// Stacktrace (post JDK 1.7.0 to allow immutability protocol to be followed)
static void set_stacktrace(oop throwable, oop st_element_array);
static oop unassigned_stacktrace();
@ -499,19 +491,20 @@ class java_lang_Throwable: AllStatic {
// Backtrace
static oop backtrace(oop throwable);
static void set_backtrace(oop throwable, oop value);
static int depth(oop throwable);
static void set_depth(oop throwable, int value);
// Needed by JVMTI to filter out this internal field.
static int get_backtrace_offset() { return backtrace_offset;}
static int get_detailMessage_offset() { return detailMessage_offset;}
// Message
static oop message(oop throwable);
static oop message(Handle throwable);
static void set_message(oop throwable, oop value);
static Symbol* detail_message(oop throwable);
static void print_stack_element(outputStream *st, Handle mirror, int method,
int version, int bci, int cpref);
static void print_stack_element(outputStream *st, const methodHandle& method, int bci);
static void print_stack_usage(Handle stream);
static void compute_offsets();
// Allocate space for backtrace (created but stack trace not filled in)
static void allocate_backtrace(Handle throwable, TRAPS);
// Fill in current stack trace for throwable with preallocated backtrace (no GC)
@ -520,8 +513,7 @@ class java_lang_Throwable: AllStatic {
static void fill_in_stack_trace(Handle throwable, const methodHandle& method, TRAPS);
static void fill_in_stack_trace(Handle throwable, const methodHandle& method = methodHandle());
// Programmatic access to stack trace
static oop get_stack_trace_element(oop throwable, int index, TRAPS);
static int get_stack_trace_depth(oop throwable, TRAPS);
static void get_stack_trace_elements(Handle throwable, objArrayHandle stack_trace, TRAPS);
// Printing
static void print(Handle throwable, outputStream* st);
static void print_stack_trace(Handle throwable, outputStream* st);
@ -1333,7 +1325,6 @@ class java_lang_StackTraceElement: AllStatic {
static int fileName_offset;
static int lineNumber_offset;
public:
// Setters
static void set_moduleName(oop element, oop value);
static void set_moduleVersion(oop element, oop value);
@ -1342,10 +1333,13 @@ class java_lang_StackTraceElement: AllStatic {
static void set_fileName(oop element, oop value);
static void set_lineNumber(oop element, int value);
public:
// Create an instance of StackTraceElement
static oop create(Handle mirror, int method, int version, int bci, int cpref, TRAPS);
static oop create(const methodHandle& method, int bci, TRAPS);
static void fill_in(Handle element, InstanceKlass* holder, const methodHandle& method,
int version, int bci, int cpref, TRAPS);
// Debugging
friend class JavaClasses;
};

View File

@ -94,15 +94,27 @@ volatile int StringTable::_parallel_claimed_idx = 0;
CompactHashtable<oop, char> StringTable::_shared_table;
// Pick hashing algorithm
template<typename T>
unsigned int StringTable::hash_string(const T* s, int len) {
unsigned int StringTable::hash_string(const jchar* s, int len) {
return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
java_lang_String::hash_code(s, len);
}
// Explicit instantiation for all supported types.
template unsigned int StringTable::hash_string<jchar>(const jchar* s, int len);
template unsigned int StringTable::hash_string<jbyte>(const jbyte* s, int len);
unsigned int StringTable::hash_string(oop string) {
EXCEPTION_MARK;
if (string == NULL) {
return hash_string((jchar*)NULL, 0);
}
ResourceMark rm(THREAD);
// All String oops are hashed as unicode
int length;
jchar* chars = java_lang_String::as_unicode_string(string, length, THREAD);
if (chars != NULL) {
return hash_string(chars, length);
} else {
vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "unable to create Unicode string for verification");
return 0;
}
}
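With the consolidated StringTable::hash_string(oop) always hashing the unicode form, the jbyte instantiation and the duplicate in javaClasses.cpp can go. For reference, the default (non-alternate) path is String.hashCode's polynomial hash; the alternate path applies a seeded Murmur3 once rehashing has been triggered, to resist hash flooding. A sketch of the default formula (jchar modeled as unsigned short):

// h = s[0]*31^(n-1) + s[1]*31^(n-2) + ... + s[n-1],
// as in java.lang.String.hashCode().
static unsigned int java_string_hash(const unsigned short* s, int len) {
  unsigned int h = 0;
  for (int i = 0; i < len; i++) {
    h = 31 * h + s[i];
  }
  return h;
}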
oop StringTable::lookup_shared(jchar* name, int len) {
// java_lang_String::hash_code() was used to compute hash values in the shared table. Don't
@ -398,7 +410,7 @@ void StringTable::verify() {
for ( ; p != NULL; p = p->next()) {
oop s = p->literal();
guarantee(s != NULL, "interned string is NULL");
unsigned int h = java_lang_String::hash_string(s);
unsigned int h = hash_string(s);
guarantee(p->hash() == h, "broken hash in string table entry");
guarantee(the_table()->hash_to_index(h) == i,
"wrong index in string table");
@ -498,7 +510,7 @@ StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
return _verify_fail_done;
}
unsigned int h = java_lang_String::hash_string(str);
unsigned int h = hash_string(str);
if (e_ptr->hash() != h) {
if (mesg_mode == _verify_with_mesgs) {
tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -111,7 +111,8 @@ public:
// Hashing algorithm, used as the hash value used by the
// StringTable for bucket selection and comparison (stored in the
// HashtableEntry structures). This is used in the String.intern() method.
template<typename T> static unsigned int hash_string(const T* s, int len);
static unsigned int hash_string(const jchar* s, int len);
static unsigned int hash_string(oop string);
// Internal test.
static void test_alt_hash() PRODUCT_RETURN;

View File

@ -67,6 +67,7 @@
#include "runtime/signature.hpp"
#include "services/classLoadingService.hpp"
#include "services/threadService.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/macros.hpp"
#include "utilities/ticks.hpp"
#if INCLUDE_CDS
@ -1650,6 +1651,8 @@ void SystemDictionary::define_instance_class(instanceKlassHandle k, TRAPS) {
}
TRACE_KLASS_DEFINITION(k, THREAD);
}
// Support parallel classloading

View File

@ -61,7 +61,7 @@ bool VerificationType::is_reference_assignable_from(
Klass* obj = SystemDictionary::resolve_or_fail(
name(), Handle(THREAD, klass->class_loader()),
Handle(THREAD, klass->protection_domain()), true, CHECK_false);
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
Verifier::trace_class_resolution(obj, klass());
}
@ -80,7 +80,7 @@ bool VerificationType::is_reference_assignable_from(
Klass* from_class = SystemDictionary::resolve_or_fail(
from.name(), Handle(THREAD, klass->class_loader()),
Handle(THREAD, klass->protection_domain()), true, CHECK_false);
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
Verifier::trace_class_resolution(from_class, klass());
}
return InstanceKlass::cast(from_class)->is_subclass_of(this_class());

View File

@ -33,6 +33,7 @@
#include "classfile/vmSymbols.hpp"
#include "interpreter/bytecodes.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
@ -106,9 +107,9 @@ void Verifier::trace_class_resolution(Klass* resolve_class, InstanceKlass* verif
const char* resolve = resolve_class->external_name();
// print in a single call to reduce interleaving between threads
if (source_file != NULL) {
log_info(classresolve)("%s %s %s (verification)", verify, resolve, source_file);
log_debug(classresolve)("%s %s %s (verification)", verify, resolve, source_file);
} else {
log_info(classresolve)("%s %s (verification)", verify, resolve);
log_debug(classresolve)("%s %s (verification)", verify, resolve);
}
}
@ -176,9 +177,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
if (can_failover && !HAS_PENDING_EXCEPTION &&
(exception_name == vmSymbols::java_lang_VerifyError() ||
exception_name == vmSymbols::java_lang_ClassFormatError())) {
if (VerboseVerification) {
tty->print_cr("Fail over class verification to old verifier for: %s", klassName);
}
log_info(verboseverification)("Fail over class verification to old verifier for: %s", klassName);
log_info(classinit)("Fail over class verification to old verifier for: %s", klassName);
exception_name = inference_verify(
klass, message_buffer, message_buffer_len, THREAD);
@ -194,8 +193,8 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
if (log_is_enabled(Info, classinit)){
log_end_verification(LogHandle(classinit)::info_stream(), klassName, exception_name, THREAD);
}
if (VerboseVerification){
log_end_verification(tty, klassName, exception_name, THREAD);
if (log_is_enabled(Info, verboseverification)){
log_end_verification(LogHandle(verboseverification)::info_stream(), klassName, exception_name, THREAD);
}
if (HAS_PENDING_EXCEPTION) {
@ -206,7 +205,7 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
ResourceMark rm(THREAD);
instanceKlassHandle kls =
SystemDictionary::resolve_or_fail(exception_name, true, CHECK_false);
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
Verifier::trace_class_resolution(kls(), klass());
}
@ -269,9 +268,7 @@ Symbol* Verifier::inference_verify(
}
ResourceMark rm(THREAD);
if (VerboseVerification) {
tty->print_cr("Verifying class %s with old format", klass->external_name());
}
log_info(verboseverification)("Verifying class %s with old format", klass->external_name());
jclass cls = (jclass) JNIHandles::make_local(env, klass->java_mirror());
jint result;
@ -583,10 +580,7 @@ TypeOrigin ClassVerifier::ref_ctx(const char* sig, TRAPS) {
}
void ClassVerifier::verify_class(TRAPS) {
if (VerboseVerification) {
tty->print_cr("Verifying class %s with new format",
_klass->external_name());
}
log_info(verboseverification)("Verifying class %s with new format", _klass->external_name());
Array<Method*>* methods = _klass->methods();
int num_methods = methods->length();
@ -606,10 +600,7 @@ void ClassVerifier::verify_class(TRAPS) {
}
if (was_recursively_verified()){
if (VerboseVerification){
tty->print_cr("Recursive verification detected for: %s",
_klass->external_name());
}
log_info(verboseverification)("Recursive verification detected for: %s", _klass->external_name());
log_info(classinit)("Recursive verification detected for: %s",
_klass->external_name());
}
@ -618,9 +609,7 @@ void ClassVerifier::verify_class(TRAPS) {
void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
HandleMark hm(THREAD);
_method = m; // initialize _method
if (VerboseVerification) {
tty->print_cr("Verifying method %s", m->name_and_sig_as_C_string());
}
log_info(verboseverification)("Verifying method %s", m->name_and_sig_as_C_string());
// For clang, the only good constant format string is a literal constant format string.
#define bad_type_msg "Bad type on operand stack in %s"
@ -667,8 +656,9 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
StackMapTable stackmap_table(&reader, &current_frame, max_locals, max_stack,
code_data, code_length, CHECK_VERIFY(this));
if (VerboseVerification) {
stackmap_table.print_on(tty);
if (log_is_enabled(Info, verboseverification)) {
ResourceMark rm(THREAD);
stackmap_table.print_on(LogHandle(verboseverification)::info_stream());
}
RawBytecodeStream bcs(m);
@ -708,12 +698,11 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
VerificationType type, type2;
VerificationType atype;
#ifndef PRODUCT
if (VerboseVerification) {
current_frame.print_on(tty);
tty->print_cr("offset = %d, opcode = %s", bci, Bytecodes::name(opcode));
if (log_is_enabled(Info, verboseverification)) {
ResourceMark rm(THREAD);
current_frame.print_on(LogHandle(verboseverification)::info_stream());
log_info(verboseverification)("offset = %d, opcode = %s", bci, Bytecodes::name(opcode));
}
#endif
// Make sure wide instruction is in correct format
if (bcs.is_wide()) {
@ -2005,7 +1994,7 @@ Klass* ClassVerifier::load_class(Symbol* name, TRAPS) {
name, Handle(THREAD, loader), Handle(THREAD, protection_domain),
true, THREAD);
if (log_is_enabled(Info, classresolve)) {
if (log_is_enabled(Debug, classresolve)) {
instanceKlassHandle cur_class = current_class();
Verifier::trace_class_resolution(kls, cur_class());
}
@ -2533,10 +2522,9 @@ void ClassVerifier::verify_invoke_init(
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from after the start of a try block");
return;
} else if (VerboseVerification) {
ResourceMark rm;
tty->print_cr(
"Survived call to ends_in_athrow(): %s",
} else if (log_is_enabled(Info, verboseverification)) {
ResourceMark rm(THREAD);
log_info(verboseverification)("Survived call to ends_in_athrow(): %s",
current_class()->name()->as_C_string());
}
}
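The conversions above all follow the same unified-logging pattern: unconditional one-liners become log_info/log_debug calls, and output that is expensive to produce (stackmap tables, frame dumps) is guarded by an explicit log_is_enabled check. A compilable sketch of that guard-before-format discipline, with an illustrative Logger in place of HotSpot's LogHandle and log macros:

#include <cstdarg>
#include <cstdio>

enum class Level { Debug, Info, Warning, Error };

struct Logger {
  Level threshold = Level::Info;
  bool is_enabled(Level l) const { return l >= threshold; }
  void log(Level l, const char* fmt, ...) {
    if (!is_enabled(l)) return;  // cheap early out for disabled levels
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    fputc('\n', stderr);
    va_end(ap);
  }
};

void verify_method(Logger& log, const char* name) {
  log.log(Level::Info, "Verifying method %s", name);  // cheap, log unconditionally
  if (log.is_enabled(Level::Debug)) {
    // Expensive diagnostics (e.g. printing a stackmap table) are only
    // built when the level is actually enabled.
    log.log(Level::Debug, "stackmap table for %s: ...", name);
  }
}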

View File

@ -385,6 +385,7 @@
template(fillInStackTrace_name, "fillInStackTrace") \
template(getCause_name, "getCause") \
template(initCause_name, "initCause") \
template(depth_name, "depth") \
template(setProperty_name, "setProperty") \
template(getProperty_name, "getProperty") \
template(context_name, "context") \

View File

@ -32,6 +32,7 @@
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/methodData.hpp"
@ -48,6 +49,7 @@
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/timerTrace.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
@ -900,7 +902,7 @@ void CompileBroker::compile_method_base(const methodHandle& method,
// the pending list lock or a 3-way deadlock may occur
// between the reference handler thread, a GC (instigated
// by a compiler thread), and compiled method registration.
if (InstanceRefKlass::owns_pending_list_lock(JavaThread::current())) {
if (ReferencePendingListLocker::is_locked_by_self()) {
return;
}
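The guard above swaps a per-klass ownership test for a query on the new ReferencePendingListLocker, but the shape is unchanged: bail out of compile-request enqueueing if the current thread already holds the pending-list lock, so the reference handler, a GC instigated by a compiler thread, and compiled-method registration cannot deadlock. A sketch of such an ownership-tracking lock (an illustrative stand-in, not the real ReferencePendingListLocker):

#include <atomic>
#include <mutex>
#include <thread>

class OwnerTrackingLock {
  std::mutex _mutex;
  std::atomic<std::thread::id> _owner{};
public:
  void lock() {
    _mutex.lock();
    _owner.store(std::this_thread::get_id(), std::memory_order_relaxed);
  }
  void unlock() {
    _owner.store(std::thread::id(), std::memory_order_relaxed);
    _mutex.unlock();
  }
  bool is_locked_by_self() const {
    return _owner.load(std::memory_order_relaxed) == std::this_thread::get_id();
  }
};

void compile_method_base(OwnerTrackingLock& pll) {
  if (pll.is_locked_by_self()) {
    return;  // never enqueue a compile while holding the pending-list lock
  }
  // ... normal compile-request path ...
}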

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "utilities/bitMap.inline.hpp"
// The MethodLiveness class performs a simple liveness analysis on a method

View File

@ -1931,11 +1931,6 @@ CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk)
if (blk->_ptr == NULL) {
refillLinearAllocBlock(blk);
}
if (PrintMiscellaneous && Verbose) {
if (blk->_word_size == 0) {
warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
}
}
}
void

View File

@ -502,7 +502,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
{
MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
if (!_markBitMap.allocate(_span)) {
warning("Failed to allocate CMS Bit Map");
log_warning(gc)("Failed to allocate CMS Bit Map");
return;
}
assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
@ -513,7 +513,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
}
if (!_markStack.allocate(MarkStackSize)) {
warning("Failed to allocate CMS Marking Stack");
log_warning(gc)("Failed to allocate CMS Marking Stack");
return;
}
@ -527,8 +527,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
ConcGCThreads, true);
if (_conc_workers == NULL) {
warning("GC/CMS: _conc_workers allocation failure: "
"forcing -CMSConcurrentMTEnabled");
log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
CMSConcurrentMTEnabled = false;
} else {
_conc_workers->initialize_workers();
@ -559,7 +558,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
&& num_queues > 0) {
_task_queues = new OopTaskQueueSet(num_queues);
if (_task_queues == NULL) {
warning("task_queues allocation failure.");
log_warning(gc)("task_queues allocation failure.");
return;
}
_hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
@ -567,7 +566,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
for (i = 0; i < num_queues; i++) {
PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
if (q == NULL) {
warning("work_queue allocation failure.");
log_warning(gc)("work_queue allocation failure.");
return;
}
_task_queues->register_queue(i, q);
@ -1413,7 +1412,7 @@ void CMSCollector::acquire_control_and_collect(bool full,
if (_foregroundGCShouldWait) {
// We are going to be waiting for action for the CMS thread;
// it had better not be gone (for instance at shutdown)!
assert(ConcurrentMarkSweepThread::cmst() != NULL,
assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
"CMS thread must be running");
// Wait here until the background collector gives us the go-ahead
ConcurrentMarkSweepThread::clear_CMS_flag(
@ -1519,7 +1518,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
gch->pre_full_gc_dump(gc_timer);
GCTraceTime(Trace, gc) t("CMS:MSC");
GCTraceTime(Trace, gc, phases) t("CMS:MSC");
// Temporarily widen the span of the weak reference processing to
// the entire heap.
@ -2235,7 +2234,7 @@ class VerifyMarkedClosure: public BitMapClosure {
};
bool CMSCollector::verify_after_remark() {
GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking.");
GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
static bool init = false;
@ -2287,17 +2286,16 @@ bool CMSCollector::verify_after_remark() {
// all marking, then check if the new marks-vector is
// a subset of the CMS marks-vector.
verify_after_remark_work_1();
} else if (CMSRemarkVerifyVariant == 2) {
} else {
guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
// In this second variant of verification, we flag an error
// (i.e. an object reachable in the new marks-vector not reachable
// in the CMS marks-vector) immediately, also indicating the
// identity of an object (A) that references the unmarked object (B) --
// presumably, a mutation to A failed to be picked up by preclean/remark?
verify_after_remark_work_2();
} else {
warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
CMSRemarkVerifyVariant);
}
return true;
}
@ -2820,7 +2818,7 @@ void CMSCollector::checkpointRootsInitialWork() {
// CMS collection cycle.
setup_cms_unloading_and_verification_state();
GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
// Reset all the PLAB chunk arrays if necessary.
if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@ -3650,7 +3648,7 @@ void CMSCollector::abortable_preclean() {
// XXX FIX ME!!! YSR
size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
while (!(should_abort_preclean() ||
ConcurrentMarkSweepThread::should_terminate())) {
ConcurrentMarkSweepThread::cmst()->should_terminate())) {
workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
cumworkdone += workdone;
loops++;
@ -4104,8 +4102,6 @@ void CMSCollector::checkpointRootsFinal() {
// expect it to be false and set to true
FlagSetting fl(gch->_is_gc_active, false);
GCTraceTime(Trace, gc) tm("Pause Scavenge Before Remark", _gc_timer_cm);
gch->do_collection(true, // full (i.e. force, see below)
false, // !clear_all_soft_refs
0, // size
@ -4123,7 +4119,7 @@ void CMSCollector::checkpointRootsFinal() {
}
void CMSCollector::checkpointRootsFinalWork() {
GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
@ -4173,10 +4169,10 @@ void CMSCollector::checkpointRootsFinalWork() {
// the most recent young generation GC, minus those cleaned up by the
// concurrent precleaning.
if (CMSParallelRemarkEnabled) {
GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
do_remark_parallel();
} else {
GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
do_remark_non_parallel();
}
}
@ -4184,7 +4180,7 @@ void CMSCollector::checkpointRootsFinalWork() {
verify_overflow_empty();
{
GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
refProcessingWork();
}
verify_work_stacks_empty();
@ -4907,7 +4903,7 @@ void CMSCollector::do_remark_non_parallel() {
NULL, // space is set further below
&_markBitMap, &_markStack, &mrias_cl);
{
GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
// Iterate over the dirty cards, setting the corresponding bits in the
// mod union table.
{
@ -4941,7 +4937,7 @@ void CMSCollector::do_remark_non_parallel() {
Universe::verify();
}
{
GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
verify_work_stacks_empty();
@ -4963,7 +4959,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
verify_work_stacks_empty();
@ -4982,7 +4978,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
verify_work_stacks_empty();
@ -5186,7 +5182,7 @@ void CMSCollector::refProcessingWork() {
_span, &_markBitMap, &_markStack,
&cmsKeepAliveClosure, false /* !preclean */);
{
GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
ReferenceProcessorStats stats;
if (rp->processing_is_mt()) {
@ -5228,7 +5224,7 @@ void CMSCollector::refProcessingWork() {
if (should_unload_classes()) {
{
GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@ -5241,13 +5237,13 @@ void CMSCollector::refProcessingWork() {
}
{
GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm);
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
{
GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm);
GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm);
// Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure);
}
@ -5657,13 +5653,13 @@ bool CMSBitMap::allocate(MemRegion mr) {
ReservedSpace brs(ReservedSpace::allocation_align_size_up(
(_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
if (!brs.is_reserved()) {
warning("CMS bit map allocation failure");
log_warning(gc)("CMS bit map allocation failure");
return false;
}
// For now we'll just commit all of the bit map up front.
// Later on we'll try to be more parsimonious with swap.
if (!_virtual_space.initialize(brs, brs.size())) {
warning("CMS bit map backing store failure");
log_warning(gc)("CMS bit map backing store failure");
return false;
}
assert(_virtual_space.committed_size() == brs.size(),
@ -5749,11 +5745,11 @@ bool CMSMarkStack::allocate(size_t size) {
ReservedSpace rs(ReservedSpace::allocation_align_size_up(
size * sizeof(oop)));
if (!rs.is_reserved()) {
warning("CMSMarkStack allocation failure");
log_warning(gc)("CMSMarkStack allocation failure");
return false;
}
if (!_virtual_space.initialize(rs, rs.size())) {
warning("CMSMarkStack backing store failure");
log_warning(gc)("CMSMarkStack backing store failure");
return false;
}
assert(_virtual_space.committed_size() == rs.size(),
@ -7047,12 +7043,12 @@ SweepClosure::SweepClosure(CMSCollector* collector,
}
void SweepClosure::print_on(outputStream* st) const {
tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(_sp->bottom()), p2i(_sp->end()));
tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
_inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
}
@ -7066,8 +7062,10 @@ SweepClosure::~SweepClosure() {
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds");
if (inFreeRange()) {
warning("inFreeRange() should have been reset; dumping state of SweepClosure");
print();
LogHandle(gc, sweep) log;
log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
ResourceMark rm;
print_on(log.error_stream());
ShouldNotReachHere();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "oops/instanceRefKlass.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
@ -42,16 +42,10 @@
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
bool ConcurrentMarkSweepThread::_should_terminate = false;
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL;
SurrogateLockerThread::SLT_msg_type
ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
Monitor* ConcurrentMarkSweepThread::_sltMonitor = NULL;
ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
: ConcurrentGCThread() {
assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
@ -62,7 +56,6 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
set_name("CMS Main Thread");
if (os::create_thread(this, os::cgc_thread)) {
// An old comment here said: "Priority should be just less
// than that of VMThread". Since the VMThread runs at
// NearMaxPriority, the old comment was inaccurate, but
@ -74,76 +67,47 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
// starving if UseCriticalCMSThreadPriority is on.
// That won't happen on Solaris for various reasons,
// but may well happen on non-Solaris platforms.
int native_prio;
if (UseCriticalCMSThreadPriority) {
native_prio = os::java_to_os_priority[CriticalPriority];
} else {
native_prio = os::java_to_os_priority[NearMaxPriority];
}
os::set_native_priority(this, native_prio);
if (!DisableStartThread) {
os::start_thread(this);
}
}
_sltMonitor = SLT_lock;
create_and_start(UseCriticalCMSThreadPriority ? CriticalPriority : NearMaxPriority);
}
void ConcurrentMarkSweepThread::run() {
void ConcurrentMarkSweepThread::run_service() {
assert(this == cmst(), "just checking");
initialize_in_thread();
// From this time Thread::current() should be working.
assert(this == Thread::current(), "just checking");
if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
warning("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
}
// Wait until Universe::is_fully_initialized()
{
CMSLoopCountWarn loopX("CMS::run", "waiting for "
"Universe::is_fully_initialized()", 2);
MutexLockerEx x(CGC_lock, true);
set_CMS_flag(CMS_cms_wants_token);
// Wait until Universe is initialized and all initialization is completed.
while (!is_init_completed() && !Universe::is_fully_initialized() &&
!_should_terminate) {
CGC_lock->wait(true, 200);
loopX.tick();
}
assert(is_init_completed() && Universe::is_fully_initialized(), "ConcurrentGCThread::run() should have waited for this.");
// Wait until the surrogate locker thread that will do
// pending list locking on our behalf has been created.
// We cannot start the SLT thread ourselves since we need
// to be a JavaThread to do so.
CMSLoopCountWarn loopY("CMS::run", "waiting for SLT installation", 2);
while (_slt == NULL && !_should_terminate) {
while (!ReferencePendingListLocker::is_initialized() && !should_terminate()) {
CGC_lock->wait(true, 200);
loopY.tick();
}
clear_CMS_flag(CMS_cms_wants_token);
}
while (!_should_terminate) {
while (!should_terminate()) {
sleepBeforeNextCycle();
if (_should_terminate) break;
if (should_terminate()) break;
GCIdMark gc_id_mark;
GCCause::Cause cause = _collector->_full_gc_requested ?
_collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
_collector->collect_in_background(cause);
}
assert(_should_terminate, "just checking");
// Check that the state of any protocol for synchronization
// between background (CMS) and foreground collector is "clean"
// (i.e. will not potentially block the foreground collector,
// requiring action by us).
verify_ok_to_terminate();
// Signal that it is terminated
{
MutexLockerEx mu(Terminator_lock,
Mutex::_no_safepoint_check_flag);
assert(_cmst == this, "Weird!");
_cmst = NULL;
Terminator_lock->notify();
}
}
#ifndef PRODUCT
@ -157,39 +121,24 @@ void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
// create and start a new ConcurrentMarkSweep Thread for given CMS generation
ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
if (!_should_terminate) {
assert(cmst() == NULL, "start() called twice?");
guarantee(_cmst == NULL, "start() called twice!");
ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
assert(cmst() == th, "Where did the just-created CMS thread go?");
assert(_cmst == th, "Where did the just-created CMS thread go?");
return th;
}
return NULL;
}
void ConcurrentMarkSweepThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx x(Terminator_lock);
_should_terminate = true;
}
{ // Now post a notify on CGC_lock so as to nudge
void ConcurrentMarkSweepThread::stop_service() {
// Now post a notify on CGC_lock so as to nudge
// CMS thread(s) that might be slumbering in
// sleepBeforeNextCycle.
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
{ // Now wait until (all) CMS thread(s) have exited
MutexLockerEx x(Terminator_lock);
while(cmst() != NULL) {
Terminator_lock->wait();
}
}
}
void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
assert(tc != NULL, "Null ThreadClosure");
if (_cmst != NULL) {
tc->do_thread(_cmst);
if (cmst() != NULL && !cmst()->has_terminated()) {
tc->do_thread(cmst());
}
assert(Universe::is_fully_initialized(),
"Called too early, make sure heap is fully initialized");
@ -202,8 +151,8 @@ void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
}
void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
if (_cmst != NULL) {
_cmst->print_on(st);
if (cmst() != NULL && !cmst()->has_terminated()) {
cmst()->print_on(st);
st->cr();
}
if (_collector != NULL) {
@ -278,7 +227,7 @@ void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
MutexLockerEx x(CGC_lock,
Mutex::_no_safepoint_check_flag);
if (_should_terminate || _collector->_full_gc_requested) {
if (should_terminate() || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
@ -307,7 +256,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
unsigned int loop_count = 0;
while(!_should_terminate) {
while(!should_terminate()) {
double now_time = os::elapsedTime();
long wait_time_millis;
@ -327,7 +276,7 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
{
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (_should_terminate || _collector->_full_gc_requested) {
if (should_terminate() || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
@ -358,13 +307,13 @@ void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
// Too many loops warning
if(++loop_count == 0) {
warning("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
log_warning(gc)("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
}
}
}
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
while (!_should_terminate) {
while (!should_terminate()) {
if(CMSWaitDuration >= 0) {
// Wait until the next synchronous GC, a concurrent full gc
// request or a timeout, whichever is earlier.
@ -381,15 +330,3 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
// and wait some more
}
}
// Note: this method, although exported by the ConcurrentMarkSweepThread,
// which is a non-JavaThread, can only be called by a JavaThread.
// Currently this is done at vm creation time (post-vm-init) by the
// main/Primordial (Java)Thread.
// XXX Consider changing this in the future to allow the CMS thread
// itself to create this thread?
void ConcurrentMarkSweepThread::makeSurrogateLockerThread(TRAPS) {
assert(UseConcMarkSweepGC, "SLT thread needed only for CMS GC");
assert(_slt == NULL, "SLT already created");
_slt = SurrogateLockerThread::make(THREAD);
}
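The bulk of the deletions above come from hoisting thread lifecycle management into ConcurrentGCThread: the hand-rolled run() boilerplate, the Terminator_lock handshake in stop(), and direct _should_terminate reads are replaced by create_and_start(), should_terminate(), and the run_service()/stop_service() template methods. A minimal sketch of that template-method lifecycle; the std::thread plumbing and all names here are illustrative, not HotSpot's:

#include <condition_variable>
#include <mutex>
#include <thread>

class ConcurrentServiceThread {
  std::thread _thread;
  std::mutex _terminator_lock;
  std::condition_variable _terminator_cv;
  bool _should_terminate = false;
  bool _has_terminated = false;

  void run() {
    run_service();  // subclass main loop
    std::lock_guard<std::mutex> g(_terminator_lock);
    _has_terminated = true;  // signal termination exactly once
    _terminator_cv.notify_all();
  }

protected:
  bool should_terminate() {
    std::lock_guard<std::mutex> g(_terminator_lock);
    return _should_terminate;
  }
  virtual void run_service() = 0;   // the thread's actual work loop
  virtual void stop_service() = 0;  // wake the loop so it can observe termination

public:
  void create_and_start() { _thread = std::thread([this] { run(); }); }

  void stop() {
    {
      std::lock_guard<std::mutex> g(_terminator_lock);
      _should_terminate = true;
    }
    stop_service();  // nudge a sleeping service thread
    std::unique_lock<std::mutex> g(_terminator_lock);
    _terminator_cv.wait(g, [this] { return _has_terminated; });
    _thread.join();
  }
  virtual ~ConcurrentServiceThread() = default;
};

A subclass like the CMS thread then only supplies its wait/collect loop in run_service() and a notify in stop_service(), which is exactly the split visible in the hunks above.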

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,17 +37,10 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
friend class VMStructs;
friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship
friend class CMSCollector;
public:
virtual void run();
private:
static ConcurrentMarkSweepThread* _cmst;
static CMSCollector* _collector;
static SurrogateLockerThread* _slt;
static SurrogateLockerThread::SLT_msg_type _sltBuffer;
static Monitor* _sltMonitor;
static bool _should_terminate;
enum CMS_flag_type {
CMS_nil = NoBits,
@ -72,13 +65,13 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// debugging
void verify_ok_to_terminate() const PRODUCT_RETURN;
void run_service();
void stop_service();
public:
// Constructor
ConcurrentMarkSweepThread(CMSCollector* collector);
static void makeSurrogateLockerThread(TRAPS);
static SurrogateLockerThread* slt() { return _slt; }
static void threads_do(ThreadClosure* tc);
// Printing
@ -91,8 +84,6 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// Create and start the CMS Thread, or stop it on shutdown
static ConcurrentMarkSweepThread* start(CMSCollector* collector);
static void stop();
static bool should_terminate() { return _should_terminate; }
// Synchronization using CMS token
static void synchronize(bool is_cms_thread);
@ -170,7 +161,7 @@ class CMSLoopCountWarn: public StackObj {
inline void tick() {
_ticks++;
if (CMSLoopWarn && _ticks % _threshold == 0) {
warning("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg);
log_warning(gc)("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg);
}
}
};

View File

@ -161,15 +161,6 @@ process_stride(Space* sp,
}
}
// If you want a talkative process_chunk_boundaries,
// then #define NOISY(x) x
#ifdef NOISY
#error "Encountered a global preprocessor flag, NOISY, which might clash with local definition to follow"
#else
#define NOISY(x)
#endif
void
CardTableModRefBSForCTRS::
process_chunk_boundaries(Space* sp,
@ -197,10 +188,6 @@ process_chunk_boundaries(Space* sp,
assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index;
NOISY(tty->print_cr("===========================================================================");)
NOISY(tty->print_cr(" process_chunk_boundary: Called with [" PTR_FORMAT "," PTR_FORMAT ")",
chunk_mr.start(), chunk_mr.end());)
// First, set "our" lowest_non_clean entry, which would be
// used by the thread scanning an adjoining left chunk with
// a non-array object straddling the mutual boundary.
@ -239,36 +226,18 @@ process_chunk_boundaries(Space* sp,
}
}
if (first_dirty_card != NULL) {
NOISY(tty->print_cr(" LNC: Found a dirty card at " PTR_FORMAT " in current chunk",
first_dirty_card);)
assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
assert(lowest_non_clean[cur_chunk_index] == NULL,
"Write exactly once : value should be stable hereafter for this round");
lowest_non_clean[cur_chunk_index] = first_dirty_card;
} NOISY(else {
tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");
// In the future, we could have this thread look for a non-NULL value to copy from its
// right neighbor (up to the end of the first object).
if (last_card_of_cur_chunk < last_card_of_first_obj) {
tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
" might be efficient to get value from right neighbor?");
}
})
} else {
// In this case we can help our neighbor by just asking them
// to stop at our first card (even though it may not be dirty).
NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)
assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
}
NOISY(tty->print_cr(" process_chunk_boundary: lowest_non_clean[" INTPTR_FORMAT "] = " PTR_FORMAT
" which corresponds to the heap address " PTR_FORMAT,
cur_chunk_index, lowest_non_clean[cur_chunk_index],
(lowest_non_clean[cur_chunk_index] != NULL)
? addr_for(lowest_non_clean[cur_chunk_index])
: NULL);)
NOISY(tty->print_cr("---------------------------------------------------------------------------");)
// Next, set our own max_to_do, which will strictly/exclusively bound
// the highest address that we will scan past the right end of our chunk.
@ -285,8 +254,6 @@ process_chunk_boundaries(Space* sp,
|| oop(last_block)->is_objArray() // last_block is an array (precisely marked)
|| oop(last_block)->is_typeArray()) {
max_to_do = chunk_mr.end();
NOISY(tty->print_cr(" process_chunk_boundary: Last block on this card is not a non-array object;\n"
" max_to_do left at " PTR_FORMAT, max_to_do);)
} else {
assert(last_block < chunk_mr.end(), "Tautology");
// It is a non-array object that straddles the right boundary of this chunk.
@ -301,9 +268,6 @@ process_chunk_boundaries(Space* sp,
// subsequent cards still in this chunk must have been made
// precisely; we can cap processing at the end of our chunk.
max_to_do = chunk_mr.end();
NOISY(tty->print_cr(" process_chunk_boundary: Head of last object on this card is not dirty;\n"
" max_to_do left at " PTR_FORMAT,
max_to_do);)
} else {
// The last object must be considered dirty, and extends onto the
// following chunk. Look for a dirty card in that chunk that will
@ -323,8 +287,6 @@ process_chunk_boundaries(Space* sp,
cur <= last_card_of_last_obj; cur++) {
const jbyte val = *cur;
if (card_will_be_scanned(val)) {
NOISY(tty->print_cr(" Found a non-clean card " PTR_FORMAT " with value 0x%x",
cur, (int)val);)
limit_card = cur; break;
} else {
assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
@ -333,10 +295,6 @@ process_chunk_boundaries(Space* sp,
if (limit_card != NULL) {
max_to_do = addr_for(limit_card);
assert(limit_card != NULL && max_to_do != NULL, "Error");
NOISY(tty->print_cr(" process_chunk_boundary: Found a dirty card at " PTR_FORMAT
" max_to_do set at " PTR_FORMAT " which is before end of last block in chunk: "
PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,
limit_card, max_to_do, last_block, last_block_size, (last_block+last_block_size));)
} else {
// The following is a pessimistic value, because it's possible
// that a dirty card on a subsequent chunk has been cleared by
@ -346,10 +304,6 @@ process_chunk_boundaries(Space* sp,
limit_card = last_card_of_last_obj;
max_to_do = last_block + last_block_size;
assert(limit_card != NULL && max_to_do != NULL, "Error");
NOISY(tty->print_cr(" process_chunk_boundary: Found no dirty card before end of last block in chunk\n"
" Setting limit_card to " PTR_FORMAT
" and max_to_do " PTR_FORMAT " + " PTR_FORMAT " = " PTR_FORMAT,
limit_card, last_block, last_block_size, max_to_do);)
}
assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
"Bounds error.");
@ -382,7 +336,6 @@ process_chunk_boundaries(Space* sp,
"[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(sp->used_region().start()), p2i(sp->used_region().end()),
p2i(used.start()), p2i(used.end()));
NOISY(tty->print_cr(" process_chunk_boundary: heap expanded; explicitly bounding last_chunk");)
last_chunk_index_to_check = last_chunk_index;
}
for (uintptr_t lnc_index = cur_chunk_index + 1;
@ -392,9 +345,6 @@ process_chunk_boundaries(Space* sp,
if (lnc_card != NULL) {
// we can stop at the first non-NULL entry we find
if (lnc_card <= limit_card) {
NOISY(tty->print_cr(" process_chunk_boundary: LNC card " PTR_FORMAT " is lower than limit_card " PTR_FORMAT,
" max_to_do will be lowered to " PTR_FORMAT " from " PTR_FORMAT,
lnc_card, limit_card, addr_for(lnc_card), max_to_do);)
limit_card = lnc_card;
max_to_do = addr_for(limit_card);
assert(limit_card != NULL && max_to_do != NULL, "Error");
@ -410,9 +360,6 @@ process_chunk_boundaries(Space* sp,
assert(max_to_do != NULL, "OOPS 2!");
} else {
max_to_do = used.end();
NOISY(tty->print_cr(" process_chunk_boundary: Last chunk of this space;\n"
" max_to_do left at " PTR_FORMAT,
max_to_do);)
}
assert(max_to_do != NULL, "OOPS 3!");
// Now we can set the closure we're using so it doesn't go beyond
@ -421,11 +368,8 @@ process_chunk_boundaries(Space* sp,
#ifndef PRODUCT
dcto_cl->set_last_bottom(max_to_do);
#endif
NOISY(tty->print_cr("===========================================================================\n");)
}
#undef NOISY
void
CardTableModRefBSForCTRS::
get_LNC_array_for_space(Space* sp,

View File

@ -901,7 +901,7 @@ void ParNewGeneration::collect(bool full,
size_policy->minor_collection_begin();
}
GCTraceTime(Trace, gc) t1("ParNew", NULL, gch->gc_cause());
GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());
age_table()->clear();
to()->clear(SpaceDecorator::Mangle);

View File

@ -82,18 +82,19 @@ inline void ParScanClosure::do_oop_work(T* p,
if ((HeapWord*)obj < _boundary) {
#ifndef PRODUCT
if (_g->to()->is_in_reserved(obj)) {
tty->print_cr("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
LogHandle(gc) log;
log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
GenCollectedHeap* gch = GenCollectedHeap::heap();
Space* sp = gch->space_containing(p);
oop obj = oop(sp->block_start(p));
assert((HeapWord*)obj < (HeapWord*)p, "Error");
tty->print_cr("Object: " PTR_FORMAT, p2i((void *)obj));
tty->print_cr("-------");
obj->print();
tty->print_cr("-----");
tty->print_cr("Heap:");
tty->print_cr("-----");
gch->print();
log.error("Object: " PTR_FORMAT, p2i((void *)obj));
log.error("-------");
obj->print_on(log.error_stream());
log.error("-----");
log.error("Heap:");
log.error("-----");
gch->print_on(log.error_stream());
ShouldNotReachHere();
}
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,27 +38,17 @@
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
void VM_CMS_Operation::acquire_pending_list_lock() {
// The caller may block while communicating
// with the SLT thread in order to acquire/release the PLL.
SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
if (slt != NULL) {
slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
} else {
SurrogateLockerThread::report_missing_slt();
}
_pending_list_locker.lock();
}
void VM_CMS_Operation::release_and_notify_pending_list_lock() {
// The caller may block while communicating
// with the SLT thread in order to acquire/release the PLL.
ConcurrentMarkSweepThread::slt()->
manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
_pending_list_locker.unlock();
}
void VM_CMS_Operation::verify_before_gc() {
if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm);
GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@ -70,7 +60,7 @@ void VM_CMS_Operation::verify_before_gc() {
void VM_CMS_Operation::verify_after_gc() {
if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
GCTraceTime(Info, gc, verify) tm("Verify After", _collector->_gc_timer_cm);
GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@ -95,7 +85,7 @@ bool VM_CMS_Operation::doit_prologue() {
assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
"Possible deadlock");
if (needs_pll()) {
if (needs_pending_list_lock()) {
acquire_pending_list_lock();
}
// Get the Heap_lock after the pending_list_lock.
@ -103,7 +93,7 @@ bool VM_CMS_Operation::doit_prologue() {
if (lost_race()) {
assert(_prologue_succeeded == false, "Initialized in c'tor");
Heap_lock->unlock();
if (needs_pll()) {
if (needs_pending_list_lock()) {
release_and_notify_pending_list_lock();
}
} else {
@ -120,7 +110,7 @@ void VM_CMS_Operation::doit_epilogue() {
// Release the Heap_lock first.
Heap_lock->unlock();
if (needs_pll()) {
if (needs_pending_list_lock()) {
release_and_notify_pending_list_lock();
}
}
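With the SurrogateLockerThread gone, VM_CMS_Operation now takes and releases the pending list lock directly through a ReferencePendingListLocker member, still strictly before acquiring and after releasing the Heap_lock. A sketch of that prologue/epilogue ordering, with plain std::mutex stand-ins for both locks:

#include <mutex>

struct VMOperation {
  std::mutex& pending_list_lock;
  std::mutex& heap_lock;
  bool holds_pll = false;

  VMOperation(std::mutex& pll, std::mutex& heap)
    : pending_list_lock(pll), heap_lock(heap) {}

  virtual bool needs_pending_list_lock() const = 0;

  void doit_prologue() {
    if (needs_pending_list_lock()) {
      pending_list_lock.lock();  // always taken before the heap lock
      holds_pll = true;
    }
    heap_lock.lock();
  }

  void doit_epilogue() {
    heap_lock.unlock();  // release in reverse acquisition order
    if (holds_pll) {
      pending_list_lock.unlock();
      holds_pll = false;
    }
  }
  virtual ~VMOperation() = default;
};

As in the diff, operations that do not touch discovered references (initial mark) answer false from needs_pending_list_lock() and skip the lock entirely.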

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/referencePendingListLocker.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "runtime/vm_operations.hpp"
@ -51,6 +52,9 @@
class CMSCollector;
class VM_CMS_Operation: public VM_Operation {
private:
ReferencePendingListLocker _pending_list_locker;
protected:
CMSCollector* _collector; // associated collector
bool _prologue_succeeded; // whether doit_prologue succeeded
@ -73,7 +77,7 @@ class VM_CMS_Operation: public VM_Operation {
virtual const CMSCollector::CollectorState legal_state() const = 0;
// Whether the pending list lock needs to be held
virtual const bool needs_pll() const = 0;
virtual const bool needs_pending_list_lock() const = 0;
// Execute operations in the context of the caller,
// prior to execution of the vm operation itself.
@ -105,7 +109,7 @@ class VM_CMS_Initial_Mark: public VM_CMS_Operation {
return CMSCollector::InitialMarking;
}
virtual const bool needs_pll() const {
virtual const bool needs_pending_list_lock() const {
return false;
}
};
@ -122,7 +126,7 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
return CMSCollector::FinalMarking;
}
virtual const bool needs_pll() const {
virtual const bool needs_pending_list_lock() const {
return true;
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,14 +51,12 @@
declare_type(ConcurrentMarkSweepGeneration,CardGeneration) \
declare_type(CompactibleFreeListSpace, CompactibleSpace) \
declare_type(ConcurrentMarkSweepThread, NamedThread) \
declare_type(SurrogateLockerThread, JavaThread) \
declare_toplevel_type(CMSCollector) \
declare_toplevel_type(CMSBitMap) \
declare_toplevel_type(FreeChunk) \
declare_toplevel_type(Metablock) \
declare_toplevel_type(ConcurrentMarkSweepThread*) \
declare_toplevel_type(ConcurrentMarkSweepGeneration*) \
declare_toplevel_type(SurrogateLockerThread*) \
declare_toplevel_type(CompactibleFreeListSpace*) \
declare_toplevel_type(CMSCollector*) \
declare_toplevel_type(AFLBinaryTreeDictionary) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -145,7 +145,6 @@ void CollectionSetChooser::sort_regions() {
verify();
}
void CollectionSetChooser::add_region(HeapRegion* hr) {
assert(!hr->is_pinned(),
"Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
@ -210,4 +209,67 @@ void CollectionSetChooser::clear() {
_front = 0;
_end = 0;
_remaining_reclaimable_bytes = 0;
}
class ParKnownGarbageHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
CSetChooserParUpdater _cset_updater;
public:
ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
uint chunk_size) :
_g1h(G1CollectedHeap::heap()),
_cset_updater(hrSorted, true /* parallel */, chunk_size) { }
bool doHeapRegion(HeapRegion* r) {
// Do we have any marking information for this region?
if (r->is_marked()) {
// We will skip any region that's currently used as an old GC
// alloc region (we should not consider those for collection
// before we fill them up).
if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
_cset_updater.add_region(r);
}
}
return false;
}
};
class ParKnownGarbageTask: public AbstractGangTask {
CollectionSetChooser* _hrSorted;
uint _chunk_size;
G1CollectedHeap* _g1;
HeapRegionClaimer _hrclaimer;
public:
ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
AbstractGangTask("ParKnownGarbageTask"),
_hrSorted(hrSorted), _chunk_size(chunk_size),
_g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
void work(uint worker_id) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
_g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
}
};
uint CollectionSetChooser::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const {
assert(n_workers > 0, "Active gc workers should be greater than 0");
const uint overpartition_factor = 4;
const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
}
void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
clear();
uint n_workers = workers->active_workers();
uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
ParKnownGarbageTask par_known_garbage_task(this, chunk_size, n_workers);
workers->run_task(&par_known_garbage_task);
sort_regions();
}
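rebuild() overpartitions the region index space into chunks and lets the WorkGang claim them through a HeapRegionClaimer. A rough sketch of one common way such chunked claiming can work, using a shared atomic cursor; this is an assumption about the mechanism for illustration, not a transcription of HeapRegionClaimer:

#include <atomic>
#include <cstdint>

struct ChunkClaimer {
  std::atomic<uint32_t> _cursor{0};
  uint32_t _n_regions;
  uint32_t _chunk_size;

  ChunkClaimer(uint32_t n_regions, uint32_t chunk_size)
    : _n_regions(n_regions), _chunk_size(chunk_size) {}

  // Hand out the next [start, end) chunk; returns false once exhausted.
  bool claim(uint32_t& start, uint32_t& end) {
    uint32_t s = _cursor.fetch_add(_chunk_size, std::memory_order_relaxed);
    if (s >= _n_regions) return false;
    start = s;
    end = (s + _chunk_size < _n_regions) ? s + _chunk_size : _n_regions;
    return true;
  }
};

// Each worker loops: while (claimer.claim(s, e)) { for (uint32_t i = s; i < e; i++) visit(i); }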

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,6 +65,9 @@ class CollectionSetChooser: public CHeapObj<mtGC> {
// The sum of reclaimable bytes over all the regions in the CSet chooser.
size_t _remaining_reclaimable_bytes;
// Calculate and return chunk size (in number of regions) for parallel
// addition of regions
uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;
public:
// Return the current candidate region to be considered for
@ -132,6 +135,8 @@ public:
void clear();
void rebuild(WorkGang* workers, uint n_regions);
// Return the number of candidate regions that remain to be collected.
uint remaining_regions() { return _end - _front; }

View File

@ -27,11 +27,13 @@
#include "gc/g1/concurrentG1RefineThread.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "runtime/java.hpp"
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictor) :
_threads(NULL),
_sample_thread(NULL),
_predictor_sigma(predictor->sigma()),
_hot_card_cache(g1h)
{
// Ergonomically select initial concurrent refinement parameters
@ -49,10 +51,12 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
}
set_red_zone(MAX2(G1ConcRefinementRedZone, yellow_zone()));
}
ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure, jint* ecode) {
ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h);
G1CollectorPolicy* policy = g1h->g1_policy();
ConcurrentG1Refine* cg1r = new ConcurrentG1Refine(g1h, &policy->predictor());
if (cg1r == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not create ConcurrentG1Refine");
@ -155,3 +159,43 @@ void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
_sample_thread->print_on(st);
st->cr();
}
void ConcurrentG1Refine::adjust(double update_rs_time,
double update_rs_processed_buffers,
double goal_ms) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
if (G1UseAdaptiveConcRefinement) {
const int k_gy = 3, k_gr = 6;
const double inc_k = 1.1, dec_k = 0.9;
size_t g = green_zone();
if (update_rs_time > goal_ms) {
g = (size_t)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
} else {
if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
g = (size_t)MAX2(g * inc_k, g + 1.0);
}
}
// Change the refinement threads params
set_green_zone(g);
set_yellow_zone(g * k_gy);
set_red_zone(g * k_gr);
reinitialize_threads();
size_t processing_threshold_delta = MAX2<size_t>(green_zone() * _predictor_sigma, 1);
size_t processing_threshold = MIN2(green_zone() + processing_threshold_delta,
yellow_zone());
// Change the barrier params
dcqs.set_process_completed_threshold((int)processing_threshold);
dcqs.set_max_completed_queue((int)red_zone());
}
size_t curr_queue_size = dcqs.completed_buffers_num();
if (curr_queue_size >= yellow_zone()) {
dcqs.set_completed_queue_padding(curr_queue_size);
} else {
dcqs.set_completed_queue_padding(0);
}
dcqs.notify_if_necessary();
}
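The new adjust() keeps the yellow and red zones at fixed multiples of green (3x and 6x per the constants above) and nudges green multiplicatively against the pause-time goal. The same arithmetic, extracted as a standalone function:

#include <algorithm>
#include <cstddef>

struct Zones { size_t green, yellow, red; };

Zones adjust_zones(size_t green, double update_rs_time_ms,
                   double processed_buffers, double goal_ms) {
  const int k_gy = 3, k_gr = 6;
  const double inc_k = 1.1, dec_k = 0.9;

  if (update_rs_time_ms > goal_ms) {
    green = (size_t)(green * dec_k);  // over budget: shrink (0 is OK)
  } else if (processed_buffers > green) {
    green = (size_t)std::max(green * inc_k, green + 1.0);  // grow by at least 1
  }
  return Zones{green, (size_t)(green * k_gy), (size_t)(green * k_gr)};
}

For example, with green = 6 and an over-goal update_rs time, the next zones are green 5, yellow 15, red 30; if instead the time is under goal and more than 6 buffers were processed, green grows to 7.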

View File

@ -35,6 +35,7 @@
class ConcurrentG1RefineThread;
class G1CollectedHeap;
class G1HotCardCache;
class G1Predictions;
class G1RegionToSpaceMapper;
class G1RemSet;
class DirtyCardQueue;
@ -67,13 +68,15 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
size_t _thread_threshold_step;
double _predictor_sigma;
// We delay the refinement of 'hot' cards using the hot card cache.
G1HotCardCache _hot_card_cache;
// Reset the threshold step value based on the current zone boundaries.
void reset_threshold_step();
ConcurrentG1Refine(G1CollectedHeap* g1h);
ConcurrentG1Refine(G1CollectedHeap* g1h, const G1Predictions* predictions);
public:
~ConcurrentG1Refine();
@ -85,6 +88,8 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
void init(G1RegionToSpaceMapper* card_counts_storage);
void stop();
void adjust(double update_rs_time, double update_rs_processed_buffers, double goal_ms);
void reinitialize_threads();
// Iterate over all concurrent refinement threads

View File

@ -78,7 +78,7 @@ void ConcurrentG1RefineThread::initialize() {
void ConcurrentG1RefineThread::wait_for_completed_buffers() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
while (!_should_terminate && !is_active()) {
while (!should_terminate() && !is_active()) {
_monitor->wait(Mutex::_no_safepoint_check_flag);
}
}
@ -109,22 +109,13 @@ void ConcurrentG1RefineThread::deactivate() {
}
}
void ConcurrentG1RefineThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void ConcurrentG1RefineThread::run_service() {
_vtime_start = os::elapsedVTime();
while (!_should_terminate) {
while (!should_terminate()) {
// Wait for work
wait_for_completed_buffers();
if (_should_terminate) {
if (should_terminate()) {
break;
}
@ -168,23 +159,6 @@ void ConcurrentG1RefineThread::run_service() {
log_debug(gc, refine)("Stopping %d", _worker_id);
}
void ConcurrentG1RefineThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx mu(Terminator_lock);
_should_terminate = true;
}
stop_service();
{
MutexLockerEx mu(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
}
}
}
void ConcurrentG1RefineThread::stop_service() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
_monitor->notify();

View File

@ -72,7 +72,6 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
void stop_service();
public:
virtual void run();
// Constructor
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
CardTableEntryClosure* refine_closure,
@ -84,9 +83,6 @@ public:
double vtime_accum() { return _vtime_accum; }
ConcurrentG1Refine* cg1r() { return _cg1r; }
// shutdown
void stop();
};
#endif // SHARE_VM_GC_G1_CONCURRENTG1REFINETHREAD_HPP

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MMUTracker.hpp"
@ -41,9 +42,6 @@
// The CM thread is created when the G1 garbage collector is used
SurrogateLockerThread*
ConcurrentMarkThread::_slt = NULL;
ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
ConcurrentGCThread(),
_cm(cm),
@ -82,60 +80,59 @@ public:
// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
const G1Analytics* analytics = g1_policy->analytics();
if (g1_policy->adaptive_young_list_length()) {
double now = os::elapsedTime();
double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
: g1_policy->predict_cleanup_time_ms();
double prediction_ms = remark ? analytics->predict_remark_time_ms()
: analytics->predict_cleanup_time_ms();
G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
os::sleep(this, sleep_time_ms, false);
}
}
class GCConcPhaseTimer : StackObj {
class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
G1ConcurrentMark* _cm;
public:
GCConcPhaseTimer(G1ConcurrentMark* cm, const char* title) : _cm(cm) {
_cm->register_concurrent_phase_start(title);
G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
_cm(cm) {
_cm->gc_timer_cm()->register_gc_concurrent_start(title);
}
~GCConcPhaseTimer() {
_cm->register_concurrent_phase_end();
~G1ConcPhaseTimer() {
_cm->gc_timer_cm()->register_gc_concurrent_end();
}
};
void ConcurrentMarkThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void ConcurrentMarkThread::run_service() {
_vtime_start = os::elapsedVTime();
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1_policy = g1h->g1_policy();
while (!_should_terminate) {
while (!should_terminate()) {
// wait until started is set.
sleepBeforeNextCycle();
if (_should_terminate) {
_cm->root_regions()->cancel_scan();
if (should_terminate()) {
break;
}
GCIdMark gc_id_mark;
cm()->concurrent_cycle_start();
assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
{
ResourceMark rm;
HandleMark hm;
double cycle_start = os::elapsedVTime();
{
GCConcPhaseTimer(_cm, "Concurrent Clearing of Claimed Marks");
G1ConcPhaseTimer t(_cm, "Concurrent Clear Claimed Marks");
ClassLoaderDataGraph::clear_claimed_marks();
}
@ -148,22 +145,22 @@ void ConcurrentMarkThread::run_service() {
// correctness issue.
{
GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning");
_cm->scanRootRegions();
G1ConcPhaseTimer t(_cm, "Concurrent Scan Root Regions");
_cm->scan_root_regions();
}
// It would be nice to use the GCTraceConcTime class here but
// the "end" logging is inside the loop and not at the end of
// a scope. Mimicking the same log output as GCTraceConcTime instead.
jlong mark_start = os::elapsed_counter();
log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
log_info(gc, marking)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start));
int iter = 0;
do {
iter++;
if (!cm()->has_aborted()) {
GCConcPhaseTimer(_cm, "Concurrent Mark");
_cm->markFromRoots();
G1ConcPhaseTimer t(_cm, "Concurrent Mark From Roots");
_cm->mark_from_roots();
}
double mark_end_time = os::elapsedVTime();
@ -171,7 +168,7 @@ void ConcurrentMarkThread::run_service() {
_vtime_mark_accum += (mark_end_time - cycle_start);
if (!cm()->has_aborted()) {
delay_to_keep_mmu(g1_policy, true /* remark */);
log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
TimeHelper::counter_to_seconds(mark_start),
TimeHelper::counter_to_seconds(mark_end),
TimeHelper::counter_to_millis(mark_end - mark_start));
@ -181,8 +178,8 @@ void ConcurrentMarkThread::run_service() {
VMThread::execute(&op);
}
if (cm()->restart_for_overflow()) {
log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
log_info(gc)("Concurrent Mark restart for overflow");
log_debug(gc, marking)("Restarting Concurrent Marking because of Mark Stack Overflow in Remark (Iteration #%d).", iter);
log_info(gc, marking)("Concurrent Mark Restart due to overflow");
}
} while (cm()->restart_for_overflow());
@ -216,11 +213,9 @@ void ConcurrentMarkThread::run_service() {
// place, it would wait for us to process the regions
// reclaimed by cleanup.
GCTraceConcTime(Info, gc) tt("Concurrent Cleanup");
GCConcPhaseTimer(_cm, "Concurrent Cleanup");
G1ConcPhaseTimer t(_cm, "Concurrent Complete Cleanup");
// Now do the concurrent cleanup operation.
_cm->completeCleanup();
_cm->complete_cleanup();
// Notify anyone who's waiting that there are no more free
// regions coming. We have to do this before we join the STS
@ -265,7 +260,7 @@ void ConcurrentMarkThread::run_service() {
if (!cm()->has_aborted()) {
g1_policy->record_concurrent_mark_cleanup_completed();
} else {
log_info(gc)("Concurrent Mark abort");
log_info(gc, marking)("Concurrent Mark Abort");
}
}
@ -274,8 +269,8 @@ void ConcurrentMarkThread::run_service() {
// We may have aborted just before the remark. Do not bother clearing the
// bitmap then, as it has been done during mark abort.
if (!cm()->has_aborted()) {
GCConcPhaseTimer(_cm, "Concurrent Bitmap Clearing");
_cm->clearNextBitmap();
G1ConcPhaseTimer t(_cm, "Concurrent Cleanup for Next Mark");
_cm->cleanup_for_next_mark();
} else {
assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
}
@ -288,25 +283,11 @@ void ConcurrentMarkThread::run_service() {
{
SuspendibleThreadSetJoiner sts_join;
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
g1h->register_concurrent_cycle_end();
}
}
}
void ConcurrentMarkThread::stop() {
{
MutexLockerEx ml(Terminator_lock);
_should_terminate = true;
}
stop_service();
{
MutexLockerEx ml(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
cm()->concurrent_cycle_end();
}
}
_cm->root_regions()->cancel_scan();
}
void ConcurrentMarkThread::stop_service() {
@ -320,7 +301,7 @@ void ConcurrentMarkThread::sleepBeforeNextCycle() {
assert(!in_progress(), "should have been cleared");
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
while (!started() && !_should_terminate) {
while (!started() && !should_terminate()) {
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
}
@ -328,16 +309,3 @@ void ConcurrentMarkThread::sleepBeforeNextCycle() {
set_in_progress();
}
}
// Note: As is the case with CMS - this method, although exported
// by the ConcurrentMarkThread, which is a non-JavaThread, can only
// be called by a JavaThread. Currently this is done at vm creation
// time (post-vm-init) by the main/Primordial (Java)Thread.
// XXX Consider changing this in the future to allow the CM thread
// itself to create this thread?
void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) {
assert(UseG1GC, "SLT thread needed only for concurrent GC");
assert(THREAD->is_Java_thread(), "must be a Java thread");
assert(_slt == NULL, "SLT already created");
_slt = SurrogateLockerThread::make(THREAD);
}

View File

@ -38,13 +38,8 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
double _vtime_start; // Initial virtual time.
double _vtime_accum; // Accumulated virtual time.
double _vtime_mark_accum;
public:
virtual void run();
private:
G1ConcurrentMark* _cm;
enum State {
@ -61,15 +56,10 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
void run_service();
void stop_service();
static SurrogateLockerThread* _slt;
public:
// Constructor
ConcurrentMarkThread(G1ConcurrentMark* cm);
static void makeSurrogateLockerThread(TRAPS);
static SurrogateLockerThread* slt() { return _slt; }
// Total virtual time so far for this thread and concurrent marking tasks.
double vtime_accum();
// Marking virtual time so far this thread and concurrent marking tasks.
@ -93,9 +83,6 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
// as the CM thread might take some time to wake up before noticing
// that started() is set and set in_progress().
bool during_cycle() { return !idle(); }
// shutdown
void stop();
};
#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP

View File

@ -110,44 +110,6 @@ DirtyCardQueue::~DirtyCardQueue() {
}
}
bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
bool consume,
uint worker_i) {
bool res = true;
if (_buf != NULL) {
res = apply_closure_to_buffer(cl, _buf, _index, _sz,
consume,
worker_i);
if (res && consume) {
_index = _sz;
}
}
return res;
}
bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
void** buf,
size_t index, size_t sz,
bool consume,
uint worker_i) {
if (cl == NULL) return true;
size_t limit = byte_index_to_index(sz);
for (size_t i = byte_index_to_index(index); i < limit; ++i) {
jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
if (card_ptr != NULL) {
// Set the entry to null, so we don't do it again (via the test
// above) if we reconsider this buffer.
if (consume) {
buf[i] = NULL;
}
if (!cl->do_card_ptr(card_ptr, worker_i)) {
return false;
}
}
}
return true;
}
DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
PtrQueueSet(notify_when_complete),
_mut_process_closure(NULL),
@ -188,14 +150,39 @@ void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
t->dirty_card_queue().handle_zero_index();
}
bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
BufferNode* node,
bool consume,
uint worker_i) {
if (cl == NULL) return true;
void** buf = BufferNode::make_buffer_from_node(node);
size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
size_t start = DirtyCardQueue::byte_index_to_index(node->index());
for (size_t i = start; i < limit; ++i) {
jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
assert(card_ptr != NULL, "invariant");
if (!cl->do_card_ptr(card_ptr, worker_i)) {
if (consume) {
size_t new_index = DirtyCardQueue::index_to_byte_index(i + 1);
assert(new_index <= buffer_size(), "invariant");
node->set_index(new_index);
}
return false;
}
}
if (consume) {
node->set_index(buffer_size());
}
return true;
}
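A reading aid, not text from the patch: these queues fill from the high end of the buffer toward index 0 (note that "empty" in the removed code meant _index == _sz), so a node's active entries run from buf[byte_index_to_index(node->index())] up to, but not including, buf[byte_index_to_index(buffer_size())]. On a 64-bit VM, where byte_index_to_index divides by sizeof(void*) == 8, a node->index() of 128 means buf[16] onward is still live, and setting the index to buffer_size() marks the node fully consumed.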
bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
guarantee(_free_ids != NULL, "must be");
// claim a par id
uint worker_i = _free_ids->claim_par_id();
bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
_sz, true, worker_i);
bool b = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
if (b) {
Atomic::inc(&_processed_buffers_mut);
}
@ -239,49 +226,30 @@ bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure*
if (nd == NULL) {
return false;
} else {
void** buf = BufferNode::make_buffer_from_node(nd);
size_t index = nd->index();
if (DirtyCardQueue::apply_closure_to_buffer(cl,
buf, index, _sz,
true, worker_i)) {
if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
// Done with fully processed buffer.
deallocate_buffer(buf);
deallocate_buffer(nd);
Atomic::inc(&_processed_buffers_rs_thread);
return true;
} else {
// Return partially processed buffer to the queue.
enqueue_complete_buffer(buf, index);
enqueue_complete_buffer(nd);
return false;
}
}
}
void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
BufferNode* nd = _completed_buffers_head;
while (nd != NULL) {
bool b =
DirtyCardQueue::apply_closure_to_buffer(cl,
BufferNode::make_buffer_from_node(nd),
0, _sz, false);
guarantee(b, "Should not stop early.");
nd = nd->next();
}
}
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
BufferNode* nd = _cur_par_buffer_node;
while (nd != NULL) {
BufferNode* next = (BufferNode*)nd->next();
BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd);
BufferNode* next = nd->next();
void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
if (actual == nd) {
bool b =
DirtyCardQueue::apply_closure_to_buffer(cl,
BufferNode::make_buffer_from_node(actual),
0, _sz, false);
bool b = apply_closure_to_buffer(cl, nd, false);
guarantee(b, "Should not stop early.");
nd = next;
} else {
nd = actual;
nd = static_cast<BufferNode*>(actual);
}
}
}
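The loop above parcels out list nodes by compare-and-swap on the shared cursor: each worker reads the cursor, tries to advance it past one node, and only the worker whose CAS succeeds processes that node. A minimal standalone sketch of the same claiming discipline, using std::atomic in place of HotSpot's Atomic wrapper (illustrative only):

#include <atomic>

struct Node { Node* next; };

// Claim exactly one node from the shared cursor, or return NULL when
// the list is exhausted. Safe to call concurrently from many threads.
Node* claim_next(std::atomic<Node*>& cursor) {
  Node* nd = cursor.load();
  while (nd != nullptr) {
    Node* next = nd->next;
    // Only the thread whose CAS moves cursor from nd to next owns nd;
    // on failure, compare_exchange_weak reloads nd with the value some
    // other thread installed, and we retry from there.
    if (cursor.compare_exchange_weak(nd, next)) {
      return nd;
    }
  }
  return nullptr;
}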
@ -304,7 +272,7 @@ void DirtyCardQueueSet::clear() {
while (buffers_to_delete != NULL) {
BufferNode* nd = buffers_to_delete;
buffers_to_delete = nd->next();
deallocate_buffer(BufferNode::make_buffer_from_node(nd));
deallocate_buffer(nd);
}
}
@ -320,6 +288,13 @@ void DirtyCardQueueSet::abandon_logs() {
shared_dirty_card_queue()->reset();
}
void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
if (!dcq.is_empty()) {
enqueue_complete_buffer(
BufferNode::make_node_from_buffer(dcq.get_buf(), dcq.get_index()));
dcq.reinitialize();
}
}
void DirtyCardQueueSet::concatenate_logs() {
// Iterate over all the threads, if we find a partial log add it to
@ -329,23 +304,9 @@ void DirtyCardQueueSet::concatenate_logs() {
_max_completed_queue = max_jint;
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
for (JavaThread* t = Threads::first(); t; t = t->next()) {
DirtyCardQueue& dcq = t->dirty_card_queue();
if (dcq.size() != 0) {
void** buf = dcq.get_buf();
// We must NULL out the unused entries, then enqueue.
size_t limit = dcq.byte_index_to_index(dcq.get_index());
for (size_t i = 0; i < limit; ++i) {
buf[i] = NULL;
}
enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
dcq.reinitialize();
}
}
if (_shared_dirty_card_queue.size() != 0) {
enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(),
_shared_dirty_card_queue.get_index());
_shared_dirty_card_queue.reinitialize();
concatenate_log(t->dirty_card_queue());
}
concatenate_log(_shared_dirty_card_queue);
// Restore the completed buffer queue limit.
_max_completed_queue = save_max_completed_queue;
}

View File

@ -37,7 +37,7 @@ class CardTableEntryClosure: public CHeapObj<mtGC> {
public:
// Process the card whose card table entry is "card_ptr". If returns
// "false", terminate the iteration early.
virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) = 0;
};
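With the default argument gone, every implementer now receives an explicit worker id. A hedged sketch of a conforming closure (hypothetical, not part of this patch):

// Counts cards without ever stopping the iteration early.
class CountCardsClosure : public CardTableEntryClosure {
  size_t _count;
public:
  CountCardsClosure() : _count(0) { }
  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    _count++;     // card_ptr is now guaranteed non-NULL by the caller
    return true;  // returning false would terminate the iteration
  }
  size_t count() const { return _count; }
};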
// A ptrQueue whose elements are "oops", pointers to object heads.
@ -52,23 +52,6 @@ public:
// Process queue entries and release resources.
void flush() { flush_impl(); }
// Apply the closure to all elements, and reset the index to make the
// buffer empty. If a closure application returns "false", return
// "false" immediately, halting the iteration. If "consume" is true,
// deletes processed entries from logs.
bool apply_closure(CardTableEntryClosure* cl,
bool consume = true,
uint worker_i = 0);
// Apply the closure to all elements of "buf", down to "index"
// (inclusive.) If returns "false", then a closure application returned
// "false", and we return immediately. If "consume" is true, entries are
// set to NULL as they are processed, so they will not be processed again
// later.
static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
void** buf, size_t index, size_t sz,
bool consume = true,
uint worker_i = 0);
void **get_buf() { return _buf;}
size_t get_index() { return _index;}
void reinitialize() { _buf = 0; _sz = 0; _index = 0;}
@ -94,8 +77,18 @@ class DirtyCardQueueSet: public PtrQueueSet {
DirtyCardQueue _shared_dirty_card_queue;
// Override.
bool mut_process_buffer(void** buf);
// Apply the closure to the elements of "node" from its index to
// buffer_size. If all closure applications return true, then
// returns true. Stops processing after the first closure
// application that returns false, and returns false from this
// function. If "consume" is true, the node's index is updated to
// follow the last processed element.
bool apply_closure_to_buffer(CardTableEntryClosure* cl,
BufferNode* node,
bool consume,
uint worker_i = 0);
bool mut_process_buffer(BufferNode* node);
// Protected by the _cbl_mon.
FreeIdSet* _free_ids;
@ -107,6 +100,9 @@ class DirtyCardQueueSet: public PtrQueueSet {
// Current buffer node used for parallel iteration.
BufferNode* volatile _cur_par_buffer_node;
void concatenate_log(DirtyCardQueue& dcq);
public:
DirtyCardQueueSet(bool notify_when_complete = true);
@ -126,12 +122,13 @@ public:
static void handle_zero_index_for_thread(JavaThread* t);
// If there exists some completed buffer, pop it, then apply the
// specified closure to all its elements, nulling out those elements
// processed. If all elements are processed, returns "true". If no
// completed buffers exist, returns false. If a completed buffer exists,
// but is only partially completed before a "yield" happens, the
// partially completed buffer (with its processed elements set to NULL)
// is returned to the completed buffer set, and this call returns false.
// specified closure to its active elements. If all active elements
// are processed, returns "true". If no completed buffers exist,
// returns false. If a completed buffer exists, but is only
// partially completed before a "yield" happens, the partially
// completed buffer (with its index updated to exclude the processed
// elements) is returned to the completed buffer set, and this call
// returns false.
bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
uint worker_i,
size_t stop_at,
@ -139,13 +136,10 @@ public:
BufferNode* get_completed_buffer(size_t stop_at);
// Applies the current closure to all completed buffers,
// non-consumptively.
void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
// Applies the current closure to all completed buffers, non-consumptively.
// Parallel version.
// Can be used in parallel, all callers using the iteration state initialized
// by reset_for_par_iteration.
void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl);
DirtyCardQueue* shared_dirty_card_queue() {

View File

@ -26,6 +26,7 @@
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.inline.hpp"
G1CollectedHeap* G1AllocRegion::_g1h = NULL;
@ -194,44 +195,53 @@ HeapRegion* G1AllocRegion::release() {
return (alloc_region == _dummy_region) ? NULL : alloc_region;
}
#if G1_ALLOC_REGION_TRACING
#ifndef PRODUCT
void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_word_size, size_t actual_word_size, HeapWord* result) {
// All the calls to trace that set either just the size or the size
// and the result are considered part of level 2 tracing and are
// skipped during level 1 tracing.
if ((actual_word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) {
const size_t buffer_length = 128;
char hr_buffer[buffer_length];
char rest_buffer[buffer_length];
// and the result are considered part of detailed tracing and are
// skipped during other tracing.
HeapRegion* alloc_region = _alloc_region;
if (alloc_region == NULL) {
jio_snprintf(hr_buffer, buffer_length, "NULL");
} else if (alloc_region == _dummy_region) {
jio_snprintf(hr_buffer, buffer_length, "DUMMY");
} else {
jio_snprintf(hr_buffer, buffer_length,
HR_FORMAT, HR_FORMAT_PARAMS(alloc_region));
LogHandle(gc, alloc, region) log;
if (!log.is_debug()) {
return;
}
if (G1_ALLOC_REGION_TRACING > 1) {
bool detailed_info = log.is_trace();
if ((actual_word_size == 0 && result == NULL) || detailed_info) {
ResourceMark rm;
outputStream* out;
if (detailed_info) {
out = log.trace_stream();
} else {
out = log.debug_stream();
}
out->print("%s: %u ", _name, _count);
if (_alloc_region == NULL) {
out->print("NULL");
} else if (_alloc_region == _dummy_region) {
out->print("DUMMY");
} else {
out->print(HR_FORMAT, HR_FORMAT_PARAMS(_alloc_region));
}
out->print(" : %s", str);
if (detailed_info) {
if (result != NULL) {
jio_snprintf(rest_buffer, buffer_length, "min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT,
min_word_size, desired_word_size, actual_word_size, result);
out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT,
min_word_size, desired_word_size, actual_word_size, p2i(result));
} else if (min_word_size != 0) {
jio_snprintf(rest_buffer, buffer_length, "min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size);
} else {
jio_snprintf(rest_buffer, buffer_length, "");
}
} else {
jio_snprintf(rest_buffer, buffer_length, "");
}
tty->print_cr("[%s] %u %s : %s %s",
_name, _count, hr_buffer, str, rest_buffer);
out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size);
}
}
#endif // G1_ALLOC_REGION_TRACING
out->cr();
}
}
#endif // PRODUCT
G1AllocRegion::G1AllocRegion(const char* name,
bool bot_updates)
@ -253,7 +263,7 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), _purpose);
return _g1h->new_gc_alloc_region(word_size, _purpose);
}
void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,

View File

@ -31,9 +31,6 @@
class G1CollectedHeap;
// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing
#define G1_ALLOC_REGION_TRACING 0
// A class that holds a region that is active in satisfying allocation
// requests, potentially issued in parallel. When the active region is
// full it will be retired and replaced with a new one. The
@ -213,19 +210,11 @@ public:
// is returned after it's been retired.
virtual HeapRegion* release();
#if G1_ALLOC_REGION_TRACING
void trace(const char* str,
size_t min_word_size = 0,
size_t desired_word_size = 0,
size_t actual_word_size = 0,
HeapWord* result = NULL);
#else // G1_ALLOC_REGION_TRACING
void trace(const char* str,
size_t min_word_size = 0,
size_t desired_word_size = 0,
size_t actual_word_size = 0,
HeapWord* result = NULL) { }
#endif // G1_ALLOC_REGION_TRACING
HeapWord* result = NULL) PRODUCT_RETURN;
};
class MutatorAllocRegion : public G1AllocRegion {

View File

@ -0,0 +1,329 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/numberSeq.hpp"
// Different defaults for different numbers of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results
// all the same
static double rs_length_diff_defaults[] = {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};
static double cost_per_card_ms_defaults[] = {
0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};
// all the same
static double young_cards_per_entry_ratio_defaults[] = {
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};
static double cost_per_entry_ms_defaults[] = {
0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};
static double cost_per_byte_ms_defaults[] = {
0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};
// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};
static double young_other_cost_per_region_ms_defaults[] = {
0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};
static double non_young_other_cost_per_region_ms_defaults[] = {
1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
G1Analytics::G1Analytics(const G1Predictions* predictor) :
_predictor(predictor),
_recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
_alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_prev_collection_pause_end_ms(0.0),
_rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
_mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
_constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
_pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
_rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
_recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)) {
// Seed sequences with initial values.
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
_prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
int index = MIN2(ParallelGCThreads - 1, 7u);
_rs_length_diff_seq->add(rs_length_diff_defaults[index]);
_cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
_cost_scan_hcc_seq->add(0.0);
_young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
_cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
_cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
_constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
_young_other_cost_per_region_ms_seq->add(young_other_cost_per_region_ms_defaults[index]);
_non_young_other_cost_per_region_ms_seq->add(non_young_other_cost_per_region_ms_defaults[index]);
// start conservatively (around 50ms is about right)
_concurrent_mark_remark_times_ms->add(0.05);
_concurrent_mark_cleanup_times_ms->add(0.20);
}
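As a worked example of the seeding (values read off the tables above, not new data): with ParallelGCThreads = 4 the index is 3, so the sequences start from 0.003 ms per card, 0.008 ms per scanned entry, 0.15 ms of young other cost per region and 0.5 ms of non-young other cost per region; with 8 or more GC threads the last column (index 7) is used.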
double G1Analytics::get_new_prediction(TruncatedSeq const* seq) const {
return _predictor->get_new_prediction(seq);
}
size_t G1Analytics::get_new_size_prediction(TruncatedSeq const* seq) const {
return (size_t)get_new_prediction(seq);
}
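G1Predictions lives outside this file; as a hedged sketch of the shape such a predictor commonly takes (an assumption, not code from this patch), it pads the sequence's decaying average with a multiple of its decaying standard deviation so that estimates err toward the slow side:

// Hypothetical stand-in for the predictor: davg and dsd would come from
// TruncatedSeq's decaying average and standard deviation; sigma is a
// confidence knob (larger sigma means more pessimistic predictions).
static double padded_prediction(double davg, double dsd, double sigma) {
  return davg + sigma * dsd;
}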
int G1Analytics::num_alloc_rate_ms() const {
return _alloc_rate_ms_seq->num();
}
void G1Analytics::report_concurrent_mark_remark_times_ms(double ms) {
_concurrent_mark_remark_times_ms->add(ms);
}
void G1Analytics::report_alloc_rate_ms(double alloc_rate) {
_alloc_rate_ms_seq->add(alloc_rate);
}
void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
_recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
if (_recent_avg_pause_time_ratio < 0.0 ||
(_recent_avg_pause_time_ratio - 1.0 > 0.0)) {
// Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
// CR 6902692 by redoing the manner in which the ratio is incrementally computed.
if (_recent_avg_pause_time_ratio < 0.0) {
_recent_avg_pause_time_ratio = 0.0;
} else {
assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
_recent_avg_pause_time_ratio = 1.0;
}
}
// Compute the ratio of just this last pause time to the entire time range stored
// in the vectors. Comparing this pause to the entire range, rather than only the
// most recent interval, has the effect of smoothing over a possible transient 'burst'
// of more frequent pauses that don't really reflect a change in heap occupancy.
// This reduces the likelihood of a needless heap expansion being triggered.
_last_pause_time_ratio =
(pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
}
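Worked example with invented numbers: ten recorded pauses summing to 120 ms over a 2000 ms interval give _recent_avg_pause_time_ratio = 120 / 2000 = 0.06; a last pause of 30 ms then gives _last_pause_time_ratio = (30 * 10) / 2000 = 0.15, i.e. the last pause is weighted as if every pause in the window had been that long.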
void G1Analytics::report_cost_per_card_ms(double cost_per_card_ms) {
_cost_per_card_ms_seq->add(cost_per_card_ms);
}
void G1Analytics::report_cost_scan_hcc(double cost_scan_hcc) {
_cost_scan_hcc_seq->add(cost_scan_hcc);
}
void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
if (last_gc_was_young) {
_cost_per_entry_ms_seq->add(cost_per_entry_ms);
} else {
_mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
}
}
void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
if (last_gc_was_young) {
_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
} else {
_mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
}
}
void G1Analytics::report_rs_length_diff(double rs_length_diff) {
_rs_length_diff_seq->add(rs_length_diff);
}
void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) {
if (in_marking_window) {
_cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
} else {
_cost_per_byte_ms_seq->add(cost_per_byte_ms);
}
}
void G1Analytics::report_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
_young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms);
}
void G1Analytics::report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
_non_young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms);
}
void G1Analytics::report_constant_other_time_ms(double constant_other_time_ms) {
_constant_other_time_ms_seq->add(constant_other_time_ms);
}
void G1Analytics::report_pending_cards(double pending_cards) {
_pending_cards_seq->add(pending_cards);
}
void G1Analytics::report_rs_lengths(double rs_lengths) {
_rs_lengths_seq->add(rs_lengths);
}
size_t G1Analytics::predict_rs_length_diff() const {
return get_new_size_prediction(_rs_length_diff_seq);
}
double G1Analytics::predict_alloc_rate_ms() const {
return get_new_prediction(_alloc_rate_ms_seq);
}
double G1Analytics::predict_cost_per_card_ms() const {
return get_new_prediction(_cost_per_card_ms_seq);
}
double G1Analytics::predict_scan_hcc_ms() const {
return get_new_prediction(_cost_scan_hcc_seq);
}
double G1Analytics::predict_rs_update_time_ms(size_t pending_cards) const {
return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}
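For example (numbers invented): 10,000 pending cards at a predicted 0.003 ms per card, plus a predicted 1.2 ms of hot-card-cache scanning, gives 10,000 * 0.003 + 1.2 = 31.2 ms of predicted remembered-set update time.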
double G1Analytics::predict_young_cards_per_entry_ratio() const {
return get_new_prediction(_young_cards_per_entry_ratio_seq);
}
double G1Analytics::predict_mixed_cards_per_entry_ratio() const {
if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
return predict_young_cards_per_entry_ratio();
} else {
return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
}
}
size_t G1Analytics::predict_card_num(size_t rs_length, bool gcs_are_young) const {
if (gcs_are_young) {
return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
} else {
return (size_t) (rs_length * predict_mixed_cards_per_entry_ratio());
}
}
double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const {
if (gcs_are_young) {
return card_num * get_new_prediction(_cost_per_entry_ms_seq);
} else {
return predict_mixed_rs_scan_time_ms(card_num);
}
}
double G1Analytics::predict_mixed_rs_scan_time_ms(size_t card_num) const {
if (_mixed_cost_per_entry_ms_seq->num() < 3) {
return card_num * get_new_prediction(_cost_per_entry_ms_seq);
} else {
return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
}
}
double G1Analytics::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
} else {
return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
}
}
double G1Analytics::predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const {
if (during_concurrent_mark) {
return predict_object_copy_time_ms_during_cm(bytes_to_copy);
} else {
return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
}
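For example (numbers invented): with fewer than three in-marking samples available, copying 1 MB falls back to the normal copy rate with a 10% pad, i.e. 1.1 * 1,048,576 bytes * 0.00003 ms/byte ≈ 34.6 ms; once enough marking-time samples exist, they are used directly and unpadded.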
}
double G1Analytics::predict_constant_other_time_ms() const {
return get_new_prediction(_constant_other_time_ms_seq);
}
double G1Analytics::predict_young_other_time_ms(size_t young_num) const {
return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
}
double G1Analytics::predict_non_young_other_time_ms(size_t non_young_num) const {
return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}
double G1Analytics::predict_remark_time_ms() const {
return get_new_prediction(_concurrent_mark_remark_times_ms);
}
double G1Analytics::predict_cleanup_time_ms() const {
return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}
size_t G1Analytics::predict_rs_lengths() const {
return get_new_size_prediction(_rs_lengths_seq);
}
size_t G1Analytics::predict_pending_cards() const {
return get_new_size_prediction(_pending_cards_seq);
}
double G1Analytics::last_known_gc_end_time_sec() const {
return _recent_prev_end_times_for_all_gcs_sec->oldest();
}
void G1Analytics::update_recent_gc_times(double end_time_sec,
double pause_time_ms) {
_recent_gc_times_ms->add(pause_time_ms);
_recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
_prev_collection_pause_end_ms = end_time_sec * 1000.0;
}
void G1Analytics::report_concurrent_mark_cleanup_times_ms(double ms) {
_concurrent_mark_cleanup_times_ms->add(ms);
}
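End to end the class is a feed-then-ask interface: pauses report what they measured, and the policy asks for predictions before the next pause. A hedged usage sketch (the member functions are real per this file; the call site and the measured_* variables are invented):

// After a pause: feed measurements into the sequences.
analytics->report_cost_per_card_ms(measured_ms_per_card);
analytics->report_rs_lengths((double) measured_rs_length);
analytics->update_recent_gc_times(end_time_sec, pause_time_ms);

// Before the next pause: ask for pessimistic estimates.
size_t cards     = analytics->predict_card_num(analytics->predict_rs_lengths(),
                                               true /* gcs_are_young */);
double update_ms = analytics->predict_rs_update_time_ms(analytics->predict_pending_cards());
double scan_ms   = analytics->predict_rs_scan_time_ms(cards, true /* gcs_are_young */);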

View File

@ -0,0 +1,159 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1ANALYTICS_HPP
#define SHARE_VM_GC_G1_G1ANALYTICS_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
class TruncatedSeq;
class G1Predictions;
class G1Analytics: public CHeapObj<mtGC> {
const static int TruncatedSeqLength = 10;
const static int NumPrevPausesForHeuristics = 10;
const G1Predictions* _predictor;
// These exclude marking times.
TruncatedSeq* _recent_gc_times_ms;
TruncatedSeq* _concurrent_mark_remark_times_ms;
TruncatedSeq* _concurrent_mark_cleanup_times_ms;
TruncatedSeq* _alloc_rate_ms_seq;
double _prev_collection_pause_end_ms;
TruncatedSeq* _rs_length_diff_seq;
TruncatedSeq* _cost_per_card_ms_seq;
TruncatedSeq* _cost_scan_hcc_seq;
TruncatedSeq* _young_cards_per_entry_ratio_seq;
TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
TruncatedSeq* _cost_per_entry_ms_seq;
TruncatedSeq* _mixed_cost_per_entry_ms_seq;
TruncatedSeq* _cost_per_byte_ms_seq;
TruncatedSeq* _constant_other_time_ms_seq;
TruncatedSeq* _young_other_cost_per_region_ms_seq;
TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
TruncatedSeq* _pending_cards_seq;
TruncatedSeq* _rs_lengths_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
// Statistics kept per GC stoppage, pause or full.
TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
// The ratio of gc time to elapsed time, computed over recent pauses,
// and the ratio for just the last pause.
double _recent_avg_pause_time_ratio;
double _last_pause_time_ratio;
double get_new_prediction(TruncatedSeq const* seq) const;
size_t get_new_size_prediction(TruncatedSeq const* seq) const;
public:
G1Analytics(const G1Predictions* predictor);
double prev_collection_pause_end_ms() const {
return _prev_collection_pause_end_ms;
}
double recent_avg_pause_time_ratio() const {
return _recent_avg_pause_time_ratio;
}
double last_pause_time_ratio() const {
return _last_pause_time_ratio;
}
uint number_of_recorded_pause_times() const {
return NumPrevPausesForHeuristics;
}
void append_prev_collection_pause_end_ms(double ms) {
_prev_collection_pause_end_ms += ms;
}
void report_concurrent_mark_remark_times_ms(double ms);
void report_concurrent_mark_cleanup_times_ms(double ms);
void report_alloc_rate_ms(double alloc_rate);
void report_cost_per_card_ms(double cost_per_card_ms);
void report_cost_scan_hcc(double cost_scan_hcc);
void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young);
void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young);
void report_rs_length_diff(double rs_length_diff);
void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window);
void report_young_other_cost_per_region_ms(double other_cost_per_region_ms);
void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms);
void report_constant_other_time_ms(double constant_other_time_ms);
void report_pending_cards(double pending_cards);
void report_rs_lengths(double rs_lengths);
size_t predict_rs_length_diff() const;
double predict_alloc_rate_ms() const;
int num_alloc_rate_ms() const;
double predict_cost_per_card_ms() const;
double predict_scan_hcc_ms() const;
double predict_rs_update_time_ms(size_t pending_cards) const;
double predict_young_cards_per_entry_ratio() const;
double predict_mixed_cards_per_entry_ratio() const;
size_t predict_card_num(size_t rs_length, bool gcs_are_young) const;
double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const;
double predict_mixed_rs_scan_time_ms(size_t card_num) const;
double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;
double predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const;
double predict_constant_other_time_ms() const;
double predict_young_other_time_ms(size_t young_num) const;
double predict_non_young_other_time_ms(size_t non_young_num) const;
double predict_remark_time_ms() const;
double predict_cleanup_time_ms() const;
size_t predict_rs_lengths() const;
size_t predict_pending_cards() const;
// Add a new GC of the given duration and end time to the record.
void update_recent_gc_times(double end_time_sec, double elapsed_ms);
void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
double last_known_gc_end_time_sec() const;
};
#endif // SHARE_VM_GC_G1_G1ANALYTICS_HPP

View File

@ -34,10 +34,12 @@
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1MarkSweep.hpp"
@ -566,7 +568,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
log_warning(gc)("G1CollectedHeap::mem_allocate retries %d times", try_count);
}
}
@ -675,7 +677,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("G1CollectedHeap::attempt_allocation_slow() "
log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
"retries %d times", try_count);
}
}
@ -1091,7 +1093,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
warning("G1CollectedHeap::attempt_allocation_humongous() "
log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() "
"retries %d times", try_count);
}
}
@ -1228,6 +1230,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
ResourceMark rm;
print_heap_before_gc();
print_heap_regions();
trace_heap_before_gc(gc_tracer);
size_t metadata_prev_used = MetaspaceAux::used_bytes();
@ -1302,9 +1305,9 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
abandon_collection_set(g1_policy()->inc_cset_head());
g1_policy()->clear_incremental_cset();
g1_policy()->stop_incremental_cset_building();
abandon_collection_set(collection_set()->inc_head());
collection_set()->clear_incremental();
collection_set()->stop_incremental_building();
tear_down_region_sets(false /* free_list_only */);
collector_state()->set_gcs_are_young(true);
@ -1421,13 +1424,13 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
// the full GC has compacted objects and updated TAMS but not updated
// the prev bitmap.
if (G1VerifyBitmaps) {
((G1CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
_cm->clear_prev_bitmap(workers());
}
_verifier->check_bitmaps("Full GC End");
// Start a new incremental collection set for the next pause
assert(g1_policy()->collection_set() == NULL, "must be");
g1_policy()->start_incremental_cset_building();
assert(collection_set()->head() == NULL, "must be");
collection_set()->start_incremental_building();
clear_cset_fast_test();
@ -1446,6 +1449,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
heap_transition.print();
print_heap_after_gc();
print_heap_regions();
trace_heap_after_gc(gc_tracer);
post_full_gc_dump(gc_timer);
@ -1741,6 +1745,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
CollectedHeap(),
_g1_policy(policy_),
_collection_set(this),
_dirty_card_queue_set(false),
_is_alive_closure_cm(this),
_is_alive_closure_stw(this),
@ -1765,15 +1770,12 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_expand_heap_after_alloc_failure(true),
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_heap_summary_sent(false),
_in_cset_fast_test(),
_dirty_cards_region_list(NULL),
_worker_cset_start_region(NULL),
_worker_cset_start_region_time_stamp(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
@ -1782,6 +1784,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_verifier = new G1HeapVerifier(this);
_allocator = G1Allocator::create_allocator(this);
_heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
// Override the default _filler_array_max_size so that no humongous filler
@ -2314,52 +2319,6 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
FullGCCount_lock->notify_all();
}
void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
GCIdMarkAndRestore conc_gc_id_mark;
collector_state()->set_concurrent_cycle_started(true);
_gc_timer_cm->register_gc_start(start_time);
_gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
trace_heap_before_gc(_gc_tracer_cm);
_cmThread->set_gc_id(GCId::current());
}
void G1CollectedHeap::register_concurrent_cycle_end() {
if (collector_state()->concurrent_cycle_started()) {
GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
// ConcurrentGCTimer will be ended as well.
_cm->register_concurrent_gc_end_and_stop_timer();
} else {
_gc_timer_cm->register_gc_end();
}
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
// Clear state variables to prepare for the next concurrent cycle.
collector_state()->set_concurrent_cycle_started(false);
_heap_summary_sent = false;
}
}
void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
if (collector_state()->concurrent_cycle_started()) {
// This function can be called when:
// the cleanup pause is run
// the concurrent cycle is aborted before the cleanup pause.
// the concurrent cycle is aborted after the cleanup pause,
// but before the concurrent cycle end has been registered.
// Make sure that we only send the heap information once.
if (!_heap_summary_sent) {
GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
trace_heap_after_gc(_gc_tracer_cm);
_heap_summary_sent = true;
}
}
}
void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked();
@ -2545,8 +2504,8 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
// p threads
// Then thread t will start at region floor ((t * n) / p)
result = g1_policy()->collection_set();
uint cs_size = g1_policy()->cset_region_length();
result = collection_set()->head();
uint cs_size = collection_set()->region_length();
uint active_workers = workers()->active_workers();
uint end_ind = (cs_size * worker_i) / active_workers;
@ -2577,7 +2536,7 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
}
void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
HeapRegion* r = g1_policy()->collection_set();
HeapRegion* r = collection_set()->head();
while (r != NULL) {
HeapRegion* next = r->next_in_collection_set();
if (cl->doHeapRegion(r)) {
@ -2606,7 +2565,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
}
cur = next;
}
cur = g1_policy()->collection_set();
cur = collection_set()->head();
while (cur != r) {
HeapRegion* next = cur->next_in_collection_set();
if (cl->doHeapRegion(cur) && false) {
@ -2716,6 +2675,14 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
return false; // keep some compilers happy
}
void G1CollectedHeap::print_heap_regions() const {
LogHandle(gc, heap, region) log;
if (log.is_trace()) {
ResourceMark rm;
print_regions_on(log.trace_stream());
}
}
void G1CollectedHeap::print_on(outputStream* st) const {
st->print(" %-20s", "garbage-first heap");
st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
@ -2729,18 +2696,14 @@ void G1CollectedHeap::print_on(outputStream* st) const {
uint young_regions = _young_list->length();
st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
(size_t) young_regions * HeapRegion::GrainBytes / K);
uint survivor_regions = g1_policy()->recorded_survivor_regions();
uint survivor_regions = _young_list->survivor_length();
st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
(size_t) survivor_regions * HeapRegion::GrainBytes / K);
st->cr();
MetaspaceAux::print_on(st);
}
void G1CollectedHeap::print_extended_on(outputStream* st) const {
print_on(st);
// Print the per-region information.
st->cr();
void G1CollectedHeap::print_regions_on(outputStream* st) const {
st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, A=archive, TS=gc time stamp, "
@ -2750,6 +2713,13 @@ void G1CollectedHeap::print_extended_on(outputStream* st) const {
heap_region_iterate(&blk);
}
void G1CollectedHeap::print_extended_on(outputStream* st) const {
print_on(st);
// Print the per-region information.
print_regions_on(st);
}
void G1CollectedHeap::print_on_error(outputStream* st) const {
this->CollectedHeap::print_on_error(st);
@ -2839,12 +2809,14 @@ G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
size_t eden_used_bytes = young_list->eden_used_bytes();
size_t survivor_used_bytes = young_list->survivor_used_bytes();
size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
size_t eden_capacity_bytes =
(g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
VirtualSpaceSummary heap_summary = create_heap_space_summary();
return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes, num_regions());
return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
eden_capacity_bytes, survivor_used_bytes, num_regions());
}
G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
@ -2862,7 +2834,6 @@ void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
gc_tracer->report_metaspace_summary(when, metaspace_summary);
}
G1CollectedHeap* G1CollectedHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
@ -3201,15 +3172,19 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
wait_for_root_region_scanning();
print_heap_before_gc();
print_heap_regions();
trace_heap_before_gc(_gc_tracer_stw);
_verifier->verify_region_sets_optional();
_verifier->verify_dirty_young_regions();
// We should not be doing initial mark unless the conc mark thread is running
if (!_cmThread->should_terminate()) {
// This call will decide whether this pause is an initial-mark
// pause. If it is, during_initial_mark_pause() will return true
// for the duration of this pause.
g1_policy()->decide_on_conc_mark_initiation();
}
// We do not allow initial-mark to be piggy-backed on a mixed GC.
assert(!collector_state()->during_initial_mark_pause() ||
@ -3231,7 +3206,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_old_marking_cycles_started();
register_concurrent_cycle_start(_gc_timer_stw->gc_start());
_cm->gc_tracer_cm()->set_gc_cause(gc_cause());
}
_gc_tracer_stw->report_yc_type(collector_state()->yc_type());
@ -3336,10 +3311,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
concurrent_mark()->checkpointRootsInitialPre();
}
double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
g1_policy()->finalize_old_cset_part(time_remaining_ms);
g1_policy()->finalize_collection_set(target_pause_time_ms);
evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
evacuation_info.set_collectionset_regions(collection_set()->region_length());
// Make sure the remembered sets are up to date. This needs to be
// done before register_humongous_regions_with_cset(), because the
@ -3358,7 +3332,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_cm->verify_no_cset_oops();
if (_hr_printer.is_active()) {
HeapRegion* hr = g1_policy()->collection_set();
HeapRegion* hr = collection_set()->head();
while (hr != NULL) {
_hr_printer.cset(hr);
hr = hr->next_in_collection_set();
@ -3373,7 +3347,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Initialize the GC alloc regions.
_allocator->init_gc_alloc_regions(evacuation_info);
G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), g1_policy()->young_cset_region_length());
G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
pre_evacuate_collection_set();
// Actually do the work...
@ -3382,18 +3356,18 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
post_evacuate_collection_set(evacuation_info, &per_thread_states);
const size_t* surviving_young_words = per_thread_states.surviving_young_words();
free_collection_set(g1_policy()->collection_set(), evacuation_info, surviving_young_words);
free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
eagerly_reclaim_humongous_regions();
g1_policy()->clear_collection_set();
collection_set()->clear_head();
record_obj_copy_mem_stats();
_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();
// Start a new incremental collection set for the next pause.
g1_policy()->start_incremental_cset_building();
collection_set()->start_incremental_building();
clear_cset_fast_test();
@ -3404,10 +3378,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert(check_young_list_empty(false /* check_heap */),
"young list should be empty");
g1_policy()->record_survivor_regions(_young_list->survivor_length(),
_young_list->first_survivor_region(),
_young_list->last_survivor_region());
_young_list->reset_auxilary_lists();
if (evacuation_failed()) {
@ -3442,7 +3412,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_allocator->init_mutator_alloc_region();
{
size_t expand_bytes = g1_policy()->expansion_amount();
size_t expand_bytes = _heap_sizing_policy->expansion_amount();
if (expand_bytes > 0) {
size_t bytes_before = capacity();
// No need for an ergo logging here,
@ -3468,7 +3438,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
size_t total_cards_scanned = per_thread_states.total_cards_scanned();
g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
MemoryService::track_memory_usage();
@ -3538,6 +3508,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
print_heap_after_gc();
print_heap_regions();
trace_heap_after_gc(_gc_tracer_stw);
// We must call G1MonitoringSupport::update_sizes() in the same scoping level
@ -3776,7 +3747,8 @@ public:
"claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
log_debug(gc, stringdedup)("Cleaned string and symbol table, "
log_info(gc, stringtable)(
"Cleaned string and symbol table, "
"strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
"symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
strings_processed(), strings_removed(),
@ -4083,14 +4055,10 @@ void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
bool process_strings, bool process_symbols) {
{
{ // Timing scope
G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
workers()->run_task(&g1_unlink_task);
}
if (G1StringDedup::is_enabled()) {
G1StringDedup::unlink(is_alive);
}
}
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
@ -4279,7 +4247,7 @@ public:
_workers(workers),
_active_workers(n_workers)
{
assert(n_workers > 0, "shouldn't call this otherwise");
g1h->ref_processor_stw()->set_active_mt_degree(n_workers);
}
// Executes the given task using concurrent marking worker threads.
@ -4400,7 +4368,9 @@ public:
_queues(task_queues),
_terminator(workers, _queues),
_n_workers(workers)
{ }
{
g1h->ref_processor_cm()->set_active_mt_degree(workers);
}
void work(uint worker_id) {
G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
@ -4543,8 +4513,9 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
uint no_of_gc_workers = workers()->active_workers();
// Parallel reference processing
assert(rp->num_q() == no_of_gc_workers, "sanity");
assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
assert(no_of_gc_workers <= rp->max_num_q(),
"Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
no_of_gc_workers, rp->max_num_q());
G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
stats = rp->process_discovered_references(&is_alive,
@ -4580,8 +4551,9 @@ void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per
uint n_workers = workers()->active_workers();
assert(rp->num_q() == n_workers, "sanity");
assert(n_workers <= rp->max_num_q(), "sanity");
assert(n_workers <= rp->max_num_q(),
"Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
n_workers, rp->max_num_q());
G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
rp->enqueue_discovered_references(&par_task_executor);
@ -4909,7 +4881,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& e
if (cur->is_young()) {
int index = cur->young_index_in_cset();
assert(index != -1, "invariant");
assert((uint) index < policy->young_cset_region_length(), "invariant");
assert((uint) index < collection_set()->young_region_length(), "invariant");
size_t words_survived = surviving_young_words[index];
cur->record_surv_words_in_group(words_survived);
@ -5382,7 +5354,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
collection_set()->add_eden_region(alloc_region);
increase_used(allocated_bytes);
_hr_printer.retire(alloc_region);
// We update the eden sizes here, when the region is retired,
@ -5393,13 +5365,23 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
// Methods for the GC alloc regions
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
uint count,
InCSetState dest) {
bool G1CollectedHeap::has_more_regions(InCSetState dest) {
if (dest.is_old()) {
return true;
} else {
return young_list()->survivor_length() < g1_policy()->max_survivor_regions();
}
}
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
assert(FreeList_lock->owned_by_self(), "pre-condition");
if (count < g1_policy()->max_regions(dest)) {
const bool is_survivor = (dest.is_young());
if (!has_more_regions(dest)) {
return NULL;
}
const bool is_survivor = dest.is_young();
HeapRegion* new_alloc_region = new_region(word_size,
!is_survivor,
true /* do_expand */);
@ -5410,6 +5392,7 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
new_alloc_region->record_timestamp();
if (is_survivor) {
new_alloc_region->set_survivor();
young_list()->add_survivor_region(new_alloc_region);
_verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
} else {
new_alloc_region->set_old();
@ -5420,7 +5403,6 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
}
}
return NULL;
}
@ -5430,9 +5412,7 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
bool during_im = collector_state()->during_initial_mark_pause();
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
if (dest.is_young()) {
young_list()->add_survivor_region(alloc_region);
} else {
if (dest.is_old()) {
_old_set.add(alloc_region);
}
_hr_printer.retire(alloc_region);

View File

@ -28,6 +28,7 @@
#include "gc/g1/evacuationInfo.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1HRPrinter.hpp"
@ -65,17 +66,16 @@ class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectionSet;
class G1CollectorPolicy;
class G1RemSet;
class HeapRegionRemSetIterator;
class G1ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentGCTimer;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
@ -83,6 +83,7 @@ class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1HeapVerifier;
class G1HeapSizingPolicy;
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@ -268,8 +269,6 @@ private:
// concurrent cycles) we have completed.
volatile uint _old_marking_cycles_completed;
bool _heap_summary_sent;
// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
// allocating a number of dead regions. This way we can induce very
@ -362,6 +361,9 @@ protected:
// The current policy object for the collector.
G1CollectorPolicy* _g1_policy;
G1HeapSizingPolicy* _heap_sizing_policy;
G1CollectionSet _collection_set;
// This is the second level of trying to allocate a new region. If
// new_region() didn't find a region on the free_list, this call will
@ -469,8 +471,8 @@ protected:
size_t allocated_bytes);
// For GC alloc regions.
HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
InCSetState dest);
bool has_more_regions(InCSetState dest);
HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest);
void retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes, InCSetState dest);
@ -618,10 +620,6 @@ public:
return _old_marking_cycles_completed;
}
void register_concurrent_cycle_start(const Ticks& start_time);
void register_concurrent_cycle_end();
void trace_heap_after_concurrent_cycle();
G1HRPrinter* hr_printer() { return &_hr_printer; }
// Allocates a new heap region instance.
@ -896,9 +894,7 @@ protected:
ReferenceProcessor* _ref_processor_stw;
STWGCTimer* _gc_timer_stw;
ConcurrentGCTimer* _gc_timer_cm;
G1OldTracer* _gc_tracer_cm;
G1NewTracer* _gc_tracer_stw;
// During reference object discovery, the _is_alive_non_header
@ -985,6 +981,9 @@ public:
// The current policy object for the collector.
G1CollectorPolicy* g1_policy() const { return _g1_policy; }
const G1CollectionSet* collection_set() const { return &_collection_set; }
G1CollectionSet* collection_set() { return &_collection_set; }
virtual CollectorPolicy* collector_policy() const;
// Adaptive size policy. No such thing for g1.
@ -1029,9 +1028,6 @@ public:
// The Concurrent Marking reference processor...
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
virtual size_t capacity() const;
virtual size_t used() const;
// This should be called when we're not holding the heap lock. The
@ -1285,6 +1281,12 @@ public:
return true;
}
// The reference pending list lock is acquired from the
// ConcurrentMarkThread.
virtual bool needs_reference_pending_list_locker_thread() const {
return true;
}
inline bool is_in_young(const oop obj);
virtual bool is_scavengable(const void* addr);
@ -1463,7 +1465,11 @@ public:
G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
// Printing
private:
void print_heap_regions() const;
void print_regions_on(outputStream* st) const;
public:
virtual void print_on(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;

View File

@ -0,0 +1,440 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "utilities/debug.hpp"
G1CollectorState* G1CollectionSet::collector_state() {
return _g1->collector_state();
}
G1GCPhaseTimes* G1CollectionSet::phase_times() {
return _policy->phase_times();
}
CollectionSetChooser* G1CollectionSet::cset_chooser() {
return _cset_chooser;
}
double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
return _policy->predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
}
G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h) :
_g1(g1h),
_policy(NULL),
_cset_chooser(new CollectionSetChooser()),
_eden_region_length(0),
_survivor_region_length(0),
_old_region_length(0),
_head(NULL),
_bytes_used_before(0),
_bytes_live_before(0),
_recorded_rs_lengths(0),
// Incremental CSet attributes
_inc_build_state(Inactive),
_inc_head(NULL),
_inc_tail(NULL),
_inc_bytes_used_before(0),
_inc_bytes_live_before(0),
_inc_recorded_rs_lengths(0),
_inc_recorded_rs_lengths_diffs(0),
_inc_predicted_elapsed_time_ms(0.0),
_inc_predicted_elapsed_time_ms_diffs(0.0) {}
G1CollectionSet::~G1CollectionSet() {
delete _cset_chooser;
}
void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length) {
_eden_region_length = eden_cset_region_length;
_survivor_region_length = survivor_cset_region_length;
_old_region_length = 0;
}
void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
_recorded_rs_lengths = rs_lengths;
}
// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
assert(_inc_build_state == Active, "Precondition");
assert(hr->is_old(), "the region should be old");
assert(!hr->in_collection_set(), "should not already be in the CSet");
_g1->register_old_region_with_cset(hr);
hr->set_next_in_collection_set(_head);
_head = hr;
_bytes_used_before += hr->used();
_bytes_live_before += hr->live_bytes();
size_t rs_length = hr->rem_set()->occupied();
_recorded_rs_lengths += rs_length;
_old_region_length += 1;
}
// Initialize the per-collection-set information
void G1CollectionSet::start_incremental_building() {
assert(_inc_build_state == Inactive, "Precondition");
_inc_head = NULL;
_inc_tail = NULL;
_inc_bytes_used_before = 0;
_inc_bytes_live_before = 0;
_inc_recorded_rs_lengths = 0;
_inc_recorded_rs_lengths_diffs = 0;
_inc_predicted_elapsed_time_ms = 0.0;
_inc_predicted_elapsed_time_ms_diffs = 0.0;
_inc_build_state = Active;
}
void G1CollectionSet::finalize_incremental_building() {
assert(_inc_build_state == Active, "Precondition");
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
// The two "main" fields, _inc_recorded_rs_lengths and
// _inc_predicted_elapsed_time_ms, are updated by the thread
// that adds a new region to the CSet. Further updates by the
// concurrent refinement thread that samples the young RSet lengths
// are accumulated in the *_diffs fields. Here we add the diffs to
// the "main" fields.
if (_inc_recorded_rs_lengths_diffs >= 0) {
_inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
} else {
// This is defensive. The diff should in theory always be positive
// as RSets can only grow between GCs. However, given that we
// sample their size concurrently with other threads updating them
// it's possible that we might get the wrong size back, which
// could make the calculations somewhat inaccurate.
size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
if (_inc_recorded_rs_lengths >= diffs) {
_inc_recorded_rs_lengths -= diffs;
} else {
_inc_recorded_rs_lengths = 0;
}
}
_inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;
_inc_recorded_rs_lengths_diffs = 0;
_inc_predicted_elapsed_time_ms_diffs = 0.0;
}
void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
size_t new_rs_length) {
// Update the CSet information that is dependent on the new RS length
assert(hr->is_young(), "Precondition");
assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");
// We could have updated _inc_recorded_rs_lengths and
// _inc_predicted_elapsed_time_ms directly but we'd need to do
// that atomically, as this code is executed by a concurrent
// refinement thread, potentially concurrently with a mutator thread
// allocating a new region and also updating the same fields. To
// avoid the atomic operations we accumulate these updates on two
// separate fields (*_diffs) and we'll just add them to the "main"
// fields at the start of a GC.
ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
_inc_recorded_rs_lengths_diffs += rs_lengths_diff;
double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
_inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
hr->set_recorded_rs_length(new_rs_length);
hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}
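The two methods above rely on a single-writer-per-field scheme rather than atomics: the region-adding thread owns the main totals, the refinement thread owns the *_diffs fields, and the diffs are folded into the totals at a safepoint. A minimal standalone sketch of that pattern, in plain C++ rather than HotSpot code (all names here are illustrative):

#include <cstddef>

// Illustrative sketch, not HotSpot code: one thread owns main_length, a
// concurrent sampler only accumulates signed deltas in diffs, and the
// deltas are folded into the total at a quiescent point (a safepoint in G1).
struct RSetLengthTracker {
  size_t    main_length;  // written only by the thread adding CSet regions
  ptrdiff_t diffs;        // written only by the refinement/sampling thread

  RSetLengthTracker() : main_length(0), diffs(0) {}

  // Called by the sampler when a region's remembered set grows or shrinks.
  void sample(size_t old_len, size_t new_len) {
    diffs += (ptrdiff_t)new_len - (ptrdiff_t)old_len;
  }

  // Called at a safepoint; no concurrent updates are possible here.
  void fold() {
    if (diffs >= 0) {
      main_length += (size_t)diffs;
    } else {
      size_t d = (size_t)(-diffs);
      main_length = (main_length >= d) ? main_length - d : 0; // defensive clamp
    }
    diffs = 0;
  }
};

Because each field has exactly one writer between safepoints, no compare-and-swap is needed; only the fold step requires mutual exclusion, which the safepoint already provides.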
void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
assert(hr->is_young(), "invariant");
assert(hr->young_index_in_cset() > -1, "should have already been set");
assert(_inc_build_state == Active, "Precondition");
// This routine is used when:
// * adding survivor regions to the incremental cset at the end of an
// evacuation pause or
// * adding the current allocation region to the incremental cset
// when it is retired.
// Therefore this routine may be called at a safepoint by the
// VM thread, or in-between safepoints by mutator threads (when
// retiring the current allocation region)
// We need to set the cached recorded/predicted collection set
// information in the heap region here (before the region gets added
// to the collection set). An individual heap region's cached values
// are calculated, aggregated with the policy collection set info,
// and cached in the heap region here (initially) and (subsequently)
// by the Young List sampling code.
size_t rs_length = hr->rem_set()->occupied();
double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
// Cache the values we have added to the aggregated information
// in the heap region in case we have to remove this region from
// the incremental collection set, or it is updated by the
// rset sampling code
hr->set_recorded_rs_length(rs_length);
hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
size_t used_bytes = hr->used();
_inc_recorded_rs_lengths += rs_length;
_inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
_inc_bytes_used_before += used_bytes;
_inc_bytes_live_before += hr->live_bytes();
assert(!hr->in_collection_set(), "invariant");
_g1->register_young_region_with_cset(hr);
assert(hr->next_in_collection_set() == NULL, "invariant");
}
// Add the region at the RHS of the incremental cset
void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
// We should only ever be appending survivors at the end of a pause
assert(hr->is_survivor(), "Logic");
// Do the 'common' stuff
add_young_region_common(hr);
// Now add the region at the right hand side
if (_inc_tail == NULL) {
assert(_inc_head == NULL, "invariant");
_inc_head = hr;
} else {
_inc_tail->set_next_in_collection_set(hr);
}
_inc_tail = hr;
}
// Add the region to the LHS of the incremental cset
void G1CollectionSet::add_eden_region(HeapRegion* hr) {
// Survivors should be added to the RHS at the end of a pause
assert(hr->is_eden(), "Logic");
// Do the 'common' stuff
add_young_region_common(hr);
// Add the region at the left hand side
hr->set_next_in_collection_set(_inc_head);
if (_inc_head == NULL) {
assert(_inc_tail == NULL, "Invariant");
_inc_tail = hr;
}
_inc_head = hr;
}
#ifndef PRODUCT
void G1CollectionSet::print(HeapRegion* list_head, outputStream* st) {
assert(list_head == inc_head() || list_head == head(), "must be");
st->print_cr("\nCollection_set:");
HeapRegion* csr = list_head;
while (csr != NULL) {
HeapRegion* next = csr->next_in_collection_set();
assert(csr->in_collection_set(), "bad CS");
st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
HR_FORMAT_PARAMS(csr),
p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
csr->age_in_surv_rate_group_cond());
csr = next;
}
}
#endif // !PRODUCT
double G1CollectionSet::finalize_young_part(double target_pause_time_ms) {
double young_start_time_sec = os::elapsedTime();
YoungList* young_list = _g1->young_list();
finalize_incremental_building();
guarantee(target_pause_time_ms > 0.0,
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
guarantee(_head == NULL, "Precondition");
size_t pending_cards = _policy->pending_cards();
double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
// The young list is laid out so that the survivor regions from the previous
// pause are appended to the RHS of the young list, i.e.
// [Newly Young Regions ++ Survivors from last pause].
uint survivor_region_length = young_list->survivor_length();
uint eden_region_length = young_list->eden_length();
init_region_lengths(eden_region_length, survivor_region_length);
HeapRegion* hr = young_list->first_survivor_region();
while (hr != NULL) {
assert(hr->is_survivor(), "badly formed young list");
// There is a convention that all the young regions in the CSet
// are tagged as "eden", so we do this for the survivors here. We
// use the special set_eden_pre_gc() as it doesn't check that the
// region is free (which is not the case here).
hr->set_eden_pre_gc();
hr = hr->get_next_young_region();
}
// Clear the fields that point to the survivor list - they are all young now.
young_list->clear_survivors();
_head = _inc_head;
_bytes_used_before = _inc_bytes_used_before;
_bytes_live_before = _inc_bytes_live_before;
time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);
log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
eden_region_length, survivor_region_length, _inc_predicted_elapsed_time_ms, target_pause_time_ms);
// The recorded RS lengths are taken from the incremental
// collection set's current value.
set_recorded_rs_lengths(_inc_recorded_rs_lengths);
double young_end_time_sec = os::elapsedTime();
phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
return time_remaining_ms;
}
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
double non_young_start_time_sec = os::elapsedTime();
double predicted_old_time_ms = 0.0;
if (!collector_state()->gcs_are_young()) {
cset_chooser()->verify();
const uint min_old_cset_length = _policy->calc_min_old_cset_length();
const uint max_old_cset_length = _policy->calc_max_old_cset_length();
const size_t estimated_available_bytes = _policy->available_bytes_estimate();
uint expensive_region_num = 0;
bool check_time_remaining = _policy->adaptive_young_list_length();
HeapRegion* hr = cset_chooser()->peek();
while (hr != NULL) {
if (old_region_length() >= max_old_cset_length) {
// Added maximum number of old regions to the CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
old_region_length(), max_old_cset_length);
break;
}
// Stop adding regions if the remaining reclaimable space is
// not above G1HeapWastePercent.
size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
double reclaimable_perc = _policy->reclaimable_bytes_perc(reclaimable_bytes);
double threshold = (double) G1HeapWastePercent;
if (reclaimable_perc <= threshold) {
// We've added enough old regions that the amount of uncollected
// reclaimable space is at or below the waste threshold. Stop
// adding old regions to the CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
"old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
break;
}
// Stop adding regions if the live bytes (according to the last marking)
// added so far would exceed the estimated free bytes.
if ((_bytes_live_before + hr->live_bytes()) > estimated_available_bytes) {
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reached estimated free space limit)");
break;
}
double predicted_time_ms = predict_region_elapsed_time_ms(hr);
if (check_time_remaining) {
if (predicted_time_ms > time_remaining_ms) {
// Too expensive for the current CSet.
if (old_region_length() >= min_old_cset_length) {
// We have added the minimum number of old regions to the CSet,
// we are done with this CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
"predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
predicted_time_ms, time_remaining_ms, old_region_length(), min_old_cset_length);
break;
}
// We'll add it anyway given that we haven't reached the
// minimum number of old regions.
expensive_region_num += 1;
}
} else {
if (old_region_length() >= min_old_cset_length) {
// In the non-auto-tuning case, we'll finish adding regions
// to the CSet if we reach the minimum.
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
old_region_length(), min_old_cset_length);
break;
}
}
// We will add this region to the CSet.
time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
predicted_old_time_ms += predicted_time_ms;
cset_chooser()->pop(); // already have region via peek()
_g1->old_set_remove(hr);
add_old_region(hr);
hr = cset_chooser()->peek();
}
if (hr == NULL) {
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
}
if (expensive_region_num > 0) {
// We print the information once here at the end, predicated on
// whether we added any apparently expensive regions or not, to
// avoid generating output per region.
log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
"old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
}
cset_chooser()->verify();
}
stop_incremental_building();
log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
old_region_length(), predicted_old_time_ms, time_remaining_ms);
double non_young_end_time_sec = os::elapsedTime();
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}
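A worked instance of the waste threshold in the loop above, assuming G1HeapWastePercent keeps its default value of 5 (it is a tunable flag): on a 10 GB heap, reclaimable_perc reaches the threshold once the remaining reclaimable space in the candidate regions drops to 512 MB (5% of 10 GB), at which point mixed collections stop adding old regions even if pause-time budget remains.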

View File

@ -0,0 +1,217 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
#define SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
#include "gc/g1/collectionSetChooser.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
class G1CollectedHeap;
class G1CollectorPolicy;
class G1CollectorState;
class G1GCPhaseTimes;
class HeapRegion;
class G1CollectionSet VALUE_OBJ_CLASS_SPEC {
G1CollectedHeap* _g1;
G1CollectorPolicy* _policy;
CollectionSetChooser* _cset_chooser;
uint _eden_region_length;
uint _survivor_region_length;
uint _old_region_length;
// The head of the list (via "next_in_collection_set()") representing the
// current collection set. Set from the incrementally built collection
// set at the start of the pause.
HeapRegion* _head;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
// pause, and incremented in finalize_old_part() when adding old regions
// (if any) to the collection set.
size_t _bytes_used_before;
// The sum of live bytes in the collection set, set as described above.
size_t _bytes_live_before;
size_t _recorded_rs_lengths;
// The associated information that is maintained while the incremental
// collection set is being built with young regions. Used to populate
// the recorded info for the evacuation pause.
enum CSetBuildType {
Active, // We are actively building the collection set
Inactive // We are not actively building the collection set
};
CSetBuildType _inc_build_state;
// The head of the incrementally built collection set.
HeapRegion* _inc_head;
// The tail of the incrementally built collection set.
HeapRegion* _inc_tail;
// The number of bytes in the incrementally built collection set.
// Used to set _bytes_used_before at the start of
// an evacuation pause.
size_t _inc_bytes_used_before;
// The number of live bytes in the incrementally built collection set.
size_t _inc_bytes_live_before;
// The RSet lengths recorded for regions in the CSet. It is updated
// by the thread that adds a new region to the CSet. We assume that
// only one thread can be allocating a new CSet region (currently,
// it does so after taking the Heap_lock) hence no need to
// synchronize updates to this field.
size_t _inc_recorded_rs_lengths;
// A concurrent refinement thread periodically samples the young
// region RSets and needs to update _inc_recorded_rs_lengths as
// the RSets grow. Instead of having to synchronize updates to that
// field we accumulate them in this field and add them to
// _inc_recorded_rs_lengths at the start of a GC.
ssize_t _inc_recorded_rs_lengths_diffs;
// The predicted elapsed time it will take to collect the regions in
// the CSet. This is updated by the thread that adds a new region to
// the CSet. See the comment for _inc_recorded_rs_lengths about
// MT-safety assumptions.
double _inc_predicted_elapsed_time_ms;
// See the comment for _inc_recorded_rs_lengths_diffs.
double _inc_predicted_elapsed_time_ms_diffs;
G1CollectorState* collector_state();
G1GCPhaseTimes* phase_times();
double predict_region_elapsed_time_ms(HeapRegion* hr);
public:
G1CollectionSet(G1CollectedHeap* g1h);
~G1CollectionSet();
void set_policy(G1CollectorPolicy* g1p) {
assert(_policy == NULL, "should only initialize once");
_policy = g1p;
}
CollectionSetChooser* cset_chooser();
void init_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length);
void set_recorded_rs_lengths(size_t rs_lengths);
uint region_length() const { return young_region_length() +
old_region_length(); }
uint young_region_length() const { return eden_region_length() +
survivor_region_length(); }
uint eden_region_length() const { return _eden_region_length; }
uint survivor_region_length() const { return _survivor_region_length; }
uint old_region_length() const { return _old_region_length; }
// Incremental CSet Support
// The head of the incrementally built collection set.
HeapRegion* inc_head() { return _inc_head; }
// The tail of the incrementally built collection set.
HeapRegion* inc_tail() { return _inc_tail; }
// Initialize incremental collection set info.
void start_incremental_building();
// Perform any final calculations on the incremental CSet fields
// before we can use them.
void finalize_incremental_building();
void clear_incremental() {
_inc_head = NULL;
_inc_tail = NULL;
}
// Stop adding regions to the incremental collection set
void stop_incremental_building() { _inc_build_state = Inactive; }
// The head of the list (via "next_in_collection_set()") representing the
// current collection set.
HeapRegion* head() { return _head; }
void clear_head() { _head = NULL; }
size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
size_t bytes_used_before() const {
return _bytes_used_before;
}
void reset_bytes_used_before() {
_bytes_used_before = 0;
}
void reset_bytes_live_before() {
_bytes_live_before = 0;
}
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
double finalize_young_part(double target_pause_time_ms);
void finalize_old_part(double time_remaining_ms);
// Add old region "hr" to the CSet.
void add_old_region(HeapRegion* hr);
// Update information about hr in the aggregated information for
// the incrementally built collection set.
void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
// Add hr to the LHS of the incremental collection set.
void add_eden_region(HeapRegion* hr);
// Add hr to the RHS of the incremental collection set.
void add_survivor_regions(HeapRegion* hr);
#ifndef PRODUCT
void print(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT
private:
// Update the incremental cset information when adding a region
// (should not be called directly).
void add_young_region_common(HeapRegion* hr);
};
#endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
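The Active/Inactive build states declared above form a small protocol: regions may only be added between start_incremental_building() and finalize_incremental_building(), and the set goes Inactive again once the pause has consumed it. A minimal standalone sketch of that discipline, in plain C++ rather than HotSpot code (the int stand-in for HeapRegion and all names are illustrative):

#include <cassert>
#include <vector>

class IncrementalSet {
  enum BuildState { Active, Inactive };
  BuildState _state;
  std::vector<int> _inc;    // incrementally built set (stand-in for region list)
  std::vector<int> _final;  // the finalized collection set for this pause
public:
  IncrementalSet() : _state(Inactive) {}

  void start_incremental_building() {   // between pauses
    assert(_state == Inactive && "precondition");
    _inc.clear();
    _state = Active;
  }
  void add_region(int r) {               // only legal while Active
    assert(_state == Active && "precondition");
    _inc.push_back(r);
  }
  void finalize_building() {             // at the start of a pause
    assert(_state == Active && "precondition");
    _final.swap(_inc);
  }
  void stop_incremental_building() { _state = Inactive; }
};

The asserts mirror the "Precondition" checks in g1CollectionSet.cpp: they make any out-of-order use of the protocol fail fast in debug builds.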

File diff suppressed because it is too large

View File

@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
@ -41,8 +40,10 @@
// * when to collect.
class HeapRegion;
class G1CollectionSet;
class CollectionSetChooser;
class G1IHOPControl;
class G1Analytics;
class G1YoungGenSizer;
class G1CollectorPolicy: public CollectorPolicy {
@ -57,30 +58,14 @@ class G1CollectorPolicy: public CollectorPolicy {
void report_ihop_statistics();
G1Predictions _predictor;
double get_new_prediction(TruncatedSeq const* seq) const;
size_t get_new_size_prediction(TruncatedSeq const* seq) const;
G1Analytics* _analytics;
G1MMUTracker* _mmu_tracker;
void initialize_alignments();
void initialize_flags();
CollectionSetChooser* _cset_chooser;
double _full_collection_start_sec;
// These exclude marking times.
TruncatedSeq* _recent_gc_times_ms;
TruncatedSeq* _concurrent_mark_remark_times_ms;
TruncatedSeq* _concurrent_mark_cleanup_times_ms;
// Ratio check data for determining if heap growth is necessary.
uint _ratio_over_threshold_count;
double _ratio_over_threshold_sum;
uint _pauses_since_start;
uint _young_list_target_length;
uint _young_list_fixed_length;
@ -90,58 +75,14 @@ class G1CollectorPolicy: public CollectorPolicy {
SurvRateGroup* _short_lived_surv_rate_group;
SurvRateGroup* _survivor_surv_rate_group;
// add here any more surv rate groups
double _gc_overhead_perc;
double _reserve_factor;
uint _reserve_regions;
enum PredictionConstants {
TruncatedSeqLength = 10,
NumPrevPausesForHeuristics = 10,
// MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,
// representing the minimum number of pause time ratios that exceed
// GCTimeRatio before a heap expansion will be triggered.
MinOverThresholdForGrowth = 4
};
TruncatedSeq* _alloc_rate_ms_seq;
double _prev_collection_pause_end_ms;
TruncatedSeq* _rs_length_diff_seq;
TruncatedSeq* _cost_per_card_ms_seq;
TruncatedSeq* _cost_scan_hcc_seq;
TruncatedSeq* _young_cards_per_entry_ratio_seq;
TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
TruncatedSeq* _cost_per_entry_ms_seq;
TruncatedSeq* _mixed_cost_per_entry_ms_seq;
TruncatedSeq* _cost_per_byte_ms_seq;
TruncatedSeq* _constant_other_time_ms_seq;
TruncatedSeq* _young_other_cost_per_region_ms_seq;
TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
TruncatedSeq* _pending_cards_seq;
TruncatedSeq* _rs_lengths_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
G1YoungGenSizer* _young_gen_sizer;
uint _eden_cset_region_length;
uint _survivor_cset_region_length;
uint _old_cset_region_length;
void init_cset_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length);
uint eden_cset_region_length() const { return _eden_cset_region_length; }
uint survivor_cset_region_length() const { return _survivor_cset_region_length; }
uint old_cset_region_length() const { return _old_cset_region_length; }
uint _free_regions_at_end_of_collection;
size_t _recorded_rs_lengths;
size_t _max_rs_lengths;
size_t _rs_lengths_prediction;
@ -150,10 +91,6 @@ class G1CollectorPolicy: public CollectorPolicy {
bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT
void adjust_concurrent_refinement(double update_rs_time,
double update_rs_processed_buffers,
double goal_ms);
double _pause_time_target_ms;
size_t _pending_cards;
@ -165,6 +102,7 @@ class G1CollectorPolicy: public CollectorPolicy {
G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
public:
const G1Predictions& predictor() const { return _predictor; }
const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }
// Add the given number of bytes to the total number of allocated bytes in the old gen.
void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
@ -191,37 +129,6 @@ public:
_max_rs_lengths = rs_lengths;
}
size_t predict_rs_length_diff() const;
double predict_alloc_rate_ms() const;
double predict_cost_per_card_ms() const;
double predict_scan_hcc_ms() const;
double predict_rs_update_time_ms(size_t pending_cards) const;
double predict_young_cards_per_entry_ratio() const;
double predict_mixed_cards_per_entry_ratio() const;
size_t predict_young_card_num(size_t rs_length) const;
size_t predict_non_young_card_num(size_t rs_length) const;
double predict_rs_scan_time_ms(size_t card_num) const;
double predict_mixed_rs_scan_time_ms(size_t card_num) const;
double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;
double predict_object_copy_time_ms(size_t bytes_to_copy) const;
double predict_constant_other_time_ms() const;
double predict_young_other_time_ms(size_t young_num) const;
double predict_non_young_other_time_ms(size_t non_young_num) const;
double predict_base_elapsed_time_ms(size_t pending_cards) const;
double predict_base_elapsed_time_ms(size_t pending_cards,
@ -229,13 +136,6 @@ public:
size_t predict_bytes_to_copy(HeapRegion* hr) const;
double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
void set_recorded_rs_lengths(size_t rs_lengths);
uint cset_region_length() const { return young_cset_region_length() +
old_cset_region_length(); }
uint young_cset_region_length() const { return eden_cset_region_length() +
survivor_cset_region_length(); }
double predict_survivor_regions_evac_time() const;
bool should_update_surv_rate_group_predictors() {
@ -261,10 +161,6 @@ public:
return _mmu_tracker->max_gc_time() * 1000.0;
}
double predict_remark_time_ms() const;
double predict_cleanup_time_ms() const;
// Returns an estimate of the survival rate of the region at yg-age
// "yg_age".
double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
@ -273,7 +169,23 @@ public:
double accum_yg_surv_rate_pred(int age) const;
// When copying, we will likely need more bytes free than is live in the region.
// Add some safety margin to factor in the confidence of our guess, and the
// natural expected waste.
// (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
// of the calculation: the lower the confidence, the more headroom.
// (100 + TargetPLABWastePct) represents the increase in expected bytes during
// copying due to anticipated waste in the PLABs.
double safety_factor() const {
return (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
}
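As a quick sanity check of the formula above, assuming the default flag values G1ConfidencePercent = 50 and TargetPLABWastePct = 10 (both tunable): (100.0 / 50) * (100 + 10) / 100.0 = 2.0 * 1.1 = 2.2, i.e. the estimate budgets roughly 2.2 bytes of free space for every byte expected to be copied.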
// Returns an estimate of the available bytes at end of collection, adjusted by
// the safety factor.
size_t available_bytes_estimate();
protected:
G1CollectionSet* _collection_set;
virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
virtual double other_time_ms(double pause_time_ms) const;
@ -281,90 +193,17 @@ protected:
double non_young_other_time_ms() const;
double constant_other_time_ms(double pause_time_ms) const;
CollectionSetChooser* cset_chooser() const {
return _cset_chooser;
}
CollectionSetChooser* cset_chooser() const;
private:
// Statistics kept per GC stoppage, pause or full.
TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
// Add a new GC of the given duration and end time to the record.
void update_recent_gc_times(double end_time_sec, double elapsed_ms);
// The head of the list (via "next_in_collection_set()") representing the
// current collection set. Set from the incrementally built collection
// set at the start of the pause.
HeapRegion* _collection_set;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
// pause, and incremented in finalize_old_cset_part() when adding old regions
// (if any) to the collection set.
size_t _collection_set_bytes_used_before;
// The number of bytes copied during the GC.
size_t _bytes_copied_during_gc;
// The associated information that is maintained while the incremental
// collection set is being built with young regions. Used to populate
// the recorded info for the evacuation pause.
enum CSetBuildType {
Active, // We are actively building the collection set
Inactive // We are not actively building the collection set
};
CSetBuildType _inc_cset_build_state;
// The head of the incrementally built collection set.
HeapRegion* _inc_cset_head;
// The tail of the incrementally built collection set.
HeapRegion* _inc_cset_tail;
// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
// an evacuation pause.
size_t _inc_cset_bytes_used_before;
// The RSet lengths recorded for regions in the CSet. It is updated
// by the thread that adds a new region to the CSet. We assume that
// only one thread can be allocating a new CSet region (currently,
// it does so after taking the Heap_lock) hence no need to
// synchronize updates to this field.
size_t _inc_cset_recorded_rs_lengths;
// A concurrent refinement thread periodically samples the young
// region RSets and needs to update _inc_cset_recorded_rs_lengths as
// the RSets grow. Instead of having to synchronize updates to that
// field we accumulate them in this field and add it to
// _inc_cset_recorded_rs_lengths_diffs at the start of a GC.
ssize_t _inc_cset_recorded_rs_lengths_diffs;
// The predicted elapsed time it will take to collect the regions in
// the CSet. This is updated by the thread that adds a new region to
// the CSet. See the comment for _inc_cset_recorded_rs_lengths about
// MT-safety assumptions.
double _inc_cset_predicted_elapsed_time_ms;
// See the comment for _inc_cset_recorded_rs_lengths_diffs.
double _inc_cset_predicted_elapsed_time_ms_diffs;
// Stash a pointer to the g1 heap.
G1CollectedHeap* _g1;
G1GCPhaseTimes* _phase_times;
// The ratio of gc time to elapsed time, computed over recent pauses,
// and the ratio for just the last pause.
double _recent_avg_pause_time_ratio;
double _last_pause_time_ratio;
double recent_avg_pause_time_ratio() const {
return _recent_avg_pause_time_ratio;
}
// This set of variables tracks the collector efficiency, in order to
// determine whether we should initiate a new marking.
double _mark_remark_start_sec;
@ -412,10 +251,6 @@ private:
void update_rs_lengths_prediction();
void update_rs_lengths_prediction(size_t prediction);
// Calculate and return chunk size (in number of regions) for parallel
// concurrent mark cleanup.
uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) const;
// Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount
// of objects to be copied for the given length will fit into the
@ -424,6 +259,9 @@ private:
bool predict_will_fit(uint young_length, double base_time_ms,
uint base_free_regions, double target_pause_time_ms) const;
public:
size_t pending_cards() const { return _pending_cards; }
// Calculate the minimum number of old regions we'll add to the CSet
// during a mixed GC.
uint calc_min_old_cset_length() const;
@ -436,6 +274,7 @@ private:
// as a percentage of the current heap capacity.
double reclaimable_bytes_perc(size_t reclaimable_bytes) const;
private:
// Sets up marking if proper conditions are met.
void maybe_start_marking();
@ -520,83 +359,20 @@ public:
return _bytes_copied_during_gc;
}
size_t collection_set_bytes_used_before() const {
return _collection_set_bytes_used_before;
}
// Determine whether there are candidate regions so that the
// next GC should be mixed. The two action strings are used
// in the ergo output when the method returns true or false.
bool next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str) const;
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
double finalize_young_cset_part(double target_pause_time_ms);
virtual void finalize_old_cset_part(double time_remaining_ms);
// The head of the list (via "next_in_collection_set()") representing the
// current collection set.
HeapRegion* collection_set() { return _collection_set; }
void clear_collection_set() { _collection_set = NULL; }
// Add old region "hr" to the CSet.
void add_old_region_to_cset(HeapRegion* hr);
// Incremental CSet Support
// The head of the incrementally built collection set.
HeapRegion* inc_cset_head() { return _inc_cset_head; }
// The tail of the incrementally built collection set.
HeapRegion* inc_set_tail() { return _inc_cset_tail; }
// Initialize incremental collection set info.
void start_incremental_cset_building();
// Perform any final calculations on the incremental CSet fields
// before we can use them.
void finalize_incremental_cset_building();
void clear_incremental_cset() {
_inc_cset_head = NULL;
_inc_cset_tail = NULL;
}
// Stop adding regions to the incremental collection set
void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
// Add information about hr to the aggregated information for the
// incrementally built collection set.
void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
// Update information about hr in the aggregated information for
// the incrementally built collection set.
void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
virtual void finalize_collection_set(double target_pause_time_ms);
private:
// Update the incremental cset information when adding a region
// (should not be called directly).
void add_region_to_incremental_cset_common(HeapRegion* hr);
// Set the state to start a concurrent marking cycle and clear
// _initiate_conc_mark_if_possible because it has now been
// acted on.
void initiate_conc_mark();
public:
// Add hr to the LHS of the incremental collection set.
void add_region_to_incremental_cset_lhs(HeapRegion* hr);
// Add hr to the RHS of the incremental collection set.
void add_region_to_incremental_cset_rhs(HeapRegion* hr);
#ifndef PRODUCT
void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT
// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It's best if it
// is called during a safepoint when the test whether a cycle is in
@ -611,13 +387,6 @@ public:
// the initial-mark work and start a marking cycle.
void decide_on_conc_mark_initiation();
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();
// Print stats on young survival ratio
void print_yg_surv_rate_info() const;
@ -627,7 +396,6 @@ public:
} else {
_short_lived_surv_rate_group->finished_recalculating_age_indexes();
}
// do that for any other surv rate groups
}
size_t young_list_target_length() const { return _young_list_target_length; }
@ -658,16 +426,6 @@ private:
// The limit on the number of regions allocated for survivors.
uint _max_survivor_regions;
// For reporting purposes.
// The value of _heap_bytes_before_gc is also used to calculate
// the cost of copying.
// The amount of survivor regions after a collection.
uint _recorded_survivor_regions;
// List of survivor regions.
HeapRegion* _recorded_survivor_head;
HeapRegion* _recorded_survivor_tail;
AgeTable _survivors_age_table;
public:
@ -677,22 +435,6 @@ public:
return _max_survivor_regions;
}
static const uint REGIONS_UNLIMITED = (uint) -1;
uint max_regions(InCSetState dest) const {
switch (dest.value()) {
case InCSetState::Young:
return _max_survivor_regions;
case InCSetState::Old:
return REGIONS_UNLIMITED;
default:
assert(false, "Unknown dest state: " CSETSTATE_FORMAT, dest.value());
break;
}
// keep some compilers happy
return 0;
}
void note_start_adding_survivor_regions() {
_survivor_surv_rate_group->start_adding_regions();
}
@ -701,18 +443,6 @@ public:
_survivor_surv_rate_group->stop_adding_regions();
}
void record_survivor_regions(uint regions,
HeapRegion* head,
HeapRegion* tail) {
_recorded_survivor_regions = regions;
_recorded_survivor_head = head;
_recorded_survivor_tail = tail;
}
uint recorded_survivor_regions() const {
return _recorded_survivor_regions;
}
void record_age_table(AgeTable* age_table) {
_survivors_age_table.merge(age_table);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,9 @@
#ifndef SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
#define SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
#include "utilities/globalDefinitions.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
// Various state variables that indicate
// the phase of the G1 collection.
@ -71,7 +72,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
bool _in_marking_window;
bool _in_marking_window_im;
bool _concurrent_cycle_started;
bool _full_collection;
public:
@ -87,7 +87,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
_mark_in_progress(false),
_in_marking_window(false),
_in_marking_window_im(false),
_concurrent_cycle_started(false),
_full_collection(false) {}
// Setters
@ -100,7 +99,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
void set_mark_in_progress(bool v) { _mark_in_progress = v; }
void set_in_marking_window(bool v) { _in_marking_window = v; }
void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
void set_concurrent_cycle_started(bool v) { _concurrent_cycle_started = v; }
void set_full_collection(bool v) { _full_collection = v; }
// Getters
@ -113,7 +111,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
bool mark_in_progress() const { return _mark_in_progress; }
bool in_marking_window() const { return _in_marking_window; }
bool in_marking_window_im() const { return _in_marking_window_im; }
bool concurrent_cycle_started() const { return _concurrent_cycle_started; }
bool full_collection() const { return _full_collection; }
// Composite booleans (clients worry about flickering)

View File

@ -120,74 +120,10 @@ void G1CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_r
}
// We need to clear the bitmap on commit, removing any existing information.
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords);
_bm->clearRange(mr);
_bm->clear_range(mr);
}
// Closure used for clearing the given mark bitmap.
class ClearBitmapHRClosure : public HeapRegionClosure {
private:
G1ConcurrentMark* _cm;
G1CMBitMap* _bitmap;
bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration.
public:
ClearBitmapHRClosure(G1ConcurrentMark* cm, G1CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) {
assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield.");
}
virtual bool doHeapRegion(HeapRegion* r) {
size_t const chunk_size_in_words = M / HeapWordSize;
HeapWord* cur = r->bottom();
HeapWord* const end = r->end();
while (cur < end) {
MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
_bitmap->clearRange(mr);
cur += chunk_size_in_words;
// Abort iteration if after yielding the marking has been aborted.
if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) {
return true;
}
// Repeat the asserts from before the start of the closure. We will do them
// as asserts here to minimize their overhead on the product. However, we
// will have them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
assert(!_may_yield || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
}
return false;
}
};
class ParClearNextMarkBitmapTask : public AbstractGangTask {
ClearBitmapHRClosure* _cl;
HeapRegionClaimer _hrclaimer;
bool _suspendible; // If the task is suspendible, workers must join the STS.
public:
ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
_cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
void work(uint worker_id) {
SuspendibleThreadSetJoiner sts_join(_suspendible);
G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
}
};
void G1CMBitMap::clearAll() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
uint n_workers = g1h->workers()->active_workers();
ParClearNextMarkBitmapTask task(&cl, n_workers, false);
g1h->workers()->run_task(&task);
guarantee(cl.complete(), "Must have completed iteration.");
return;
}
void G1CMBitMap::clearRange(MemRegion mr) {
void G1CMBitMap::clear_range(MemRegion mr) {
mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
assert(!mr.is_empty(), "unexpected empty region");
// convert address range into offset range
@ -203,12 +139,12 @@ bool G1CMMarkStack::allocate(size_t capacity) {
// allocate a stack of the requisite depth
ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
if (!rs.is_reserved()) {
warning("ConcurrentMark MarkStack allocation failure");
log_warning(gc)("ConcurrentMark MarkStack allocation failure");
return false;
}
MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
if (!_virtual_space.initialize(rs, rs.size())) {
warning("ConcurrentMark MarkStack backing store failure");
log_warning(gc)("ConcurrentMark MarkStack backing store failure");
// Release the virtual memory reserved for the marking stack
rs.release();
return false;
@ -441,7 +377,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
_has_aborted(false),
_restart_for_overflow(false),
_concurrent_marking_in_progress(false),
_concurrent_phase_status(ConcPhaseNotStarted),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
// _verbose_level set below
@ -478,8 +415,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
_root_regions.init(_g1h, this);
if (ConcGCThreads > ParallelGCThreads) {
warning("Can't have more ConcGCThreads (%u) "
"than ParallelGCThreads (%u).",
log_warning(gc)("Can't have more ConcGCThreads (%u) than ParallelGCThreads (%u).",
ConcGCThreads, ParallelGCThreads);
return;
}
@ -534,7 +470,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
// Verify that the calculated value for MarkStackSize is in range.
// It would be nice to use the private utility routine from Arguments.
if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
log_warning(gc)("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
"must be between 1 and " SIZE_FORMAT,
mark_stack_size, MarkStackSizeMax);
return;
@ -545,14 +481,14 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
if (FLAG_IS_CMDLINE(MarkStackSize)) {
if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT "): "
"must be between 1 and " SIZE_FORMAT,
MarkStackSize, MarkStackSizeMax);
return;
}
} else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
warning("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
log_warning(gc)("Invalid value specified for MarkStackSize (" SIZE_FORMAT ")"
" or for MarkStackSizeMax (" SIZE_FORMAT ")",
MarkStackSize, MarkStackSizeMax);
return;
@ -562,7 +498,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
}
if (!_markStack.allocate(MarkStackSize)) {
warning("Failed to allocate CM marking stack");
log_warning(gc)("Failed to allocate CM marking stack");
return;
}
@ -698,9 +634,76 @@ G1ConcurrentMark::~G1ConcurrentMark() {
ShouldNotReachHere();
}
void G1ConcurrentMark::clearNextBitmap() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
class G1ClearBitMapTask : public AbstractGangTask {
// Heap region closure used for clearing the given mark bitmap.
class G1ClearBitmapHRClosure : public HeapRegionClosure {
private:
G1CMBitMap* _bitmap;
G1ConcurrentMark* _cm;
public:
G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
}
virtual bool doHeapRegion(HeapRegion* r) {
size_t const chunk_size_in_words = M / HeapWordSize;
HeapWord* cur = r->bottom();
HeapWord* const end = r->end();
while (cur < end) {
MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
_bitmap->clear_range(mr);
cur += chunk_size_in_words;
// Abort iteration if after yielding the marking has been aborted.
if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
return true;
}
// Repeat the asserts from before the start of the closure. We will do them
// as asserts here to minimize their overhead on the product. However, we
// will have them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
}
assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
return false;
}
};
G1ClearBitmapHRClosure _cl;
HeapRegionClaimer _hr_claimer;
bool _suspendible; // If the task is suspendible, workers must join the STS.
public:
G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
AbstractGangTask("Parallel Clear Bitmap Task"),
_cl(bitmap, suspendible ? cm : NULL),
_hr_claimer(n_workers),
_suspendible(suspendible)
{ }
void work(uint worker_id) {
SuspendibleThreadSetJoiner sts_join(_suspendible);
G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
}
bool is_complete() {
return _cl.complete();
}
};
void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
G1ClearBitMapTask task(bitmap, this, workers->active_workers(), may_yield);
workers->run_task(&task);
guarantee(!may_yield || task.is_complete(), "Must have completed iteration when not yielding.");
}
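The closure above clears each region in chunks of M / HeapWordSize words so a concurrent worker can yield, and possibly observe an abort, between chunks. A standalone sketch of that chunking pattern, in plain C++ rather than HotSpot code (the callback and all names are illustrative):

#include <cstddef>
#include <cstring>

// Clear a large range in fixed-size chunks, checking for abort between
// chunks so a cooperating thread can yield; returns false if aborted.
static bool clear_in_chunks(unsigned char* base, size_t len,
                            bool (*yield_and_check_abort)()) {
  const size_t chunk = 1024 * 1024;  // analogous to M / HeapWordSize words
  for (size_t off = 0; off < len; off += chunk) {
    size_t n = (len - off < chunk) ? (len - off) : chunk;
    std::memset(base + off, 0, n);   // the per-chunk clear_range() step
    if (yield_and_check_abort != NULL && yield_and_check_abort()) {
      return false;                  // aborted mid-iteration, like has_aborted()
    }
  }
  return true;
}

Passing NULL for the callback gives the non-yielding, safepoint-only variant, matching how the task wraps _cm in a NULL when may_yield is false.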
void G1ConcurrentMark::cleanup_for_next_mark() {
// Make sure that the concurrent mark thread looks to still be in
// the current cycle.
guarantee(cmThread()->during_cycle(), "invariant");
@ -709,21 +712,24 @@ void G1ConcurrentMark::clearNextBitmap() {
// marking bitmap and getting it ready for the next cycle. During
// this time no other cycle can start. So, let's make sure that this
// is the case.
guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
_parallel_workers->run_task(&task);
clear_bitmap(_nextMarkBitMap, _parallel_workers, true);
// Clear the liveness counting data. If the marking has been aborted, the abort()
// call already did that.
if (cl.complete()) {
if (!has_aborted()) {
clear_all_count_data();
}
// Repeat the asserts from above.
guarantee(cmThread()->during_cycle(), "invariant");
guarantee(!g1h->collector_state()->mark_in_progress(), "invariant");
guarantee(!_g1h->collector_state()->mark_in_progress(), "invariant");
}
void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
assert(SafepointSynchronize::is_at_safepoint(), "Should only clear the entire prev bitmap at a safepoint.");
clear_bitmap((G1CMBitMap*)_prevMarkBitMap, workers, false);
}
class CheckBitmapClearHRClosure : public HeapRegionClosure {
@ -848,7 +854,7 @@ void G1ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
// marking.
reset_marking_state(true /* clear_overflow */);
log_info(gc)("Concurrent Mark reset for overflow");
log_info(gc, marking)("Concurrent Mark reset for overflow");
}
}
@ -983,13 +989,12 @@ public:
}
};
void G1ConcurrentMark::scanRootRegions() {
void G1ConcurrentMark::scan_root_regions() {
// scan_in_progress() will have been set to true only if there was
// at least one root region to scan. So, if it's false, we
// should not attempt to do any further work.
if (root_regions()->scan_in_progress()) {
assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
_parallel_marking_threads = calc_parallel_marking_threads();
assert(parallel_marking_threads() <= max_parallel_marking_threads(),
@ -1007,47 +1012,27 @@ void G1ConcurrentMark::scanRootRegions() {
}
}
void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
uint old_val = 0;
do {
old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
} while (old_val != ConcPhaseNotStarted);
_g1h->gc_timer_cm()->register_gc_concurrent_start(title);
void G1ConcurrentMark::concurrent_cycle_start() {
_gc_timer_cm->register_gc_start();
_gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
_g1h->trace_heap_before_gc(_gc_tracer_cm);
}
void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
if (_concurrent_phase_status == ConcPhaseNotStarted) {
return;
void G1ConcurrentMark::concurrent_cycle_end() {
_g1h->trace_heap_after_gc(_gc_tracer_cm);
if (has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
}
uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
if (old_val == ConcPhaseStarted) {
_g1h->gc_timer_cm()->register_gc_concurrent_end();
// If 'end_timer' is true, we came here to end the timer, which requires the
// concurrent phase to have ended. We need to end it before changing the status
// to 'ConcPhaseNotStarted' to prevent the 'ConcurrentMarkThread' from starting
// a new concurrent phase.
if (end_timer) {
_g1h->gc_timer_cm()->register_gc_end();
}
old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
} else {
do {
// Let the other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
os::naked_short_sleep(1);
} while (_concurrent_phase_status != ConcPhaseNotStarted);
}
_gc_timer_cm->register_gc_end();
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}
void G1ConcurrentMark::register_concurrent_phase_end() {
register_concurrent_phase_end_common(false);
}
void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
register_concurrent_phase_end_common(true);
}
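
For reference, the coordination the deleted code above implemented can be sketched standalone (std::atomic instead of Atomic::cmpxchg; illustrative, not the original): a three-state NotStarted/Started/Stopping machine where whoever wins the Started -> Stopping transition ends the timers, and latecomers spin until the state returns to NotStarted.

#include <atomic>
#include <thread>

enum Phase : unsigned { NotStarted = 0, Started = 1, Stopping = 2 };
static std::atomic<unsigned> phase{NotStarted};

void phase_start() {
  unsigned expected = NotStarted;
  // Spin until we win the NotStarted -> Started transition.
  while (!phase.compare_exchange_strong(expected, Started)) {
    expected = NotStarted;
    std::this_thread::yield();
  }
}

void phase_end() {
  unsigned expected = Started;
  if (phase.compare_exchange_strong(expected, Stopping)) {
    // Winner: end timers here, *before* publishing NotStarted, so no new
    // phase can start while this one is still being torn down.
    phase.store(NotStarted);
  } else {
    // Loser: wait for the stopper to finish its teardown.
    while (phase.load() != NotStarted) {
      std::this_thread::yield();
    }
  }
}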
void G1ConcurrentMark::markFromRoots() {
void G1ConcurrentMark::mark_from_roots() {
// we might be tempted to assert that:
// assert(asynch == !SafepointSynchronize::is_at_safepoint(),
// "inconsistent argument?");
@ -1110,7 +1095,6 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
if (has_overflown()) {
// Oops. We overflowed. Restart concurrent marking.
_restart_for_overflow = true;
log_develop_trace(gc)("Remark led to restart for overflow.");
// Verify the heap w.r.t. the previous marking bitmap.
if (VerifyDuringGC) {
@ -1124,7 +1108,7 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
reset_marking_state();
} else {
{
GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("Aggregate Data", _gc_timer_cm);
// Aggregate the per-task counting data that we have accumulated
// while marking.
@ -1163,7 +1147,7 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1p->record_concurrent_mark_remark_end();
G1CMIsAliveClosure is_alive(g1h);
g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
_gc_tracer_cm->report_object_count_after_gc(&is_alive);
}
// Base class of the closures that finalize and verify the
@ -1752,11 +1736,9 @@ void G1ConcurrentMark::cleanup() {
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
g1h->allocation_context_stats().update_after_mark();
g1h->trace_heap_after_concurrent_cycle();
}
void G1ConcurrentMark::completeCleanup() {
void G1ConcurrentMark::complete_cleanup() {
if (has_aborted()) return;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -2045,7 +2027,7 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
{
GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
ReferenceProcessor* rp = g1h->ref_processor_cm();
@ -2102,8 +2084,8 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
&g1_keep_alive,
&g1_drain_mark_stack,
executor,
g1h->gc_timer_cm());
g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
_gc_timer_cm);
_gc_tracer_cm->report_gc_reference_stats(stats);
// The do_oop work routines of the keep_alive and drain_marking_stack
// oop closures will set the has_overflown flag if we overflow the
@ -2134,29 +2116,25 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
assert(_markStack.isEmpty(), "Marking should have completed");
// Unload Klasses, String, Symbols, Code Cache, etc.
{
GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());
if (ClassUnloadingWithConcurrentMark) {
bool purged_classes;
{
GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
}
{
GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
}
if (G1StringDedup::is_enabled()) {
GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
G1StringDedup::unlink(&g1_is_alive);
}
}
}
void G1ConcurrentMark::swapMarkBitMaps() {
G1CMBitMapRO* temp = _prevMarkBitMap;
@ -2273,7 +2251,7 @@ void G1ConcurrentMark::checkpointRootsFinalWork() {
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
g1h->ensure_parsability(false);
@ -2308,7 +2286,7 @@ void G1ConcurrentMark::checkpointRootsFinalWork() {
void G1ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((G1CMBitMap*)_prevMarkBitMap)->clearRange(mr);
((G1CMBitMap*)_prevMarkBitMap)->clear_range(mr);
}
HeapRegion*
@ -2605,7 +2583,7 @@ void G1ConcurrentMark::abort() {
// Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
// concurrent bitmap clearing.
_nextMarkBitMap->clearAll();
clear_bitmap(_nextMarkBitMap, _g1h->workers(), false);
// Note we cannot clear the previous marking bitmap here
// since VerifyDuringGC verifies the objects marked during
@ -2629,10 +2607,6 @@ void G1ConcurrentMark::abort() {
satb_mq_set.set_active_all_threads(
false, /* new active value */
satb_mq_set.is_active() /* expected_active */);
_g1h->trace_heap_after_concurrent_cycle();
_g1h->register_concurrent_cycle_end();
}
static void print_ms_time_info(const char* prefix, const char* name,
@ -3554,8 +3528,6 @@ G1PrintRegionLivenessInfoClosure::
G1PrintRegionLivenessInfoClosure(const char* phase_name)
: _total_used_bytes(0), _total_capacity_bytes(0),
_total_prev_live_bytes(0), _total_next_live_bytes(0),
_hum_used_bytes(0), _hum_capacity_bytes(0),
_hum_prev_live_bytes(0), _hum_next_live_bytes(0),
_total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
MemRegion g1_reserved = g1h->g1_reserved();
@ -3595,36 +3567,6 @@ G1PrintRegionLivenessInfoClosure(const char* phase_name)
"(bytes)", "(bytes)");
}
// It takes a reference to one of the _hum_* fields, deduces the
// corresponding value for a region in a humongous region series (either
// the region size, or what's left if the _hum_* field is < the region
// size), and updates the _hum_* field accordingly.
size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
size_t bytes = 0;
// The > 0 check is to deal with the prev and next live bytes which
// could be 0.
if (*hum_bytes > 0) {
bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
*hum_bytes -= bytes;
}
return bytes;
}
// It deduces the values for a region in a humongous region series
// from the _hum_* fields and updates those accordingly. It assumes
// that the _hum_* fields have already been set up from the "starts
// humongous" region and that we visit the regions in address order.
void G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* used_bytes,
size_t* capacity_bytes,
size_t* prev_live_bytes,
size_t* next_live_bytes) {
assert(_hum_used_bytes > 0 && _hum_capacity_bytes > 0, "pre-condition");
*used_bytes = get_hum_bytes(&_hum_used_bytes);
*capacity_bytes = get_hum_bytes(&_hum_capacity_bytes);
*prev_live_bytes = get_hum_bytes(&_hum_prev_live_bytes);
*next_live_bytes = get_hum_bytes(&_hum_next_live_bytes);
}
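
A worked example of the bookkeeping being removed (illustrative numbers, assuming a 1 MB region size): a humongous object occupying 2.5 MB loads _hum_used_bytes = 2.5 MB at the "starts humongous" region, and each region in the series then takes up to one region's worth until the field drains to 0:

#include <cstddef>

// region 1: min(1.0 MB, 2.5 MB) = 1.0 MB, remaining 1.5 MB
// region 2: min(1.0 MB, 1.5 MB) = 1.0 MB, remaining 0.5 MB
// region 3: min(1.0 MB, 0.5 MB) = 0.5 MB, remaining 0 (series done)
size_t take_from(size_t& remaining, size_t grain_bytes) {
  size_t bytes = remaining < grain_bytes ? remaining : grain_bytes;
  remaining -= bytes;
  return bytes;
}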
bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
const char* type = r->get_type_str();
HeapWord* bottom = r->bottom();
@ -3637,24 +3579,6 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
size_t remset_bytes = r->rem_set()->mem_size();
size_t strong_code_roots_bytes = r->rem_set()->strong_code_roots_mem_size();
if (r->is_starts_humongous()) {
assert(_hum_used_bytes == 0 && _hum_capacity_bytes == 0 &&
_hum_prev_live_bytes == 0 && _hum_next_live_bytes == 0,
"they should have been zeroed after the last time we used them");
// Set up the _hum_* fields.
_hum_capacity_bytes = capacity_bytes;
_hum_used_bytes = used_bytes;
_hum_prev_live_bytes = prev_live_bytes;
_hum_next_live_bytes = next_live_bytes;
get_hum_bytes(&used_bytes, &capacity_bytes,
&prev_live_bytes, &next_live_bytes);
end = bottom + HeapRegion::GrainWords;
} else if (r->is_continues_humongous()) {
get_hum_bytes(&used_bytes, &capacity_bytes,
&prev_live_bytes, &next_live_bytes);
assert(end == bottom + HeapRegion::GrainWords, "invariant");
}
_total_used_bytes += used_bytes;
_total_capacity_bytes += capacity_bytes;
_total_prev_live_bytes += prev_live_bytes;

View File

@ -34,6 +34,8 @@ class G1CollectedHeap;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
@ -139,10 +141,7 @@ class G1CMBitMap : public G1CMBitMapRO {
inline void clear(HeapWord* addr);
inline bool parMark(HeapWord* addr);
void clearRange(MemRegion mr);
// Clear the whole mark bitmap.
void clearAll();
void clear_range(MemRegion mr);
};
// Represents a marking stack used by ConcurrentMarking in the G1 collector.
@ -352,17 +351,9 @@ protected:
// time of remark.
volatile bool _concurrent_marking_in_progress;
// There is a potential race between the ConcurrentMarkThread and the
// VMThread (via ConcurrentMark::abort()) when calling
// ConcurrentGCTimer::register_gc_concurrent_end().
// This variable is used to keep track of the concurrent phase.
volatile uint _concurrent_phase_status;
// Concurrent phase is not yet started.
static const uint ConcPhaseNotStarted = 0;
// Concurrent phase is started.
static const uint ConcPhaseStarted = 1;
// The caller thread of ConcurrentGCTimer::register_gc_concurrent_end() is ending the concurrent phase.
// Other threads should wait until the status changes to ConcPhaseNotStarted.
static const uint ConcPhaseStopping = 2;
ConcurrentGCTimer* _gc_timer_cm;
G1OldTracer* _gc_tracer_cm;
// All of these times are in ms
NumberSeq _init_times;
@ -497,6 +488,9 @@ protected:
// end_timer: true to end the GC timer after ending the concurrent phase.
void register_concurrent_phase_end_common(bool end_timer);
// Clear the given bitmap in parallel using the given WorkGang. If may_yield is
// true, periodically check whether this method should exit prematurely.
void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
public:
// Manipulation of the global mark stack.
// The push and pop operations are used by tasks for transfers
@ -530,10 +524,8 @@ public:
_concurrent_marking_in_progress = false;
}
void register_concurrent_phase_start(const char* title);
void register_concurrent_phase_end();
// Ends both concurrent phase and timer.
void register_concurrent_gc_end_and_stop_timer();
void concurrent_cycle_start();
void concurrent_cycle_end();
void update_accum_task_vtime(int i, double vtime) {
_accum_task_vtime[i] += vtime;
@ -585,8 +577,13 @@ public:
uint worker_id,
HeapRegion* hr = NULL);
// Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap();
// Prepare internal data structures for the next mark cycle. This includes clearing
// the next mark bitmap and some internal data structures. This method is intended
// to be called concurrently with the mutator. It will yield to safepoint requests.
void cleanup_for_next_mark();
// Clear the previous marking bitmap during safepoint.
void clear_prev_bitmap(WorkGang* workers);
// Return whether the next mark bitmap has no marks set. To be used for assertions
// only. Will not yield to pause requests.
@ -603,18 +600,18 @@ public:
// Scan all the root regions and mark everything reachable from
// them.
void scanRootRegions();
void scan_root_regions();
// Scan a single root region and mark everything reachable from it.
void scanRootRegion(HeapRegion* hr, uint worker_id);
// Do concurrent phase of marking, to a tentative transitive closure.
void markFromRoots();
void mark_from_roots();
void checkpointRootsFinal(bool clear_all_soft_refs);
void checkpointRootsFinalWork();
void cleanup();
void completeCleanup();
void complete_cleanup();
// Mark in the previous bitmap. NB: this is usually read-only, so use
// this carefully!
@ -730,6 +727,9 @@ public:
return _completed_initialization;
}
ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
protected:
// Clear all the per-task bitmaps and arrays used to store the
// counting data.
@ -996,18 +996,6 @@ private:
size_t _total_prev_live_bytes;
size_t _total_next_live_bytes;
// These are set up when we come across a "starts humongous" region
// (as this is where most of this information is stored, not in the
// subsequent "continues humongous" regions). After that, for every
// region in a given humongous region series we deduce the right
// values for it by simply subtracting the appropriate amount from
// these fields. All these values should reach 0 after we've visited
// the last region in the series.
size_t _hum_used_bytes;
size_t _hum_capacity_bytes;
size_t _hum_prev_live_bytes;
size_t _hum_next_live_bytes;
// Accumulator for the remembered set size
size_t _total_remset_bytes;
@ -1026,11 +1014,6 @@ private:
return (double) val / (double) M;
}
// See the .cpp file.
size_t get_hum_bytes(size_t* hum_bytes);
void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
size_t* prev_live_bytes, size_t* next_live_bytes);
public:
// The header and footer are printed in the constructor and
// destructor respectively.

View File

@ -110,15 +110,9 @@ void G1EvacStats::adjust_desired_plab_sz() {
size_t const cur_plab_sz = (size_t)((double)total_waste_allowed / G1LastPLABAverageOccupancy);
// Take historical weighted average
_filter.sample(cur_plab_sz);
// Clip from above and below, and align to object boundary
size_t plab_sz;
plab_sz = MAX2(min_size(), (size_t)_filter.average());
plab_sz = MIN2(max_size(), plab_sz);
plab_sz = align_object_size(plab_sz);
// Latch the result
_desired_net_plab_sz = plab_sz;
_desired_net_plab_sz = MAX2(min_size(), (size_t)_filter.average());
log_sizing(cur_plab_sz, plab_sz);
log_sizing(cur_plab_sz, _desired_net_plab_sz);
// Clear accumulators for next round.
reset();
}

View File

@ -0,0 +1,157 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics) :
_g1(g1),
_analytics(analytics),
_num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
clear_ratio_check_data();
}
void G1HeapSizingPolicy::clear_ratio_check_data() {
_ratio_over_threshold_count = 0;
_ratio_over_threshold_sum = 0.0;
_pauses_since_start = 0;
}
size_t G1HeapSizingPolicy::expansion_amount() {
double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
assert(GCTimeRatio > 0,
"we should have set it to a default value set_g1_gc_flags() "
"if a user set it to 0");
const double gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
double threshold = gc_overhead_perc;
size_t expand_bytes = 0;
// If the heap is at less than half its maximum size, scale the threshold down,
// to a minimum of 1. Thus the smaller the heap is, the more likely it is to expand,
// though the scaling code will likely keep the increase small.
if (_g1->capacity() <= _g1->max_capacity() / 2) {
threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
threshold = MAX2(threshold, 1.0);
}
// If the last GC time ratio is over the threshold, increment the count of
// times it has been exceeded, and add this ratio to the sum of exceeded
// ratios.
if (last_gc_overhead > threshold) {
_ratio_over_threshold_count++;
_ratio_over_threshold_sum += last_gc_overhead;
}
// Check if we've had enough GC time ratio checks that were over the
// threshold to trigger an expansion. We'll also expand if we've
// reached the end of the history buffer and the average of all entries
// is still over the threshold. This indicates a smaller number of GCs were
// long enough to make the average exceed the threshold.
bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
(filled_history_buffer && (recent_gc_overhead > threshold))) {
size_t min_expand_bytes = HeapRegion::GrainBytes;
size_t reserved_bytes = _g1->max_capacity();
size_t committed_bytes = _g1->capacity();
size_t uncommitted_bytes = reserved_bytes - committed_bytes;
size_t expand_bytes_via_pct =
uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
double scale_factor = 1.0;
// If the current size is less than 1/4 of the Initial heap size, expand
// by half of the delta between the current and Initial sizes. I.e., grow
// back quickly.
//
// Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
// the available expansion space, whichever is smaller, as the base
// expansion size. Then possibly scale this size according to how much the
// threshold has (on average) been exceeded by. If the delta is small
// (less than the StartScaleDownAt value), scale the size down linearly, but
// not by less than MinScaleDownFactor. If the delta is large (greater than
// the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
// times the base size. The scaling will be linear in the range from
// StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
// ScaleUpRange sets the rate of scaling up.
if (committed_bytes < InitialHeapSize / 4) {
expand_bytes = (InitialHeapSize - committed_bytes) / 2;
} else {
double const MinScaleDownFactor = 0.2;
double const MaxScaleUpFactor = 2;
double const StartScaleDownAt = gc_overhead_perc;
double const StartScaleUpAt = gc_overhead_perc * 1.5;
double const ScaleUpRange = gc_overhead_perc * 2.0;
double ratio_delta;
if (filled_history_buffer) {
ratio_delta = recent_gc_overhead - threshold;
} else {
ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
}
expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
if (ratio_delta < StartScaleDownAt) {
scale_factor = ratio_delta / StartScaleDownAt;
scale_factor = MAX2(scale_factor, MinScaleDownFactor);
} else if (ratio_delta > StartScaleUpAt) {
scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
}
}
log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
"recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
// Ensure the expansion size is at least the minimum growth amount
// and at most the remaining uncommitted byte size.
expand_bytes = MAX2(expand_bytes, min_expand_bytes);
expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
clear_ratio_check_data();
} else {
// An expansion was not triggered. If we've started counting, increment
// the number of checks we've made in the current window. If we've
// reached the end of the window without resizing, clear the counters to
// start again the next time we see a ratio above the threshold.
if (_ratio_over_threshold_count > 0) {
_pauses_since_start++;
if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
clear_ratio_check_data();
}
}
}
return expand_bytes;
}
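
Worked numbers for the thresholds above (illustrative, not part of the patch): with GCTimeRatio = 9, gc_overhead_perc = 100 * (1 / (1 + 9)) = 10%. A heap at one quarter of its maximum size scales that by capacity / (max_capacity / 2) = 0.5, giving a 5% threshold, floored at 1%, so small heaps expand more eagerly:

#include <cstddef>

double expansion_threshold(double gc_time_ratio, size_t capacity, size_t max_capacity) {
  double threshold = 100.0 * (1.0 / (1.0 + gc_time_ratio));   // allowed GC overhead in %
  if (capacity <= max_capacity / 2) {
    // Shrink the threshold in proportion to how far below half-size we are.
    threshold *= (double)capacity / (double)(max_capacity / 2);
    if (threshold < 1.0) threshold = 1.0;                     // never below 1%
  }
  return threshold;
}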

View File

@ -0,0 +1,63 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP
#define SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP
#include "memory/allocation.hpp"
class G1Analytics;
class G1CollectedHeap;
class G1HeapSizingPolicy: public CHeapObj<mtGC> {
// MinOverThresholdForGrowth is the minimum number of pause time ratios
// exceeding GCTimeRatio before a heap expansion will be triggered; it must
// be less than the number of recorded pause times in G1Analytics.
const static uint MinOverThresholdForGrowth = 4;
const G1CollectedHeap* _g1;
const G1Analytics* _analytics;
const uint _num_prev_pauses_for_heuristics;
// Ratio check data for determining if heap growth is necessary.
uint _ratio_over_threshold_count;
double _ratio_over_threshold_sum;
uint _pauses_since_start;
protected:
G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics);
public:
// If an expansion would be appropriate because recent GC overhead has
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();
static G1HeapSizingPolicy* create(const G1CollectedHeap* g1, const G1Analytics* analytics);
};
#endif // SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP

View File

@ -22,22 +22,9 @@
*
*/
#ifndef SHARE_VM_RUNTIME_LOG_TIMER_HPP
#define SHARE_VM_RUNTIME_LOG_TIMER_HPP
#include "precompiled.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/timer.hpp"
// TraceStartupTime is used for tracing the execution time of a block with logging
// Usage:
// { TraceStartupTime t("block time");
// some_code();
// }
//
class TraceStartupTime : public TraceTime {
public:
TraceStartupTime(const char* s) : TraceTime(s, log_is_enabled(Info, startuptime), LogTag::_startuptime) {}
};
#endif // SHARE_VM_RUNTIME_LOG_TIMER_HPP
G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1, const G1Analytics* analytics) {
return new G1HeapSizingPolicy(g1, analytics);
}

View File

@ -82,8 +82,8 @@ public:
void G1HeapTransition::print() {
Data after(_g1_heap);
size_t eden_capacity_bytes_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
size_t survivor_capacity_bytes_after_gc = _g1_heap->g1_policy()->max_survivor_regions();
size_t eden_capacity_length_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
size_t survivor_capacity_length_after_gc = _g1_heap->g1_policy()->max_survivor_regions();
DetailedUsage usage;
if (log_is_enabled(Trace, gc, heap)) {
@ -100,11 +100,11 @@ void G1HeapTransition::print() {
}
log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._eden_length, after._eden_length, eden_capacity_bytes_after_gc);
_before._eden_length, after._eden_length, eden_capacity_length_after_gc);
log_trace(gc, heap)(" Used: 0K, Waste: 0K");
log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
_before._survivor_length, after._survivor_length, survivor_capacity_bytes_after_gc);
_before._survivor_length, after._survivor_length, survivor_capacity_length_after_gc);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);

View File

@ -36,7 +36,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
_use_cache = true;
_hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
_hot_cache = _hot_cache_memory.allocate(_hot_cache_size);
_hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);
reset_hot_cache_internal();
@ -51,7 +51,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
G1HotCardCache::~G1HotCardCache() {
if (default_use_cache()) {
assert(_hot_cache != NULL, "Logic");
_hot_cache_memory.free();
ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
_hot_cache = NULL;
}
}

View File

@ -61,7 +61,6 @@ class G1HotCardCache: public CHeapObj<mtGC> {
G1CardCounts _card_counts;
ArrayAllocator<jbyte*, mtGC> _hot_cache_memory;
// The card cache table
jbyte** _hot_cache;

View File

@ -122,7 +122,7 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", gc_timer());
GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@ -137,6 +137,9 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
&follow_code_closure);
}
{
GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());
// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
assert(rp == g1h->ref_processor_stw(), "Sanity");
@ -149,11 +152,14 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
NULL,
gc_timer());
gc_tracer()->report_gc_reference_stats(stats);
}
// This is the point where the entire marking should have completed.
assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
{
GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
@ -162,9 +168,18 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
// Prune dead klasses from subklass/sibling/implementor lists.
Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
}
{
GCTraceTime(Debug, gc, phases) trace("Scrub String and Symbol Tables", gc_timer());
// Delete entries for dead interned strings and clean up unreferenced symbols in the symbol table.
g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
}
if (G1StringDedup::is_enabled()) {
GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", gc_timer());
G1StringDedup::unlink(&GenMarkSweep::is_alive);
}
if (VerifyDuringGC) {
HandleMark hm; // handle scope
@ -197,7 +212,7 @@ void G1MarkSweep::mark_sweep_phase2() {
// phase2, phase3 and phase4, but the ValidateMarkSweep live oops
// tracking expects us to do so. See comment under phase4.
GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", gc_timer());
GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());
prepare_compaction();
}
@ -220,17 +235,11 @@ class G1AdjustPointersClosure: public HeapRegionClosure {
}
};
class G1AlwaysTrueClosure: public BoolObjectClosure {
public:
bool do_object_b(oop p) { return true; }
};
static G1AlwaysTrueClosure always_true;
void G1MarkSweep::mark_sweep_phase3() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// Adjust the pointers to reflect the new locations
GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", gc_timer());
GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
@ -248,7 +257,7 @@ void G1MarkSweep::mark_sweep_phase3() {
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
JNIHandles::weak_oops_do(&always_true, &GenMarkSweep::adjust_pointer_closure);
JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
if (G1StringDedup::is_enabled()) {
G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
@ -291,7 +300,7 @@ void G1MarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime(Trace, gc) tm("Phase 4: Move objects", gc_timer());
GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());
G1SpaceCompactClosure blk;
g1h->heap_region_iterate(&blk);
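
A standalone sketch (not HotSpot's GCTraceTime) of the RAII scope-timing pattern these hunks promote from Trace to Info level under the gc,phases tags: constructing the object starts a timer, and its destructor logs the phase title with the elapsed time.

#include <chrono>
#include <cstdio>

class ScopedPhaseTimer {
  const char* _title;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedPhaseTimer(const char* title)
      : _title(title), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("[gc,phases] %s %lldms\n", _title, (long long)ms);
  }
};

// Usage mirrors the code above:
//   { ScopedPhaseTimer t("Phase 1: Mark live objects"); /* ... */ }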

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -178,7 +178,7 @@ void G1MonitoringSupport::recalculate_sizes() {
// of a GC).
uint young_list_length = g1->young_list()->length();
uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
uint survivor_list_length = g1->young_list()->survivor_length();
assert(young_list_length >= survivor_list_length, "invariant");
uint eden_list_length = young_list_length - survivor_list_length;
// Max length includes any potential extensions to the young gen

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
@ -80,7 +81,7 @@ void G1ParScanThreadState::flush(size_t* surviving_young_words) {
_plab_allocator->flush_and_retire_stats();
_g1h->g1_policy()->record_age_table(&_age_table);
uint length = _g1h->g1_policy()->young_cset_region_length();
uint length = _g1h->collection_set()->young_region_length();
for (uint region_index = 0; region_index < length; region_index++) {
surviving_young_words[region_index] += _surviving_young_words[region_index];
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ void G1StringDedup::initialize() {
void G1StringDedup::stop() {
assert(is_enabled(), "String deduplication not enabled");
G1StringDedupThread::stop();
G1StringDedupThread::thread()->stop();
}
bool G1StringDedup::is_candidate_from_mark(oop obj) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,11 +81,9 @@ void G1StringDedupThread::deduplicate_shared_strings(G1StringDedupStat& stat) {
StringTable::shared_oops_do(&sharedStringDedup);
}
void G1StringDedupThread::run() {
void G1StringDedupThread::run_service() {
G1StringDedupStat total_stat;
initialize_in_thread();
wait_for_universe_init();
deduplicate_shared_strings(total_stat);
// Main loop
@ -96,7 +94,7 @@ void G1StringDedupThread::run() {
// Wait for the queue to become non-empty
G1StringDedupQueue::wait();
if (_should_terminate) {
if (should_terminate()) {
break;
}
@ -133,23 +131,10 @@ void G1StringDedupThread::run() {
}
}
terminate();
}
void G1StringDedupThread::stop() {
{
MonitorLockerEx ml(Terminator_lock);
_thread->_should_terminate = true;
}
void G1StringDedupThread::stop_service() {
G1StringDedupQueue::cancel_wait();
{
MonitorLockerEx ml(Terminator_lock);
while (!_thread->_has_terminated) {
ml.wait();
}
}
}
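
The replacement methods follow a service-thread contract; a standalone sketch (plain C++, illustrative, not HotSpot's ConcurrentGCThread base class) of that contract: the base class owns the termination handshake, while subclasses override run_service() for the main loop and stop_service() to wake it.

#include <atomic>
#include <thread>

class ServiceThread {
  std::atomic<bool> _should_terminate{false};
  std::thread _thread;
protected:
  bool should_terminate() const { return _should_terminate.load(); }
  virtual void run_service() = 0;    // main loop; must poll should_terminate()
  virtual void stop_service() = 0;   // wake the loop out of any blocking wait
public:
  void start() { _thread = std::thread([this] { run_service(); }); }
  void stop() {                      // replaces the hand-rolled Terminator_lock code
    _should_terminate.store(true);
    stop_service();
    _thread.join();
  }
  virtual ~ServiceThread() {}        // callers must stop() before destruction
};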
void G1StringDedupThread::print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,14 +45,14 @@ private:
void print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
void run_service();
void stop_service();
public:
static void create();
static void stop();
static G1StringDedupThread* thread();
virtual void run();
void deduplicate_shared_strings(G1StringDedupStat& stat);
};

Some files were not shown because too many files have changed in this diff