Lana Steuck 2013-08-26 14:53:15 -07:00
commit ff0005f62f
140 changed files with 1975 additions and 1806 deletions


@@ -224,3 +224,4 @@ ea73f01b9053e7165e7ba80f242bafecbc6af712 jdk8-b96
 3d34036aae4ea90b2ca59712d5a69db3221f0875 jdk8-b100
 edb01c460d4cab21ff0ff13512df7b746efaa0e7 jdk8-b101
 bbe43d712fe08e650808d774861b256ccb34e500 jdk8-b102
+30a1d677a20c6a95f98043d8f20ce570304e3818 jdk8-b103


@@ -224,3 +224,4 @@ a1c1e8bf71f354f3aec0214cf13d6668811e021d jdk8-b97
 d2dcb110e9dbaf9903c05b211df800e78e4b394e jdk8-b100
 9f74a220677dc265a724515d8e2617548cef62f1 jdk8-b101
 5eb3c1dc348f72a7f84f7d9d07834e8bbe09a799 jdk8-b102
+b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103

File diff suppressed because it is too large


@@ -412,17 +412,16 @@ AC_DEFUN([PLATFORM_SET_COMPILER_TARGET_BITS_FLAGS],
 [
   # keep track of c/cxx flags that we added outselves...
   # to prevent emitting warning...
-  ADDED_CFLAGS=" -m${OPENJDK_TARGET_CPU_BITS}"
-  ADDED_CXXFLAGS=" -m${OPENJDK_TARGET_CPU_BITS}"
-  ADDED_LDFLAGS=" -m${OPENJDK_TARGET_CPU_BITS}"
+  TARGET_BITS_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
+  AC_SUBST(TARGET_BITS_FLAG)
 
-  CFLAGS="${CFLAGS}${ADDED_CFLAGS}"
-  CXXFLAGS="${CXXFLAGS}${ADDED_CXXFLAGS}"
-  LDFLAGS="${LDFLAGS}${ADDED_LDFLAGS}"
+  CFLAGS="${CFLAGS} ${TARGET_BITS_FLAG}"
+  CXXFLAGS="${CXXFLAGS} ${TARGET_BITS_FLAG}"
+  LDFLAGS="${LDFLAGS} ${TARGET_BITS_FLAG}"
 
-  CFLAGS_JDK="${CFLAGS_JDK}${ADDED_CFLAGS}"
-  CXXFLAGS_JDK="${CXXFLAGS_JDK}${ADDED_CXXFLAGS}"
-  LDFLAGS_JDK="${LDFLAGS_JDK}${ADDED_LDFLAGS}"
+  CFLAGS_JDK="${CFLAGS_JDK} ${TARGET_BITS_FLAG}"
+  CXXFLAGS_JDK="${CXXFLAGS_JDK} ${TARGET_BITS_FLAG}"
+  LDFLAGS_JDK="${LDFLAGS_JDK} ${TARGET_BITS_FLAG}"
 ])
 
 AC_DEFUN_ONCE([PLATFORM_SETUP_OPENJDK_TARGET_BITS],


@@ -304,6 +304,7 @@ MACOSX_VERSION_MIN=@MACOSX_VERSION_MIN@
 COMPILER_TYPE:=@COMPILER_TYPE@
 COMPILER_NAME:=@COMPILER_NAME@
 
+TARGET_BITS_FLAG=@TARGET_BITS_FLAG@
 COMPILER_SUPPORTS_TARGET_BITS_FLAG=@COMPILER_SUPPORTS_TARGET_BITS_FLAG@
 CC_OUT_OPTION:=@CC_OUT_OPTION@


@@ -83,9 +83,6 @@ $(foreach i,2 3 4 5 6 7 8 9 10 11 12 13 14 15, $(if $($i),$1_$(strip $($i)))$(NE
   $(call LogSetupMacroEntry,SetupIdlCompilation($1),$2,$3,$4,$5,$6,$7,$8,$9,$(10),$(11),$(12),$(13),$(14),$(15))
   $(if $(16),$(error Internal makefile error: Too many arguments to SetupIdlCompilation, please update IdlCompilation.gmk))
 
-  # Remove any relative addressing in the paths.
-  $1_SRC := $$(abspath $$($1_SRC))
-  $1_BIN := $$(abspath $$($1_BIN))
   # Find all existing java files and existing class files.
   $$(eval $$(call MakeDir,$$($1_BIN)))
   $1_SRCS := $$(shell find $$($1_SRC) -name "*.idl")


@@ -204,7 +204,7 @@ clean: clean-langtools clean-corba clean-jaxp clean-jaxws clean-hotspot clean-jd
 # If the output directory was created by configure and now becomes empty, remove it as well.
 # FIXME: tmp should not be here, fix ResetTimers instead. And remove spec.sh!
 dist-clean: clean
-	@($(CD) $(OUTPUT_ROOT) && $(RM) -r *spec.gmk config.* configure-arguments Makefile compare.sh spec.sh tmp)
+	@($(CD) $(OUTPUT_ROOT) && $(RM) -r *spec.gmk config.* configure-arguments Makefile compare.sh spec.sh tmp javacservers)
 	@$(if $(filter $(CONF_NAME),$(notdir $(OUTPUT_ROOT))), \
 	  if test "x`$(LS) $(OUTPUT_ROOT)`" != x; then \
 	    $(ECHO) "Warning: Not removing non-empty configuration directory for '$(CONF_NAME)'" ;\


@@ -224,3 +224,4 @@ c8286839d0df04aba819ec4bef12b86babccf30e jdk8-b90
 8d492f1dfd1b131a4c7886ee6b59528609f7e4fe jdk8-b100
 a013024b07475782f1fa8e196e950b34b4077663 jdk8-b101
 528c7e76eaeee022817ee085668459bc97cf5665 jdk8-b102
+49c4a777fdfd648d4c3fffc940fdb97a23108ca8 jdk8-b103


@@ -367,3 +367,5 @@ f6921c876db192bba389cec062855a66372da01c jdk8-b101
 530fe88b3b2c710f42810b3580d86a0d83ad6c1c hs25-b44
 c4697c1c448416108743b59118b4a2498b339d0c jdk8-b102
 7f55137d6aa81efc6eb0035813709f2cb6a26b8b hs25-b45
+6f9be7f87b9653e94fd8fb3070891a0cc91b15bf jdk8-b103
+580430d131ccd475e2f2ad4006531b8c4813d102 hs25-b46


@@ -44,7 +44,7 @@ public class PhaseCFG extends Phase {
     Type type = db.lookupType("PhaseCFG");
     numBlocksField = new CIntField(type.getCIntegerField("_num_blocks"), 0);
     blocksField = type.getAddressField("_blocks");
-    bbsField = type.getAddressField("_bbs");
+    bbsField = type.getAddressField("_node_to_block_mapping");
     brootField = type.getAddressField("_broot");
   }


@@ -41,13 +41,11 @@ SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
 SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad)
 else
 SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad)
 endif
 
 EXEC = $(OUTDIR)/adlc


@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=45
+HS_BUILD_NUMBER=46
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8


@@ -41,13 +41,11 @@ SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
 SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad)
 else
 SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad)
 endif
 
 EXEC = $(OUTDIR)/adlc


@@ -42,13 +42,11 @@ SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad
 ifeq ("${Platform_arch_model}", "${Platform_arch}")
 SOURCES.AD = \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad)
 else
 SOURCES.AD = \
   $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
-  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad)
 endif
 
 EXEC = $(OUTDIR)/adlc


@@ -283,9 +283,9 @@ $(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files)
   $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
        $(DTraced_Files) ||\
     STATUS=$$?;\
-    if [ x"$$STATUS" = x"1" -a \
-         x`uname -r` = x"5.10" -a \
+    if [ x"$$STATUS" = x"1" ]; then \
+      if [ x`uname -r` = x"5.10" -a \
          x`uname -p` = x"sparc" ]; then\
       echo "*****************************************************************";\
       echo "* If you are building server compiler, and the error message is ";\
       echo "* \"incorrect ELF machine type...\", you have run into solaris bug ";\
@@ -294,6 +294,20 @@ $(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files)
       echo "* environment variable HOTSPOT_DISABLE_DTRACE_PROBES to disable ";\
       echo "* dtrace probes for this build.";\
       echo "*****************************************************************";\
+      elif [ x`uname -r` = x"5.10" ]; then\
+      echo "*****************************************************************";\
+      echo "* If you are seeing 'syntax error near \"umpiconninfo_t\"' on Solaris";\
+      echo "* 10, try doing 'cd /usr/lib/dtrace && gzip mpi.d' as root, ";\
+      echo "* or set the environment variable HOTSPOT_DISABLE_DTRACE_PROBES";\
+      echo "* to disable dtrace probes for this build.";\
+      echo "*****************************************************************";\
+      else \
+      echo "*****************************************************************";\
+      echo "* If you cannot fix dtrace build issues, try to ";\
+      echo "* set the environment variable HOTSPOT_DISABLE_DTRACE_PROBES";\
+      echo "* to disable dtrace probes for this build.";\
+      echo "*****************************************************************";\
+      fi; \
     fi;\
     exit $$STATUS
 
 # Since some DTraced_Files are in LIBJVM.o and they are touched by this


@@ -1,6 +1,6 @@
 @echo off
 REM
-REM Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+REM Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 REM
 REM This code is free software; you can redistribute it and/or modify it
@@ -148,7 +148,7 @@ echo HotSpotJDKDist=%HotSpotJDKDist%
 
 REM This is now safe to do.
 :copyfiles
-for /D %%i in (compiler1, compiler2, tiered, core) do (
+for /D %%i in (compiler1, compiler2, tiered ) do (
 if NOT EXIST %HotSpotBuildSpace%\%%i\generated mkdir %HotSpotBuildSpace%\%%i\generated
 copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\generated > NUL
 )
@@ -156,7 +156,7 @@ copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\
 REM force regneration of ProjectFile
 if exist %ProjectFile% del %ProjectFile%
 
-for /D %%i in (compiler1, compiler2, tiered, core) do (
+for /D %%i in (compiler1, compiler2, tiered ) do (
 echo -- %%i --
 echo # Generated file! > %HotSpotBuildSpace%\%%i\local.make
 echo # Changing a variable below and then deleting %ProjectFile% will cause >> %HotSpotBuildSpace%\%%i\local.make


@@ -73,19 +73,17 @@ done
 BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
 
-if [ -d "${ALTSRC}/share/vm/jfr" ]; then
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
+if [ -d "${ALTSRC}/share/vm/jfr/buffers" ]; then
   BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
 fi
 
 BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
 
-CORE_PATHS="${BASE_PATHS}"
 # shared is already in BASE_PATHS. Should add vm/memory but that one is also in BASE_PATHS.
 if [ -d "${ALTSRC}/share/vm/gc_implementation" ]; then
-  CORE_PATHS="${CORE_PATHS} `$FIND ${ALTSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
+  BASE_PATHS="${BASE_PATHS} `$FIND ${ALTSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
 fi
 
-CORE_PATHS="${CORE_PATHS} `$FIND ${COMMONSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
+BASE_PATHS="${BASE_PATHS} `$FIND ${COMMONSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
 
 if [ -d "${ALTSRC}/share/vm/c1" ]; then
   COMPILER1_PATHS="${ALTSRC}/share/vm/c1"
 
@@ -104,12 +102,11 @@ COMPILER2_PATHS="${COMPILER2_PATHS} ${GENERATED}/adfiles"
 
 # Include dirs per type.
 case "${TYPE}" in
-  "core")      Src_Dirs="${CORE_PATHS}" ;;
-  "compiler1") Src_Dirs="${CORE_PATHS} ${COMPILER1_PATHS}" ;;
-  "compiler2") Src_Dirs="${CORE_PATHS} ${COMPILER2_PATHS}" ;;
-  "tiered")    Src_Dirs="${CORE_PATHS} ${COMPILER1_PATHS} ${COMPILER2_PATHS}" ;;
-  "zero")      Src_Dirs="${CORE_PATHS}" ;;
-  "shark")     Src_Dirs="${CORE_PATHS}" ;;
+  "compiler1") Src_Dirs="${BASE_PATHS} ${COMPILER1_PATHS}" ;;
+  "compiler2") Src_Dirs="${BASE_PATHS} ${COMPILER2_PATHS}" ;;
+  "tiered")    Src_Dirs="${BASE_PATHS} ${COMPILER1_PATHS} ${COMPILER2_PATHS}" ;;
+  "zero")      Src_Dirs="${BASE_PATHS}" ;;
+  "shark")     Src_Dirs="${BASE_PATHS}" ;;
 esac
 
 COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*"
 
@@ -122,7 +119,6 @@ Src_Files_EXCLUDE="jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp"
 
 # Exclude per type.
 case "${TYPE}" in
-  "core")      Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
   "compiler1") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
   "compiler2") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
   "tiered")    Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
 
@@ -149,9 +145,17 @@ for e in ${Src_Dirs}; do
   Src_Files="${Src_Files}`findsrc ${e}` "
 done
 
-Obj_Files=
+Obj_Files=" "
 for e in ${Src_Files}; do
-  Obj_Files="${Obj_Files}${e%\.[!.]*}.obj "
+  o="${e%\.[!.]*}.obj"
+  set +e
+  chk=`expr "${Obj_Files}" : ".* $o"`
+  set -e
+  if [ "$chk" != 0 ]; then
+    echo "# INFO: skipping duplicate $o"
+    continue
+  fi
+  Obj_Files="${Obj_Files}$o "
 done
 
 echo Obj_Files=${Obj_Files}


@@ -55,13 +55,11 @@ CXX_INCLUDE_DIRS=\
 !if "$(Platform_arch_model)" == "$(Platform_arch)"
 SOURCES_AD=\
-  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
-  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
+  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad
 !else
 SOURCES_AD=\
   $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
-  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad \
-  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
+  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad
 !endif
 
 # NOTE! If you add any files here, you must also update GENERATED_NAMES_IN_DIR


@@ -44,10 +44,11 @@ ProjectCreatorSources=\
 # This is only used internally
 ProjectCreatorIncludesPRIVATE=\
-        -relativeInclude src\closed\share\vm \
-        -relativeInclude src\closed\os\windows\vm \
-        -relativeInclude src\closed\os_cpu\windows_$(Platform_arch)\vm \
-        -relativeInclude src\closed\cpu\$(Platform_arch)\vm \
+        -relativeAltSrcInclude src\closed \
+        -altRelativeInclude share\vm \
+        -altRelativeInclude os\windows\vm \
+        -altRelativeInclude os_cpu\windows_$(Platform_arch)\vm \
+        -altRelativeInclude cpu\$(Platform_arch)\vm \
         -relativeInclude src\share\vm \
         -relativeInclude src\share\vm\precompiled \
         -relativeInclude src\share\vm\prims\wbtestmethods \
 
@@ -91,7 +92,7 @@ ProjectCreatorIDEOptions = \
        -disablePch        getThread_windows_$(Platform_arch).cpp \
        -disablePch_compiler2     opcodes.cpp
 
-# Common options for the IDE builds for core, c1, and c2
+# Common options for the IDE builds for c1, and c2
 ProjectCreatorIDEOptions=\
         $(ProjectCreatorIDEOptions) \
         -sourceBase $(HOTSPOTWORKSPACE) \
 
@@ -157,19 +158,11 @@ ProjectCreatorIDEOptionsIgnoreCompiler2=\
  -ignoreFile_TARGET ciTypeFlow.hpp \
  -ignoreFile_TARGET $(Platform_arch_model).ad
 
-##################################################
-# Without compiler(core) specific options
-##################################################
-ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
- $(ProjectCreatorIDEOptionsIgnoreCompiler1:TARGET=core) \
- $(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=core)
-
 ##################################################
 # Client(C1) compiler specific options
 ##################################################
 ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
  -define_compiler1 COMPILER1 \
- -ignorePath_compiler1 core \
  $(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=compiler1)
 
 ##################################################
 
@@ -178,7 +171,6 @@ $(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=compiler1)
 #NOTE! This list must be kept in sync with GENERATED_NAMES in adlc.make.
 ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
  -define_compiler2 COMPILER2 \
- -ignorePath_compiler2 core \
  -additionalFile_compiler2 $(Platform_arch_model).ad \
 -additionalFile_compiler2 ad_$(Platform_arch_model).cpp \
 -additionalFile_compiler2 ad_$(Platform_arch_model).hpp \


@@ -90,25 +90,25 @@ $(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceType
 !if "$(OPENJDK)" == "true"
 
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating OpenJDK $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
 
 !else
 
 $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
 
 $(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
 
 $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
 
 $(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
-	@echo Generating $@
+	@echo Generating AltSrc $@
 	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
 
 !endif


@@ -36,10 +36,6 @@ CXX_FLAGS=$(CXX_FLAGS) /D "PRODUCT"
 CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
 !endif
 
-!if "$(Variant)" == "core"
-# No need to define anything, CORE is defined as !COMPILER1 && !COMPILER2
-!endif
-
 !if "$(Variant)" == "compiler1"
 CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1"
 !endif


@@ -112,6 +112,7 @@ ReleaseOptions = -define HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) -def
 ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
 
 $(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
+	@if "$(MSC_VER)"=="1500" echo Make sure you have VS2008 SP1 or later, or you may see 'expanded command line too long'
 	@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
 
 clean:


@@ -1887,6 +1887,27 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   if (ProfileInterpreter) {
     __ set_method_data_pointer_for_bcp();
   }
+
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+
+    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
+    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
+
+    __ br_null(G1_scratch, false, Assembler::pn, L_done);
+    __ delayed()->nop();
+
+    __ st_ptr(G1_scratch, Lesp, wordSize);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
   // Resume bytecode interpretation at the current bcp
   __ dispatch_next(vtos);
   // end of JVMTI PopFrame support


@@ -1920,6 +1920,29 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   __ get_thread(thread);
   __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
 
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+    const Register local0 = rdi;
+
+    __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
+    __ jcc(Assembler::notEqual, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ get_method(rdx);
+    __ movptr(rax, Address(local0, 0));
+    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, L_done);
+
+    __ movptr(Address(rbx, 0), rax);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
 
   __ dispatch_next(vtos);
   // end of PopFrame support


@@ -1929,6 +1929,29 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
   __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
           JavaThread::popframe_inactive);
 
+#if INCLUDE_JVMTI
+  if (EnableInvokeDynamic) {
+    Label L_done;
+    const Register local0 = r14;
+
+    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
+    __ jcc(Assembler::notEqual, L_done);
+
+    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+
+    __ get_method(rdx);
+    __ movptr(rax, Address(local0, 0));
+    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);
+
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, L_done);
+
+    __ movptr(Address(rbx, 0), rax);
+    __ bind(L_done);
+  }
+#endif // INCLUDE_JVMTI
+
 
   __ dispatch_next(vtos);
   // end of PopFrame support


@@ -58,8 +58,8 @@ class EntryFrame : public ZeroFrame {
              JavaCallWrapper* call_wrapper,
              TRAPS);
  public:
-  JavaCallWrapper *call_wrapper() const {
-    return (JavaCallWrapper *) value_of_word(call_wrapper_off);
+  JavaCallWrapper **call_wrapper() const {
+    return (JavaCallWrapper **) addr_of_word(call_wrapper_off);
   }
 
  public:


@@ -141,7 +141,7 @@ inline intptr_t* frame::id() const {
   return fp();
 }
 
-inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {
+inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
   return zero_entryframe()->call_wrapper();
 }


@@ -176,6 +176,19 @@ class StubGenerator: public StubCodeGenerator {
       StubRoutines::_oop_arraycopy;
   }
 
+  static int SafeFetch32(int *adr, int errValue) {
+    int value = errValue;
+    value = *adr;
+    return value;
+  }
+
+  static intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
+    intptr_t value = errValue;
+    value = *adr;
+    return value;
+  }
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -225,6 +238,15 @@ class StubGenerator: public StubCodeGenerator {
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
 
+    // Safefetch stubs.
+    StubRoutines::_safefetch32_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetch32);
+    StubRoutines::_safefetch32_fault_pc = NULL;
+    StubRoutines::_safefetch32_continuation_pc = NULL;
+    StubRoutines::_safefetchN_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetchN);
+    StubRoutines::_safefetchN_fault_pc = NULL;
+    StubRoutines::_safefetchN_continuation_pc = NULL;
   }
 
  public:
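The Zero SafeFetch stubs added above are plain C++ loads: the fault and continuation PCs are left NULL, so a truly unmapped address would still crash on this port, whereas ports with real stubs let the signal handler resume at the continuation PC and hand back errValue. A minimal, self-contained C++ sketch of the caller-side contract (the probe function and sentinel below are illustrative assumptions, not part of this changeset):

// Same shape as the Zero stub above: a direct load, so this variant gives
// no protection if adr is genuinely unmapped.
static int SafeFetch32(int* adr, int errValue) {
  int value = errValue;
  value = *adr;
  return value;
}

// Caller-side contract: pass a sentinel you can tell apart from real data;
// getting the sentinel back means the address should be treated as unreadable.
static bool probe_readable(int* possibly_bad) {
  const int sentinel = 0x5afef00d;  // illustrative sentinel only
  return SafeFetch32(possibly_bad, sentinel) != sentinel;
}

int main() {
  int x = 42;
  return probe_readable(&x) ? 0 : 1;  // &x is readable, so this returns 0
}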


@@ -445,14 +445,14 @@ AttachOperation* AttachListener::dequeue() {
 
 void AttachListener::vm_start() {
   char fn[UNIX_PATH_MAX];
-  struct stat64 st;
+  struct stat st;
   int ret;
 
   int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
            os::get_temp_directory(), os::current_process_id());
   assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
 
-  RESTARTABLE(::stat64(fn, &st), ret);
+  RESTARTABLE(::stat(fn, &st), ret);
   if (ret == 0) {
     ret = ::unlink(fn);
     if (ret == -1) {
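The stat64-to-stat change above is presumably because the BSD and OS X headers do not provide a separate stat64 interface; plain stat already reports 64-bit file sizes there. A tiny standalone C++ sketch of the same call pattern (hypothetical example, not HotSpot code):

#include <sys/stat.h>
#include <stdio.h>

int main(int argc, char** argv) {
  struct stat st;  // on BSD/OS X this is already 64-bit capable
  if (argc > 1 && ::stat(argv[1], &st) == 0) {
    printf("%s: %lld bytes\n", argv[1], (long long)st.st_size);
  }
  return 0;
}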


@@ -1642,6 +1642,8 @@ void os::print_os_info(outputStream* st) {
 
 void os::win32::print_windows_version(outputStream* st) {
   OSVERSIONINFOEX osvi;
+  SYSTEM_INFO si;
+
   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
 
@@ -1651,6 +1653,18 @@ void os::win32::print_windows_version(outputStream* st) {
   }
 
   int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
+
+  ZeroMemory(&si, sizeof(SYSTEM_INFO));
+  if (os_vers >= 5002) {
+    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
+    // find out whether we are running on 64 bit processor or not.
+    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
+      os::Kernel32Dll::GetNativeSystemInfo(&si);
+    } else {
+      GetSystemInfo(&si);
+    }
+  }
+
   if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
     switch (os_vers) {
     case 3051: st->print(" Windows NT 3.51"); break;
@@ -1658,57 +1672,48 @@ void os::win32::print_windows_version(outputStream* st) {
     case 5000: st->print(" Windows 2000"); break;
     case 5001: st->print(" Windows XP"); break;
     case 5002:
-    case 6000:
-    case 6001:
-    case 6002: {
-      // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
-      // find out whether we are running on 64 bit processor or not.
-      SYSTEM_INFO si;
-      ZeroMemory(&si, sizeof(SYSTEM_INFO));
-      if (!os::Kernel32Dll::GetNativeSystemInfoAvailable()){
-        GetSystemInfo(&si);
+      if (osvi.wProductType == VER_NT_WORKSTATION &&
+          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+        st->print(" Windows XP x64 Edition");
       } else {
-        os::Kernel32Dll::GetNativeSystemInfo(&si);
-      }
-      if (os_vers == 5002) {
-        if (osvi.wProductType == VER_NT_WORKSTATION &&
-            si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-          st->print(" Windows XP x64 Edition");
-        else
-          st->print(" Windows Server 2003 family");
-      } else if (os_vers == 6000) {
-        if (osvi.wProductType == VER_NT_WORKSTATION)
-          st->print(" Windows Vista");
-        else
-          st->print(" Windows Server 2008");
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-          st->print(" , 64 bit");
-      } else if (os_vers == 6001) {
-        if (osvi.wProductType == VER_NT_WORKSTATION) {
-          st->print(" Windows 7");
-        } else {
-          // Unrecognized windows, print out its major and minor versions
-          st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
-        }
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-          st->print(" , 64 bit");
-      } else if (os_vers == 6002) {
-        if (osvi.wProductType == VER_NT_WORKSTATION) {
-          st->print(" Windows 8");
-        } else {
-          st->print(" Windows Server 2012");
-        }
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-          st->print(" , 64 bit");
-      } else { // future os
-        // Unrecognized windows, print out its major and minor versions
-        st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
-        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
-          st->print(" , 64 bit");
+        st->print(" Windows Server 2003 family");
       }
       break;
-    }
-    default: // future windows, print out its major and minor versions
+
+    case 6000:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows Vista");
+      } else {
+        st->print(" Windows Server 2008");
+      }
+      break;
+
+    case 6001:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 7");
+      } else {
+        st->print(" Windows Server 2008 R2");
+      }
+      break;
+
+    case 6002:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 8");
+      } else {
+        st->print(" Windows Server 2012");
+      }
+      break;
+
+    case 6003:
+      if (osvi.wProductType == VER_NT_WORKSTATION) {
+        st->print(" Windows 8.1");
+      } else {
+        st->print(" Windows Server 2012 R2");
+      }
+      break;
+
+    default: // future os
+      // Unrecognized windows, print out its major and minor versions
       st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
     }
   } else {
@@ -1720,6 +1725,11 @@ void os::win32::print_windows_version(outputStream* st) {
       st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
     }
   }
+
+  if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
+    st->print(" , 64 bit");
+  }
+
   st->print(" Build %d", osvi.dwBuildNumber);
   st->print(" %s", osvi.szCSDVersion);  // service pack
   st->cr();
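The rewritten version printing above pulls the SYSTEM_INFO query out of the switch and appends the " , 64 bit" suffix in one place for Vista and later, keyed off wProcessorArchitecture. A small standalone Win32 sketch of that detection (hypothetical example, not HotSpot code; HotSpot resolves GetNativeSystemInfo dynamically through Kernel32Dll because very old Windows releases lack it):

#include <windows.h>
#include <stdio.h>

int main() {
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(si));
  // GetNativeSystemInfo reports the real processor architecture even from a
  // 32-bit process running under WOW64, where GetSystemInfo would report x86.
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    printf("64-bit Windows\n");
  } else {
    printf("32-bit Windows\n");
  }
  return 0;
}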


@@ -1,26 +0,0 @@
//
// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// X86 Bsd Architecture Description File


@@ -1,65 +0,0 @@
//
// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// AMD64 Bsd Architecture Description File
//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
// This block specifies the encoding classes used by the compiler to
// output byte streams. Encoding classes generate functions which are
// called by Machine Instruction Nodes in order to generate the bit
// encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword. There are currently
// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
// COND_INTER. REG_INTER causes an operand to generate a function
// which returns its register number when queried. CONST_INTER causes
// an operand to generate a function which returns the value of the
// constant when queried. MEMORY_INTER causes an operand to generate
// four functions which return the Base Register, the Index Register,
// the Scale Value, and the Offset Value of the operand when queried.
// COND_INTER causes an operand to generate six functions which return
// the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional
// instruction. Instructions specify two basic values for encoding.
// They use the ins_encode keyword to specify their encoding class
// (which must be one of the class names specified in the encoding
// block), and they use the opcode keyword to specify, in order, their
// primary, secondary, and tertiary opcode. Only the opcode sections
// which a particular instruction needs for encoding need to be
// specified.
encode %{
// Build emit functions for each basic byte or larger field in the intel
// encoding scheme (opcode, rm, sib, immediate), and call them from C++
// code in the enc_class source block. Emit functions will live in the
// main source block for now. In future, we can generalize this by
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
%}
// Platform dependent source
source %{
%}


@@ -1,26 +0,0 @@
//
// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// X86 Linux Architecture Description File


@@ -1,65 +0,0 @@
//
// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// AMD64 Linux Architecture Description File
//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
// This block specifies the encoding classes used by the compiler to
// output byte streams. Encoding classes generate functions which are
// called by Machine Instruction Nodes in order to generate the bit
// encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword. There are currently
// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
// COND_INTER. REG_INTER causes an operand to generate a function
// which returns its register number when queried. CONST_INTER causes
// an operand to generate a function which returns the value of the
// constant when queried. MEMORY_INTER causes an operand to generate
// four functions which return the Base Register, the Index Register,
// the Scale Value, and the Offset Value of the operand when queried.
// COND_INTER causes an operand to generate six functions which return
// the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional
// instruction. Instructions specify two basic values for encoding.
// They use the ins_encode keyword to specify their encoding class
// (which must be one of the class names specified in the encoding
// block), and they use the opcode keyword to specify, in order, their
// primary, secondary, and tertiary opcode. Only the opcode sections
// which a particular instruction needs for encoding need to be
// specified.
encode %{
// Build emit functions for each basic byte or larger field in the intel
// encoding scheme (opcode, rm, sib, immediate), and call them from C++
// code in the enc_class source block. Emit functions will live in the
// main source block for now. In future, we can generalize this by
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
%}
// Platform dependent source
source %{
%}


@@ -410,16 +410,6 @@ extern "C" {
   int SpinPause() {
   }
 
-  int SafeFetch32(int *adr, int errValue) {
-    int value = errValue;
-    value = *adr;
-    return value;
-  }
-  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
-    intptr_t value = errValue;
-    value = *adr;
-    return value;
-  }
 
   void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
     if (from > to) {


@@ -1,27 +0,0 @@
//
// Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
//
// SPARC Solaris Architecture Description File


@@ -1,26 +0,0 @@
//
// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// X86 Solaris Architecture Description File


@@ -1,63 +0,0 @@
//
// Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// AMD64 Solaris Architecture Description File
//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
// This block specifies the encoding classes used by the compiler to
// output byte streams. Encoding classes generate functions which are
// called by Machine Instruction Nodes in order to generate the bit
// encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword. There are currently
// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
// COND_INTER. REG_INTER causes an operand to generate a function
// which returns its register number when queried. CONST_INTER causes
// an operand to generate a function which returns the value of the
// constant when queried. MEMORY_INTER causes an operand to generate
// four functions which return the Base Register, the Index Register,
// the Scale Value, and the Offset Value of the operand when queried.
// COND_INTER causes an operand to generate six functions which return
// the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional
// instruction. Instructions specify two basic values for encoding.
// They use the ins_encode keyword to specify their encoding class
// (which must be one of the class names specified in the encoding
// block), and they use the opcode keyword to specify, in order, their
// primary, secondary, and tertiary opcode. Only the opcode sections
// which a particular instruction needs for encoding need to be
// specified.
encode %{
// Build emit functions for each basic byte or larger field in the intel
// encoding scheme (opcode, rm, sib, immediate), and call them from C++
// code in the enc_class source block. Emit functions will live in the
// main source block for now. In future, we can generalize this by
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
%}
// Platform dependent source
source %{
%}


@@ -1,26 +0,0 @@
//
// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// X86 Win32 Architecture Description File


@@ -1,63 +0,0 @@
//
// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// AMD64 Win32 Architecture Description File
//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
// byte streams. Encoding classes generate functions which are called by
// Machine Instruction Nodes in order to generate the bit encoding of the
// instruction. Operands specify their base encoding interface with the
// interface keyword. There are currently supported four interfaces,
// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
// operand to generate a function which returns its register number when
// queried. CONST_INTER causes an operand to generate a function which
// returns the value of the constant when queried. MEMORY_INTER causes an
// operand to generate four functions which return the Base Register, the
// Index Register, the Scale Value, and the Offset Value of the operand when
// queried. COND_INTER causes an operand to generate six functions which
// return the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional instruction.
// Instructions specify two basic values for encoding. They use the
// ins_encode keyword to specify their encoding class (which must be one of
// the class names specified in the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode. Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
encode %{
// Build emit functions for each basic byte or larger field in the intel
// encoding scheme (opcode, rm, sib, immediate), and call them from C++
// code in the enc_class source block. Emit functions will live in the
// main source block for now. In future, we can generalize this by
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
%}
// Platform dependent source
source %{
%}


@@ -142,6 +142,69 @@ class BuildConfig {
         return rv;
     }
 
+    // Returns true if the specified path refers to a relative alternate
+    // source file. RelativeAltSrcInclude is usually "src\closed".
+    public static boolean matchesRelativeAltSrcInclude(String path) {
+        String relativeAltSrcInclude =
+            getFieldString(null, "RelativeAltSrcInclude");
+        Vector<String> v = getFieldVector(null, "AltRelativeInclude");
+        for (String pathPart : v) {
+            if (path.contains(relativeAltSrcInclude + Util.sep + pathPart)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // Returns the relative alternate source file for the specified path.
+    // Null is returned if the specified path does not have a matching
+    // alternate source file.
+    public static String getMatchingRelativeAltSrcFile(String path) {
+        Vector<String> v = getFieldVector(null, "RelativeAltSrcFileList");
+        if (v == null) {
+            return null;
+        }
+        for (String pathPart : v) {
+            if (path.endsWith(pathPart)) {
+                String relativeAltSrcInclude =
+                    getFieldString(null, "RelativeAltSrcInclude");
+                return relativeAltSrcInclude + Util.sep + pathPart;
+            }
+        }
+        return null;
+    }
+
+    // Returns true if the specified path has a matching alternate
+    // source file.
+    public static boolean matchesRelativeAltSrcFile(String path) {
+        return getMatchingRelativeAltSrcFile(path) != null;
+    }
+
+    // Track the specified alternate source file. The source file is
+    // tracked without the leading .*<sep><RelativeAltSrcFileList><sep>
+    // part to make matching regular source files easier.
+    public static void trackRelativeAltSrcFile(String path) {
+        String pattern = getFieldString(null, "RelativeAltSrcInclude") +
+            Util.sep;
+        int altSrcInd = path.indexOf(pattern);
+        if (altSrcInd == -1) {
+            // not an AltSrc path
+            return;
+        }
+
+        altSrcInd += pattern.length();
+        if (altSrcInd >= path.length()) {
+            // not a valid AltSrc path
+            return;
+        }
+
+        String altSrcFile = path.substring(altSrcInd);
+        Vector v = getFieldVector(null, "RelativeAltSrcFileList");
+        if (v == null || !v.contains(altSrcFile)) {
+            addFieldVector(null, "RelativeAltSrcFileList", altSrcFile);
+        }
+    }
+
     void addTo(Hashtable ht, String key, String value) {
         ht.put(expandFormat(key), expandFormat(value));
     }
@ -272,8 +335,19 @@ class BuildConfig {
private Vector getSourceIncludes() { private Vector getSourceIncludes() {
Vector<String> rv = new Vector<String>(); Vector<String> rv = new Vector<String>();
Vector<String> ri = new Vector<String>();
String sourceBase = getFieldString(null, "SourceBase"); String sourceBase = getFieldString(null, "SourceBase");
// add relative alternate source include values:
String relativeAltSrcInclude =
getFieldString(null, "RelativeAltSrcInclude");
Vector<String> asri = new Vector<String>();
collectRelevantVectors(asri, "AltRelativeInclude");
for (String f : asri) {
rv.add(sourceBase + Util.sep + relativeAltSrcInclude +
Util.sep + f);
}
Vector<String> ri = new Vector<String>();
collectRelevantVectors(ri, "RelativeInclude"); collectRelevantVectors(ri, "RelativeInclude");
for (String f : ri) { for (String f : ri) {
rv.add(sourceBase + Util.sep + f); rv.add(sourceBase + Util.sep + f);
@ -541,35 +615,6 @@ class TieredProductConfig extends ProductConfig {
} }
} }
class CoreDebugConfig extends GenericDebugNonKernelConfig {
String getOptFlag() {
return getCI().getNoOptFlag();
}
CoreDebugConfig() {
initNames("core", "debug", "jvm.dll");
init(getIncludes(), getDefines());
}
}
class CoreFastDebugConfig extends GenericDebugNonKernelConfig {
String getOptFlag() {
return getCI().getOptFlag();
}
CoreFastDebugConfig() {
initNames("core", "fastdebug", "jvm.dll");
init(getIncludes(), getDefines());
}
}
class CoreProductConfig extends ProductConfig {
CoreProductConfig() {
initNames("core", "product", "jvm.dll");
init(getIncludes(), getDefines());
}
}
abstract class CompilerInterface { abstract class CompilerInterface {
abstract Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir); abstract Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir);

View File

@ -12,11 +12,15 @@ public class FileTreeCreator extends SimpleFileVisitor<Path>
final int startDirLength; final int startDirLength;
Stack<DirAttributes> attributes = new Stack<DirAttributes>(); Stack<DirAttributes> attributes = new Stack<DirAttributes>();
Vector<BuildConfig> allConfigs; Vector<BuildConfig> allConfigs;
WinGammaPlatformVC10 wg; WinGammaPlatform wg;
WinGammaPlatformVC10 wg10;
public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatformVC10 wg) { public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
super(); super();
this.wg = wg; this.wg = wg;
if (wg instanceof WinGammaPlatformVC10) {
wg10 = (WinGammaPlatformVC10)wg;
}
this.allConfigs = allConfigs; this.allConfigs = allConfigs;
this.startDir = startDir; this.startDir = startDir;
startDirLength = startDir.toAbsolutePath().toString().length(); startDirLength = startDir.toAbsolutePath().toString().length();

View File

@ -1,3 +1,27 @@
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import static java.nio.file.FileVisitResult.CONTINUE; import static java.nio.file.FileVisitResult.CONTINUE;
import java.io.IOException; import java.io.IOException;
@ -21,6 +45,8 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
boolean usePch = false; boolean usePch = false;
boolean disablePch = false; boolean disablePch = false;
boolean useIgnore = false; boolean useIgnore = false;
boolean isAltSrc = false; // only needed as a debugging crumb
boolean isReplacedByAltSrc = false;
String fileName = file.getFileName().toString(); String fileName = file.getFileName().toString();
// TODO hideFile // TODO hideFile
@ -30,6 +56,26 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
usePch = true; usePch = true;
} }
String fileLoc = vcProjLocation.relativize(file).toString();
// isAltSrc and isReplacedByAltSrc apply to all configs for a file
if (BuildConfig.matchesRelativeAltSrcInclude(
file.toAbsolutePath().toString())) {
// current file is an alternate source file so track it
isAltSrc = true;
BuildConfig.trackRelativeAltSrcFile(
file.toAbsolutePath().toString());
} else if (BuildConfig.matchesRelativeAltSrcFile(
file.toAbsolutePath().toString())) {
// current file is a regular file that matches an alternate
// source file so yack about replacing the regular file
isReplacedByAltSrc = true;
System.out.println("INFO: alternate source file '" +
BuildConfig.getMatchingRelativeAltSrcFile(
file.toAbsolutePath().toString()) +
"' replaces '" + fileLoc + "'");
}
for (BuildConfig cfg : allConfigs) { for (BuildConfig cfg : allConfigs) {
if (cfg.lookupHashFieldInContext("IgnoreFile", fileName) != null) { if (cfg.lookupHashFieldInContext("IgnoreFile", fileName) != null) {
useIgnore = true; useIgnore = true;
@ -57,10 +103,9 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
} }
} }
String tagName = wg.getFileTagFromSuffix(fileName); String tagName = wg10.getFileTagFromSuffix(fileName);
String fileLoc = vcProjLocation.relativize(file).toString();
if (!useIgnore && !disablePch && !usePch) { if (!useIgnore && !disablePch && !usePch && !isReplacedByAltSrc) {
wg.tag(tagName, new String[] { "Include", fileLoc}); wg.tag(tagName, new String[] { "Include", fileLoc});
} else { } else {
wg.startTag( wg.startTag(
@ -78,12 +123,17 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
if (disablePch) { if (disablePch) {
wg.tag("PrecompiledHeader", "Condition", "'$(Configuration)|$(Platform)'=='" + cfg.get("Name") + "'"); wg.tag("PrecompiledHeader", "Condition", "'$(Configuration)|$(Platform)'=='" + cfg.get("Name") + "'");
} }
if (isReplacedByAltSrc) {
wg.tagData("ExcludedFromBuild", "true", "Condition",
"'$(Configuration)|$(Platform)'=='" +
cfg.get("Name") + "'");
}
} }
wg.endTag(); wg.endTag();
} }
String filter = startDir.relativize(file.getParent().toAbsolutePath()).toString(); String filter = startDir.relativize(file.getParent().toAbsolutePath()).toString();
wg.addFilterDependency(fileLoc, filter); wg10.addFilterDependency(fileLoc, filter);
return CONTINUE; return CONTINUE;
} }
@ -112,7 +162,7 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
if (!hide) { if (!hide) {
String name = startDir.relativize(path.toAbsolutePath()).toString(); String name = startDir.relativize(path.toAbsolutePath()).toString();
if (!"".equals(name)) { if (!"".equals(name)) {
wg.addFilter(name); wg10.addFilter(name);
} }
attributes.push(newAttr); attributes.push(newAttr);
@ -137,6 +187,4 @@ public class FileTreeCreatorVC10 extends FileTreeCreator {
public void writeFileTree() throws IOException { public void writeFileTree() throws IOException {
Files.walkFileTree(this.startDir, this); Files.walkFileTree(this.startDir, this);
} }
}
}

View File

@ -12,7 +12,7 @@ import java.util.Vector;
public class FileTreeCreatorVC7 extends FileTreeCreator { public class FileTreeCreatorVC7 extends FileTreeCreator {
public FileTreeCreatorVC7(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) { public FileTreeCreatorVC7(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
super(startDir, allConfigs, null); super(startDir, allConfigs, wg);
} }
@Override @Override

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -39,10 +39,15 @@ public class ProjectCreator {
+ "jvm.dll; no trailing slash>"); + "jvm.dll; no trailing slash>");
System.err.println(" If any of the above are specified, " System.err.println(" If any of the above are specified, "
+ "they must all be."); + "they must all be.");
System.err.println(" Note: if '-altRelativeInclude' option below is "
+ "used, then the '-relativeAltSrcInclude' option must be used "
+ "to specify the alternate source dir, e.g., 'src\\closed'");
System.err.println(" Additional, optional arguments, which can be " System.err.println(" Additional, optional arguments, which can be "
+ "specified multiple times:"); + "specified multiple times:");
System.err.println(" -absoluteInclude <string containing absolute " System.err.println(" -absoluteInclude <string containing absolute "
+ "path to include directory>"); + "path to include directory>");
System.err.println(" -altRelativeInclude <string containing "
+ "alternate include directory relative to -envVar>");
System.err.println(" -relativeInclude <string containing include " System.err.println(" -relativeInclude <string containing include "
+ "directory relative to -envVar>"); + "directory relative to -envVar>");
System.err.println(" -define <preprocessor flag to be #defined " System.err.println(" -define <preprocessor flag to be #defined "

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -140,10 +140,17 @@ public abstract class WinGammaPlatform {
"already exist>"); "already exist>");
System.err.println(" If any of the above are specified, "+ System.err.println(" If any of the above are specified, "+
"they must all be."); "they must all be.");
System.err.println(" Note: if '-altRelativeInclude' option below " +
"is used, then the '-relativeAltSrcInclude' " +
"option must be used to specify the alternate " +
"source dir, e.g., 'src\\closed'");
System.err.println(" Additional, optional arguments, which can be " + System.err.println(" Additional, optional arguments, which can be " +
"specified multiple times:"); "specified multiple times:");
System.err.println(" -absoluteInclude <string containing absolute " + System.err.println(" -absoluteInclude <string containing absolute " +
"path to include directory>"); "path to include directory>");
System.err.println(" -altRelativeInclude <string containing " +
"alternate include directory relative to " +
"-sourceBase>");
System.err.println(" -relativeInclude <string containing include " + System.err.println(" -relativeInclude <string containing include " +
"directory relative to -sourceBase>"); "directory relative to -sourceBase>");
System.err.println(" -define <preprocessor flag to be #defined " + System.err.println(" -define <preprocessor flag to be #defined " +
@ -343,6 +350,12 @@ public abstract class WinGammaPlatform {
HsArgHandler.VECTOR HsArgHandler.VECTOR
), ),
new HsArgRule("-altRelativeInclude",
"AltRelativeInclude",
null,
HsArgHandler.VECTOR
),
new HsArgRule("-relativeInclude", new HsArgRule("-relativeInclude",
"RelativeInclude", "RelativeInclude",
null, null,
@ -355,6 +368,12 @@ public abstract class WinGammaPlatform {
HsArgHandler.VECTOR HsArgHandler.VECTOR
), ),
new HsArgRule("-relativeAltSrcInclude",
"RelativeAltSrcInclude",
null,
HsArgHandler.STRING
),
new HsArgRule("-relativeSrcInclude", new HsArgRule("-relativeSrcInclude",
"RelativeSrcInclude", "RelativeSrcInclude",
null, null,
@ -560,10 +579,6 @@ public abstract class WinGammaPlatform {
allConfigs.add(new TieredFastDebugConfig()); allConfigs.add(new TieredFastDebugConfig());
allConfigs.add(new TieredProductConfig()); allConfigs.add(new TieredProductConfig());
allConfigs.add(new CoreDebugConfig());
allConfigs.add(new CoreFastDebugConfig());
allConfigs.add(new CoreProductConfig());
return allConfigs; return allConfigs;
} }

View File

@ -1,3 +1,27 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import java.io.File; import java.io.File;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
@ -24,7 +48,7 @@ public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 {
public void writeProjectFile(String projectFileName, String projectName, public void writeProjectFile(String projectFileName, String projectName,
Vector<BuildConfig> allConfigs) throws IOException { Vector<BuildConfig> allConfigs) throws IOException {
System.out.println(); System.out.println();
System.out.print(" Writing .vcxproj file: " + projectFileName); System.out.println(" Writing .vcxproj file: " + projectFileName);
String projDir = Util.normalize(new File(projectFileName).getParent()); String projDir = Util.normalize(new File(projectFileName).getParent());
@ -114,7 +138,7 @@ public class WinGammaPlatformVC10 extends WinGammaPlatformVC7 {
endTag(); endTag();
printWriter.close(); printWriter.close();
System.out.println(" Done."); System.out.println(" Done writing .vcxproj file.");
writeFilterFile(projectFileName, projectName, allConfigs, projDir); writeFilterFile(projectFileName, projectName, allConfigs, projDir);
writeUserFile(projectFileName, allConfigs); writeUserFile(projectFileName, allConfigs);

View File

@ -139,19 +139,22 @@ public class WinGammaPlatformVC7 extends WinGammaPlatform {
tagV("Tool", cfg.getV("LinkerFlags")); tagV("Tool", cfg.getV("LinkerFlags"));
tag("Tool", String postBuildCmd = BuildConfig.getFieldString(null,
new String[] { "PostbuildCommand");
"Name", if (postBuildCmd != null) {
"VCPostBuildEventTool", tag("Tool",
"Description", new String[] {
BuildConfig "Name",
.getFieldString(null, "PostbuildDescription"), "VCPostBuildEventTool",
// Caution: String.replace(String,String) is available "Description",
// from JDK5 onwards only BuildConfig
"CommandLine", .getFieldString(null, "PostbuildDescription"),
cfg.expandFormat(BuildConfig.getFieldString(null, // Caution: String.replace(String,String) is available
"PostbuildCommand").replace("\t", // from JDK5 onwards only
"&#x0D;&#x0A;")) }); "CommandLine",
cfg.expandFormat(postBuildCmd.replace("\t",
"&#x0D;&#x0A;")) });
}
tag("Tool", new String[] { "Name", "VCPreBuildEventTool" }); tag("Tool", new String[] { "Name", "VCPreBuildEventTool" });

View File

@ -2557,6 +2557,26 @@ void java_lang_ref_SoftReference::set_clock(jlong value) {
*offset = value; *offset = value;
} }
// Support for java_lang_invoke_DirectMethodHandle
int java_lang_invoke_DirectMethodHandle::_member_offset;
oop java_lang_invoke_DirectMethodHandle::member(oop dmh) {
oop member_name = NULL;
bool is_dmh = dmh->is_oop() && java_lang_invoke_DirectMethodHandle::is_instance(dmh);
assert(is_dmh, "a DirectMethodHandle oop is expected");
if (is_dmh) {
member_name = dmh->obj_field(member_offset_in_bytes());
}
return member_name;
}
void java_lang_invoke_DirectMethodHandle::compute_offsets() {
Klass* klass_oop = SystemDictionary::DirectMethodHandle_klass();
if (klass_oop != NULL && EnableInvokeDynamic) {
compute_offset(_member_offset, klass_oop, vmSymbols::member_name(), vmSymbols::java_lang_invoke_MemberName_signature());
}
}
// Support for java_lang_invoke_MethodHandle // Support for java_lang_invoke_MethodHandle
@ -3205,6 +3225,7 @@ void JavaClasses::compute_offsets() {
java_lang_ThreadGroup::compute_offsets(); java_lang_ThreadGroup::compute_offsets();
if (EnableInvokeDynamic) { if (EnableInvokeDynamic) {
java_lang_invoke_MethodHandle::compute_offsets(); java_lang_invoke_MethodHandle::compute_offsets();
java_lang_invoke_DirectMethodHandle::compute_offsets();
java_lang_invoke_MemberName::compute_offsets(); java_lang_invoke_MemberName::compute_offsets();
java_lang_invoke_LambdaForm::compute_offsets(); java_lang_invoke_LambdaForm::compute_offsets();
java_lang_invoke_MethodType::compute_offsets(); java_lang_invoke_MethodType::compute_offsets();

View File

@ -976,6 +976,32 @@ class java_lang_invoke_MethodHandle: AllStatic {
static int form_offset_in_bytes() { return _form_offset; } static int form_offset_in_bytes() { return _form_offset; }
}; };
// Interface to java.lang.invoke.DirectMethodHandle objects
class java_lang_invoke_DirectMethodHandle: AllStatic {
friend class JavaClasses;
private:
static int _member_offset; // the MemberName of this DMH
static void compute_offsets();
public:
// Accessors
static oop member(oop mh);
// Testers
static bool is_subclass(Klass* klass) {
return klass->is_subclass_of(SystemDictionary::DirectMethodHandle_klass());
}
static bool is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
// Accessors for code generation:
static int member_offset_in_bytes() { return _member_offset; }
};
// Interface to java.lang.invoke.LambdaForm objects // Interface to java.lang.invoke.LambdaForm objects
// (These are a private interface for managing adapter code generation.) // (These are a private interface for managing adapter code generation.)

View File

@ -151,6 +151,7 @@ class SymbolPropertyTable;
do_klass(reflect_CallerSensitive_klass, sun_reflect_CallerSensitive, Opt ) \ do_klass(reflect_CallerSensitive_klass, sun_reflect_CallerSensitive, Opt ) \
\ \
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \ /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
do_klass(DirectMethodHandle_klass, java_lang_invoke_DirectMethodHandle, Opt ) \
do_klass(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292 ) \ do_klass(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292 ) \
do_klass(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292 ) \ do_klass(MemberName_klass, java_lang_invoke_MemberName, Pre_JSR292 ) \
do_klass(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292 ) \ do_klass(MethodHandleNatives_klass, java_lang_invoke_MethodHandleNatives, Pre_JSR292 ) \

View File

@ -255,6 +255,7 @@
/* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \ /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \
template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \ template(java_lang_invoke_CallSite, "java/lang/invoke/CallSite") \
template(java_lang_invoke_ConstantCallSite, "java/lang/invoke/ConstantCallSite") \ template(java_lang_invoke_ConstantCallSite, "java/lang/invoke/ConstantCallSite") \
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
template(java_lang_invoke_MutableCallSite, "java/lang/invoke/MutableCallSite") \ template(java_lang_invoke_MutableCallSite, "java/lang/invoke/MutableCallSite") \
template(java_lang_invoke_VolatileCallSite, "java/lang/invoke/VolatileCallSite") \ template(java_lang_invoke_VolatileCallSite, "java/lang/invoke/VolatileCallSite") \
template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \ template(java_lang_invoke_MethodHandle, "java/lang/invoke/MethodHandle") \
@ -352,6 +353,7 @@
template(thread_id_name, "tid") \ template(thread_id_name, "tid") \
template(newInstance0_name, "newInstance0") \ template(newInstance0_name, "newInstance0") \
template(limit_name, "limit") \ template(limit_name, "limit") \
template(member_name, "member") \
template(forName_name, "forName") \ template(forName_name, "forName") \
template(forName0_name, "forName0") \ template(forName0_name, "forName0") \
template(isJavaIdentifierStart_name, "isJavaIdentifierStart") \ template(isJavaIdentifierStart_name, "isJavaIdentifierStart") \

View File

@ -50,6 +50,7 @@
#include "memory/genMarkSweep.hpp" #include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp" #include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp" #include "memory/iterator.hpp"
#include "memory/padded.hpp"
#include "memory/referencePolicy.hpp" #include "memory/referencePolicy.hpp"
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"
#include "memory/tenuredGeneration.hpp" #include "memory/tenuredGeneration.hpp"

View File

@ -927,11 +927,9 @@ void ParNewGeneration::collect(bool full,
workers->active_workers(), workers->active_workers(),
Threads::number_of_non_daemon_threads()); Threads::number_of_non_daemon_threads());
workers->set_active_workers(active_workers); workers->set_active_workers(active_workers);
_next_gen = gch->next_gen(this);
assert(_next_gen != NULL,
"This must be the youngest gen, and not the only gen");
assert(gch->n_gens() == 2, assert(gch->n_gens() == 2,
"Par collection currently only works with single older gen."); "Par collection currently only works with single older gen.");
_next_gen = gch->next_gen(this);
// Do we have to avoid promotion_undo? // Do we have to avoid promotion_undo?
if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
set_avoid_promotion_undo(true); set_avoid_promotion_undo(true);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc_implementation/shared/parGCAllocBuffer.hpp" #include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp" #include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp" #include "memory/defNewGeneration.hpp"
#include "memory/padded.hpp"
#include "utilities/taskqueue.hpp" #include "utilities/taskqueue.hpp"
class ChunkArray; class ChunkArray;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PAROOPCLOSURES_HPP #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PAROOPCLOSURES_HPP
#include "memory/genOopClosures.hpp" #include "memory/genOopClosures.hpp"
#include "memory/padded.hpp"
// Closures for ParNewGeneration // Closures for ParNewGeneration

View File

@ -29,14 +29,16 @@
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/mutableSpace.hpp" #include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp" #include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp" #include "oops/oop.psgc.inline.hpp"
PSPromotionManager** PSPromotionManager::_manager_array = NULL; PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL; OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
PSOldGen* PSPromotionManager::_old_gen = NULL; PSOldGen* PSPromotionManager::_old_gen = NULL;
MutableSpace* PSPromotionManager::_young_space = NULL; MutableSpace* PSPromotionManager::_young_space = NULL;
void PSPromotionManager::initialize() { void PSPromotionManager::initialize() {
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@ -45,8 +47,10 @@ void PSPromotionManager::initialize() {
_old_gen = heap->old_gen(); _old_gen = heap->old_gen();
_young_space = heap->young_gen()->to_space(); _young_space = heap->young_gen()->to_space();
// To prevent false sharing, we pad the PSPromotionManagers
// and make sure that the first instance starts at a cache line.
assert(_manager_array == NULL, "Attempt to initialize twice"); assert(_manager_array == NULL, "Attempt to initialize twice");
_manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC); _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
guarantee(_manager_array != NULL, "Could not initialize promotion manager"); guarantee(_manager_array != NULL, "Could not initialize promotion manager");
_stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads); _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
@ -54,26 +58,21 @@ void PSPromotionManager::initialize() {
// Create and register the PSPromotionManager(s) for the worker threads. // Create and register the PSPromotionManager(s) for the worker threads.
for(uint i=0; i<ParallelGCThreads; i++) { for(uint i=0; i<ParallelGCThreads; i++) {
_manager_array[i] = new PSPromotionManager(); stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
} }
// The VMThread gets its own PSPromotionManager, which is not available // The VMThread gets its own PSPromotionManager, which is not available
// for work stealing. // for work stealing.
_manager_array[ParallelGCThreads] = new PSPromotionManager();
guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
} }
PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) { PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range"); assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
assert(_manager_array != NULL, "Sanity"); assert(_manager_array != NULL, "Sanity");
return _manager_array[index]; return &_manager_array[index];
} }
PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() { PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
assert(_manager_array != NULL, "Sanity"); assert(_manager_array != NULL, "Sanity");
return _manager_array[ParallelGCThreads]; return &_manager_array[ParallelGCThreads];
} }
void PSPromotionManager::pre_scavenge() { void PSPromotionManager::pre_scavenge() {

View File

@ -29,6 +29,8 @@
#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp" #include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/taskqueue.hpp" #include "utilities/taskqueue.hpp"
// //
@ -51,14 +53,14 @@ class MutableSpace;
class PSOldGen; class PSOldGen;
class ParCompactionManager; class ParCompactionManager;
class PSPromotionManager : public CHeapObj<mtGC> { class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
friend class PSScavenge; friend class PSScavenge;
friend class PSRefProcTaskExecutor; friend class PSRefProcTaskExecutor;
private: private:
static PSPromotionManager** _manager_array; static PaddedEnd<PSPromotionManager>* _manager_array;
static OopStarTaskQueueSet* _stack_array_depth; static OopStarTaskQueueSet* _stack_array_depth;
static PSOldGen* _old_gen; static PSOldGen* _old_gen;
static MutableSpace* _young_space; static MutableSpace* _young_space;
#if TASKQUEUE_STATS #if TASKQUEUE_STATS
size_t _masked_pushes; size_t _masked_pushes;

View File

@ -32,7 +32,7 @@
inline PSPromotionManager* PSPromotionManager::manager_array(int index) { inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
assert(_manager_array != NULL, "access of NULL manager_array"); assert(_manager_array != NULL, "access of NULL manager_array");
assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access"); assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
return _manager_array[index]; return &_manager_array[index];
} }
template <class T> template <class T>

View File

@ -32,6 +32,7 @@
#if INCLUDE_SERVICES #if INCLUDE_SERVICES
void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) { void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) {
#if INCLUDE_TRACE
assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId), assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
"Only call this method if the event is enabled"); "Only call this method if the event is enabled");
@ -42,6 +43,7 @@ void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong
event.set_totalSize(entry->words() * BytesPerWord); event.set_totalSize(entry->words() * BytesPerWord);
event.set_endtime(timestamp); event.set_endtime(timestamp);
event.commit(); event.commit();
#endif // INCLUDE_TRACE
} }
bool ObjectCountEventSender::should_send_event() { bool ObjectCountEventSender::should_send_event() {

View File

@ -1209,3 +1209,26 @@ IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* threa
size_of_arguments * Interpreter::stackElementSize); size_of_arguments * Interpreter::stackElementSize);
IRT_END IRT_END
#endif #endif
#if INCLUDE_JVMTI
// This is a support of the JVMTI PopFrame interface.
// Make sure it is an invokestatic of a polymorphic intrinsic that has a member_name argument
// and return it as a vm_result so that it can be reloaded in the list of invokestatic parameters.
// The dmh argument is a reference to a DirectMethodHandle that has a member name field.
IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address dmh,
Method* method, address bcp))
Bytecodes::Code code = Bytecodes::code_at(method, bcp);
if (code != Bytecodes::_invokestatic) {
return;
}
ConstantPool* cpool = method->constants();
int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
Symbol* mname = cpool->name_ref_at(cp_index);
if (MethodHandles::has_member_arg(cname, mname)) {
oop member_name = java_lang_invoke_DirectMethodHandle::member((oop)dmh);
thread->set_vm_result(member_name);
}
IRT_END
#endif // INCLUDE_JVMTI

View File

@ -95,6 +95,9 @@ class InterpreterRuntime: AllStatic {
static void create_exception(JavaThread* thread, char* name, char* message); static void create_exception(JavaThread* thread, char* name, char* message);
static void create_klass_exception(JavaThread* thread, char* name, oopDesc* obj); static void create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception); static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
#if INCLUDE_JVMTI
static void member_name_arg_or_null(JavaThread* thread, address dmh, Method* m, address bcp);
#endif
static void throw_pending_exception(JavaThread* thread); static void throw_pending_exception(JavaThread* thread);
// Statics & fields // Statics & fields

View File

@ -310,46 +310,31 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
_ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this); _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
} }
void CardTableRS::clear_into_younger(Generation* gen) { void CardTableRS::clear_into_younger(Generation* old_gen) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); assert(old_gen->level() == 1, "Should only be called for the old generation");
// Generations younger than gen have been evacuated. We can clear // The card tables for the youngest gen need never be cleared.
// card table entries for gen (we know that it has no pointers
// to younger gens) and for those below. The card tables for
// the youngest gen need never be cleared.
// There's a bit of subtlety in the clear() and invalidate() // There's a bit of subtlety in the clear() and invalidate()
// methods that we exploit here and in invalidate_or_clear() // methods that we exploit here and in invalidate_or_clear()
// below to avoid missing cards at the fringes. If clear() or // below to avoid missing cards at the fringes. If clear() or
// invalidate() are changed in the future, this code should // invalidate() are changed in the future, this code should
// be revisited. 20040107.ysr // be revisited. 20040107.ysr
Generation* g = gen; clear(old_gen->prev_used_region());
for(Generation* prev_gen = gch->prev_gen(g);
prev_gen != NULL;
g = prev_gen, prev_gen = gch->prev_gen(g)) {
MemRegion to_be_cleared_mr = g->prev_used_region();
clear(to_be_cleared_mr);
}
} }
void CardTableRS::invalidate_or_clear(Generation* gen, bool younger) { void CardTableRS::invalidate_or_clear(Generation* old_gen) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); assert(old_gen->level() == 1, "Should only be called for the old generation");
// For each generation gen (and younger) // Invalidate the cards for the currently occupied part of
// invalidate the cards for the currently occupied part // the old generation and clear the cards for the
// of that generation and clear the cards for the
// unoccupied part of the generation (if any, making use // unoccupied part of the generation (if any, making use
// of that generation's prev_used_region to determine that // of that generation's prev_used_region to determine that
// region). No need to do anything for the youngest // region). No need to do anything for the youngest
// generation. Also see note#20040107.ysr above. // generation. Also see note#20040107.ysr above.
Generation* g = gen; MemRegion used_mr = old_gen->used_region();
for(Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL; MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
g = prev_gen, prev_gen = gch->prev_gen(g)) { if (!to_be_cleared_mr.is_empty()) {
MemRegion used_mr = g->used_region(); clear(to_be_cleared_mr);
MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
if (!to_be_cleared_mr.is_empty()) {
clear(to_be_cleared_mr);
}
invalidate(used_mr);
if (!younger) break;
} }
invalidate(used_mr);
} }
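The region arithmetic behind invalidate_or_clear() above can be read off from a small, self-contained sketch (the Region type and the prefix assumption are illustrative only, not HotSpot's MemRegion): cards for the part of the old generation that was occupied before the collection but is free afterwards get cleared, while cards for the part that is still occupied get invalidated.

#include <cstddef>
#include <cstdio>

// Half-open address range [start, end), a simplified stand-in for MemRegion.
struct Region {
  std::size_t start;
  std::size_t end;
};

// prev_used minus used, assuming used is a prefix of prev_used (typical after
// a compacting collection shrinks the occupied part of the generation).
static Region trailing_difference(Region prev_used, Region used) {
  return Region{used.end, prev_used.end};
}

int main() {
  Region prev_used{0, 1000};   // occupied before the collection
  Region used{0, 600};         // occupied after the collection
  Region to_clear = trailing_difference(prev_used, used);
  std::printf("clear cards for [%zu, %zu), invalidate cards for [%zu, %zu)\n",
              to_clear.start, to_clear.end, used.start, used.end);
  return 0;
}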

View File

@ -142,12 +142,12 @@ public:
void verify_aligned_region_empty(MemRegion mr); void verify_aligned_region_empty(MemRegion mr);
void clear(MemRegion mr) { _ct_bs->clear(mr); } void clear(MemRegion mr) { _ct_bs->clear(mr); }
void clear_into_younger(Generation* gen); void clear_into_younger(Generation* old_gen);
void invalidate(MemRegion mr, bool whole_heap = false) { void invalidate(MemRegion mr, bool whole_heap = false) {
_ct_bs->invalidate(mr, whole_heap); _ct_bs->invalidate(mr, whole_heap);
} }
void invalidate_or_clear(Generation* gen, bool younger); void invalidate_or_clear(Generation* old_gen);
static uintx ct_max_alignment_constraint() { static uintx ct_max_alignment_constraint() {
return CardTableModRefBS::ct_max_alignment_constraint(); return CardTableModRefBS::ct_max_alignment_constraint();

View File

@ -567,8 +567,6 @@ void DefNewGeneration::collect(bool full,
gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
_next_gen = gch->next_gen(this); _next_gen = gch->next_gen(this);
assert(_next_gen != NULL,
"This must be the youngest gen, and not the only gen");
// If the next generation is too full to accommodate promotion // If the next generation is too full to accommodate promotion
// from this generation, pass on collection; let the next generation // from this generation, pass on collection; let the next generation
@ -901,8 +899,6 @@ bool DefNewGeneration::collection_attempt_is_safe() {
if (_next_gen == NULL) { if (_next_gen == NULL) {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
_next_gen = gch->next_gen(this); _next_gen = gch->next_gen(this);
assert(_next_gen != NULL,
"This must be the youngest gen, and not the only gen");
} }
return _next_gen->promotion_attempt_is_safe(used()); return _next_gen->promotion_attempt_is_safe(used());
} }

View File

@ -1070,13 +1070,13 @@ GenCollectedHeap* GenCollectedHeap::heap() {
void GenCollectedHeap::prepare_for_compaction() { void GenCollectedHeap::prepare_for_compaction() {
Generation* scanning_gen = _gens[_n_gens-1]; guarantee(_n_gens == 2, "Wrong number of generations");
Generation* old_gen = _gens[1];
// Start by compacting into same gen. // Start by compacting into same gen.
CompactPoint cp(scanning_gen, NULL, NULL); CompactPoint cp(old_gen, NULL, NULL);
while (scanning_gen != NULL) { old_gen->prepare_for_compaction(&cp);
scanning_gen->prepare_for_compaction(&cp); Generation* young_gen = _gens[0];
scanning_gen = prev_gen(scanning_gen); young_gen->prepare_for_compaction(&cp);
}
} }
GCStats* GenCollectedHeap::gc_stats(int level) const { GCStats* GenCollectedHeap::gc_stats(int level) const {
@ -1245,27 +1245,14 @@ void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
generation_iterate(&ep_cl, false); generation_iterate(&ep_cl, false);
} }
oop GenCollectedHeap::handle_failed_promotion(Generation* gen, oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
oop obj, oop obj,
size_t obj_size) { size_t obj_size) {
guarantee(old_gen->level() == 1, "We only get here with an old generation");
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
HeapWord* result = NULL; HeapWord* result = NULL;
// First give each higher generation a chance to allocate the promoted object. result = old_gen->expand_and_allocate(obj_size, false);
Generation* allocator = next_gen(gen);
if (allocator != NULL) {
do {
result = allocator->allocate(obj_size, false);
} while (result == NULL && (allocator = next_gen(allocator)) != NULL);
}
if (result == NULL) {
// Then give gen and higher generations a chance to expand and allocate the
// object.
do {
result = gen->expand_and_allocate(obj_size, false);
} while (result == NULL && (gen = next_gen(gen)) != NULL);
}
if (result != NULL) { if (result != NULL) {
Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);

View File

@ -368,25 +368,23 @@ public:
// collection. // collection.
virtual bool is_maximal_no_gc() const; virtual bool is_maximal_no_gc() const;
// Return the generation before "gen", or else NULL. // Return the generation before "gen".
Generation* prev_gen(Generation* gen) const { Generation* prev_gen(Generation* gen) const {
int l = gen->level(); int l = gen->level();
if (l == 0) return NULL; guarantee(l > 0, "Out of bounds");
else return _gens[l-1]; return _gens[l-1];
} }
// Return the generation after "gen", or else NULL. // Return the generation after "gen".
Generation* next_gen(Generation* gen) const { Generation* next_gen(Generation* gen) const {
int l = gen->level() + 1; int l = gen->level() + 1;
if (l == _n_gens) return NULL; guarantee(l < _n_gens, "Out of bounds");
else return _gens[l]; return _gens[l];
} }
Generation* get_gen(int i) const { Generation* get_gen(int i) const {
if (i >= 0 && i < _n_gens) guarantee(i >= 0 && i < _n_gens, "Out of bounds");
return _gens[i]; return _gens[i];
else
return NULL;
} }
int n_gens() const { int n_gens() const {
@ -485,9 +483,9 @@ public:
// Promotion of obj into gen failed. Try to promote obj to higher // Promotion of obj into gen failed. Try to promote obj to higher
// gens in ascending order; return the new location of obj if successful. // gens in ascending order; return the new location of obj if successful.
// Otherwise, try expand-and-allocate for obj in each generation starting at // Otherwise, try expand-and-allocate for obj in both the young and old
// gen; return the new location of obj if successful. Otherwise, return NULL. // generation; return the new location of obj if successful. Otherwise, return NULL.
oop handle_failed_promotion(Generation* gen, oop handle_failed_promotion(Generation* old_gen,
oop obj, oop obj,
size_t obj_size); size_t obj_size);

View File

@ -52,8 +52,8 @@
#include "utilities/copy.hpp" #include "utilities/copy.hpp"
#include "utilities/events.hpp" #include "utilities/events.hpp"
void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
bool clear_all_softrefs) { guarantee(level == 1, "We always collect both old and young.");
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
@ -84,11 +84,6 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
// Capture heap size before collection for printing. // Capture heap size before collection for printing.
size_t gch_prev_used = gch->used(); size_t gch_prev_used = gch->used();
// Some of the card table updates below assume that the perm gen is
// also being collected.
assert(level == gch->n_gens() - 1,
"All generations are being collected, ergo perm gen too.");
// Capture used regions for each generation that will be // Capture used regions for each generation that will be
// subject to collection, so that card table adjustments can // subject to collection, so that card table adjustments can
// be made intelligently (see clear / invalidate further below). // be made intelligently (see clear / invalidate further below).
@ -126,17 +121,15 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
all_empty = all_empty && gch->get_gen(i)->used() == 0; all_empty = all_empty && gch->get_gen(i)->used() == 0;
} }
GenRemSet* rs = gch->rem_set(); GenRemSet* rs = gch->rem_set();
Generation* old_gen = gch->get_gen(level);
// Clear/invalidate below make use of the "prev_used_regions" saved earlier. // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
if (all_empty) { if (all_empty) {
// We've evacuated all generations below us. // We've evacuated all generations below us.
Generation* g = gch->get_gen(level); rs->clear_into_younger(old_gen);
rs->clear_into_younger(g);
} else { } else {
// Invalidate the cards corresponding to the currently used // Invalidate the cards corresponding to the currently used
// region and clear those corresponding to the evacuated region // region and clear those corresponding to the evacuated region.
// of all generations just collected (i.e. level and younger). rs->invalidate_or_clear(old_gen);
rs->invalidate_or_clear(gch->get_gen(level),
true /* younger */);
} }
Threads::gc_epilogue(); Threads::gc_epilogue();

View File

@ -135,7 +135,7 @@ public:
// younger than gen from generations gen and older. // younger than gen from generations gen and older.
// The parameter clear_perm indicates if the perm_gen's // The parameter clear_perm indicates if the perm_gen's
// remembered set should also be processed/cleared. // remembered set should also be processed/cleared.
virtual void clear_into_younger(Generation* gen) = 0; virtual void clear_into_younger(Generation* old_gen) = 0;
// Informs the RS that refs in the given "mr" may have changed // Informs the RS that refs in the given "mr" may have changed
// arbitrarily, and therefore may contain old-to-young pointers. // arbitrarily, and therefore may contain old-to-young pointers.
@ -146,11 +146,8 @@ public:
// Informs the RS that refs in this generation // Informs the RS that refs in this generation
// may have changed arbitrarily, and therefore may contain // may have changed arbitrarily, and therefore may contain
// old-to-young pointers in arbitrary locations. The parameter // old-to-young pointers in arbitrary locations.
// younger indicates if the same should be done for younger generations virtual void invalidate_or_clear(Generation* old_gen) = 0;
// as well. The parameter perm indicates if the same should be done for
// perm gen as well.
virtual void invalidate_or_clear(Generation* gen, bool younger) = 0;
}; };
#endif // SHARE_VM_MEMORY_GENREMSET_HPP #endif // SHARE_VM_MEMORY_GENREMSET_HPP

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_PADDED_HPP
#define SHARE_VM_MEMORY_PADDED_HPP
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
// expected cache line size (a power of two). The first addend avoids sharing
// when the start address is not a multiple of alignment; the second maintains
// alignment of starting addresses that happen to be a multiple.
#define PADDING_SIZE(type, alignment) \
((alignment) + align_size_up_(sizeof(type), alignment))
// Templates to create a subclass padded to avoid cache line sharing. These are
// effective only when applied to derived-most (leaf) classes.
// When no args are passed to the base ctor.
template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
class Padded : public T {
private:
char _pad_buf_[PADDING_SIZE(T, alignment)];
};
// When either 0 or 1 args may be passed to the base ctor.
template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
class Padded01 : public T {
public:
Padded01(): T() { }
Padded01(Arg1T arg1): T(arg1) { }
private:
char _pad_buf_[PADDING_SIZE(T, alignment)];
};
// Super class of PaddedEnd when pad_size != 0.
template <class T, size_t pad_size>
class PaddedEndImpl : public T {
private:
char _pad_buf[pad_size];
};
// Super class of PaddedEnd when pad_size == 0.
template <class T>
class PaddedEndImpl<T, /*pad_size*/ 0> : public T {
// No padding.
};
#define PADDED_END_SIZE(type, alignment) (align_size_up_(sizeof(type), alignment) - sizeof(type))
// More memory-conservative implementation of Padded. The subclass adds the
// minimal amount of padding needed to make the size of the objects aligned.
// This helps reduce false sharing if the start address is a multiple of
// alignment.
template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
class PaddedEnd : public PaddedEndImpl<T, PADDED_END_SIZE(T, alignment)> {
// C++ doesn't allow zero-length arrays. The padding is put in a
// super class that is specialized for the pad_size == 0 case.
};
// Helper class to create an array of PaddedEnd<T> objects. All elements will
// start at a multiple of alignment and the size will be aligned to alignment.
template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
class PaddedArray {
public:
// Creates an aligned padded array.
// The memory can't be deleted since the raw memory chunk is not returned.
static PaddedEnd<T>* create_unfreeable(uint length);
};
#endif // SHARE_VM_MEMORY_PADDED_HPP

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "memory/allocation.inline.hpp"
#include "memory/padded.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
// Creates an aligned padded array.
// The memory can't be deleted since the raw memory chunk is not returned.
template <class T, MEMFLAGS flags, size_t alignment>
PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
// Check that the PaddedEnd class works as intended.
STATIC_ASSERT(is_size_aligned_(sizeof(PaddedEnd<T>), alignment));
// Allocate a chunk of memory large enough to allow for some alignment.
void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);
// Make the initial alignment.
PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_pointer_up(chunk, alignment);
// Call the default constructor for each element.
for (uint i = 0; i < length; i++) {
::new (&aligned_padded_array[i]) T();
}
return aligned_padded_array;
}
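For comparison, here is a self-contained modern-C++ sketch of the same idea (alignas is not available to HotSpot's C++98 code base, which is why padded.hpp builds the padding by hand; PaddedCounter and CACHE_LINE are assumptions of this example): each element is padded and aligned to a cache line, so two GC worker threads updating neighbouring elements never write to the same line.

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t CACHE_LINE = 64;     // assumed cache-line size

// alignas pads and aligns every element to a cache-line boundary, the effect
// PaddedEnd<T> plus PaddedArray<T>::create_unfreeable() achieve manually above.
struct alignas(CACHE_LINE) PaddedCounter {
  std::uint64_t value = 0;
};

int main() {
  static PaddedCounter counters[4];        // e.g. one slot per worker thread
  static_assert(sizeof(PaddedCounter) == CACHE_LINE, "one element per line");
  counters[0].value++;                     // worker 0 only touches line 0
  counters[1].value++;                     // worker 1 only touches line 1
  std::printf("%zu bytes per element\n", sizeof(PaddedCounter));
  return 0;
}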

View File

@ -105,10 +105,9 @@ objArrayOop Universe::_the_empty_class_klass_array = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array = NULL; Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
oop Universe::_the_null_string = NULL; oop Universe::_the_null_string = NULL;
oop Universe::_the_min_jint_string = NULL; oop Universe::_the_min_jint_string = NULL;
LatestMethodOopCache* Universe::_finalizer_register_cache = NULL; LatestMethodCache* Universe::_finalizer_register_cache = NULL;
LatestMethodOopCache* Universe::_loader_addClass_cache = NULL; LatestMethodCache* Universe::_loader_addClass_cache = NULL;
LatestMethodOopCache* Universe::_pd_implies_cache = NULL; LatestMethodCache* Universe::_pd_implies_cache = NULL;
ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL;
oop Universe::_out_of_memory_error_java_heap = NULL; oop Universe::_out_of_memory_error_java_heap = NULL;
oop Universe::_out_of_memory_error_metaspace = NULL; oop Universe::_out_of_memory_error_metaspace = NULL;
oop Universe::_out_of_memory_error_class_metaspace = NULL; oop Universe::_out_of_memory_error_class_metaspace = NULL;
@ -225,7 +224,6 @@ void Universe::serialize(SerializeClosure* f, bool do_all) {
f->do_ptr((void**)&_the_empty_klass_array); f->do_ptr((void**)&_the_empty_klass_array);
_finalizer_register_cache->serialize(f); _finalizer_register_cache->serialize(f);
_loader_addClass_cache->serialize(f); _loader_addClass_cache->serialize(f);
_reflect_invoke_cache->serialize(f);
_pd_implies_cache->serialize(f); _pd_implies_cache->serialize(f);
} }
@ -649,10 +647,9 @@ jint universe_init() {
// We have a heap so create the Method* caches before // We have a heap so create the Method* caches before
// Metaspace::initialize_shared_spaces() tries to populate them. // Metaspace::initialize_shared_spaces() tries to populate them.
Universe::_finalizer_register_cache = new LatestMethodOopCache(); Universe::_finalizer_register_cache = new LatestMethodCache();
Universe::_loader_addClass_cache = new LatestMethodOopCache(); Universe::_loader_addClass_cache = new LatestMethodCache();
Universe::_pd_implies_cache = new LatestMethodOopCache(); Universe::_pd_implies_cache = new LatestMethodCache();
Universe::_reflect_invoke_cache = new ActiveMethodOopsCache();
if (UseSharedSpaces) { if (UseSharedSpaces) {
// Read the data structures supporting the shared spaces (shared // Read the data structures supporting the shared spaces (shared
@ -1088,35 +1085,21 @@ bool universe_post_init() {
vmSymbols::register_method_name(), vmSymbols::register_method_name(),
vmSymbols::register_method_signature()); vmSymbols::register_method_signature());
if (m == NULL || !m->is_static()) { if (m == NULL || !m->is_static()) {
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(), tty->print_cr("Unable to link/verify Finalizer.register method");
"java.lang.ref.Finalizer.register", false); return false; // initialization failed (cannot throw exception yet)
} }
Universe::_finalizer_register_cache->init( Universe::_finalizer_register_cache->init(
SystemDictionary::Finalizer_klass(), m, CHECK_false); SystemDictionary::Finalizer_klass(), m);
// Resolve on first use and initialize class.
// Note: No race-condition here, since a resolve will always return the same result
// Setup method for security checks
k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
k_h = instanceKlassHandle(THREAD, k);
k_h->link_class(CHECK_false);
m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
if (m == NULL || m->is_static()) {
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
"java.lang.reflect.Method.invoke", false);
}
Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
// Setup method for registering loaded classes in class loader vector // Setup method for registering loaded classes in class loader vector
InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false); InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature()); m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
if (m == NULL || m->is_static()) { if (m == NULL || m->is_static()) {
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(), tty->print_cr("Unable to link/verify ClassLoader.addClass method");
"java.lang.ClassLoader.addClass", false); return false; // initialization failed (cannot throw exception yet)
} }
Universe::_loader_addClass_cache->init( Universe::_loader_addClass_cache->init(
SystemDictionary::ClassLoader_klass(), m, CHECK_false); SystemDictionary::ClassLoader_klass(), m);
// Setup method for checking protection domain // Setup method for checking protection domain
InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false); InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
@ -1132,7 +1115,7 @@ bool universe_post_init() {
return false; // initialization failed return false; // initialization failed
} }
Universe::_pd_implies_cache->init( Universe::_pd_implies_cache->init(
SystemDictionary::ProtectionDomain_klass(), m, CHECK_false);; SystemDictionary::ProtectionDomain_klass(), m);;
} }
// The following is initializing converter functions for serialization in // The following is initializing converter functions for serialization in
@ -1455,7 +1438,7 @@ void Universe::compute_verify_oop_data() {
} }
void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) { void LatestMethodCache::init(Klass* k, Method* m) {
if (!UseSharedSpaces) { if (!UseSharedSpaces) {
_klass = k; _klass = k;
} }
@ -1471,88 +1454,7 @@ void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
} }
ActiveMethodOopsCache::~ActiveMethodOopsCache() { Method* LatestMethodCache::get_method() {
if (_prev_methods != NULL) {
delete _prev_methods;
_prev_methods = NULL;
}
}
void ActiveMethodOopsCache::add_previous_version(Method* method) {
assert(Thread::current()->is_VM_thread(),
"only VMThread can add previous versions");
// Only append the previous method if it is executing on the stack.
if (method->on_stack()) {
if (_prev_methods == NULL) {
// This is the first previous version so make some space.
// Start with 2 elements under the assumption that the class
// won't be redefined much.
_prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
}
// RC_TRACE macro has an embedded ResourceMark
RC_TRACE(0x00000100,
("add: %s(%s): adding prev version ref for cached method @%d",
method->name()->as_C_string(), method->signature()->as_C_string(),
_prev_methods->length()));
_prev_methods->append(method);
}
// Since the caller is the VMThread and we are at a safepoint, this is a good
// time to clear out unused method references.
if (_prev_methods == NULL) return;
for (int i = _prev_methods->length() - 1; i >= 0; i--) {
Method* method = _prev_methods->at(i);
assert(method != NULL, "weak method ref was unexpectedly cleared");
if (!method->on_stack()) {
// This method isn't running anymore so remove it
_prev_methods->remove_at(i);
MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
} else {
// RC_TRACE macro has an embedded ResourceMark
RC_TRACE(0x00000400,
("add: %s(%s): previous cached method @%d is alive",
method->name()->as_C_string(), method->signature()->as_C_string(), i));
}
}
} // end add_previous_version()
bool ActiveMethodOopsCache::is_same_method(const Method* method) const {
InstanceKlass* ik = InstanceKlass::cast(klass());
const Method* check_method = ik->method_with_idnum(method_idnum());
assert(check_method != NULL, "sanity check");
if (check_method == method) {
// done with the easy case
return true;
}
if (_prev_methods != NULL) {
// The cached method has been redefined at least once so search
// the previous versions for a match.
for (int i = 0; i < _prev_methods->length(); i++) {
check_method = _prev_methods->at(i);
if (check_method == method) {
// a previous version matches
return true;
}
}
}
// either no previous versions or no previous version matched
return false;
}
Method* LatestMethodOopCache::get_Method() {
if (klass() == NULL) return NULL; if (klass() == NULL) return NULL;
InstanceKlass* ik = InstanceKlass::cast(klass()); InstanceKlass* ik = InstanceKlass::cast(klass());
Method* m = ik->method_with_idnum(method_idnum()); Method* m = ik->method_with_idnum(method_idnum());
View File
@ -41,10 +41,11 @@ class CollectedHeap;
class DeferredObjAllocEvent; class DeferredObjAllocEvent;
// Common parts of a Method* cache. This cache safely interacts with // A helper class for caching a Method* when the user of the cache
// the RedefineClasses API. // only cares about the latest version of the Method*. This cache safely
// // interacts with the RedefineClasses API.
class CommonMethodOopCache : public CHeapObj<mtClass> {
// We save the Klass* and the idnum of Method* in order to get // We save the Klass* and the idnum of Method* in order to get
// the current cached Method*. // the current cached Method*.
private: private:
@ -52,12 +53,14 @@ class CommonMethodOopCache : public CHeapObj<mtClass> {
int _method_idnum; int _method_idnum;
public: public:
CommonMethodOopCache() { _klass = NULL; _method_idnum = -1; } LatestMethodCache() { _klass = NULL; _method_idnum = -1; }
~CommonMethodOopCache() { _klass = NULL; _method_idnum = -1; } ~LatestMethodCache() { _klass = NULL; _method_idnum = -1; }
void init(Klass* k, Method* m, TRAPS); void init(Klass* k, Method* m);
Klass* klass() const { return _klass; } Klass* klass() const { return _klass; }
int method_idnum() const { return _method_idnum; } int method_idnum() const { return _method_idnum; }
Method* get_method();
// Enhanced Class Redefinition support // Enhanced Class Redefinition support
void classes_do(void f(Klass*)) { void classes_do(void f(Klass*)) {
@ -72,39 +75,6 @@ class CommonMethodOopCache : public CHeapObj<mtClass> {
}; };
// A helper class for caching a Method* when the user of the cache
// cares about all versions of the Method*.
//
class ActiveMethodOopsCache : public CommonMethodOopCache {
// This subclass adds weak references to older versions of the
// Method* and a query method for a Method*.
private:
// If the cached Method* has not been redefined, then
// _prev_methods will be NULL. If all of the previous
// versions of the method have been collected, then
// _prev_methods can have a length of zero.
GrowableArray<Method*>* _prev_methods;
public:
ActiveMethodOopsCache() { _prev_methods = NULL; }
~ActiveMethodOopsCache();
void add_previous_version(Method* method);
bool is_same_method(const Method* method) const;
};
// A helper class for caching a Method* when the user of the cache
// only cares about the latest version of the Method*.
//
class LatestMethodOopCache : public CommonMethodOopCache {
// This subclass adds a getter method for the latest Method*.
public:
Method* get_Method();
};
// For UseCompressedOops and UseCompressedKlassPointers. // For UseCompressedOops and UseCompressedKlassPointers.
struct NarrowPtrStruct { struct NarrowPtrStruct {
// Base address for oop/klass-within-java-object materialization. // Base address for oop/klass-within-java-object materialization.
@ -174,10 +144,10 @@ class Universe: AllStatic {
static objArrayOop _the_empty_class_klass_array; // Canonicalized obj array of type java.lang.Class static objArrayOop _the_empty_class_klass_array; // Canonicalized obj array of type java.lang.Class
static oop _the_null_string; // A cache of "null" as a Java string static oop _the_null_string; // A cache of "null" as a Java string
static oop _the_min_jint_string; // A cache of "-2147483648" as a Java string static oop _the_min_jint_string; // A cache of "-2147483648" as a Java string
static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects static LatestMethodCache* _finalizer_register_cache; // static method for registering finalizable objects
static LatestMethodOopCache* _loader_addClass_cache; // method for registering loaded classes in class loader vector static LatestMethodCache* _loader_addClass_cache; // method for registering loaded classes in class loader vector
static LatestMethodOopCache* _pd_implies_cache; // method for checking protection domain attributes static LatestMethodCache* _pd_implies_cache; // method for checking protection domain attributes
static ActiveMethodOopsCache* _reflect_invoke_cache; // method for security checks
// preallocated error objects (no backtrace) // preallocated error objects (no backtrace)
static oop _out_of_memory_error_java_heap; static oop _out_of_memory_error_java_heap;
static oop _out_of_memory_error_metaspace; static oop _out_of_memory_error_metaspace;
@ -334,11 +304,11 @@ class Universe: AllStatic {
static Array<Klass*>* the_array_interfaces_array() { return _the_array_interfaces_array; } static Array<Klass*>* the_array_interfaces_array() { return _the_array_interfaces_array; }
static oop the_null_string() { return _the_null_string; } static oop the_null_string() { return _the_null_string; }
static oop the_min_jint_string() { return _the_min_jint_string; } static oop the_min_jint_string() { return _the_min_jint_string; }
static Method* finalizer_register_method() { return _finalizer_register_cache->get_Method(); }
static Method* loader_addClass_method() { return _loader_addClass_cache->get_Method(); }
static Method* protection_domain_implies_method() { return _pd_implies_cache->get_Method(); } static Method* finalizer_register_method() { return _finalizer_register_cache->get_method(); }
static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; } static Method* loader_addClass_method() { return _loader_addClass_cache->get_method(); }
static Method* protection_domain_implies_method() { return _pd_implies_cache->get_method(); }
static oop null_ptr_exception_instance() { return _null_ptr_exception_instance; } static oop null_ptr_exception_instance() { return _null_ptr_exception_instance; }
static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; } static oop arithmetic_exception_instance() { return _arithmetic_exception_instance; }
View File
@ -981,7 +981,6 @@ bool Method::should_not_be_cached() const {
bool Method::is_ignored_by_security_stack_walk() const { bool Method::is_ignored_by_security_stack_walk() const {
const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection; const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
assert(intrinsic_id() != vmIntrinsics::_invoke || Universe::reflect_invoke_cache()->is_same_method((Method*)this), "sanity");
if (intrinsic_id() == vmIntrinsics::_invoke) { if (intrinsic_id() == vmIntrinsics::_invoke) {
// This is Method.invoke() -- ignore it // This is Method.invoke() -- ignore it
return true; return true;
View File
@ -221,7 +221,7 @@ bool Block::has_uncommon_code() const {
//------------------------------is_uncommon------------------------------------ //------------------------------is_uncommon------------------------------------
// True if block is low enough frequency or guarded by a test which // True if block is low enough frequency or guarded by a test which
// mostly does not go here. // mostly does not go here.
bool Block::is_uncommon( Block_Array &bbs ) const { bool Block::is_uncommon(PhaseCFG* cfg) const {
// Initial blocks must never be moved, so are never uncommon. // Initial blocks must never be moved, so are never uncommon.
if (head()->is_Root() || head()->is_Start()) return false; if (head()->is_Root() || head()->is_Start()) return false;
@ -238,7 +238,7 @@ bool Block::is_uncommon( Block_Array &bbs ) const {
uint uncommon_for_freq_preds = 0; uint uncommon_for_freq_preds = 0;
for( uint i=1; i<num_preds(); i++ ) { for( uint i=1; i<num_preds(); i++ ) {
Block* guard = bbs[pred(i)->_idx]; Block* guard = cfg->get_block_for_node(pred(i));
// Check to see if this block follows its guard 1 time out of 10000 // Check to see if this block follows its guard 1 time out of 10000
// or less. // or less.
// //
@ -285,11 +285,11 @@ void Block::dump_bidx(const Block* orig, outputStream* st) const {
} }
} }
void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const { void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
if (is_connector()) { if (is_connector()) {
for (uint i=1; i<num_preds(); i++) { for (uint i=1; i<num_preds(); i++) {
Block *p = ((*bbs)[pred(i)->_idx]); Block *p = cfg->get_block_for_node(pred(i));
p->dump_pred(bbs, orig, st); p->dump_pred(cfg, orig, st);
} }
} else { } else {
dump_bidx(orig, st); dump_bidx(orig, st);
@ -297,7 +297,7 @@ void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) con
} }
} }
void Block::dump_head( const Block_Array *bbs, outputStream* st ) const { void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
// Print the basic block // Print the basic block
dump_bidx(this, st); dump_bidx(this, st);
st->print(": #\t"); st->print(": #\t");
@ -311,26 +311,28 @@ void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
if( head()->is_block_start() ) { if( head()->is_block_start() ) {
for (uint i=1; i<num_preds(); i++) { for (uint i=1; i<num_preds(); i++) {
Node *s = pred(i); Node *s = pred(i);
if (bbs) { if (cfg != NULL) {
Block *p = (*bbs)[s->_idx]; Block *p = cfg->get_block_for_node(s);
p->dump_pred(bbs, p, st); p->dump_pred(cfg, p, st);
} else { } else {
while (!s->is_block_start()) while (!s->is_block_start())
s = s->in(0); s = s->in(0);
st->print("N%d ", s->_idx ); st->print("N%d ", s->_idx );
} }
} }
} else } else {
st->print("BLOCK HEAD IS JUNK "); st->print("BLOCK HEAD IS JUNK ");
}
// Print loop, if any // Print loop, if any
const Block *bhead = this; // Head of self-loop const Block *bhead = this; // Head of self-loop
Node *bh = bhead->head(); Node *bh = bhead->head();
if( bbs && bh->is_Loop() && !head()->is_Root() ) {
LoopNode *loop = bh->as_Loop(); LoopNode *loop = bh->as_Loop();
const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx]; const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
while (bx->is_connector()) { while (bx->is_connector()) {
bx = (*bbs)[bx->pred(1)->_idx]; bx = cfg->get_block_for_node(bx->pred(1));
} }
st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order); st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
// Dump any loop-specific bits, especially for CountedLoops. // Dump any loop-specific bits, especially for CountedLoops.
@ -349,29 +351,32 @@ void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
st->print_cr(""); st->print_cr("");
} }
void Block::dump() const { dump(NULL); } void Block::dump() const {
dump(NULL);
}
void Block::dump( const Block_Array *bbs ) const { void Block::dump(const PhaseCFG* cfg) const {
dump_head(bbs); dump_head(cfg);
uint cnt = _nodes.size(); for (uint i=0; i< _nodes.size(); i++) {
for( uint i=0; i<cnt; i++ )
_nodes[i]->dump(); _nodes[i]->dump();
}
tty->print("\n"); tty->print("\n");
} }
#endif #endif
//============================================================================= //=============================================================================
//------------------------------PhaseCFG--------------------------------------- //------------------------------PhaseCFG---------------------------------------
PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) : PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
Phase(CFG), : Phase(CFG)
_bbs(a), , _block_arena(arena)
_root(r), , _node_to_block_mapping(arena)
_node_latency(NULL) , _root(root)
, _node_latency(NULL)
#ifndef PRODUCT #ifndef PRODUCT
, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining")) , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif #endif
#ifdef ASSERT #ifdef ASSERT
, _raw_oops(a) , _raw_oops(arena)
#endif #endif
{ {
ResourceMark rm; ResourceMark rm;
@ -380,13 +385,13 @@ PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
// Node on demand. // Node on demand.
Node *x = new (C) GotoNode(NULL); Node *x = new (C) GotoNode(NULL);
x->init_req(0, x); x->init_req(0, x);
_goto = m.match_tree(x); _goto = matcher.match_tree(x);
assert(_goto != NULL, ""); assert(_goto != NULL, "");
_goto->set_req(0,_goto); _goto->set_req(0,_goto);
// Build the CFG in Reverse Post Order // Build the CFG in Reverse Post Order
_num_blocks = build_cfg(); _num_blocks = build_cfg();
_broot = _bbs[_root->_idx]; _broot = get_block_for_node(_root);
} }
//------------------------------build_cfg-------------------------------------- //------------------------------build_cfg--------------------------------------
@ -440,9 +445,9 @@ uint PhaseCFG::build_cfg() {
// 'p' now points to the start of this basic block // 'p' now points to the start of this basic block
// Put self in array of basic blocks // Put self in array of basic blocks
Block *bb = new (_bbs._arena) Block(_bbs._arena,p); Block *bb = new (_block_arena) Block(_block_arena, p);
_bbs.map(p->_idx,bb); map_node_to_block(p, bb);
_bbs.map(x->_idx,bb); map_node_to_block(x, bb);
if( x != p ) { // Only for root is x == p if( x != p ) { // Only for root is x == p
bb->_nodes.push((Node*)x); bb->_nodes.push((Node*)x);
} }
@ -473,16 +478,16 @@ uint PhaseCFG::build_cfg() {
// Check if it is the first node pushed on the stack at the beginning. // Check if it is the first node pushed on the stack at the beginning.
if (idx == 0) break; // end of the build if (idx == 0) break; // end of the build
// Find predecessor basic block // Find predecessor basic block
Block *pb = _bbs[x->_idx]; Block *pb = get_block_for_node(x);
// Insert into nodes array, if not already there // Insert into nodes array, if not already there
if( !_bbs.lookup(proj->_idx) ) { if (!has_block(proj)) {
assert( x != proj, "" ); assert( x != proj, "" );
// Map basic block of projection // Map basic block of projection
_bbs.map(proj->_idx,pb); map_node_to_block(proj, pb);
pb->_nodes.push(proj); pb->_nodes.push(proj);
} }
// Insert self as a child of my predecessor block // Insert self as a child of my predecessor block
pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]); pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(), assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
"too many control users, not a CFG?" ); "too many control users, not a CFG?" );
} }
@ -511,15 +516,15 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
RegionNode* region = new (C) RegionNode(2); RegionNode* region = new (C) RegionNode(2);
region->init_req(1, proj); region->init_req(1, proj);
// setup corresponding basic block // setup corresponding basic block
Block* block = new (_bbs._arena) Block(_bbs._arena, region); Block* block = new (_block_arena) Block(_block_arena, region);
_bbs.map(region->_idx, block); map_node_to_block(region, block);
C->regalloc()->set_bad(region->_idx); C->regalloc()->set_bad(region->_idx);
// add a goto node // add a goto node
Node* gto = _goto->clone(); // get a new goto node Node* gto = _goto->clone(); // get a new goto node
gto->set_req(0, region); gto->set_req(0, region);
// add it to the basic block // add it to the basic block
block->_nodes.push(gto); block->_nodes.push(gto);
_bbs.map(gto->_idx, block); map_node_to_block(gto, block);
C->regalloc()->set_bad(gto->_idx); C->regalloc()->set_bad(gto->_idx);
// hook up successor block // hook up successor block
block->_succs.map(block->_num_succs++, out); block->_succs.map(block->_num_succs++, out);
@ -570,7 +575,7 @@ void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
gto->set_req(0, b->head()); gto->set_req(0, b->head());
Node *bp = b->_nodes[end_idx]; Node *bp = b->_nodes[end_idx];
b->_nodes.map(end_idx,gto); // Slam over NeverBranch b->_nodes.map(end_idx,gto); // Slam over NeverBranch
_bbs.map(gto->_idx, b); map_node_to_block(gto, b);
C->regalloc()->set_bad(gto->_idx); C->regalloc()->set_bad(gto->_idx);
b->_nodes.pop(); // Yank projections b->_nodes.pop(); // Yank projections
b->_nodes.pop(); // Yank projections b->_nodes.pop(); // Yank projections
@ -613,7 +618,7 @@ bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
// If the previous block conditionally falls into bx, return false, // If the previous block conditionally falls into bx, return false,
// because moving bx will create an extra jump. // because moving bx will create an extra jump.
for(uint k = 1; k < bx->num_preds(); k++ ) { for(uint k = 1; k < bx->num_preds(); k++ ) {
Block* pred = _bbs[bx->pred(k)->_idx]; Block* pred = get_block_for_node(bx->pred(k));
if (pred == _blocks[bx_index-1]) { if (pred == _blocks[bx_index-1]) {
if (pred->_num_succs != 1) { if (pred->_num_succs != 1) {
return false; return false;
@ -682,7 +687,7 @@ void PhaseCFG::remove_empty() {
// Look for uncommon blocks and move to end. // Look for uncommon blocks and move to end.
if (!C->do_freq_based_layout()) { if (!C->do_freq_based_layout()) {
if( b->is_uncommon(_bbs) ) { if (b->is_uncommon(this)) {
move_to_end(b, i); move_to_end(b, i);
last--; // No longer check for being uncommon! last--; // No longer check for being uncommon!
if( no_flip_branch(b) ) { // Fall-thru case must follow? if( no_flip_branch(b) ) { // Fall-thru case must follow?
@ -870,28 +875,31 @@ void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
} while( !p->is_block_start() ); } while( !p->is_block_start() );
// Recursively visit // Recursively visit
for( uint i=1; i<p->req(); i++ ) for (uint i = 1; i < p->req(); i++) {
_dump_cfg(p->in(i),visited); _dump_cfg(p->in(i), visited);
}
// Dump the block // Dump the block
_bbs[p->_idx]->dump(&_bbs); get_block_for_node(p)->dump(this);
} }
void PhaseCFG::dump( ) const { void PhaseCFG::dump( ) const {
tty->print("\n--- CFG --- %d BBs\n",_num_blocks); tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
if( _blocks.size() ) { // Did we do basic-block layout? if (_blocks.size()) { // Did we do basic-block layout?
for( uint i=0; i<_num_blocks; i++ ) for (uint i = 0; i < _num_blocks; i++) {
_blocks[i]->dump(&_bbs); _blocks[i]->dump(this);
}
} else { // Else do it with a DFS } else { // Else do it with a DFS
VectorSet visited(_bbs._arena); VectorSet visited(_block_arena);
_dump_cfg(_root,visited); _dump_cfg(_root,visited);
} }
} }
void PhaseCFG::dump_headers() { void PhaseCFG::dump_headers() {
for( uint i = 0; i < _num_blocks; i++ ) { for( uint i = 0; i < _num_blocks; i++ ) {
if( _blocks[i] == NULL ) continue; if (_blocks[i]) {
_blocks[i]->dump_head(&_bbs); _blocks[i]->dump_head(this);
}
} }
} }
@ -904,7 +912,7 @@ void PhaseCFG::verify( ) const {
uint j; uint j;
for (j = 0; j < cnt; j++) { for (j = 0; j < cnt; j++) {
Node *n = b->_nodes[j]; Node *n = b->_nodes[j];
assert( _bbs[n->_idx] == b, "" ); assert(get_block_for_node(n) == b, "");
if (j >= 1 && n->is_Mach() && if (j >= 1 && n->is_Mach() &&
n->as_Mach()->ideal_Opcode() == Op_CreateEx) { n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
assert(j == 1 || b->_nodes[j-1]->is_Phi(), assert(j == 1 || b->_nodes[j-1]->is_Phi(),
@ -913,13 +921,12 @@ void PhaseCFG::verify( ) const {
for (uint k = 0; k < n->req(); k++) { for (uint k = 0; k < n->req(); k++) {
Node *def = n->in(k); Node *def = n->in(k);
if (def && def != n) { if (def && def != n) {
assert(_bbs[def->_idx] || def->is_Con(), assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
"must have block; constants for debug info ok");
// Verify that instructions in the block are in correct order. // Verify that instructions in the block are in correct order.
// Uses must follow their definition if they are at the same block. // Uses must follow their definition if they are at the same block.
// Mostly done to check that MachSpillCopy nodes are placed correctly // Mostly done to check that MachSpillCopy nodes are placed correctly
// when CreateEx node is moved in build_ifg_physical(). // when CreateEx node is moved in build_ifg_physical().
if (_bbs[def->_idx] == b && if (get_block_for_node(def) == b &&
!(b->head()->is_Loop() && n->is_Phi()) && !(b->head()->is_Loop() && n->is_Phi()) &&
// See (+++) comment in reg_split.cpp // See (+++) comment in reg_split.cpp
!(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) { !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
View File
@ -48,13 +48,12 @@ class Block_Array : public ResourceObj {
friend class VMStructs; friend class VMStructs;
uint _size; // allocated size, as opposed to formal limit uint _size; // allocated size, as opposed to formal limit
debug_only(uint _limit;) // limit to formal domain debug_only(uint _limit;) // limit to formal domain
Arena *_arena; // Arena to allocate in
protected: protected:
Block **_blocks; Block **_blocks;
void grow( uint i ); // Grow array node to fit void grow( uint i ); // Grow array node to fit
public: public:
Arena *_arena; // Arena to allocate in
Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) { Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
debug_only(_limit=0); debug_only(_limit=0);
_blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize ); _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
@ -77,7 +76,7 @@ class Block_List : public Block_Array {
public: public:
uint _cnt; uint _cnt;
Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {} Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
void push( Block *b ) { map(_cnt++,b); } void push( Block *b ) { map(_cnt++,b); }
Block *pop() { return _blocks[--_cnt]; } Block *pop() { return _blocks[--_cnt]; }
Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;} Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
void remove( uint i ); void remove( uint i );
@ -284,15 +283,15 @@ class Block : public CFGElement {
// helper function that adds caller save registers to MachProjNode // helper function that adds caller save registers to MachProjNode
void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe); void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
// Schedule a call next in the block // Schedule a call next in the block
uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call); uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
// Perform basic-block local scheduling // Perform basic-block local scheduling
Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot); Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ); void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs); void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call); bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
// Cleanup if any code lands between a Call and his Catch // Cleanup if any code lands between a Call and his Catch
void call_catch_cleanup(Block_Array &bbs, Compile *C); void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
// Detect implicit-null-check opportunities. Basically, find NULL checks // Detect implicit-null-check opportunities. Basically, find NULL checks
// with suitable memory ops nearby. Use the memory op to do the NULL check. // with suitable memory ops nearby. Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby. // I can generate a memory op if there is not one nearby.
@ -331,15 +330,15 @@ class Block : public CFGElement {
// Use frequency calculations and code shape to predict if the block // Use frequency calculations and code shape to predict if the block
// is uncommon. // is uncommon.
bool is_uncommon( Block_Array &bbs ) const; bool is_uncommon(PhaseCFG* cfg) const;
#ifndef PRODUCT #ifndef PRODUCT
// Debugging print of basic block // Debugging print of basic block
void dump_bidx(const Block* orig, outputStream* st = tty) const; void dump_bidx(const Block* orig, outputStream* st = tty) const;
void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const; void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
void dump_head( const Block_Array *bbs, outputStream* st = tty ) const; void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
void dump() const; void dump() const;
void dump( const Block_Array *bbs ) const; void dump(const PhaseCFG* cfg) const;
#endif #endif
}; };
@ -349,6 +348,12 @@ class Block : public CFGElement {
class PhaseCFG : public Phase { class PhaseCFG : public Phase {
friend class VMStructs; friend class VMStructs;
private: private:
// Arena for the blocks to be stored in
Arena* _block_arena;
// Map nodes to owning basic block
Block_Array _node_to_block_mapping;
// Build a proper looking cfg. Return count of basic blocks // Build a proper looking cfg. Return count of basic blocks
uint build_cfg(); uint build_cfg();
@ -371,22 +376,42 @@ class PhaseCFG : public Phase {
Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false); Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
void verify_anti_dependences(Block* LCA, Node* load) { void verify_anti_dependences(Block* LCA, Node* load) {
assert(LCA == _bbs[load->_idx], "should already be scheduled"); assert(LCA == get_block_for_node(load), "should already be scheduled");
insert_anti_dependences(LCA, load, true); insert_anti_dependences(LCA, load, true);
} }
public: public:
PhaseCFG( Arena *a, RootNode *r, Matcher &m ); PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);
uint _num_blocks; // Count of basic blocks uint _num_blocks; // Count of basic blocks
Block_List _blocks; // List of basic blocks Block_List _blocks; // List of basic blocks
RootNode *_root; // Root of whole program RootNode *_root; // Root of whole program
Block_Array _bbs; // Map Nodes to owning Basic Block
Block *_broot; // Basic block of root Block *_broot; // Basic block of root
uint _rpo_ctr; uint _rpo_ctr;
CFGLoop* _root_loop; CFGLoop* _root_loop;
float _outer_loop_freq; // Outmost loop frequency float _outer_loop_freq; // Outmost loop frequency
// set which block this node should reside in
void map_node_to_block(const Node* node, Block* block) {
_node_to_block_mapping.map(node->_idx, block);
}
// removes the mapping from a node to a block
void unmap_node_from_block(const Node* node) {
_node_to_block_mapping.map(node->_idx, NULL);
}
// get the block in which this node resides
Block* get_block_for_node(const Node* node) const {
return _node_to_block_mapping[node->_idx];
}
// returns true if this node has been mapped to a block
bool has_block(const Node* node) const {
return (_node_to_block_mapping.lookup(node->_idx) != NULL);
}
// Per node latency estimation, valid only during GCM // Per node latency estimation, valid only during GCM
GrowableArray<uint> *_node_latency; GrowableArray<uint> *_node_latency;
@ -405,7 +430,7 @@ class PhaseCFG : public Phase {
void Estimate_Block_Frequency(); void Estimate_Block_Frequency();
// Global Code Motion. See Click's PLDI95 paper. Place Nodes in specific // Global Code Motion. See Click's PLDI95 paper. Place Nodes in specific
// basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block. // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list ); void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
// Compute the (backwards) latency of a node from the uses // Compute the (backwards) latency of a node from the uses
@ -454,7 +479,7 @@ class PhaseCFG : public Phase {
// Insert a node into a block and update the node-to-block mapping // Insert a node into a block and update the node-to-block mapping
void insert( Block *b, uint idx, Node *n ) { void insert( Block *b, uint idx, Node *n ) {
b->_nodes.insert( idx, n ); b->_nodes.insert( idx, n );
_bbs.map( n->_idx, b ); map_node_to_block(n, b);
} }
#ifndef PRODUCT #ifndef PRODUCT
@ -543,7 +568,7 @@ class CFGLoop : public CFGElement {
_child(NULL), _child(NULL),
_exit_prob(1.0f) {} _exit_prob(1.0f) {}
CFGLoop* parent() { return _parent; } CFGLoop* parent() { return _parent; }
void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk); void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
void add_member(CFGElement *s) { _members.push(s); } void add_member(CFGElement *s) { _members.push(s); }
void add_nested_loop(CFGLoop* cl); void add_nested_loop(CFGLoop* cl);
Block* head() { Block* head() {
View File
@ -426,14 +426,16 @@ static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *wor
} }
memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) ); memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
// Push preds onto worklist // Push preds onto worklist
for( uint i=1; i<root->req(); i++ ) for (uint i = 1; i < root->req(); i++) {
worklist->push(cfg->_bbs[root->in(i)->_idx]); Block* block = cfg->get_block_for_node(root->in(i));
worklist->push(block);
}
// ZKM.jar includes tiny infinite loops which are unreached from below. // ZKM.jar includes tiny infinite loops which are unreached from below.
// If we missed any blocks, we'll retry here after pushing all missed // If we missed any blocks, we'll retry here after pushing all missed
// blocks on the worklist. Normally this outer loop never trips more // blocks on the worklist. Normally this outer loop never trips more
// than once. // than once.
while( 1 ) { while (1) {
while( worklist->size() ) { // Standard worklist algorithm while( worklist->size() ) { // Standard worklist algorithm
Block *b = worklist->rpop(); Block *b = worklist->rpop();
@ -537,8 +539,10 @@ static void do_liveness( PhaseRegAlloc *regalloc, PhaseCFG *cfg, Block_List *wor
for( l=0; l<max_reg_ints; l++ ) for( l=0; l<max_reg_ints; l++ )
old_live[l] = tmp_live[l]; old_live[l] = tmp_live[l];
// Push preds onto worklist // Push preds onto worklist
for( l=1; l<(int)b->num_preds(); l++ ) for (l = 1; l < (int)b->num_preds(); l++) {
worklist->push(cfg->_bbs[b->pred(l)->_idx]); Block* block = cfg->get_block_for_node(b->pred(l));
worklist->push(block);
}
} }
} }
@ -629,10 +633,9 @@ void Compile::BuildOopMaps() {
// pred to this block. Otherwise we have to grab a new OopFlow. // pred to this block. Otherwise we have to grab a new OopFlow.
OopFlow *flow = NULL; // Flag for finding optimized flow OopFlow *flow = NULL; // Flag for finding optimized flow
Block *pred = (Block*)0xdeadbeef; Block *pred = (Block*)0xdeadbeef;
uint j;
// Scan this block's preds to find a done predecessor // Scan this block's preds to find a done predecessor
for( j=1; j<b->num_preds(); j++ ) { for (uint j = 1; j < b->num_preds(); j++) {
Block *p = _cfg->_bbs[b->pred(j)->_idx]; Block* p = _cfg->get_block_for_node(b->pred(j));
OopFlow *p_flow = flows[p->_pre_order]; OopFlow *p_flow = flows[p->_pre_order];
if( p_flow ) { // Predecessor is done if( p_flow ) { // Predecessor is done
assert( p_flow->_b == p, "cross check" ); assert( p_flow->_b == p, "cross check" );
View File
@ -179,6 +179,9 @@
product_pd(intx, LoopUnrollLimit, \ product_pd(intx, LoopUnrollLimit, \
"Unroll loop bodies with node count less than this") \ "Unroll loop bodies with node count less than this") \
\ \
product(intx, LoopMaxUnroll, 16, \
"Maximum number of unrolls for main loop") \
\
product(intx, LoopUnrollMin, 4, \ product(intx, LoopUnrollMin, 4, \
"Minimum number of unroll loop bodies before checking progress" \ "Minimum number of unroll loop bodies before checking progress" \
"of rounds of unroll,optimize,..") \ "of rounds of unroll,optimize,..") \
View File
@ -295,7 +295,7 @@ void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) { bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
Block *bcon = _cfg._bbs[con->_idx]; Block* bcon = _cfg.get_block_for_node(con);
uint cindex = bcon->find_node(con); uint cindex = bcon->find_node(con);
Node *con_next = bcon->_nodes[cindex+1]; Node *con_next = bcon->_nodes[cindex+1];
if (con_next->in(0) != con || !con_next->is_MachProj()) { if (con_next->in(0) != con || !con_next->is_MachProj()) {
@ -306,7 +306,7 @@ bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy,
Node *kills = con_next->clone(); Node *kills = con_next->clone();
kills->set_req(0, copy); kills->set_req(0, copy);
b->_nodes.insert(idx, kills); b->_nodes.insert(idx, kills);
_cfg._bbs.map(kills->_idx, b); _cfg.map_node_to_block(kills, b);
new_lrg(kills, max_lrg_id); new_lrg(kills, max_lrg_id);
return true; return true;
} }
@ -962,8 +962,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// AggressiveCoalesce. This effectively pre-virtual-splits // AggressiveCoalesce. This effectively pre-virtual-splits
// around uncommon uses of common defs. // around uncommon uses of common defs.
const RegMask &rm = n->in_RegMask(k); const RegMask &rm = n->in_RegMask(k);
if( !after_aggressive && if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) {
_cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
// Since we are BEFORE aggressive coalesce, leave the register // Since we are BEFORE aggressive coalesce, leave the register
// mask untrimmed by the call. This encourages more coalescing. // mask untrimmed by the call. This encourages more coalescing.
// Later, AFTER aggressive, this live range will have to spill // Later, AFTER aggressive, this live range will have to spill
@ -1709,16 +1708,15 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
// set control to _root and place it into Start block // set control to _root and place it into Start block
// (where top() node is placed). // (where top() node is placed).
base->init_req(0, _cfg._root); base->init_req(0, _cfg._root);
Block *startb = _cfg._bbs[C->top()->_idx]; Block *startb = _cfg.get_block_for_node(C->top());
startb->_nodes.insert(startb->find_node(C->top()), base ); startb->_nodes.insert(startb->find_node(C->top()), base );
_cfg._bbs.map( base->_idx, startb ); _cfg.map_node_to_block(base, startb);
assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet"); assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
} }
if (_lrg_map.live_range_id(base) == 0) { if (_lrg_map.live_range_id(base) == 0) {
new_lrg(base, maxlrg++); new_lrg(base, maxlrg++);
} }
assert(base->in(0) == _cfg._root && assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
_cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
derived_base_map[derived->_idx] = base; derived_base_map[derived->_idx] = base;
return base; return base;
} }
@ -1754,12 +1752,12 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive
base->as_Phi()->set_type(t); base->as_Phi()->set_type(t);
// Search the current block for an existing base-Phi // Search the current block for an existing base-Phi
Block *b = _cfg._bbs[derived->_idx]; Block *b = _cfg.get_block_for_node(derived);
for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
Node *phi = b->_nodes[i]; Node *phi = b->_nodes[i];
if( !phi->is_Phi() ) { // Found end of Phis with no match? if( !phi->is_Phi() ) { // Found end of Phis with no match?
b->_nodes.insert( i, base ); // Must insert created Phi here as base b->_nodes.insert( i, base ); // Must insert created Phi here as base
_cfg._bbs.map( base->_idx, b ); _cfg.map_node_to_block(base, b);
new_lrg(base,maxlrg++); new_lrg(base,maxlrg++);
break; break;
} }
@ -1815,8 +1813,8 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) { if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
Node *phi = n->in(1); Node *phi = n->in(1);
if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) { if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
Block *phi_block = _cfg._bbs[phi->_idx]; Block *phi_block = _cfg.get_block_for_node(phi);
if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) { if (_cfg.get_block_for_node(phi_block->pred(2)) == b) {
const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI]; const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask ); Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
insert_proj( phi_block, 1, spill, maxlrg++ ); insert_proj( phi_block, 1, spill, maxlrg++ );
@ -1870,7 +1868,7 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
!liveout.member(_lrg_map.live_range_id(base))) && // not live) AND !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
(_lrg_map.live_range_id(base) > 0) && // not a constant (_lrg_map.live_range_id(base) > 0) && // not a constant
_cfg._bbs[base->_idx] != b) { // base not def'd in blk) _cfg.get_block_for_node(base) != b) { // base not def'd in blk)
// Base pointer is not currently live. Since I stretched // Base pointer is not currently live. Since I stretched
// the base pointer to here and it crosses basic-block // the base pointer to here and it crosses basic-block
// boundaries, the global live info is now incorrect. // boundaries, the global live info is now incorrect.
@ -1993,8 +1991,8 @@ void PhaseChaitin::dump(const Node *n) const {
tty->print("\n"); tty->print("\n");
} }
void PhaseChaitin::dump( const Block * b ) const { void PhaseChaitin::dump(const Block *b) const {
b->dump_head( &_cfg._bbs ); b->dump_head(&_cfg);
// For all instructions // For all instructions
for( uint j = 0; j < b->_nodes.size(); j++ ) for( uint j = 0; j < b->_nodes.size(); j++ )
@ -2299,7 +2297,7 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
if (_lrg_map.find_const(n) == lidx) { if (_lrg_map.find_const(n) == lidx) {
if (!dump_once++) { if (!dump_once++) {
tty->cr(); tty->cr();
b->dump_head( &_cfg._bbs ); b->dump_head(&_cfg);
} }
dump(n); dump(n);
continue; continue;
@ -2314,7 +2312,7 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
if (_lrg_map.find_const(m) == lidx) { if (_lrg_map.find_const(m) == lidx) {
if (!dump_once++) { if (!dump_once++) {
tty->cr(); tty->cr();
b->dump_head(&_cfg._bbs); b->dump_head(&_cfg);
} }
dump(n); dump(n);
} }
View File
@ -52,7 +52,7 @@ void PhaseCoalesce::dump() const {
// Print a nice block header // Print a nice block header
tty->print("B%d: ",b->_pre_order); tty->print("B%d: ",b->_pre_order);
for( j=1; j<b->num_preds(); j++ ) for( j=1; j<b->num_preds(); j++ )
tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order); tty->print("B%d ", _phc._cfg.get_block_for_node(b->pred(j))->_pre_order);
tty->print("-> "); tty->print("-> ");
for( j=0; j<b->_num_succs; j++ ) for( j=0; j<b->_num_succs; j++ )
tty->print("B%d ",b->_succs[j]->_pre_order); tty->print("B%d ",b->_succs[j]->_pre_order);
@ -208,7 +208,7 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui
copy->set_req(idx,tmp); copy->set_req(idx,tmp);
// Save source in temp early, before source is killed // Save source in temp early, before source is killed
b->_nodes.insert(kill_src_idx,tmp); b->_nodes.insert(kill_src_idx,tmp);
_phc._cfg._bbs.map( tmp->_idx, b ); _phc._cfg.map_node_to_block(tmp, b);
last_use_idx++; last_use_idx++;
} }
@ -286,7 +286,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
Node *m = n->in(j); Node *m = n->in(j);
uint src_name = _phc._lrg_map.find(m); uint src_name = _phc._lrg_map.find(m);
if (src_name != phi_name) { if (src_name != phi_name) {
Block *pred = _phc._cfg._bbs[b->pred(j)->_idx]; Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
Node *copy; Node *copy;
assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
// Rematerialize constants instead of copying them // Rematerialize constants instead of copying them
@ -305,7 +305,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
} }
// Insert the copy in the use-def chain // Insert the copy in the use-def chain
n->set_req(j, copy); n->set_req(j, copy);
_phc._cfg._bbs.map( copy->_idx, pred ); _phc._cfg.map_node_to_block(copy, pred);
// Extend ("register allocate") the names array for the copy. // Extend ("register allocate") the names array for the copy.
_phc._lrg_map.extend(copy->_idx, phi_name); _phc._lrg_map.extend(copy->_idx, phi_name);
} // End of if Phi names do not match } // End of if Phi names do not match
@ -343,13 +343,13 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
n->set_req(idx, copy); n->set_req(idx, copy);
// Extend ("register allocate") the names array for the copy. // Extend ("register allocate") the names array for the copy.
_phc._lrg_map.extend(copy->_idx, name); _phc._lrg_map.extend(copy->_idx, name);
_phc._cfg._bbs.map( copy->_idx, b ); _phc._cfg.map_node_to_block(copy, b);
} }
} // End of is two-adr } // End of is two-adr
// Insert a copy at a debug use for a lrg which has high frequency // Insert a copy at a debug use for a lrg which has high frequency
if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) { if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
// Walk the debug inputs to the node and check for lrg freq // Walk the debug inputs to the node and check for lrg freq
JVMState* jvms = n->jvms(); JVMState* jvms = n->jvms();
uint debug_start = jvms ? jvms->debug_start() : 999999; uint debug_start = jvms ? jvms->debug_start() : 999999;
@ -391,7 +391,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
uint max_lrg_id = _phc._lrg_map.max_lrg_id(); uint max_lrg_id = _phc._lrg_map.max_lrg_id();
_phc.new_lrg(copy, max_lrg_id); _phc.new_lrg(copy, max_lrg_id);
_phc._lrg_map.set_max_lrg_id(max_lrg_id + 1); _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
_phc._cfg._bbs.map(copy->_idx, b); _phc._cfg.map_node_to_block(copy, b);
//tty->print_cr("Split a debug use in Aggressive Coalesce"); //tty->print_cr("Split a debug use in Aggressive Coalesce");
} // End of if high frequency use/def } // End of if high frequency use/def
} // End of for all debug inputs } // End of for all debug inputs
@ -437,7 +437,10 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) {
Block *bs = b->_succs[i]; Block *bs = b->_succs[i];
// Find index of 'b' in 'bs' predecessors // Find index of 'b' in 'bs' predecessors
uint j=1; uint j=1;
while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++; while (_phc._cfg.get_block_for_node(bs->pred(j)) != b) {
j++;
}
// Visit all the Phis in successor block // Visit all the Phis in successor block
for( uint k = 1; k<bs->_nodes.size(); k++ ) { for( uint k = 1; k<bs->_nodes.size(); k++ ) {
Node *n = bs->_nodes[k]; Node *n = bs->_nodes[k];
@ -510,9 +513,9 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
if( bindex < b->_fhrp_index ) b->_fhrp_index--; if( bindex < b->_fhrp_index ) b->_fhrp_index--;
// Stretched lr1; add it to liveness of intermediate blocks // Stretched lr1; add it to liveness of intermediate blocks
Block *b2 = _phc._cfg._bbs[src_copy->_idx]; Block *b2 = _phc._cfg.get_block_for_node(src_copy);
while( b != b2 ) { while( b != b2 ) {
b = _phc._cfg._bbs[b->pred(1)->_idx]; b = _phc._cfg.get_block_for_node(b->pred(1));
_phc._live->live(b)->insert(lr1); _phc._live->live(b)->insert(lr1);
} }
} }
@ -532,7 +535,7 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
bindex2--; // Chain backwards 1 instruction bindex2--; // Chain backwards 1 instruction
while( bindex2 == 0 ) { // At block start, find prior block while( bindex2 == 0 ) { // At block start, find prior block
assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" ); assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
b2 = _phc._cfg._bbs[b2->pred(1)->_idx]; b2 = _phc._cfg.get_block_for_node(b2->pred(1));
bindex2 = b2->end_idx()-1; bindex2 = b2->end_idx()-1;
} }
// Get prior instruction // Get prior instruction
@ -676,8 +679,8 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
if (UseFPUForSpilling && rm.is_AllStack() ) { if (UseFPUForSpilling && rm.is_AllStack() ) {
// Don't coalesce when frequency difference is large // Don't coalesce when frequency difference is large
Block *dst_b = _phc._cfg._bbs[dst_copy->_idx]; Block *dst_b = _phc._cfg.get_block_for_node(dst_copy);
Block *src_def_b = _phc._cfg._bbs[src_def->_idx]; Block *src_def_b = _phc._cfg.get_block_for_node(src_def);
if (src_def_b->_freq > 10*dst_b->_freq ) if (src_def_b->_freq > 10*dst_b->_freq )
return false; return false;
} }
@ -690,7 +693,7 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
// Another early bail-out test is when we are double-coalescing and the // Another early bail-out test is when we are double-coalescing and the
// 2 copies are separated by some control flow. // 2 copies are separated by some control flow.
if( dst_copy != src_copy ) { if( dst_copy != src_copy ) {
Block *src_b = _phc._cfg._bbs[src_copy->_idx]; Block *src_b = _phc._cfg.get_block_for_node(src_copy);
Block *b2 = b; Block *b2 = b;
while( b2 != src_b ) { while( b2 != src_b ) {
if( b2->num_preds() > 2 ){// Found merge-point if( b2->num_preds() > 2 ){// Found merge-point
@ -701,7 +704,7 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
//record_bias( _phc._lrgs, lr1, lr2 ); //record_bias( _phc._lrgs, lr1, lr2 );
return false; // Too hard to find all interferences return false; // Too hard to find all interferences
} }
b2 = _phc._cfg._bbs[b2->pred(1)->_idx]; b2 = _phc._cfg.get_block_for_node(b2->pred(1));
} }
} }
@ -786,8 +789,9 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
// Conservative (but pessimistic) copy coalescing of a single block // Conservative (but pessimistic) copy coalescing of a single block
void PhaseConservativeCoalesce::coalesce( Block *b ) { void PhaseConservativeCoalesce::coalesce( Block *b ) {
// Bail out on infrequent blocks // Bail out on infrequent blocks
if( b->is_uncommon(_phc._cfg._bbs) ) if (b->is_uncommon(&_phc._cfg)) {
return; return;
}
// Check this block for copies. // Check this block for copies.
for( uint i = 1; i<b->end_idx(); i++ ) { for( uint i = 1; i<b->end_idx(); i++ ) {
// Check for actual copies on inputs. Coalesce a copy into its // Check for actual copies on inputs. Coalesce a copy into its
View File
@ -2262,7 +2262,7 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
tty->print("%3.3x ", pcs[n->_idx]); tty->print("%3.3x ", pcs[n->_idx]);
else else
tty->print(" "); tty->print(" ");
b->dump_head( &_cfg->_bbs ); b->dump_head(_cfg);
if (b->is_connector()) { if (b->is_connector()) {
tty->print_cr(" # Empty connector block"); tty->print_cr(" # Empty connector block");
} else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) { } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
@ -3525,7 +3525,7 @@ void Compile::ConstantTable::add(Constant& con) {
} }
Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) { Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
Block* b = Compile::current()->cfg()->_bbs[n->_idx]; Block* b = Compile::current()->cfg()->get_block_for_node(n);
Constant con(type, value, b->_freq); Constant con(type, value, b->_freq);
add(con); add(con);
return con; return con;
View File
@ -105,8 +105,8 @@ void PhaseCFG::Dominators( ) {
// Step 2: // Step 2:
Node *whead = w->_block->head(); Node *whead = w->_block->head();
for( uint j=1; j < whead->req(); j++ ) { for (uint j = 1; j < whead->req(); j++) {
Block *b = _bbs[whead->in(j)->_idx]; Block* b = get_block_for_node(whead->in(j));
Tarjan *vx = &tarjan[b->_pre_order]; Tarjan *vx = &tarjan[b->_pre_order];
Tarjan *u = vx->EVAL(); Tarjan *u = vx->EVAL();
if( u->_semi < w->_semi ) if( u->_semi < w->_semi )
View File
@ -66,7 +66,7 @@
// are in b also. // are in b also.
void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) { void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
// Set basic block of n, add n to b. // Set basic block of n, add n to b.
_bbs.map(n->_idx, b); map_node_to_block(n, b);
b->add_inst(n); b->add_inst(n);
// After Matching, nearly any old Node may have projections trailing it. // After Matching, nearly any old Node may have projections trailing it.
@ -75,11 +75,12 @@ void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* use = n->fast_out(i); Node* use = n->fast_out(i);
if (use->is_Proj()) { if (use->is_Proj()) {
Block* buse = _bbs[use->_idx]; Block* buse = get_block_for_node(use);
if (buse != b) { // In wrong block? if (buse != b) { // In wrong block?
if (buse != NULL) if (buse != NULL) {
buse->find_remove(use); // Remove from wrong block buse->find_remove(use); // Remove from wrong block
_bbs.map(use->_idx, b); // Re-insert in this block }
map_node_to_block(use, b);
b->add_inst(use); b->add_inst(use);
} }
} }
@ -97,7 +98,7 @@ void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
if (p != NULL && p != n) { // Control from a block projection? if (p != NULL && p != n) { // Control from a block projection?
assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here"); assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
// Find trailing Region // Find trailing Region
Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block Block *pb = get_block_for_node(in0); // Block-projection already has basic block
uint j = 0; uint j = 0;
if (pb->_num_succs != 1) { // More than 1 successor? if (pb->_num_succs != 1) { // More than 1 successor?
// Search for successor // Search for successor
@ -127,14 +128,15 @@ void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
while ( spstack.is_nonempty() ) { while ( spstack.is_nonempty() ) {
Node *n = spstack.pop(); Node *n = spstack.pop();
if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
if( n->pinned() && !_bbs.lookup(n->_idx) ) { // Pinned? Nail it down! if( n->pinned() && !has_block(n)) { // Pinned? Nail it down!
assert( n->in(0), "pinned Node must have Control" ); assert( n->in(0), "pinned Node must have Control" );
// Before setting block replace block_proj control edge // Before setting block replace block_proj control edge
replace_block_proj_ctrl(n); replace_block_proj_ctrl(n);
Node *input = n->in(0); Node *input = n->in(0);
while( !input->is_block_start() ) while (!input->is_block_start()) {
input = input->in(0); input = input->in(0);
Block *b = _bbs[input->_idx]; // Basic block of controlling input }
Block *b = get_block_for_node(input); // Basic block of controlling input
schedule_node_into_block(n, b); schedule_node_into_block(n, b);
} }
for( int i = n->req() - 1; i >= 0; --i ) { // For all inputs for( int i = n->req() - 1; i >= 0; --i ) { // For all inputs
@ -149,7 +151,7 @@ void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
// Assert that new input b2 is dominated by all previous inputs. // Assert that new input b2 is dominated by all previous inputs.
// Check this by seeing that it is dominated by b1, the deepest // Check this by seeing that it is dominated by b1, the deepest
// input observed until b2. // input observed until b2.
static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) { static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
if (b1 == NULL) return; if (b1 == NULL) return;
assert(b1->_dom_depth < b2->_dom_depth, "sanity"); assert(b1->_dom_depth < b2->_dom_depth, "sanity");
Block* tmp = b2; Block* tmp = b2;
@ -162,7 +164,7 @@ static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
for (uint j=0; j<n->len(); j++) { // For all inputs for (uint j=0; j<n->len(); j++) { // For all inputs
Node* inn = n->in(j); // Get input Node* inn = n->in(j); // Get input
if (inn == NULL) continue; // Ignore NULL, missing inputs if (inn == NULL) continue; // Ignore NULL, missing inputs
Block* inb = bbs[inn->_idx]; Block* inb = cfg->get_block_for_node(inn);
tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order, tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth); inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
inn->dump(); inn->dump();
@ -174,20 +176,20 @@ static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
} }
#endif #endif
static Block* find_deepest_input(Node* n, Block_Array &bbs) { static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
// Find the last input dominated by all other inputs. // Find the last input dominated by all other inputs.
Block* deepb = NULL; // Deepest block so far Block* deepb = NULL; // Deepest block so far
int deepb_dom_depth = 0; int deepb_dom_depth = 0;
for (uint k = 0; k < n->len(); k++) { // For all inputs for (uint k = 0; k < n->len(); k++) { // For all inputs
Node* inn = n->in(k); // Get input Node* inn = n->in(k); // Get input
if (inn == NULL) continue; // Ignore NULL, missing inputs if (inn == NULL) continue; // Ignore NULL, missing inputs
Block* inb = bbs[inn->_idx]; Block* inb = cfg->get_block_for_node(inn);
assert(inb != NULL, "must already have scheduled this input"); assert(inb != NULL, "must already have scheduled this input");
if (deepb_dom_depth < (int) inb->_dom_depth) { if (deepb_dom_depth < (int) inb->_dom_depth) {
// The new inb must be dominated by the previous deepb. // The new inb must be dominated by the previous deepb.
// The various inputs must be linearly ordered in the dom // The various inputs must be linearly ordered in the dom
// tree, or else there will not be a unique deepest block. // tree, or else there will not be a unique deepest block.
DEBUG_ONLY(assert_dom(deepb, inb, n, bbs)); DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
deepb = inb; // Save deepest block deepb = inb; // Save deepest block
deepb_dom_depth = deepb->_dom_depth; deepb_dom_depth = deepb->_dom_depth;
} }
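
find_deepest_input above reduces to a one-pass maximum over dominator depth. A tiny self-contained illustration of that selection, with simplified stand-in types rather than the HotSpot Block/Node classes, could look like:

    #include <vector>

    struct Blk {
      Blk*     idom;        // immediate dominator, nullptr at the root
      unsigned dom_depth;   // depth in the dominator tree
    };

    // Among the blocks of a node's inputs, keep the one deepest in the dominator
    // tree. The inputs are expected to lie on one dominator chain (the assert_dom
    // check above), so the deepest block is dominated by all of the others.
    Blk* deepest_input_block(const std::vector<Blk*>& input_blocks) {
      Blk* deepest = nullptr;
      for (Blk* b : input_blocks) {
        if (b == nullptr) continue;               // missing input, ignore
        if (deepest == nullptr || b->dom_depth > deepest->dom_depth) {
          deepest = b;
        }
      }
      return deepest;
    }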
@ -243,7 +245,7 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
++i; ++i;
if (in == NULL) continue; // Ignore NULL, missing inputs if (in == NULL) continue; // Ignore NULL, missing inputs
int is_visited = visited.test_set(in->_idx); int is_visited = visited.test_set(in->_idx);
if (!_bbs.lookup(in->_idx)) { // Missing block selection? if (!has_block(in)) { // Missing block selection?
if (is_visited) { if (is_visited) {
// assert( !visited.test(in->_idx), "did not schedule early" ); // assert( !visited.test(in->_idx), "did not schedule early" );
return false; return false;
@ -265,9 +267,9 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
// any projections which depend on them. // any projections which depend on them.
if (!n->pinned()) { if (!n->pinned()) {
// Set earliest legal block. // Set earliest legal block.
_bbs.map(n->_idx, find_deepest_input(n, _bbs)); map_node_to_block(n, find_deepest_input(n, this));
} else { } else {
assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge"); assert(get_block_for_node(n) == get_block_for_node(n->in(0)), "Pinned Node should be at the same block as its control edge");
} }
if (nstack.is_empty()) { if (nstack.is_empty()) {
@ -313,8 +315,8 @@ Block* Block::dom_lca(Block* LCA) {
// The definition must dominate the use, so move the LCA upward in the // The definition must dominate the use, so move the LCA upward in the
// dominator tree to dominate the use. If the use is a phi, adjust // dominator tree to dominate the use. If the use is a phi, adjust
// the LCA only with the phi input paths which actually use this def. // the LCA only with the phi input paths which actually use this def.
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) { static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
Block* buse = bbs[use->_idx]; Block* buse = cfg->get_block_for_node(use);
if (buse == NULL) return LCA; // Unused killing Projs have no use block if (buse == NULL) return LCA; // Unused killing Projs have no use block
if (!use->is_Phi()) return buse->dom_lca(LCA); if (!use->is_Phi()) return buse->dom_lca(LCA);
uint pmax = use->req(); // Number of Phi inputs uint pmax = use->req(); // Number of Phi inputs
@ -329,7 +331,7 @@ static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array
// more than once. // more than once.
for (uint j=1; j<pmax; j++) { // For all inputs for (uint j=1; j<pmax; j++) { // For all inputs
if (use->in(j) == def) { // Found matching input? if (use->in(j) == def) { // Found matching input?
Block* pred = bbs[buse->pred(j)->_idx]; Block* pred = cfg->get_block_for_node(buse->pred(j));
LCA = pred->dom_lca(LCA); LCA = pred->dom_lca(LCA);
} }
} }
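
raise_LCA_above_use moves the current LCA up the dominator tree so the definition dominates each use; for a Phi use it considers the predecessor block feeding each matching input. The underlying LCA walk, shown here as a self-contained sketch rather than the actual Block::dom_lca, is a depth-balanced climb along immediate dominators:

    struct DomBlk {
      DomBlk*  idom;        // immediate dominator, nullptr at the root
      unsigned dom_depth;   // root has depth 0
    };

    // Least common ancestor in the dominator tree: repeatedly move the deeper of
    // the two blocks up its idom chain until the walks meet.
    DomBlk* dom_lca(DomBlk* a, DomBlk* b) {
      while (a != b) {
        if (a->dom_depth >= b->dom_depth) {
          a = a->idom;
        } else {
          b = b->idom;
        }
      }
      return a;
    }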
@ -342,8 +344,7 @@ static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array
// which are marked with the given index. Return the LCA (in the dom tree) // which are marked with the given index. Return the LCA (in the dom tree)
// of all marked blocks. If there are none marked, return the original // of all marked blocks. If there are none marked, return the original
// LCA. // LCA.
static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
Block* early, Block_Array &bbs) {
Block_List worklist; Block_List worklist;
worklist.push(LCA); worklist.push(LCA);
while (worklist.size() > 0) { while (worklist.size() > 0) {
@ -366,7 +367,7 @@ static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
} else { } else {
// Keep searching through this block's predecessors. // Keep searching through this block's predecessors.
for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) { for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
Block* mid_parent = bbs[ mid->pred(j)->_idx ]; Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
worklist.push(mid_parent); worklist.push(mid_parent);
} }
} }
@ -384,7 +385,7 @@ static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
// be earlier (at a shallower dom_depth) than the true schedule_early // be earlier (at a shallower dom_depth) than the true schedule_early
// point of the node. We compute this earlier block as a more permissive // point of the node. We compute this earlier block as a more permissive
// site for anti-dependency insertion, but only if subsume_loads is enabled. // site for anti-dependency insertion, but only if subsume_loads is enabled.
static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) { static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
Node* base; Node* base;
Node* index; Node* index;
Node* store = load->in(MemNode::Memory); Node* store = load->in(MemNode::Memory);
@ -412,12 +413,12 @@ static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
Block* deepb = NULL; // Deepest block so far Block* deepb = NULL; // Deepest block so far
int deepb_dom_depth = 0; int deepb_dom_depth = 0;
for (int i = 0; i < mem_inputs_length; i++) { for (int i = 0; i < mem_inputs_length; i++) {
Block* inb = bbs[mem_inputs[i]->_idx]; Block* inb = cfg->get_block_for_node(mem_inputs[i]);
if (deepb_dom_depth < (int) inb->_dom_depth) { if (deepb_dom_depth < (int) inb->_dom_depth) {
// The new inb must be dominated by the previous deepb. // The new inb must be dominated by the previous deepb.
// The various inputs must be linearly ordered in the dom // The various inputs must be linearly ordered in the dom
// tree, or else there will not be a unique deepest block. // tree, or else there will not be a unique deepest block.
DEBUG_ONLY(assert_dom(deepb, inb, load, bbs)); DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
deepb = inb; // Save deepest block deepb = inb; // Save deepest block
deepb_dom_depth = deepb->_dom_depth; deepb_dom_depth = deepb->_dom_depth;
} }
@ -488,14 +489,14 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
// and other inputs are first available. (Computed by schedule_early.) // and other inputs are first available. (Computed by schedule_early.)
// For normal loads, 'early' is the shallowest place (dom graph wise) // For normal loads, 'early' is the shallowest place (dom graph wise)
// to look for anti-deps between this load and any store. // to look for anti-deps between this load and any store.
Block* early = _bbs[load_index]; Block* early = get_block_for_node(load);
// If we are subsuming loads, compute an "early" block that only considers // If we are subsuming loads, compute an "early" block that only considers
// memory or address inputs. This block may be different than the // memory or address inputs. This block may be different than the
// schedule_early block in that it could be at an even shallower depth in the // schedule_early block in that it could be at an even shallower depth in the
// dominator tree, and allow for a broader discovery of anti-dependences. // dominator tree, and allow for a broader discovery of anti-dependences.
if (C->subsume_loads()) { if (C->subsume_loads()) {
early = memory_early_block(load, early, _bbs); early = memory_early_block(load, early, this);
} }
ResourceArea *area = Thread::current()->resource_area(); ResourceArea *area = Thread::current()->resource_area();
@ -619,7 +620,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
// or else observe that 'store' is all the way up in the // or else observe that 'store' is all the way up in the
// earliest legal block for 'load'. In the latter case, // earliest legal block for 'load'. In the latter case,
// immediately insert an anti-dependence edge. // immediately insert an anti-dependence edge.
Block* store_block = _bbs[store->_idx]; Block* store_block = get_block_for_node(store);
assert(store_block != NULL, "unused killing projections skipped above"); assert(store_block != NULL, "unused killing projections skipped above");
if (store->is_Phi()) { if (store->is_Phi()) {
@ -637,7 +638,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) { for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
if (store->in(j) == mem) { // Found matching input? if (store->in(j) == mem) { // Found matching input?
DEBUG_ONLY(found_match = true); DEBUG_ONLY(found_match = true);
Block* pred_block = _bbs[store_block->pred(j)->_idx]; Block* pred_block = get_block_for_node(store_block->pred(j));
if (pred_block != early) { if (pred_block != early) {
// If any predecessor of the Phi matches the load's "early block", // If any predecessor of the Phi matches the load's "early block",
// we do not need a precedence edge between the Phi and 'load' // we do not need a precedence edge between the Phi and 'load'
@ -711,7 +712,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
// preventing the load from sinking past any block containing // preventing the load from sinking past any block containing
// a store that may invalidate the memory state required by 'load'. // a store that may invalidate the memory state required by 'load'.
if (must_raise_LCA) if (must_raise_LCA)
LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs); LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
if (LCA == early) return LCA; if (LCA == early) return LCA;
// Insert anti-dependence edges from 'load' to each store // Insert anti-dependence edges from 'load' to each store
@ -720,7 +721,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
if (LCA->raise_LCA_mark() == load_index) { if (LCA->raise_LCA_mark() == load_index) {
while (non_early_stores.size() > 0) { while (non_early_stores.size() > 0) {
Node* store = non_early_stores.pop(); Node* store = non_early_stores.pop();
Block* store_block = _bbs[store->_idx]; Block* store_block = get_block_for_node(store);
if (store_block == LCA) { if (store_block == LCA) {
// add anti_dependence from store to load in its own block // add anti_dependence from store to load in its own block
assert(store != load->in(0), "dependence cycle found"); assert(store != load->in(0), "dependence cycle found");
@ -754,7 +755,7 @@ private:
public: public:
// Constructor for the iterator // Constructor for the iterator
Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs); Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
// Postincrement operator to iterate over the nodes // Postincrement operator to iterate over the nodes
Node *next(); Node *next();
@ -762,12 +763,12 @@ public:
private: private:
VectorSet &_visited; VectorSet &_visited;
Node_List &_stack; Node_List &_stack;
Block_Array &_bbs; PhaseCFG &_cfg;
}; };
// Constructor for the Node_Backward_Iterator // Constructor for the Node_Backward_Iterator
Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs ) Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
: _visited(visited), _stack(stack), _bbs(bbs) { : _visited(visited), _stack(stack), _cfg(cfg) {
// The stack should contain exactly the root // The stack should contain exactly the root
stack.clear(); stack.clear();
stack.push(root); stack.push(root);
@ -797,8 +798,8 @@ Node *Node_Backward_Iterator::next() {
_visited.set(self->_idx); _visited.set(self->_idx);
// Now schedule all uses as late as possible. // Now schedule all uses as late as possible.
uint src = self->is_Proj() ? self->in(0)->_idx : self->_idx; const Node* src = self->is_Proj() ? self->in(0) : self;
uint src_rpo = _bbs[src]->_rpo; uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
// Schedule all nodes in a post-order visit // Schedule all nodes in a post-order visit
Node *unvisited = NULL; // Unvisited anti-dependent Node, if any Node *unvisited = NULL; // Unvisited anti-dependent Node, if any
@ -814,7 +815,7 @@ Node *Node_Backward_Iterator::next() {
// do not traverse backward control edges // do not traverse backward control edges
Node *use = n->is_Proj() ? n->in(0) : n; Node *use = n->is_Proj() ? n->in(0) : n;
uint use_rpo = _bbs[use->_idx]->_rpo; uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
if ( use_rpo < src_rpo ) if ( use_rpo < src_rpo )
continue; continue;
@ -852,7 +853,7 @@ void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
tty->print("\n#---- ComputeLatenciesBackwards ----\n"); tty->print("\n#---- ComputeLatenciesBackwards ----\n");
#endif #endif
Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs); Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
Node *n; Node *n;
// Walk over all the nodes from last to first // Walk over all the nodes from last to first
@ -883,7 +884,7 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
uint nlen = n->len(); uint nlen = n->len();
uint use_latency = _node_latency->at_grow(n->_idx); uint use_latency = _node_latency->at_grow(n->_idx);
uint use_pre_order = _bbs[n->_idx]->_pre_order; uint use_pre_order = get_block_for_node(n)->_pre_order;
for ( uint j=0; j<nlen; j++ ) { for ( uint j=0; j<nlen; j++ ) {
Node *def = n->in(j); Node *def = n->in(j);
@ -903,7 +904,7 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
#endif #endif
// If the defining block is not known, assume it is ok // If the defining block is not known, assume it is ok
Block *def_block = _bbs[def->_idx]; Block *def_block = get_block_for_node(def);
uint def_pre_order = def_block ? def_block->_pre_order : 0; uint def_pre_order = def_block ? def_block->_pre_order : 0;
if ( (use_pre_order < def_pre_order) || if ( (use_pre_order < def_pre_order) ||
@ -931,10 +932,11 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
// Compute the latency of a specific use // Compute the latency of a specific use
int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) { int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
// If self-reference, return no latency // If self-reference, return no latency
if (use == n || use->is_Root()) if (use == n || use->is_Root()) {
return 0; return 0;
}
uint def_pre_order = _bbs[def->_idx]->_pre_order; uint def_pre_order = get_block_for_node(def)->_pre_order;
uint latency = 0; uint latency = 0;
// If the use is not a projection, then it is simple... // If the use is not a projection, then it is simple...
@ -946,7 +948,7 @@ int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
} }
#endif #endif
uint use_pre_order = _bbs[use->_idx]->_pre_order; uint use_pre_order = get_block_for_node(use)->_pre_order;
if (use_pre_order < def_pre_order) if (use_pre_order < def_pre_order)
return 0; return 0;
@ -1018,7 +1020,7 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx); uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
uint end_latency = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx); uint end_latency = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
bool in_latency = (target <= start_latency); bool in_latency = (target <= start_latency);
const Block* root_block = _bbs[_root->_idx]; const Block* root_block = get_block_for_node(_root);
// Turn off latency scheduling if scheduling is just plain off // Turn off latency scheduling if scheduling is just plain off
if (!C->do_scheduling()) if (!C->do_scheduling())
@ -1126,12 +1128,12 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
tty->print("\n#---- schedule_late ----\n"); tty->print("\n#---- schedule_late ----\n");
#endif #endif
Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs); Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
Node *self; Node *self;
// Walk over all the nodes from last to first // Walk over all the nodes from last to first
while (self = iter.next()) { while (self = iter.next()) {
Block* early = _bbs[self->_idx]; // Earliest legal placement Block* early = get_block_for_node(self); // Earliest legal placement
if (self->is_top()) { if (self->is_top()) {
// Top node goes in bb #2 with other constants. // Top node goes in bb #2 with other constants.
@ -1179,7 +1181,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) { for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
// For all uses, find LCA // For all uses, find LCA
Node* use = self->fast_out(i); Node* use = self->fast_out(i);
LCA = raise_LCA_above_use(LCA, use, self, _bbs); LCA = raise_LCA_above_use(LCA, use, self, this);
} }
} // (Hide defs of imax, i from rest of block.) } // (Hide defs of imax, i from rest of block.)
@ -1187,7 +1189,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
// requirement for correctness but it reduces useless // requirement for correctness but it reduces useless
// interference between temps and other nodes. // interference between temps and other nodes.
if (mach != NULL && mach->is_MachTemp()) { if (mach != NULL && mach->is_MachTemp()) {
_bbs.map(self->_idx, LCA); map_node_to_block(self, LCA);
LCA->add_inst(self); LCA->add_inst(self);
continue; continue;
} }
@ -1262,10 +1264,10 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
} }
#endif #endif
// Initialize the bbs.map for things on the proj_list // Initialize the node to block mapping for things on the proj_list
uint i; for (uint i = 0; i < proj_list.size(); i++) {
for( i=0; i < proj_list.size(); i++ ) unmap_node_from_block(proj_list[i]);
_bbs.map(proj_list[i]->_idx, NULL); }
// Set the basic block for Nodes pinned into blocks // Set the basic block for Nodes pinned into blocks
Arena *a = Thread::current()->resource_area(); Arena *a = Thread::current()->resource_area();
@ -1333,7 +1335,7 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) { for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
Node *proj = matcher._null_check_tests[i ]; Node *proj = matcher._null_check_tests[i ];
Node *val = matcher._null_check_tests[i+1]; Node *val = matcher._null_check_tests[i+1];
_bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons); get_block_for_node(proj)->implicit_null_check(this, proj, val, allowed_reasons);
// The implicit_null_check will only perform the transformation // The implicit_null_check will only perform the transformation
// if the null branch is truly uncommon, *and* it leads to an // if the null branch is truly uncommon, *and* it leads to an
// uncommon trap. Combined with the too_many_traps guards // uncommon trap. Combined with the too_many_traps guards
@ -1353,7 +1355,7 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
uint max_idx = C->unique(); uint max_idx = C->unique();
GrowableArray<int> ready_cnt(max_idx, max_idx, -1); GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
visited.Clear(); visited.Clear();
for (i = 0; i < _num_blocks; i++) { for (uint i = 0; i < _num_blocks; i++) {
if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) { if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
C->record_method_not_compilable("local schedule failed"); C->record_method_not_compilable("local schedule failed");
@ -1364,8 +1366,9 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
// If we inserted any instructions between a Call and its CatchNode, // If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch. // clone the instructions on all paths below the Catch.
for( i=0; i < _num_blocks; i++ ) for (uint i = 0; i < _num_blocks; i++) {
_blocks[i]->call_catch_cleanup(_bbs, C); _blocks[i]->call_catch_cleanup(this, C);
}
#ifndef PRODUCT #ifndef PRODUCT
if (trace_opto_pipelining()) { if (trace_opto_pipelining()) {
@ -1392,7 +1395,7 @@ void PhaseCFG::Estimate_Block_Frequency() {
Block_List worklist; Block_List worklist;
Block* root_blk = _blocks[0]; Block* root_blk = _blocks[0];
for (uint i = 1; i < root_blk->num_preds(); i++) { for (uint i = 1; i < root_blk->num_preds(); i++) {
Block *pb = _bbs[root_blk->pred(i)->_idx]; Block *pb = get_block_for_node(root_blk->pred(i));
if (pb->has_uncommon_code()) { if (pb->has_uncommon_code()) {
worklist.push(pb); worklist.push(pb);
} }
@ -1401,7 +1404,7 @@ void PhaseCFG::Estimate_Block_Frequency() {
Block* uct = worklist.pop(); Block* uct = worklist.pop();
if (uct == _broot) continue; if (uct == _broot) continue;
for (uint i = 1; i < uct->num_preds(); i++) { for (uint i = 1; i < uct->num_preds(); i++) {
Block *pb = _bbs[uct->pred(i)->_idx]; Block *pb = get_block_for_node(uct->pred(i));
if (pb->_num_succs == 1) { if (pb->_num_succs == 1) {
worklist.push(pb); worklist.push(pb);
} else if (pb->num_fall_throughs() == 2) { } else if (pb->num_fall_throughs() == 2) {
@ -1430,7 +1433,7 @@ void PhaseCFG::Estimate_Block_Frequency() {
Block_List worklist; Block_List worklist;
Block* root_blk = _blocks[0]; Block* root_blk = _blocks[0];
for (uint i = 1; i < root_blk->num_preds(); i++) { for (uint i = 1; i < root_blk->num_preds(); i++) {
Block *pb = _bbs[root_blk->pred(i)->_idx]; Block *pb = get_block_for_node(root_blk->pred(i));
if (pb->has_uncommon_code()) { if (pb->has_uncommon_code()) {
worklist.push(pb); worklist.push(pb);
} }
@ -1439,7 +1442,7 @@ void PhaseCFG::Estimate_Block_Frequency() {
Block* uct = worklist.pop(); Block* uct = worklist.pop();
uct->_freq = PROB_MIN; uct->_freq = PROB_MIN;
for (uint i = 1; i < uct->num_preds(); i++) { for (uint i = 1; i < uct->num_preds(); i++) {
Block *pb = _bbs[uct->pred(i)->_idx]; Block *pb = get_block_for_node(uct->pred(i));
if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) { if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
worklist.push(pb); worklist.push(pb);
} }
@ -1499,7 +1502,7 @@ CFGLoop* PhaseCFG::create_loop_tree() {
Block* loop_head = b; Block* loop_head = b;
assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
Node* tail_n = loop_head->pred(LoopNode::LoopBackControl); Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
Block* tail = _bbs[tail_n->_idx]; Block* tail = get_block_for_node(tail_n);
// Defensively filter out Loop nodes for non-single-entry loops. // Defensively filter out Loop nodes for non-single-entry loops.
// For all reasonable loops, the head occurs before the tail in RPO. // For all reasonable loops, the head occurs before the tail in RPO.
@ -1514,13 +1517,13 @@ CFGLoop* PhaseCFG::create_loop_tree() {
loop_head->_loop = nloop; loop_head->_loop = nloop;
// Add to nloop so push_pred() will skip over inner loops // Add to nloop so push_pred() will skip over inner loops
nloop->add_member(loop_head); nloop->add_member(loop_head);
nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs); nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
while (worklist.size() > 0) { while (worklist.size() > 0) {
Block* member = worklist.pop(); Block* member = worklist.pop();
if (member != loop_head) { if (member != loop_head) {
for (uint j = 1; j < member->num_preds(); j++) { for (uint j = 1; j < member->num_preds(); j++) {
nloop->push_pred(member, j, worklist, _bbs); nloop->push_pred(member, j, worklist, this);
} }
} }
} }
@ -1557,9 +1560,9 @@ CFGLoop* PhaseCFG::create_loop_tree() {
} }
//------------------------------push_pred-------------------------------------- //------------------------------push_pred--------------------------------------
void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) { void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
Node* pred_n = blk->pred(i); Node* pred_n = blk->pred(i);
Block* pred = node_to_blk[pred_n->_idx]; Block* pred = cfg->get_block_for_node(pred_n);
CFGLoop *pred_loop = pred->_loop; CFGLoop *pred_loop = pred->_loop;
if (pred_loop == NULL) { if (pred_loop == NULL) {
// Filter out blocks for non-single-entry loops. // Filter out blocks for non-single-entry loops.
@ -1580,7 +1583,7 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& no
Block* pred_head = pred_loop->head(); Block* pred_head = pred_loop->head();
assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
assert(pred_head != head(), "loop head in only one loop"); assert(pred_head != head(), "loop head in only one loop");
push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk); push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
} else { } else {
assert(pred_loop->_parent == this && _parent == NULL, "just checking"); assert(pred_loop->_parent == this && _parent == NULL, "just checking");
} }
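
push_pred above feeds a predecessor worklist that collects the members of a natural loop starting from the back edge. A self-contained sketch of that collection, using plain containers instead of Block_List/CFGLoop, is:

    #include <vector>

    struct LoopBlk {
      std::vector<LoopBlk*> preds;
      bool in_loop = false;
    };

    // Mark every block of the natural loop with header 'head' and back edge
    // coming from 'back_edge_src': walk predecessors until the head is reached.
    void collect_loop(LoopBlk* head, LoopBlk* back_edge_src) {
      std::vector<LoopBlk*> worklist;
      head->in_loop = true;                 // marking the head first stops the walk there
      back_edge_src->in_loop = true;
      worklist.push_back(back_edge_src);
      while (!worklist.empty()) {
        LoopBlk* b = worklist.back();
        worklist.pop_back();
        for (LoopBlk* p : b->preds) {
          if (!p->in_loop) {
            p->in_loop = true;
            worklist.push_back(p);
          }
        }
      }
    }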

View File

@ -413,9 +413,9 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
print_prop("debug_idx", node->_debug_idx); print_prop("debug_idx", node->_debug_idx);
#endif #endif
if(C->cfg() != NULL) { if (C->cfg() != NULL) {
Block *block = C->cfg()->_bbs[node->_idx]; Block* block = C->cfg()->get_block_for_node(node);
if(block == NULL) { if (block == NULL) {
print_prop("block", C->cfg()->_blocks[0]->_pre_order); print_prop("block", C->cfg()->_blocks[0]->_pre_order);
} else { } else {
print_prop("block", block->_pre_order); print_prop("block", block->_pre_order);

View File

@ -565,7 +565,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
lrgs(r)._def = 0; lrgs(r)._def = 0;
} }
n->disconnect_inputs(NULL, C); n->disconnect_inputs(NULL, C);
_cfg._bbs.map(n->_idx,NULL); _cfg.unmap_node_from_block(n);
n->replace_by(C->top()); n->replace_by(C->top());
// Since yanking a Node from block, high pressure moves up one // Since yanking a Node from block, high pressure moves up one
hrp_index[0]--; hrp_index[0]--;
@ -607,7 +607,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
if( n->is_SpillCopy() if( n->is_SpillCopy()
&& lrgs(r).is_singledef() // MultiDef live range can still split && lrgs(r).is_singledef() // MultiDef live range can still split
&& n->outcnt() == 1 // and use must be in this block && n->outcnt() == 1 // and use must be in this block
&& _cfg._bbs[n->unique_out()->_idx] == b ) { && _cfg.get_block_for_node(n->unique_out()) == b ) {
// All single-use MachSpillCopy(s) that immediately precede their // All single-use MachSpillCopy(s) that immediately precede their
// use must color early. If a longer live range steals their // use must color early. If a longer live range steals their
// color, the spill copy will split and may push another spill copy // color, the spill copy will split and may push another spill copy

View File

@ -237,7 +237,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
} }
// Check ctrl input to see if the null-check dominates the memory op // Check ctrl input to see if the null-check dominates the memory op
Block *cb = cfg->_bbs[mach->_idx]; Block *cb = cfg->get_block_for_node(mach);
cb = cb->_idom; // Always hoist at least 1 block cb = cb->_idom; // Always hoist at least 1 block
if( !was_store ) { // Stores can be hoisted only one block if( !was_store ) { // Stores can be hoisted only one block
while( cb->_dom_depth > (_dom_depth + 1)) while( cb->_dom_depth > (_dom_depth + 1))
@ -262,7 +262,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
if( is_decoden ) continue; if( is_decoden ) continue;
} }
// Block of memory-op input // Block of memory-op input
Block *inb = cfg->_bbs[mach->in(j)->_idx]; Block *inb = cfg->get_block_for_node(mach->in(j));
Block *b = this; // Start from null check Block *b = this; // Start from null check
while( b != inb && b->_dom_depth > inb->_dom_depth ) while( b != inb && b->_dom_depth > inb->_dom_depth )
b = b->_idom; // search upwards for input b = b->_idom; // search upwards for input
@ -272,7 +272,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
} }
if( j > 0 ) if( j > 0 )
continue; continue;
Block *mb = cfg->_bbs[mach->_idx]; Block *mb = cfg->get_block_for_node(mach);
// Hoisting stores requires more checks for the anti-dependence case. // Hoisting stores requires more checks for the anti-dependence case.
// Give up hoisting if we have to move the store past any load. // Give up hoisting if we have to move the store past any load.
if( was_store ) { if( was_store ) {
@ -291,7 +291,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
break; // Found anti-dependent load break; // Found anti-dependent load
// Make sure control does not do a merge (would have to check allpaths) // Make sure control does not do a merge (would have to check allpaths)
if( b->num_preds() != 2 ) break; if( b->num_preds() != 2 ) break;
b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
} }
if( b != this ) continue; if( b != this ) continue;
} }
@ -303,15 +303,15 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
// Found a candidate! Pick one with least dom depth - the highest // Found a candidate! Pick one with least dom depth - the highest
// in the dom tree should be closest to the null check. // in the dom tree should be closest to the null check.
if( !best || if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
best = mach; best = mach;
bidx = vidx; bidx = vidx;
} }
} }
// No candidate! // No candidate!
if( !best ) return; if (best == NULL) {
return;
}
// ---- Found an implicit null check // ---- Found an implicit null check
extern int implicit_null_checks; extern int implicit_null_checks;
@ -319,29 +319,29 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
if( is_decoden ) { if( is_decoden ) {
// Check if we need to hoist decodeHeapOop_not_null first. // Check if we need to hoist decodeHeapOop_not_null first.
Block *valb = cfg->_bbs[val->_idx]; Block *valb = cfg->get_block_for_node(val);
if( this != valb && this->_dom_depth < valb->_dom_depth ) { if( this != valb && this->_dom_depth < valb->_dom_depth ) {
// Hoist it up to the end of the test block. // Hoist it up to the end of the test block.
valb->find_remove(val); valb->find_remove(val);
this->add_inst(val); this->add_inst(val);
cfg->_bbs.map(val->_idx,this); cfg->map_node_to_block(val, this);
// DecodeN on x86 may kill flags. Check for flag-killing projections // DecodeN on x86 may kill flags. Check for flag-killing projections
// that also need to be hoisted. // that also need to be hoisted.
for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
Node* n = val->fast_out(j); Node* n = val->fast_out(j);
if( n->is_MachProj() ) { if( n->is_MachProj() ) {
cfg->_bbs[n->_idx]->find_remove(n); cfg->get_block_for_node(n)->find_remove(n);
this->add_inst(n); this->add_inst(n);
cfg->_bbs.map(n->_idx,this); cfg->map_node_to_block(n, this);
} }
} }
} }
} }
// Hoist the memory candidate up to the end of the test block. // Hoist the memory candidate up to the end of the test block.
Block *old_block = cfg->_bbs[best->_idx]; Block *old_block = cfg->get_block_for_node(best);
old_block->find_remove(best); old_block->find_remove(best);
add_inst(best); add_inst(best);
cfg->_bbs.map(best->_idx,this); cfg->map_node_to_block(best, this);
// Move the control dependence // Move the control dependence
if (best->in(0) && best->in(0) == old_block->_nodes[0]) if (best->in(0) && best->in(0) == old_block->_nodes[0])
@ -352,9 +352,9 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) { for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
Node* n = best->fast_out(j); Node* n = best->fast_out(j);
if( n->is_MachProj() ) { if( n->is_MachProj() ) {
cfg->_bbs[n->_idx]->find_remove(n); cfg->get_block_for_node(n)->find_remove(n);
add_inst(n); add_inst(n);
cfg->_bbs.map(n->_idx,this); cfg->map_node_to_block(n, this);
} }
} }
@ -385,7 +385,7 @@ void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowe
Node *old_tst = proj->in(0); Node *old_tst = proj->in(0);
MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx); MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
_nodes.map(end_idx(),nul_chk); _nodes.map(end_idx(),nul_chk);
cfg->_bbs.map(nul_chk->_idx,this); cfg->map_node_to_block(nul_chk, this);
// Redirect users of old_test to nul_chk // Redirect users of old_test to nul_chk
for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2) for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
old_tst->last_out(i2)->set_req(0, nul_chk); old_tst->last_out(i2)->set_req(0, nul_chk);
@ -468,7 +468,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
Node* use = n->fast_out(j); Node* use = n->fast_out(j);
// The use is a conditional branch, make them adjacent // The use is a conditional branch, make them adjacent
if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) { if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
found_machif = true; found_machif = true;
break; break;
} }
@ -529,13 +529,14 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
//------------------------------set_next_call---------------------------------- //------------------------------set_next_call----------------------------------
void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) { void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
if( next_call.test_set(n->_idx) ) return; if( next_call.test_set(n->_idx) ) return;
for( uint i=0; i<n->len(); i++ ) { for( uint i=0; i<n->len(); i++ ) {
Node *m = n->in(i); Node *m = n->in(i);
if( !m ) continue; // must see all nodes in block that precede call if( !m ) continue; // must see all nodes in block that precede call
if( bbs[m->_idx] == this ) if (cfg->get_block_for_node(m) == this) {
set_next_call( m, next_call, bbs ); set_next_call(m, next_call, cfg);
}
} }
} }
@ -545,12 +546,12 @@ void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
// next subroutine call get priority - basically it moves things NOT needed // next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call. This prevents me from trying to // for the next call till after the call. This prevents me from trying to
// carry lots of stuff live across a call. // carry lots of stuff live across a call.
void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) { void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
// Find the next control-defining Node in this block // Find the next control-defining Node in this block
Node* call = NULL; Node* call = NULL;
for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) { for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
Node* m = this_call->fast_out(i); Node* m = this_call->fast_out(i);
if( bbs[m->_idx] == this && // Local-block user if(cfg->get_block_for_node(m) == this && // Local-block user
m != this_call && // Not self-start node m != this_call && // Not self-start node
m->is_MachCall() ) m->is_MachCall() )
call = m; call = m;
@ -558,7 +559,7 @@ void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Ar
} }
if (call == NULL) return; // No next call (e.g., block end is near) if (call == NULL) return; // No next call (e.g., block end is near)
// Set next-call for all inputs to this call // Set next-call for all inputs to this call
set_next_call(call, next_call, bbs); set_next_call(call, next_call, cfg);
} }
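
set_next_call and needed_for_next_call above mark, within a single block, everything the next call depends on, by walking inputs recursively while never leaving the block. A self-contained sketch of that marking with a simplified node type, not the HotSpot code:

    #include <unordered_set>
    #include <vector>

    struct SNode {
      int block_id;                 // which basic block the node was placed in
      std::vector<SNode*> in;       // input edges
    };

    // Record every node, transitively reachable through inputs, that lives in
    // the same block as the call and is therefore needed before the call.
    void mark_needed(SNode* n, int this_block, std::unordered_set<SNode*>& needed) {
      if (!needed.insert(n).second) return;          // already marked
      for (SNode* m : n->in) {
        if (m != nullptr && m->block_id == this_block) {
          mark_needed(m, this_block, needed);        // stay inside the block
        }
      }
    }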
//------------------------------add_call_kills------------------------------------- //------------------------------add_call_kills-------------------------------------
@ -578,7 +579,7 @@ void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_p
//------------------------------sched_call------------------------------------- //------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) { uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
RegMask regs; RegMask regs;
// Schedule all the users of the call right now. All the users are // Schedule all the users of the call right now. All the users are
@ -597,12 +598,14 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
// Check for scheduling the next control-definer // Check for scheduling the next control-definer
if( n->bottom_type() == Type::CONTROL ) if( n->bottom_type() == Type::CONTROL )
// Warm up next pile of heuristic bits // Warm up next pile of heuristic bits
needed_for_next_call(n, next_call, bbs); needed_for_next_call(n, next_call, cfg);
// Children of projections are now all ready // Children of projections are now all ready
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j); // Get user Node* m = n->fast_out(j); // Get user
if( bbs[m->_idx] != this ) continue; if(cfg->get_block_for_node(m) != this) {
continue;
}
if( m->is_Phi() ) continue; if( m->is_Phi() ) continue;
int m_cnt = ready_cnt.at(m->_idx)-1; int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt); ready_cnt.at_put(m->_idx, m_cnt);
@ -620,7 +623,7 @@ uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_
uint r_cnt = mcall->tf()->range()->cnt(); uint r_cnt = mcall->tf()->range()->cnt();
int op = mcall->ideal_Opcode(); int op = mcall->ideal_Opcode();
MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj ); MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
bbs.map(proj->_idx,this); cfg->map_node_to_block(proj, this);
_nodes.insert(node_cnt++, proj); _nodes.insert(node_cnt++, proj);
// Select the right register save policy. // Select the right register save policy.
@ -708,7 +711,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
uint local = 0; uint local = 0;
for( uint j=0; j<cnt; j++ ) { for( uint j=0; j<cnt; j++ ) {
Node *m = n->in(j); Node *m = n->in(j);
if( m && cfg->_bbs[m->_idx] == this && !m->is_top() ) if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
local++; // One more block-local input local++; // One more block-local input
} }
ready_cnt.at_put(n->_idx, local); // Count em up ready_cnt.at_put(n->_idx, local); // Count em up
@ -720,7 +723,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
for (uint prec = n->req(); prec < n->len(); prec++) { for (uint prec = n->req(); prec < n->len(); prec++) {
Node* oop_store = n->in(prec); Node* oop_store = n->in(prec);
if (oop_store != NULL) { if (oop_store != NULL) {
assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark"); assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
} }
} }
} }
@ -753,7 +756,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
Node *n = _nodes[i3]; // Get pre-scheduled Node *n = _nodes[i3]; // Get pre-scheduled
for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
Node* m = n->fast_out(j); Node* m = n->fast_out(j);
if( cfg->_bbs[m->_idx] ==this ) { // Local-block user if (cfg->get_block_for_node(m) == this) { // Local-block user
int m_cnt = ready_cnt.at(m->_idx)-1; int m_cnt = ready_cnt.at(m->_idx)-1;
ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count
} }
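
The ready_cnt bookkeeping in schedule_local is a standard list-scheduling device: a node's count starts at its number of block-local inputs, scheduling a node decrements the counts of its block-local users, and a node joins the worklist once its count reaches zero. A self-contained sketch of that loop, with indices instead of Node pointers and no selection heuristics:

    #include <vector>

    struct SchedNode {
      std::vector<int> local_inputs;   // inputs placed in the same block
      std::vector<int> local_users;    // users placed in the same block
    };

    // Produce one legal local order of the block's nodes.
    std::vector<int> schedule_block(const std::vector<SchedNode>& nodes) {
      std::vector<int> ready_cnt(nodes.size());
      std::vector<int> worklist, order;
      for (int i = 0; i < (int)nodes.size(); i++) {
        ready_cnt[i] = (int)nodes[i].local_inputs.size();
        if (ready_cnt[i] == 0) worklist.push_back(i);      // no local inputs: ready now
      }
      while (!worklist.empty()) {
        int n = worklist.back();
        worklist.pop_back();
        order.push_back(n);
        for (int u : nodes[n].local_users) {
          if (--ready_cnt[u] == 0) worklist.push_back(u);  // last local input scheduled
        }
      }
      return order;
    }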
@ -786,7 +789,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
} }
// Warm up the 'next_call' heuristic bits // Warm up the 'next_call' heuristic bits
needed_for_next_call(_nodes[0], next_call, cfg->_bbs); needed_for_next_call(_nodes[0], next_call, cfg);
#ifndef PRODUCT #ifndef PRODUCT
if (cfg->trace_opto_pipelining()) { if (cfg->trace_opto_pipelining()) {
@ -837,7 +840,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
#endif #endif
if( n->is_MachCall() ) { if( n->is_MachCall() ) {
MachCallNode *mcall = n->as_MachCall(); MachCallNode *mcall = n->as_MachCall();
phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call); phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
continue; continue;
} }
@ -847,7 +850,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
regs.OR(n->out_RegMask()); regs.OR(n->out_RegMask());
MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj ); MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
cfg->_bbs.map(proj->_idx,this); cfg->map_node_to_block(proj, this);
_nodes.insert(phi_cnt++, proj); _nodes.insert(phi_cnt++, proj);
add_call_kills(proj, regs, matcher._c_reg_save_policy, false); add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
@ -856,7 +859,9 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &
// Children are now all ready // Children are now all ready
for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) { for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
Node* m = n->fast_out(i5); // Get user Node* m = n->fast_out(i5); // Get user
if( cfg->_bbs[m->_idx] != this ) continue; if (cfg->get_block_for_node(m) != this) {
continue;
}
if( m->is_Phi() ) continue; if( m->is_Phi() ) continue;
if (m->_idx >= max_idx) { // new node, skip it if (m->_idx >= max_idx) { // new node, skip it
assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types"); assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
@ -914,7 +919,7 @@ static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def
} }
//------------------------------catch_cleanup_find_cloned_def------------------ //------------------------------catch_cleanup_find_cloned_def------------------
static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) { static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
assert( use_blk != def_blk, "Inter-block cleanup only"); assert( use_blk != def_blk, "Inter-block cleanup only");
// The use is some block below the Catch. Find and return the clone of the def // The use is some block below the Catch. Find and return the clone of the def
@ -940,7 +945,8 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
// PhiNode, the PhiNode uses from the def and IT's uses need fixup. // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
Node_Array inputs = new Node_List(Thread::current()->resource_area()); Node_Array inputs = new Node_List(Thread::current()->resource_area());
for(uint k = 1; k < use_blk->num_preds(); k++) { for(uint k = 1; k < use_blk->num_preds(); k++) {
inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx)); Block* block = cfg->get_block_for_node(use_blk->pred(k));
inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
} }
// Check to see if the use_blk already has an identical phi inserted. // Check to see if the use_blk already has an identical phi inserted.
@ -962,7 +968,7 @@ static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def
if (fixup == NULL) { if (fixup == NULL) {
Node *new_phi = PhiNode::make(use_blk->head(), def); Node *new_phi = PhiNode::make(use_blk->head(), def);
use_blk->_nodes.insert(1, new_phi); use_blk->_nodes.insert(1, new_phi);
bbs.map(new_phi->_idx, use_blk); cfg->map_node_to_block(new_phi, use_blk);
for (uint k = 1; k < use_blk->num_preds(); k++) { for (uint k = 1; k < use_blk->num_preds(); k++) {
new_phi->set_req(k, inputs[k]); new_phi->set_req(k, inputs[k]);
} }
@ -1002,17 +1008,17 @@ static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg,
//------------------------------catch_cleanup_inter_block--------------------- //------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def". The use is in a different // Fix all input edges in use that reference "def". The use is in a different
// block than the def. // block than the def.
static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) { static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
if( !use_blk ) return; // Can happen if the use is a precedence edge if( !use_blk ) return; // Can happen if the use is a precedence edge
Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx); Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
catch_cleanup_fix_all_inputs(use, def, new_def); catch_cleanup_fix_all_inputs(use, def, new_def);
} }
//------------------------------call_catch_cleanup----------------------------- //------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and its CatchNode, // If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch. // clone the instructions on all paths below the Catch.
void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) { void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
// End of region to clone // End of region to clone
uint end = end_idx(); uint end = end_idx();
@ -1037,7 +1043,7 @@ void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
// since clones dominate on each path. // since clones dominate on each path.
Node *clone = _nodes[j-1]->clone(); Node *clone = _nodes[j-1]->clone();
sb->_nodes.insert( 1, clone ); sb->_nodes.insert( 1, clone );
bbs.map(clone->_idx,sb); cfg->map_node_to_block(clone, sb);
} }
} }
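
The cloning step above copies every instruction that ended up between the call and its Catch to the head of each successor block, so each path below the Catch sees its own dominating copy. A self-contained sketch of just that copying, with strings standing in for cloned nodes:

    #include <string>
    #include <vector>

    struct CatchSucc { std::vector<std::string> insts; };

    // Insert clones of the region between the call and the Catch at the head of
    // every successor, preserving their original order.
    void clone_below_catch(const std::vector<std::string>& region,
                           std::vector<CatchSucc*>& successors) {
      for (CatchSucc* sb : successors) {
        for (auto it = region.rbegin(); it != region.rend(); ++it) {
          sb->insts.insert(sb->insts.begin(), *it);   // reverse insert keeps order
        }
      }
    }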
@ -1054,18 +1060,19 @@ void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
uint max = out->size(); uint max = out->size();
for (uint j = 0; j < max; j++) {// For all users for (uint j = 0; j < max; j++) {// For all users
Node *use = out->pop(); Node *use = out->pop();
Block *buse = bbs[use->_idx]; Block *buse = cfg->get_block_for_node(use);
if( use->is_Phi() ) { if( use->is_Phi() ) {
for( uint k = 1; k < use->req(); k++ ) for( uint k = 1; k < use->req(); k++ )
if( use->in(k) == n ) { if( use->in(k) == n ) {
Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx); Block* block = cfg->get_block_for_node(buse->pred(k));
Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
use->set_req(k, fixup); use->set_req(k, fixup);
} }
} else { } else {
if (this == buse) { if (this == buse) {
catch_cleanup_intra_block(use, n, this, beg, n_clone_idx); catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
} else { } else {
catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx); catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
} }
} }
} // End for all users } // End for all users

View File

@ -101,7 +101,7 @@ void PhaseLive::compute(uint maxlrg) {
for( uint k=1; k<cnt; k++ ) { for( uint k=1; k<cnt; k++ ) {
Node *nk = n->in(k); Node *nk = n->in(k);
uint nkidx = nk->_idx; uint nkidx = nk->_idx;
if( _cfg._bbs[nkidx] != b ) { if (_cfg.get_block_for_node(nk) != b) {
uint u = _names[nkidx]; uint u = _names[nkidx];
use->insert( u ); use->insert( u );
DEBUG_ONLY(def_outside->insert( u );) DEBUG_ONLY(def_outside->insert( u );)
@ -121,7 +121,7 @@ void PhaseLive::compute(uint maxlrg) {
// Push these live-in things to predecessors // Push these live-in things to predecessors
for( uint l=1; l<b->num_preds(); l++ ) { for( uint l=1; l<b->num_preds(); l++ ) {
Block *p = _cfg._bbs[b->pred(l)->_idx]; Block *p = _cfg.get_block_for_node(b->pred(l));
add_liveout( p, use, first_pass ); add_liveout( p, use, first_pass );
// PhiNode uses go in the live-out set of prior blocks. // PhiNode uses go in the live-out set of prior blocks.
@ -142,8 +142,10 @@ void PhaseLive::compute(uint maxlrg) {
assert( delta->count(), "missing delta set" ); assert( delta->count(), "missing delta set" );
// Add new-live-in to predecessors live-out sets // Add new-live-in to predecessors live-out sets
for( uint l=1; l<b->num_preds(); l++ ) for (uint l = 1; l < b->num_preds(); l++) {
add_liveout( _cfg._bbs[b->pred(l)->_idx], delta, first_pass ); Block* block = _cfg.get_block_for_node(b->pred(l));
add_liveout(block, delta, first_pass);
}
freeset(b); freeset(b);
} // End of while-worklist-not-empty } // End of while-worklist-not-empty
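
PhaseLive::compute above is a backward dataflow fixed point: a block's live-in is use(b) | (live-out(b) & ~def(b)), and any growth in live-in is pushed into every predecessor's live-out until nothing changes. A self-contained sketch with bitmask register sets rather than the HotSpot IndexSet machinery:

    #include <vector>

    struct LiveBlk {
      std::vector<int> preds;        // indices of predecessor blocks
      unsigned use = 0, def = 0;     // bitmasks over a small register set
      unsigned live_in = 0, live_out = 0;
    };

    void compute_liveness(std::vector<LiveBlk>& blocks) {
      bool changed = true;
      while (changed) {               // iterate to a fixed point; sets only grow
        changed = false;
        for (LiveBlk& b : blocks) {
          unsigned in = b.use | (b.live_out & ~b.def);
          if (in != b.live_in) {
            b.live_in = in;
            changed = true;
            for (int p : b.preds) {
              blocks[p].live_out |= in;   // push new live-in to predecessors
            }
          }
        }
      }
    }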

View File

@ -624,8 +624,6 @@ bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
} }
#define MAX_UNROLL 16 // maximum number of unrolls for main loop
//------------------------------policy_unroll---------------------------------- //------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not. Unroll if // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if
// the loop is a CountedLoop and the body is small enough. // the loop is a CountedLoop and the body is small enough.
@ -642,7 +640,7 @@ bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false; if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
int future_unroll_ct = cl->unrolled_count() * 2; int future_unroll_ct = cl->unrolled_count() * 2;
if (future_unroll_ct > MAX_UNROLL) return false; if (future_unroll_ct > LoopMaxUnroll) return false;
// Check for initial stride being a small enough constant // Check for initial stride being a small enough constant
if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false; if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
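
The #define MAX_UNROLL 16 removed above is replaced by a LoopMaxUnroll value, presumably a VM flag so the unroll cap can be tuned without rebuilding. Its declaration is not part of this hunk; a plausible globals.hpp entry, with the default taken from the removed constant and the flag kind (product vs. develop) only assumed, would look like:

      // Assumed declaration -- not shown in this changeset; name from the code
      // above, default from the removed #define, flag kind guessed.
      product(intx, LoopMaxUnroll, 16,                                          \
              "Maximum number of unrolls for main loop")                        \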

View File

@ -42,7 +42,6 @@ class AliasInfo;
class AllocateArrayNode; class AllocateArrayNode;
class AllocateNode; class AllocateNode;
class Block; class Block;
class Block_Array;
class BoolNode; class BoolNode;
class BoxLockNode; class BoxLockNode;
class CMoveNode; class CMoveNode;

View File

@ -68,7 +68,6 @@ void Compile::Output() {
return; return;
} }
// Make sure I can find the Start Node // Make sure I can find the Start Node
Block_Array& bbs = _cfg->_bbs;
Block *entry = _cfg->_blocks[1]; Block *entry = _cfg->_blocks[1];
Block *broot = _cfg->_broot; Block *broot = _cfg->_broot;
@ -77,8 +76,8 @@ void Compile::Output() {
// Replace StartNode with prolog // Replace StartNode with prolog
MachPrologNode *prolog = new (this) MachPrologNode(); MachPrologNode *prolog = new (this) MachPrologNode();
entry->_nodes.map( 0, prolog ); entry->_nodes.map( 0, prolog );
bbs.map( prolog->_idx, entry ); _cfg->map_node_to_block(prolog, entry);
bbs.map( start->_idx, NULL ); // start is no longer in any block _cfg->unmap_node_from_block(start); // start is no longer in any block
// Virtual methods need an unverified entry point // Virtual methods need an unverified entry point
@ -117,8 +116,7 @@ void Compile::Output() {
if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) { if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return); MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
b->add_inst( epilog ); b->add_inst( epilog );
bbs.map(epilog->_idx, b); _cfg->map_node_to_block(epilog, b);
//_regalloc->set_bad(epilog->_idx); // Already initialized this way.
} }
} }
} }
@@ -252,7 +250,7 @@ void Compile::Insert_zap_nodes() {
       if (insert) {
         Node *zap = call_zap_node(n->as_MachSafePoint(), i);
         b->_nodes.insert( j, zap );
-        _cfg->_bbs.map( zap->_idx, b );
+        _cfg->map_node_to_block(zap, b);
         ++j;
       }
     }
@@ -1234,7 +1232,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
 #ifdef ASSERT
     if (!b->is_connector()) {
       stringStream st;
-      b->dump_head(&_cfg->_bbs, &st);
+      b->dump_head(_cfg, &st);
       MacroAssembler(cb).block_comment(st.as_string());
     }
     jmp_target[i] = 0;
@@ -1310,7 +1308,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
           MachNode *nop = new (this) MachNopNode(nops_cnt);
           b->_nodes.insert(j++, nop);
           last_inst++;
-          _cfg->_bbs.map( nop->_idx, b );
+          _cfg->map_node_to_block(nop, b);
           nop->emit(*cb, _regalloc);
           cb->flush_bundle(true);
           current_offset = cb->insts_size();
@@ -1395,7 +1393,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
         if (needs_padding && replacement->avoid_back_to_back()) {
           MachNode *nop = new (this) MachNopNode();
           b->_nodes.insert(j++, nop);
-          _cfg->_bbs.map(nop->_idx, b);
+          _cfg->map_node_to_block(nop, b);
           last_inst++;
           nop->emit(*cb, _regalloc);
           cb->flush_bundle(true);
@@ -1549,7 +1547,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
     if( padding > 0 ) {
       MachNode *nop = new (this) MachNopNode(padding / nop_size);
       b->_nodes.insert( b->_nodes.size(), nop );
-      _cfg->_bbs.map( nop->_idx, b );
+      _cfg->map_node_to_block(nop, b);
       nop->emit(*cb, _regalloc);
       current_offset = cb->insts_size();
     }
@@ -1737,7 +1735,6 @@ uint Scheduling::_total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+
 Scheduling::Scheduling(Arena *arena, Compile &compile)
   : _arena(arena),
     _cfg(compile.cfg()),
-    _bbs(compile.cfg()->_bbs),
     _regalloc(compile.regalloc()),
     _reg_node(arena),
     _bundle_instr_count(0),
@@ -2085,8 +2082,9 @@ void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
     if( def->is_Proj() )        // If this is a machine projection, then
       def = def->in(0);         // propagate usage thru to the base instruction
-    if( _bbs[def->_idx] != bb ) // Ignore if not block-local
+    if(_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
       continue;
+    }
     // Compute the latency
     uint l = _bundle_cycle_number + n->latency(i);
@@ -2358,9 +2356,10 @@ void Scheduling::ComputeUseCount(const Block *bb) {
       Node *inp = n->in(k);
       if (!inp) continue;
       assert(inp != n, "no cycles allowed" );
-      if( _bbs[inp->_idx] == bb ) { // Block-local use?
-        if( inp->is_Proj() )      // Skip through Proj's
+      if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
+        if (inp->is_Proj()) {     // Skip through Proj's
           inp = inp->in(0);
+        }
         ++_uses[inp->_idx];       // Count 1 block-local use
       }
     }
@@ -2643,7 +2642,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
     return;
   Node *pinch = _reg_node[def_reg]; // Get pinch point
-  if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
+  if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
       is_def ) {    // Check for a true def (not a kill)
     _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
     return;
@@ -2669,7 +2668,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
       _cfg->C->record_method_not_compilable("too many D-U pinch points");
       return;
     }
-    _bbs.map(pinch->_idx,b);      // Pretend it's valid in this block (lazy init)
+    _cfg->map_node_to_block(pinch, b); // Pretend it's valid in this block (lazy init)
     _reg_node.map(def_reg,pinch); // Record pinch-point
     //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
     if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
@@ -2713,9 +2712,9 @@ void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
     return;
   Node *pinch = _reg_node[use_reg]; // Get pinch point
   // Check for no later def_reg/kill in block
-  if( pinch && _bbs[pinch->_idx] == b &&
+  if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
       // Use has to be block-local as well
-      _bbs[use->_idx] == b ) {
+      _cfg->get_block_for_node(use) == b) {
     if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
         pinch->req() == 1 ) {   // pinch not yet in block?
       pinch->del_req(0);        // yank pointer to later-def, also set flag
@@ -2895,7 +2894,7 @@ void Scheduling::garbage_collect_pinch_nodes() {
     int trace_cnt = 0;
     for (uint k = 0; k < _reg_node.Size(); k++) {
       Node* pinch = _reg_node[k];
-      if (pinch != NULL && pinch->Opcode() == Op_Node &&
+      if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
           // no predecence input edges
          (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
         cleanup_pinch(pinch);

View File

@@ -96,9 +96,6 @@ private:
   // List of nodes currently available for choosing for scheduling
   Node_List _available;
-  // Mapping from node (index) to basic block
-  Block_Array& _bbs;
   // For each instruction beginning a bundle, the number of following
   // nodes to be bundled with it.
   Bundle *_node_bundling_base;

View File

@@ -78,11 +78,13 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const {
 // Helper function for yank_if_dead
 int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
   int blk_adjust=0;
-  Block *oldb = _cfg._bbs[old->_idx];
+  Block *oldb = _cfg.get_block_for_node(old);
   oldb->find_remove(old);
   // Count 1 if deleting an instruction from the current block
-  if( oldb == current_block ) blk_adjust++;
-  _cfg._bbs.map(old->_idx,NULL);
+  if (oldb == current_block) {
+    blk_adjust++;
+  }
+  _cfg.unmap_node_from_block(old);
   OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
   if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
     value->map(old_reg,NULL);  // Yank from value/regnd maps
@@ -433,7 +435,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
     bool missing_some_inputs = false;
     Block *freed = NULL;
     for( j = 1; j < b->num_preds(); j++ ) {
-      Block *pb = _cfg._bbs[b->pred(j)->_idx];
+      Block *pb = _cfg.get_block_for_node(b->pred(j));
       // Remove copies along phi edges
       for( uint k=1; k<phi_dex; k++ )
         elide_copy( b->_nodes[k], j, b, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false );
@@ -478,7 +480,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
     } else {
       if( !freed ) {            // Didn't get a freebie prior block
         // Must clone some data
-        freed = _cfg._bbs[b->pred(1)->_idx];
+        freed = _cfg.get_block_for_node(b->pred(1));
         Node_List &f_value = *blk2value[freed->_pre_order];
         Node_List &f_regnd = *blk2regnd[freed->_pre_order];
         for( uint k = 0; k < (uint)_max_reg; k++ ) {
@@ -488,7 +490,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
       }
       // Merge all inputs together, setting to NULL any conflicts.
       for( j = 1; j < b->num_preds(); j++ ) {
-        Block *pb = _cfg._bbs[b->pred(j)->_idx];
+        Block *pb = _cfg.get_block_for_node(b->pred(j));
         if( pb == freed ) continue; // Did self already via freelist
         Node_List &p_regnd = *blk2regnd[pb->_pre_order];
         for( uint k = 0; k < (uint)_max_reg; k++ ) {
@@ -515,8 +517,9 @@ void PhaseChaitin::post_allocate_copy_removal() {
         u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
       }
       if( u != NodeSentinel ) {    // Junk Phi.  Remove
-        b->_nodes.remove(j--); phi_dex--;
-        _cfg._bbs.map(phi->_idx,NULL);
+        b->_nodes.remove(j--);
+        phi_dex--;
+        _cfg.unmap_node_from_block(phi);
         phi->replace_by(u);
         phi->disconnect_inputs(NULL, C);
         continue;

View File

@@ -132,7 +132,7 @@ void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
   }
   b->_nodes.insert(i,spill);    // Insert node in block
-  _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect
+  _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
   // Adjust the point where we go hi-pressure
   if( i <= b->_ihrp_index ) b->_ihrp_index++;
   if( i <= b->_fhrp_index ) b->_fhrp_index++;
@@ -219,7 +219,7 @@ uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint
       use->set_req(useidx, def);
     } else {
       // Block and index where the use occurs.
-      Block *b = _cfg._bbs[use->_idx];
+      Block *b = _cfg.get_block_for_node(use);
       // Put the clone just prior to use
       int bindex = b->find_node(use);
       // DEF is UP, so must copy it DOWN and hook in USE
@@ -270,7 +270,7 @@ uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint
     int bindex;
     // Phi input spill-copys belong at the end of the prior block
     if( use->is_Phi() ) {
-      b = _cfg._bbs[b->pred(useidx)->_idx];
+      b = _cfg.get_block_for_node(b->pred(useidx));
       bindex = b->end_idx();
     } else {
       // Put the clone just prior to use
@@ -335,7 +335,7 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
       continue;
     }
-    Block *b_def = _cfg._bbs[def->_idx];
+    Block *b_def = _cfg.get_block_for_node(def);
     int idx_def = b_def->find_node(def);
     Node *in_spill = get_spillcopy_wide( in, def, i );
     if( !in_spill ) return 0;   // Bailed out
@@ -589,7 +589,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
           UPblock[slidx] = true;
           // Record following instruction in case 'n' rematerializes and
           // kills flags
-          Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
+          Block *pred1 = _cfg.get_block_for_node(b->pred(1));
           continue;
         }
@@ -601,7 +601,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
         // Grab predecessor block header
         n1 = b->pred(1);
         // Grab the appropriate reaching def info for inpidx
-        pred = _cfg._bbs[n1->_idx];
+        pred = _cfg.get_block_for_node(n1);
         pidx = pred->_pre_order;
         Node **Ltmp = Reaches[pidx];
         bool *Utmp = UP[pidx];
@@ -616,7 +616,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
           // Grab predecessor block headers
           n2 = b->pred(inpidx);
           // Grab the appropriate reaching def info for inpidx
-          pred = _cfg._bbs[n2->_idx];
+          pred = _cfg.get_block_for_node(n2);
           pidx = pred->_pre_order;
           Ltmp = Reaches[pidx];
           Utmp = UP[pidx];
@@ -701,7 +701,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
         // Grab predecessor block header
         n1 = b->pred(1);
         // Grab the appropriate reaching def info for k
-        pred = _cfg._bbs[n1->_idx];
+        pred = _cfg.get_block_for_node(n1);
         pidx = pred->_pre_order;
         Node **Ltmp = Reaches[pidx];
         bool *Utmp = UP[pidx];
@@ -919,7 +919,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
             return 0;
           }
           _lrg_map.extend(def->_idx, 0);
-          _cfg._bbs.map(def->_idx,b);
+          _cfg.map_node_to_block(def, b);
           n->set_req(inpidx, def);
           continue;
         }
@@ -1291,7 +1291,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
   for( insidx = 0; insidx < phis->size(); insidx++ ) {
     Node *phi = phis->at(insidx);
     assert(phi->is_Phi(),"This list must only contain Phi Nodes");
-    Block *b = _cfg._bbs[phi->_idx];
+    Block *b = _cfg.get_block_for_node(phi);
     // Grab the live range number
     uint lidx = _lrg_map.find_id(phi);
     uint slidx = lrg2reach[lidx];
@@ -1315,7 +1315,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
     // DEF has the wrong UP/DOWN value.
     for( uint i = 1; i < b->num_preds(); i++ ) {
       // Get predecessor block pre-order number
-      Block *pred = _cfg._bbs[b->pred(i)->_idx];
+      Block *pred = _cfg.get_block_for_node(b->pred(i));
       pidx = pred->_pre_order;
       // Grab reaching def
       Node *def = Reaches[pidx][slidx];

View File

@@ -3217,15 +3217,6 @@ void VM_RedefineClasses::redefine_single_class(jclass the_jclass,
   JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
   jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop);
-  if (the_class_oop == Universe::reflect_invoke_cache()->klass()) {
-    // We are redefining java.lang.reflect.Method. Method.invoke() is
-    // cached and users of the cache care about each active version of
-    // the method so we have to track this previous version.
-    // Do this before methods get switched
-    Universe::reflect_invoke_cache()->add_previous_version(
-      the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum()));
-  }
   // Deoptimize all compiled code that depends on this class
   flush_dependent_code(the_class, THREAD);

View File

@@ -1098,7 +1098,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
   \
   c2_nonstatic_field(PhaseCFG, _num_blocks, uint) \
   c2_nonstatic_field(PhaseCFG, _blocks, Block_List) \
-  c2_nonstatic_field(PhaseCFG, _bbs, Block_Array) \
+  c2_nonstatic_field(PhaseCFG, _node_to_block_mapping, Block_Array) \
   c2_nonstatic_field(PhaseCFG, _broot, Block*) \
   \
   c2_nonstatic_field(PhaseRegAlloc, _node_regs, OptoRegPair*) \

View File

@@ -225,6 +225,22 @@ void report_untested(const char* file, int line, const char* message);
 void warning(const char* format, ...);
+#ifdef ASSERT
+// Compile-time asserts.
+template <bool> struct StaticAssert;
+template <> struct StaticAssert<true> {};
+// Only StaticAssert<true> is defined, so if cond evaluates to false we get
+// a compile time exception when trying to use StaticAssert<false>.
+#define STATIC_ASSERT(cond)                   \
+  do {                                        \
+    StaticAssert<(cond)> DUMMY_STATIC_ASSERT; \
+    (void)DUMMY_STATIC_ASSERT; /* ignore */   \
+  } while (false)
+#else
+#define STATIC_ASSERT(cond)
+#endif
 // out of shared space reporting
 enum SharedSpaceType {
   SharedPermGen,
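Note: a small standalone illustration of how the STATIC_ASSERT macro added above behaves. Asserting a false condition fails at compile time because only StaticAssert<true> has a definition; the sizeof checks below are example conditions, not taken from the patch.

    // Same shape as the macro added in the hunk above.
    template <bool> struct StaticAssert;
    template <> struct StaticAssert<true> {};

    #define STATIC_ASSERT(cond)                   \
      do {                                        \
        StaticAssert<(cond)> DUMMY_STATIC_ASSERT; \
        (void)DUMMY_STATIC_ASSERT; /* ignore */   \
      } while (false)

    void example() {
      STATIC_ASSERT(sizeof(long long) >= 8);  // compiles: StaticAssert<true> is defined
      // STATIC_ASSERT(sizeof(char) == 2);    // would not compile: StaticAssert<false> is incomplete
    }

    int main() {
      example();
      return 0;
    }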

View File

@@ -306,6 +306,6 @@ class ExceptionMark {
 // which preserves pre-existing exceptions and does not allow new
 // exceptions.
-#define EXCEPTION_MARK Thread* THREAD; ExceptionMark __em(THREAD);
+#define EXCEPTION_MARK Thread* THREAD = NULL; ExceptionMark __em(THREAD);
 #endif // SHARE_VM_UTILITIES_EXCEPTIONS_HPP
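Note: the only change here is that THREAD now starts out as NULL rather than uninitialized. The hunk does not state the motivation; a plausible reading is that it gives the pointer a defined value before ExceptionMark's constructor fills it in, which keeps compilers and static checkers quiet. A standalone sketch of the pattern follows; ExceptionMark's real behaviour is simplified and the names are illustrative.

    #include <cassert>
    #include <cstddef>

    struct Thread { /* stand-in for the VM thread type */ };

    Thread* current_thread() {
      static Thread t;
      return &t;
    }

    // Guard object that receives the caller's pointer by reference and fills it
    // in, roughly the role ExceptionMark plays for THREAD.
    class ExceptionMarkSketch {
     public:
      explicit ExceptionMarkSketch(Thread*& thread) { thread = current_thread(); }
    };

    // Initializing to NULL mirrors the patched macro: the pointer is never read
    // while holding an indeterminate value.
    #define EXCEPTION_MARK_SKETCH Thread* THREAD = NULL; ExceptionMarkSketch __em(THREAD);

    int main() {
      EXCEPTION_MARK_SKETCH
      assert(THREAD != NULL);
      return 0;
    }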

View File

@@ -410,6 +410,8 @@ inline intptr_t align_size_down(intptr_t size, intptr_t alignment) {
   return align_size_down_(size, alignment);
 }
+#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
 // Align objects by rounding up their size, in HeapWord units.
 #define align_object_size_(size) align_size_up_(size, MinObjAlignment)
@@ -428,6 +430,10 @@ inline intptr_t align_object_offset(intptr_t offset) {
   return align_size_up(offset, HeapWordsPerLong);
 }
+inline void* align_pointer_up(const void* addr, size_t size) {
+  return (void*) align_size_up_((uintptr_t)addr, size);
+}
 // Clamp an address to be within a specific page
 // 1. If addr is on the page it is returned as is
 // 2. If addr is above the page_address the start of the *next* page will be returned
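Note: both additions are small alignment helpers. align_size_up_ is defined earlier in globalDefinitions.hpp and is not shown in this diff, so the copy of it below is an assumption based on the usual round-up-to-a-power-of-two formula; the rest mirrors the added lines in a standalone, compilable form.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Assumed definition of the existing helper (not part of this hunk): round
    // size up to the next multiple of alignment, with alignment a power of two.
    #define align_size_up_(size, alignment) \
      (((size) + ((alignment) - 1)) & ~((alignment) - 1))

    // Mirrors the macro added in the first hunk above.
    #define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))

    // Mirrors the inline function added in the second hunk above.
    inline void* align_pointer_up(const void* addr, size_t size) {
      return (void*) align_size_up_((uintptr_t)addr, size);
    }

    int main() {
      assert(align_size_up_(13, 8) == 16);
      assert(is_size_aligned_(16, 8));
      assert(!is_size_aligned_(13, 8));

      char buf[64];
      void* p = align_pointer_up(buf + 1, 16);
      assert(((uintptr_t)p & 15) == 0);              // result is 16-byte aligned
      assert((uintptr_t)p >= (uintptr_t)(buf + 1));  // and not below the original address
      return 0;
    }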
@@ -449,32 +455,6 @@ inline address clamp_address_in_page(address addr, address page_address, intptr_
 // The expected size in bytes of a cache line, used to pad data structures.
 #define DEFAULT_CACHE_LINE_SIZE 64
-// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
-// expected cache line size (a power of two).  The first addend avoids sharing
-// when the start address is not a multiple of alignment; the second maintains
-// alignment of starting addresses that happen to be a multiple.
-#define PADDING_SIZE(type, alignment) \
-  ((alignment) + align_size_up_(sizeof(type), alignment))
-// Templates to create a subclass padded to avoid cache line sharing.  These are
-// effective only when applied to derived-most (leaf) classes.
-// When no args are passed to the base ctor.
-template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
-class Padded: public T {
-private:
-  char _pad_buf_[PADDING_SIZE(T, alignment)];
-};
-// When either 0 or 1 args may be passed to the base ctor.
-template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
-class Padded01: public T {
-public:
-  Padded01(): T() { }
-  Padded01(Arg1T arg1): T(arg1) { }
-private:
-  char _pad_buf_[PADDING_SIZE(T, alignment)];
-};
 //----------------------------------------------------------------------------------------------------
 // Utility macros for compilers

View File

@@ -26,7 +26,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build ClearMethodStateTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ClearMethodStateTest
+ * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* ClearMethodStateTest
 * @summary testing of WB::clearMethodState()
 * @author igor.ignatyev@oracle.com
 */

View File

@@ -61,6 +61,9 @@ public abstract class CompilerWhiteBoxTest {
     /** Value of {@code -XX:TieredStopAtLevel} */
     protected static final int TIERED_STOP_AT_LEVEL
             = Integer.parseInt(getVMOption("TieredStopAtLevel", "0"));
+    /** Flag for verbose output, true if {@code -Dverbose} specified */
+    protected static final boolean IS_VERBOSE
+            = System.getProperty("verbose") != null;
     /**
      * Returns value of VM option.
@@ -268,7 +271,9 @@ public abstract class CompilerWhiteBoxTest {
             }
             result += tmp == null ? 0 : tmp;
         }
-        System.out.println("method was invoked " + count + " times");
+        if (IS_VERBOSE) {
+            System.out.println("method was invoked " + count + " times");
+        }
         return result;
     }
 }

Some files were not shown because too many files have changed in this diff.