Merge
commit f3160929ea
@@ -234,3 +234,4 @@ af9a674e12a16da1a4bd53e4990ddb1121a21ef1 jdk8-b109
 b5d2bf482a3ea1cca08c994512804ffbc73de0a1 jdk8-b110
 b9a0f6c693f347a6f4b9bb994957f4eaa05bdedd jdk8-b111
 ad67c34f79c28a8e755f4a49f313868619d6702c jdk8-b112
+4a4dbcf7cb7d3e1a81beaa3b11cd909f69ebc79a jdk8-b113
@@ -234,3 +234,4 @@ b7e64be81c8a7690703df5711f4fc2375da8a9cb jdk8-b103
 4faa09c7fe555de086dd9048d3c5cc92317d6f45 jdk8-b110
 d086227bfc45d124f09b3bd72a07956b4073bf71 jdk8-b111
 547316ea137d83d9c63083a9b83db64198fe0c81 jdk8-b112
+6ba4c7cb623ec612031e05cf8bf279d8f407bd1e jdk8-b113
@@ -869,6 +869,7 @@ SRC_ROOT
 ZERO_ARCHDEF
 DEFINE_CROSS_COMPILE_ARCH
 LP64
+OPENJDK_TARGET_OS_EXPORT_DIR
 OPENJDK_TARGET_OS_API_DIR
 OPENJDK_TARGET_CPU_JLI_CFLAGS
 OPENJDK_TARGET_CPU_OSARCH
@@ -3864,7 +3865,7 @@ fi
 #CUSTOM_AUTOCONF_INCLUDE

 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1382540536
+DATE_WHEN_GENERATED=1382702260

 ###############################################################################
 #
@@ -7149,6 +7150,13 @@ $as_echo "$COMPILE_TYPE" >&6; }
 fi


+if test "x$OPENJDK_TARGET_OS" = xmacosx; then
+  OPENJDK_TARGET_OS_EXPORT_DIR=macosx
+else
+  OPENJDK_TARGET_OS_EXPORT_DIR=${OPENJDK_TARGET_OS_API_DIR}
+fi
+
+
 if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
 A_LP64="LP64:="
 # -D_LP64=1 is only set on linux and mac. Setting on windows causes diff in
@@ -29638,7 +29646,7 @@ fi
 -I${JDK_OUTPUTDIR}/include \
 -I${JDK_OUTPUTDIR}/include/$OPENJDK_TARGET_OS \
 -I${JDK_TOPDIR}/src/share/javavm/export \
--I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_API_DIR/javavm/export \
+-I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_EXPORT_DIR/javavm/export \
 -I${JDK_TOPDIR}/src/share/native/common \
 -I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_API_DIR/native/common"

@@ -34245,10 +34253,10 @@ ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ex
 ac_compiler_gnu=$ac_cv_cxx_compiler_gnu

 PREV_CXXCFLAGS="$CXXFLAGS"
-PREV_LDFLAGS="$LDFLAGS"
+PREV_LIBS="$LIBS"
 PREV_CXX="$CXX"
 CXXFLAGS="$CXXFLAGS $FREETYPE_CFLAGS"
-LDFLAGS="$LDFLAGS $FREETYPE_LIBS"
+LIBS="$LIBS $FREETYPE_LIBS"
 CXX="$FIXPATH $CXX"
 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h. */
@@ -34315,7 +34323,7 @@ fi
 rm -f core conftest.err conftest.$ac_objext \
 conftest$ac_exeext conftest.$ac_ext
 CXXCFLAGS="$PREV_CXXFLAGS"
-LDFLAGS="$PREV_LDFLAGS"
+LIBS="$PREV_LIBS"
 CXX="$PREV_CXX"
 ac_ext=cpp
 ac_cpp='$CXXCPP $CPPFLAGS'
@@ -481,10 +481,10 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
 AC_MSG_CHECKING([if we can compile and link with freetype])
 AC_LANG_PUSH(C++)
 PREV_CXXCFLAGS="$CXXFLAGS"
-PREV_LDFLAGS="$LDFLAGS"
+PREV_LIBS="$LIBS"
 PREV_CXX="$CXX"
 CXXFLAGS="$CXXFLAGS $FREETYPE_CFLAGS"
-LDFLAGS="$LDFLAGS $FREETYPE_LIBS"
+LIBS="$LIBS $FREETYPE_LIBS"
 CXX="$FIXPATH $CXX"
 AC_LINK_IFELSE([AC_LANG_SOURCE([[
 #include<ft2build.h>
@@ -508,7 +508,7 @@ AC_DEFUN_ONCE([LIB_SETUP_FREETYPE],
 ]
 )
 CXXCFLAGS="$PREV_CXXFLAGS"
-LDFLAGS="$PREV_LDFLAGS"
+LIBS="$PREV_LIBS"
 CXX="$PREV_CXX"
 AC_LANG_POP(C++)

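Note on the LDFLAGS -> LIBS change above: autoconf's link test command (the ac_link line quoted in the generated-configure.sh hunk) places $LIBS after conftest.$ac_ext on the link line, while $LDFLAGS comes before it, so libraries passed via LDFLAGS can be dropped by single-pass or --as-needed linkers before the object that needs them is seen. A minimal sketch of the same save/append/restore pattern, with a hypothetical FOO_LIBS variable that is not part of this change:

  AC_MSG_CHECKING([if we can link with foo])
  PREV_LIBS="$LIBS"
  LIBS="$LIBS $FOO_LIBS"
  # AC_LINK_IFELSE links the test program with $LIBS appended after it.
  AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <foo.h>]], [[foo_init();]])],
      [AC_MSG_RESULT([yes])],
      [AC_MSG_RESULT([no])])
  LIBS="$PREV_LIBS"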
@@ -327,6 +327,13 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS],
 fi
 AC_SUBST(OPENJDK_TARGET_OS_API_DIR)

+if test "x$OPENJDK_TARGET_OS" = xmacosx; then
+  OPENJDK_TARGET_OS_EXPORT_DIR=macosx
+else
+  OPENJDK_TARGET_OS_EXPORT_DIR=${OPENJDK_TARGET_OS_API_DIR}
+fi
+AC_SUBST(OPENJDK_TARGET_OS_EXPORT_DIR)
+
 if test "x$OPENJDK_TARGET_CPU_BITS" = x64; then
 A_LP64="LP64:="
 # -D_LP64=1 is only set on linux and mac. Setting on windows causes diff in
@@ -92,6 +92,7 @@ OPENJDK_TARGET_CPU_LEGACY_LIB:=@OPENJDK_TARGET_CPU_LEGACY_LIB@
 OPENJDK_TARGET_CPU_OSARCH:=@OPENJDK_TARGET_CPU_OSARCH@
 OPENJDK_TARGET_CPU_JLI_CFLAGS:=@OPENJDK_TARGET_CPU_JLI_CFLAGS@
 OPENJDK_TARGET_OS_API_DIR:=@OPENJDK_TARGET_OS_API_DIR@
+OPENJDK_TARGET_OS_EXPORT_DIR:=@OPENJDK_TARGET_OS_EXPORT_DIR@

 # We are building on this build system.
 # When not cross-compiling, it is the same as the target.
@@ -942,7 +942,7 @@ AC_DEFUN_ONCE([TOOLCHAIN_SETUP_COMPILER_FLAGS_FOR_JDK],
 -I${JDK_OUTPUTDIR}/include \
 -I${JDK_OUTPUTDIR}/include/$OPENJDK_TARGET_OS \
 -I${JDK_TOPDIR}/src/share/javavm/export \
--I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_API_DIR/javavm/export \
+-I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_EXPORT_DIR/javavm/export \
 -I${JDK_TOPDIR}/src/share/native/common \
 -I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_API_DIR/native/common"

@@ -506,30 +506,30 @@ define SetupJavaCompilation

 $$($1_BIN)/javac_state: $$($1_SRCS) $$($1_DEPENDS)
 $(MKDIR) -p $$(@D)
-$$(call ListPathsSafely,$1_SRCS,\n, >> $$($1_BIN)/_the.batch.tmp)
+$$(call ListPathsSafely,$1_SRCS,\n, >> $$($1_BIN)/_the.$1_batch.tmp)
 $(ECHO) Compiling $1
 ($$($1_JVM) $$($1_SJAVAC) \
 $$($1_REMOTE) \
 -j $(JOBS) \
 --permit-unidentified-artifacts \
 --permit-sources-without-package \
---compare-found-sources $$($1_BIN)/_the.batch.tmp \
+--compare-found-sources $$($1_BIN)/_the.$1_batch.tmp \
 --log=$(LOG_LEVEL) \
 $$($1_SJAVAC_ARGS) \
 $$($1_FLAGS) \
 $$($1_HEADERS_ARG) \
 -d $$($1_BIN) && \
-$(MV) $$($1_BIN)/_the.batch.tmp $$($1_BIN)/_the.batch)
+$(MV) $$($1_BIN)/_the.$1_batch.tmp $$($1_BIN)/_the.$1_batch)
 else
 # Using plain javac to batch compile everything.
-$1 := $$($1_ALL_COPY_TARGETS) $$($1_ALL_COPY_CLEAN_TARGETS) $$($1_BIN)/_the.batch
+$1 := $$($1_ALL_COPY_TARGETS) $$($1_ALL_COPY_CLEAN_TARGETS) $$($1_BIN)/_the.$1_batch

 # When building in batch, put headers in a temp dir to filter out those that actually
 # changed before copying them to the real header dir.
 ifneq (,$$($1_HEADERS))
 $1_HEADERS_ARG := -h $$($1_HEADERS).tmp

-$$($1_HEADERS)/_the.headers: $$($1_BIN)/_the.batch
+$$($1_HEADERS)/_the.$1_headers: $$($1_BIN)/_the.$1_batch
 $(MKDIR) -p $$(@D)
 for f in `ls $$($1_HEADERS).tmp`; do \
 if [ ! -f "$$($1_HEADERS)/$$$$f" ] || [ "`$(DIFF) $$($1_HEADERS)/$$$$f $$($1_HEADERS).tmp/$$$$f`" != "" ]; then \
@@ -539,19 +539,19 @@ define SetupJavaCompilation
 $(RM) -r $$($1_HEADERS).tmp
 $(TOUCH) $$@

-$1 += $$($1_HEADERS)/_the.headers
+$1 += $$($1_HEADERS)/_the.$1_headers
 endif

 # When not using sjavac, pass along all sources to javac using an @file.
-$$($1_BIN)/_the.batch: $$($1_SRCS) $$($1_DEPENDS)
+$$($1_BIN)/_the.$1_batch: $$($1_SRCS) $$($1_DEPENDS)
 $(MKDIR) -p $$(@D)
-$(RM) $$($1_BIN)/_the.batch $$($1_BIN)/_the.batch.tmp
+$(RM) $$($1_BIN)/_the.$1_batch $$($1_BIN)/_the.$1_batch.tmp
-$$(call ListPathsSafely,$1_SRCS,\n, >> $$($1_BIN)/_the.batch.tmp)
+$$(call ListPathsSafely,$1_SRCS,\n, >> $$($1_BIN)/_the.$1_batch.tmp)
-$(ECHO) Compiling `$(WC) $$($1_BIN)/_the.batch.tmp | $(TR) -s ' ' | $(CUT) -f 2 -d ' '` files for $1
+$(ECHO) Compiling `$(WC) $$($1_BIN)/_the.$1_batch.tmp | $(TR) -s ' ' | $(CUT) -f 2 -d ' '` files for $1
 ($$($1_JVM) $$($1_JAVAC) $$($1_FLAGS) \
 -implicit:none -sourcepath "$$($1_SRCROOTSC)" \
--d $$($1_BIN) $$($1_HEADERS_ARG) @$$($1_BIN)/_the.batch.tmp && \
+-d $$($1_BIN) $$($1_HEADERS_ARG) @$$($1_BIN)/_the.$1_batch.tmp && \
-$(MV) $$($1_BIN)/_the.batch.tmp $$($1_BIN)/_the.batch)
+$(MV) $$($1_BIN)/_the.$1_batch.tmp $$($1_BIN)/_the.$1_batch)

 endif

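Note on the _the.batch -> _the.$1_batch renaming above: inside SetupJavaCompilation, $1 is the name of the setup being instantiated, so the javac batch and header marker files are now namespaced per setup rather than sharing a single _the.batch. A plausible reason (not stated in the diff) is to keep two setups that write into the same BIN directory from clobbering each other's markers. An illustration with made-up setup names and paths:

  # Hypothetical invocations; both setups point BIN at the same classes directory.
  $(eval $(call SetupJavaCompilation,DEMO_A, SETUP := GENERATE_JDKBYTECODE, \
      SRC := $(JDK_TOPDIR)/src/demo/a, BIN := $(JDK_OUTPUTDIR)/democlasses))
  $(eval $(call SetupJavaCompilation,DEMO_B, SETUP := GENERATE_JDKBYTECODE, \
      SRC := $(JDK_TOPDIR)/src/demo/b, BIN := $(JDK_OUTPUTDIR)/democlasses))
  # After this change their markers are _the.DEMO_A_batch and _the.DEMO_B_batch
  # instead of one shared _the.batch in the common BIN directory.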
@@ -234,3 +234,4 @@ a4bb3b4500164748a9c33b2283cfda76d89f25ab jdk8-b108
 3d2b7ce93c5c2e3db748f29c3d29620a8b3b748a jdk8-b110
 85c1c94e723582f9a1dd0251502c42b73d6deea7 jdk8-b111
 43cec76d1d62587a07af07e2d9bec93aba2a506b jdk8-b112
+a259ff3e42d91da68f4d4f09d7eb9dc22bc024fc jdk8-b113
@@ -387,3 +387,5 @@ f6962730bbde82f279a0ae3a1c14bc5e58096c6e jdk8-b111
 4a845c7a463844cead9e1e1641d6bcfb8a77f1c7 hs25-b54
 0ed9a90f45e1b392c671005f9ee22ce1acf02984 jdk8-b112
 23b8db5ea31d3079f1326afde4cd5c67b1dac49c hs25-b55
+4589b398ab03aba6a5da8c06ff53603488d1b8f4 jdk8-b113
+82a9cdbf683e374a76f2009352de53e16bed5a91 hs25-b56
@@ -736,26 +736,29 @@ static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* li

 if (existing_map == NULL){
 if (add_map_info(ph, lib_fd, lib_php->p_offset,
-target_vaddr, lib_php->p_filesz) == NULL) {
+target_vaddr, lib_php->p_memsz) == NULL) {
 goto err;
 }
 } else {
+// Coredump stores value of p_memsz elf field
+// rounded up to page boundary.
+
 if ((existing_map->memsz != page_size) &&
 (existing_map->fd != lib_fd) &&
-(existing_map->memsz != lib_php->p_filesz)){
+(ROUNDUP(existing_map->memsz, page_size) != ROUNDUP(lib_php->p_memsz, page_size))) {

-print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)",
+print_debug("address conflict @ 0x%lx (existing map size = %ld, size = %ld, flags = %d)\n",
-target_vaddr, lib_php->p_filesz, lib_php->p_flags);
+target_vaddr, existing_map->memsz, lib_php->p_memsz, lib_php->p_flags);
 goto err;
 }

 /* replace PT_LOAD segment with library segment */
 print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
-existing_map->memsz, lib_php->p_filesz);
+existing_map->memsz, ROUNDUP(lib_php->p_memsz, page_size));

 existing_map->fd = lib_fd;
 existing_map->offset = lib_php->p_offset;
-existing_map->memsz = lib_php->p_filesz;
+existing_map->memsz = ROUNDUP(lib_php->p_memsz, page_size);
 }
 }

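Background for the read_lib_segments() hunk above: an ELF PT_LOAD program header records both p_filesz (bytes present in the file) and p_memsz (bytes the segment occupies in memory, which can be larger, e.g. because of .bss), and, as the new comment says, the core dump stores segment sizes rounded up to the page boundary. Comparing page-rounded p_memsz values therefore avoids spurious "address conflict" failures when the file and memory sizes differ. For reference, a ROUNDUP macro of the kind used here is typically defined along these lines (illustrative only, not necessarily the exact definition in ps_core.c):

  /* Round val up to the next multiple of unit. */
  #define ROUNDUP(val, unit) ((((val) + (unit) - 1) / (unit)) * (unit))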
@@ -51,6 +51,7 @@ public class ConstMethod extends VMObject {
 private static int HAS_GENERIC_SIGNATURE;
 private static int HAS_METHOD_ANNOTATIONS;
 private static int HAS_PARAMETER_ANNOTATIONS;
+private static int HAS_METHOD_PARAMETERS;
 private static int HAS_DEFAULT_ANNOTATIONS;
 private static int HAS_TYPE_ANNOTATIONS;

@@ -70,6 +71,7 @@ public class ConstMethod extends VMObject {
 HAS_GENERIC_SIGNATURE = db.lookupIntConstant("ConstMethod::_has_generic_signature").intValue();
 HAS_METHOD_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_method_annotations").intValue();
 HAS_PARAMETER_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_parameter_annotations").intValue();
+HAS_METHOD_PARAMETERS = db.lookupIntConstant("ConstMethod::_has_method_parameters").intValue();
 HAS_DEFAULT_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_default_annotations").intValue();
 HAS_TYPE_ANNOTATIONS = db.lookupIntConstant("ConstMethod::_has_type_annotations").intValue();

@@ -85,6 +87,9 @@ public class ConstMethod extends VMObject {
 // start of byte code
 bytecodeOffset = type.getSize();

+type = db.lookupType("MethodParametersElement");
+methodParametersElementSize = type.getSize();
+
 type = db.lookupType("CheckedExceptionElement");
 checkedExceptionElementSize = type.getSize();

@@ -113,7 +118,7 @@ public class ConstMethod extends VMObject {

 // start of bytecode
 private static long bytecodeOffset;
+private static long methodParametersElementSize;
 private static long checkedExceptionElementSize;
 private static long localVariableTableElementSize;
 private static long exceptionTableElementSize;
@@ -387,6 +392,10 @@ public class ConstMethod extends VMObject {
 return ret;
 }

+private boolean hasMethodParameters() {
+return (getFlags() & HAS_METHOD_PARAMETERS) != 0;
+}
+
 private boolean hasGenericSignature() {
 return (getFlags() & HAS_GENERIC_SIGNATURE) != 0;
 }
@@ -442,11 +451,41 @@ public class ConstMethod extends VMObject {
 return offsetOfLastU2Element();
 }

-private long offsetOfCheckedExceptionsLength() {
+private long offsetOfMethodParametersLength() {
+if (Assert.ASSERTS_ENABLED) {
+Assert.that(hasMethodParameters(), "should only be called if table is present");
+}
 return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort :
 offsetOfLastU2Element();
 }

+private int getMethodParametersLength() {
+if (hasMethodParameters())
+return (int) getAddress().getCIntegerAt(offsetOfMethodParametersLength(), 2, true);
+else
+return 0;
+}
+
+// Offset of start of checked exceptions
+private long offsetOfMethodParameters() {
+long offset = offsetOfMethodParametersLength();
+long length = getMethodParametersLength();
+if (Assert.ASSERTS_ENABLED) {
+Assert.that(length > 0, "should only be called if method parameter information is present");
+}
+offset -= length * methodParametersElementSize;
+return offset;
+}
+
+private long offsetOfCheckedExceptionsLength() {
+if (hasMethodParameters())
+return offsetOfMethodParameters() - sizeofShort;
+else {
+return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort :
+offsetOfLastU2Element();
+}
+}
+
 private int getCheckedExceptionsLength() {
 if (hasCheckedExceptions()) {
 return (int) getAddress().getCIntegerAt(offsetOfCheckedExceptionsLength(), 2, true);
@@ -496,6 +535,8 @@ public class ConstMethod extends VMObject {
 return offsetOfExceptionTable() - sizeofShort;
 } else if (hasCheckedExceptions()) {
 return offsetOfCheckedExceptions() - sizeofShort;
+} else if (hasMethodParameters()) {
+return offsetOfMethodParameters() - sizeofShort;
 } else {
 return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort :
 offsetOfLastU2Element();
@@ -526,6 +567,8 @@ public class ConstMethod extends VMObject {
 }
 if (hasCheckedExceptions()) {
 return offsetOfCheckedExceptions() - sizeofShort;
+} else if (hasMethodParameters()) {
+return offsetOfMethodParameters() - sizeofShort;
 } else {
 return hasGenericSignature() ? offsetOfLastU2Element() - sizeofShort :
 offsetOfLastU2Element();
@@ -51,8 +51,7 @@ public class ClassLoaderStats extends Tool {

 public static void main(String[] args) {
 ClassLoaderStats cls = new ClassLoaderStats();
-cls.start(args);
+cls.execute(args);
-cls.stop();
 }

 private static class ClassData {
@ -54,8 +54,7 @@ public class FinalizerInfo extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
FinalizerInfo finfo = new FinalizerInfo();
|
FinalizerInfo finfo = new FinalizerInfo();
|
||||||
finfo.start(args);
|
finfo.execute(args);
|
||||||
finfo.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void run() {
|
public void run() {
|
||||||
|
@ -54,7 +54,6 @@ public class FlagDumper extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
FlagDumper fd = new FlagDumper();
|
FlagDumper fd = new FlagDumper();
|
||||||
fd.start(args);
|
fd.execute(args);
|
||||||
fd.stop();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -80,8 +80,7 @@ public class HeapDumper extends Tool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
HeapDumper dumper = new HeapDumper(file);
|
HeapDumper dumper = new HeapDumper(file);
|
||||||
dumper.start(args);
|
dumper.execute(args);
|
||||||
dumper.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -46,8 +46,7 @@ public class HeapSummary extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
HeapSummary hs = new HeapSummary();
|
HeapSummary hs = new HeapSummary();
|
||||||
hs.start(args);
|
hs.execute(args);
|
||||||
hs.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void run() {
|
public void run() {
|
||||||
|
@ -134,8 +134,7 @@ public class JInfo extends Tool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
JInfo jinfo = new JInfo(mode);
|
JInfo jinfo = new JInfo(mode);
|
||||||
jinfo.start(args);
|
jinfo.execute(args);
|
||||||
jinfo.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private void printVMFlags() {
|
private void printVMFlags() {
|
||||||
|
@ -136,7 +136,9 @@ public class JMap extends Tool {
|
|||||||
mode = MODE_HEAP_GRAPH_GXL;
|
mode = MODE_HEAP_GRAPH_GXL;
|
||||||
} else {
|
} else {
|
||||||
System.err.println("unknown heap format:" + format);
|
System.err.println("unknown heap format:" + format);
|
||||||
return;
|
|
||||||
|
// Exit with error status
|
||||||
|
System.exit(1);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
copyArgs = false;
|
copyArgs = false;
|
||||||
@ -153,8 +155,7 @@ public class JMap extends Tool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
JMap jmap = new JMap(mode);
|
JMap jmap = new JMap(mode);
|
||||||
jmap.start(args);
|
jmap.execute(args);
|
||||||
jmap.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean writeHeapHprofBin(String fileName) {
|
public boolean writeHeapHprofBin(String fileName) {
|
||||||
|
@ -64,7 +64,6 @@ public class JSnap extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
JSnap js = new JSnap();
|
JSnap js = new JSnap();
|
||||||
js.start(args);
|
js.execute(args);
|
||||||
js.stop();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -89,8 +89,7 @@ public class JStack extends Tool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
JStack jstack = new JStack(mixedMode, concurrentLocks);
|
JStack jstack = new JStack(mixedMode, concurrentLocks);
|
||||||
jstack.start(args);
|
jstack.execute(args);
|
||||||
jstack.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean mixedMode;
|
private boolean mixedMode;
|
||||||
|
@ -61,7 +61,6 @@ public class ObjectHistogram extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
ObjectHistogram oh = new ObjectHistogram();
|
ObjectHistogram oh = new ObjectHistogram();
|
||||||
oh.start(args);
|
oh.execute(args);
|
||||||
oh.stop();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -69,7 +69,6 @@ public class PMap extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) throws Exception {
|
public static void main(String[] args) throws Exception {
|
||||||
PMap t = new PMap();
|
PMap t = new PMap();
|
||||||
t.start(args);
|
t.execute(args);
|
||||||
t.stop();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -182,8 +182,7 @@ public class PStack extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) throws Exception {
|
public static void main(String[] args) throws Exception {
|
||||||
PStack t = new PStack();
|
PStack t = new PStack();
|
||||||
t.start(args);
|
t.execute(args);
|
||||||
t.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// -- Internals only below this point
|
// -- Internals only below this point
|
||||||
|
@ -137,8 +137,7 @@ public class StackTrace extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
StackTrace st = new StackTrace();
|
StackTrace st = new StackTrace();
|
||||||
st.start(args);
|
st.execute(args);
|
||||||
st.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean verbose;
|
private boolean verbose;
|
||||||
|
@ -58,7 +58,6 @@ public class SysPropsDumper extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
SysPropsDumper pd = new SysPropsDumper();
|
SysPropsDumper pd = new SysPropsDumper();
|
||||||
pd.start(args);
|
pd.execute(args);
|
||||||
pd.stop();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -26,6 +26,7 @@ package sun.jvm.hotspot.tools;

 import java.io.PrintStream;
 import java.util.Hashtable;
+
 import sun.jvm.hotspot.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.debugger.*;
@@ -105,26 +106,44 @@ public abstract class Tool implements Runnable {

 public static void main(String[] args) {
 <derived class> obj = new <derived class>;
-obj.start(args);
+obj.execute(args);
 }

 */

-protected void stop() {
+protected void execute(String[] args) {
+int returnStatus = 1;
+
+try {
+returnStatus = start(args);
+} finally {
+stop();
+}
+
+// Exit with 0 or 1
+System.exit(returnStatus);
+}
+
+public void stop() {
 if (agent != null) {
 agent.detach();
 }
 }

-protected void start(String[] args) {
+private int start(String[] args) {

 if ((args.length < 1) || (args.length > 2)) {
 usage();
-return;
+return 1;
 }

 // Attempt to handle -h or -help or some invalid flag
-if (args[0].startsWith("-")) {
+if (args[0].startsWith("-h")) {
 usage();
+return 0;
+} else if (args[0].startsWith("-")) {
+usage();
+return 1;
 }

 PrintStream err = System.err;
@@ -154,6 +173,7 @@ public abstract class Tool implements Runnable {

 default:
 usage();
+return 1;
 }

 agent = new HotSpotAgent();
@@ -191,15 +211,16 @@ public abstract class Tool implements Runnable {
 break;
 }
 if (e.getMessage() != null) {
-err.print(e.getMessage());
+err.println(e.getMessage());
 e.printStackTrace();
 }
 err.println();
-return;
+return 1;
 }

 err.println("Debugger attached successfully.");
 startInternal();
+return 0;
 }

 // When using an existing JVMDebugger.
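Taken together with the per-tool main() changes elsewhere in this commit, the pattern for a serviceability-agent tool after this refactoring looks roughly like the following (IllustrativeTool is a made-up name; the real subclasses are ClassLoaderStats, JMap, JStack and the others shown above):

  public class IllustrativeTool extends Tool {
      public void run() {
          // inspect the attached VM here
      }

      public static void main(String[] args) {
          IllustrativeTool tool = new IllustrativeTool();
          // execute() runs start(args), always calls stop() in a finally block,
          // and exits the JVM with 0 on success or 1 on failure.
          tool.execute(args);
      }
  }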
@ -177,7 +177,6 @@ public class ClassDump extends Tool {
|
|||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
|
|
||||||
ClassDump cd = new ClassDump();
|
ClassDump cd = new ClassDump();
|
||||||
cd.start(args);
|
cd.execute(args);
|
||||||
cd.stop();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -42,8 +42,7 @@ public class JSDB extends Tool {
|
|||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
JSDB jsdb = new JSDB();
|
JSDB jsdb = new JSDB();
|
||||||
jsdb.start(args);
|
jsdb.execute(args);
|
||||||
jsdb.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void run() {
|
public void run() {
|
||||||
|
@ -40,8 +40,7 @@ import sun.jvm.hotspot.utilities.soql.*;
|
|||||||
public class SOQL extends Tool {
|
public class SOQL extends Tool {
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
SOQL soql = new SOQL();
|
SOQL soql = new SOQL();
|
||||||
soql.start(args);
|
soql.execute(args);
|
||||||
soql.stop();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public SOQL() {
|
public SOQL() {
|
||||||
|
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013

 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=55
+HS_BUILD_NUMBER=56

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@ -24,12 +24,7 @@
|
|||||||
|
|
||||||
# Properties for jprt
|
# Properties for jprt
|
||||||
|
|
||||||
# All build result bundles are full jdks, so the 64bit testing does not
|
# All build result bundles are full jdks.
|
||||||
# need the 32bit sibling bundle installed.
|
|
||||||
# Note: If the hotspot/make/Makefile changed to only bundle the 64bit files
|
|
||||||
# when bundling 64bit, and stripped out the 64bit files from any 32bit
|
|
||||||
# bundles, then this setting would be need to be "true".
|
|
||||||
|
|
||||||
jprt.need.sibling.build=false
|
jprt.need.sibling.build=false
|
||||||
|
|
||||||
# At submit time, the release supplied will be in jprt.submit.release
|
# At submit time, the release supplied will be in jprt.submit.release
|
||||||
@ -52,21 +47,11 @@ jprt.sync.push=false
|
|||||||
# sparc etc.
|
# sparc etc.
|
||||||
|
|
||||||
# Define the Solaris platforms we want for the various releases
|
# Define the Solaris platforms we want for the various releases
|
||||||
jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
|
|
||||||
jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
|
|
||||||
jprt.my.solaris.sparc.jdk7u8=${jprt.my.solaris.sparc.jdk7}
|
|
||||||
jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
|
jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
|
||||||
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
|
jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
|
||||||
jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
|
jprt.my.solaris.sparcv9.jdk7u8=${jprt.my.solaris.sparcv9.jdk7}
|
||||||
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
|
jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
|
||||||
|
|
||||||
jprt.my.solaris.i586.jdk8=solaris_i586_5.10
|
|
||||||
jprt.my.solaris.i586.jdk7=solaris_i586_5.10
|
|
||||||
jprt.my.solaris.i586.jdk7u8=${jprt.my.solaris.i586.jdk7}
|
|
||||||
jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
|
|
||||||
|
|
||||||
jprt.my.solaris.x64.jdk8=solaris_x64_5.10
|
jprt.my.solaris.x64.jdk8=solaris_x64_5.10
|
||||||
jprt.my.solaris.x64.jdk7=solaris_x64_5.10
|
jprt.my.solaris.x64.jdk7=solaris_x64_5.10
|
||||||
jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
|
jprt.my.solaris.x64.jdk7u8=${jprt.my.solaris.x64.jdk7}
|
||||||
@ -133,9 +118,7 @@ jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
|
|||||||
# Standard list of jprt build targets for this source tree
|
# Standard list of jprt build targets for this source tree
|
||||||
|
|
||||||
jprt.build.targets.standard= \
|
jprt.build.targets.standard= \
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \
|
${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \
|
||||||
${jprt.my.solaris.i586}-{product|fastdebug}, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}, \
|
${jprt.my.solaris.x64}-{product|fastdebug}, \
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}, \
|
${jprt.my.linux.i586}-{product|fastdebug}, \
|
||||||
${jprt.my.linux.x64}-{product|fastdebug|optimized}, \
|
${jprt.my.linux.x64}-{product|fastdebug|optimized}, \
|
||||||
@ -145,7 +128,6 @@ jprt.build.targets.standard= \
|
|||||||
${jprt.my.linux.armvh}-{product|fastdebug}
|
${jprt.my.linux.armvh}-{product|fastdebug}
|
||||||
|
|
||||||
jprt.build.targets.open= \
|
jprt.build.targets.open= \
|
||||||
${jprt.my.solaris.i586}-{productOpen}, \
|
|
||||||
${jprt.my.solaris.x64}-{debugOpen}, \
|
${jprt.my.solaris.x64}-{debugOpen}, \
|
||||||
${jprt.my.linux.x64}-{productOpen}
|
${jprt.my.linux.x64}-{productOpen}
|
||||||
|
|
||||||
@ -168,31 +150,6 @@ jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}
|
|||||||
|
|
||||||
# Subset lists of test targets for this source tree
|
# Subset lists of test targets for this source tree
|
||||||
|
|
||||||
jprt.my.solaris.sparc.test.targets= \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jvm98, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \
|
|
||||||
${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \
|
|
||||||
${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_SerialGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_CMS, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_CMS, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_G1, \
|
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParOldGC
|
|
||||||
|
|
||||||
jprt.my.solaris.sparcv9.test.targets= \
|
jprt.my.solaris.sparcv9.test.targets= \
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
|
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
|
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||||
@ -242,37 +199,6 @@ jprt.my.solaris.x64.test.targets= \
|
|||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
|
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC
|
||||||
|
|
||||||
jprt.my.solaris.i586.test.targets= \
|
|
||||||
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
|
|
||||||
${jprt.my.solaris.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
|
|
||||||
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark, \
|
|
||||||
${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xshare, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCBasher_CMS, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCBasher_G1, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_SerialGC, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParallelGC, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParNewGC, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_CMS, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_G1, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParOldGC, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCOld_SerialGC, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCOld_ParallelGC, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCOld_ParNewGC, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCOld_CMS, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCOld_G1, \
|
|
||||||
${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_nontiered, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-jbb_G1, \
|
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParOldGC
|
|
||||||
|
|
||||||
jprt.my.linux.i586.test.targets = \
|
jprt.my.linux.i586.test.targets = \
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
|
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
|
${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_nontiered, \
|
||||||
@ -395,7 +321,6 @@ jprt.my.windows.x64.test.targets = \
|
|||||||
# Some basic "smoke" tests for OpenJDK builds
|
# Some basic "smoke" tests for OpenJDK builds
|
||||||
jprt.test.targets.open = \
|
jprt.test.targets.open = \
|
||||||
${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \
|
${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \
|
||||||
${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98, \
|
|
||||||
${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
|
${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
|
||||||
|
|
||||||
# Testing for actual embedded builds is different to standard
|
# Testing for actual embedded builds is different to standard
|
||||||
@ -407,9 +332,7 @@ jprt.my.linux.i586.test.targets.embedded = \
|
|||||||
|
|
||||||
jprt.test.targets.standard = \
|
jprt.test.targets.standard = \
|
||||||
${jprt.my.linux.i586.test.targets.embedded}, \
|
${jprt.my.linux.i586.test.targets.embedded}, \
|
||||||
${jprt.my.solaris.sparc.test.targets}, \
|
|
||||||
${jprt.my.solaris.sparcv9.test.targets}, \
|
${jprt.my.solaris.sparcv9.test.targets}, \
|
||||||
${jprt.my.solaris.i586.test.targets}, \
|
|
||||||
${jprt.my.solaris.x64.test.targets}, \
|
${jprt.my.solaris.x64.test.targets}, \
|
||||||
${jprt.my.linux.i586.test.targets}, \
|
${jprt.my.linux.i586.test.targets}, \
|
||||||
${jprt.my.linux.x64.test.targets}, \
|
${jprt.my.linux.x64.test.targets}, \
|
||||||
@ -420,15 +343,12 @@ jprt.test.targets.standard = \
|
|||||||
|
|
||||||
jprt.test.targets.embedded= \
|
jprt.test.targets.embedded= \
|
||||||
${jprt.my.linux.i586.test.targets.embedded}, \
|
${jprt.my.linux.i586.test.targets.embedded}, \
|
||||||
${jprt.my.solaris.sparc.test.targets}, \
|
|
||||||
${jprt.my.solaris.sparcv9.test.targets}, \
|
${jprt.my.solaris.sparcv9.test.targets}, \
|
||||||
${jprt.my.solaris.i586.test.targets}, \
|
|
||||||
${jprt.my.solaris.x64.test.targets}, \
|
${jprt.my.solaris.x64.test.targets}, \
|
||||||
${jprt.my.linux.x64.test.targets}, \
|
${jprt.my.linux.x64.test.targets}, \
|
||||||
${jprt.my.windows.i586.test.targets}, \
|
${jprt.my.windows.i586.test.targets}, \
|
||||||
${jprt.my.windows.x64.test.targets}
|
${jprt.my.windows.x64.test.targets}
|
||||||
|
|
||||||
|
|
||||||
jprt.test.targets.jdk8=${jprt.test.targets.standard}
|
jprt.test.targets.jdk8=${jprt.test.targets.standard}
|
||||||
jprt.test.targets.jdk7=${jprt.test.targets.standard}
|
jprt.test.targets.jdk7=${jprt.test.targets.standard}
|
||||||
jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
|
jprt.test.targets.jdk7u8=${jprt.test.targets.jdk7}
|
||||||
@ -439,15 +359,11 @@ jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
|
|||||||
#jprt.make.rule.test.targets=*-product-*-packtest
|
#jprt.make.rule.test.targets=*-product-*-packtest
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard.client = \
|
jprt.make.rule.test.targets.standard.client = \
|
||||||
${jprt.my.solaris.sparc}-*-c1-clienttest, \
|
|
||||||
${jprt.my.solaris.i586}-*-c1-clienttest, \
|
|
||||||
${jprt.my.linux.i586}-*-c1-clienttest, \
|
${jprt.my.linux.i586}-*-c1-clienttest, \
|
||||||
${jprt.my.windows.i586}-*-c1-clienttest
|
${jprt.my.windows.i586}-*-c1-clienttest
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard.server = \
|
jprt.make.rule.test.targets.standard.server = \
|
||||||
${jprt.my.solaris.sparc}-*-c2-servertest, \
|
|
||||||
${jprt.my.solaris.sparcv9}-*-c2-servertest, \
|
${jprt.my.solaris.sparcv9}-*-c2-servertest, \
|
||||||
${jprt.my.solaris.i586}-*-c2-servertest, \
|
|
||||||
${jprt.my.solaris.x64}-*-c2-servertest, \
|
${jprt.my.solaris.x64}-*-c2-servertest, \
|
||||||
${jprt.my.linux.i586}-*-c2-servertest, \
|
${jprt.my.linux.i586}-*-c2-servertest, \
|
||||||
${jprt.my.linux.x64}-*-c2-servertest, \
|
${jprt.my.linux.x64}-*-c2-servertest, \
|
||||||
@ -456,9 +372,7 @@ jprt.make.rule.test.targets.standard.server = \
|
|||||||
${jprt.my.windows.x64}-*-c2-servertest
|
${jprt.my.windows.x64}-*-c2-servertest
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard.internalvmtests = \
|
jprt.make.rule.test.targets.standard.internalvmtests = \
|
||||||
${jprt.my.solaris.sparc}-fastdebug-c2-internalvmtests, \
|
|
||||||
${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \
|
${jprt.my.solaris.sparcv9}-fastdebug-c2-internalvmtests, \
|
||||||
${jprt.my.solaris.i586}-fastdebug-c2-internalvmtests, \
|
|
||||||
${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
|
${jprt.my.solaris.x64}-fastdebug-c2-internalvmtests, \
|
||||||
${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
|
${jprt.my.linux.i586}-fastdebug-c2-internalvmtests, \
|
||||||
${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
|
${jprt.my.linux.x64}-fastdebug-c2-internalvmtests, \
|
||||||
@ -467,16 +381,12 @@ jprt.make.rule.test.targets.standard.internalvmtests = \
|
|||||||
${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
|
${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
|
||||||
|
|
||||||
jprt.make.rule.test.targets.standard.wbapi = \
|
jprt.make.rule.test.targets.standard.wbapi = \
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-wbapitest, \
|
|
||||||
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \
|
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-wbapitest, \
|
||||||
${jprt.my.solaris.i586}-{product|fastdebug}-c2-wbapitest, \
|
|
||||||
${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \
|
${jprt.my.solaris.x64}-{product|fastdebug}-c2-wbapitest, \
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \
|
${jprt.my.linux.i586}-{product|fastdebug}-c2-wbapitest, \
|
||||||
${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \
|
${jprt.my.linux.x64}-{product|fastdebug}-c2-wbapitest, \
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \
|
${jprt.my.windows.i586}-{product|fastdebug}-c2-wbapitest, \
|
||||||
${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \
|
${jprt.my.windows.x64}-{product|fastdebug}-c2-wbapitest, \
|
||||||
${jprt.my.solaris.sparc}-{product|fastdebug}-c1-wbapitest, \
|
|
||||||
${jprt.my.solaris.i586}-{product|fastdebug}-c1-wbapitest, \
|
|
||||||
${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \
|
${jprt.my.linux.i586}-{product|fastdebug}-c1-wbapitest, \
|
||||||
${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest
|
${jprt.my.windows.i586}-{product|fastdebug}-c1-wbapitest
|
||||||
|
|
||||||
|
@ -40,8 +40,7 @@ TraceGeneratedNames = \
|
|||||||
traceEventIds.hpp \
|
traceEventIds.hpp \
|
||||||
traceTypes.hpp
|
traceTypes.hpp
|
||||||
|
|
||||||
|
!if EXISTS($(TraceAltSrcDir))
|
||||||
!if "$(OPENJDK)" != "true"
|
|
||||||
TraceGeneratedNames = $(TraceGeneratedNames) \
|
TraceGeneratedNames = $(TraceGeneratedNames) \
|
||||||
traceRequestables.hpp \
|
traceRequestables.hpp \
|
||||||
traceEventControl.hpp \
|
traceEventControl.hpp \
|
||||||
@ -56,7 +55,7 @@ TraceGeneratedFiles = \
|
|||||||
$(TraceOutDir)/traceEventIds.hpp \
|
$(TraceOutDir)/traceEventIds.hpp \
|
||||||
$(TraceOutDir)/traceTypes.hpp
|
$(TraceOutDir)/traceTypes.hpp
|
||||||
|
|
||||||
!if "$(OPENJDK)" != "true"
|
!if EXISTS($(TraceAltSrcDir))
|
||||||
TraceGeneratedFiles = $(TraceGeneratedFiles) \
|
TraceGeneratedFiles = $(TraceGeneratedFiles) \
|
||||||
$(TraceOutDir)/traceRequestables.hpp \
|
$(TraceOutDir)/traceRequestables.hpp \
|
||||||
$(TraceOutDir)/traceEventControl.hpp \
|
$(TraceOutDir)/traceEventControl.hpp \
|
||||||
@ -68,7 +67,7 @@ XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen
|
|||||||
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
|
XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
|
||||||
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
|
$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
|
||||||
|
|
||||||
!if "$(OPENJDK)" != "true"
|
!if EXISTS($(TraceAltSrcDir))
|
||||||
XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
|
XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
|
||||||
!endif
|
!endif
|
||||||
|
|
||||||
@ -87,7 +86,7 @@ $(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceType
|
|||||||
@echo Generating $@
|
@echo Generating $@
|
||||||
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
|
@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
|
||||||
|
|
||||||
!if "$(OPENJDK)" == "true"
|
!if !EXISTS($(TraceAltSrcDir))
|
||||||
|
|
||||||
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
|
$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
|
||||||
@echo Generating OpenJDK $@
|
@echo Generating OpenJDK $@
|
||||||
|
@ -53,6 +53,8 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool outgoing) {
|
|||||||
opr = as_long_opr(reg);
|
opr = as_long_opr(reg);
|
||||||
} else if (type == T_OBJECT || type == T_ARRAY) {
|
} else if (type == T_OBJECT || type == T_ARRAY) {
|
||||||
opr = as_oop_opr(reg);
|
opr = as_oop_opr(reg);
|
||||||
|
} else if (type == T_METADATA) {
|
||||||
|
opr = as_metadata_opr(reg);
|
||||||
} else {
|
} else {
|
||||||
opr = as_opr(reg);
|
opr = as_opr(reg);
|
||||||
}
|
}
|
||||||
|
@ -2565,7 +2565,7 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
|
|||||||
Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
|
Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
|
||||||
mdo_offset_bias);
|
mdo_offset_bias);
|
||||||
__ ld_ptr(receiver_addr, tmp1);
|
__ ld_ptr(receiver_addr, tmp1);
|
||||||
__ verify_oop(tmp1);
|
__ verify_klass_ptr(tmp1);
|
||||||
__ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
|
__ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
|
||||||
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
|
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
|
||||||
mdo_offset_bias);
|
mdo_offset_bias);
|
||||||
|
@ -404,7 +404,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
|||||||
if (id == fast_new_instance_init_check_id) {
|
if (id == fast_new_instance_init_check_id) {
|
||||||
// make sure the klass is initialized
|
// make sure the klass is initialized
|
||||||
__ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
|
__ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
|
||||||
__ cmp_and_br_short(G3_t1, InstanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
|
__ cmp(G3_t1, InstanceKlass::fully_initialized);
|
||||||
|
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
|
||||||
|
__ delayed()->nop();
|
||||||
}
|
}
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
// assert object can be fast path allocated
|
// assert object can be fast path allocated
|
||||||
@ -515,7 +517,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
|||||||
|
|
||||||
// check that array length is small enough for fast path
|
// check that array length is small enough for fast path
|
||||||
__ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
|
__ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
|
||||||
__ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);
|
__ cmp(G4_length, G3_t1);
|
||||||
|
__ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
|
||||||
|
__ delayed()->nop();
|
||||||
|
|
||||||
// if we got here then the TLAB allocation failed, so try
|
// if we got here then the TLAB allocation failed, so try
|
||||||
// refilling the TLAB or allocating directly from eden.
|
// refilling the TLAB or allocating directly from eden.
|
||||||
|
@ -3333,7 +3333,8 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
|
|||||||
|
|
||||||
if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
|
if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
|
||||||
// No allocation in the shared eden.
|
// No allocation in the shared eden.
|
||||||
ba_short(slow_case);
|
ba(slow_case);
|
||||||
|
delayed()->nop();
|
||||||
}
|
}
|
||||||
|
|
||||||
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
|
ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
|
||||||
@ -3358,7 +3359,8 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
|
|||||||
add(t2, 1, t2);
|
add(t2, 1, t2);
|
||||||
stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
|
stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
|
||||||
}
|
}
|
||||||
ba_short(try_eden);
|
ba(try_eden);
|
||||||
|
delayed()->nop();
|
||||||
|
|
||||||
bind(discard_tlab);
|
bind(discard_tlab);
|
||||||
if (TLABStats) {
|
if (TLABStats) {
|
||||||
@ -3420,7 +3422,8 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
|
|||||||
sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
|
sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
|
||||||
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
|
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
|
||||||
verify_tlab();
|
verify_tlab();
|
||||||
ba_short(retry);
|
ba(retry);
|
||||||
|
delayed()->nop();
|
||||||
}
|
}
|
||||||
|
|
||||||
void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
|
void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
|
||||||
|
@ -2022,6 +2022,10 @@ const RegMask Matcher::mathExactI_result_proj_mask() {
|
|||||||
return G1_REGI_mask();
|
return G1_REGI_mask();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const RegMask Matcher::mathExactL_result_proj_mask() {
|
||||||
|
return G1_REGL_mask();
|
||||||
|
}
|
||||||
|
|
||||||
const RegMask Matcher::mathExactI_flags_proj_mask() {
|
const RegMask Matcher::mathExactI_flags_proj_mask() {
|
||||||
return INT_FLAGS_mask();
|
return INT_FLAGS_mask();
|
||||||
}
|
}
|
||||||
|
@ -1405,6 +1405,15 @@ void Assembler::imull(Register dst, Register src, int value) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Assembler::imull(Register dst, Address src) {
|
||||||
|
InstructionMark im(this);
|
||||||
|
prefix(src, dst);
|
||||||
|
emit_int8(0x0F);
|
||||||
|
emit_int8((unsigned char) 0xAF);
|
||||||
|
emit_operand(dst, src);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
void Assembler::incl(Address dst) {
|
void Assembler::incl(Address dst) {
|
||||||
// Don't use it directly. Use MacroAssembler::increment() instead.
|
// Don't use it directly. Use MacroAssembler::increment() instead.
|
||||||
InstructionMark im(this);
|
InstructionMark im(this);
|
||||||
@ -5024,6 +5033,14 @@ void Assembler::imulq(Register dst, Register src, int value) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Assembler::imulq(Register dst, Address src) {
|
||||||
|
InstructionMark im(this);
|
||||||
|
prefixq(src, dst);
|
||||||
|
emit_int8(0x0F);
|
||||||
|
emit_int8((unsigned char) 0xAF);
|
||||||
|
emit_operand(dst, src);
|
||||||
|
}
|
||||||
|
|
||||||
void Assembler::incl(Register dst) {
|
void Assembler::incl(Register dst) {
|
||||||
// Don't use it directly. Use MacroAssembler::incrementl() instead.
|
// Don't use it directly. Use MacroAssembler::incrementl() instead.
|
||||||
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
|
// Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
|
||||||
|
@ -1162,9 +1162,13 @@ private:

   void imull(Register dst, Register src);
   void imull(Register dst, Register src, int value);
+  void imull(Register dst, Address src);

   void imulq(Register dst, Register src);
   void imulq(Register dst, Register src, int value);
+#ifdef _LP64
+  void imulq(Register dst, Address src);
+#endif


   // jcc is the generic conditional branch generator to run-
@ -40,11 +40,8 @@
 #include "runtime/synchronizer.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/debug.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
-# include "interp_masm_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
-# include "interp_masm_x86_64.hpp"
+#ifdef TARGET_ARCH_x86
+# include "interp_masm_x86.hpp"
 #endif

 #ifdef CC_INTERP
@ -52,6 +52,8 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
 #endif // _LP64
   } else if (type == T_OBJECT || type == T_ARRAY) {
     opr = as_oop_opr(reg);
+  } else if (type == T_METADATA) {
+    opr = as_metadata_opr(reg);
   } else {
     opr = as_opr(reg);
   }
@ -432,15 +432,16 @@ int LIR_Assembler::emit_unwind_handler() {
   int offset = code_offset();

   // Fetch the exception from TLS and clear out exception related thread state
-  __ get_thread(rsi);
-  __ movptr(rax, Address(rsi, JavaThread::exception_oop_offset()));
-  __ movptr(Address(rsi, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
-  __ movptr(Address(rsi, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
+  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
+  NOT_LP64(__ get_thread(rsi));
+  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
+  __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
+  __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);

   __ bind(_unwind_handler_entry);
   __ verify_not_null_oop(rax);
   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
-    __ mov(rsi, rax); // Preserve the exception
+    __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
   }

   // Preform needed unlocking
@ -448,19 +449,24 @@ int LIR_Assembler::emit_unwind_handler() {
   if (method()->is_synchronized()) {
     monitor_address(0, FrameMap::rax_opr);
     stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
-    __ unlock_object(rdi, rbx, rax, *stub->entry());
+    __ unlock_object(rdi, rsi, rax, *stub->entry());
     __ bind(*stub->continuation());
   }

   if (compilation()->env()->dtrace_method_probes()) {
+#ifdef _LP64
+    __ mov(rdi, r15_thread);
+    __ mov_metadata(rsi, method()->constant_encoding());
+#else
     __ get_thread(rax);
     __ movptr(Address(rsp, 0), rax);
     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
+#endif
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
   }

   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
-    __ mov(rax, rsi); // Restore the exception
+    __ mov(rax, rbx); // Restore the exception
   }

   // remove the activation and dispatch to the unwind handler
@ -1206,6 +1212,10 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
   LIR_Address* addr = src->as_address_ptr();
   Address from_addr = as_Address(addr);

+  if (addr->base()->type() == T_OBJECT) {
+    __ verify_oop(addr->base()->as_pointer_register());
+  }
+
   switch (type) {
     case T_BOOLEAN: // fall through
     case T_BYTE: // fall through
@ -79,7 +79,7 @@ define_pd_global(bool, UseMembar, false);
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread

-define_pd_global(uintx, TypeProfileLevel, 11);
+define_pd_global(uintx, TypeProfileLevel, 111);

 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
                                                                            \
hotspot/src/cpu/x86/vm/interp_masm_x86.cpp (new file, 229 added lines)
@ -0,0 +1,229 @@
|
/*
|
||||||
|
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
|
*
|
||||||
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
|
* under the terms of the GNU General Public License version 2 only, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||||
|
* version 2 for more details (a copy is included in the LICENSE file that
|
||||||
|
* accompanied this code).
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU General Public License version
|
||||||
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||||
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
*
|
||||||
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||||
|
* or visit www.oracle.com if you need additional information or have any
|
||||||
|
* questions.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "precompiled.hpp"
|
||||||
|
#include "interp_masm_x86.hpp"
|
||||||
|
#include "interpreter/interpreter.hpp"
|
||||||
|
#include "oops/methodData.hpp"
|
||||||
|
|
||||||
|
#ifndef CC_INTERP
|
||||||
|
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
|
||||||
|
Label update, next, none;
|
||||||
|
|
||||||
|
verify_oop(obj);
|
||||||
|
|
||||||
|
testptr(obj, obj);
|
||||||
|
jccb(Assembler::notZero, update);
|
||||||
|
orptr(mdo_addr, TypeEntries::null_seen);
|
||||||
|
jmpb(next);
|
||||||
|
|
||||||
|
bind(update);
|
||||||
|
load_klass(obj, obj);
|
||||||
|
|
||||||
|
xorptr(obj, mdo_addr);
|
||||||
|
testptr(obj, TypeEntries::type_klass_mask);
|
||||||
|
jccb(Assembler::zero, next); // klass seen before, nothing to
|
||||||
|
// do. The unknown bit may have been
|
||||||
|
// set already but no need to check.
|
||||||
|
|
||||||
|
testptr(obj, TypeEntries::type_unknown);
|
||||||
|
jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
|
||||||
|
|
||||||
|
cmpptr(mdo_addr, 0);
|
||||||
|
jccb(Assembler::equal, none);
|
||||||
|
cmpptr(mdo_addr, TypeEntries::null_seen);
|
||||||
|
jccb(Assembler::equal, none);
|
||||||
|
// There is a chance that the checks above (re-reading profiling
|
||||||
|
// data from memory) fail if another thread has just set the
|
||||||
|
// profiling to this obj's klass
|
||||||
|
xorptr(obj, mdo_addr);
|
||||||
|
testptr(obj, TypeEntries::type_klass_mask);
|
||||||
|
jccb(Assembler::zero, next);
|
||||||
|
|
||||||
|
// different than before. Cannot keep accurate profile.
|
||||||
|
orptr(mdo_addr, TypeEntries::type_unknown);
|
||||||
|
jmpb(next);
|
||||||
|
|
||||||
|
bind(none);
|
||||||
|
// first time here. Set profile type.
|
||||||
|
movptr(mdo_addr, obj);
|
||||||
|
|
||||||
|
bind(next);
|
||||||
|
}
|
||||||
|
|
||||||
|
void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
|
||||||
|
if (!ProfileInterpreter) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (MethodData::profile_arguments() || MethodData::profile_return()) {
|
||||||
|
Label profile_continue;
|
||||||
|
|
||||||
|
test_method_data_pointer(mdp, profile_continue);
|
||||||
|
|
||||||
|
int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
|
||||||
|
|
||||||
|
cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
|
||||||
|
jcc(Assembler::notEqual, profile_continue);
|
||||||
|
|
||||||
|
if (MethodData::profile_arguments()) {
|
||||||
|
Label done;
|
||||||
|
int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
|
||||||
|
addptr(mdp, off_to_args);
|
||||||
|
|
||||||
|
for (int i = 0; i < TypeProfileArgsLimit; i++) {
|
||||||
|
if (i > 0 || MethodData::profile_return()) {
|
||||||
|
// If return value type is profiled we may have no argument to profile
|
||||||
|
movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
|
||||||
|
subl(tmp, i*TypeStackSlotEntries::per_arg_count());
|
||||||
|
cmpl(tmp, TypeStackSlotEntries::per_arg_count());
|
||||||
|
jcc(Assembler::less, done);
|
||||||
|
}
|
||||||
|
movptr(tmp, Address(callee, Method::const_offset()));
|
||||||
|
load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
|
||||||
|
// stack offset o (zero based) from the start of the argument
|
||||||
|
// list, for n arguments translates into offset n - o - 1 from
|
||||||
|
// the end of the argument list
|
||||||
|
subptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
|
||||||
|
subl(tmp, 1);
|
||||||
|
Address arg_addr = argument_address(tmp);
|
||||||
|
movptr(tmp, arg_addr);
|
||||||
|
|
||||||
|
Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
|
||||||
|
profile_obj_type(tmp, mdo_arg_addr);
|
||||||
|
|
||||||
|
int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
|
||||||
|
addptr(mdp, to_add);
|
||||||
|
off_to_args += to_add;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (MethodData::profile_return()) {
|
||||||
|
movptr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
|
||||||
|
subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
|
||||||
|
}
|
||||||
|
|
||||||
|
bind(done);
|
||||||
|
|
||||||
|
if (MethodData::profile_return()) {
|
||||||
|
// We're right after the type profile for the last
|
||||||
|
// argument. tmp is the number of cell left in the
|
||||||
|
// CallTypeData/VirtualCallTypeData to reach its end. Non null
|
||||||
|
// if there's a return to profile.
|
||||||
|
assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
|
||||||
|
shll(tmp, exact_log2(DataLayout::cell_size));
|
||||||
|
addptr(mdp, tmp);
|
||||||
|
}
|
||||||
|
movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
|
||||||
|
} else {
|
||||||
|
assert(MethodData::profile_return(), "either profile call args or call ret");
|
||||||
|
update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// mdp points right after the end of the
|
||||||
|
// CallTypeData/VirtualCallTypeData, right after the cells for the
|
||||||
|
// return value type if there's one
|
||||||
|
|
||||||
|
bind(profile_continue);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
|
||||||
|
assert_different_registers(mdp, ret, tmp, _bcp_register);
|
||||||
|
if (ProfileInterpreter && MethodData::profile_return()) {
|
||||||
|
Label profile_continue, done;
|
||||||
|
|
||||||
|
test_method_data_pointer(mdp, profile_continue);
|
||||||
|
|
||||||
|
if (MethodData::profile_return_jsr292_only()) {
|
||||||
|
// If we don't profile all invoke bytecodes we must make sure
|
||||||
|
// it's a bytecode we indeed profile. We can't go back to the
|
||||||
|
// begining of the ProfileData we intend to update to check its
|
||||||
|
// type because we're right after it and we don't known its
|
||||||
|
// length
|
||||||
|
Label do_profile;
|
||||||
|
cmpb(Address(_bcp_register, 0), Bytecodes::_invokedynamic);
|
||||||
|
jcc(Assembler::equal, do_profile);
|
||||||
|
cmpb(Address(_bcp_register, 0), Bytecodes::_invokehandle);
|
||||||
|
jcc(Assembler::equal, do_profile);
|
||||||
|
get_method(tmp);
|
||||||
|
cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
|
||||||
|
jcc(Assembler::notEqual, profile_continue);
|
||||||
|
|
||||||
|
bind(do_profile);
|
||||||
|
}
|
||||||
|
|
||||||
|
Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
|
||||||
|
mov(tmp, ret);
|
||||||
|
profile_obj_type(tmp, mdo_ret_addr);
|
||||||
|
|
||||||
|
bind(profile_continue);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
|
||||||
|
if (ProfileInterpreter && MethodData::profile_parameters()) {
|
||||||
|
Label profile_continue, done;
|
||||||
|
|
||||||
|
test_method_data_pointer(mdp, profile_continue);
|
||||||
|
|
||||||
|
// Load the offset of the area within the MDO used for
|
||||||
|
// parameters. If it's negative we're not profiling any parameters
|
||||||
|
movl(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
|
||||||
|
testl(tmp1, tmp1);
|
||||||
|
jcc(Assembler::negative, profile_continue);
|
||||||
|
|
||||||
|
// Compute a pointer to the area for parameters from the offset
|
||||||
|
// and move the pointer to the slot for the last
|
||||||
|
// parameters. Collect profiling from last parameter down.
|
||||||
|
// mdo start + parameters offset + array length - 1
|
||||||
|
addptr(mdp, tmp1);
|
||||||
|
movptr(tmp1, Address(mdp, in_bytes(ArrayData::array_len_offset())));
|
||||||
|
decrement(tmp1, TypeStackSlotEntries::per_arg_count());
|
||||||
|
|
||||||
|
Label loop;
|
||||||
|
bind(loop);
|
||||||
|
|
||||||
|
int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
|
||||||
|
int type_base = in_bytes(ParametersTypeData::type_offset(0));
|
||||||
|
Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size);
|
||||||
|
Address arg_off(mdp, tmp1, per_arg_scale, off_base);
|
||||||
|
Address arg_type(mdp, tmp1, per_arg_scale, type_base);
|
||||||
|
|
||||||
|
// load offset on the stack from the slot for this parameter
|
||||||
|
movptr(tmp2, arg_off);
|
||||||
|
negptr(tmp2);
|
||||||
|
// read the parameter from the local area
|
||||||
|
movptr(tmp2, Address(_locals_register, tmp2, Interpreter::stackElementScale()));
|
||||||
|
|
||||||
|
// profile the parameter
|
||||||
|
profile_obj_type(tmp2, arg_type);
|
||||||
|
|
||||||
|
// go to next parameter
|
||||||
|
decrement(tmp1, TypeStackSlotEntries::per_arg_count());
|
||||||
|
jcc(Assembler::positive, loop);
|
||||||
|
|
||||||
|
bind(profile_continue);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
hotspot/src/cpu/x86/vm/interp_masm_x86.hpp (new file, 60 added lines)
@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_INTERP_MASM_X86_HPP
+#define CPU_X86_VM_INTERP_MASM_X86_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/invocationCounter.hpp"
+#include "runtime/frame.hpp"
+
+// This file specializes the assember with interpreter-specific macros
+
+
+class InterpreterMacroAssembler: public MacroAssembler {
+
+#ifdef TARGET_ARCH_MODEL_x86_32
+# include "interp_masm_x86_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_x86_64
+# include "interp_masm_x86_64.hpp"
+#endif
+
+ private:
+
+  Register _locals_register; // register that contains the pointer to the locals
+  Register _bcp_register; // register that contains the bcp
+
+ public:
+#ifndef CC_INTERP
+  void profile_obj_type(Register obj, const Address& mdo_addr);
+  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
+  void profile_return_type(Register mdp, Register ret, Register tmp);
+  void profile_parameters_type(Register mdp, Register tmp1, Register tmp2);
+#endif /* !CC_INTERP */
+
+};
+
+#endif // CPU_X86_VM_INTERP_MASM_X86_HPP
@ -23,7 +23,7 @@
  */

 #include "precompiled.hpp"
-#include "interp_masm_x86_32.hpp"
+#include "interp_masm_x86.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "oops/arrayOop.hpp"
@ -1046,159 +1046,6 @@ void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
|
|
||||||
Label update, next, none;
|
|
||||||
|
|
||||||
verify_oop(obj);
|
|
||||||
|
|
||||||
testptr(obj, obj);
|
|
||||||
jccb(Assembler::notZero, update);
|
|
||||||
orptr(mdo_addr, TypeEntries::null_seen);
|
|
||||||
jmpb(next);
|
|
||||||
|
|
||||||
bind(update);
|
|
||||||
load_klass(obj, obj);
|
|
||||||
|
|
||||||
xorptr(obj, mdo_addr);
|
|
||||||
testptr(obj, TypeEntries::type_klass_mask);
|
|
||||||
jccb(Assembler::zero, next); // klass seen before, nothing to
|
|
||||||
// do. The unknown bit may have been
|
|
||||||
// set already but no need to check.
|
|
||||||
|
|
||||||
testptr(obj, TypeEntries::type_unknown);
|
|
||||||
jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
|
|
||||||
|
|
||||||
cmpptr(mdo_addr, 0);
|
|
||||||
jccb(Assembler::equal, none);
|
|
||||||
cmpptr(mdo_addr, TypeEntries::null_seen);
|
|
||||||
jccb(Assembler::equal, none);
|
|
||||||
// There is a chance that the checks above (re-reading profiling
|
|
||||||
// data from memory) fail if another thread has just set the
|
|
||||||
// profiling to this obj's klass
|
|
||||||
xorptr(obj, mdo_addr);
|
|
||||||
testptr(obj, TypeEntries::type_klass_mask);
|
|
||||||
jccb(Assembler::zero, next);
|
|
||||||
|
|
||||||
// different than before. Cannot keep accurate profile.
|
|
||||||
orptr(mdo_addr, TypeEntries::type_unknown);
|
|
||||||
jmpb(next);
|
|
||||||
|
|
||||||
bind(none);
|
|
||||||
// first time here. Set profile type.
|
|
||||||
movptr(mdo_addr, obj);
|
|
||||||
|
|
||||||
bind(next);
|
|
||||||
}
|
|
||||||
|
|
||||||
void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
|
|
||||||
if (!ProfileInterpreter) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (MethodData::profile_arguments() || MethodData::profile_return()) {
|
|
||||||
Label profile_continue;
|
|
||||||
|
|
||||||
test_method_data_pointer(mdp, profile_continue);
|
|
||||||
|
|
||||||
int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
|
|
||||||
|
|
||||||
cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
|
|
||||||
jcc(Assembler::notEqual, profile_continue);
|
|
||||||
|
|
||||||
if (MethodData::profile_arguments()) {
|
|
||||||
Label done;
|
|
||||||
int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
|
|
||||||
addptr(mdp, off_to_args);
|
|
||||||
|
|
||||||
for (int i = 0; i < TypeProfileArgsLimit; i++) {
|
|
||||||
if (i > 0 || MethodData::profile_return()) {
|
|
||||||
// If return value type is profiled we may have no argument to profile
|
|
||||||
movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
|
|
||||||
subl(tmp, i*TypeStackSlotEntries::per_arg_count());
|
|
||||||
cmpl(tmp, TypeStackSlotEntries::per_arg_count());
|
|
||||||
jcc(Assembler::less, done);
|
|
||||||
}
|
|
||||||
movptr(tmp, Address(callee, Method::const_offset()));
|
|
||||||
load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
|
|
||||||
// stack offset o (zero based) from the start of the argument
|
|
||||||
// list, for n arguments translates into offset n - o - 1 from
|
|
||||||
// the end of the argument list
|
|
||||||
subl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
|
|
||||||
subl(tmp, 1);
|
|
||||||
Address arg_addr = argument_address(tmp);
|
|
||||||
movptr(tmp, arg_addr);
|
|
||||||
|
|
||||||
Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
|
|
||||||
profile_obj_type(tmp, mdo_arg_addr);
|
|
||||||
|
|
||||||
int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
|
|
||||||
addptr(mdp, to_add);
|
|
||||||
off_to_args += to_add;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (MethodData::profile_return()) {
|
|
||||||
movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
|
|
||||||
subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
|
|
||||||
}
|
|
||||||
|
|
||||||
bind(done);
|
|
||||||
|
|
||||||
if (MethodData::profile_return()) {
|
|
||||||
// We're right after the type profile for the last
|
|
||||||
// argument. tmp is the number of cell left in the
|
|
||||||
// CallTypeData/VirtualCallTypeData to reach its end. Non null
|
|
||||||
// if there's a return to profile.
|
|
||||||
assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
|
|
||||||
shll(tmp, exact_log2(DataLayout::cell_size));
|
|
||||||
addptr(mdp, tmp);
|
|
||||||
}
|
|
||||||
movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
|
|
||||||
} else {
|
|
||||||
assert(MethodData::profile_return(), "either profile call args or call ret");
|
|
||||||
update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
|
|
||||||
}
|
|
||||||
|
|
||||||
// mdp points right after the end of the
|
|
||||||
// CallTypeData/VirtualCallTypeData, right after the cells for the
|
|
||||||
// return value type if there's one
|
|
||||||
|
|
||||||
bind(profile_continue);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
|
|
||||||
assert_different_registers(mdp, ret, tmp, rsi);
|
|
||||||
if (ProfileInterpreter && MethodData::profile_return()) {
|
|
||||||
Label profile_continue, done;
|
|
||||||
|
|
||||||
test_method_data_pointer(mdp, profile_continue);
|
|
||||||
|
|
||||||
if (MethodData::profile_return_jsr292_only()) {
|
|
||||||
// If we don't profile all invoke bytecodes we must make sure
|
|
||||||
// it's a bytecode we indeed profile. We can't go back to the
|
|
||||||
// begining of the ProfileData we intend to update to check its
|
|
||||||
// type because we're right after it and we don't known its
|
|
||||||
// length
|
|
||||||
Label do_profile;
|
|
||||||
cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
|
|
||||||
jcc(Assembler::equal, do_profile);
|
|
||||||
cmpb(Address(rsi, 0), Bytecodes::_invokehandle);
|
|
||||||
jcc(Assembler::equal, do_profile);
|
|
||||||
get_method(tmp);
|
|
||||||
cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
|
|
||||||
jcc(Assembler::notEqual, profile_continue);
|
|
||||||
|
|
||||||
bind(do_profile);
|
|
||||||
}
|
|
||||||
|
|
||||||
Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
|
|
||||||
mov(tmp, ret);
|
|
||||||
profile_obj_type(tmp, mdo_ret_addr);
|
|
||||||
|
|
||||||
bind(profile_continue);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void InterpreterMacroAssembler::profile_call(Register mdp) {
|
void InterpreterMacroAssembler::profile_call(Register mdp) {
|
||||||
if (ProfileInterpreter) {
|
if (ProfileInterpreter) {
|
||||||
Label profile_continue;
|
Label profile_continue;
|
||||||
|
@ -22,18 +22,6 @@
  *
  */

-#ifndef CPU_X86_VM_INTERP_MASM_X86_32_HPP
-#define CPU_X86_VM_INTERP_MASM_X86_32_HPP
-
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "interpreter/invocationCounter.hpp"
-#include "runtime/frame.hpp"
-
-// This file specializes the assember with interpreter-specific macros
-
-
-class InterpreterMacroAssembler: public MacroAssembler {
 #ifndef CC_INTERP
  protected:
   // Interpreter specific version of call_VM_base
@ -59,7 +47,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
 #endif /* CC_INTERP */

  public:
-  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
+  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(rdi), _bcp_register(rsi) {}

   void load_earlyret_value(TosState state);

@ -215,9 +203,6 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void profile_taken_branch(Register mdp, Register bumped_count);
   void profile_not_taken_branch(Register mdp);
-  void profile_obj_type(Register obj, const Address& mdo_addr);
-  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
-  void profile_return_type(Register mdp, Register ret, Register tmp);
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
   void profile_virtual_call(Register receiver, Register mdp, Register scratch2,
@ -236,7 +221,3 @@ class InterpreterMacroAssembler: public MacroAssembler {
   // support for jvmti
   void notify_method_entry();
   void notify_method_exit(TosState state, NotifyMethodExitMode mode);
-
-};
-
-#endif // CPU_X86_VM_INTERP_MASM_X86_32_HPP
@ -23,7 +23,7 @@
  */

 #include "precompiled.hpp"
-#include "interp_masm_x86_64.hpp"
+#include "interp_masm_x86.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "oops/arrayOop.hpp"
@ -1067,160 +1067,6 @@ void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
|
|
||||||
Label update, next, none;
|
|
||||||
|
|
||||||
verify_oop(obj);
|
|
||||||
|
|
||||||
testptr(obj, obj);
|
|
||||||
jccb(Assembler::notZero, update);
|
|
||||||
orptr(mdo_addr, TypeEntries::null_seen);
|
|
||||||
jmpb(next);
|
|
||||||
|
|
||||||
bind(update);
|
|
||||||
load_klass(obj, obj);
|
|
||||||
|
|
||||||
xorptr(obj, mdo_addr);
|
|
||||||
testptr(obj, TypeEntries::type_klass_mask);
|
|
||||||
jccb(Assembler::zero, next); // klass seen before, nothing to
|
|
||||||
// do. The unknown bit may have been
|
|
||||||
// set already but no need to check.
|
|
||||||
|
|
||||||
testptr(obj, TypeEntries::type_unknown);
|
|
||||||
jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
|
|
||||||
|
|
||||||
// There is a chance that by the time we do these checks (re-reading
|
|
||||||
// profiling data from memory) another thread has set the profling
|
|
||||||
// to this obj's klass and we set the profiling as unknow
|
|
||||||
// erroneously
|
|
||||||
cmpptr(mdo_addr, 0);
|
|
||||||
jccb(Assembler::equal, none);
|
|
||||||
cmpptr(mdo_addr, TypeEntries::null_seen);
|
|
||||||
jccb(Assembler::equal, none);
|
|
||||||
// There is a chance that the checks above (re-reading profiling
|
|
||||||
// data from memory) fail if another thread has just set the
|
|
||||||
// profiling to this obj's klass
|
|
||||||
xorptr(obj, mdo_addr);
|
|
||||||
testptr(obj, TypeEntries::type_klass_mask);
|
|
||||||
jccb(Assembler::zero, next);
|
|
||||||
|
|
||||||
// different than before. Cannot keep accurate profile.
|
|
||||||
orptr(mdo_addr, TypeEntries::type_unknown);
|
|
||||||
jmpb(next);
|
|
||||||
|
|
||||||
bind(none);
|
|
||||||
// first time here. Set profile type.
|
|
||||||
movptr(mdo_addr, obj);
|
|
||||||
|
|
||||||
bind(next);
|
|
||||||
}
|
|
||||||
|
|
||||||
void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
|
|
||||||
if (!ProfileInterpreter) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (MethodData::profile_arguments() || MethodData::profile_return()) {
|
|
||||||
Label profile_continue;
|
|
||||||
|
|
||||||
test_method_data_pointer(mdp, profile_continue);
|
|
||||||
|
|
||||||
int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
|
|
||||||
|
|
||||||
cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
|
|
||||||
jcc(Assembler::notEqual, profile_continue);
|
|
||||||
|
|
||||||
if (MethodData::profile_arguments()) {
|
|
||||||
Label done;
|
|
||||||
int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
|
|
||||||
addptr(mdp, off_to_args);
|
|
||||||
|
|
||||||
for (int i = 0; i < TypeProfileArgsLimit; i++) {
|
|
||||||
if (i > 0 || MethodData::profile_return()) {
|
|
||||||
// If return value type is profiled we may have no argument to profile
|
|
||||||
movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
|
|
||||||
subl(tmp, i*TypeStackSlotEntries::per_arg_count());
|
|
||||||
cmpl(tmp, TypeStackSlotEntries::per_arg_count());
|
|
||||||
jcc(Assembler::less, done);
|
|
||||||
}
|
|
||||||
movptr(tmp, Address(callee, Method::const_offset()));
|
|
||||||
load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
|
|
||||||
subq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
|
|
||||||
subl(tmp, 1);
|
|
||||||
Address arg_addr = argument_address(tmp);
|
|
||||||
movptr(tmp, arg_addr);
|
|
||||||
|
|
||||||
Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
|
|
||||||
profile_obj_type(tmp, mdo_arg_addr);
|
|
||||||
|
|
||||||
int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
|
|
||||||
addptr(mdp, to_add);
|
|
||||||
off_to_args += to_add;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (MethodData::profile_return()) {
|
|
||||||
movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
|
|
||||||
subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
|
|
||||||
}
|
|
||||||
|
|
||||||
bind(done);
|
|
||||||
|
|
||||||
if (MethodData::profile_return()) {
|
|
||||||
// We're right after the type profile for the last
|
|
||||||
// argument. tmp is the number of cell left in the
|
|
||||||
// CallTypeData/VirtualCallTypeData to reach its end. Non null
|
|
||||||
// if there's a return to profile.
|
|
||||||
assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
|
|
||||||
shll(tmp, exact_log2(DataLayout::cell_size));
|
|
||||||
addptr(mdp, tmp);
|
|
||||||
}
|
|
||||||
movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
|
|
||||||
} else {
|
|
||||||
assert(MethodData::profile_return(), "either profile call args or call ret");
|
|
||||||
update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
|
|
||||||
}
|
|
||||||
|
|
||||||
// mdp points right after the end of the
|
|
||||||
// CallTypeData/VirtualCallTypeData, right after the cells for the
|
|
||||||
// return value type if there's one
|
|
||||||
|
|
||||||
bind(profile_continue);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
|
|
||||||
assert_different_registers(mdp, ret, tmp, r13);
|
|
||||||
if (ProfileInterpreter && MethodData::profile_return()) {
|
|
||||||
Label profile_continue, done;
|
|
||||||
|
|
||||||
test_method_data_pointer(mdp, profile_continue);
|
|
||||||
|
|
||||||
if (MethodData::profile_return_jsr292_only()) {
|
|
||||||
// If we don't profile all invoke bytecodes we must make sure
|
|
||||||
// it's a bytecode we indeed profile. We can't go back to the
|
|
||||||
// begining of the ProfileData we intend to update to check its
|
|
||||||
// type because we're right after it and we don't known its
|
|
||||||
// length
|
|
||||||
Label do_profile;
|
|
||||||
cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
|
|
||||||
jcc(Assembler::equal, do_profile);
|
|
||||||
cmpb(Address(r13, 0), Bytecodes::_invokehandle);
|
|
||||||
jcc(Assembler::equal, do_profile);
|
|
||||||
get_method(tmp);
|
|
||||||
cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
|
|
||||||
jcc(Assembler::notEqual, profile_continue);
|
|
||||||
|
|
||||||
bind(do_profile);
|
|
||||||
}
|
|
||||||
|
|
||||||
Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
|
|
||||||
mov(tmp, ret);
|
|
||||||
profile_obj_type(tmp, mdo_ret_addr);
|
|
||||||
|
|
||||||
bind(profile_continue);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void InterpreterMacroAssembler::profile_call(Register mdp) {
|
void InterpreterMacroAssembler::profile_call(Register mdp) {
|
||||||
if (ProfileInterpreter) {
|
if (ProfileInterpreter) {
|
||||||
Label profile_continue;
|
Label profile_continue;
|
||||||
|
@ -22,18 +22,6 @@
  *
  */

-#ifndef CPU_X86_VM_INTERP_MASM_X86_64_HPP
-#define CPU_X86_VM_INTERP_MASM_X86_64_HPP
-
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "interpreter/invocationCounter.hpp"
-#include "runtime/frame.hpp"
-
-// This file specializes the assember with interpreter-specific macros
-
-
-class InterpreterMacroAssembler: public MacroAssembler {
 #ifndef CC_INTERP
  protected:
   // Interpreter specific version of call_VM_base
@ -55,7 +43,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
 #endif // CC_INTERP

  public:
-  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
+  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(r14), _bcp_register(r13) {}

   void load_earlyret_value(TosState state);

@ -224,9 +212,6 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void profile_taken_branch(Register mdp, Register bumped_count);
   void profile_not_taken_branch(Register mdp);
-  void profile_obj_type(Register obj, const Address& mdo_addr);
-  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
-  void profile_return_type(Register mdp, Register ret, Register tmp);
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
   void profile_virtual_call(Register receiver, Register mdp,
@ -253,6 +238,3 @@ class InterpreterMacroAssembler: public MacroAssembler {
   // support for jvmti/dtrace
   void notify_method_entry();
   void notify_method_exit(TosState state, NotifyMethodExitMode mode);
-};
-
-#endif // CPU_X86_VM_INTERP_MASM_X86_64_HPP
@ -26,11 +26,8 @@
 #include "asm/assembler.hpp"
 #include "asm/register.hpp"
 #include "register_x86.hpp"
-#ifdef TARGET_ARCH_MODEL_x86_32
-# include "interp_masm_x86_32.hpp"
-#endif
-#ifdef TARGET_ARCH_MODEL_x86_64
-# include "interp_masm_x86_64.hpp"
+#ifdef TARGET_ARCH_x86
+# include "interp_masm_x86.hpp"
 #endif

 REGISTER_DEFINITION(Register, noreg);
@ -34,9 +34,9 @@
 // Run with +PrintInterpreter to get the VM to print out the size.
 // Max size with JVMTI
 #ifdef AMD64
-const static int InterpreterCodeSize = 208 * 1024;
+const static int InterpreterCodeSize = 256 * 1024;
 #else
-const static int InterpreterCodeSize = 176 * 1024;
+const static int InterpreterCodeSize = 224 * 1024;
 #endif // AMD64

 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
@ -1490,6 +1490,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
             in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
   __ movbool(do_not_unlock_if_synchronized, true);

+  __ profile_parameters_type(rax, rcx, rdx);
   // increment invocation count & check for overflow
   Label invocation_counter_overflow;
   Label profile_method;
@ -1497,6 +1497,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
             in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
   __ movbool(do_not_unlock_if_synchronized, true);

+  __ profile_parameters_type(rax, rcx, rdx);
   // increment invocation count & check for overflow
   Label invocation_counter_overflow;
   Label profile_method;
@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "code/vtableStubs.hpp"
-#include "interp_masm_x86_32.hpp"
+#include "interp_masm_x86.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "code/vtableStubs.hpp"
-#include "interp_masm_x86_64.hpp"
+#include "interp_masm_x86.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
@ -1538,6 +1538,11 @@ const RegMask Matcher::mathExactI_result_proj_mask() {
   return EAX_REG_mask();
 }

+const RegMask Matcher::mathExactL_result_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
 const RegMask Matcher::mathExactI_flags_proj_mask() {
   return INT_FLAGS_mask();
 }
@ -7519,7 +7524,7 @@ instruct cmovL_regUCF(cmpOpUCF cop, eFlagsRegUCF cr, eRegL dst, eRegL src) %{
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------

-instruct addExactI_rReg(eAXRegI dst, rRegI src, eFlagsReg cr)
+instruct addExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
 %{
   match(AddExactI dst src);
   effect(DEF cr);
@ -7531,7 +7536,7 @@ instruct addExactI_rReg(eAXRegI dst, rRegI src, eFlagsReg cr)
   ins_pipe(ialu_reg_reg);
 %}

-instruct addExactI_rReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
+instruct addExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
 %{
   match(AddExactI dst src);
   effect(DEF cr);
@ -7543,6 +7548,20 @@ instruct addExactI_rReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
   ins_pipe(ialu_reg_reg);
 %}

+instruct addExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
+%{
+  match(AddExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "ADD $dst,$src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Address);
+  %}
+  ins_pipe( ialu_reg_mem );
+%}
+
+
 // Integer Addition Instructions
 instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (AddI dst src));
@ -7851,6 +7870,44 @@ instruct xchgP( memory mem, pRegP newval) %{
|
|||||||
%}
|
%}
|
||||||
|
|
||||||
//----------Subtraction Instructions-------------------------------------------
|
//----------Subtraction Instructions-------------------------------------------
|
||||||
|
|
||||||
|
instruct subExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(SubExactI dst src);
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
format %{ "SUB $dst, $src\t# subExact int" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ subl($dst$$Register, $src$$Register);
|
||||||
|
%}
|
||||||
|
ins_pipe(ialu_reg_reg);
|
||||||
|
%}
|
||||||
|
|
||||||
|
instruct subExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(SubExactI dst src);
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
format %{ "SUB $dst, $src\t# subExact int" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ subl($dst$$Register, $src$$constant);
|
||||||
|
%}
|
||||||
|
ins_pipe(ialu_reg_reg);
|
||||||
|
%}
|
||||||
|
|
||||||
|
instruct subExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(SubExactI dst (LoadI src));
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
ins_cost(125);
|
||||||
|
format %{ "SUB $dst,$src\t# subExact int" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ subl($dst$$Register, $src$$Address);
|
||||||
|
%}
|
||||||
|
ins_pipe( ialu_reg_mem );
|
||||||
|
%}
|
||||||
|
|
||||||
// Integer Subtraction Instructions
|
// Integer Subtraction Instructions
|
||||||
instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
|
instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
|
||||||
match(Set dst (SubI dst src));
|
match(Set dst (SubI dst src));
|
||||||
@ -7919,6 +7976,16 @@ instruct negI_eReg(rRegI dst, immI0 zero, eFlagsReg cr) %{
|
|||||||
ins_pipe( ialu_reg );
|
ins_pipe( ialu_reg );
|
||||||
%}
|
%}
|
||||||
|
|
||||||
|
instruct negExactI_eReg(eAXRegI dst, eFlagsReg cr) %{
|
||||||
|
match(NegExactI dst);
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
format %{ "NEG $dst\t# negExact int"%}
|
||||||
|
ins_encode %{
|
||||||
|
__ negl($dst$$Register);
|
||||||
|
%}
|
||||||
|
ins_pipe(ialu_reg);
|
||||||
|
%}
|
||||||
|
|
||||||
//----------Multiplication/Division Instructions-------------------------------
|
//----------Multiplication/Division Instructions-------------------------------
|
||||||
// Integer Multiplication Instructions
|
// Integer Multiplication Instructions
|
||||||
@ -8131,6 +8198,46 @@ instruct mulL_eReg_con(eADXRegL dst, immL_127 src, rRegI tmp, eFlagsReg cr) %{
|
|||||||
ins_pipe( pipe_slow );
|
ins_pipe( pipe_slow );
|
||||||
%}
|
%}
|
||||||
|
|
||||||
|
instruct mulExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(MulExactI dst src);
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
ins_cost(300);
|
||||||
|
format %{ "IMUL $dst, $src\t# mulExact int" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ imull($dst$$Register, $src$$Register);
|
||||||
|
%}
|
||||||
|
ins_pipe(ialu_reg_reg_alu0);
|
||||||
|
%}
|
||||||
|
|
||||||
|
instruct mulExactI_eReg_imm(eAXRegI dst, rRegI src, immI imm, eFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(MulExactI src imm);
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
ins_cost(300);
|
||||||
|
format %{ "IMUL $dst, $src, $imm\t# mulExact int" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ imull($dst$$Register, $src$$Register, $imm$$constant);
|
||||||
|
%}
|
||||||
|
ins_pipe(ialu_reg_reg_alu0);
|
||||||
|
%}
|
||||||
|
|
||||||
|
instruct mulExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(MulExactI dst (LoadI src));
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
ins_cost(350);
|
||||||
|
format %{ "IMUL $dst, $src\t# mulExact int" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ imull($dst$$Register, $src$$Address);
|
||||||
|
%}
|
||||||
|
ins_pipe(ialu_reg_mem_alu0);
|
||||||
|
%}
|
||||||
|
|
||||||
|
|
||||||
// Integer DIV with Register
|
// Integer DIV with Register
|
||||||
instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
|
instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
|
||||||
match(Set rax (DivI rax div));
|
match(Set rax (DivI rax div));
|
||||||
|
@ -1653,6 +1653,10 @@ const RegMask Matcher::mathExactI_result_proj_mask() {
   return INT_RAX_REG_mask();
 }

+const RegMask Matcher::mathExactL_result_proj_mask() {
+  return LONG_RAX_REG_mask();
+}
+
 const RegMask Matcher::mathExactI_flags_proj_mask() {
   return INT_FLAGS_mask();
 }
@ -6962,6 +6966,58 @@ instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
|
|||||||
ins_pipe(ialu_reg_reg);
|
ins_pipe(ialu_reg_reg);
|
||||||
%}
|
%}
|
||||||
|
|
||||||
|
instruct addExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(AddExactI dst (LoadI src));
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
ins_cost(125); // XXX
|
||||||
|
format %{ "addl $dst, $src\t# addExact int" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ addl($dst$$Register, $src$$Address);
|
||||||
|
%}
|
||||||
|
|
||||||
|
ins_pipe(ialu_reg_mem);
|
||||||
|
%}
|
||||||
|
|
||||||
|
instruct addExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(AddExactL dst src);
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
format %{ "addq $dst, $src\t# addExact long" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ addq($dst$$Register, $src$$Register);
|
||||||
|
%}
|
||||||
|
ins_pipe(ialu_reg_reg);
|
||||||
|
%}
|
||||||
|
|
||||||
|
instruct addExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(AddExactL dst src);
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
format %{ "addq $dst, $src\t# addExact long" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ addq($dst$$Register, $src$$constant);
|
||||||
|
%}
|
||||||
|
ins_pipe(ialu_reg_reg);
|
||||||
|
%}
|
||||||
|
|
||||||
|
instruct addExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
|
||||||
|
%{
|
||||||
|
match(AddExactL dst (LoadL src));
|
||||||
|
effect(DEF cr);
|
||||||
|
|
||||||
|
ins_cost(125); // XXX
|
||||||
|
format %{ "addq $dst, $src\t# addExact long" %}
|
||||||
|
ins_encode %{
|
||||||
|
__ addq($dst$$Register, $src$$Address);
|
||||||
|
%}
|
||||||
+  ins_pipe(ialu_reg_mem);
+%}
+
instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (AddI dst src));

@ -7574,6 +7630,80 @@ instruct subI_mem_imm(memory dst, immI src, rFlagsReg cr)
  ins_pipe(ialu_mem_imm);
%}

+instruct subExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  format %{ "subl $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  format %{ "subl $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
+%{
+  match(SubExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "subl $dst, $src\t# subExact int" %}
+  ins_encode %{
+    __ subl($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
+instruct subExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
+%{
+  match(SubExactL dst src);
+  effect(DEF cr);
+
+  format %{ "subq $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
+%{
+  match(SubExactL dst (LoadL src));
+  effect(DEF cr);
+
+  format %{ "subq $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct subExactL_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
+%{
+  match(SubExactI dst src);
+  effect(DEF cr);
+
+  ins_cost(125);
+  format %{ "subq $dst, $src\t# subExact long" %}
+  ins_encode %{
+    __ subq($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (SubL dst src));

@ -7690,6 +7820,30 @@ instruct negL_mem(memory dst, immL0 zero, rFlagsReg cr)
  ins_pipe(ialu_reg);
%}

+instruct negExactI_rReg(rax_RegI dst, rFlagsReg cr)
+%{
+  match(NegExactI dst);
+  effect(KILL cr);
+
+  format %{ "negl $dst\t# negExact int" %}
+  ins_encode %{
+    __ negl($dst$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct negExactL_rReg(rax_RegL dst, rFlagsReg cr)
+%{
+  match(NegExactL dst);
+  effect(KILL cr);
+
+  format %{ "negq $dst\t# negExact long" %}
+  ins_encode %{
+    __ negq($dst$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
//----------Multiplication/Division Instructions-------------------------------
// Integer Multiplication Instructions

@ -7807,6 +7961,86 @@ instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr)
  ins_pipe(ialu_reg_reg_alu0);
%}

+instruct mulExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(MulExactI dst src);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "imull $dst, $src\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactI_rReg_imm(rax_RegI dst, rRegI src, immI imm, rFlagsReg cr)
+%{
+  match(MulExactI src imm);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "imull $dst, $src, $imm\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Register, $imm$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
+%{
+  match(MulExactI dst (LoadI src));
+  effect(DEF cr);
+
+  ins_cost(350);
+  format %{ "imull $dst, $src\t# mulExact int" %}
+  ins_encode %{
+    __ imull($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem_alu0);
+%}
+
+instruct mulExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
+%{
+  match(MulExactL dst src);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "imulq $dst, $src\t# mulExact long" %}
+  ins_encode %{
+    __ imulq($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactL_rReg_imm(rax_RegL dst, rRegL src, immL32 imm, rFlagsReg cr)
+%{
+  match(MulExactL src imm);
+  effect(DEF cr);
+
+  ins_cost(300);
+  format %{ "imulq $dst, $src, $imm\t# mulExact long" %}
+  ins_encode %{
+    __ imulq($dst$$Register, $src$$Register, $imm$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct mulExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
+%{
+  match(MulExactL dst (LoadL src));
+  effect(DEF cr);
+
+  ins_cost(350);
+  format %{ "imulq $dst, $src\t# mulExact long" %}
+  ins_encode %{
+    __ imulq($dst$$Register, $src$$Address);
+  %}
+  ins_pipe(ialu_reg_mem_alu0);
+%}
+
instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
                   rFlagsReg cr)
%{
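The subExact*/negExact*/mulExact* match rules above back the JDK 8 java.lang.Math exact-arithmetic intrinsics: the emitted subl/negl/imull leaves the CPU overflow flag live for the overflow check that follows. A minimal, illustrative Java sketch of the contract the generated code must preserve (the class name and values below are made up, not part of the patch):

```java
public class ExactMathContract {
    public static void main(String[] args) {
        // Non-overflowing cases behave like ordinary arithmetic.
        System.out.println(Math.multiplyExact(6, 7));        // 42

        // Overflow must raise ArithmeticException instead of wrapping;
        // the intrinsic form tests the overflow flag set by subl/imull/negl.
        try {
            Math.subtractExact(Integer.MIN_VALUE, 1);
        } catch (ArithmeticException expected) {
            System.out.println("int overflow detected");
        }
        try {
            Math.negateExact(Long.MIN_VALUE);
        } catch (ArithmeticException expected) {
            System.out.println("long overflow detected");
        }
    }
}
```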
@ -4746,6 +4746,10 @@ int os::fork_and_exec(char* cmd) {
// as libawt.so, and renamed libawt_xawt.so
//
bool os::is_headless_jre() {
+#ifdef __APPLE__
+  // We no longer build headless-only on Mac OS X
+  return false;
+#else
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];

@ -4777,6 +4781,7 @@ bool os::is_headless_jre() {
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
+#endif
}

// Get the default path to the core file

@ -1193,6 +1193,13 @@ void ArchDesc::buildMustCloneMap(FILE *fp_hpp, FILE *fp_cpp) {
      || strcmp(idealName,"FastLock") == 0
      || strcmp(idealName,"FastUnlock") == 0
      || strcmp(idealName,"AddExactI") == 0
+     || strcmp(idealName,"AddExactL") == 0
+     || strcmp(idealName,"SubExactI") == 0
+     || strcmp(idealName,"SubExactL") == 0
+     || strcmp(idealName,"MulExactI") == 0
+     || strcmp(idealName,"MulExactL") == 0
+     || strcmp(idealName,"NegExactI") == 0
+     || strcmp(idealName,"NegExactL") == 0
      || strcmp(idealName,"FlagsProj") == 0
      || strcmp(idealName,"Bool") == 0
      || strcmp(idealName,"Binary") == 0 ) {

@ -536,12 +536,6 @@ bool InstructForm::rematerialize(FormDict &globals, RegisterForm *registers ) {
  if( data_type != Form::none )
    rematerialize = true;

-  // Ugly: until a better fix is implemented, disable rematerialization for
-  // negD nodes because they are proved to be problematic.
-  if (is_ideal_negD()) {
-    return false;
-  }
-
  // Constants
  if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
    rematerialize = true;
@ -238,7 +238,18 @@ class Compilation: public StackObj {
    return env()->comp_level() == CompLevel_full_profile &&
      C1UpdateMethodData && C1ProfileCheckcasts;
  }
+  bool profile_parameters() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && MethodData::profile_parameters();
+  }
+  bool profile_arguments() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && MethodData::profile_arguments();
+  }
+  bool profile_return() {
+    return env()->comp_level() == CompLevel_full_profile &&
+      C1UpdateMethodData && MethodData::profile_return();
+  }
  // will compilation make optimistic assumptions that might lead to
  // deoptimization and that the runtime will account for?
  bool is_optimistic() const {

@ -1470,7 +1470,7 @@ void GraphBuilder::method_return(Value x) {
  set_state(state()->caller_state()->copy_for_parsing());
  if (x != NULL) {
    state()->push(x->type(), x);
-   if (profile_calls() && MethodData::profile_return() && x->type()->is_object_kind()) {
+   if (profile_return() && x->type()->is_object_kind()) {
      ciMethod* caller = state()->scope()->method();
      ciMethodData* md = caller->method_data_or_null();
      ciProfileData* data = md->bci_to_data(invoke_bci);

@ -1672,15 +1672,23 @@ Dependencies* GraphBuilder::dependency_recorder() const {
}

// How many arguments do we want to profile?
-Values* GraphBuilder::args_list_for_profiling(int& start, bool may_have_receiver) {
+Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) {
  int n = 0;
-  assert(start == 0, "should be initialized");
-  if (MethodData::profile_arguments()) {
+  bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
+  start = has_receiver ? 1 : 0;
+  if (profile_arguments()) {
    ciProfileData* data = method()->method_data()->bci_to_data(bci());
    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
      n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
-     bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
-     start = has_receiver ? 1 : 0;
+    }
+  }
+  // If we are inlining then we need to collect arguments to profile parameters for the target
+  if (profile_parameters() && target != NULL) {
+    if (target->method_data() != NULL && target->method_data()->parameters_type_data() != NULL) {
+      // The receiver is profiled on method entry so it's included in
+      // the number of parameters but here we're only interested in
+      // actual arguments.
+      n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start);
    }
  }
  if (n > 0) {

@ -1690,9 +1698,9 @@ Values* GraphBuilder::args_list_for_profiling(int& start, bool may_have_receiver
}

// Collect arguments that we want to profile in a list
-Values* GraphBuilder::collect_args_for_profiling(Values* args, bool may_have_receiver) {
+Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) {
  int start = 0;
- Values* obj_args = args_list_for_profiling(start, may_have_receiver);
+ Values* obj_args = args_list_for_profiling(target, start, may_have_receiver);
  if (obj_args == NULL) {
    return NULL;
  }

@ -2006,7 +2014,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
      } else if (exact_target != NULL) {
        target_klass = exact_target->holder();
      }
-     profile_call(target, recv, target_klass, collect_args_for_profiling(args, false), false);
+     profile_call(target, recv, target_klass, collect_args_for_profiling(args, NULL, false), false);
    }
  }

@ -2021,7 +2029,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
      push(result_type, result);
    }
  }
- if (profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
+ if (profile_return() && result_type->is_object_kind()) {
    profile_return_type(result, target);
  }
}

@ -3561,7 +3569,7 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
        recv = args->at(0);
        null_check(recv);
      }
-     profile_call(callee, recv, NULL, collect_args_for_profiling(args, true), true);
+     profile_call(callee, recv, NULL, collect_args_for_profiling(args, callee, true), true);
    }
  }
}

@ -3572,7 +3580,7 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
  Value value = append_split(result);
  if (result_type != voidType) push(result_type, value);

- if (callee != method() && profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
+ if (callee != method() && profile_return() && result_type->is_object_kind()) {
    profile_return_type(result, callee);
  }

@ -3760,6 +3768,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode

  // now perform tests that are based on flag settings
  if (callee->force_inline()) {
+   if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel");
    print_inlining(callee, "force inline by annotation");
  } else if (callee->should_inline()) {
    print_inlining(callee, "force inline by CompileOracle");

@ -3820,7 +3829,7 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode

  if (profile_calls()) {
    int start = 0;
-   Values* obj_args = args_list_for_profiling(start, has_receiver);
+   Values* obj_args = args_list_for_profiling(callee, start, has_receiver);
    if (obj_args != NULL) {
      int s = obj_args->size();
      // if called through method handle invoke, some arguments may have been popped

@ -386,9 +386,12 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
  bool profile_calls() { return _compilation->profile_calls(); }
  bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
  bool profile_checkcasts() { return _compilation->profile_checkcasts(); }
+ bool profile_parameters() { return _compilation->profile_parameters(); }
+ bool profile_arguments() { return _compilation->profile_arguments(); }
+ bool profile_return() { return _compilation->profile_return(); }

- Values* args_list_for_profiling(int& start, bool may_have_receiver);
- Values* collect_args_for_profiling(Values* args, bool may_have_receiver);
+ Values* args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver);
+ Values* collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver);

 public:
  NOT_PRODUCT(void print_stats();)
@ -183,10 +183,10 @@ char LIR_OprDesc::type_char(BasicType t) {
    case T_LONG:
    case T_OBJECT:
    case T_ADDRESS:
-   case T_METADATA:
    case T_VOID:
      return ::type2char(t);
+   case T_METADATA:
+     return 'M';
    case T_ILLEGAL:
      return '?';

@ -1175,7 +1175,7 @@ void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
-   signature.append(T_OBJECT); // Method*
+   signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);

@ -1265,6 +1265,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
+ LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr

@ -1272,8 +1273,11 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
  if (x->needs_null_check()) {
    info = state_for(x);
  }
- __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info);
- __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
+ // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
+ // meaning of these two is mixed up (see JDK-8026837).
+ __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
+ __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}

@ -2643,6 +2647,39 @@ ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, in
  return result;
}

+// profile parameters on entry to the root of the compilation
+void LIRGenerator::profile_parameters(Base* x) {
+  if (compilation()->profile_parameters()) {
+    CallingConvention* args = compilation()->frame_map()->incoming_arguments();
+    ciMethodData* md = scope()->method()->method_data_or_null();
+    assert(md != NULL, "Sanity");
+
+    if (md->parameters_type_data() != NULL) {
+      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
+      ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
+      LIR_Opr mdp = LIR_OprFact::illegalOpr;
+      for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
+        LIR_Opr src = args->at(i);
+        assert(!src->is_illegal(), "check");
+        BasicType t = src->type();
+        if (t == T_OBJECT || t == T_ARRAY) {
+          intptr_t profiled_k = parameters->type(j);
+          Local* local = x->state()->local_at(java_index)->as_Local();
+          ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
+                                            in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
+                                            profiled_k, local, mdp, false, local->declared_type()->as_klass());
+          // If the profile is known statically set it once for all and do not emit any code
+          if (exact != NULL) {
+            md->set_parameter_type(j, exact);
+          }
+          j++;
+        }
+        java_index += type2size[t];
+      }
+    }
+  }
+}
+
void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers

@ -2683,7 +2720,7 @@ void LIRGenerator::do_Base(Base* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
-   signature.append(T_OBJECT); // Method*
+   signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);

@ -2718,6 +2755,7 @@ void LIRGenerator::do_Base(Base* x) {

  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
+   profile_parameters(x);
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
    increment_invocation_counter(info);
  }

@ -3077,11 +3115,12 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
}

void LIRGenerator::profile_arguments(ProfileCall* x) {
- if (MethodData::profile_arguments()) {
+ if (compilation()->profile_arguments()) {
    int bci = x->bci_of_invoke();
    ciMethodData* md = x->method()->method_data_or_null();
    ciProfileData* data = md->bci_to_data(bci);
-   if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
+   if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
+       (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
      ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
      int base_offset = md->byte_offset_of_slot(data, extra);
      LIR_Opr mdp = LIR_OprFact::illegalOpr;
@ -3107,6 +3146,71 @@ void LIRGenerator::profile_arguments(ProfileCall* x) {
        md->set_argument_type(bci, i, exact);
      }
    }
+  } else {
+#ifdef ASSERT
+    Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
+    int n = x->nb_profiled_args();
+    assert(MethodData::profile_parameters() && x->inlined() &&
+           ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)),
+           "only at JSR292 bytecodes");
+#endif
+  }
+ }
+}
+
+// profile parameters on entry to an inlined method
+void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
+  if (compilation()->profile_parameters() && x->inlined()) {
+    ciMethodData* md = x->callee()->method_data_or_null();
+    if (md != NULL) {
+      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
+      if (parameters_type_data != NULL) {
+        ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
+        LIR_Opr mdp = LIR_OprFact::illegalOpr;
+        bool has_receiver = !x->callee()->is_static();
+        ciSignature* sig = x->callee()->signature();
+        ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
+        int i = 0; // to iterate on the Instructions
+        Value arg = x->recv();
+        bool not_null = false;
+        int bci = x->bci_of_invoke();
+        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
+        // The first parameter is the receiver so that's what we start
+        // with if it exists. On exception if method handle call to
+        // virtual method has receiver in the args list
+        if (arg == NULL || !Bytecodes::has_receiver(bc)) {
+          i = 1;
+          arg = x->profiled_arg_at(0);
+          not_null = !x->arg_needs_null_check(0);
+        }
+        int k = 0; // to iterate on the profile data
+        for (;;) {
+          intptr_t profiled_k = parameters->type(k);
+          ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
+                                            in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
+                                            profiled_k, arg, mdp, not_null, sig_stream.next_klass());
+          // If the profile is known statically set it once for all and do not emit any code
+          if (exact != NULL) {
+            md->set_parameter_type(k, exact);
+          }
+          k++;
+          if (k >= parameters_type_data->number_of_parameters()) {
+#ifdef ASSERT
+            int extra = 0;
+            if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
+                x->nb_profiled_args() >= TypeProfileParmsLimit &&
+                x->recv() != NULL && Bytecodes::has_receiver(bc)) {
+              extra += 1;
+            }
+            assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
+#endif
+            break;
+          }
+          arg = x->profiled_arg_at(i);
+          not_null = !x->arg_needs_null_check(i);
+          i++;
+        }
+      }
+    }
+  }
+}

@ -3122,6 +3226,11 @@ void LIRGenerator::do_ProfileCall(ProfileCall* x) {
    profile_arguments(x);
  }

+ // profile parameters on inlined method entry including receiver
+ if (x->recv() != NULL || x->nb_profiled_args() > 0) {
+   profile_parameters_at_call(x);
+ }
+
  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();

@ -3222,7 +3331,7 @@ void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
  BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());

  if (x->pass_thread()) {
-   signature->append(T_ADDRESS);
+   signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    args->append(getThreadPointer());
  }

@ -436,6 +436,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
#endif
  ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
  void profile_arguments(ProfileCall* x);
+ void profile_parameters(Base* x);
+ void profile_parameters_at_call(ProfileCall* x);

 public:
  Compilation* compilation() const { return _compilation; }

@ -75,9 +75,9 @@

// Map BasicType to spill size in 32-bit words, matching VMReg's notion of words
#ifdef _LP64
-static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 1, -1};
+static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 2, 1, 2, 1, -1};
#else
-static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1};
+static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1, 1, 1, -1};
#endif

@ -341,6 +341,8 @@
  diagnostic(bool, C1PatchInvokeDynamic, true, \
          "Patch invokedynamic appendix not known at compile time") \
                                                        \
+ develop(intx, MaxForceInlineLevel, 100, \
+         "maximum number of nested @ForceInline calls that are inlined") \
+                                                       \

@ -483,8 +483,7 @@ ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool,
  {
    // We have to lock the cpool to keep the oop from being resolved
    // while we are accessing it.
-   oop cplock = cpool->lock();
-   ObjectLocker ol(cplock, THREAD, cplock != NULL);
+   MonitorLockerEx ml(cpool->lock());
    constantTag tag = cpool->tag_at(index);
    if (tag.is_klass()) {
      // The klass has been inserted into the constant pool
@ -565,6 +565,116 @@ void ciCallProfile::add_receiver(ciKlass* receiver, int receiver_count) {
  if (_limit < MorphismLimit) _limit++;
}

+void ciMethod::assert_virtual_call_type_ok(int bci) {
+  assert(java_code_at_bci(bci) == Bytecodes::_invokevirtual ||
+         java_code_at_bci(bci) == Bytecodes::_invokeinterface, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci))));
+}
+
+void ciMethod::assert_call_type_ok(int bci) {
+  assert(java_code_at_bci(bci) == Bytecodes::_invokestatic ||
+         java_code_at_bci(bci) == Bytecodes::_invokespecial ||
+         java_code_at_bci(bci) == Bytecodes::_invokedynamic, err_msg("unexpected bytecode %s", Bytecodes::name(java_code_at_bci(bci))));
+}
+
+/**
+ * Check whether profiling provides a type for the argument i to the
+ * call at bci bci
+ *
+ * @param bci  bci of the call
+ * @param i    argument number
+ * @return     profiled type
+ *
+ * If the profile reports that the argument may be null, return false
+ * at least for now.
+ */
+ciKlass* ciMethod::argument_profiled_type(int bci, int i) {
+  if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
+    ciProfileData* data = method_data()->bci_to_data(bci);
+    if (data != NULL) {
+      if (data->is_VirtualCallTypeData()) {
+        assert_virtual_call_type_ok(bci);
+        ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
+        if (i >= call->number_of_arguments()) {
+          return NULL;
+        }
+        ciKlass* type = call->valid_argument_type(i);
+        if (type != NULL && !call->argument_maybe_null(i)) {
+          return type;
+        }
+      } else if (data->is_CallTypeData()) {
+        assert_call_type_ok(bci);
+        ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
+        if (i >= call->number_of_arguments()) {
+          return NULL;
+        }
+        ciKlass* type = call->valid_argument_type(i);
+        if (type != NULL && !call->argument_maybe_null(i)) {
+          return type;
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+/**
+ * Check whether profiling provides a type for the return value from
+ * the call at bci bci
+ *
+ * @param bci  bci of the call
+ * @return     profiled type
+ *
+ * If the profile reports that the argument may be null, return false
+ * at least for now.
+ */
+ciKlass* ciMethod::return_profiled_type(int bci) {
+  if (MethodData::profile_return() && method_data() != NULL && method_data()->is_mature()) {
+    ciProfileData* data = method_data()->bci_to_data(bci);
+    if (data != NULL) {
+      if (data->is_VirtualCallTypeData()) {
+        assert_virtual_call_type_ok(bci);
+        ciVirtualCallTypeData* call = (ciVirtualCallTypeData*)data->as_VirtualCallTypeData();
+        ciKlass* type = call->valid_return_type();
+        if (type != NULL && !call->return_maybe_null()) {
+          return type;
+        }
+      } else if (data->is_CallTypeData()) {
+        assert_call_type_ok(bci);
+        ciCallTypeData* call = (ciCallTypeData*)data->as_CallTypeData();
+        ciKlass* type = call->valid_return_type();
+        if (type != NULL && !call->return_maybe_null()) {
+          return type;
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+/**
+ * Check whether profiling provides a type for the parameter i
+ *
+ * @param i  parameter number
+ * @return   profiled type
+ *
+ * If the profile reports that the argument may be null, return false
+ * at least for now.
+ */
+ciKlass* ciMethod::parameter_profiled_type(int i) {
+  if (MethodData::profile_parameters() && method_data() != NULL && method_data()->is_mature()) {
+    ciParametersTypeData* parameters = method_data()->parameters_type_data();
+    if (parameters != NULL && i < parameters->number_of_parameters()) {
+      ciKlass* type = parameters->valid_parameter_type(i);
+      if (type != NULL && !parameters->parameter_maybe_null(i)) {
+        return type;
+      }
+    }
+  }
+  return NULL;
+}
+
// ------------------------------------------------------------------
// ciMethod::find_monomorphic_target
//
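The three ciMethod queries added above expose the new argument/parameter/return type profiles to the compiler. Purely as an illustration of the Java-level situation they describe (the classes below are invented, not part of the patch): when a profiled call site only ever sees a single non-null concrete type, these queries can report that type and the compiler can specialize the call accordingly.

```java
interface Shape { double area(); }

final class Circle implements Shape {
    private final double r;
    Circle(double r) { this.r = r; }
    public double area() { return Math.PI * r * r; }
}

public class ProfiledCallSite {
    static double total;

    // The 'shape' argument is a candidate for argument/parameter type profiling;
    // if the profile only ever records a non-null Circle here, a query such as
    // argument_profiled_type() can hand that exact type to the compiler.
    static void accumulate(Shape shape) { total += shape.area(); }

    public static void main(String[] args) {
        for (int i = 0; i < 1_000_000; i++) {
            accumulate(new Circle(i % 10));
        }
        System.out.println(total);
    }
}
```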
@ -117,6 +117,10 @@ class ciMethod : public ciMetadata {
    *bcp = code;
  }

+  // Check bytecode and profile data collected are compatible
+  void assert_virtual_call_type_ok(int bci);
+  void assert_call_type_ok(int bci);
+
 public:
  // Basic method information.
  ciFlags flags() const { check_is_loaded(); return _flags; }

@ -230,6 +234,11 @@ class ciMethod : public ciMetadata {
  ciCallProfile call_profile_at_bci(int bci);
  int interpreter_call_site_count(int bci);

+  // Does type profiling provide a useful type at this point?
+  ciKlass* argument_profiled_type(int bci, int i);
+  ciKlass* parameter_profiled_type(int i);
+  ciKlass* return_profiled_type(int bci);
+
  ciField* get_field_at_bci( int bci, bool &will_link);
  ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);

@ -53,6 +53,7 @@ ciMethodData::ciMethodData(MethodData* md) : ciMetadata(md) {
  _hint_di = first_di();
  // Initialize the escape information (to "don't know.");
  _eflags = _arg_local = _arg_stack = _arg_returned = 0;
+ _parameters = NULL;
}

// ------------------------------------------------------------------

@ -74,6 +75,7 @@ ciMethodData::ciMethodData() : ciMetadata(NULL) {
  _hint_di = first_di();
  // Initialize the escape information (to "don't know.");
  _eflags = _arg_local = _arg_stack = _arg_returned = 0;
+ _parameters = NULL;
}

void ciMethodData::load_data() {

@ -108,6 +110,12 @@ void ciMethodData::load_data() {
    ci_data = next_data(ci_data);
    data = mdo->next_data(data);
  }
+ if (mdo->parameters_type_data() != NULL) {
+   _parameters = data_layout_at(mdo->parameters_type_data_di());
+   ciParametersTypeData* parameters = new ciParametersTypeData(_parameters);
+   parameters->translate_from(mdo->parameters_type_data());
+ }
+
  // Note: Extra data are all BitData, and do not need translation.
  _current_mileage = MethodData::mileage_of(mdo->method());
  _invocation_counter = mdo->invocation_count();

@ -182,6 +190,8 @@ ciProfileData* ciMethodData::data_at(int data_index) {
    return new ciCallTypeData(data_layout);
  case DataLayout::virtual_call_type_data_tag:
    return new ciVirtualCallTypeData(data_layout);
+ case DataLayout::parameters_type_data_tag:
+   return new ciParametersTypeData(data_layout);
  };
}

@ -318,6 +328,14 @@ void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
  }
}

+void ciMethodData::set_parameter_type(int i, ciKlass* k) {
+  VM_ENTRY_MARK;
+  MethodData* mdo = get_MethodData();
+  if (mdo != NULL) {
+    mdo->parameters_type_data()->set_type(i, k->get_Klass());
+  }
+}
+
void ciMethodData::set_return_type(int bci, ciKlass* k) {
  VM_ENTRY_MARK;
  MethodData* mdo = get_MethodData();

@ -605,4 +623,9 @@ void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
    ret()->print_data_on(st);
  }
}
+
+void ciParametersTypeData::print_data_on(outputStream* st) const {
+  st->print_cr("Parametertypes");
+  parameters()->print_data_on(st);
+}
#endif

@ -43,6 +43,7 @@ class ciMultiBranchData;
class ciArgInfoData;
class ciCallTypeData;
class ciVirtualCallTypeData;
+class ciParametersTypeData;

typedef ProfileData ciProfileData;

@ -99,6 +100,10 @@ public:
    return valid_ciklass(type(i));
  }

+  bool maybe_null(int i) const {
+    return was_null_seen(type(i));
+  }
+
#ifndef PRODUCT
  void print_data_on(outputStream* st) const;
#endif

@ -112,6 +117,10 @@ public:
    return valid_ciklass(type());
  }

+  bool maybe_null() const {
+    return was_null_seen(type());
+  }
+
#ifndef PRODUCT
  void print_data_on(outputStream* st) const;
#endif

@ -124,7 +133,7 @@ public:
  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); }
  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); }

- void translate_type_data_from(const ProfileData* data) {
+ void translate_from(const ProfileData* data) {
    if (has_arguments()) {
      args()->translate_type_data_from(data->as_CallTypeData()->args());
    }

@ -153,6 +162,14 @@ public:
    return ret()->valid_type();
  }

+  bool argument_maybe_null(int i) const {
+    return args()->maybe_null(i);
+  }
+
+  bool return_maybe_null() const {
+    return ret()->maybe_null();
+  }
+
#ifndef PRODUCT
  void print_data_on(outputStream* st) const;
#endif

@ -259,6 +276,14 @@ public:
    return ret()->valid_type();
  }

+  bool argument_maybe_null(int i) const {
+    return args()->maybe_null(i);
+  }
+
+  bool return_maybe_null() const {
+    return ret()->maybe_null();
+  }
+
#ifndef PRODUCT
  void print_data_on(outputStream* st) const;
#endif

@ -290,6 +315,29 @@ public:
  ciArgInfoData(DataLayout* layout) : ArgInfoData(layout) {};
};

+class ciParametersTypeData : public ParametersTypeData {
+public:
+  ciParametersTypeData(DataLayout* layout) : ParametersTypeData(layout) {}
+
+  virtual void translate_from(const ProfileData* data) {
+    parameters()->translate_type_data_from(data->as_ParametersTypeData()->parameters());
+  }
+
+  ciTypeStackSlotEntries* parameters() const { return (ciTypeStackSlotEntries*)ParametersTypeData::parameters(); }
+
+  ciKlass* valid_parameter_type(int i) const {
+    return parameters()->valid_type(i);
+  }
+
+  bool parameter_maybe_null(int i) const {
+    return parameters()->maybe_null(i);
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st) const;
+#endif
+};
+
// ciMethodData
//
// This class represents a MethodData* in the HotSpot virtual

@ -335,6 +383,10 @@ private:
  // Coherent snapshot of original header.
  MethodData _orig;

+  // Dedicated area dedicated to parameters. Null if no parameter
+  // profiling for this method.
+  DataLayout* _parameters;
+
  ciMethodData(MethodData* md);
  ciMethodData();

@ -403,6 +455,7 @@ public:
  // If the compiler finds a profiled type that is known statically
  // for sure, set it in the MethodData
  void set_argument_type(int bci, int i, ciKlass* k);
+ void set_parameter_type(int i, ciKlass* k);
  void set_return_type(int bci, ciKlass* k);

  void load_data();

@ -467,6 +520,10 @@ public:
  bool is_arg_returned(int i) const;
  uint arg_modified(int arg) const;

+  ciParametersTypeData* parameters_type_data() const {
+    return _parameters != NULL ? new ciParametersTypeData(_parameters) : NULL;
+  }
+
  // Code generation helper
  ByteSize offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data);
  int byte_offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) { return in_bytes(offset_of_slot(data, slot_offset_in_data)); }

@ -2360,6 +2360,11 @@ methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name,
  objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty));
  assert(appendix_box->obj_at(0) == NULL, "");

+ // This should not happen.  JDK code should take care of that.
+ if (accessing_klass.is_null() || method_type.is_null()) {
+   THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokehandle", empty);
+ }
+
  // call java.lang.invoke.MethodHandleNatives::linkMethod(... String, MethodType) -> MemberName
  JavaCallArguments args;
  args.push_oop(accessing_klass()->java_mirror());

@ -2485,6 +2490,9 @@ Handle SystemDictionary::link_method_handle_constant(KlassHandle caller,
  Handle type;
  if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
    type = find_method_handle_type(signature, caller, CHECK_(empty));
+ } else if (caller.is_null()) {
+   // This should not happen.  JDK code should take care of that.
+   THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad MH constant", empty);
  } else {
    ResourceMark rm(THREAD);
    SignatureStream ss(signature, false);

@ -2548,6 +2556,11 @@ methodHandle SystemDictionary::find_dynamic_call_site_invoker(KlassHandle caller
  Handle method_name = java_lang_String::create_from_symbol(name, CHECK_(empty));
  Handle method_type = find_method_handle_type(type, caller, CHECK_(empty));

+ // This should not happen.  JDK code should take care of that.
+ if (caller.is_null() || method_type.is_null()) {
+   THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokedynamic", empty);
+ }
+
  objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty));
  assert(appendix_box->obj_at(0) == NULL, "");

@ -624,6 +624,7 @@
  do_class(java_lang_StrictMath, "java/lang/StrictMath") \
  do_signature(double2_double_signature, "(DD)D") \
  do_signature(int2_int_signature, "(II)I") \
+ do_signature(long2_long_signature, "(JJ)J") \
  \
  /* here are the math names, all together: */ \
  do_name(abs_name,"abs") do_name(sin_name,"sin") do_name(cos_name,"cos") \

@ -632,8 +633,11 @@
  do_name(exp_name,"exp") do_name(min_name,"min") do_name(max_name,"max") \
  \
  do_name(addExact_name,"addExact") \
- do_name(subtractExact_name,"subtractExact") \
+ do_name(decrementExact_name,"decrementExact") \
+ do_name(incrementExact_name,"incrementExact") \
  do_name(multiplyExact_name,"multiplyExact") \
+ do_name(negateExact_name,"negateExact") \
+ do_name(subtractExact_name,"subtractExact") \
  \
  do_intrinsic(_dabs, java_lang_Math, abs_name, double_double_signature, F_S) \
  do_intrinsic(_dsin, java_lang_Math, sin_name, double_double_signature, F_S) \
@ -647,7 +651,18 @@
|
|||||||
do_intrinsic(_dexp, java_lang_Math, exp_name, double_double_signature, F_S) \
|
do_intrinsic(_dexp, java_lang_Math, exp_name, double_double_signature, F_S) \
|
||||||
do_intrinsic(_min, java_lang_Math, min_name, int2_int_signature, F_S) \
|
do_intrinsic(_min, java_lang_Math, min_name, int2_int_signature, F_S) \
|
||||||
do_intrinsic(_max, java_lang_Math, max_name, int2_int_signature, F_S) \
|
do_intrinsic(_max, java_lang_Math, max_name, int2_int_signature, F_S) \
|
||||||
do_intrinsic(_addExact, java_lang_Math, addExact_name, int2_int_signature, F_S) \
|
do_intrinsic(_addExactI, java_lang_Math, addExact_name, int2_int_signature, F_S) \
|
||||||
|
do_intrinsic(_addExactL, java_lang_Math, addExact_name, long2_long_signature, F_S) \
|
||||||
|
do_intrinsic(_decrementExactI, java_lang_Math, decrementExact_name, int_int_signature, F_S) \
|
||||||
|
do_intrinsic(_decrementExactL, java_lang_Math, decrementExact_name, long2_long_signature, F_S) \
|
||||||
|
do_intrinsic(_incrementExactI, java_lang_Math, incrementExact_name, int_int_signature, F_S) \
|
||||||
|
do_intrinsic(_incrementExactL, java_lang_Math, incrementExact_name, long2_long_signature, F_S) \
|
||||||
|
do_intrinsic(_multiplyExactI, java_lang_Math, multiplyExact_name, int2_int_signature, F_S) \
|
||||||
|
do_intrinsic(_multiplyExactL, java_lang_Math, multiplyExact_name, long2_long_signature, F_S) \
|
||||||
|
do_intrinsic(_negateExactI, java_lang_Math, negateExact_name, int_int_signature, F_S) \
|
||||||
|
do_intrinsic(_negateExactL, java_lang_Math, negateExact_name, long_long_signature, F_S) \
|
||||||
|
do_intrinsic(_subtractExactI, java_lang_Math, subtractExact_name, int2_int_signature, F_S) \
|
||||||
|
do_intrinsic(_subtractExactL, java_lang_Math, subtractExact_name, long2_long_signature, F_S) \
|
||||||
\
|
\
|
||||||
do_intrinsic(_floatToRawIntBits, java_lang_Float, floatToRawIntBits_name, float_int_signature, F_S) \
|
do_intrinsic(_floatToRawIntBits, java_lang_Float, floatToRawIntBits_name, float_int_signature, F_S) \
|
||||||
do_name( floatToRawIntBits_name, "floatToRawIntBits") \
|
do_name( floatToRawIntBits_name, "floatToRawIntBits") \
|
||||||
|
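With these symbols in place, both the int and the long overloads of the six java.lang.Math exact operations map to dedicated intrinsic IDs. A short, illustrative Java program touching each overload (nothing below comes from the patch; it is ordinary public JDK 8 API):

```java
public class ExactIntrinsicOverloads {
    public static void main(String[] args) {
        int  i = Math.addExact(1, 2);        // _addExactI
        long l = Math.addExact(1L, 2L);      // _addExactL
        i = Math.incrementExact(i);          // _incrementExactI
        l = Math.decrementExact(l);          // _decrementExactL
        i = Math.multiplyExact(i, 3);        // _multiplyExactI
        l = Math.negateExact(l);             // _negateExactL
        i = Math.subtractExact(i, 1);        // _subtractExactI
        System.out.println(i + " " + l);
    }
}
```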
@ -1297,13 +1297,6 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
    method->jmethod_id();
  }

- // If the compiler is shut off due to code cache getting full
- // fail out now so blocking compiles dont hang the java thread
- if (!should_compile_new_jobs()) {
-   CompilationPolicy::policy()->delay_compilation(method());
-   return NULL;
- }
-
  // do the compilation
  if (method->is_native()) {
    if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
@ -1313,11 +1306,22 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
      {
        MutexLocker locker(MethodCompileQueue_lock, THREAD);
        compile_id = assign_compile_id(method, standard_entry_bci);
      }
+     // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
+     // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
+     //
+     // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
+     // in this case. If we can't generate one and use it we can not execute the out-of-line method handle calls.
      (void) AdapterHandlerLibrary::create_native_wrapper(method, compile_id);
    } else {
      return NULL;
    }
  } else {
+   // If the compiler is shut off due to code cache getting full
+   // fail out now so blocking compiles dont hang the java thread
+   if (!should_compile_new_jobs()) {
+     CompilationPolicy::policy()->delay_compilation(method());
+     return NULL;
+   }
    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, THREAD);
  }
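The comment added above concerns compiled calls into native method handle intrinsics. As a rough illustration only (not taken from the patch, and the linkage details that decide whether the trampoline path is actually used are not shown here), this is the kind of Java-level call it is talking about: a MethodHandle bound to a native method and invoked directly.

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class NativeHandleCall {
    public static void main(String[] args) throws Throwable {
        // System.nanoTime() is a native method; invoking it through a method
        // handle exercises the method-handle call paths the CompileBroker
        // change is concerned with (illustrative usage only).
        MethodHandle nanoTime = MethodHandles.lookup().findStatic(
                System.class, "nanoTime", MethodType.methodType(long.class));
        long t = (long) nanoTime.invokeExact();
        System.out.println(t);
    }
}
```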
@@ -55,7 +55,7 @@ private:
  // then _alloc_region is NULL and this object should not be used to
  // satisfy allocation requests (it was done this way to force the
  // correct use of init() and release()).
- HeapRegion* _alloc_region;
+ HeapRegion* volatile _alloc_region;

  // It keeps track of the distinct number of regions that are used
  // for allocation in the active interval of this object, i.e.,

@@ -132,8 +132,9 @@ public:
  static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);

  HeapRegion* get() const {
+ HeapRegion * hr = _alloc_region;
  // Make sure that the dummy region does not escape this class.
- return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
+ return (hr == _dummy_region) ? NULL : hr;
  }

  uint count() { return _count; }
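The hunk above reads the now-volatile `_alloc_region` field into a local before testing and returning it, so the test and the return value can never come from two different reads. A minimal stand-alone C++ sketch of that read-once pattern follows; `Region`, `dummy` and `alloc_region` are illustrative stand-ins (here using `std::atomic` in place of HotSpot's volatile field), not HotSpot types.

```cpp
#include <atomic>
#include <cassert>

struct Region { int id; };

static Region dummy;                                // stand-in for the dummy region
static std::atomic<Region*> alloc_region(&dummy);   // stand-in for the volatile field

// Read the shared pointer exactly once; comparing and returning from two
// separate reads could mix values if another thread swaps the field in between.
Region* get_region() {
  Region* hr = alloc_region.load();                 // single read into a local
  return (hr == &dummy) ? nullptr : hr;
}

int main() {
  assert(get_region() == nullptr);                  // the dummy never escapes
  Region r{1};
  alloc_region.store(&r);
  assert(get_region() == &r);
  return 0;
}
```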
@@ -187,19 +187,23 @@ public:
  size_t code_root_elems() const { return _code_root_elems; }

  void print_rs_mem_info_on(outputStream * out, size_t total) {
- out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
+ out->print_cr(" "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
+ round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
  }

  void print_cards_occupied_info_on(outputStream * out, size_t total) {
- out->print_cr(" %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
+ out->print_cr(" "SIZE_FORMAT_W(8)" (%5.1f%%) entries by "SIZE_FORMAT" %s regions",
+ cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
  }

  void print_code_root_mem_info_on(outputStream * out, size_t total) {
- out->print_cr(" %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
+ out->print_cr(" "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
+ round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
  }

  void print_code_root_elems_info_on(outputStream * out, size_t total) {
- out->print_cr(" %8d (%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
+ out->print_cr(" "SIZE_FORMAT_W(8)" (%5.1f%%) elements by "SIZE_FORMAT" %s regions",
+ code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
  }
  };

@@ -327,14 +331,14 @@ void G1RemSetSummary::print_on(outputStream* out) {
  out->print_cr("\n Recent concurrent refinement statistics");
  out->print_cr(" Processed "SIZE_FORMAT" cards",
  num_concurrent_refined_cards());
- out->print_cr(" Of %d completed buffers:", num_processed_buf_total());
+ out->print_cr(" Of "SIZE_FORMAT" completed buffers:", num_processed_buf_total());
- out->print_cr(" %8d (%5.1f%%) by concurrent RS threads.",
+ out->print_cr(" "SIZE_FORMAT_W(8)" (%5.1f%%) by concurrent RS threads.",
  num_processed_buf_total(),
  percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
- out->print_cr(" %8d (%5.1f%%) by mutator threads.",
+ out->print_cr(" "SIZE_FORMAT_W(8)" (%5.1f%%) by mutator threads.",
  num_processed_buf_mutator(),
  percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
- out->print_cr(" Did %d coarsenings.", num_coarsenings());
+ out->print_cr(" Did "SIZE_FORMAT" coarsenings.", num_coarsenings());
  out->print_cr(" Concurrent RS threads times (s)");
  out->print(" ");
  for (uint i = 0; i < _num_vtimes; i++) {
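The changes above replace `%d`/`%zd` with the `SIZE_FORMAT` family of macros so that `size_t` values print correctly on every platform and word size. A small stand-alone sketch of the underlying portability issue; `EX_SIZE_FORMAT` is a hypothetical stand-in, not the HotSpot macro itself.

```cpp
#include <cstdio>
#include <cinttypes>
#include <cstdint>

// Illustrative analogue of a SIZE_FORMAT-style macro: a format fragment that
// matches a pointer-sized unsigned integer on every platform.
#define EX_SIZE_FORMAT "%" PRIuPTR

int main() {
  size_t cards = 123456;
  // printf("Processed %d cards\n", cards);        // wrong where size_t != int
  printf("Processed " EX_SIZE_FORMAT " cards\n", (uintptr_t)cards);  // portable via macro
  printf("Processed %zu cards\n", cards);          // C99/C++11 alternative
  return 0;
}
```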
@@ -30,11 +30,8 @@
  #include "runtime/thread.inline.hpp"
  #include "runtime/vmThread.hpp"
  #include "utilities/top.hpp"
- #ifdef TARGET_ARCH_MODEL_x86_32
+ #ifdef TARGET_ARCH_x86
- # include "interp_masm_x86_32.hpp"
+ # include "interp_masm_x86.hpp"
- #endif
- #ifdef TARGET_ARCH_MODEL_x86_64
- # include "interp_masm_x86_64.hpp"
  #endif
  #ifdef TARGET_ARCH_MODEL_sparc
  # include "interp_masm_sparc.hpp"
@@ -1,5 +1,4 @@
  /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -265,7 +264,7 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle kl
  void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
  Method* result_oop = klass->uncached_lookup_method(name, signature);
  result = methodHandle(THREAD, result_oop);
- while (!result.is_null() && result->is_static()) {
+ while (!result.is_null() && result->is_static() && result->method_holder()->super() != NULL) {
  klass = KlassHandle(THREAD, result->method_holder()->super());
  result = methodHandle(THREAD, klass->uncached_lookup_method(name, signature));
  }

@@ -419,18 +418,28 @@ void LinkResolver::check_method_accessability(KlassHandle ref_klass,

  AccessFlags flags = sel_method->access_flags();

- // Special case: arrays always override "clone". JVMS 2.15.
+ // Special case #1: arrays always override "clone". JVMS 2.15.
  // If the resolved klass is an array class, and the declaring class
  // is java.lang.Object and the method is "clone", set the flags
  // to public.
+ // Special case #2: If the resolved klass is an interface, and
+ // the declaring class is java.lang.Object, and the method is
+ // "clone" or "finalize", set the flags to public. If the
+ // resolved interface does not contain "clone" or "finalize"
+ // methods, the method/interface method resolution looks to
+ // the interface's super class, java.lang.Object. With JDK 8
+ // interface accessability check requirement, special casing
+ // this scenario is necessary to avoid an IAE.
  //
- // We'll check for the method name first, as that's most likely
+ // We'll check for each method name first and then java.lang.Object
- // to be false (so we'll short-circuit out of these tests).
+ // to best short-circuit out of these tests.
- if (sel_method->name() == vmSymbols::clone_name() &&
+ if (((sel_method->name() == vmSymbols::clone_name() &&
- sel_klass() == SystemDictionary::Object_klass() &&
+ (resolved_klass->oop_is_array() || resolved_klass->is_interface())) ||
- resolved_klass->oop_is_array()) {
+ (sel_method->name() == vmSymbols::finalize_method_name() &&
+ resolved_klass->is_interface())) &&
+ sel_klass() == SystemDictionary::Object_klass()) {
  // We need to change "protected" to "public".
- assert(flags.is_protected(), "clone not protected?");
+ assert(flags.is_protected(), "clone or finalize not protected?");
  jint new_flags = flags.as_int();
  new_flags = new_flags & (~JVM_ACC_PROTECTED);
  new_flags = new_flags | JVM_ACC_PUBLIC;
@@ -28,11 +28,8 @@
  #include "interpreter/bytecodes.hpp"
  #include "memory/allocation.hpp"
  #include "runtime/frame.hpp"
- #ifdef TARGET_ARCH_MODEL_x86_32
+ #ifdef TARGET_ARCH_x86
- # include "interp_masm_x86_32.hpp"
+ # include "interp_masm_x86.hpp"
- #endif
- #ifdef TARGET_ARCH_MODEL_x86_64
- # include "interp_masm_x86_64.hpp"
  #endif
  #ifdef TARGET_ARCH_MODEL_sparc
  # include "interp_masm_sparc.hpp"
@@ -75,8 +75,7 @@ enum ChunkSizes {    // in words.
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
- MediumChunk = 8 * K,
+ MediumChunk = 8 * K
- HumongousChunkGranularity = 8
  };

  static ChunkIndex next_chunk_index(ChunkIndex i) {

@@ -92,6 +91,7 @@ typedef class FreeList<Metachunk> ChunkList;

  // Manages the global free lists of chunks.
  class ChunkManager : public CHeapObj<mtInternal> {
+ friend class TestVirtualSpaceNodeTest;

  // Free list of chunks of different sizes.
  //    SpecializedChunk

@@ -257,6 +257,8 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

+ // Committed but unused space in the virtual space
+ size_t free_words_in_vs() const;
  public:

  VirtualSpaceNode(size_t byte_size);

@@ -301,7 +303,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;
- size_t free_words_in_vs() const;

  bool initialize();

@@ -319,6 +320,13 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

+ // If an allocation doesn't fit in the current node a new node is created.
+ // Allocate chunks out of the remaining committed space in this node
+ // to avoid wasting that memory.
+ // This always adds up because all the chunk sizes are multiples of
+ // the smallest chunk size.
+ void retire(ChunkManager* chunk_manager);
+
  #ifdef ASSERT
  // Debug support
  void mangle();

@@ -461,6 +469,10 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

+ // Chunk up the unused committed space in the current
+ // virtual space and add the chunks to the free list.
+ void retire_current_virtual_space();
+
  public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

@@ -624,11 +636,13 @@ class SpaceManager : public CHeapObj<mtClass> {
  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
- size_t specialized_chunk_size() { return SpecializedChunk; }
+ size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }

+ size_t smallest_chunk_size() { return specialized_chunk_size(); }
+
  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }

@@ -1056,6 +1070,35 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  #endif
  }

+ void VirtualSpaceList::retire_current_virtual_space() {
+ assert_lock_strong(SpaceManager::expand_lock());
+
+ VirtualSpaceNode* vsn = current_virtual_space();
+
+ ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
+ Metaspace::chunk_manager_metadata();
+
+ vsn->retire(cm);
+ }
+
+ void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
+ for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
+ ChunkIndex index = (ChunkIndex)i;
+ size_t chunk_size = chunk_manager->free_chunks(index)->size();
+
+ while (free_words_in_vs() >= chunk_size) {
+ DEBUG_ONLY(verify_container_count();)
+ Metachunk* chunk = get_chunk_vs(chunk_size);
+ assert(chunk != NULL, "allocation should have been successful");
+
+ chunk_manager->return_chunks(index, chunk);
+ chunk_manager->inc_free_chunks_total(chunk_size);
+ DEBUG_ONLY(verify_container_count();)
+ }
+ }
+ assert(free_words_in_vs() == 0, "should be empty now");
+ }
+
  VirtualSpaceList::VirtualSpaceList(size_t word_size) :
  _is_class(false),
  _virtual_space_list(NULL),

@@ -1181,6 +1224,7 @@ bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  if (vs_expanded) {
  return true;
  }
+ retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);

@@ -1902,12 +1946,12 @@ size_t SpaceManager::calc_chunk_size(size_t word_size) {
  chunk_word_size = medium_chunk_size();
  }

- // Might still need a humongous chunk. Enforce an
+ // Might still need a humongous chunk. Enforce
- // eight word granularity to facilitate reuse (some
+ // humongous allocations sizes to be aligned up to
- // wastage but better chance of reuse).
+ // the smallest chunk size.
  size_t if_humongous_sized_chunk =
  align_size_up(word_size + Metachunk::overhead(),
- HumongousChunkGranularity);
+ smallest_chunk_size());
  chunk_word_size =
  MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);

@@ -2151,10 +2195,10 @@ SpaceManager::~SpaceManager() {
  }
  assert(humongous_chunks->word_size() == (size_t)
  align_size_up(humongous_chunks->word_size(),
- HumongousChunkGranularity),
+ smallest_chunk_size()),
  err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
  " granularity %d",
- humongous_chunks->word_size(), HumongousChunkGranularity));
+ humongous_chunks->word_size(), smallest_chunk_size()));
  Metachunk* next_humongous_chunks = humongous_chunks->next();
  humongous_chunks->container()->dec_container_count();
  chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);

@@ -3301,9 +3345,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
  }

  if (result == NULL) {
- report_metadata_oome(loader_data, word_size, mdtype, THREAD);
+ report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
- // Will not reach here.
- return NULL;
  }

  // Zero initialize.
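The `retire()` logic added above carves the remaining committed space of a virtual-space node into free chunks, largest chunk size first, which only leaves nothing over because each chunk size divides the next larger one. A stand-alone C++ model of that arithmetic follows (mirroring the `chunk_up()` helper in the test added below); the word sizes are assumptions for illustration, not values read from the HotSpot headers.

```cpp
#include <cassert>
#include <cstddef>

// Assumed word sizes for the non-class chunks; only their divisibility matters.
const size_t SpecializedChunkWords = 128;
const size_t SmallChunkWords       = 512;
const size_t MediumChunkWords      = 8 * 1024;

// Greedily split the leftover committed words into free chunks, largest first.
void chunk_up(size_t words_left, size_t& mediums, size_t& smalls, size_t& specials) {
  mediums  = words_left / MediumChunkWords;       words_left %= MediumChunkWords;
  smalls   = words_left / SmallChunkWords;        words_left %= SmallChunkWords;
  specials = words_left / SpecializedChunkWords;  words_left %= SpecializedChunkWords;
  assert(words_left == 0 && "chunk sizes are multiples of the smallest size");
}

int main() {
  // Same scenario as the "humongous chunk" test case: 2 medium chunks committed,
  // one humongous allocation of MediumChunk + SpecializedChunk words already taken.
  size_t left = 2 * MediumChunkWords - (MediumChunkWords + SpecializedChunkWords);
  size_t m, s, sp;
  chunk_up(left, m, s, sp);
  assert(m == 0 && s == 15 && sp == 3);   // 8064 words -> 15 small + 3 specialized
  return 0;
}
```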
@@ -3494,4 +3536,94 @@ void TestMetaspaceAux_test() {
  TestMetaspaceAuxTest::test();
  }

+ class TestVirtualSpaceNodeTest {
+ static void chunk_up(size_t words_left, size_t& num_medium_chunks,
+ size_t& num_small_chunks,
+ size_t& num_specialized_chunks) {
+ num_medium_chunks = words_left / MediumChunk;
+ words_left = words_left % MediumChunk;
+
+ num_small_chunks = words_left / SmallChunk;
+ words_left = words_left % SmallChunk;
+ // how many specialized chunks can we get?
+ num_specialized_chunks = words_left / SpecializedChunk;
+ assert(words_left % SpecializedChunk == 0, "should be nothing left");
+ }
+
+ public:
+ static void test() {
+ MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+ const size_t vsn_test_size_words = MediumChunk * 4;
+ const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
+
+ // The chunk sizes must be multiples of eachother, or this will fail
+ STATIC_ASSERT(MediumChunk % SmallChunk == 0);
+ STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
+
+ { // No committed memory in VSN
+ ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+ VirtualSpaceNode vsn(vsn_test_size_bytes);
+ vsn.initialize();
+ vsn.retire(&cm);
+ assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
+ }
+
+ { // All of VSN is committed, half is used by chunks
+ ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+ VirtualSpaceNode vsn(vsn_test_size_bytes);
+ vsn.initialize();
+ vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
+ vsn.get_chunk_vs(MediumChunk);
+ vsn.get_chunk_vs(MediumChunk);
+ vsn.retire(&cm);
+ assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
+ assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
+ }
+
+ { // 4 pages of VSN is committed, some is used by chunks
+ ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+ VirtualSpaceNode vsn(vsn_test_size_bytes);
+ const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
+ assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
+ vsn.initialize();
+ vsn.expand_by(page_chunks, page_chunks);
+ vsn.get_chunk_vs(SmallChunk);
+ vsn.get_chunk_vs(SpecializedChunk);
+ vsn.retire(&cm);
+
+ // committed - used = words left to retire
+ const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
+
+ size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+ chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+ assert(num_medium_chunks == 0, "should not get any medium chunks");
+ assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
+ assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+ }
+
+ { // Half of VSN is committed, a humongous chunk is used
+ ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+ VirtualSpaceNode vsn(vsn_test_size_bytes);
+ vsn.initialize();
+ vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
+ vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
+ vsn.retire(&cm);
+
+ const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
+ size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+ chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+ assert(num_medium_chunks == 0, "should not get any medium chunks");
+ assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
+ assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+ }
+
+ }
+ };
+
+ void TestVirtualSpaceNode_test() {
+ TestVirtualSpaceNodeTest::test();
+ }
+
  #endif
@@ -40,7 +40,6 @@
  #include "runtime/init.hpp"
  #include "runtime/javaCalls.hpp"
  #include "runtime/signature.hpp"
- #include "runtime/synchronizer.hpp"
  #include "runtime/vframe.hpp"

  ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {

@@ -70,6 +69,7 @@ ConstantPool::ConstantPool(Array<u1>* tags) {

  // only set to non-zero if constant pool is merged by RedefineClasses
  set_version(0);
+ set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));

  // initialize tag array
  int length = tags->length();

@@ -95,6 +95,9 @@ void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
  void ConstantPool::release_C_heap_structures() {
  // walk constant pool and decrement symbol reference counts
  unreference_symbols();
+
+ delete _lock;
+ set_lock(NULL);
  }

  objArrayOop ConstantPool::resolved_references() const {

@@ -151,6 +154,9 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
  ClassLoaderData* loader_data = pool_holder()->class_loader_data();
  set_resolved_references(loader_data->add_handle(refs_handle));
  }
+
+ // Also need to recreate the mutex. Make sure this matches the constructor
+ set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
  }
  }

@@ -161,23 +167,7 @@ void ConstantPool::remove_unshareable_info() {
  set_resolved_reference_length(
  resolved_references() != NULL ? resolved_references()->length() : 0);
  set_resolved_references(NULL);
- }
+ set_lock(NULL);

- oop ConstantPool::lock() {
- if (_pool_holder) {
- // We re-use the _pool_holder's init_lock to reduce footprint.
- // Notes on deadlocks:
- // [1] This lock is a Java oop, so it can be recursively locked by
- // the same thread without self-deadlocks.
- // [2] Deadlock will happen if there is circular dependency between
- // the <clinit> of two Java classes. However, in this case,
- // the deadlock would have happened long before we reach
- // ConstantPool::lock(), so reusing init_lock does not
- // increase the possibility of deadlock.
- return _pool_holder->init_lock();
- } else {
- return NULL;
- }
  }

  int ConstantPool::cp_to_object_index(int cp_index) {

@@ -211,9 +201,7 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS

  Symbol* name = NULL;
  Handle loader;
- {
+ { MonitorLockerEx ml(this_oop->lock());
- oop cplock = this_oop->lock();
- ObjectLocker ol(cplock , THREAD, cplock != NULL);

  if (this_oop->tag_at(which).is_unresolved_klass()) {
  if (this_oop->tag_at(which).is_unresolved_klass_in_error()) {

@@ -260,8 +248,7 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS

  bool throw_orig_error = false;
  {
- oop cplock = this_oop->lock();
+ MonitorLockerEx ml(this_oop->lock());
- ObjectLocker ol(cplock, THREAD, cplock != NULL);

  // some other thread has beaten us and has resolved the class.
  if (this_oop->tag_at(which).is_klass()) {

@@ -329,8 +316,7 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
  }
  return k();
  } else {
- oop cplock = this_oop->lock();
+ MonitorLockerEx ml(this_oop->lock());
- ObjectLocker ol(cplock, THREAD, cplock != NULL);
  // Only updated constant pool - if it is resolved.
  do_resolve = this_oop->tag_at(which).is_unresolved_klass();
  if (do_resolve) {

@@ -600,8 +586,7 @@ void ConstantPool::save_and_throw_exception(constantPoolHandle this_oop, int whi
  int tag, TRAPS) {
  ResourceMark rm;
  Symbol* error = PENDING_EXCEPTION->klass()->name();
- oop cplock = this_oop->lock();
+ MonitorLockerEx ml(this_oop->lock()); // lock cpool to change tag.
- ObjectLocker ol(cplock, THREAD, cplock != NULL); // lock cpool to change tag.

  int error_tag = (tag == JVM_CONSTANT_MethodHandle) ?
  JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError;

@@ -762,8 +747,7 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int inde
  if (cache_index >= 0) {
  // Cache the oop here also.
  Handle result_handle(THREAD, result_oop);
- oop cplock = this_oop->lock();
+ MonitorLockerEx ml(this_oop->lock()); // don't know if we really need this
- ObjectLocker ol(cplock, THREAD, cplock != NULL); // don't know if we really need this
  oop result = this_oop->resolved_references()->obj_at(cache_index);
  // Benign race condition: resolved_references may already be filled in while we were trying to lock.
  // The important thing here is that all threads pick up the same result.
@@ -111,6 +111,7 @@ class ConstantPool : public Metadata {
  int _version;
  } _saved;

+ Monitor* _lock;

  void set_tags(Array<u1>* tags) { _tags = tags; }
  void tag_at_put(int which, jbyte t) { tags()->at_put(which, t); }

@@ -843,17 +844,8 @@ class ConstantPool : public Metadata {

  void set_resolved_reference_length(int length) { _saved._resolved_reference_length = length; }
  int resolved_reference_length() const { return _saved._resolved_reference_length; }
+ void set_lock(Monitor* lock) { _lock = lock; }
- // lock() may return null -- constant pool updates may happen before this lock is
+ Monitor* lock() { return _lock; }
- // initialized, because the _pool_holder has not been fully initialized and
- // has not been registered into the system dictionary. In this case, no other
- // thread can be modifying this constantpool, so no synchronization is
- // necessary.
- //
- // Use cplock() like this:
- // oop cplock = cp->lock();
- // ObjectLocker ol(cplock , THREAD, cplock != NULL);
- oop lock();

  // Decrease ref counts of symbols that are in the constant pool
  // when the holder class is unloaded
|
|||||||
// the lock, so that when the losing writer returns, he can use the linked
|
// the lock, so that when the losing writer returns, he can use the linked
|
||||||
// cache entry.
|
// cache entry.
|
||||||
|
|
||||||
oop cplock = cpool->lock();
|
MonitorLockerEx ml(cpool->lock());
|
||||||
ObjectLocker ol(cplock, Thread::current(), cplock != NULL);
|
|
||||||
if (!is_f1_null()) {
|
if (!is_f1_null()) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@@ -498,13 +498,27 @@ objArrayOop InstanceKlass::signers() const {

  oop InstanceKlass::init_lock() const {
  // return the init lock from the mirror
- return java_lang_Class::init_lock(java_mirror());
+ oop lock = java_lang_Class::init_lock(java_mirror());
+ assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state
+ "only fully initialized state can have a null lock");
+ return lock;
+ }
+
+ // Set the initialization lock to null so the object can be GC'ed. Any racing
+ // threads to get this lock will see a null lock and will not lock.
+ // That's okay because they all check for initialized state after getting
+ // the lock and return.
+ void InstanceKlass::fence_and_clear_init_lock() {
+ // make sure previous stores are all done, notably the init_state.
+ OrderAccess::storestore();
+ java_lang_Class::set_init_lock(java_mirror(), NULL);
+ assert(!is_not_initialized(), "class must be initialized now");
  }

  void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  EXCEPTION_MARK;
  oop init_lock = this_oop->init_lock();
- ObjectLocker ol(init_lock, THREAD);
+ ObjectLocker ol(init_lock, THREAD, init_lock != NULL);

  // abort if someone beat us to the initialization
  if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized()

@@ -523,6 +537,7 @@ void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  } else {
  // linking successfull, mark class as initialized
  this_oop->set_init_state (fully_initialized);
+ this_oop->fence_and_clear_init_lock();
  // trace
  if (TraceClassInitialization) {
  ResourceMark rm(THREAD);

@@ -649,7 +664,7 @@ bool InstanceKlass::link_class_impl(
  // verification & rewriting
  {
  oop init_lock = this_oop->init_lock();
- ObjectLocker ol(init_lock, THREAD);
+ ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
  // rewritten will have been set if loader constraint error found
  // on an earlier link attempt
  // don't verify or rewrite if already rewritten

@@ -772,7 +787,7 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
  // Step 1
  {
  oop init_lock = this_oop->init_lock();
- ObjectLocker ol(init_lock, THREAD);
+ ObjectLocker ol(init_lock, THREAD, init_lock != NULL);

  Thread *self = THREAD; // it's passed the current thread

@@ -920,8 +935,9 @@ void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS)

  void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
  oop init_lock = this_oop->init_lock();
- ObjectLocker ol(init_lock, THREAD);
+ ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
  this_oop->set_init_state(state);
+ this_oop->fence_and_clear_init_lock();
  ol.notify_all(CHECK);
  }

@@ -1023,6 +1023,7 @@ public:
  // It has to be an object not a Mutex because it's held through java calls.
  oop init_lock() const;
  private:
+ void fence_and_clear_init_lock();

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
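The `fence_and_clear_init_lock()` change above relies on ordering: the initialization state must be visible before the lock reference is cleared, so a thread that sees a null lock can safely skip locking. A minimal stand-alone sketch of that publication pattern, using `std::atomic` release/acquire in place of `OrderAccess::storestore()` and the Java mirror fields; all names below are illustrative.

```cpp
#include <atomic>
#include <cassert>

static std::atomic<bool>  fully_initialized(false);
static std::atomic<void*> init_lock((void*)1);   // non-null token while initializing

void finish_initialization() {
  fully_initialized.store(true, std::memory_order_relaxed);
  // Release ordering: the state store above cannot be reordered after this store,
  // mirroring the storestore fence before the lock is cleared.
  init_lock.store(nullptr, std::memory_order_release);
}

bool reader_can_skip_locking() {
  if (init_lock.load(std::memory_order_acquire) == nullptr) {
    // A null lock implies the initialized state is already visible.
    return fully_initialized.load(std::memory_order_relaxed);
  }
  return false;   // lock still present: take it and re-check the state
}

int main() {
  assert(!reader_can_skip_locking());
  finish_initialization();
  assert(reader_can_skip_locking());
  return 0;
}
```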
@@ -41,7 +41,7 @@

  // Some types of data layouts need a length field.
  bool DataLayout::needs_array_len(u1 tag) {
- return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag);
+ return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  }

  // Perform generic initialization of the data. More specific

@@ -156,10 +156,13 @@ void JumpData::print_data_on(outputStream* st) const {
  }
  #endif // !PRODUCT

- int TypeStackSlotEntries::compute_cell_count(Symbol* signature, int max) {
+ int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
+ // Parameter profiling include the receiver
+ int args_count = include_receiver ? 1 : 0;
  ResourceMark rm;
  SignatureStream ss(signature);
- int args_count = MIN2(ss.reference_parameter_count(), max);
+ args_count += ss.reference_parameter_count();
+ args_count = MIN2(args_count, max);
  return args_count * per_arg_cell_count;
  }

@@ -169,7 +172,7 @@ int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  Bytecode_invoke inv(stream->method(), stream->bci());
  int args_cell = 0;
  if (arguments_profiling_enabled()) {
- args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), TypeProfileArgsLimit);
+ args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {

@@ -212,12 +215,19 @@ public:
  int off_at(int i) const { return _offsets.at(i); }
  };

- void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver) {
+ void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
- ArgumentOffsetComputer aos(signature, _number_of_entries);
+ int start = 0;
+ // Parameter profiling include the receiver
+ if (include_receiver && has_receiver) {
+ set_stack_slot(0, 0);
+ set_type(0, type_none());
+ start += 1;
+ }
+ ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  aos.total();
- for (int i = 0; i < _number_of_entries; i++) {
+ for (int i = start; i < _number_of_entries; i++) {
- set_stack_slot(i, aos.off_at(i) + (has_receiver ? 1 : 0));
+ set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
  set_type(i, type_none());
  }
  }

@@ -234,7 +244,7 @@ void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(count > 0, "room for args type but none found?");
  check_number_of_arguments(count);
  #endif
- _args.post_initialize(inv.signature(), inv.has_receiver());
+ _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {

@@ -255,7 +265,7 @@ void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* md
  assert(count > 0, "room for args type but none found?");
  check_number_of_arguments(count);
  #endif
- _args.post_initialize(inv.signature(), inv.has_receiver());
+ _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {

@@ -579,6 +589,34 @@ void ArgInfoData::print_data_on(outputStream* st) const {
  }

  #endif
+
+ int ParametersTypeData::compute_cell_count(Method* m) {
+ if (!MethodData::profile_parameters_for_method(m)) {
+ return 0;
+ }
+ int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
+ int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
+ if (obj_args > 0) {
+ return obj_args + 1; // 1 cell for array len
+ }
+ return 0;
+ }
+
+ void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
+ _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
+ }
+
+ bool ParametersTypeData::profiling_enabled() {
+ return MethodData::profile_parameters();
+ }
+
+ #ifndef PRODUCT
+ void ParametersTypeData::print_data_on(outputStream* st) const {
+ st->print("parameter types");
+ _parameters.print_data_on(st);
+ }
+ #endif
+
  // ==================================================================
  // MethodData*
  //

@@ -741,6 +779,12 @@ int MethodData::compute_allocation_size_in_bytes(methodHandle method) {
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

+ // Reserve room for an area of the MDO dedicated to profiling of
+ // parameters
+ int args_cell = ParametersTypeData::compute_cell_count(method());
+ if (args_cell > 0) {
+ object_size += DataLayout::compute_size_in_bytes(args_cell);
+ }
  return object_size;
  }

@@ -915,6 +959,8 @@ ProfileData* DataLayout::data_in() {
  return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
  return new VirtualCallTypeData(this);
+ case DataLayout::parameters_type_data_tag:
+ return new ParametersTypeData(this);
  };
  }

@@ -936,6 +982,9 @@ void MethodData::post_initialize(BytecodeStream* stream) {
  stream->next();
  data->post_initialize(stream, this);
  }
+ if (_parameters_type_data_di != -1) {
+ parameters_type_data()->post_initialize(NULL, this);
+ }
  }

  // Initialize the MethodData* corresponding to a given method.

@@ -975,7 +1024,23 @@ MethodData::MethodData(methodHandle method, int size, TRAPS) {
  int arg_size = method->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

- object_size += extra_size + DataLayout::compute_size_in_bytes(arg_size+1);
+ int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
+ object_size += extra_size + arg_data_size;
+
+ int args_cell = ParametersTypeData::compute_cell_count(method());
+ // If we are profiling parameters, we reserver an area near the end
+ // of the MDO after the slots for bytecodes (because there's no bci
+ // for method entry so they don't fit with the framework for the
+ // profiling of bytecodes). We store the offset within the MDO of
+ // this area (or -1 if no parameter is profiled)
+ if (args_cell > 0) {
+ object_size += DataLayout::compute_size_in_bytes(args_cell);
+ _parameters_type_data_di = data_size + extra_size + arg_data_size;
+ DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
+ dp->initialize(DataLayout::parameters_type_data_tag, 0, args_cell);
+ } else {
+ _parameters_type_data_di = -1;
+ }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.

@@ -1134,6 +1199,9 @@ void MethodData::print_value_on(outputStream* st) const {
  void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
+ if (_parameters_type_data_di != -1) {
+ parameters_type_data()->print_data_on(st);
+ }
  for ( ; is_valid(data); data = next_data(data)) {
  st->print("%d", dp_to_di(data->dp()));
  st->fill_to(6);

@@ -1222,7 +1290,7 @@ bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) {
  }

  int MethodData::profile_return_flag() {
- return TypeProfileLevel / 10;
+ return (TypeProfileLevel % 100) / 10;
  }

  bool MethodData::profile_return() {
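After the change above, `TypeProfileLevel` is decoded digit by digit: the hundreds digit controls parameter profiling (see `profile_parameters_flag()` below), the tens digit return-value profiling, and the ones digit argument profiling. A stand-alone C++ check of that arithmetic; the `decode` helper and `ProfileFlags` struct are illustrative, only the flag name comes from the source.

```cpp
#include <cassert>

struct ProfileFlags { int parameters, ret, arguments; };

// Split the three decimal digits of a TypeProfileLevel-style value.
ProfileFlags decode(int type_profile_level) {
  ProfileFlags f;
  f.parameters = type_profile_level / 100;
  f.ret        = (type_profile_level % 100) / 10;
  f.arguments  = type_profile_level % 10;
  return f;
}

int main() {
  ProfileFlags f = decode(111);   // profile parameters, returns and arguments
  assert(f.parameters == 1 && f.ret == 1 && f.arguments == 1);
  f = decode(20);                 // only return-value profiling, mode value 2
  assert(f.parameters == 0 && f.ret == 2 && f.arguments == 0);
  return 0;
}
```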
@@ -1249,3 +1317,32 @@ bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
  assert(profile_return_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
  }
+
+ int MethodData::profile_parameters_flag() {
+ return TypeProfileLevel / 100;
+ }
+
+ bool MethodData::profile_parameters() {
+ return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
+ }
+
+ bool MethodData::profile_parameters_jsr292_only() {
+ return profile_parameters_flag() == type_profile_jsr292;
+ }
+
+ bool MethodData::profile_all_parameters() {
+ return profile_parameters_flag() == type_profile_all;
+ }
+
+ bool MethodData::profile_parameters_for_method(methodHandle m) {
+ if (!profile_parameters()) {
+ return false;
+ }
+
+ if (profile_all_parameters()) {
+ return true;
+ }
+
+ assert(profile_parameters_jsr292_only(), "inconsistent");
+ return m->is_compiled_lambda_form();
+ }
@@ -119,7 +119,8 @@ public:
  multi_branch_data_tag,
  arg_info_data_tag,
  call_type_data_tag,
- virtual_call_type_data_tag
+ virtual_call_type_data_tag,
+ parameters_type_data_tag
  };

  enum {

@@ -264,6 +265,7 @@ class BranchData;
  class ArrayData;
  class MultiBranchData;
  class ArgInfoData;
+ class ParametersTypeData;

  // ProfileData
  //

@@ -397,6 +399,7 @@ public:
  virtual bool is_ArgInfoData() const { return false; }
  virtual bool is_CallTypeData() const { return false; }
  virtual bool is_VirtualCallTypeData()const { return false; }
+ virtual bool is_ParametersTypeData() const { return false; }


  BitData* as_BitData() const {

@@ -447,6 +450,10 @@ public:
  assert(is_VirtualCallTypeData(), "wrong type");
  return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
  }
+ ParametersTypeData* as_ParametersTypeData() const {
+ assert(is_ParametersTypeData(), "wrong type");
+ return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
+ }


  // Subclass specific initialization

@@ -767,9 +774,9 @@ public:
  TypeStackSlotEntries(int base_off, int nb_entries)
  : TypeEntries(base_off), _number_of_entries(nb_entries) {}

- static int compute_cell_count(Symbol* signature, int max);
+ static int compute_cell_count(Symbol* signature, bool include_receiver, int max);

- void post_initialize(Symbol* signature, bool has_receiver);
+ void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);

  // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
  static int stack_slot_local_offset(int i) {

@@ -946,17 +953,6 @@ private:
  assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  }

- protected:
- // An entry for a return value takes less space than an entry for an
- // argument so if the number of cells exceeds the number of cells
- // needed for an argument, this object contains type information for
- // at least one argument.
- bool has_arguments() const {
- bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
- assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
- return res;
- }
-
  public:
  CallTypeData(DataLayout* layout) :
  CounterData(layout),

@@ -1017,6 +1013,16 @@ public:
  _ret.set_type(TypeEntries::with_status(k, current));
  }

+ // An entry for a return value takes less space than an entry for an
+ // argument so if the number of cells exceeds the number of cells
+ // needed for an argument, this object contains type information for
+ // at least one argument.
+ bool has_arguments() const {
+ bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
+ assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
+ return res;
+ }
+
  // An entry for a return value takes less space than an entry for an
  // argument, so if the remainder of the number of cells divided by
  // the number of cells for an argument is not null, a return value

@@ -1213,17 +1219,6 @@ private:
  assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  }

- protected:
- // An entry for a return value takes less space than an entry for an
- // argument so if the number of cells exceeds the number of cells
- // needed for an argument, this object contains type information for
- // at least one argument.
- bool has_arguments() const {
- bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
- assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
- return res;
- }
-
  public:
  VirtualCallTypeData(DataLayout* layout) :
  VirtualCallData(layout),

@@ -1294,6 +1289,16 @@ public:
  return res;
  }

+ // An entry for a return value takes less space than an entry for an
+ // argument so if the number of cells exceeds the number of cells
+ // needed for an argument, this object contains type information for
+ // at least one argument.
+ bool has_arguments() const {
+ bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
+ assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
+ return res;
+ }
+
  // Code generation support
  static ByteSize args_data_offset() {
  return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();

@@ -1662,6 +1667,75 @@ public:
  #endif
  };

+ // ParametersTypeData
+ //
+ // A ParametersTypeData is used to access profiling information about
+ // types of parameters to a method
+ class ParametersTypeData : public ArrayData {
+
+ private:
+ TypeStackSlotEntries _parameters;
+
+ static int stack_slot_local_offset(int i) {
+ assert_profiling_enabled();
+ return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
+ }
+
+ static int type_local_offset(int i) {
+ assert_profiling_enabled();
+ return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
+ }
+
+ static bool profiling_enabled();
+ static void assert_profiling_enabled() {
+ assert(profiling_enabled(), "method parameters profiling should be on");
+ }
+
+ public:
+ ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
+ assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
+ // Some compilers (VC++) don't want this passed in member initialization list
+ _parameters.set_profile_data(this);
+ }
+
+ static int compute_cell_count(Method* m);
+
+ virtual bool is_ParametersTypeData() const { return true; }
+
+ virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
+
+ int number_of_parameters() const {
+ return array_len() / TypeStackSlotEntries::per_arg_count();
+ }
+
+ const TypeStackSlotEntries* parameters() const { return &_parameters; }
+
+ uint stack_slot(int i) const {
+ return _parameters.stack_slot(i);
+ }
+
+ void set_type(int i, Klass* k) {
+ intptr_t current = _parameters.type(i);
+ _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
+ }
+
+ virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
+ _parameters.clean_weak_klass_links(is_alive_closure);
+ }
+
+ #ifndef PRODUCT
+ virtual void print_data_on(outputStream* st) const;
+ #endif
+
+ static ByteSize stack_slot_offset(int i) {
+ return cell_offset(stack_slot_local_offset(i));
+ }
+
+ static ByteSize type_offset(int i) {
+ return cell_offset(type_local_offset(i));
+ }
+ };
+
  // MethodData*
  //
  // A MethodData* holds information which has been collected about
@ -1773,6 +1847,10 @@ private:
|
|||||||
// Size of _data array in bytes. (Excludes header and extra_data fields.)
|
// Size of _data array in bytes. (Excludes header and extra_data fields.)
|
||||||
int _data_size;
|
int _data_size;
|
||||||
|
|
||||||
|
// data index for the area dedicated to parameters. -1 if no
|
||||||
|
// parameter profiling.
|
||||||
|
int _parameters_type_data_di;
|
||||||
|
|
||||||
// Beginning of the data entries
|
// Beginning of the data entries
|
||||||
intptr_t _data[1];
|
intptr_t _data[1];
|
||||||
|
|
||||||
@ -1842,6 +1920,9 @@ private:
|
|||||||
static int profile_return_flag();
|
static int profile_return_flag();
|
||||||
static bool profile_all_return();
|
static bool profile_all_return();
|
||||||
static bool profile_return_for_invoke(methodHandle m, int bci);
|
static bool profile_return_for_invoke(methodHandle m, int bci);
|
||||||
|
static int profile_parameters_flag();
|
||||||
|
static bool profile_parameters_jsr292_only();
|
||||||
|
static bool profile_all_parameters();
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static int header_size() {
|
static int header_size() {
|
||||||
@ -2048,6 +2129,16 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Return pointer to area dedicated to parameters in MDO
|
||||||
|
ParametersTypeData* parameters_type_data() const {
|
||||||
|
return _parameters_type_data_di != -1 ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
int parameters_type_data_di() const {
|
||||||
|
assert(_parameters_type_data_di != -1, "no args type data");
|
||||||
|
return _parameters_type_data_di;
|
||||||
|
}
|
||||||
|
|
||||||
// Support for code generation
|
// Support for code generation
|
||||||
static ByteSize data_offset() {
|
static ByteSize data_offset() {
|
||||||
return byte_offset_of(MethodData, _data[0]);
|
return byte_offset_of(MethodData, _data[0]);
|
||||||
@ -2060,6 +2151,10 @@ public:
|
|||||||
return byte_offset_of(MethodData, _backedge_counter);
|
return byte_offset_of(MethodData, _backedge_counter);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static ByteSize parameters_type_data_di_offset() {
|
||||||
|
return byte_offset_of(MethodData, _parameters_type_data_di);
|
||||||
|
}
|
||||||
|
|
||||||
// Deallocation support - no pointer fields to deallocate
|
// Deallocation support - no pointer fields to deallocate
|
||||||
void deallocate_contents(ClassLoaderData* loader_data) {}
|
void deallocate_contents(ClassLoaderData* loader_data) {}
|
||||||
|
|
||||||
@ -2083,8 +2178,10 @@ public:
|
|||||||
void verify_on(outputStream* st);
|
void verify_on(outputStream* st);
|
||||||
void verify_data_on(outputStream* st);
|
void verify_data_on(outputStream* st);
|
||||||
|
|
||||||
|
static bool profile_parameters_for_method(methodHandle m);
|
||||||
static bool profile_arguments();
|
static bool profile_arguments();
|
||||||
static bool profile_return();
|
static bool profile_return();
|
||||||
|
static bool profile_parameters();
|
||||||
static bool profile_return_jsr292_only();
|
static bool profile_return_jsr292_only();
|
||||||
};
|
};
|
||||||
|
|
||||||
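Aside, not part of the change above: a minimal sketch of how the new ParametersTypeData accessors could be consumed, assuming a valid outputStream; the helper name print_parameter_slots is hypothetical.

  void print_parameter_slots(const ParametersTypeData* ptd, outputStream* st) {
    // One entry is kept per profiled parameter; stack_slot(i) reports the
    // interpreter stack slot the i-th profiled parameter was read from.
    for (int i = 0; i < ptd->number_of_parameters(); i++) {
      st->print_cr("parameter %d profiled from stack slot %u", i, ptd->stack_slot(i));
    }
  }
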
|
@ -638,7 +638,13 @@
|
|||||||
"Find best control for expensive operations") \
|
"Find best control for expensive operations") \
|
||||||
\
|
\
|
||||||
product(bool, UseMathExactIntrinsics, true, \
|
product(bool, UseMathExactIntrinsics, true, \
|
||||||
"Enables intrinsification of various java.lang.Math funcitons")
|
"Enables intrinsification of various java.lang.Math functions") \
|
||||||
|
\
|
||||||
|
experimental(bool, ReplaceInParentMaps, false, \
|
||||||
|
"Propagate type improvements in callers of inlinee if possible") \
|
||||||
|
\
|
||||||
|
experimental(bool, UseTypeSpeculation, false, \
|
||||||
|
"Speculatively propagate types from profiles")
|
||||||
|
|
||||||
C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
|
C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
|
||||||
|
|
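Side note (illustrative only, not part of the change): because ReplaceInParentMaps and UseTypeSpeculation are declared with experimental(), a product VM normally accepts them only after experimental options are unlocked, for example:

  java -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:+ReplaceInParentMaps ...
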
@@ -63,12 +63,12 @@ public:
  }

  virtual bool is_parse() const { return true; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
  int is_osr() { return _is_osr; }

};

-JVMState* ParseGenerator::generate(JVMState* jvms) {
+JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  Compile* C = Compile::current();

  if (is_osr()) {
@@ -80,7 +80,7 @@ JVMState* ParseGenerator::generate(JVMState* jvms) {
    return NULL; // bailing out of the compile; do not try to parse
  }

-  Parse parser(jvms, method(), _expected_uses);
+  Parse parser(jvms, method(), _expected_uses, parent_parser);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
@@ -119,12 +119,12 @@ class DirectCallGenerator : public CallGenerator {
    _separate_io_proj(separate_io_proj)
  {
  }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

-JVMState* DirectCallGenerator::generate(JVMState* jvms) {
+JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
@@ -171,10 +171,10 @@ public:
    vtable_index >= 0, "either invalid or usable");
  }
  virtual bool is_virtual() const { return true; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};

-JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
+JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

@@ -276,7 +276,7 @@ class LateInlineCallGenerator : public DirectCallGenerator {
  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

@@ -290,7 +290,7 @@ class LateInlineCallGenerator : public DirectCallGenerator {
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
-    return DirectCallGenerator::generate(jvms);
+    return DirectCallGenerator::generate(jvms, parent_parser);
  }

  virtual void print_inlining_late(const char* msg) {
@@ -389,7 +389,7 @@ void LateInlineCallGenerator::do_late_inline() {
  }

  // Now perform the inling using the synthesized JVMState
-  JVMState* new_jvms = _inline_cg->generate(jvms);
+  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
  if (new_jvms == NULL) return; // no change
  if (C->failing()) return;

@@ -429,8 +429,8 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator {

  virtual bool is_mh_late_inline() const { return true; }

-  virtual JVMState* generate(JVMState* jvms) {
-    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
+    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
@@ -477,15 +477,17 @@ class LateInlineStringCallGenerator : public LateInlineCallGenerator {
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_string_late_inline(this);

-    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
+    JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
    return new_jvms;
  }

+  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
@@ -498,13 +500,13 @@ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_boxing_late_inline(this);

-    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
+    JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
    return new_jvms;
  }
};
@@ -540,7 +542,7 @@ public:
  virtual bool is_virtual() const { return _is_virtual; }
  virtual bool is_deferred() const { return true; }

-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};


@@ -550,12 +552,12 @@ CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

-JVMState* WarmCallGenerator::generate(JVMState* jvms) {
+JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
-  jvms = _if_cold->generate(jvms);
+  jvms = _if_cold->generate(jvms, parent_parser);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0); else m = C->top();
@@ -616,7 +618,7 @@ public:
  virtual bool is_inline() const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};


@@ -628,7 +630,7 @@ CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
}


-JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
+JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
@@ -656,7 +658,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
-      slow_jvms = _if_missed->generate(kit.sync_jvms());
+      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
      if (kit.failing())
        return NULL; // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
@@ -677,12 +679,12 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
-  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
+  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
-    new_jvms = cg->generate(kit.sync_jvms());
+    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);
@@ -773,7 +775,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
      ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
      guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove
      const int vtable_index = Method::invalid_vtable_index;
-      CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, true, true);
+      CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
      assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
      if (cg != NULL && cg->is_inline())
        return cg;
@@ -829,6 +831,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
      int vtable_index = Method::invalid_vtable_index;
      bool call_does_dispatch = false;

+      ciKlass* speculative_receiver_type = NULL;
      if (is_virtual_or_interface) {
        ciInstanceKlass* klass = target->holder();
        Node* receiver_node = kit.argument(0);
@@ -837,9 +840,12 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
        target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
                                          is_virtual,
                                          call_does_dispatch, vtable_index); // out-parameters
+        // We lack profiling at this call but type speculation may
+        // provide us with a type
+        speculative_receiver_type = receiver_type->speculative_type();
      }

-      CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, true, true);
+      CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
      assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
      if (cg != NULL && cg->is_inline())
        return cg;
@@ -874,7 +880,7 @@ public:
  virtual bool is_inlined() const { return true; }
  virtual bool is_intrinsic() const { return true; }

-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};


@@ -884,7 +890,7 @@ CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
}


-JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
+JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

@@ -904,7 +910,7 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
    PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
-      slow_jvms = _cg->generate(kit.sync_jvms());
+      slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
      if (kit.failing())
        return NULL; // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
@@ -922,12 +928,12 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
  }

  // Generate intrinsic code:
-  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
+  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
  if (new_jvms == NULL) {
    // Intrinsic failed, so use slow code or make a direct call.
    if (slow_map == NULL) {
      CallGenerator* cg = CallGenerator::for_direct_call(method());
-      new_jvms = cg->generate(kit.sync_jvms());
+      new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
    } else {
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
@@ -997,7 +1003,7 @@ public:
  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const { return true; }

-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
};


@@ -1009,7 +1015,7 @@ CallGenerator::for_uncommon_trap(ciMethod* m,
}


-JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
+JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
  int nargs = method()->arg_size();
||||||
|
@ -31,6 +31,8 @@
|
|||||||
#include "opto/type.hpp"
|
#include "opto/type.hpp"
|
||||||
#include "runtime/deoptimization.hpp"
|
#include "runtime/deoptimization.hpp"
|
||||||
|
|
||||||
|
class Parse;
|
||||||
|
|
||||||
//---------------------------CallGenerator-------------------------------------
|
//---------------------------CallGenerator-------------------------------------
|
||||||
// The subclasses of this class handle generation of ideal nodes for
|
// The subclasses of this class handle generation of ideal nodes for
|
||||||
// call sites and method entry points.
|
// call sites and method entry points.
|
||||||
@ -72,6 +74,7 @@ class CallGenerator : public ResourceObj {
|
|||||||
virtual bool is_late_inline() const { return false; }
|
virtual bool is_late_inline() const { return false; }
|
||||||
// same but for method handle calls
|
// same but for method handle calls
|
||||||
virtual bool is_mh_late_inline() const { return false; }
|
virtual bool is_mh_late_inline() const { return false; }
|
||||||
|
virtual bool is_string_late_inline() const{ return false; }
|
||||||
|
|
||||||
// for method handle calls: have we tried inlinining the call already?
|
// for method handle calls: have we tried inlinining the call already?
|
||||||
virtual bool already_attempted() const { ShouldNotReachHere(); return false; }
|
virtual bool already_attempted() const { ShouldNotReachHere(); return false; }
|
||||||
@ -108,7 +111,7 @@ class CallGenerator : public ResourceObj {
|
|||||||
//
|
//
|
||||||
// If the result is NULL, it means that this CallGenerator was unable
|
// If the result is NULL, it means that this CallGenerator was unable
|
||||||
// to handle the given call, and another CallGenerator should be consulted.
|
// to handle the given call, and another CallGenerator should be consulted.
|
||||||
virtual JVMState* generate(JVMState* jvms) = 0;
|
virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
|
||||||
|
|
||||||
// How to generate a call site that is inlined:
|
// How to generate a call site that is inlined:
|
||||||
static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
|
static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
|
||||||
|
@ -30,6 +30,7 @@ macro(AbsF)
|
|||||||
macro(AbsI)
|
macro(AbsI)
|
||||||
macro(AddD)
|
macro(AddD)
|
||||||
macro(AddExactI)
|
macro(AddExactI)
|
||||||
|
macro(AddExactL)
|
||||||
macro(AddF)
|
macro(AddF)
|
||||||
macro(AddI)
|
macro(AddI)
|
||||||
macro(AddL)
|
macro(AddL)
|
||||||
@ -170,6 +171,8 @@ macro(LoopLimit)
|
|||||||
macro(Mach)
|
macro(Mach)
|
||||||
macro(MachProj)
|
macro(MachProj)
|
||||||
macro(MathExact)
|
macro(MathExact)
|
||||||
|
macro(MathExactI)
|
||||||
|
macro(MathExactL)
|
||||||
macro(MaxI)
|
macro(MaxI)
|
||||||
macro(MemBarAcquire)
|
macro(MemBarAcquire)
|
||||||
macro(MemBarAcquireLock)
|
macro(MemBarAcquireLock)
|
||||||
@ -189,12 +192,16 @@ macro(MoveF2I)
|
|||||||
macro(MoveL2D)
|
macro(MoveL2D)
|
||||||
macro(MoveD2L)
|
macro(MoveD2L)
|
||||||
macro(MulD)
|
macro(MulD)
|
||||||
|
macro(MulExactI)
|
||||||
|
macro(MulExactL)
|
||||||
macro(MulF)
|
macro(MulF)
|
||||||
macro(MulHiL)
|
macro(MulHiL)
|
||||||
macro(MulI)
|
macro(MulI)
|
||||||
macro(MulL)
|
macro(MulL)
|
||||||
macro(Multi)
|
macro(Multi)
|
||||||
macro(NegD)
|
macro(NegD)
|
||||||
|
macro(NegExactI)
|
||||||
|
macro(NegExactL)
|
||||||
macro(NegF)
|
macro(NegF)
|
||||||
macro(NeverBranch)
|
macro(NeverBranch)
|
||||||
macro(Opaque1)
|
macro(Opaque1)
|
||||||
@ -244,6 +251,8 @@ macro(StrComp)
|
|||||||
macro(StrEquals)
|
macro(StrEquals)
|
||||||
macro(StrIndexOf)
|
macro(StrIndexOf)
|
||||||
macro(SubD)
|
macro(SubD)
|
||||||
|
macro(SubExactI)
|
||||||
|
macro(SubExactL)
|
||||||
macro(SubF)
|
macro(SubF)
|
||||||
macro(SubI)
|
macro(SubI)
|
||||||
macro(SubL)
|
macro(SubL)
|
||||||
|
@ -655,7 +655,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
|
|||||||
_inlining_progress(false),
|
_inlining_progress(false),
|
||||||
_inlining_incrementally(false),
|
_inlining_incrementally(false),
|
||||||
_print_inlining_list(NULL),
|
_print_inlining_list(NULL),
|
||||||
_print_inlining_idx(0) {
|
_print_inlining_idx(0),
|
||||||
|
_preserve_jvm_state(0) {
|
||||||
C = this;
|
C = this;
|
||||||
|
|
||||||
CompileWrapper cw(this);
|
CompileWrapper cw(this);
|
||||||
@ -763,7 +764,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
JVMState* jvms = build_start_state(start(), tf());
|
JVMState* jvms = build_start_state(start(), tf());
|
||||||
if ((jvms = cg->generate(jvms)) == NULL) {
|
if ((jvms = cg->generate(jvms, NULL)) == NULL) {
|
||||||
record_method_not_compilable("method parse failed");
|
record_method_not_compilable("method parse failed");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -940,7 +941,8 @@ Compile::Compile( ciEnv* ci_env,
|
|||||||
_inlining_progress(false),
|
_inlining_progress(false),
|
||||||
_inlining_incrementally(false),
|
_inlining_incrementally(false),
|
||||||
_print_inlining_list(NULL),
|
_print_inlining_list(NULL),
|
||||||
_print_inlining_idx(0) {
|
_print_inlining_idx(0),
|
||||||
|
_preserve_jvm_state(0) {
|
||||||
C = this;
|
C = this;
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
@ -1358,7 +1360,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
|
|||||||
// During the 2nd round of IterGVN, NotNull castings are removed.
|
// During the 2nd round of IterGVN, NotNull castings are removed.
|
||||||
// Make sure the Bottom and NotNull variants alias the same.
|
// Make sure the Bottom and NotNull variants alias the same.
|
||||||
// Also, make sure exact and non-exact variants alias the same.
|
// Also, make sure exact and non-exact variants alias the same.
|
||||||
if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) {
|
if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
|
||||||
tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
|
tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1383,6 +1385,9 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
|
|||||||
// Also, make sure exact and non-exact variants alias the same.
|
// Also, make sure exact and non-exact variants alias the same.
|
||||||
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
|
tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
|
||||||
}
|
}
|
||||||
|
if (to->speculative() != NULL) {
|
||||||
|
tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
|
||||||
|
}
|
||||||
// Canonicalize the holder of this field
|
// Canonicalize the holder of this field
|
||||||
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
|
if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
|
||||||
// First handle header references such as a LoadKlassNode, even if the
|
// First handle header references such as a LoadKlassNode, even if the
|
||||||
@ -2011,6 +2016,12 @@ void Compile::Optimize() {
|
|||||||
if (failing()) return;
|
if (failing()) return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remove the speculative part of types and clean up the graph from
|
||||||
|
// the extra CastPP nodes whose only purpose is to carry them. Do
|
||||||
|
// that early so that optimizations are not disrupted by the extra
|
||||||
|
// CastPP nodes.
|
||||||
|
remove_speculative_types(igvn);
|
||||||
|
|
||||||
// No more new expensive nodes will be added to the list from here
|
// No more new expensive nodes will be added to the list from here
|
||||||
// so keep only the actual candidates for optimizations.
|
// so keep only the actual candidates for optimizations.
|
||||||
cleanup_expensive_nodes(igvn);
|
cleanup_expensive_nodes(igvn);
|
||||||
@ -3004,6 +3015,10 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
|
|||||||
if (result != NULL) {
|
if (result != NULL) {
|
||||||
for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
|
for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
|
||||||
Node* out = result->fast_out(j);
|
Node* out = result->fast_out(j);
|
||||||
|
// Phi nodes shouldn't be moved. They would only match below if they
|
||||||
|
// had the same control as the MathExactNode. The only time that
|
||||||
|
// would happen is if the Phi is also an input to the MathExact
|
||||||
|
if (!out->is_Phi()) {
|
||||||
if (out->in(0) == NULL) {
|
if (out->in(0) == NULL) {
|
||||||
out->set_req(0, non_throwing);
|
out->set_req(0, non_throwing);
|
||||||
} else if (out->in(0) == ctrl) {
|
} else if (out->in(0) == ctrl) {
|
||||||
@ -3012,6 +3027,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
assert( !n->is_Call(), "" );
|
assert( !n->is_Call(), "" );
|
||||||
@ -3792,6 +3808,45 @@ void Compile::add_expensive_node(Node * n) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Remove the speculative part of types and clean up the graph
|
||||||
|
*/
|
||||||
|
void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
|
||||||
|
if (UseTypeSpeculation) {
|
||||||
|
Unique_Node_List worklist;
|
||||||
|
worklist.push(root());
|
||||||
|
int modified = 0;
|
||||||
|
// Go over all type nodes that carry a speculative type, drop the
|
||||||
|
// speculative part of the type and enqueue the node for an igvn
|
||||||
|
// which may optimize it out.
|
||||||
|
for (uint next = 0; next < worklist.size(); ++next) {
|
||||||
|
Node *n = worklist.at(next);
|
||||||
|
if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
|
||||||
|
n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
|
||||||
|
TypeNode* tn = n->as_Type();
|
||||||
|
const TypeOopPtr* t = tn->type()->is_oopptr();
|
||||||
|
bool in_hash = igvn.hash_delete(n);
|
||||||
|
assert(in_hash, "node should be in igvn hash table");
|
||||||
|
tn->set_type(t->remove_speculative());
|
||||||
|
igvn.hash_insert(n);
|
||||||
|
igvn._worklist.push(n); // give it a chance to go away
|
||||||
|
modified++;
|
||||||
|
}
|
||||||
|
uint max = n->len();
|
||||||
|
for( uint i = 0; i < max; ++i ) {
|
||||||
|
Node *m = n->in(i);
|
||||||
|
if (not_a_node(m)) continue;
|
||||||
|
worklist.push(m);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Drop the speculative part of all types in the igvn's type table
|
||||||
|
igvn.remove_speculative_types();
|
||||||
|
if (modified > 0) {
|
||||||
|
igvn.optimize();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Auxiliary method to support randomized stressing/fuzzing.
|
// Auxiliary method to support randomized stressing/fuzzing.
|
||||||
//
|
//
|
||||||
// This method can be called the arbitrary number of times, with current count
|
// This method can be called the arbitrary number of times, with current count
|
||||||
|
@ -424,6 +424,11 @@ class Compile : public Phase {
|
|||||||
static int cmp_expensive_nodes(Node** n1, Node** n2);
|
static int cmp_expensive_nodes(Node** n1, Node** n2);
|
||||||
// Expensive nodes list already sorted?
|
// Expensive nodes list already sorted?
|
||||||
bool expensive_nodes_sorted() const;
|
bool expensive_nodes_sorted() const;
|
||||||
|
// Remove the speculative part of types and clean up the graph
|
||||||
|
void remove_speculative_types(PhaseIterGVN &igvn);
|
||||||
|
|
||||||
|
// Are we within a PreserveJVMState block?
|
||||||
|
int _preserve_jvm_state;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
@ -820,7 +825,9 @@ class Compile : public Phase {
|
|||||||
|
|
||||||
// Decide how to build a call.
|
// Decide how to build a call.
|
||||||
// The profile factor is a discount to apply to this site's interp. profile.
|
// The profile factor is a discount to apply to this site's interp. profile.
|
||||||
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
|
CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
|
||||||
|
JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL,
|
||||||
|
bool allow_intrinsics = true, bool delayed_forbidden = false);
|
||||||
bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
|
bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
|
||||||
return should_delay_string_inlining(call_method, jvms) ||
|
return should_delay_string_inlining(call_method, jvms) ||
|
||||||
should_delay_boxing_inlining(call_method, jvms);
|
should_delay_boxing_inlining(call_method, jvms);
|
||||||
@ -1156,6 +1163,21 @@ class Compile : public Phase {
|
|||||||
|
|
||||||
// Auxiliary method for randomized fuzzing/stressing
|
// Auxiliary method for randomized fuzzing/stressing
|
||||||
static bool randomized_select(int count);
|
static bool randomized_select(int count);
|
||||||
|
|
||||||
|
// enter a PreserveJVMState block
|
||||||
|
void inc_preserve_jvm_state() {
|
||||||
|
_preserve_jvm_state++;
|
||||||
|
}
|
||||||
|
|
||||||
|
// exit a PreserveJVMState block
|
||||||
|
void dec_preserve_jvm_state() {
|
||||||
|
_preserve_jvm_state--;
|
||||||
|
assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative");
|
||||||
|
}
|
||||||
|
|
||||||
|
bool has_preserve_jvm_state() const {
|
||||||
|
return _preserve_jvm_state > 0;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif // SHARE_VM_OPTO_COMPILE_HPP
|
#endif // SHARE_VM_OPTO_COMPILE_HPP
|
||||||
|
@ -63,7 +63,8 @@ void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMeth
|
|||||||
|
|
||||||
CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
|
CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
|
||||||
JVMState* jvms, bool allow_inline,
|
JVMState* jvms, bool allow_inline,
|
||||||
float prof_factor, bool allow_intrinsics, bool delayed_forbidden) {
|
float prof_factor, ciKlass* speculative_receiver_type,
|
||||||
|
bool allow_intrinsics, bool delayed_forbidden) {
|
||||||
ciMethod* caller = jvms->method();
|
ciMethod* caller = jvms->method();
|
||||||
int bci = jvms->bci();
|
int bci = jvms->bci();
|
||||||
Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
|
Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
|
||||||
@ -117,7 +118,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||||||
if (cg->is_predicted()) {
|
if (cg->is_predicted()) {
|
||||||
// Code without intrinsic but, hopefully, inlined.
|
// Code without intrinsic but, hopefully, inlined.
|
||||||
CallGenerator* inline_cg = this->call_generator(callee,
|
CallGenerator* inline_cg = this->call_generator(callee,
|
||||||
vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, false);
|
vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
|
||||||
if (inline_cg != NULL) {
|
if (inline_cg != NULL) {
|
||||||
cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
|
cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
|
||||||
}
|
}
|
||||||
@ -212,8 +213,24 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||||||
// The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
|
// The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
|
||||||
bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
|
bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
|
||||||
ciMethod* receiver_method = NULL;
|
ciMethod* receiver_method = NULL;
|
||||||
if (have_major_receiver || profile.morphism() == 1 ||
|
|
||||||
(profile.morphism() == 2 && UseBimorphicInlining)) {
|
int morphism = profile.morphism();
|
||||||
|
if (speculative_receiver_type != NULL) {
|
||||||
|
// We have a speculative type, we should be able to resolve
|
||||||
|
// the call. We do that before looking at the profiling at
|
||||||
|
// this invoke because it may lead to bimorphic inlining which
|
||||||
|
// a speculative type should help us avoid.
|
||||||
|
receiver_method = callee->resolve_invoke(jvms->method()->holder(),
|
||||||
|
speculative_receiver_type);
|
||||||
|
if (receiver_method == NULL) {
|
||||||
|
speculative_receiver_type = NULL;
|
||||||
|
} else {
|
||||||
|
morphism = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (receiver_method == NULL &&
|
||||||
|
(have_major_receiver || morphism == 1 ||
|
||||||
|
(morphism == 2 && UseBimorphicInlining))) {
|
||||||
// receiver_method = profile.method();
|
// receiver_method = profile.method();
|
||||||
// Profiles do not suggest methods now. Look it up in the major receiver.
|
// Profiles do not suggest methods now. Look it up in the major receiver.
|
||||||
receiver_method = callee->resolve_invoke(jvms->method()->holder(),
|
receiver_method = callee->resolve_invoke(jvms->method()->holder(),
|
||||||
@ -227,7 +244,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||||||
// Look up second receiver.
|
// Look up second receiver.
|
||||||
CallGenerator* next_hit_cg = NULL;
|
CallGenerator* next_hit_cg = NULL;
|
||||||
ciMethod* next_receiver_method = NULL;
|
ciMethod* next_receiver_method = NULL;
|
||||||
if (profile.morphism() == 2 && UseBimorphicInlining) {
|
if (morphism == 2 && UseBimorphicInlining) {
|
||||||
next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
|
next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
|
||||||
profile.receiver(1));
|
profile.receiver(1));
|
||||||
if (next_receiver_method != NULL) {
|
if (next_receiver_method != NULL) {
|
||||||
@ -242,11 +259,10 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
CallGenerator* miss_cg;
|
CallGenerator* miss_cg;
|
||||||
Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
|
Deoptimization::DeoptReason reason = morphism == 2 ?
|
||||||
Deoptimization::Reason_bimorphic :
|
Deoptimization::Reason_bimorphic :
|
||||||
Deoptimization::Reason_class_check;
|
Deoptimization::Reason_class_check;
|
||||||
if (( profile.morphism() == 1 ||
|
if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
|
||||||
(profile.morphism() == 2 && next_hit_cg != NULL) ) &&
|
|
||||||
!too_many_traps(jvms->method(), jvms->bci(), reason)
|
!too_many_traps(jvms->method(), jvms->bci(), reason)
|
||||||
) {
|
) {
|
||||||
// Generate uncommon trap for class check failure path
|
// Generate uncommon trap for class check failure path
|
||||||
@ -260,6 +276,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||||||
}
|
}
|
||||||
if (miss_cg != NULL) {
|
if (miss_cg != NULL) {
|
||||||
if (next_hit_cg != NULL) {
|
if (next_hit_cg != NULL) {
|
||||||
|
assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
|
||||||
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
|
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
|
||||||
// We don't need to record dependency on a receiver here and below.
|
// We don't need to record dependency on a receiver here and below.
|
||||||
// Whenever we inline, the dependency is added by Parse::Parse().
|
// Whenever we inline, the dependency is added by Parse::Parse().
|
||||||
@ -267,7 +284,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
|
|||||||
}
|
}
|
||||||
if (miss_cg != NULL) {
|
if (miss_cg != NULL) {
|
||||||
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
|
trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
|
||||||
CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
|
ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
|
||||||
|
float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
|
||||||
|
CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
|
||||||
if (cg != NULL) return cg;
|
if (cg != NULL) return cg;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -446,6 +465,8 @@ void Parse::do_call() {
|
|||||||
int vtable_index = Method::invalid_vtable_index;
|
int vtable_index = Method::invalid_vtable_index;
|
||||||
bool call_does_dispatch = false;
|
bool call_does_dispatch = false;
|
||||||
|
|
||||||
|
// Speculative type of the receiver if any
|
||||||
|
ciKlass* speculative_receiver_type = NULL;
|
||||||
if (is_virtual_or_interface) {
|
if (is_virtual_or_interface) {
|
||||||
Node* receiver_node = stack(sp() - nargs);
|
Node* receiver_node = stack(sp() - nargs);
|
||||||
const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
|
const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
|
||||||
@ -453,6 +474,7 @@ void Parse::do_call() {
|
|||||||
callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
|
callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
|
||||||
is_virtual,
|
is_virtual,
|
||||||
call_does_dispatch, vtable_index); // out-parameters
|
call_does_dispatch, vtable_index); // out-parameters
|
||||||
|
speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note: It's OK to try to inline a virtual call.
|
// Note: It's OK to try to inline a virtual call.
|
||||||
@ -468,7 +490,7 @@ void Parse::do_call() {
|
|||||||
// Decide call tactic.
|
// Decide call tactic.
|
||||||
// This call checks with CHA, the interpreter profile, intrinsics table, etc.
|
// This call checks with CHA, the interpreter profile, intrinsics table, etc.
|
||||||
// It decides whether inlining is desirable or not.
|
// It decides whether inlining is desirable or not.
|
||||||
CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor());
|
CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);
|
||||||
|
|
||||||
// NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
|
// NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead.
|
||||||
orig_callee = callee = NULL;
|
orig_callee = callee = NULL;
|
||||||
@ -477,6 +499,10 @@ void Parse::do_call() {
|
|||||||
// Round double arguments before call
|
// Round double arguments before call
|
||||||
round_double_arguments(cg->method());
|
round_double_arguments(cg->method());
|
||||||
|
|
||||||
|
// Feed profiling data for arguments to the type system so it can
|
||||||
|
// propagate it as speculative types
|
||||||
|
record_profiled_arguments_for_speculation(cg->method(), bc());
|
||||||
|
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
// bump global counters for calls
|
// bump global counters for calls
|
||||||
count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
|
count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());
|
||||||
@ -491,11 +517,18 @@ void Parse::do_call() {
|
|||||||
// save across call, for a subsequent cast_not_null.
|
// save across call, for a subsequent cast_not_null.
|
||||||
Node* receiver = has_receiver ? argument(0) : NULL;
|
Node* receiver = has_receiver ? argument(0) : NULL;
|
||||||
|
|
||||||
|
// The extra CheckCastPP for speculative types mess with PhaseStringOpts
|
||||||
|
if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
|
||||||
|
// Feed profiling data for a single receiver to the type system so
|
||||||
|
// it can propagate it as a speculative type
|
||||||
|
receiver = record_profiled_receiver_for_speculation(receiver);
|
||||||
|
}
|
||||||
|
|
||||||
// Bump method data counters (We profile *before* the call is made
|
// Bump method data counters (We profile *before* the call is made
|
||||||
// because exceptions don't return to the call site.)
|
// because exceptions don't return to the call site.)
|
||||||
profile_call(receiver);
|
profile_call(receiver);
|
||||||
|
|
||||||
JVMState* new_jvms = cg->generate(jvms);
|
JVMState* new_jvms = cg->generate(jvms, this);
|
||||||
if (new_jvms == NULL) {
|
if (new_jvms == NULL) {
|
||||||
// When inlining attempt fails (e.g., too many arguments),
|
// When inlining attempt fails (e.g., too many arguments),
|
||||||
// it may contaminate the current compile state, making it
|
// it may contaminate the current compile state, making it
|
||||||
@ -508,8 +541,8 @@ void Parse::do_call() {
|
|||||||
// the call site, perhaps because it did not match a pattern the
|
// the call site, perhaps because it did not match a pattern the
|
||||||
// intrinsic was expecting to optimize. Should always be possible to
|
// intrinsic was expecting to optimize. Should always be possible to
|
||||||
// get a normal java call that may inline in that case
|
// get a normal java call that may inline in that case
|
||||||
cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
|
cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
|
||||||
if ((new_jvms = cg->generate(jvms)) == NULL) {
|
if ((new_jvms = cg->generate(jvms, this)) == NULL) {
|
||||||
guarantee(failing(), "call failed to generate: calls should work");
|
guarantee(failing(), "call failed to generate: calls should work");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -607,6 +640,16 @@ void Parse::do_call() {
|
|||||||
null_assert(peek());
|
null_assert(peek());
|
||||||
set_bci(iter().cur_bci()); // put it back
|
set_bci(iter().cur_bci()); // put it back
|
||||||
}
|
}
|
||||||
|
BasicType ct = ctype->basic_type();
|
||||||
|
if (ct == T_OBJECT || ct == T_ARRAY) {
|
||||||
|
ciKlass* better_type = method()->return_profiled_type(bci());
|
||||||
|
if (UseTypeSpeculation && better_type != NULL) {
|
||||||
|
// If profiling reports a single type for the return value,
|
||||||
|
// feed it to the type system so it can propagate it as a
|
||||||
|
// speculative type
|
||||||
|
record_profile_for_speculation(stack(sp()-1), better_type);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Restart record of parsing work after possible inlining of call
|
// Restart record of parsing work after possible inlining of call
|
||||||
|
@ -639,6 +639,7 @@ PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
|
|||||||
_map = kit->map(); // preserve the map
|
_map = kit->map(); // preserve the map
|
||||||
_sp = kit->sp();
|
_sp = kit->sp();
|
||||||
kit->set_map(clone_map ? kit->clone_map() : NULL);
|
kit->set_map(clone_map ? kit->clone_map() : NULL);
|
||||||
|
Compile::current()->inc_preserve_jvm_state();
|
||||||
#ifdef ASSERT
|
#ifdef ASSERT
|
||||||
_bci = kit->bci();
|
_bci = kit->bci();
|
||||||
Parse* parser = kit->is_Parse();
|
Parse* parser = kit->is_Parse();
|
||||||
@ -656,6 +657,7 @@ PreserveJVMState::~PreserveJVMState() {
|
|||||||
#endif
|
#endif
|
||||||
kit->set_map(_map);
|
kit->set_map(_map);
|
||||||
kit->set_sp(_sp);
|
kit->set_sp(_sp);
|
||||||
|
Compile::current()->dec_preserve_jvm_state();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -1373,17 +1375,70 @@ Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {

 //--------------------------replace_in_map-------------------------------------
 void GraphKit::replace_in_map(Node* old, Node* neww) {
-  this->map()->replace_edge(old, neww);
+  if (old == neww) {
+    return;
+  }
+
+  map()->replace_edge(old, neww);

   // Note: This operation potentially replaces any edge
   // on the map. This includes locals, stack, and monitors
   // of the current (innermost) JVM state.

-  // We can consider replacing in caller maps.
-  // The idea would be that an inlined function's null checks
-  // can be shared with the entire inlining tree.
-  // The expense of doing this is that the PreserveJVMState class
-  // would have to preserve caller states too, with a deep copy.
+  if (!ReplaceInParentMaps) {
+    return;
+  }
+
+  // PreserveJVMState doesn't do a deep copy so we can't modify
+  // parents
+  if (Compile::current()->has_preserve_jvm_state()) {
+    return;
+  }
+
+  Parse* parser = is_Parse();
+  bool progress = true;
+  Node* ctrl = map()->in(0);
+  // Follow the chain of parsers and see whether the update can be
+  // done in the map of callers. We can do the replace for a caller if
+  // the current control post dominates the control of a caller.
+  while (parser != NULL && parser->caller() != NULL && progress) {
+    progress = false;
+    Node* parent_map = parser->caller()->map();
+    assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch");
+
+    Node* parent_ctrl = parent_map->in(0);
+
+    while (parent_ctrl->is_Region()) {
+      Node* n = parent_ctrl->as_Region()->is_copy();
+      if (n == NULL) {
+        break;
+      }
+      parent_ctrl = n;
+    }
+
+    for (;;) {
+      if (ctrl == parent_ctrl) {
+        // update the map of the exits which is the one that will be
+        // used when compilation resume after inlining
+        parser->exits().map()->replace_edge(old, neww);
+        progress = true;
+        break;
+      }
+      if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
+        ctrl = ctrl->in(0)->in(0);
+      } else if (ctrl->is_Region()) {
+        Node* n = ctrl->as_Region()->is_copy();
+        if (n == NULL) {
+          break;
+        }
+        ctrl = n;
+      } else {
+        break;
+      }
+    }
+
+    parser = parser->parent_parser();
+  }
 }


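Note: the rewritten replace_in_map above propagates a node replacement from the innermost inlining frame into caller maps whenever the current control reaches a caller's control unchanged. The standalone sketch below is not HotSpot code; Frame, replace_edge and replace_in_maps are hypothetical stand-ins, and the single control-id comparison is a crude substitute for the real post-dominance walk over Region and uncommon-trap projections.

#include <cstdio>
#include <vector>

// Simplified stand-ins for an inlining frame and its JVM state map.
// A "map" is just a list of node ids; "control" is a single node id.
struct Frame {
    std::vector<int> map;     // values visible in this frame
    int              control; // control node this frame is parked on
    Frame*           caller;  // next outer frame, nullptr at the root
};

// Replace every occurrence of old_id by new_id in one map.
static void replace_edge(std::vector<int>& map, int old_id, int new_id) {
    for (int& v : map) {
        if (v == old_id) v = new_id;
    }
}

// Model of the caller-map propagation: always update the innermost map,
// then keep updating caller maps while the current control is the very
// node the caller is parked on (a crude stand-in for "post-dominates").
static void replace_in_maps(Frame* innermost, int old_id, int new_id) {
    if (old_id == new_id) return;
    replace_edge(innermost->map, old_id, new_id);

    int ctrl = innermost->control;
    for (Frame* f = innermost->caller; f != nullptr; f = f->caller) {
        if (ctrl != f->control) break;   // caller may see other paths: stop
        replace_edge(f->map, old_id, new_id);
    }
}

int main() {
    Frame outer = {{1, 7, 9}, 42, nullptr};
    Frame inner = {{7, 3},    42, &outer};  // same control: safe to propagate
    replace_in_maps(&inner, 7, 13);
    std::printf("inner: %d %d  outer: %d %d %d\n",
                inner.map[0], inner.map[1],
                outer.map[0], outer.map[1], outer.map[2]);
    return 0;
}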
@ -2043,6 +2098,104 @@ void GraphKit::round_double_arguments(ciMethod* dest_method) {
   }
 }

+/**
+ * Record profiling data exact_kls for Node n with the type system so
+ * that it can propagate it (speculation)
+ *
+ * @param n          node that the type applies to
+ * @param exact_kls  type from profiling
+ *
+ * @return           node with improved type
+ */
+Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
+  const TypeOopPtr* current_type = _gvn.type(n)->isa_oopptr();
+  assert(UseTypeSpeculation, "type speculation must be on");
+  if (exact_kls != NULL &&
+      // nothing to improve if type is already exact
+      (current_type == NULL ||
+       (!current_type->klass_is_exact() &&
+        (current_type->speculative() == NULL ||
+         !current_type->speculative()->klass_is_exact())))) {
+    const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
+    const TypeOopPtr* xtype = tklass->as_instance_type();
+    assert(xtype->klass_is_exact(), "Should be exact");
+
+    // Build a type with a speculative type (what we think we know
+    // about the type but will need a guard when we use it)
+    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, xtype);
+    // We're changing the type, we need a new cast node to carry the
+    // new type. The new type depends on the control: what profiling
+    // tells us is only valid from here as far as we can tell.
+    Node* cast = new(C) CastPPNode(n, spec_type);
+    cast->init_req(0, control());
+    cast = _gvn.transform(cast);
+    replace_in_map(n, cast);
+    n = cast;
+  }
+  return n;
+}
+
+/**
+ * Record profiling data from receiver profiling at an invoke with the
+ * type system so that it can propagate it (speculation)
+ *
+ * @param n  receiver node
+ *
+ * @return   node with improved type
+ */
+Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
+  if (!UseTypeSpeculation) {
+    return n;
+  }
+  ciKlass* exact_kls = profile_has_unique_klass();
+  return record_profile_for_speculation(n, exact_kls);
+}
+
+/**
+ * Record profiling data from argument profiling at an invoke with the
+ * type system so that it can propagate it (speculation)
+ *
+ * @param dest_method  target method for the call
+ * @param bc           what invoke bytecode is this?
+ */
+void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
+  if (!UseTypeSpeculation) {
+    return;
+  }
+  const TypeFunc* tf = TypeFunc::make(dest_method);
+  int nargs = tf->_domain->_cnt - TypeFunc::Parms;
+  int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
+  for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
+    const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms);
+    if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
+      ciKlass* better_type = method()->argument_profiled_type(bci(), i);
+      if (better_type != NULL) {
+        record_profile_for_speculation(argument(j), better_type);
+      }
+      i++;
+    }
+  }
+}
+
+/**
+ * Record profiling data from parameter profiling at an invoke with
+ * the type system so that it can propagate it (speculation)
+ */
+void GraphKit::record_profiled_parameters_for_speculation() {
+  if (!UseTypeSpeculation) {
+    return;
+  }
+  for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
+    if (_gvn.type(local(i))->isa_oopptr()) {
+      ciKlass* better_type = method()->parameter_profiled_type(j);
+      if (better_type != NULL) {
+        record_profile_for_speculation(local(i), better_type);
+      }
+      j++;
+    }
+  }
+}
+
 void GraphKit::round_double_result(ciMethod* dest_method) {
   // A non-strict method may return a double value which has an extended
   // exponent, but this must not be visible in a caller which is 'strict'
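Note: record_profile_for_speculation above only attaches a speculative type when profiling reports a class and neither the declared type nor an existing speculative annotation is already exact. Below is a minimal standalone sketch of that gating rule; TypeInfo and record_speculation are hypothetical stand-ins, not the HotSpot type lattice.

#include <cstdio>

// Toy stand-in for an oop type: a class id plus exactness flags for the
// declared type and for an optional speculative annotation.
struct TypeInfo {
    int  klass;       // declared class id (0 = unknown)
    bool exact;       // declared type already exact?
    int  spec_klass;  // speculative class id (0 = none)
    bool spec_exact;  // speculative type already exact?
};

// Model of the gating logic: only attach a speculative type when profiling
// reports a class and neither the current type nor an existing speculative
// type is already exact.
static bool record_speculation(TypeInfo& t, int profiled_klass) {
    if (profiled_klass == 0) return false;                // profiling learned nothing
    if (t.exact)             return false;                // nothing to improve
    if (t.spec_klass != 0 && t.spec_exact) return false;  // already speculated exactly
    t.spec_klass = profiled_klass;
    t.spec_exact = true;   // profile reported a single exact class
    return true;
}

int main() {
    TypeInfo receiver = {/*klass*/ 5, /*exact*/ false, /*spec_klass*/ 0, /*spec_exact*/ false};
    bool changed = record_speculation(receiver, /*profiled_klass*/ 8);
    std::printf("changed=%d spec_klass=%d\n", changed ? 1 : 0, receiver.spec_klass);
    return 0;
}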
@ -2580,10 +2733,10 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) {
 // If the profile has seen exactly one type, narrow to exactly that type.
 // Subsequent type checks will always fold up.
 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
-                                             ciProfileData* data,
-                                             ciKlass* require_klass) {
+                                             ciKlass* require_klass,
+                                             ciKlass* spec_klass,
+                                             bool safe_for_replace) {
   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
-  if (data == NULL) return NULL;

   // Make sure we haven't already deoptimized from this tactic.
   if (too_many_traps(Deoptimization::Reason_class_check))
@ -2591,15 +2744,15 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,

   // (No, this isn't a call, but it's enough like a virtual call
   // to use the same ciMethod accessor to get the profile info...)
-  ciCallProfile profile = method()->call_profile_at_bci(bci());
-  if (profile.count() >= 0 &&         // no cast failures here
-      profile.has_receiver(0) &&
-      profile.morphism() == 1) {
-    ciKlass* exact_kls = profile.receiver(0);
+  // If we have a speculative type use it instead of profiling (which
+  // may not help us)
+  ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass;
+  if (exact_kls != NULL) {// no cast failures here
     if (require_klass == NULL ||
         static_subtype_check(require_klass, exact_kls) == SSC_always_true) {
-      // If we narrow the type to match what the type profile sees,
-      // we can then remove the rest of the cast.
+      // If we narrow the type to match what the type profile sees or
+      // the speculative type, we can then remove the rest of the
+      // cast.
       // This is a win, even if the exact_kls is very specific,
       // because downstream operations, such as method calls,
       // will often benefit from the sharper type.
@ -2611,7 +2764,9 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
         uncommon_trap(Deoptimization::Reason_class_check,
                       Deoptimization::Action_maybe_recompile);
       }
-      replace_in_map(not_null_obj, exact_obj);
+      if (safe_for_replace) {
+        replace_in_map(not_null_obj, exact_obj);
+      }
       return exact_obj;
     }
     // assert(ssc == SSC_always_true)... except maybe the profile lied to us.
@ -2620,11 +2775,59 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
   return NULL;
 }

+/**
+ * Cast obj to type and emit guard unless we had too many traps here
+ * already
+ *
+ * @param obj       node being casted
+ * @param type      type to cast the node to
+ * @param not_null  true if we know node cannot be null
+ */
+Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
+                                        ciKlass* type,
+                                        bool not_null) {
+  // type == NULL if profiling tells us this object is always null
+  if (type != NULL) {
+    if (!too_many_traps(Deoptimization::Reason_null_check) &&
+        !too_many_traps(Deoptimization::Reason_class_check)) {
+      Node* not_null_obj = NULL;
+      // not_null is true if we know the object is not null and
+      // there's no need for a null check
+      if (!not_null) {
+        Node* null_ctl = top();
+        not_null_obj = null_check_oop(obj, &null_ctl, true, true);
+        assert(null_ctl->is_top(), "no null control here");
+      } else {
+        not_null_obj = obj;
+      }
+
+      Node* exact_obj = not_null_obj;
+      ciKlass* exact_kls = type;
+      Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
+                                           &exact_obj);
+      {
+        PreserveJVMState pjvms(this);
+        set_control(slow_ctl);
+        uncommon_trap(Deoptimization::Reason_class_check,
+                      Deoptimization::Action_maybe_recompile);
+      }
+      replace_in_map(not_null_obj, exact_obj);
+      obj = exact_obj;
+    }
+  } else {
+    if (!too_many_traps(Deoptimization::Reason_null_assert)) {
+      Node* exact_obj = null_assert(obj);
+      replace_in_map(obj, exact_obj);
+      obj = exact_obj;
+    }
+  }
+  return obj;
+}
+
 //-------------------------------gen_instanceof--------------------------------
 // Generate an instance-of idiom. Used by both the instance-of bytecode
 // and the reflective instance-of call.
-Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
+Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replace) {
   kill_dead_locals();   // Benefit all the uncommon traps
   assert( !stopped(), "dead parse path should be checked in callers" );
   assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()),
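Note: the new maybe_cast_profiled_obj added in the hunk above only speculates while the per-site deoptimization budget allows it (too_many_traps). The standalone model below uses hypothetical TrapBudget, TypedValue and maybe_cast names; the real code emits a type-check guard plus an uncommon trap rather than returning a narrowed record.

#include <cstdio>

// Per-site deoptimization budget: once speculative guards at this site have
// failed too often, stop emitting them (a stand-in for too_many_traps()).
struct TrapBudget {
    int failures;
    int limit;
    bool exhausted() const { return failures >= limit; }
};

// A value together with the class the compiler currently knows for it.
struct TypedValue {
    int value;
    int klass;   // 0 = unknown / too vague to be useful
};

// Model of maybe_cast_profiled_obj's decision: narrow the value to the
// profiled class only while the trap budget allows the guard; otherwise
// keep the wider type and skip speculation entirely.
static TypedValue maybe_cast(TypedValue in, int profiled_klass, const TrapBudget& traps) {
    if (profiled_klass == 0) return in;   // profiling saw nothing useful
    if (traps.exhausted())   return in;   // guard kept failing: give up
    // The real code emits a type-check guard and an uncommon trap here;
    // the sketch just records the narrowed class.
    return TypedValue{in.value, profiled_klass};
}

int main() {
    TrapBudget fresh = {0, 2};
    TrapBudget burnt = {5, 2};
    TypedValue v = {17, 0};
    std::printf("fresh: klass=%d  burnt: klass=%d\n",
                maybe_cast(v, 8, fresh).klass,
                maybe_cast(v, 8, burnt).klass);
    return 0;
}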
@ -2637,10 +2840,8 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
   C->set_has_split_ifs(true); // Has chance for split-if optimization

   ciProfileData* data = NULL;
-  bool safe_for_replace = false;
   if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode
     data = method()->method_data()->bci_to_data(bci());
-    safe_for_replace = true;
   }
   bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
                          && seems_never_null(obj, data));
@ -2664,15 +2865,38 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
     phi ->del_req(_null_path);
   }

-  if (ProfileDynamicTypes && data != NULL) {
-    Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, NULL);
-    if (stopped()) {            // Profile disagrees with this path.
-      set_control(null_ctl);    // Null is the only remaining possibility.
-      return intcon(0);
-    }
-    if (cast_obj != NULL)
-      not_null_obj = cast_obj;
-  }
+  // Do we know the type check always succeed?
+  bool known_statically = false;
+  if (_gvn.type(superklass)->singleton()) {
+    ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
+    ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
+    if (subk != NULL && subk->is_loaded()) {
+      int static_res = static_subtype_check(superk, subk);
+      known_statically = (static_res == SSC_always_true || static_res == SSC_always_false);
+    }
+  }
+
+  if (known_statically && UseTypeSpeculation) {
+    // If we know the type check always succeed then we don't use the
+    // profiling data at this bytecode. Don't lose it, feed it to the
+    // type system as a speculative type.
+    not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
+  } else {
+    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+    // We may not have profiling here or it may not help us. If we
+    // have a speculative type use it to perform an exact cast.
+    ciKlass* spec_obj_type = obj_type->speculative_type();
+    if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) {
+      Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
+      if (stopped()) {            // Profile disagrees with this path.
+        set_control(null_ctl);    // Null is the only remaining possibility.
+        return intcon(0);
+      }
+      if (cast_obj != NULL) {
+        not_null_obj = cast_obj;
+      }
+    }
+  }

   // Load the object's klass
   Node* obj_klass = load_object_klass(not_null_obj);
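Note: the gen_instanceof change above first asks whether the subtype relation is statically known; if so, the profile is only recorded as a speculative hint, otherwise the profiled (or speculative) class may be used to attempt an exact cast before the dynamic check. Below is a toy standalone model of that choice; is_subtype, choose and the numeric class hierarchy are hypothetical, and this single-inheritance, class-only model is much weaker than the real static_subtype_check.

#include <cstdio>

// Trivial class "hierarchy": klass k's superclass is k/2 (1 is the root).
// Classes only, single inheritance, no interfaces.
static bool is_subtype(int sub, int super) {
    for (int k = sub; k >= 1; k /= 2) {
        if (k == super) return true;
    }
    return false;
}

// What the compiler does with the profile at an instanceof site.
enum Action { RECORD_SPECULATION_ONLY, TRY_PROFILED_CAST, DYNAMIC_CHECK_ONLY };

// Model of the decision: when the relation between the object's static
// class and the checked class is already known at compile time, profiling
// is only kept as a speculative hint; otherwise it may drive an exact cast.
static Action choose(int static_klass, int checked_klass, int profiled_klass) {
    bool always_true  = (static_klass != 0) && is_subtype(static_klass, checked_klass);
    bool always_false = (static_klass != 0) && !is_subtype(static_klass, checked_klass)
                                            && !is_subtype(checked_klass, static_klass);
    bool known_statically = always_true || always_false;
    if (known_statically) {
        return profiled_klass != 0 ? RECORD_SPECULATION_ONLY : DYNAMIC_CHECK_ONLY;
    }
    return profiled_klass != 0 ? TRY_PROFILED_CAST : DYNAMIC_CHECK_ONLY;
}

int main() {
    // Object statically of class 12 checked against class 3 (12 -> 6 -> 3): always true.
    std::printf("%d\n", choose(12, 3, 12));  // prints 0: record speculation only
    // Statically unknown object checked against class 3, profile saw class 6.
    std::printf("%d\n", choose(0, 3, 6));    // prints 1: try profiled cast
    return 0;
}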
@ -2718,7 +2942,10 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
   if (objtp != NULL && objtp->klass() != NULL) {
     switch (static_subtype_check(tk->klass(), objtp->klass())) {
     case SSC_always_true:
-      return obj;
+      // If we know the type check always succeed then we don't use
+      // the profiling data at this bytecode. Don't lose it, feed it
+      // to the type system as a speculative type.
+      return record_profiled_receiver_for_speculation(obj);
     case SSC_always_false:
       // It needs a null check because a null will *pass* the cast check.
       // A non-null value will always produce an exception.
@ -2767,12 +2994,17 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
   }

   Node* cast_obj = NULL;
-  if (data != NULL &&
+  const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+  // We may not have profiling here or it may not help us. If we have
+  // a speculative type use it to perform an exact cast.
+  ciKlass* spec_obj_type = obj_type->speculative_type();
+  if (spec_obj_type != NULL ||
+      (data != NULL &&
        // Counter has never been decremented (due to cast failure).
        // ...This is a reasonable thing to expect. It is true of
        // all casts inserted by javac to implement generic types.
-      data->as_CounterData()->count() >= 0) {
-    cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass());
+       data->as_CounterData()->count() >= 0)) {
+    cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
     if (cast_obj != NULL) {
       if (failure_control != NULL) // failure is now impossible
         (*failure_control) = top();
@ -386,10 +386,33 @@ class GraphKit : public Phase {
   // Check the null_seen bit.
   bool seems_never_null(Node* obj, ciProfileData* data);

+  // Check for unique class for receiver at call
+  ciKlass* profile_has_unique_klass() {
+    ciCallProfile profile = method()->call_profile_at_bci(bci());
+    if (profile.count() >= 0 &&   // no cast failures here
+        profile.has_receiver(0) &&
+        profile.morphism() == 1) {
+      return profile.receiver(0);
+    }
+    return NULL;
+  }
+
+  // record type from profiling with the type system
+  Node* record_profile_for_speculation(Node* n, ciKlass* exact_kls);
+  Node* record_profiled_receiver_for_speculation(Node* n);
+  void record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc);
+  void record_profiled_parameters_for_speculation();
+
   // Use the type profile to narrow an object type.
   Node* maybe_cast_profiled_receiver(Node* not_null_obj,
-                                     ciProfileData* data,
-                                     ciKlass* require_klass);
+                                     ciKlass* require_klass,
+                                     ciKlass* spec,
+                                     bool safe_for_replace);
+
+  // Cast obj to type and emit guard unless we had too many traps here already
+  Node* maybe_cast_profiled_obj(Node* obj,
+                                ciKlass* type,
+                                bool not_null = false);

   // Cast obj to not-null on this path
   Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
@ -775,7 +798,7 @@ class GraphKit : public Phase {

   // Generate an instance-of idiom. Used by both the instance-of bytecode
   // and the reflective instance-of call.
-  Node* gen_instanceof( Node *subobj, Node* superkls );
+  Node* gen_instanceof(Node *subobj, Node* superkls, bool safe_for_replace = false);

   // Generate a check-cast idiom. Used by both the check-cast bytecode
   // and the array-store bytecode
@ -1019,7 +1019,7 @@ void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) {
   // be skipped. For example, range check predicate has two checks
   // for lower and upper bounds.
   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
-  if (PhaseIdealLoop::is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate))
+  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
     prev_dom = idom;

   // Now walk the current IfNode's projections.
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user