This commit is contained in:
Lana Steuck 2013-04-11 19:13:13 -07:00
commit 78667dcfe9
171 changed files with 5351 additions and 1778 deletions

View File

@ -205,3 +205,4 @@ a1313a8d90d17d363a3b2a645dc4030ec204b168 jdk8-b79
e41d716405b209d3eddef8bd4240cec2bd34dcca jdk8-b81
5e8c55025644730385a6f8fa029ecdb2d2c98a07 jdk8-b82
bcebd3fdefc91abb9d7fa0c5af6211b3f8720da6 jdk8-b83
d7ad0dfaa41151bd3a9ae46725b0aec3730a9cd0 jdk8-b84

View File

@ -206,3 +206,4 @@ fd1a5574cf68af24bfd52decc37ac6361afb278a jdk8-b78
29153d0df68f84162ffe8c2cf4f402a3f2245e85 jdk8-b82
466685ba01bfb7bc1e1ac61490fd8c0f3cc18763 jdk8-b83
01f631f89fa392b4e484d0812c40ea8f9d2353aa jdk8-b84
7fc358f5943676b82f1dccd3152b1ac07d92e38b jdk8-b85

View File

@ -43,8 +43,8 @@ fi
custom_hook=$custom_script_dir/custom-hook.m4
AUTOCONF=$(which autoconf 2> /dev/null);
AUTOCONF_267=$(which autoconf-2.67 2> /dev/null);
AUTOCONF="`which autoconf 2> /dev/null | grep -v '^no autoconf in'`"
AUTOCONF_267="`which autoconf-2.67 2> /dev/null | grep -v '^no autoconf-2.67 in'`"
echo "Autoconf found: ${AUTOCONF}"
echo "Autoconf-2.67 found: ${AUTOCONF_267}"

View File

@ -602,6 +602,10 @@ AC_PATH_PROG(TIME, time)
if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
BASIC_REQUIRE_PROG(COMM, comm)
fi
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
BASIC_REQUIRE_PROG(XATTR, xattr)
fi
])
# Check if build directory is on local disk. If not possible to determine,

View File

@ -29,9 +29,16 @@
include @SPEC@
# Check that the user did not try to specify a different java to use for compiling.
ifneq ($(firstword $(SJAVAC_SERVER_JAVA)),$(firstword $(JAVA)))
$(error Bootcycle builds are not possible if --with-sjavac-server-java is specified)
# On windows we need to account for fixpath being first word.
ifeq ($(firstword $(JAVA)),$(FIXPATH))
JAVA_EXEC_POS=2
else
JAVA_EXEC_POS=1
endif
ifneq ($(word $(JAVA_EXEC_POS),$(SJAVAC_SERVER_JAVA)),$(word $(JAVA_EXEC_POS),$(JAVA)))
$(error Bootcycle builds are not possible if --with-sjavac-server-java is specified)
endif
# Override specific values to do a boot cycle build
@ -39,5 +46,8 @@ endif
BUILD_OUTPUT:=@BUILD_OUTPUT@/bootcycle-build
# Use a different Boot JDK
OLD_BOOT_JDK:=$(BOOT_JDK)
BOOT_JDK:=@BUILD_OUTPUT@/images/j2sdk-image
BOOT_RTJAR:=@BUILD_OUTPUT@/images/j2sdk-image/jre/lib/rt.jar
SJAVAC_SERVER_JAVA:=$(subst $(OLD_BOOT_JDK),$(BOOT_JDK),$(SJAVAC_SERVER_JAVA))

View File

@ -49,7 +49,7 @@ JAVAP="@FIXPATH@ @BOOT_JDK@/bin/javap"
LDD="@LDD@"
MKDIR="@MKDIR@"
NAWK="@NAWK@"
NM="@NM@"
NM="@GNM@"
OBJDUMP="@OBJDUMP@"
OTOOL="@OTOOL@"
PRINTF="@PRINTF@"

File diff suppressed because it is too large Load Diff

View File

@ -75,6 +75,19 @@ ARCH=$(OPENJDK_TARGET_CPU_LEGACY)
# If yes then this expands to _LP64:=1
@LP64@
# Legacy settings for zero
ZERO_ENDIANNESS=$(OPENJDK_TARGET_CPU_ENDIAN)
ZERO_LIBARCH=$(OPENJDK_TARGET_CPU_LEGACY_LIB)
ZERO_ARCHDEF=@ZERO_ARCHDEF@
ZERO_ARCHFLAG=@ZERO_ARCHFLAG@
LIBFFI_CFLAGS=@LIBFFI_CFLAGS@
LIBFFI_LIBS=@LIBFFI_LIBS@
# Legacy settings for zeroshark
LLVM_CFLAGS=@LLVM_CFLAGS@
LLVM_LIBS=@LLVM_LIBS@
LLVM_LDFLAGS=@LLVM_LDFLAGS@
ALT_OUTPUTDIR=$(HOTSPOT_OUTPUTDIR)
ALT_EXPORT_PATH=$(HOTSPOT_DIST)

View File

@ -121,6 +121,15 @@ AC_SUBST(JVM_VARIANT_KERNEL)
AC_SUBST(JVM_VARIANT_ZERO)
AC_SUBST(JVM_VARIANT_ZEROSHARK)
INCLUDE_SA=true
if test "x$JVM_VARIANT_ZERO" = xtrue ; then
INCLUDE_SA=false
fi
if test "x$JVM_VARIANT_ZEROSHARK" = xtrue ; then
INCLUDE_SA=false
fi
AC_SUBST(INCLUDE_SA)
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
MACOSX_UNIVERSAL="true"
fi

View File

@ -687,7 +687,7 @@ if test "x$OPENJDK_TARGET_OS" = xlinux; then
AC_MSG_CHECKING([how to link with libstdc++])
# If dynamic was requested, it's available since it would fail above otherwise.
# If dynamic wasn't requested, go with static unless it isn't available.
if test "x$with_stdc__lib" = xdynamic || test "x$has_static_libstdcxx" = xno; then
if test "x$with_stdc__lib" = xdynamic || test "x$has_static_libstdcxx" = xno || test "x$JVM_VARIANT_ZEROSHARK" = xtrue; then
LIBCXX="$LIBCXX -lstdc++"
LDCXX="$CXX"
STATIC_CXX_SETTING="STATIC_CXX=false"
@ -701,6 +701,59 @@ if test "x$OPENJDK_TARGET_OS" = xlinux; then
fi
AC_SUBST(STATIC_CXX_SETTING)
if test "x$JVM_VARIANT_ZERO" = xtrue || test "x$JVM_VARIANT_ZEROSHARK" = xtrue; then
# Figure out LIBFFI_CFLAGS and LIBFFI_LIBS
PKG_CHECK_MODULES([LIBFFI], [libffi])
fi
if test "x$JVM_VARIANT_ZEROSHARK" = xtrue; then
AC_CHECK_PROG([LLVM_CONFIG], [llvm-config], [llvm-config])
if test "x$LLVM_CONFIG" != xllvm-config; then
AC_MSG_ERROR([llvm-config not found in $PATH.])
fi
llvm_components="jit mcjit engine nativecodegen native"
unset LLVM_CFLAGS
for flag in $("$LLVM_CONFIG" --cxxflags); do
if echo "${flag}" | grep -q '^-@<:@ID@:>@'; then
if test "${flag}" != "-D_DEBUG" ; then
if test "${LLVM_CFLAGS}" != "" ; then
LLVM_CFLAGS="${LLVM_CFLAGS} "
fi
LLVM_CFLAGS="${LLVM_CFLAGS}${flag}"
fi
fi
done
llvm_version=$("${LLVM_CONFIG}" --version | sed 's/\.//; s/svn.*//')
LLVM_CFLAGS="${LLVM_CFLAGS} -DSHARK_LLVM_VERSION=${llvm_version}"
unset LLVM_LDFLAGS
for flag in $("${LLVM_CONFIG}" --ldflags); do
if echo "${flag}" | grep -q '^-L'; then
if test "${LLVM_LDFLAGS}" != ""; then
LLVM_LDFLAGS="${LLVM_LDFLAGS} "
fi
LLVM_LDFLAGS="${LLVM_LDFLAGS}${flag}"
fi
done
unset LLVM_LIBS
for flag in $("${LLVM_CONFIG}" --libs ${llvm_components}); do
if echo "${flag}" | grep -q '^-l'; then
if test "${LLVM_LIBS}" != ""; then
LLVM_LIBS="${LLVM_LIBS} "
fi
LLVM_LIBS="${LLVM_LIBS}${flag}"
fi
done
AC_SUBST(LLVM_CFLAGS)
AC_SUBST(LLVM_LDFLAGS)
AC_SUBST(LLVM_LIBS)
fi
# libCrun is the c++ runtime-library with SunStudio (roughly the equivalent of gcc's libstdc++.so)
if test "x$OPENJDK_TARGET_OS" = xsolaris && test "x$LIBCXX" = x; then
LIBCXX="/usr/lib${OPENJDK_TARGET_CPU_ISADIR}/libCrun.so.1"

View File

@ -332,6 +332,29 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS],
DEFINE_CROSS_COMPILE_ARCH=""
fi
AC_SUBST(DEFINE_CROSS_COMPILE_ARCH)
# Some Zero and Shark settings.
# ZERO_ARCHFLAG tells the compiler which mode to build for
case "${OPENJDK_TARGET_CPU}" in
s390)
ZERO_ARCHFLAG="-m31"
;;
*)
ZERO_ARCHFLAG="-m${OPENJDK_TARGET_CPU_BITS}"
esac
AC_SUBST(ZERO_ARCHFLAG)
# ZERO_ARCHDEF is used to enable architecture-specific code
case "${OPENJDK_TARGET_CPU}" in
ppc*) ZERO_ARCHDEF=PPC ;;
s390*) ZERO_ARCHDEF=S390 ;;
sparc*) ZERO_ARCHDEF=SPARC ;;
x86_64*) ZERO_ARCHDEF=AMD64 ;;
x86) ZERO_ARCHDEF=IA32 ;;
*) ZERO_ARCHDEF=$(echo "${OPENJDK_TARGET_CPU_LEGACY_LIB}" | tr a-z A-Z)
esac
AC_SUBST(ZERO_ARCHDEF)
])
AC_DEFUN([PLATFORM_SET_RELEASE_FILE_OS_VALUES],

View File

@ -225,6 +225,7 @@ BUILD_VARIANT_RELEASE:=@BUILD_VARIANT_RELEASE@
# directory.
BUILD_OUTPUT:=@BUILD_OUTPUT@
# Colon left out to be able to override IMAGES_OUTPUTDIR for bootcycle-images
LANGTOOLS_OUTPUTDIR=$(BUILD_OUTPUT)/langtools
CORBA_OUTPUTDIR=$(BUILD_OUTPUT)/corba
JAXP_OUTPUTDIR=$(BUILD_OUTPUT)/jaxp
@ -376,6 +377,7 @@ AR:=@FIXPATH@ @AR@
ARFLAGS:=@ARFLAGS@
NM:=@NM@
GNM:=@GNM@
STRIP:=@STRIP@
MCS:=@MCS@
@ -522,6 +524,7 @@ FILE:=@FILE@
HG:=@HG@
OBJCOPY:=@OBJCOPY@
SETFILE:=@SETFILE@
XATTR:=@XATTR@
FIXPATH:=@FIXPATH@
@ -634,6 +637,8 @@ INSTALL_SYSCONFDIR=@sysconfdir@
# Name of Service Agent library
SALIB_NAME=@SALIB_NAME@
INCLUDE_SA=@INCLUDE_SA@
OS_VERSION_MAJOR:=@OS_VERSION_MAJOR@
OS_VERSION_MINOR:=@OS_VERSION_MINOR@
OS_VERSION_MICRO:=@OS_VERSION_MICRO@
@ -643,16 +648,17 @@ JDK_IMAGE_SUBDIR:=j2sdk-image
JRE_IMAGE_SUBDIR:=j2re-image
JDK_OVERLAY_IMAGE_SUBDIR:=j2sdk-overlay-image
JRE_OVERLAY_IMAGE_SUBDIR:=j2re-overlay-image
JDK_IMAGE_DIR:=$(IMAGES_OUTPUTDIR)/$(JDK_IMAGE_SUBDIR)
JRE_IMAGE_DIR:=$(IMAGES_OUTPUTDIR)/$(JRE_IMAGE_SUBDIR)
JDK_OVERLAY_IMAGE_DIR:=$(IMAGES_OUTPUTDIR)/$(JDK_OVERLAY_IMAGE_SUBDIR)
JRE_OVERLAY_IMAGE_DIR:=$(IMAGES_OUTPUTDIR)/$(JRE_OVERLAY_IMAGE_SUBDIR)
# Colon left out to be able to override output dir for bootcycle-images
JDK_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(JDK_IMAGE_SUBDIR)
JRE_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(JRE_IMAGE_SUBDIR)
JDK_OVERLAY_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(JDK_OVERLAY_IMAGE_SUBDIR)
JRE_OVERLAY_IMAGE_DIR=$(IMAGES_OUTPUTDIR)/$(JRE_OVERLAY_IMAGE_SUBDIR)
# Macosx bundles directory definitions
JDK_BUNDLE_SUBDIR:=j2sdk-bundle/jdk$(JDK_VERSION).jdk/Contents
JRE_BUNDLE_SUBDIR:=j2re-bundle/jre$(JDK_VERSION).jre/Contents
JDK_BUNDLE_DIR:=$(IMAGES_OUTPUTDIR)/$(JDK_BUNDLE_SUBDIR)
JRE_BUNDLE_DIR:=$(IMAGES_OUTPUTDIR)/$(JRE_BUNDLE_SUBDIR)
JDK_BUNDLE_SUBDIR=j2sdk-bundle/jdk$(JDK_VERSION).jdk/Contents
JRE_BUNDLE_SUBDIR=j2re-bundle/jre$(JDK_VERSION).jre/Contents
JDK_BUNDLE_DIR=$(IMAGES_OUTPUTDIR)/$(JDK_BUNDLE_SUBDIR)
JRE_BUNDLE_DIR=$(IMAGES_OUTPUTDIR)/$(JRE_BUNDLE_SUBDIR)
# Include the custom-spec.gmk file if it exists
-include $(dir @SPEC@)/custom-spec.gmk

View File

@ -441,8 +441,10 @@ fi
AC_SUBST(AS)
if test "x$OPENJDK_TARGET_OS" = xsolaris; then
AC_PATH_PROGS(NM, [gnm nm])
AC_PATH_PROG(NM, nm)
BASIC_FIXUP_EXECUTABLE(NM)
AC_PATH_PROG(GNM, gnm)
BASIC_FIXUP_EXECUTABLE(GNM)
AC_PATH_PROG(STRIP, strip)
BASIC_FIXUP_EXECUTABLE(STRIP)
AC_PATH_PROG(MCS, mcs)
@ -450,6 +452,8 @@ if test "x$OPENJDK_TARGET_OS" = xsolaris; then
elif test "x$OPENJDK_TARGET_OS" != xwindows; then
AC_CHECK_TOOL(NM, nm)
BASIC_FIXUP_EXECUTABLE(NM)
GNM="$NM"
AC_SUBST(GNM)
AC_CHECK_TOOL(STRIP, strip)
BASIC_FIXUP_EXECUTABLE(STRIP)
fi

View File

@ -70,7 +70,7 @@ define add_idl_package
$(PREFIXES) \
$4
$(RM) -f $$(addprefix $3/$$($4_TMPDIR)/,$6)
$(CP) -rp $3/$$($4_TMPDIR)/* $3
$(CP) -r $3/$$($4_TMPDIR)/* $3
($(CD) $3/$$($4_TMPDIR) && $(FIND) . -type f | $(SED) 's!\./!$3/!g' | $(NAWK) '{ print $$$$1 ": $4" }' > $5)
$(RM) -rf $3/$$($4_TMPDIR)
endef

View File

@ -86,7 +86,7 @@ define SetupArchive
# NOTE: $2 is dependencies, not a named argument!
$(foreach i,3 4 5 6 7 8 9 10 11 12 13 14 15, $(if $($i),$1_$(strip $($i)))$(NEWLINE))
$(call LogSetupMacroEntry,SetupArchive($1),<dependencies>,$3,$4,$5,$6,$7,$8,$9,$(10),$(11),$(12),$(13),$(14),$(15))
$(if $(findstring $(LOG),debug trace), $(info *[2] <dependencies> = $(strip $2)))
$(if $(findstring $(LOG_LEVEL),debug trace), $(info *[2] <dependencies> = $(strip $2)))
$(if $(16),$(error Internal makefile error: Too many arguments to SetupArchive, please update JavaCompilation.gmk))
$1_JARMAIN:=$(strip $$($1_JARMAIN))
@ -505,7 +505,7 @@ define SetupJavaCompilation
--permit-unidentified-artifacts \
--permit-sources-without-package \
--compare-found-sources $$($1_BIN)/_the.batch.tmp \
--log=$(LOG) \
--log=$(LOG_LEVEL) \
$$($1_SJAVAC_ARGS) \
$$($1_FLAGS) \
$$($1_HEADERS_ARG) \

View File

@ -64,6 +64,10 @@ HOTSPOT_AVAILABLE := $(if $(wildcard $(root_dir)/hotspot),true,false)
# Build with the configure bridge. After running configure, restart make
# to parse the new spec file.
BRIDGE_TARGETS := all
# Add bootcycle-images target if legacy variable is set.
ifeq ($(SKIP_BOOT_CYCLE),false)
BRIDGE_TARGETS += bootcycle-images
endif
bridgeBuild: bridge2configure
@cd $(root_dir) && $(MAKE) -f NewMakefile.gmk $(BRIDGE_TARGETS)
@ -99,6 +103,9 @@ endif
ifdef ALT_FREETYPE_HEADERS_PATH
@$(ECHO) " --with-freetype=$(call UnixPath,$(ALT_FREETYPE_HEADERS_PATH)/..) " >> $@.tmp
endif
ifdef ENABLE_SJAVAC
@$(ECHO) " --enable-sjavac" >> $@.tmp
endif
ifeq ($(HOTSPOT_AVAILABLE),false)
ifdef ALT_JDK_IMPORT_PATH
@$(ECHO) " --with-import-hotspot=$(call UnixPath,$(ALT_JDK_IMPORT_PATH)) " >> $@.tmp

View File

@ -175,9 +175,8 @@ sign-jars-only: start-make
@($(CD) $(JDK_TOPDIR)/makefiles && $(BUILD_LOG_WRAPPER) $(MAKE) $(MAKE_ARGS) -f BuildJdk.gmk sign-jars)
@$(call TargetExit)
bootcycle-images:
@$(ECHO) Boot cycle build step 1: Building the JDK image normally
@($(CD) $(SRC_ROOT)/common/makefiles && $(BUILD_LOG_WRAPPER) $(MAKE) SPEC=$(SPEC) images)
bootcycle-images: images bootcycle-images-only
bootcycle-images-only: start-make
@$(ECHO) Boot cycle build step 2: Building a new JDK image using previously built image
@($(CD) $(SRC_ROOT)/common/makefiles && $(BUILD_LOG_WRAPPER) $(MAKE) SPEC=$(dir $(SPEC))bootcycle-spec.gmk images)

View File

@ -328,7 +328,7 @@ $(ECHO) $1/$(HGTIP_FILENAME)
endef
define SetupLogging
ifeq ($$(LOG), trace)
ifeq ($$(LOG_LEVEL),trace)
# Shell redefinition trick inspired by http://www.cmcrossroads.com/ask-mr-make/6535-tracing-rule-execution-in-gnu-make
# For each target executed, will print
# Building <TARGET> (from <FIRST PREREQUISITE>) (<ALL NEWER PREREQUISITES> newer)
@ -339,17 +339,17 @@ define SetupLogging
endif
# Never remove warning messages; this is just for completeness
LOG_WARN=
ifneq ($$(findstring $$(LOG),info debug trace),)
ifneq ($$(findstring $$(LOG_LEVEL),info debug trace),)
LOG_INFO=
else
LOG_INFO=> /dev/null
endif
ifneq ($$(findstring $$(LOG),debug trace),)
ifneq ($$(findstring $$(LOG_LEVEL),debug trace),)
LOG_DEBUG=
else
LOG_DEBUG=> /dev/null
endif
ifneq ($$(findstring $$(LOG),trace),)
ifneq ($$(findstring $$(LOG_LEVEL),trace),)
LOG_TRACE=
else
LOG_TRACE=> /dev/null
@ -362,7 +362,7 @@ $(eval $(call SetupLogging))
# This is to be called by all SetupFoo macros
define LogSetupMacroEntry
$(if $(26),$(error Internal makefile error: Too many arguments to LogSetupMacroEntry, please update MakeBase.gmk))
$(if $(findstring $(LOG),debug trace), $(info $1 $(foreach i,2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25,$(if $($i),$(NEWLINE) $(strip [$i] $($i))))))
$(if $(findstring $(LOG_LEVEL),debug trace), $(info $1 $(foreach i,2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25,$(if $($i),$(NEWLINE) $(strip [$i] $($i))))))
endef
# Make directory without forking mkdir if not needed
@ -374,15 +374,24 @@ endef
ifeq ($(OPENJDK_TARGET_OS),solaris)
# On Solaris, if the target is a symlink and exists, cp won't overwrite.
# Cp has to operate in recursive mode to allow for -P flag, to preserve soft links. If the
# name of the target file differs from the source file, rename after copy.
define install-file
$(MKDIR) -p $(@D)
$(RM) '$@'
$(CP) -f -r -P '$<' '$(@D)'
if [ "$(@F)" != "$(<F)" ]; then $(MV) '$(@D)/$(<F)' '$@'; fi
endef
else ifeq ($(OPENJDK_TARGET_OS),macosx)
# On mac, extended attributes sometimes creep into the source files, which may later
# cause the creation of ._* files which confuses testing. Clear these with xattr if
# set. Some files get their write permissions removed after being copied to the
# output dir. When these are copied again to images, xattr would fail. By only clearing
# attributes when they are present, failing on this is avoided.
define install-file
$(MKDIR) -p $(@D)
$(CP) -fpRP '$<' '$@'
$(CP) -fRP '$<' '$@'
if [ -n "`$(XATTR) -l '$@'`" ]; then $(XATTR) -c '$@'; fi
endef
else
define install-file

View File

@ -184,26 +184,34 @@ define ParseLogLevel
LOG_STRIPPED2=$$(subst nofile,,$$(LOG_STRIPPED1))
# We might have ended up with a leading comma. Remove it
LOG_STRIPPED3=$$(strip $$(patsubst $$(COMMA)%,%,$$(LOG_STRIPPED2)))
override LOG:=$$(LOG_STRIPPED3)
LOG_LEVEL:=$$(LOG_STRIPPED3)
else
LOG_LEVEL:=$$(LOG)
endif
ifeq ($$(LOG),)
ifeq ($$(LOG_LEVEL),)
# Set LOG to "warn" as default if not set (and no VERBOSE given)
override LOG=warn
override LOG_LEVEL=warn
endif
ifeq ($$(LOG),warn)
ifeq ($$(LOG_LEVEL),warn)
VERBOSE=-s
else ifeq ($$(LOG),info)
else ifeq ($$(LOG_LEVEL),info)
VERBOSE=-s
else ifeq ($$(LOG),debug)
else ifeq ($$(LOG_LEVEL),debug)
VERBOSE=
else ifeq ($$(LOG),trace)
else ifeq ($$(LOG_LEVEL),trace)
VERBOSE=
else
$$(info Error: LOG must be one of: warn, info, debug or trace.)
$$(eval $$(call FatalError))
endif
else
# Provide resonable interpretations of LOG_LEVEL if VERBOSE is given.
ifeq ($(VERBOSE),)
LOG_LEVEL:=debug
else
LOG_LEVEL:=warn
endif
ifneq ($$(LOG),)
# We have both a VERBOSE and a LOG argument. This is OK only if this is a repeated call by ourselves,
# but complain if this is the top-level make call.

View File

@ -205,3 +205,4 @@ e41fb1aa0329767b2737303c994e38bede1baa07 jdk8-b79
2a00aeeb466b9dee22508f6261f63b70f9c696fe jdk8-b81
48e1bc77004d9af575b733c04637b98fd17603c2 jdk8-b82
a45bb25a67c7517b45f00c9682e317f46fecbba9 jdk8-b83
928f8b888deb785cbd7bbd5f951cd6880f11f14e jdk8-b84

View File

@ -328,3 +328,5 @@ dd6350b4abc4a6c19c89dd982cc0e4f3d119885c hs25-b22
e3a41fc0234895eba4f272b984f7dacff495f8eb hs25-b24
1c8db54ee9f315e20d6d5d9bf0b5c10349e9d301 jdk8-b83
8d0f263a370c5f3e61791bb06054560804117288 hs25-b25
af788b85010ebabbc1e8f52c6766e08c7a95cf99 jdk8-b84
a947f40fb536e5b9e0aa210cf26abb430f80887a hs25-b26

View File

@ -572,9 +572,14 @@ public class WindbgDebuggerLocal extends DebuggerBase implements WindbgDebugger
DTFWHome = sysRoot + File.separator + ".." + File.separator +
"Program Files" + File.separator + "Debugging Tools For Windows";
searchList.add(DTFWHome);
searchList.add(DTFWHome + " (x86)");
searchList.add(DTFWHome + " (x64)");
// Only add the search path for the current CPU architecture:
String cpu = PlatformInfo.getCPU();
if (cpu.equals("x86")) {
searchList.add(DTFWHome + " (x86)");
} else if (cpu.equals("amd64")) {
searchList.add(DTFWHome + " (x64)");
}
// The last place to search is the system directory:
searchList.add(sysRoot + File.separator + "system32");
}

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -190,6 +190,17 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
# This bit is needed to enable local rebuilds.
# Unless the makefile itself sets LP64, any environmental
# setting of LP64 will interfere with the build.
LP64_SETTING/32 = LP64 = \#empty
LP64_SETTING/64 = LP64 = 1
DATA_MODE/i486 = 32
DATA_MODE/amd64 = 64
DATA_MODE = $(DATA_MODE/$(BUILDARCH))
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ...
$(QUIETLY) ( \
@ -212,6 +223,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
echo "OPENJDK = $(OPENJDK)"; \
echo "$(LP64_SETTING/$(DATA_MODE))"; \
echo; \
echo "# Used for platform dispatching"; \
echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \

View File

@ -1,6 +1,6 @@
#! /bin/sh
#
# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -28,44 +28,38 @@
set -u
if [ $# != 2 ]; then
echo "Usage : $0 Build_Options Location"
echo "Build Options : debug or optimized or basicdebug or basic or clean"
echo "Location : specify any workspace which has gamma sources"
if [ $# -lt 1 ]; then
echo "Usage : $0 BuildTarget [LP64=1] [BuildOptions]"
echo " Server VM | Client VM"
echo "BuildTarget : debug | debug1"
echo " fastdebug | fastdebug1"
echo " jvmg | jvmg1"
echo " optimized | optimized1"
echo " profiled | profiled1"
echo " product | product1"
exit 1
fi
if [ "${JAVA_HOME-}" = "" -o ! -d "${JAVA_HOME-}" -o ! -d ${JAVA_HOME-}/jre/lib/ ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "JAVA_HOME: ${JAVA_HOME-}"
exit 1
fi
# Just in case:
case ${JAVA_HOME} in
/*) true;;
?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
esac
JAVA_HOME=`( cd $JAVA_HOME; pwd )`
case `uname -m` in
i386|i486|i586|i686)
mach=i386
;;
*)
echo "Unsupported machine: " `uname -m`
exit 1
;;
esac
if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/bsd"
echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/bsd"
exit 1
if [ "${ALT_BOOTDIR-}" = "" -o ! -d "${ALT_BOOTDIR-}" -o ! -d ${ALT_BOOTDIR-}/jre/lib/ ]; then
ALT_BOOTDIR=${JAVA_HOME}
fi
# build in current directory by default
if [ "${ALT_OUTPUTDIR-}" = "" -o ! -d "${ALT_OUTPUTDIR-}" ]; then
ALT_OUTPUTDIR=`(pwd)`
fi
LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
# This is necessary as long as we are using the old launcher
# with the new distribution format:
CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
HOTSPOT_SRC=`(dirname $0)`/..
HOTSPOT_SRC=`(cd ${HOTSPOT_SRC}; pwd)`
for gm in gmake gnumake
do
@ -74,22 +68,25 @@ do
done
: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
# quiet build by default
Quiet="MAKE_VERBOSE="
# no debug info by default
NoDebugInfo="ENABLE_FULL_DEBUG_SYMBOLS="
LANG=C
echo "### ENVIRONMENT SETTINGS:"
export HOTSPOT_SRC ; echo "HOTSPOT_SRC=$HOTSPOT_SRC"
export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
export CLASSPATH ; echo "CLASSPATH=$CLASSPATH"
export ALT_BOOTDIR ; echo "ALT_BOOTDIR=$ALT_BOOTDIR"
export ALT_OUTPUTDIR ; echo "ALT_OUTPUTDIR=$ALT_OUTPUTDIR"
export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
export LANG ; echo "LANG=$LANG"
echo "###"
Build_Options=$1
Location=$2
case ${Location} in
/*) true;;
?*) Location=`(cd ${Location}; pwd)`;;
esac
BuildOptions="$Quiet $NoDebugInfo $*"
echo \
${GNUMAKE} -f ${Location}/make/bsd/Makefile $Build_Options GAMMADIR=${Location}
${GNUMAKE} -f ${Location}/make/bsd/Makefile $Build_Options GAMMADIR=${Location}
${GNUMAKE} -f ${HOTSPOT_SRC}/make/Makefile $BuildOptions GAMMADIR=${HOTSPOT_SRC}
${GNUMAKE} -f ${HOTSPOT_SRC}/make/Makefile $BuildOptions GAMMADIR=${HOTSPOT_SRC}

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=25
HS_BUILD_NUMBER=26
JDK_MAJOR_VER=1
JDK_MINOR_VER=8

View File

@ -1,98 +0,0 @@
#! /bin/sh
#
# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Make sure the variable JAVA_HOME is set before running this script.
set -u
if [ $# != 2 ]; then
echo "Usage : $0 Build_Options Location"
echo "Build Options : debug or optimized or basicdebug or basic or clean"
echo "Location : specify any workspace which has gamma sources"
exit 1
fi
# Just in case:
case ${JAVA_HOME} in
/*) true;;
?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
esac
case `uname -m` in
i386|i486|i586|i686)
mach=i386
;;
x86_64)
mach=amd64
;;
*)
echo "Unsupported machine: " `uname -m`
exit 1
;;
esac
if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/${mach} ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux"
echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/linux"
exit 1
fi
LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
# This is necessary as long as we are using the old launcher
# with the new distribution format:
CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
for gm in gmake gnumake
do
if [ "${GNUMAKE-}" != "" ]; then break; fi
($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
done
: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
echo "### ENVIRONMENT SETTINGS:"
export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
export CLASSPATH ; echo "CLASSPATH=$CLASSPATH"
export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
echo "###"
Build_Options=$1
Location=$2
case ${Location} in
/*) true;;
?*) Location=`(cd ${Location}; pwd)`;;
esac
echo \
${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location}
${GNUMAKE} -f ${Location}/make/linux/Makefile $Build_Options GAMMADIR=${Location}

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -183,6 +183,19 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
# This bit is needed to enable local rebuilds.
# Unless the makefile itself sets LP64, any environmental
# setting of LP64 will interfere with the build.
LP64_SETTING/32 = LP64 = \#empty
LP64_SETTING/64 = LP64 = 1
DATA_MODE/i486 = 32
DATA_MODE/sparc = 32
DATA_MODE/sparcv9 = 64
DATA_MODE/amd64 = 64
DATA_MODE = $(DATA_MODE/$(BUILDARCH))
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ...
$(QUIETLY) ( \
@ -205,6 +218,7 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
echo "OPENJDK = $(OPENJDK)"; \
echo "$(LP64_SETTING/$(DATA_MODE))"; \
echo; \
echo "# Used for platform dispatching"; \
echo "TARGET_DEFINES = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \

View File

@ -1,127 +0,0 @@
#! /bin/sh
#
# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# Make sure the variable JAVA_HOME is set before running this script.
set -u
usage() {
(
echo "Usage : $0 [-sb | -sbfast] config ws_path"
echo ""
echo "Where:"
echo " -sb ::= enable source browser info generation for"
echo " all configs during compilation"
echo ""
echo " -sbfast ::= enable source browser info generation for"
echo " all configs without compilation"
echo ""
echo " config ::= debug | debug1 | debugcore"
echo " fastdebug | fastdebug1 | fastdebugcore"
echo " jvmg | jvmg1 | jvmgcore"
echo " optimized | optimized1 | optimizedcore"
echo " profiled | profiled1 | profiledcore"
echo " product | product1 | productcore"
echo ""
echo " ws_path ::= path to HotSpot workspace"
) >&2
exit 1
}
# extract possible options
options=""
if [ $# -gt 2 ]; then
case "$1" in
-sb)
options="CFLAGS_BROWSE=-xsb"
shift
;;
-sbfast)
options="CFLAGS_BROWSE=-xsbfast"
shift
;;
*)
echo "Unknown option: '$1'" >&2
usage
;;
esac
fi
# should be just two args left at this point
if [ $# != 2 ]; then
usage
fi
# Just in case:
case ${JAVA_HOME} in
/*) true;;
?*) JAVA_HOME=`( cd $JAVA_HOME; pwd )`;;
esac
if [ "${JAVA_HOME}" = "" -o ! -d "${JAVA_HOME}" -o ! -d ${JAVA_HOME}/jre/lib/`uname -p` ]; then
echo "JAVA_HOME needs to be set to a valid JDK path"
echo "ksh : export JAVA_HOME=/net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris"
echo "csh : setenv JAVA_HOME /net/tetrasparc/export/gobi/JDK1.2_fcs_V/solaris"
exit 1
fi
LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/`uname -p`:\
${JAVA_HOME}/jre/lib/`uname -p`/native_threads:${LD_LIBRARY_PATH-.}
# This is necessary as long as we are using the old launcher
# with the new distribution format:
CLASSPATH=${JAVA_HOME}/jre/lib/rt.jar:${CLASSPATH-.}
for gm in gmake gnumake
do
if [ "${GNUMAKE-}" != "" ]; then break; fi
($gm --version >/dev/null) 2>/dev/null && GNUMAKE=$gm
done
: ${GNUMAKE:?'Cannot locate the gnumake program. Stop.'}
echo "### ENVIRONMENT SETTINGS:"
export JAVA_HOME ; echo "JAVA_HOME=$JAVA_HOME"
export LD_LIBRARY_PATH ; echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
export CLASSPATH ; echo "CLASSPATH=$CLASSPATH"
export GNUMAKE ; echo "GNUMAKE=$GNUMAKE"
echo "###"
config=$1
ws_path=$2
case ${ws_path} in
/*) true;;
?*) ws_path=`(cd ${ws_path}; pwd)`;;
esac
echo \
${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \
$config GAMMADIR=${ws_path} $options
${GNUMAKE} -f ${ws_path}/make/solaris/Makefile \
$config GAMMADIR=${ws_path} $options

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -176,6 +176,19 @@ $(SIMPLE_DIRS):
# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
# This bit is needed to enable local rebuilds.
# Unless the makefile itself sets LP64, any environmental
# setting of LP64 will interfere with the build.
LP64_SETTING/32 = LP64 = \#empty
LP64_SETTING/64 = LP64 = 1
DATA_MODE/i486 = 32
DATA_MODE/sparc = 32
DATA_MODE/sparcv9 = 64
DATA_MODE/amd64 = 64
DATA_MODE = $(DATA_MODE/$(BUILDARCH))
flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
@echo Creating $@ ...
$(QUIETLY) ( \

View File

@ -51,6 +51,16 @@ RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_info->deoptimize_on_exception()) {
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ call(a, relocInfo::runtime_call_type);
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
return;
}
if (_index->is_register()) {
__ mov(_index->as_register(), G4);
} else {
@ -64,11 +74,22 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
#ifdef ASSERT
__ should_not_reach_here();
#endif
debug_only(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
_info = new CodeEmitInfo(info);
}
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ call(a, relocInfo::runtime_call_type);
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
@ -99,10 +120,17 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
address a;
if (_info->deoptimize_on_exception()) {
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
} else {
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
}
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);
__ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
relocInfo::runtime_call_type);
__ call(a, relocInfo::runtime_call_type);
__ delayed()->nop();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);

View File

@ -3361,6 +3361,45 @@ void LIR_Assembler::get_thread(LIR_Opr result_reg) {
__ mov(G2_thread, result_reg->as_register());
}
#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
assert(op->code() == lir_assert, "must be");
if (op->in_opr1()->is_valid()) {
assert(op->in_opr2()->is_valid(), "both operands must be valid");
comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
} else {
assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
assert(op->condition() == lir_cond_always, "no other conditions allowed");
}
Label ok;
if (op->condition() != lir_cond_always) {
Assembler::Condition acond;
switch (op->condition()) {
case lir_cond_equal: acond = Assembler::equal; break;
case lir_cond_notEqual: acond = Assembler::notEqual; break;
case lir_cond_less: acond = Assembler::less; break;
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
case lir_cond_greater: acond = Assembler::greater; break;
case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
default: ShouldNotReachHere();
};
__ br(acond, false, Assembler::pt, ok);
__ delayed()->nop();
}
if (op->halt()) {
const char* str = __ code_string(op->msg());
__ stop(str);
} else {
breakpoint();
}
__ bind(ok);
}
#endif
void LIR_Assembler::peephole(LIR_List* lir) {
LIR_OpList* inst = lir->instructions_list();

View File

@ -324,7 +324,7 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = true;
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@ -339,12 +339,9 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
array.load_item();
index.load_nonconstant();
if (use_length) {
needs_range_check = x->compute_needs_range_check();
if (needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check) {
value.load_item();

View File

@ -987,6 +987,25 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break;
#endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);
OopMap* oop_map = save_live_registers(sasm);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, oop_map);
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
restore_live_registers(sasm);
__ restore();
__ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
__ delayed()->nop();
}
break;
default:
{ __ set_info("unimplemented entry", dont_gc_arguments);
__ save_frame(0);

View File

@ -101,6 +101,15 @@ RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
if (_info->deoptimize_on_exception()) {
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
return;
}
// pass the array index on stack because all registers must be preserved
if (_index->is_cpu_register()) {
ce->store_parameter(_index->as_register(), 0);
@ -115,9 +124,22 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
}
__ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
_info = new CodeEmitInfo(info);
}
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
__ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
if (_offset != -1) {
@ -414,10 +436,19 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
address a;
if (_info->deoptimize_on_exception()) {
// Deoptimize, do not throw the exception, because it is probably wrong to do it here.
a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
} else {
a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
}
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
__ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}

View File

@ -3755,6 +3755,44 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
}
}
#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
assert(op->code() == lir_assert, "must be");
if (op->in_opr1()->is_valid()) {
assert(op->in_opr2()->is_valid(), "both operands must be valid");
comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
} else {
assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
assert(op->condition() == lir_cond_always, "no other conditions allowed");
}
Label ok;
if (op->condition() != lir_cond_always) {
Assembler::Condition acond = Assembler::zero;
switch (op->condition()) {
case lir_cond_equal: acond = Assembler::equal; break;
case lir_cond_notEqual: acond = Assembler::notEqual; break;
case lir_cond_less: acond = Assembler::less; break;
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
case lir_cond_greater: acond = Assembler::greater; break;
case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
default: ShouldNotReachHere();
}
__ jcc(acond, ok);
}
if (op->halt()) {
const char* str = __ code_string(op->msg());
__ stop(str);
} else {
breakpoint();
}
__ bind(ok);
}
#endif
void LIR_Assembler::membar() {
// QQQ sparc TSO uses this,

View File

@ -263,7 +263,7 @@ void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp)
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
bool needs_range_check = true;
bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@ -278,12 +278,10 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
array.load_item();
index.load_nonconstant();
if (use_length) {
needs_range_check = x->compute_needs_range_check();
if (needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (needs_store_check) {
value.load_item();

View File

@ -675,7 +675,8 @@ void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
switch (op2->code()) {
case lir_cmp:
case lir_cmp_fd2i:
case lir_ucmp_fd2i: {
case lir_ucmp_fd2i:
case lir_assert: {
assert(left->is_fpu_register(), "invalid LIR");
assert(right->is_fpu_register(), "invalid LIR");

View File

@ -1807,6 +1807,24 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
break;
#endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
OopMap* map = save_live_registers(sasm, 1);
int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, map);
restore_live_registers(sasm);
__ leave();
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
__ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
}
break;
default:
{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
__ movptr(rax, (int)id);

View File

@ -1299,25 +1299,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ push(rdx);
#endif // _LP64
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed.
if (VM_Version::supports_sse()) {
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
}
else if (CheckJNICalls ) {
__ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
#ifndef _LP64
// Either restore the x87 floating pointer control word after returning
// from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) {
__ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
}
#endif // _LP64
// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();
// change thread state
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

View File

@ -4765,6 +4765,31 @@ void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
pop_CPU_state();
}
void MacroAssembler::restore_cpu_control_state_after_jni() {
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed (with -Xcheck:jni flag).
if (VM_Version::supports_sse()) {
if (RestoreMXCSROnJNICalls) {
ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
} else if (CheckJNICalls) {
call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
if (VM_Version::supports_avx()) {
// Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
vzeroupper();
}
#ifndef _LP64
// Either restore the x87 floating pointer control word after returning
// from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) {
call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
}
#endif // _LP64
}
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
if (UseCompressedKlassPointers) {
@ -5759,6 +5784,8 @@ void MacroAssembler::string_compare(Register str1, Register str2,
addptr(result, stride2);
subl(cnt2, stride2);
jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
// clean upper bits of YMM registers
vzeroupper();
// compare wide vectors tail
bind(COMPARE_WIDE_TAIL);
@ -5772,6 +5799,8 @@ void MacroAssembler::string_compare(Register str1, Register str2,
// Identifies the mismatching (higher or lower)16-bytes in the 32-byte vectors.
bind(VECTOR_NOT_EQUAL);
// clean upper bits of YMM registers
vzeroupper();
lea(str1, Address(str1, result, scale));
lea(str2, Address(str2, result, scale));
jmp(COMPARE_16_CHARS);
@ -6028,6 +6057,10 @@ void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Regist
// That's it
bind(DONE);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
vzeroupper();
}
}
void MacroAssembler::generate_fill(BasicType t, bool aligned,
@ -6157,6 +6190,10 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
vmovdqu(Address(to, 0), xtmp);
addptr(to, 32);
subl(count, 8 << shift);
BIND(L_check_fill_8_bytes);
// clean upper bits of YMM registers
vzeroupper();
} else {
// Fill 32-byte chunks
pshufd(xtmp, xtmp, 0);
@ -6180,8 +6217,9 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
addptr(to, 32);
subl(count, 8 << shift);
jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
BIND(L_check_fill_8_bytes);
}
BIND(L_check_fill_8_bytes);
addl(count, 8 << shift);
jccb(Assembler::zero, L_exit);
jmpb(L_fill_8_bytes);
@ -6316,6 +6354,10 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
jccb(Assembler::lessEqual, L_copy_16_chars);
bind(L_copy_16_chars_exit);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
vzeroupper();
}
subptr(len, 8);
jccb(Assembler::greater, L_copy_8_chars_exit);

View File

@ -582,6 +582,9 @@ class MacroAssembler: public Assembler {
// only if +VerifyFPU
void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
// Verify or restore cpu control state after JNI call
void restore_cpu_control_state_after_jni();
// prints msg, dumps registers and stops execution
void stop(const char* msg);

View File

@ -2065,6 +2065,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(RuntimeAddress(native_func));
// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();
// WARNING - on Windows Java Natives use pascal calling convention and pop the
// arguments off of the stack. We could just re-adjust the stack pointer here
// and continue to do SP relative addressing but we instead switch to FP

View File

@ -2315,16 +2315,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ call(RuntimeAddress(native_func));
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed.
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
}
else if (CheckJNICalls ) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
}
// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();
// Unpack native results.
switch (ret_type) {

View File

@ -835,6 +835,11 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_64_bytes);
__ subl(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
if (UseUnalignedLoadStores && (UseAVX >= 2)) {
// clean upper bits of YMM registers
__ vzeroupper();
}
__ addl(qword_count, 8);
__ jccb(Assembler::zero, L_exit);
//

View File

@ -1331,6 +1331,10 @@ class StubGenerator: public StubCodeGenerator {
}
__ addptr(qword_count, 4);
__ BIND(L_end);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
__ vzeroupper();
}
} else {
// Copy 32-bytes per iteration
__ BIND(L_loop);
@ -1404,6 +1408,10 @@ class StubGenerator: public StubCodeGenerator {
}
__ subptr(qword_count, 4);
__ BIND(L_end);
if (UseAVX >= 2) {
// clean upper bits of YMM registers
__ vzeroupper();
}
} else {
// Copy 32-bytes per iteration
__ BIND(L_loop);

View File

@ -1080,22 +1080,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// result potentially in rdx:rax or ST0
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed.
if (VM_Version::supports_sse()) {
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
}
else if (CheckJNICalls ) {
__ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
// Either restore the x87 floating pointer control word after returning
// from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) {
__ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
}
// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();
// save potential result in ST(0) & rdx:rax
// (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -

View File

@ -1079,15 +1079,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ call(rax);
// result potentially in rax or xmm0
// Depending on runtime options, either restore the MXCSR
// register after returning from the JNI Call or verify that
// it wasn't changed during -Xcheck:jni.
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
}
else if (CheckJNICalls) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
}
// Verify or restore cpu control state after JNI call
__ restore_cpu_control_state_after_jni();
// NOTE: The order of these pushes is known to frame::interpreter_frame_result
// in order to extract the result of a method call. If the order of these

View File

@ -228,10 +228,16 @@ static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CON
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
// Offset hacking within calls.
static int pre_call_FPU_size() {
if (Compile::current()->in_24_bit_fp_mode())
return 6; // fldcw
return 0;
static int pre_call_resets_size() {
int size = 0;
Compile* C = Compile::current();
if (C->in_24_bit_fp_mode()) {
size += 6; // fldcw
}
if (C->max_vector_size() > 16) {
size += 3; // vzeroupper
}
return size;
}
static int preserve_SP_size() {
@ -242,21 +248,21 @@ static int preserve_SP_size() {
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
int offset = 5 + pre_call_FPU_size(); // 5 bytes from start of call to where return address points
int offset = 5 + pre_call_resets_size(); // 5 bytes from start of call to where return address points
if (_method_handle_invoke)
offset += preserve_SP_size();
return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
return 10 + pre_call_FPU_size(); // 10 bytes from start of call to where return address points
return 10 + pre_call_resets_size(); // 10 bytes from start of call to where return address points
}
static int sizeof_FFree_Float_Stack_All = -1;
int MachCallRuntimeNode::ret_addr_offset() {
assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size();
return sizeof_FFree_Float_Stack_All + 5 + pre_call_resets_size();
}
// Indicate if the safepoint node needs the polling page as an input.
@ -272,7 +278,7 @@ bool SafePointNode::needs_polling_address_input() {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
current_offset += pre_call_FPU_size(); // skip fldcw, if any
current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@ -280,7 +286,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
current_offset += pre_call_FPU_size(); // skip fldcw, if any
current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
@ -289,7 +295,7 @@ int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
current_offset += pre_call_FPU_size(); // skip fldcw, if any
current_offset += pre_call_resets_size(); // skip fldcw, if any
current_offset += 5; // skip MOV instruction
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
@ -583,16 +589,20 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
// Remove two words for return addr and rbp,
framesize -= 2*wordSize;
if( C->in_24_bit_fp_mode() ) {
if (C->max_vector_size() > 16) {
st->print("VZEROUPPER");
st->cr(); st->print("\t");
}
if (C->in_24_bit_fp_mode()) {
st->print("FLDCW standard control word");
st->cr(); st->print("\t");
}
if( framesize ) {
if (framesize) {
st->print("ADD ESP,%d\t# Destroy frame",framesize);
st->cr(); st->print("\t");
}
st->print_cr("POPL EBP"); st->print("\t");
if( do_polling() && C->is_method_compilation() ) {
if (do_polling() && C->is_method_compilation()) {
st->print("TEST PollPage,EAX\t! Poll Safepoint");
st->cr(); st->print("\t");
}
@ -602,8 +612,14 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile *C = ra_->C;
if (C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler masm(&cbuf);
masm.vzeroupper();
}
// If method set FPU control word, restore to standard control word
if( C->in_24_bit_fp_mode() ) {
if (C->in_24_bit_fp_mode()) {
MacroAssembler masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
@ -615,12 +631,11 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
if( framesize >= 128 ) {
if (framesize >= 128) {
emit_opcode(cbuf, 0x81); // add SP, #framesize
emit_rm(cbuf, 0x3, 0x00, ESP_enc);
emit_d32(cbuf, framesize);
}
else if( framesize ) {
} else if (framesize) {
emit_opcode(cbuf, 0x83); // add SP, #framesize
emit_rm(cbuf, 0x3, 0x00, ESP_enc);
emit_d8(cbuf, framesize);
@ -628,7 +643,7 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
emit_opcode(cbuf, 0x58 | EBP_enc);
if( do_polling() && C->is_method_compilation() ) {
if (do_polling() && C->is_method_compilation()) {
cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0);
emit_opcode(cbuf,0x85);
emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX
@ -640,7 +655,8 @@ uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
Compile *C = ra_->C;
// If method set FPU control word, restore to standard control word
int size = C->in_24_bit_fp_mode() ? 6 : 0;
if( do_polling() && C->is_method_compilation() ) size += 6;
if (C->max_vector_size() > 16) size += 3; // vzeroupper
if (do_polling() && C->is_method_compilation()) size += 6;
int framesize = C->frame_slots() << LogBytesPerInt;
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
@ -649,7 +665,7 @@ uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
size++; // popl rbp,
if( framesize >= 128 ) {
if (framesize >= 128) {
size += 6;
} else {
size += framesize ? 3 : 0;
@ -1853,20 +1869,26 @@ encode %{
%}
enc_class pre_call_FPU %{
enc_class pre_call_resets %{
// If method sets FPU control word restore it here
debug_only(int off0 = cbuf.insts_size());
if( Compile::current()->in_24_bit_fp_mode() ) {
MacroAssembler masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
if (ra_->C->in_24_bit_fp_mode()) {
MacroAssembler _masm(&cbuf);
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
if (ra_->C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler _masm(&cbuf);
__ vzeroupper();
}
debug_only(int off1 = cbuf.insts_size());
assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
%}
enc_class post_call_FPU %{
// If method sets FPU control word do it here also
if( Compile::current()->in_24_bit_fp_mode() ) {
if (Compile::current()->in_24_bit_fp_mode()) {
MacroAssembler masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
}
@ -1877,17 +1899,17 @@ encode %{
// who we intended to call.
cbuf.set_insts_mark();
$$$emit8$primary;
if ( !_method ) {
if (!_method) {
emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
runtime_call_Relocation::spec(), RELOC_IMM32 );
} else if(_optimized_virtual) {
} else if (_optimized_virtual) {
emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
} else {
emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
static_call_Relocation::spec(), RELOC_IMM32 );
}
if( _method ) { // Emit stub for static call
if (_method) { // Emit stub for static call
emit_java_to_interp(cbuf);
}
%}
@ -12828,7 +12850,7 @@ instruct CallStaticJavaDirect(method meth) %{
ins_cost(300);
format %{ "CALL,static " %}
opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU,
ins_encode( pre_call_resets,
Java_Static_Call( meth ),
call_epilog,
post_call_FPU );
@ -12849,7 +12871,7 @@ instruct CallStaticJavaHandle(method meth, eBPRegP ebp_mh_SP_save) %{
ins_cost(300);
format %{ "CALL,static/MethodHandle " %}
opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU,
ins_encode( pre_call_resets,
preserve_SP,
Java_Static_Call( meth ),
restore_SP,
@ -12870,7 +12892,7 @@ instruct CallDynamicJavaDirect(method meth) %{
format %{ "MOV EAX,(oop)-1\n\t"
"CALL,dynamic" %}
opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU,
ins_encode( pre_call_resets,
Java_Dynamic_Call( meth ),
call_epilog,
post_call_FPU );
@ -12887,7 +12909,7 @@ instruct CallRuntimeDirect(method meth) %{
format %{ "CALL,runtime " %}
opcode(0xE8); /* E8 cd */
// Use FFREEs to clear entries in float stack
ins_encode( pre_call_FPU,
ins_encode( pre_call_resets,
FFree_Float_Stack_All,
Java_To_Runtime( meth ),
post_call_FPU );
@ -12902,7 +12924,7 @@ instruct CallLeafDirect(method meth) %{
ins_cost(300);
format %{ "CALL_LEAF,runtime " %}
opcode(0xE8); /* E8 cd */
ins_encode( pre_call_FPU,
ins_encode( pre_call_resets,
FFree_Float_Stack_All,
Java_To_Runtime( meth ),
Verify_FPU_For_Leaf, post_call_FPU );

View File

@ -399,6 +399,9 @@ source %{
static int preserve_SP_size() {
return 3; // rex.w, op, rm(reg/reg)
}
static int clear_avx_size() {
return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
}
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
@ -406,6 +409,7 @@ static int preserve_SP_size() {
int MachCallStaticJavaNode::ret_addr_offset()
{
int offset = 5; // 5 bytes from start of call to where return address points
offset += clear_avx_size();
if (_method_handle_invoke)
offset += preserve_SP_size();
return offset;
@ -413,11 +417,16 @@ int MachCallStaticJavaNode::ret_addr_offset()
int MachCallDynamicJavaNode::ret_addr_offset()
{
return 15; // 15 bytes from start of call to where return address points
int offset = 15; // 15 bytes from start of call to where return address points
offset += clear_avx_size();
return offset;
}
// In os_cpu .ad file
// int MachCallRuntimeNode::ret_addr_offset()
int MachCallRuntimeNode::ret_addr_offset() {
int offset = 13; // movq r10,#addr; callq (r10)
offset += clear_avx_size();
return offset;
}
// Indicate if the safepoint node needs the polling page as an input,
// it does if the polling page is more than disp32 away.
@ -434,6 +443,7 @@ bool SafePointNode::needs_polling_address_input()
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const
{
current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@ -443,6 +453,7 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
int CallStaticJavaHandleNode::compute_padding(int current_offset) const
{
current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@ -451,6 +462,7 @@ int CallStaticJavaHandleNode::compute_padding(int current_offset) const
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
current_offset += clear_avx_size(); // skip vzeroupper
current_offset += 11; // skip movq instruction + call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@ -764,6 +776,11 @@ int MachPrologNode::reloc() const
void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
Compile* C = ra_->C;
if (C->max_vector_size() > 16) {
st->print("vzeroupper");
st->cr(); st->print("\t");
}
int framesize = C->frame_slots() << LogBytesPerInt;
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return adr already pushed
@ -793,6 +810,13 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
Compile* C = ra_->C;
if (C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler _masm(&cbuf);
__ vzeroupper();
}
int framesize = C->frame_slots() << LogBytesPerInt;
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Remove word for return adr already pushed
@ -2008,6 +2032,25 @@ encode %{
__ bind(miss);
%}
enc_class clear_avx %{
debug_only(int off0 = cbuf.insts_size());
if (ra_->C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
MacroAssembler _masm(&cbuf);
__ vzeroupper();
}
debug_only(int off1 = cbuf.insts_size());
assert(off1 - off0 == clear_avx_size(), "correct size prediction");
%}
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
MacroAssembler _masm(&cbuf);
__ mov64(r10, (int64_t) $meth$$method);
__ call(r10);
%}
enc_class Java_To_Interpreter(method meth)
%{
// CALL Java_To_Interpreter
@ -11366,7 +11409,7 @@ instruct CallStaticJavaDirect(method meth) %{
ins_cost(300);
format %{ "call,static " %}
opcode(0xE8); /* E8 cd */
ins_encode(Java_Static_Call(meth), call_epilog);
ins_encode(clear_avx, Java_Static_Call(meth), call_epilog);
ins_pipe(pipe_slow);
ins_alignment(4);
%}
@ -11384,7 +11427,7 @@ instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
ins_cost(300);
format %{ "call,static/MethodHandle " %}
opcode(0xE8); /* E8 cd */
ins_encode(preserve_SP,
ins_encode(clear_avx, preserve_SP,
Java_Static_Call(meth),
restore_SP,
call_epilog);
@ -11403,7 +11446,7 @@ instruct CallDynamicJavaDirect(method meth)
ins_cost(300);
format %{ "movq rax, #Universe::non_oop_word()\n\t"
"call,dynamic " %}
ins_encode(Java_Dynamic_Call(meth), call_epilog);
ins_encode(clear_avx, Java_Dynamic_Call(meth), call_epilog);
ins_pipe(pipe_slow);
ins_alignment(4);
%}
@ -11416,8 +11459,7 @@ instruct CallRuntimeDirect(method meth)
ins_cost(300);
format %{ "call,runtime " %}
opcode(0xE8); /* E8 cd */
ins_encode(Java_To_Runtime(meth));
ins_encode(clear_avx, Java_To_Runtime(meth));
ins_pipe(pipe_slow);
%}
@ -11429,8 +11471,7 @@ instruct CallLeafDirect(method meth)
ins_cost(300);
format %{ "call_leaf,runtime " %}
opcode(0xE8); /* E8 cd */
ins_encode(Java_To_Runtime(meth));
ins_encode(clear_avx, Java_To_Runtime(meth));
ins_pipe(pipe_slow);
%}
@ -11442,7 +11483,6 @@ instruct CallLeafNoFPDirect(method meth)
ins_cost(300);
format %{ "call_leaf_nofp,runtime " %}
opcode(0xE8); /* E8 cd */
ins_encode(Java_To_Runtime(meth));
ins_pipe(pipe_slow);
%}

View File

@ -167,20 +167,6 @@ julong os::physical_memory() {
return Bsd::physical_memory();
}
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
return size;
#else
julong result = MIN2(size, (julong)3800*M);
if (!is_allocatable(result)) {
// See comments under solaris for alignment considerations
julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
result = MIN2(size, reasonable_size);
}
return result;
#endif // _LP64
}
////////////////////////////////////////////////////////////////////////////////
// environment support

View File

@ -194,20 +194,6 @@ julong os::physical_memory() {
return Linux::physical_memory();
}
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
return size;
#else
julong result = MIN2(size, (julong)3800*M);
if (!is_allocatable(result)) {
// See comments under solaris for alignment considerations
julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
result = MIN2(size, reasonable_size);
}
return result;
#endif // _LP64
}
////////////////////////////////////////////////////////////////////////////////
// environment support

View File

@ -199,7 +199,7 @@ case "$MODE" in
rm -f $GDBSCR
;;
dbx)
$DBX -s $MYDIR/.dbxrc $LAUNCHER $JPARAMS
$DBX -s $HOME/.dbxrc $LAUNCHER $JPARMS
;;
valgrind)
echo Warning: Defaulting to 16Mb heap to make Valgrind run faster, use -Xmx for larger heap

View File

@ -188,4 +188,66 @@ void os::Posix::print_uname_info(outputStream* st) {
st->cr();
}
bool os::has_allocatable_memory_limit(julong* limit) {
struct rlimit rlim;
int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
// if there was an error when calling getrlimit, assume that there is no limitation
// on virtual memory.
bool result;
if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
result = false;
} else {
*limit = (julong)rlim.rlim_cur;
result = true;
}
#ifdef _LP64
return result;
#else
// arbitrary virtual space limit for 32 bit Unices found by testing. If
// getrlimit above returned a limit, bound it with this limit. Otherwise
// directly use it.
const julong max_virtual_limit = (julong)3800*M;
if (result) {
*limit = MIN2(*limit, max_virtual_limit);
} else {
*limit = max_virtual_limit;
}
// bound by actually allocatable memory. The algorithm uses two bounds, an
// upper and a lower limit. The upper limit is the current highest amount of
// memory that could not be allocated, the lower limit is the current highest
// amount of memory that could be allocated.
// The algorithm iteratively refines the result by halving the difference
// between these limits, updating either the upper limit (if that value could
// not be allocated) or the lower limit (if the that value could be allocated)
// until the difference between these limits is "small".
// the minimum amount of memory we care about allocating.
const julong min_allocation_size = M;
julong upper_limit = *limit;
// first check a few trivial cases
if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
*limit = upper_limit;
} else if (!is_allocatable(min_allocation_size)) {
// we found that not even min_allocation_size is allocatable. Return it
// anyway. There is no point to search for a better value any more.
*limit = min_allocation_size;
} else {
// perform the binary search.
julong lower_limit = min_allocation_size;
while ((upper_limit - lower_limit) > min_allocation_size) {
julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
temp_limit = align_size_down_(temp_limit, min_allocation_size);
if (is_allocatable(temp_limit)) {
lower_limit = temp_limit;
} else {
upper_limit = temp_limit;
}
}
*limit = lower_limit;
}
return true;
#endif
}

View File

@ -476,24 +476,6 @@ julong os::physical_memory() {
return Solaris::physical_memory();
}
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
return size;
#else
julong result = MIN2(size, (julong)3835*M);
if (!is_allocatable(result)) {
// Memory allocations will be aligned but the alignment
// is not known at this point. Alignments will
// be at most to LargePageSizeInBytes. Protect
// allocations from alignments up to illegal
// values. If at this point 2G is illegal.
julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes;
result = MIN2(size, reasonable_size);
}
return result;
#endif
}
static hrtime_t first_hrtime = 0;
static const hrtime_t hrtime_hz = 1000*1000*1000;
const int LOCK_BUSY = 1;

View File

@ -686,12 +686,17 @@ julong os::physical_memory() {
return win32::physical_memory();
}
julong os::allocatable_physical_memory(julong size) {
bool os::has_allocatable_memory_limit(julong* limit) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
GlobalMemoryStatusEx(&ms);
#ifdef _LP64
return size;
*limit = (julong)ms.ullAvailVirtual;
return true;
#else
// Limit to 1400m because of the 2gb address space wall
return MIN2(size, (julong)1400*M);
*limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
return true;
#endif
}
@ -3768,6 +3773,8 @@ extern "C" {
}
}
static jint initSock();
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
// Allocate a single page and mark it as readable for safepoint polling
@ -3898,6 +3905,10 @@ jint os::init_2(void) {
if (!success) UseNUMAInterleaving = false;
}
if (initSock() != JNI_OK) {
return JNI_ERR;
}
return JNI_OK;
}
@ -4894,42 +4905,24 @@ LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }
typedef CRITICAL_SECTION mutex_t;
#define mutexInit(m) InitializeCriticalSection(m)
#define mutexDestroy(m) DeleteCriticalSection(m)
#define mutexLock(m) EnterCriticalSection(m)
#define mutexUnlock(m) LeaveCriticalSection(m)
static bool sock_initialized = FALSE;
static mutex_t sockFnTableMutex;
static void initSock() {
static jint initSock() {
WSADATA wsadata;
if (!os::WinSock2Dll::WinSock2Available()) {
jio_fprintf(stderr, "Could not load Winsock 2 (error: %d)\n",
jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
::GetLastError());
return;
return JNI_ERR;
}
if (sock_initialized == TRUE) return;
::mutexInit(&sockFnTableMutex);
::mutexLock(&sockFnTableMutex);
if (os::WinSock2Dll::WSAStartup(MAKEWORD(1,1), &wsadata) != 0) {
jio_fprintf(stderr, "Could not initialize Winsock\n");
if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
::GetLastError());
return JNI_ERR;
}
sock_initialized = TRUE;
::mutexUnlock(&sockFnTableMutex);
return JNI_OK;
}
struct hostent* os::get_host_by_name(char* name) {
if (!sock_initialized) {
initSock();
}
if (!os::WinSock2Dll::WinSock2Available()) {
return NULL;
}
return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

View File

@ -55,20 +55,6 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
// movq r10, <meth>
emit_opcode(cbuf, Assembler::REX_WB);
emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
emit_d64(cbuf, (int64_t) $meth$$method);
// call (r10)
emit_opcode(cbuf, Assembler::REX_B);
emit_opcode(cbuf, 0xFF);
emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
%}
%}
@ -76,8 +62,4 @@ encode %{
source %{
int MachCallRuntimeNode::ret_addr_offset() {
return 13; // movq r10,#addr; callq (r10)
}
%}

View File

@ -55,20 +55,6 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
// movq r10, <meth>
emit_opcode(cbuf, Assembler::REX_WB);
emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
emit_d64(cbuf, (int64_t) $meth$$method);
// call (r10)
emit_opcode(cbuf, Assembler::REX_B);
emit_opcode(cbuf, 0xFF);
emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
%}
%}
@ -76,8 +62,4 @@ encode %{
source %{
int MachCallRuntimeNode::ret_addr_offset() {
return 13; // movq r10,#addr; callq (r10)
}
%}

View File

@ -54,39 +54,10 @@ encode %{
// main source block for now. In future, we can generalize this by
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
// movq r10, <meth>
emit_opcode(cbuf, Assembler::REX_WB);
emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
emit_d64(cbuf, (int64_t) $meth$$method);
// call (r10)
emit_opcode(cbuf, Assembler::REX_B);
emit_opcode(cbuf, 0xFF);
emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
%}
enc_class post_call_verify_mxcsr %{
MacroAssembler _masm(&cbuf);
if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
}
else if (CheckJNICalls) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
}
%}
%}
// Platform dependent source
source %{
int MachCallRuntimeNode::ret_addr_offset() {
return 13; // movq r10,#addr; callq (r10)
}
%}

View File

@ -53,30 +53,11 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime
// No relocation needed
// movq r10, <meth>
emit_opcode(cbuf, Assembler::REX_WB);
emit_opcode(cbuf, 0xB8 | (R10_enc - 8));
emit_d64(cbuf, (int64_t) $meth$$method);
// call (r10)
emit_opcode(cbuf, Assembler::REX_B);
emit_opcode(cbuf, 0xFF);
emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
%}
%}
//
// Platform dependent source
//
source %{
int MachCallRuntimeNode::ret_addr_offset()
{
return 13; // movq r10,#addr; callq (r10)
}
%}

View File

@ -832,6 +832,7 @@ static const char *getRegMask(const char *reg_class_name) {
int length = (int)strlen(rc_name) + (int)strlen(mask) + 5;
char *regMask = new char[length];
sprintf(regMask,"%s%s()", rc_name, mask);
delete[] rc_name;
return regMask;
}
}

View File

@ -191,12 +191,19 @@ static void cost_check(FILE *fp, const char *spaces,
// Macro equivalent to: _kids[0]->valid(FOO) && _kids[1]->valid(BAR)
//
static void child_test(FILE *fp, MatchList &mList) {
if( mList._lchild ) // If left child, check it
fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", ArchDesc::getMachOperEnum(mList._lchild));
if( mList._lchild && mList._rchild ) // If both, add the "&&"
fprintf(fp, " && " );
if( mList._rchild ) // If right child, check it
fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", ArchDesc::getMachOperEnum(mList._rchild));
if (mList._lchild) { // If left child, check it
const char* lchild_to_upper = ArchDesc::getMachOperEnum(mList._lchild);
fprintf(fp, "STATE__VALID_CHILD(_kids[0], %s)", lchild_to_upper);
delete[] lchild_to_upper;
}
if (mList._lchild && mList._rchild) { // If both, add the "&&"
fprintf(fp, " && ");
}
if (mList._rchild) { // If right child, check it
const char* rchild_to_upper = ArchDesc::getMachOperEnum(mList._rchild);
fprintf(fp, "STATE__VALID_CHILD(_kids[1], %s)", rchild_to_upper);
delete[] rchild_to_upper;
}
}
//---------------------------calc_cost-----------------------------------------
@ -206,13 +213,17 @@ static void child_test(FILE *fp, MatchList &mList) {
Expr *ArchDesc::calc_cost(FILE *fp, const char *spaces, MatchList &mList, ProductionState &status) {
fprintf(fp, "%sunsigned int c = ", spaces);
Expr *c = new Expr("0");
if (mList._lchild ) { // If left child, add it in
sprintf(Expr::buffer(), "_kids[0]->_cost[%s]", ArchDesc::getMachOperEnum(mList._lchild));
if (mList._lchild) { // If left child, add it in
const char* lchild_to_upper = ArchDesc::getMachOperEnum(mList._lchild);
sprintf(Expr::buffer(), "_kids[0]->_cost[%s]", lchild_to_upper);
c->add(Expr::buffer());
delete[] lchild_to_upper;
}
if (mList._rchild) { // If right child, add it in
sprintf(Expr::buffer(), "_kids[1]->_cost[%s]", ArchDesc::getMachOperEnum(mList._rchild));
if (mList._rchild) { // If right child, add it in
const char* rchild_to_upper = ArchDesc::getMachOperEnum(mList._rchild);
sprintf(Expr::buffer(), "_kids[1]->_cost[%s]", rchild_to_upper);
c->add(Expr::buffer());
delete[] rchild_to_upper;
}
// Add in cost of this rule
const char *mList_cost = mList.get_cost();
@ -232,15 +243,17 @@ void ArchDesc::gen_match(FILE *fp, MatchList &mList, ProductionState &status, Di
fprintf(fp, "%s", spaces4);
// Only generate child tests if this is not a leaf node
bool has_child_constraints = mList._lchild || mList._rchild;
const char *predicate_test = mList.get_pred();
if( has_child_constraints || predicate_test ) {
const char *predicate_test = mList.get_pred();
if (has_child_constraints || predicate_test) {
// Open the child-and-predicate-test braces
fprintf(fp, "if( ");
status.set_constraint(hasConstraint);
child_test(fp, mList);
// Only generate predicate test if one exists for this match
if( predicate_test ) {
if( has_child_constraints ) { fprintf(fp," &&\n"); }
if (predicate_test) {
if (has_child_constraints) {
fprintf(fp," &&\n");
}
fprintf(fp, "%s %s", spaces6, predicate_test);
}
// End of outer tests

View File

@ -937,4 +937,6 @@ void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
void Canonicalizer::do_Assert(Assert* x) {}
void Canonicalizer::do_MemBar(MemBar* x) {}

View File

@ -107,6 +107,8 @@ class Canonicalizer: InstructionVisitor {
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
virtual void do_Assert (Assert* x);
};
#endif // SHARE_VM_C1_C1_CANONICALIZER_HPP

View File

@ -166,6 +166,22 @@ class RangeCheckStub: public CodeStub {
#endif // PRODUCT
};
// stub used when predicate fails and deoptimization is needed
class PredicateFailedStub: public CodeStub {
private:
CodeEmitInfo* _info;
public:
PredicateFailedStub(CodeEmitInfo* info);
virtual void emit_code(LIR_Assembler* e);
virtual CodeEmitInfo* info() const { return _info; }
virtual void visit(LIR_OpVisitState* visitor) {
visitor->do_slow_case(_info);
}
#ifndef PRODUCT
virtual void print_name(outputStream* out) const { out->print("PredicateFailedStub"); }
#endif // PRODUCT
};
class DivByZeroStub: public CodeStub {
private:

View File

@ -33,13 +33,16 @@
#include "c1/c1_ValueStack.hpp"
#include "code/debugInfoRec.hpp"
#include "compiler/compileLog.hpp"
#include "c1/c1_RangeCheckElimination.hpp"
typedef enum {
_t_compile,
_t_setup,
_t_optimizeIR,
_t_buildIR,
_t_optimize_blocks,
_t_optimize_null_checks,
_t_rangeCheckElimination,
_t_emit_lir,
_t_linearScan,
_t_lirGeneration,
@ -52,8 +55,10 @@ typedef enum {
static const char * timer_name[] = {
"compile",
"setup",
"optimizeIR",
"buildIR",
"optimize_blocks",
"optimize_null_checks",
"rangeCheckElimination",
"emit_lir",
"linearScan",
"lirGeneration",
@ -159,9 +164,9 @@ void Compilation::build_hir() {
if (UseC1Optimizations) {
NEEDS_CLEANUP
// optimization
PhaseTraceTime timeit(_t_optimizeIR);
PhaseTraceTime timeit(_t_optimize_blocks);
_hir->optimize();
_hir->optimize_blocks();
}
_hir->verify();
@ -180,13 +185,47 @@ void Compilation::build_hir() {
_hir->compute_code();
if (UseGlobalValueNumbering) {
ResourceMark rm;
// No resource mark here! LoopInvariantCodeMotion can allocate ValueStack objects.
int instructions = Instruction::number_of_instructions();
GlobalValueNumbering gvn(_hir);
assert(instructions == Instruction::number_of_instructions(),
"shouldn't have created an instructions");
}
_hir->verify();
#ifndef PRODUCT
if (PrintCFGToFile) {
CFGPrinter::print_cfg(_hir, "Before RangeCheckElimination", true, false);
}
#endif
if (RangeCheckElimination) {
if (_hir->osr_entry() == NULL) {
PhaseTraceTime timeit(_t_rangeCheckElimination);
RangeCheckElimination::eliminate(_hir);
}
}
#ifndef PRODUCT
if (PrintCFGToFile) {
CFGPrinter::print_cfg(_hir, "After RangeCheckElimination", true, false);
}
#endif
if (UseC1Optimizations) {
// loop invariant code motion reorders instructions and range
// check elimination adds new instructions so do null check
// elimination after.
NEEDS_CLEANUP
// optimization
PhaseTraceTime timeit(_t_optimize_null_checks);
_hir->eliminate_null_checks();
}
_hir->verify();
// compute use counts after global value numbering
_hir->compute_use_counts();
@ -502,6 +541,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
, _next_id(0)
, _next_block_id(0)
, _code(buffer_blob)
, _has_access_indexed(false)
, _current_instruction(NULL)
#ifndef PRODUCT
, _last_instruction_printed(NULL)
@ -567,7 +607,9 @@ void Compilation::print_timers() {
tty->print_cr(" Detailed C1 Timings");
tty->print_cr(" Setup time: %6.3f s (%4.1f%%)", timers[_t_setup].seconds(), (timers[_t_setup].seconds() / total) * 100.0);
tty->print_cr(" Build IR: %6.3f s (%4.1f%%)", timers[_t_buildIR].seconds(), (timers[_t_buildIR].seconds() / total) * 100.0);
tty->print_cr(" Optimize: %6.3f s (%4.1f%%)", timers[_t_optimizeIR].seconds(), (timers[_t_optimizeIR].seconds() / total) * 100.0);
float t_optimizeIR = timers[_t_optimize_blocks].seconds() + timers[_t_optimize_null_checks].seconds();
tty->print_cr(" Optimize: %6.3f s (%4.1f%%)", t_optimizeIR, (t_optimizeIR / total) * 100.0);
tty->print_cr(" RCE: %6.3f s (%4.1f%%)", timers[_t_rangeCheckElimination].seconds(), (timers[_t_rangeCheckElimination].seconds() / total) * 100.0);
tty->print_cr(" Emit LIR: %6.3f s (%4.1f%%)", timers[_t_emit_lir].seconds(), (timers[_t_emit_lir].seconds() / total) * 100.0);
tty->print_cr(" LIR Gen: %6.3f s (%4.1f%%)", timers[_t_lirGeneration].seconds(), (timers[_t_lirGeneration].seconds() / total) * 100.0);
tty->print_cr(" Linear Scan: %6.3f s (%4.1f%%)", timers[_t_linearScan].seconds(), (timers[_t_linearScan].seconds() / total) * 100.0);

View File

@ -26,8 +26,10 @@
#define SHARE_VM_C1_C1_COMPILATION_HPP
#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/deoptimization.hpp"
class CompilationResourceObj;
class XHandlers;
@ -85,6 +87,7 @@ class Compilation: public StackObj {
LinearScan* _allocator;
CodeOffsets _offsets;
CodeBuffer _code;
bool _has_access_indexed;
// compilation helpers
void initialize();
@ -140,6 +143,7 @@ class Compilation: public StackObj {
C1_MacroAssembler* masm() const { return _masm; }
CodeOffsets* offsets() { return &_offsets; }
Arena* arena() { return _arena; }
bool has_access_indexed() { return _has_access_indexed; }
// Instruction ids
int get_next_id() { return _next_id++; }
@ -154,6 +158,7 @@ class Compilation: public StackObj {
void set_has_fpu_code(bool f) { _has_fpu_code = f; }
void set_has_unsafe_access(bool f) { _has_unsafe_access = f; }
void set_would_profile(bool f) { _would_profile = f; }
void set_has_access_indexed(bool f) { _has_access_indexed = f; }
// Add a set of exception handlers covering the given PC offset
void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers);
// Statistics gathering
@ -233,6 +238,14 @@ class Compilation: public StackObj {
return env()->comp_level() == CompLevel_full_profile &&
C1UpdateMethodData && C1ProfileCheckcasts;
}
// will compilation make optimistic assumptions that might lead to
// deoptimization and that the runtime will account for?
bool is_optimistic() const {
return !TieredCompilation &&
(RangeCheckElimination || UseLoopInvariantCodeMotion) &&
method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
}
};

View File

@ -947,7 +947,9 @@ void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
void GraphBuilder::load_indexed(BasicType type) {
ValueStack* state_before = copy_state_for_exception();
// In case of in block code motion in range check elimination
ValueStack* state_before = copy_state_indexed_access();
compilation()->set_has_access_indexed(true);
Value index = ipop();
Value array = apop();
Value length = NULL;
@ -961,7 +963,9 @@ void GraphBuilder::load_indexed(BasicType type) {
void GraphBuilder::store_indexed(BasicType type) {
ValueStack* state_before = copy_state_for_exception();
// In case of in block code motion in range check elimination
ValueStack* state_before = copy_state_indexed_access();
compilation()->set_has_access_indexed(true);
Value value = pop(as_ValueType(type));
Value index = ipop();
Value array = apop();
@ -1179,7 +1183,9 @@ void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* sta
BlockBegin* tsux = block_at(stream()->get_dest());
BlockBegin* fsux = block_at(stream()->next_bci());
bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : NULL, is_bb));
assert(i->as_Goto() == NULL ||
(i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
@ -1294,7 +1300,9 @@ void GraphBuilder::table_switch() {
BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
ValueStack* state_before = is_bb ? copy_state_before() : NULL;
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(is_bb);
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
} else {
// collect successors
@ -1308,7 +1316,9 @@ void GraphBuilder::table_switch() {
// add default successor
if (sw.default_offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + sw.default_offset()));
ValueStack* state_before = has_bb ? copy_state_before() : NULL;
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(has_bb);
Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
#ifdef ASSERT
if (res->as_Goto()) {
@ -1336,7 +1346,9 @@ void GraphBuilder::lookup_switch() {
BlockBegin* tsux = block_at(bci() + pair.offset());
BlockBegin* fsux = block_at(bci() + sw.default_offset());
bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
ValueStack* state_before = is_bb ? copy_state_before() : NULL;
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(is_bb);;
append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
} else {
// collect successors & keys
@ -1353,7 +1365,9 @@ void GraphBuilder::lookup_switch() {
// add default successor
if (sw.default_offset() < 0) has_bb = true;
sux->at_put(i, block_at(bci() + sw.default_offset()));
ValueStack* state_before = has_bb ? copy_state_before() : NULL;
// In case of loop invariant code motion or predicate insertion
// before the body of a loop the state is needed
ValueStack* state_before = copy_state_if_bb(has_bb);
Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
#ifdef ASSERT
if (res->as_Goto()) {

View File

@ -301,6 +301,8 @@ class GraphBuilder VALUE_OBJ_CLASS_SPEC {
ValueStack* copy_state_exhandling();
ValueStack* copy_state_for_exception_with_bci(int bci);
ValueStack* copy_state_for_exception();
ValueStack* copy_state_if_bb(bool is_bb) { return (is_bb || compilation()->is_optimistic()) ? copy_state_before() : NULL; }
ValueStack* copy_state_indexed_access() { return compilation()->is_optimistic() ? copy_state_before() : copy_state_for_exception(); }
//
// Inlining support

View File

@ -182,13 +182,14 @@ bool IRScopeDebugInfo::should_reexecute() {
// Implementation of CodeEmitInfo
// Stack must be NON-null
CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers)
CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
: _scope(stack->scope())
, _scope_debug_info(NULL)
, _oop_map(NULL)
, _stack(stack)
, _exception_handlers(exception_handlers)
, _is_method_handle_invoke(false) {
, _is_method_handle_invoke(false)
, _deoptimize_on_exception(deoptimize_on_exception) {
assert(_stack != NULL, "must be non null");
}
@ -199,7 +200,8 @@ CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
, _scope_debug_info(NULL)
, _oop_map(NULL)
, _stack(stack == NULL ? info->_stack : stack)
, _is_method_handle_invoke(info->_is_method_handle_invoke) {
, _is_method_handle_invoke(info->_is_method_handle_invoke)
, _deoptimize_on_exception(info->_deoptimize_on_exception) {
// deep copy of exception handlers
if (info->_exception_handlers != NULL) {
@ -239,7 +241,7 @@ IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
}
void IR::optimize() {
void IR::optimize_blocks() {
Optimizer opt(this);
if (!compilation()->profile_branches()) {
if (DoCEE) {
@ -257,6 +259,10 @@ void IR::optimize() {
#endif
}
}
}
void IR::eliminate_null_checks() {
Optimizer opt(this);
if (EliminateNullChecks) {
opt.eliminate_null_checks();
#ifndef PRODUCT
@ -429,6 +435,7 @@ class ComputeLinearScanOrder : public StackObj {
BlockList _loop_end_blocks; // list of all loop end blocks collected during count_edges
BitMap2D _loop_map; // two-dimensional bit set: a bit is set if a block is contained in a loop
BlockList _work_list; // temporary list (used in mark_loops and compute_order)
BlockList _loop_headers;
Compilation* _compilation;
@ -594,6 +601,7 @@ void ComputeLinearScanOrder::count_edges(BlockBegin* cur, BlockBegin* parent) {
TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops));
cur->set_loop_index(_num_loops);
_loop_headers.append(cur);
_num_loops++;
}
@ -656,6 +664,16 @@ void ComputeLinearScanOrder::clear_non_natural_loops(BlockBegin* start_block) {
// -> this is not a natural loop, so ignore it
TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i));
BlockBegin *loop_header = _loop_headers.at(i);
assert(loop_header->is_set(BlockBegin::linear_scan_loop_header_flag), "Must be loop header");
for (int j = 0; j < loop_header->number_of_preds(); j++) {
BlockBegin *pred = loop_header->pred_at(j);
pred->clear(BlockBegin::linear_scan_loop_end_flag);
}
loop_header->clear(BlockBegin::linear_scan_loop_header_flag);
for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) {
clear_block_in_loop(i, block_id);
}
@ -729,9 +747,20 @@ void ComputeLinearScanOrder::compute_dominator(BlockBegin* cur, BlockBegin* pare
} else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) {
TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id()));
assert(cur->number_of_preds() > 1, "");
// Does not hold for exception blocks
assert(cur->number_of_preds() > 1 || cur->is_set(BlockBegin::exception_entry_flag), "");
cur->set_dominator(common_dominator(cur->dominator(), parent));
}
// Additional edge to xhandler of all our successors
// range check elimination needs that the state at the end of a
// block be valid in every block it dominates so cur must dominate
// the exception handlers of its successors.
int num_cur_xhandler = cur->number_of_exception_handlers();
for (int j = 0; j < num_cur_xhandler; j++) {
BlockBegin* xhandler = cur->exception_handler_at(j);
compute_dominator(xhandler, parent);
}
}
@ -898,7 +927,6 @@ void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
num_sux = cur->number_of_exception_handlers();
for (i = 0; i < num_sux; i++) {
BlockBegin* sux = cur->exception_handler_at(i);
compute_dominator(sux, cur);
if (ready_for_processing(sux)) {
sort_into_work_list(sux);
}
@ -918,8 +946,23 @@ bool ComputeLinearScanOrder::compute_dominators_iter() {
BlockBegin* dominator = block->pred_at(0);
int num_preds = block->number_of_preds();
for (int i = 1; i < num_preds; i++) {
dominator = common_dominator(dominator, block->pred_at(i));
TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: Processing B%d", block->block_id()));
for (int j = 0; j < num_preds; j++) {
BlockBegin *pred = block->pred_at(j);
TRACE_LINEAR_SCAN(4, tty->print_cr(" DOM: Subrocessing B%d", pred->block_id()));
if (block->is_set(BlockBegin::exception_entry_flag)) {
dominator = common_dominator(dominator, pred);
int num_pred_preds = pred->number_of_preds();
for (int k = 0; k < num_pred_preds; k++) {
dominator = common_dominator(dominator, pred->pred_at(k));
}
} else {
dominator = common_dominator(dominator, pred);
}
}
if (dominator != block->dominator()) {
@ -946,6 +989,21 @@ void ComputeLinearScanOrder::compute_dominators() {
// check that dominators are correct
assert(!compute_dominators_iter(), "fix point not reached");
// Add Blocks to dominates-Array
int num_blocks = _linear_scan_order->length();
for (int i = 0; i < num_blocks; i++) {
BlockBegin* block = _linear_scan_order->at(i);
BlockBegin *dom = block->dominator();
if (dom) {
assert(dom->dominator_depth() != -1, "Dominator must have been visited before");
dom->dominates()->append(block);
block->set_dominator_depth(dom->dominator_depth() + 1);
} else {
block->set_dominator_depth(0);
}
}
}
@ -1032,7 +1090,7 @@ void ComputeLinearScanOrder::verify() {
BlockBegin* sux = cur->sux_at(j);
assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
if (!cur->is_set(BlockBegin::linear_scan_loop_end_flag)) {
if (!sux->is_set(BlockBegin::backward_branch_target_flag)) {
assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order");
}
if (cur->loop_depth() == sux->loop_depth()) {
@ -1044,7 +1102,7 @@ void ComputeLinearScanOrder::verify() {
BlockBegin* pred = cur->pred_at(j);
assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number");
if (!cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
if (!cur->is_set(BlockBegin::backward_branch_target_flag)) {
assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order");
}
if (cur->loop_depth() == pred->loop_depth()) {
@ -1060,7 +1118,8 @@ void ComputeLinearScanOrder::verify() {
} else {
assert(cur->dominator() != NULL, "all but first block must have dominator");
}
assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0), "Single predecessor must also be dominator");
// Assertion does not hold for exception handlers
assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0) || cur->is_set(BlockBegin::exception_entry_flag), "Single predecessor must also be dominator");
}
// check that all loops are continuous
@ -1249,9 +1308,22 @@ class PredecessorValidator : public BlockClosure {
}
};
class VerifyBlockBeginField : public BlockClosure {
public:
virtual void block_do(BlockBegin *block) {
for ( Instruction *cur = block; cur != NULL; cur = cur->next()) {
assert(cur->block() == block, "Block begin is not correct");
}
}
};
void IR::verify() {
#ifdef ASSERT
PredecessorValidator pv(this);
VerifyBlockBeginField verifier;
this->iterate_postorder(&verifier);
#endif
}

View File

@ -254,6 +254,7 @@ class CodeEmitInfo: public CompilationResourceObj {
OopMap* _oop_map;
ValueStack* _stack; // used by deoptimization (contains also monitors
bool _is_method_handle_invoke; // true if the associated call site is a MethodHandle call site.
bool _deoptimize_on_exception;
FrameMap* frame_map() const { return scope()->compilation()->frame_map(); }
Compilation* compilation() const { return scope()->compilation(); }
@ -261,7 +262,7 @@ class CodeEmitInfo: public CompilationResourceObj {
public:
// use scope from ValueStack
CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers);
CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception = false);
// make a copy
CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack = NULL);
@ -272,6 +273,7 @@ class CodeEmitInfo: public CompilationResourceObj {
IRScope* scope() const { return _scope; }
XHandlers* exception_handlers() const { return _exception_handlers; }
ValueStack* stack() const { return _stack; }
bool deoptimize_on_exception() const { return _deoptimize_on_exception; }
void add_register_oop(LIR_Opr opr);
void record_debug_info(DebugInformationRecorder* recorder, int pc_offset);
@ -309,7 +311,8 @@ class IR: public CompilationResourceObj {
int max_stack() const { return top_scope()->max_stack(); } // expensive
// ir manipulation
void optimize();
void optimize_blocks();
void eliminate_null_checks();
void compute_predecessors();
void split_critical_edges();
void compute_code();

View File

@ -34,6 +34,15 @@
// Implementation of Instruction
int Instruction::dominator_depth() {
int result = -1;
if (block()) {
result = block()->dominator_depth();
}
assert(result != -1 || this->as_Local(), "Only locals have dominator depth -1");
return result;
}
Instruction::Condition Instruction::mirror(Condition cond) {
switch (cond) {
case eql: return eql;
@ -42,6 +51,8 @@ Instruction::Condition Instruction::mirror(Condition cond) {
case leq: return geq;
case gtr: return lss;
case geq: return leq;
case aeq: return beq;
case beq: return aeq;
}
ShouldNotReachHere();
return eql;
@ -56,6 +67,8 @@ Instruction::Condition Instruction::negate(Condition cond) {
case leq: return gtr;
case gtr: return leq;
case geq: return lss;
case aeq: assert(false, "Above equal cannot be negated");
case beq: assert(false, "Below equal cannot be negated");
}
ShouldNotReachHere();
return eql;
@ -70,10 +83,10 @@ void Instruction::update_exception_state(ValueStack* state) {
}
}
Instruction* Instruction::prev(BlockBegin* block) {
// Prev without need to have BlockBegin
Instruction* Instruction::prev() {
Instruction* p = NULL;
Instruction* q = block;
Instruction* q = block();
while (q != this) {
assert(q != NULL, "this is not in the block's instruction list");
p = q; q = q->next();
@ -122,15 +135,24 @@ void Instruction::print(InstructionPrinter& ip) {
// perform constant and interval tests on index value
bool AccessIndexed::compute_needs_range_check() {
Constant* clength = length()->as_Constant();
Constant* cindex = index()->as_Constant();
if (clength && cindex) {
IntConstant* l = clength->type()->as_IntConstant();
IntConstant* i = cindex->type()->as_IntConstant();
if (l && i && i->value() < l->value() && i->value() >= 0) {
return false;
if (length()) {
Constant* clength = length()->as_Constant();
Constant* cindex = index()->as_Constant();
if (clength && cindex) {
IntConstant* l = clength->type()->as_IntConstant();
IntConstant* i = cindex->type()->as_IntConstant();
if (l && i && i->value() < l->value() && i->value() >= 0) {
return false;
}
}
}
if (!this->check_flag(NeedsRangeCheckFlag)) {
return false;
}
return true;
}
@ -631,19 +653,25 @@ void BlockBegin::substitute_sux(BlockBegin* old_sux, BlockBegin* new_sux) {
// of the inserted block, without recomputing the values of the other blocks
// in the CFG. Therefore the value of "depth_first_number" in BlockBegin becomes meaningless.
BlockBegin* BlockBegin::insert_block_between(BlockBegin* sux) {
BlockBegin* new_sux = new BlockBegin(end()->state()->bci());
int bci = sux->bci();
// critical edge splitting may introduce a goto after a if and array
// bound check elimination may insert a predicate between the if and
// goto. The bci of the goto can't be the one of the if otherwise
// the state and bci are inconsistent and a deoptimization triggered
// by the predicate would lead to incorrect execution/a crash.
BlockBegin* new_sux = new BlockBegin(bci);
// mark this block (special treatment when block order is computed)
new_sux->set(critical_edge_split_flag);
// This goto is not a safepoint.
Goto* e = new Goto(sux, false);
new_sux->set_next(e, end()->state()->bci());
new_sux->set_next(e, bci);
new_sux->set_end(e);
// setup states
ValueStack* s = end()->state();
new_sux->set_state(s->copy());
e->set_state(s->copy());
new_sux->set_state(s->copy(s->kind(), bci));
e->set_state(s->copy(s->kind(), bci));
assert(new_sux->state()->locals_size() == s->locals_size(), "local size mismatch!");
assert(new_sux->state()->stack_size() == s->stack_size(), "stack size mismatch!");
assert(new_sux->state()->locks_size() == s->locks_size(), "locks size mismatch!");
@ -960,15 +988,14 @@ void BlockEnd::set_begin(BlockBegin* begin) {
BlockList* sux = NULL;
if (begin != NULL) {
sux = begin->successors();
} else if (_begin != NULL) {
} else if (this->begin() != NULL) {
// copy our sux list
BlockList* sux = new BlockList(_begin->number_of_sux());
for (int i = 0; i < _begin->number_of_sux(); i++) {
sux->append(_begin->sux_at(i));
BlockList* sux = new BlockList(this->begin()->number_of_sux());
for (int i = 0; i < this->begin()->number_of_sux(); i++) {
sux->append(this->begin()->sux_at(i));
}
}
_sux = sux;
_begin = begin;
}
@ -1008,7 +1035,38 @@ int Phi::operand_count() const {
}
}
#ifdef ASSERT
// Constructor of Assert: debug-only instruction checking that "x cond y"
// holds at runtime. Produces no value; builds a human-readable failure
// message of the form "Assertion <x> <cond> <y> in method <name>" by
// pretty-printing both operands and the enclosing method.
Assert::Assert(Value x, Condition cond, bool unordered_is_true, Value y) : Instruction(illegalType)
, _x(x)
, _cond(cond)
, _y(y)
{
set_flag(UnorderedIsTrueFlag, unordered_is_true);
assert(x->type()->tag() == y->type()->tag(), "types must match");
// Keep the instruction alive even though nothing uses its (illegal) result.
pin();
// Render the enclosing method's name.
stringStream strStream;
Compilation::current()->method()->print_name(&strStream);
// Render the left operand.
stringStream strStream1;
InstructionPrinter ip1(1, &strStream1);
ip1.print_instr(x);
// Render the right operand.
stringStream strStream2;
InstructionPrinter ip2(1, &strStream2);
ip2.print_instr(y);
// Assemble the final message kept for the lifetime of this instruction.
stringStream ss;
ss.print("Assertion %s %s %s in method %s", strStream1.as_string(), ip2.cond_name(cond), strStream2.as_string(), strStream.as_string());
_message = ss.as_string();
}
#endif
// Debug check: a range-check predicate deoptimizes through its attached
// state, so that state must not be one of the (empty) exception kinds.
void RangeCheckPredicate::check_state() {
  assert(!(state()->kind() == ValueStack::EmptyExceptionState ||
           state()->kind() == ValueStack::ExceptionState),
         "will deopt with empty state");
}
void ProfileInvoke::state_values_do(ValueVisitor* f) {
if (state() != NULL) state()->values_do(f);

View File

@ -110,6 +110,8 @@ class ProfileCall;
class ProfileInvoke;
class RuntimeCall;
class MemBar;
class RangeCheckPredicate;
class Assert;
// A Value is a reference to the instruction creating the value
typedef Instruction* Value;
@ -210,6 +212,10 @@ class InstructionVisitor: public StackObj {
virtual void do_ProfileInvoke (ProfileInvoke* x) = 0;
virtual void do_RuntimeCall (RuntimeCall* x) = 0;
virtual void do_MemBar (MemBar* x) = 0;
virtual void do_RangeCheckPredicate(RangeCheckPredicate* x) = 0;
#ifdef ASSERT
virtual void do_Assert (Assert* x) = 0;
#endif
};
@ -306,8 +312,9 @@ class Instruction: public CompilationResourceObj {
void update_exception_state(ValueStack* state);
//protected:
public:
protected:
BlockBegin* _block; // Block that contains this instruction
void set_type(ValueType* type) {
assert(type != NULL, "type must exist");
_type = type;
@ -342,6 +349,9 @@ class Instruction: public CompilationResourceObj {
ThrowIncompatibleClassChangeErrorFlag,
ProfileMDOFlag,
IsLinkedInBlockFlag,
NeedsRangeCheckFlag,
InWorkListFlag,
DeoptimizeOnException,
InstructionLastFlag
};
@ -351,7 +361,7 @@ class Instruction: public CompilationResourceObj {
// 'globally' used condition values
enum Condition {
eql, neq, lss, leq, gtr, geq
eql, neq, lss, leq, gtr, geq, aeq, beq
};
// Instructions may be pinned for many reasons and under certain conditions
@ -381,6 +391,7 @@ class Instruction: public CompilationResourceObj {
, _pin_state(0)
, _type(type)
, _next(NULL)
, _block(NULL)
, _subst(NULL)
, _flags(0)
, _operand(LIR_OprFact::illegalOpr)
@ -399,11 +410,13 @@ class Instruction: public CompilationResourceObj {
int printable_bci() const { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; }
void set_printable_bci(int bci) { _printable_bci = bci; }
#endif
int dominator_depth();
int use_count() const { return _use_count; }
int pin_state() const { return _pin_state; }
bool is_pinned() const { return _pin_state != 0 || PinAllInstructions; }
ValueType* type() const { return _type; }
Instruction* prev(BlockBegin* block); // use carefully, expensive operation
BlockBegin *block() const { return _block; }
Instruction* prev(); // use carefully, expensive operation
Instruction* next() const { return _next; }
bool has_subst() const { return _subst != NULL; }
Instruction* subst() { return _subst == NULL ? this : _subst->subst(); }
@ -432,6 +445,9 @@ class Instruction: public CompilationResourceObj {
assert(as_BlockEnd() == NULL, "BlockEnd instructions must have no next");
assert(next->can_be_linked(), "shouldn't link these instructions into list");
BlockBegin *block = this->block();
next->_block = block;
next->set_flag(Instruction::IsLinkedInBlockFlag, true);
_next = next;
return next;
@ -444,6 +460,29 @@ class Instruction: public CompilationResourceObj {
return set_next(next);
}
// when blocks are merged
void fixup_block_pointers() {
Instruction *cur = next()->next(); // next()'s block is set in set_next
while (cur && cur->_block != block()) {
cur->_block = block();
cur = cur->next();
}
}
Instruction *insert_after(Instruction *i) {
Instruction* n = _next;
set_next(i);
i->set_next(n);
return _next;
}
Instruction *insert_after_same_bci(Instruction *i) {
#ifndef PRODUCT
i->set_printable_bci(printable_bci());
#endif
return insert_after(i);
}
void set_subst(Instruction* subst) {
assert(subst == NULL ||
type()->base() == subst->type()->base() ||
@ -452,6 +491,7 @@ class Instruction: public CompilationResourceObj {
}
void set_exception_handlers(XHandlers *xhandlers) { _exception_handlers = xhandlers; }
void set_exception_state(ValueStack* s) { check_state(s); _exception_state = s; }
void set_state_before(ValueStack* s) { check_state(s); _state_before = s; }
// machine-specifics
void set_operand(LIR_Opr operand) { assert(operand != LIR_OprFact::illegalOpr, "operand must exist"); _operand = operand; }
@ -509,6 +549,11 @@ class Instruction: public CompilationResourceObj {
virtual ExceptionObject* as_ExceptionObject() { return NULL; }
virtual UnsafeOp* as_UnsafeOp() { return NULL; }
virtual ProfileInvoke* as_ProfileInvoke() { return NULL; }
virtual RangeCheckPredicate* as_RangeCheckPredicate() { return NULL; }
#ifdef ASSERT
virtual Assert* as_Assert() { return NULL; }
#endif
virtual void visit(InstructionVisitor* v) = 0;
@ -570,7 +615,6 @@ class AssertValues: public ValueVisitor {
LEAF(Phi, Instruction)
private:
BlockBegin* _block; // the block to which the phi function belongs
int _pf_flags; // the flags of the phi function
int _index; // to value on operand stack (index < 0) or to local
public:
@ -578,9 +622,9 @@ LEAF(Phi, Instruction)
Phi(ValueType* type, BlockBegin* b, int index)
: Instruction(type->base())
, _pf_flags(0)
, _block(b)
, _index(index)
{
_block = b;
NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci()));
if (type->is_illegal()) {
make_illegal();
@ -603,8 +647,6 @@ LEAF(Phi, Instruction)
Value operand_at(int i) const;
int operand_count() const;
BlockBegin* block() const { return _block; }
void set(Flag f) { _pf_flags |= f; }
void clear(Flag f) { _pf_flags &= ~f; }
bool is_set(Flag f) const { return (_pf_flags & f) != 0; }
@ -670,6 +712,7 @@ LEAF(Constant, Instruction)
pin();
}
// generic
virtual bool can_trap() const { return state_before() != NULL; }
virtual void input_values_do(ValueVisitor* f) { /* no values */ }
@ -852,6 +895,7 @@ BASE(AccessIndexed, AccessArray)
, _length(length)
, _elt_type(elt_type)
{
set_flag(Instruction::NeedsRangeCheckFlag, true);
ASSERT_VALUES
}
@ -860,6 +904,7 @@ BASE(AccessIndexed, AccessArray)
Value length() const { return _length; }
BasicType elt_type() const { return _elt_type; }
void clear_length() { _length = NULL; }
// perform elimination of range checks involving constants
bool compute_needs_range_check();
@ -1524,6 +1569,7 @@ LEAF(BlockBegin, StateSplit)
int _bci; // start-bci of block
int _depth_first_number; // number of this block in a depth-first ordering
int _linear_scan_number; // number of this block in linear-scan ordering
int _dominator_depth;
int _loop_depth; // the loop nesting level of this block
int _loop_index; // number of the innermost loop of this block
int _flags; // the flags associated with this block
@ -1535,6 +1581,7 @@ LEAF(BlockBegin, StateSplit)
// SSA specific fields: (factor out later)
BlockList _successors; // the successors of this block
BlockList _predecessors; // the predecessors of this block
BlockList _dominates; // list of blocks that are dominated by this block
BlockBegin* _dominator; // the dominator of this block
// SSA specific ends
BlockEnd* _end; // the last instruction of this block
@ -1583,10 +1630,12 @@ LEAF(BlockBegin, StateSplit)
, _linear_scan_number(-1)
, _loop_depth(0)
, _flags(0)
, _dominator_depth(-1)
, _dominator(NULL)
, _end(NULL)
, _predecessors(2)
, _successors(2)
, _dominates(2)
, _exception_handlers(1)
, _exception_states(NULL)
, _exception_handler_pco(-1)
@ -1603,6 +1652,7 @@ LEAF(BlockBegin, StateSplit)
, _total_preds(0)
, _stores_to_locals()
{
_block = this;
#ifndef PRODUCT
set_printable_bci(bci);
#endif
@ -1612,8 +1662,10 @@ LEAF(BlockBegin, StateSplit)
int block_id() const { return _block_id; }
int bci() const { return _bci; }
BlockList* successors() { return &_successors; }
BlockList* dominates() { return &_dominates; }
BlockBegin* dominator() const { return _dominator; }
int loop_depth() const { return _loop_depth; }
int dominator_depth() const { return _dominator_depth; }
int depth_first_number() const { return _depth_first_number; }
int linear_scan_number() const { return _linear_scan_number; }
BlockEnd* end() const { return _end; }
@ -1634,6 +1686,7 @@ LEAF(BlockBegin, StateSplit)
// manipulation
void set_dominator(BlockBegin* dom) { _dominator = dom; }
void set_loop_depth(int d) { _loop_depth = d; }
void set_dominator_depth(int d) { _dominator_depth = d; }
void set_depth_first_number(int dfn) { _depth_first_number = dfn; }
void set_linear_scan_number(int lsn) { _linear_scan_number = lsn; }
void set_end(BlockEnd* end);
@ -1695,7 +1748,8 @@ LEAF(BlockBegin, StateSplit)
parser_loop_header_flag = 1 << 7, // set by parser to identify blocks where phi functions can not be created on demand
critical_edge_split_flag = 1 << 8, // set for all blocks that are introduced when critical edges are split
linear_scan_loop_header_flag = 1 << 9, // set during loop-detection for LinearScan
linear_scan_loop_end_flag = 1 << 10 // set during loop-detection for LinearScan
linear_scan_loop_end_flag = 1 << 10, // set during loop-detection for LinearScan
donot_eliminate_range_checks = 1 << 11 // Should be try to eliminate range checks in this block
};
void set(Flag f) { _flags |= f; }
@ -1728,7 +1782,6 @@ LEAF(BlockBegin, StateSplit)
BASE(BlockEnd, StateSplit)
private:
BlockBegin* _begin;
BlockList* _sux;
protected:
@ -1746,7 +1799,6 @@ BASE(BlockEnd, StateSplit)
// creation
BlockEnd(ValueType* type, ValueStack* state_before, bool is_safepoint)
: StateSplit(type, state_before)
, _begin(NULL)
, _sux(NULL)
{
set_flag(IsSafepointFlag, is_safepoint);
@ -1754,7 +1806,8 @@ BASE(BlockEnd, StateSplit)
// accessors
bool is_safepoint() const { return check_flag(IsSafepointFlag); }
BlockBegin* begin() const { return _begin; }
// For compatibility with old code, for new code use block()
BlockBegin* begin() const { return _block; }
// manipulation
void set_begin(BlockBegin* begin);
@ -1811,6 +1864,74 @@ LEAF(Goto, BlockEnd)
void set_direction(Direction d) { _direction = d; }
};
#ifdef ASSERT
// Debug-only IR node asserting that the relation "x cond y" holds at
// runtime; produces no value. The failure message is assembled in the
// constructor.
LEAF(Assert, Instruction)
private:
Value _x;            // left operand
Condition _cond;     // relation that must hold between _x and _y
Value _y;            // right operand
char *_message;      // pre-built human-readable failure message
public:
// creation
// unordered_is_true is valid for float/double compares only
Assert(Value x, Condition cond, bool unordered_is_true, Value y);
// accessors
Value x() const { return _x; }
Condition cond() const { return _cond; }
bool unordered_is_true() const { return check_flag(UnorderedIsTrueFlag); }
Value y() const { return _y; }
const char *message() const { return _message; }
// generic
virtual void input_values_do(ValueVisitor* f) { f->visit(&_x); f->visit(&_y); }
};
#endif
// IR node guarding eliminated range checks: deoptimizes when "x cond y"
// holds (or unconditionally when both operands are NULL). Carries the
// ValueStack used to rebuild interpreter state on deoptimization.
LEAF(RangeCheckPredicate, StateSplit)
private:
Value _x;          // left operand; NULL for the always-fail form
Condition _cond;   // relation that triggers deoptimization
Value _y;          // right operand; NULL for the always-fail form
// Debug-only sanity check that the attached state is usable for deopt.
void check_state();
public:
// creation
// unordered_is_true is valid for float/double compares only
RangeCheckPredicate(Value x, Condition cond, bool unordered_is_true, Value y, ValueStack* state) : StateSplit(illegalType)
, _x(x)
, _cond(cond)
, _y(y)
{
ASSERT_VALUES
set_flag(UnorderedIsTrueFlag, unordered_is_true);
assert(x->type()->tag() == y->type()->tag(), "types must match");
this->set_state(state);
check_state();
}
// Always deoptimize: predicate with no operands (see always_fail()).
RangeCheckPredicate(ValueStack* state) : StateSplit(illegalType)
{
this->set_state(state);
_x = _y = NULL;
check_state();
}
// accessors
Value x() const { return _x; }
Condition cond() const { return _cond; }
bool unordered_is_true() const { return check_flag(UnorderedIsTrueFlag); }
Value y() const { return _y; }
// Degrade this predicate to the unconditional-deopt form.
void always_fail() { _x = _y = NULL; }
// generic
virtual void input_values_do(ValueVisitor* f) { StateSplit::input_values_do(f); f->visit(&_x); f->visit(&_y); }
HASHING3(RangeCheckPredicate, true, x()->subst(), y()->subst(), cond())
};
LEAF(If, BlockEnd)
private:

View File

@ -57,6 +57,8 @@ const char* InstructionPrinter::cond_name(If::Condition cond) {
case If::leq: return "<=";
case If::gtr: return ">";
case If::geq: return ">=";
case If::aeq: return "|>=|";
case If::beq: return "|<=|";
}
ShouldNotReachHere();
return NULL;
@ -181,6 +183,11 @@ void InstructionPrinter::print_indexed(AccessIndexed* indexed) {
output()->put('[');
print_value(indexed->index());
output()->put(']');
if (indexed->length() != NULL) {
output()->put('(');
print_value(indexed->length());
output()->put(')');
}
}
@ -373,6 +380,7 @@ void InstructionPrinter::do_Constant(Constant* x) {
void InstructionPrinter::do_LoadField(LoadField* x) {
print_field(x);
output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
output()->print(" %s", x->field()->name()->as_utf8());
}
@ -381,6 +389,7 @@ void InstructionPrinter::do_StoreField(StoreField* x) {
output()->print(" := ");
print_value(x->value());
output()->print(" (%c)", type2char(x->field()->type()->basic_type()));
output()->print(" %s", x->field()->name()->as_utf8());
}
@ -393,6 +402,9 @@ void InstructionPrinter::do_ArrayLength(ArrayLength* x) {
void InstructionPrinter::do_LoadIndexed(LoadIndexed* x) {
print_indexed(x);
output()->print(" (%c)", type2char(x->elt_type()));
if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
output()->print(" [rc]");
}
}
@ -401,6 +413,9 @@ void InstructionPrinter::do_StoreIndexed(StoreIndexed* x) {
output()->print(" := ");
print_value(x->value());
output()->print(" (%c)", type2char(x->elt_type()));
if (x->check_flag(Instruction::NeedsRangeCheckFlag)) {
output()->print(" [rc]");
}
}
void InstructionPrinter::do_NegateOp(NegateOp* x) {
@ -843,6 +858,25 @@ void InstructionPrinter::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
output()->put(')');
}
// Print a range-check predicate: either its guarding comparison
// ("if a <cond> b then deoptimize!") or, when the operands have been
// cleared, the unconditional form.
void InstructionPrinter::do_RangeCheckPredicate(RangeCheckPredicate* x) {
  if (x->x() == NULL || x->y() == NULL) {
    // A predicate without operands deoptimizes unconditionally.
    output()->print("always deoptimize!");
    return;
  }
  output()->print("if ");
  print_value(x->x());
  output()->print(" %s ", cond_name(x->cond()));
  print_value(x->y());
  output()->print(" then deoptimize!");
}
// Print a debug Assert node as "assert <x> <cond> <y>".
void InstructionPrinter::do_Assert(Assert* x) {
output()->print("assert ");
print_value(x->x());
output()->print(" %s ", cond_name(x->cond()));
print_value(x->y());
}
void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
print_unsafe_object_op(x, "UnsafePrefetchWrite");

View File

@ -135,6 +135,8 @@ class InstructionPrinter: public InstructionVisitor {
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
virtual void do_Assert (Assert* x);
};
#endif // PRODUCT

View File

@ -633,6 +633,7 @@ void LIR_OpVisitState::visit(LIR_Op* op) {
case lir_ushr:
case lir_xadd:
case lir_xchg:
case lir_assert:
{
assert(op->as_Op2() != NULL, "must be");
LIR_Op2* op2 = (LIR_Op2*)op;
@ -1112,6 +1113,11 @@ void LIR_OpLock::emit_code(LIR_Assembler* masm) {
}
}
#ifdef ASSERT
// Code emission for the debug-only assert op: delegate to the
// platform-specific assembler (LIR_Assembler::emit_assert).
void LIR_OpAssert::emit_code(LIR_Assembler* masm) {
masm->emit_assert(this);
}
#endif
void LIR_OpDelay::emit_code(LIR_Assembler* masm) {
masm->emit_delay(this);
@ -1771,6 +1777,8 @@ const char * LIR_Op::name() const {
case lir_cas_int: s = "cas_int"; break;
// LIR_OpProfileCall
case lir_profile_call: s = "profile_call"; break;
// LIR_OpAssert
case lir_assert: s = "assert"; break;
case lir_none: ShouldNotReachHere();break;
default: s = "illegal_op"; break;
}
@ -2017,6 +2025,13 @@ void LIR_OpLock::print_instr(outputStream* out) const {
out->print("[lbl:0x%x]", stub()->entry());
}
// Pretty-print this op as: <cond> <opr1> <opr2>, "<message>".
void LIR_OpAssert::print_instr(outputStream* out) const {
  print_condition(out, condition()); out->print(" ");
  in_opr1()->print(out);             out->print(" ");
  in_opr2()->print(out);             out->print(", \"");
  // Print the message through "%s": it is arbitrary generated text (built
  // in Assert::Assert from printed operands) and may contain '%'
  // characters that must not be interpreted as format directives.
  out->print("%s", msg());           out->print("\"");
}
void LIR_OpDelay::print_instr(outputStream* out) const {
_op->print_on(out);

View File

@ -881,6 +881,7 @@ class LIR_OpLock;
class LIR_OpTypeCheck;
class LIR_OpCompareAndSwap;
class LIR_OpProfileCall;
class LIR_OpAssert;
// LIR operation codes
@ -1000,6 +1001,9 @@ enum LIR_Code {
, begin_opMDOProfile
, lir_profile_call
, end_opMDOProfile
, begin_opAssert
, lir_assert
, end_opAssert
};
@ -1135,6 +1139,7 @@ class LIR_Op: public CompilationResourceObj {
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
virtual LIR_OpAssert* as_OpAssert() { return NULL; }
virtual void verify() const {}
};
@ -1623,7 +1628,7 @@ class LIR_Op2: public LIR_Op {
, _tmp3(LIR_OprFact::illegalOpr)
, _tmp4(LIR_OprFact::illegalOpr)
, _tmp5(LIR_OprFact::illegalOpr) {
assert(code == lir_cmp, "code check");
assert(code == lir_cmp || code == lir_assert, "code check");
}
LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
@ -1683,7 +1688,7 @@ class LIR_Op2: public LIR_Op {
LIR_Opr tmp4_opr() const { return _tmp4; }
LIR_Opr tmp5_opr() const { return _tmp5; }
LIR_Condition condition() const {
assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
}
void set_condition(LIR_Condition condition) {
assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition;
@ -1823,6 +1828,30 @@ class LIR_OpDelay: public LIR_Op {
CodeEmitInfo* call_info() const { return info(); }
};
#ifdef ASSERT
// LIR_OpAssert
//
// Debug-only LIR operation checking a relation between two operands at
// runtime; emitted by LIRGenerator::do_Assert and lowered via
// LIR_Assembler::emit_assert.
class LIR_OpAssert : public LIR_Op2 {
 friend class LIR_OpVisitState;

 private:
  const char* _msg;   // human-readable failure message
  bool        _halt;  // if true, stop the VM when the assertion fails

 public:
  LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
    : LIR_Op2(lir_assert, condition, opr1, opr2)
    // Initializer order matches declaration order (_msg, then _halt):
    // members are constructed in declaration order regardless of the list,
    // so matching it avoids misleading code and -Wreorder warnings.
    , _msg(msg)
    , _halt(halt) {
  }

  const char* msg() const                        { return _msg; }
  bool        halt() const                       { return _halt; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpAssert* as_OpAssert()            { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
#endif
// LIR_OpCompareAndSwap
class LIR_OpCompareAndSwap : public LIR_Op {
@ -2196,6 +2225,9 @@ class LIR_List: public CompilationResourceObj {
void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
#ifdef ASSERT
void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
#endif
};
void print_LIR(BlockList* blocks);

View File

@ -210,6 +210,9 @@ class LIR_Assembler: public CompilationResourceObj {
void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
void arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info);
void intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op);
#ifdef ASSERT
void emit_assert(LIR_OpAssert* op);
#endif
void logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest);

View File

@ -403,6 +403,10 @@ void LIRGenerator::walk(Value instr) {
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
assert(state != NULL, "state must be defined");
#ifndef PRODUCT
state->verify();
#endif
ValueStack* s = state;
for_each_state(s) {
if (s->kind() == ValueStack::EmptyExceptionState) {
@ -453,7 +457,7 @@ CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ig
}
}
return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}
@ -1792,11 +1796,18 @@ void LIRGenerator::do_LoadField(LoadField* x) {
}
#endif
bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
if (x->needs_null_check() &&
(needs_patching ||
MacroAssembler::needs_explicit_null_check(x->offset()))) {
MacroAssembler::needs_explicit_null_check(x->offset()) ||
stress_deopt)) {
LIR_Opr obj = object.result();
if (stress_deopt) {
obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
}
// emit an explicit null check because the offset is too large
__ null_check(object.result(), new CodeEmitInfo(info));
__ null_check(obj, new CodeEmitInfo(info));
}
LIR_Opr reg = rlock_result(x, field_type);
@ -1873,6 +1884,11 @@ void LIRGenerator::do_ArrayLength(ArrayLength* x) {
} else {
info = state_for(nc);
}
if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
LIR_Opr obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
__ null_check(obj, new CodeEmitInfo(info));
}
}
__ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}
@ -1883,14 +1899,11 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
LIRItem array(x->array(), this);
LIRItem index(x->index(), this);
LIRItem length(this);
bool needs_range_check = true;
bool needs_range_check = x->compute_needs_range_check();
if (use_length) {
needs_range_check = x->compute_needs_range_check();
if (needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
if (use_length && needs_range_check) {
length.set_instruction(x->length());
length.load_item();
}
array.load_item();
@ -1910,13 +1923,20 @@ void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
} else {
null_check_info = range_check_info;
}
if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
LIR_Opr obj = new_register(T_OBJECT);
__ move(LIR_OprFact::oopConst(NULL), obj);
__ null_check(obj, new CodeEmitInfo(null_check_info));
}
}
// emit array address setup early so it schedules better
LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
if (GenerateRangeChecks && needs_range_check) {
if (use_length) {
if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
__ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
} else if (use_length) {
// TODO: use a (modified) version of array_range_check that does not require a
// constant length to be loaded to a register
__ cmp(lir_cond_belowEqual, length.result(), index.result());
@ -2634,7 +2654,7 @@ void LIRGenerator::do_Base(Base* x) {
LIR_Opr lock = new_register(T_INT);
__ load_stack_address_monitor(0, lock);
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
// receiver is guaranteed non-NULL so don't need CodeEmitInfo
@ -2644,7 +2664,7 @@ void LIRGenerator::do_Base(Base* x) {
// increment invocation counters if needed
if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
increment_invocation_counter(info);
}
@ -3102,6 +3122,95 @@ void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
}
}
// Generate LIR for a debug-only Assert node: load the left operand into
// a register, leave the right operand as-is (may stay a constant), and
// emit a lir_assert that halts the VM when the condition does not hold.
// Compiles to nothing in product builds.
void LIRGenerator::do_Assert(Assert *x) {
#ifdef ASSERT
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);

  // Only integer assertions are supported.
  assert(tag == intTag, "Only integer assertions are valid!");

  xitem.load_item();         // left side must be materialized
  yitem.dont_load_item();    // right side may remain a constant/stack item
  set_no_result(x);          // an assert produces no value

  LIR_Opr left  = xitem.result();
  LIR_Opr right = yitem.result();

  // halt == true: stop the VM on assertion failure.
  __ lir_assert(lir_cond(cond), left, right, x->message(), true);
#endif
}
// Generate LIR for a range-check predicate. Three cases:
//  1. no operands (or stress mode): unconditionally jump to the deopt stub;
//  2. both operands are integer constants: fold the comparison at compile
//     time and jump to the deopt stub only if the condition already holds;
//  3. otherwise: emit a runtime compare-and-branch to the deopt stub.
void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
Instruction *a = x->x();
Instruction *b = x->y();
// Case 1: always_fail() form (both operands NULL) or forced stress deopt.
if (!a || StressRangeCheckElimination) {
assert(!b || StressRangeCheckElimination, "B must also be null");
CodeEmitInfo *info = state_for(x, x->state());
CodeStub* stub = new PredicateFailedStub(info);
__ jump(stub);
// Case 2: both operands constant -- evaluate the condition now.
} else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
int a_int = a->type()->as_IntConstant()->value();
int b_int = b->type()->as_IntConstant()->value();
bool ok = false;
switch(x->cond()) {
case Instruction::eql: ok = (a_int == b_int); break;
case Instruction::neq: ok = (a_int != b_int); break;
case Instruction::lss: ok = (a_int < b_int); break;
case Instruction::leq: ok = (a_int <= b_int); break;
case Instruction::gtr: ok = (a_int > b_int); break;
case Instruction::geq: ok = (a_int >= b_int); break;
// aeq/beq are the unsigned ("above/below or equal") comparisons.
case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
default: ShouldNotReachHere();
}
// The predicate deoptimizes when its condition HOLDS, so a statically
// true condition becomes an unconditional jump to the stub.
if (ok) {
CodeEmitInfo *info = state_for(x, x->state());
CodeStub* stub = new PredicateFailedStub(info);
__ jump(stub);
}
// Case 3: general runtime compare + conditional branch to the stub.
} else {
ValueTag tag = x->x()->type()->tag();
If::Condition cond = x->cond();
LIRItem xitem(x->x(), this);
LIRItem yitem(x->y(), this);
LIRItem* xin = &xitem;
LIRItem* yin = &yitem;
assert(tag == intTag, "Only integer deoptimizations are valid!");
xin->load_item();
yin->dont_load_item();
set_no_result(x);
LIR_Opr left = xin->result();
LIR_Opr right = yin->result();
CodeEmitInfo *info = state_for(x, x->state());
CodeStub* stub = new PredicateFailedStub(info);
__ cmp(lir_cond(cond), left, right);
__ branch(lir_cond(cond), right->type(), stub);
}
}
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
LIRItemList args(1);
LIRItem value(arg1, this);

View File

@ -412,6 +412,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
case If::leq: l = lir_cond_lessEqual; break;
case If::geq: l = lir_cond_greaterEqual; break;
case If::gtr: l = lir_cond_greater; break;
case If::aeq: l = lir_cond_aboveEqual; break;
case If::beq: l = lir_cond_belowEqual; break;
};
return l;
}
@ -534,6 +536,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
virtual void do_ProfileInvoke (ProfileInvoke* x);
virtual void do_RuntimeCall (RuntimeCall* x);
virtual void do_MemBar (MemBar* x);
virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
virtual void do_Assert (Assert* x);
};

View File

@ -6231,26 +6231,29 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
LIR_Op2* prev_cmp = NULL;
if (prev_branch->stub() == NULL) {
for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
prev_op = instructions->at(j);
if(prev_op->code() == lir_cmp) {
assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
prev_cmp = (LIR_Op2*)prev_op;
assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
LIR_Op2* prev_cmp = NULL;
for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
prev_op = instructions->at(j);
if (prev_op->code() == lir_cmp) {
assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
prev_cmp = (LIR_Op2*)prev_op;
assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
}
}
}
assert(prev_cmp != NULL, "should have found comp instruction for branch");
if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
assert(prev_cmp != NULL, "should have found comp instruction for branch");
if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
// eliminate a conditional branch to the immediate successor
prev_branch->change_block(last_branch->block());
prev_branch->negate_cond();
prev_cmp->set_condition(prev_branch->cond());
instructions->truncate(instructions->length() - 1);
// eliminate a conditional branch to the immediate successor
prev_branch->change_block(last_branch->block());
prev_branch->negate_cond();
prev_cmp->set_condition(prev_branch->cond());
instructions->truncate(instructions->length() - 1);
}
}
}
}

View File

@ -178,7 +178,7 @@ void CE_Eliminator::block_do(BlockBegin* block) {
// 2) substitute conditional expression
// with an IfOp followed by a Goto
// cut if_ away and get node before
Instruction* cur_end = if_->prev(block);
Instruction* cur_end = if_->prev();
// append constants of true- and false-block if necessary
// clone constants because original block must not be destroyed
@ -202,7 +202,7 @@ void CE_Eliminator::block_do(BlockBegin* block) {
}
// append Goto to successor
ValueStack* state_before = if_->is_safepoint() ? if_->state_before() : NULL;
ValueStack* state_before = if_->state_before();
Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
// prepare state for Goto
@ -367,10 +367,11 @@ class BlockMerger: public BlockClosure {
#endif
// find instruction before end & append first instruction of sux block
Instruction* prev = end->prev(block);
Instruction* prev = end->prev();
Instruction* next = sux->next();
assert(prev->as_BlockEnd() == NULL, "must not be a BlockEnd");
prev->set_next(next);
prev->fixup_block_pointers();
sux->disconnect_from_graph();
block->set_end(sux->end());
// add exception handlers of deleted block, if any
@ -533,6 +534,8 @@ public:
void do_ProfileInvoke (ProfileInvoke* x);
void do_RuntimeCall (RuntimeCall* x);
void do_MemBar (MemBar* x);
void do_RangeCheckPredicate(RangeCheckPredicate* x);
void do_Assert (Assert* x);
};
@ -714,6 +717,8 @@ void NullCheckVisitor::do_ProfileCall (ProfileCall* x) { nce()->clear_las
void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {}
void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {}
void NullCheckVisitor::do_MemBar (MemBar* x) {}
void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
void NullCheckVisitor::do_Assert (Assert* x) {}
void NullCheckEliminator::visit(Value* p) {

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,241 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
#define SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP
#include "c1/c1_Instruction.hpp"
// Base class for range check elimination
// Static-only facade: eliminate() runs the whole optimization over the IR
// of one compilation (see RangeCheckEliminator below for the implementation).
class RangeCheckElimination : AllStatic {
public:
// Entry point: perform range check elimination on the given IR.
static void eliminate(IR *ir);
};
// Implementation
// Performs range check elimination for one compilation: propagates value
// bounds over the dominator tree, optionally hoists predicates out of loops
// (optimistic mode, deoptimizing when a predicate fails at runtime), removes
// provably redundant array bound checks, and reorders checks within a block.
class RangeCheckEliminator VALUE_OBJ_CLASS_SPEC {
private:
  int _number_of_instructions;   // instruction count; used to size id-indexed maps below
  bool _optimistic; // Insert predicates and deoptimize when they fail
  IR *_ir;

  define_array(BlockBeginArray, BlockBegin*)
  define_stack(BlockBeginList, BlockBeginArray)
  define_stack(IntegerStack, intArray)
  define_array(IntegerMap, IntegerStack*)

  // Debug-time consistency checker for the optimized IR: walks the blocks
  // and validates reachability/dominator relations after the transformation.
  class Verification : public _ValueObj /*VALUE_OBJ_CLASS_SPEC*/, public BlockClosure {
  private:
    IR *_ir;
    boolArray _used;              // per-block visited marker
    BlockBeginList _current;      // worklist used during traversal
    BlockBeginList _successors;   // scratch list of successor blocks
  public:
    Verification(IR *ir);
    virtual void block_do(BlockBegin *block);
    // True if end is reachable from start without passing through dont_use.
    bool can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use = NULL);
    bool dominates(BlockBegin *dominator, BlockBegin *block);
  };

public:
  // Bounds for an instruction in the form x + c where c is an integer
  // constant and x another instruction
  class Bound : public CompilationResourceObj {
  private:
    int _upper;           // constant part of the upper bound
    Value _upper_instr;   // instruction part of the upper bound (NULL if purely constant)
    int _lower;           // constant part of the lower bound
    Value _lower_instr;   // instruction part of the lower bound (NULL if purely constant)
  public:
    Bound();
    Bound(Value v);
    Bound(Instruction::Condition cond, Value v, int constant = 0);
    Bound(int lower, Value lower_instr, int upper, Value upper_instr);
    ~Bound();
#ifdef ASSERT
    void add_assertion(Instruction *instruction, Instruction *position, int i, Value instr, Instruction::Condition cond);
#endif
    int upper();
    Value upper_instr();
    int lower();
    Value lower_instr();
    void print();
    bool check_no_overflow(int const_value);
    // Merge with another bound: union (control-flow join) / intersection.
    void or_op(Bound *b);
    void and_op(Bound *b);
    bool has_upper();
    bool has_lower();
    void set_upper(int upper, Value upper_instr);
    void set_lower(int lower, Value lower_instr);
    bool is_smaller(Bound *b);
    void remove_upper();
    void remove_lower();
    void add_constant(int value);
    Bound *copy();
  private:
    void init();
  };

  // Instruction visitor that derives a Bound for the instruction kinds the
  // analysis understands; every other kind is deliberately a no-op.
  class Visitor : public InstructionVisitor {
  private:
    Bound *_bound;                // result bound of the last visited instruction
    RangeCheckEliminator *_rce;   // owning eliminator (for shared state/queries)
  public:
    void set_range_check_eliminator(RangeCheckEliminator *rce) { _rce = rce; }
    Bound *bound() const { return _bound; }
    void clear_bound() { _bound = NULL; }
  protected:
    // visitor functions
    void do_Constant       (Constant*        x);
    void do_IfOp           (IfOp*            x);
    void do_LogicOp        (LogicOp*         x);
    void do_ArithmeticOp   (ArithmeticOp*    x);
    void do_Phi            (Phi*             x);
    void do_StoreField     (StoreField*      x) { /* nothing to do */ };
    void do_StoreIndexed   (StoreIndexed*    x) { /* nothing to do */ };
    void do_MonitorEnter   (MonitorEnter*    x) { /* nothing to do */ };
    void do_MonitorExit    (MonitorExit*     x) { /* nothing to do */ };
    void do_Invoke         (Invoke*          x) { /* nothing to do */ };
    void do_UnsafePutRaw   (UnsafePutRaw*    x) { /* nothing to do */ };
    void do_UnsafePutObject(UnsafePutObject* x) { /* nothing to do */ };
    void do_Intrinsic      (Intrinsic*       x) { /* nothing to do */ };
    void do_Local          (Local*           x) { /* nothing to do */ };
    void do_LoadField      (LoadField*       x) { /* nothing to do */ };
    void do_ArrayLength    (ArrayLength*     x) { /* nothing to do */ };
    void do_LoadIndexed    (LoadIndexed*     x) { /* nothing to do */ };
    void do_NegateOp       (NegateOp*        x) { /* nothing to do */ };
    void do_ShiftOp        (ShiftOp*         x) { /* nothing to do */ };
    void do_CompareOp      (CompareOp*       x) { /* nothing to do */ };
    void do_Convert        (Convert*         x) { /* nothing to do */ };
    void do_NullCheck      (NullCheck*       x) { /* nothing to do */ };
    void do_TypeCast       (TypeCast*        x) { /* nothing to do */ };
    void do_NewInstance    (NewInstance*     x) { /* nothing to do */ };
    void do_NewTypeArray   (NewTypeArray*    x) { /* nothing to do */ };
    void do_NewObjectArray (NewObjectArray*  x) { /* nothing to do */ };
    void do_NewMultiArray  (NewMultiArray*   x) { /* nothing to do */ };
    void do_CheckCast      (CheckCast*       x) { /* nothing to do */ };
    void do_InstanceOf     (InstanceOf*      x) { /* nothing to do */ };
    void do_BlockBegin     (BlockBegin*      x) { /* nothing to do */ };
    void do_Goto           (Goto*            x) { /* nothing to do */ };
    void do_If             (If*              x) { /* nothing to do */ };
    void do_IfInstanceOf   (IfInstanceOf*    x) { /* nothing to do */ };
    void do_TableSwitch    (TableSwitch*     x) { /* nothing to do */ };
    void do_LookupSwitch   (LookupSwitch*    x) { /* nothing to do */ };
    void do_Return         (Return*          x) { /* nothing to do */ };
    void do_Throw          (Throw*           x) { /* nothing to do */ };
    void do_Base           (Base*            x) { /* nothing to do */ };
    void do_OsrEntry       (OsrEntry*        x) { /* nothing to do */ };
    void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ };
    void do_RoundFP        (RoundFP*         x) { /* nothing to do */ };
    void do_UnsafeGetRaw   (UnsafeGetRaw*    x) { /* nothing to do */ };
    void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ };
    void do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { /* nothing to do */ };
    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ };
    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
    void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ };
    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
    void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
    void do_MemBar         (MemBar*          x) { /* nothing to do */ };
    void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
    void do_Assert         (Assert*          x) { /* nothing to do */ };
  };

#ifdef ASSERT
  void add_assertions(Bound *bound, Instruction *instruction, Instruction *position);
#endif

  define_array(BoundArray, Bound *)
  define_stack(BoundStack, BoundArray)
  define_array(BoundMap, BoundStack *)
  define_array(AccessIndexedArray, AccessIndexed *)
  define_stack(AccessIndexedList, AccessIndexedArray)
  define_array(InstructionArray, Instruction *)
  define_stack(InstructionList, InstructionArray)

  // Bookkeeping for in-block motion, keyed by array instruction.
  // NOTE(review): _min/_max appear to track the extreme constant index
  // offsets of the collected accesses — confirm in c1_RangeCheckElimination.cpp.
  class AccessIndexedInfo : public CompilationResourceObj  {
  public:
    AccessIndexedList *_list;   // indexed accesses collected for one array
    int _min;
    int _max;
  };

  define_array(AccessIndexedInfoArray, AccessIndexedInfo *)
  BoundMap _bounds; // Mapping from Instruction's id to current bound
  AccessIndexedInfoArray _access_indexed_info; // Mapping from Instruction's id to AccessIndexedInfo for in block motion
  Visitor _visitor;

public:
  RangeCheckEliminator(IR *ir);

  IR *ir() const { return _ir; }

  // Pass over the dominator tree to identify blocks where there's an opportunity for optimization
  bool set_process_block_flags(BlockBegin *block);
  // The core of the optimization work: pass over the dominator tree
  // to propagate bound information, insert predicate out of loops,
  // eliminate bound checks when possible and perform in block motion
  void calc_bounds(BlockBegin *block, BlockBegin *loop_header);
  // reorder bound checks within a block in order to eliminate some of them
  void in_block_motion(BlockBegin *block, AccessIndexedList &accessIndexed, InstructionList &arrays);

  // update/access current bound
  void update_bound(IntegerStack &pushed, Value v, Instruction::Condition cond, Value value, int constant);
  void update_bound(IntegerStack &pushed, Value v, Bound *bound);
  Bound *get_bound(Value v);

  bool loop_invariant(BlockBegin *loop_header, Instruction *instruction);                                    // check for loop invariance
  void add_access_indexed_info(InstructionList &indices, int i, Value instruction, AccessIndexed *ai);       // record indexed access for in block motion
  void remove_range_check(AccessIndexed *ai);                                                                // Mark this instruction as not needing a range check
  void add_if_condition(IntegerStack &pushed, Value x, Value y, Instruction::Condition condition);           // Update bound for an If
  bool in_array_bound(Bound *bound, Value array);                                                            // Check whether bound is known to fall within array

  // helper functions to work with predicates
  Instruction* insert_after(Instruction* insert_position, Instruction* instr, int bci);
  Instruction* predicate(Instruction* left, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
  Instruction* predicate_cmp_with_const(Instruction* instr, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=1); // NOTE(review): default bci=1 differs from the sibling helpers (bci=-1); possibly a dropped '-' — confirm
  Instruction* predicate_add(Instruction* left, int left_const, Instruction::Condition cond, Instruction* right, ValueStack* state, Instruction *insert_position, int bci=-1);
  Instruction* predicate_add_cmp_with_const(Instruction* left, int left_const, Instruction::Condition cond, int constant, ValueStack* state, Instruction *insert_position, int bci=-1);

  void insert_deoptimization(ValueStack *state, Instruction *insert_position, Instruction *array_instr,      // Add predicate
                             Instruction *length_instruction, Instruction *lower_instr, int lower,
                             Instruction *upper_instr, int upper, AccessIndexed *ai);
  bool is_ok_for_deoptimization(Instruction *insert_position, Instruction *array_instr,                      // Can we safely add a predicate?
                                Instruction *length_instr, Instruction *lower_instr,
                                int lower, Instruction *upper_instr, int upper);
  void process_if(IntegerStack &pushed, BlockBegin *block, If *cond);                                        // process If Instruction
  void process_access_indexed(BlockBegin *loop_header, BlockBegin *block, AccessIndexed *ai);                // process indexed access

  void dump_condition_stack(BlockBegin *cur_block);
  static void print_statistics();
};
#endif // SHARE_VM_C1_C1_RANGECHECKELIMINATION_HPP

View File

@ -1330,6 +1330,50 @@ JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END
// Runtime entry for a failed range-check predicate: compiled code detected
// that an assumption inserted by range check elimination no longer holds.
// Make the nmethod non-entrant, record the trap in the method's MDO (building
// one if needed) and deoptimize the caller frame so execution resumes in the
// interpreter.
JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
  ResourceMark rm;

  // Predicates are only emitted by C1 in non-tiered mode.
  assert(!TieredCompilation, "incompatible with tiered compilation");

  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert (nm != NULL, "no more nmethod?");
  nm->make_not_entrant();

  methodHandle m(nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
    // Build an MDO. Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

  if (mdo != NULL) {
    // Record the trap so the compilation policy sees how often predicates fail.
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(thread);
    methodHandle inlinee = methodHandle(vfst.method());
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    // Fix: "%x" expects an unsigned int but receives a pointer — on LP64 this
    // truncates the pc and is undefined behavior in varargs. Use the HotSpot
    // INTPTR_FORMAT macro with an intptr_t argument instead.
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), (intptr_t)caller_frame.pc());
  }

  Deoptimization::deoptimize_frame(thread, caller_frame.id());
JRT_END
#ifndef PRODUCT
void Runtime1::print_statistics() {

View File

@ -71,6 +71,7 @@ class StubAssembler;
stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \
stub(counter_overflow) \
stub(predicate_failed_trap) \
last_entry(number_of_ids)
#define DECLARE_STUB_ID(x) x ## _id ,
@ -190,6 +191,8 @@ class Runtime1: public AllStatic {
static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length);
static int is_instance_of(oopDesc* mirror, oopDesc* obj);
static void predicate_failed_trap(JavaThread* thread);
static void print_statistics() PRODUCT_RETURN;
};

View File

@ -26,9 +26,9 @@
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_IR.hpp"
#include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueStack.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef PRODUCT
int ValueMap::_number_of_finds = 0;
@ -192,10 +192,6 @@ Value ValueMap::find_insert(Value x) {
&& lf->field()->holder() == field->holder() \
&& (all_offsets || lf->field()->offset() == field->offset());
#define MUST_KILL_EXCEPTION(must_kill, entry, value) \
assert(entry->nesting() < nesting(), "must not find bigger nesting than current"); \
bool must_kill = (entry->nesting() == nesting() - 1);
void ValueMap::kill_memory() {
GENERIC_KILL_VALUE(MUST_KILL_MEMORY);
@ -209,11 +205,6 @@ void ValueMap::kill_field(ciField* field, bool all_offsets) {
GENERIC_KILL_VALUE(MUST_KILL_FIELD);
}
void ValueMap::kill_exception() {
GENERIC_KILL_VALUE(MUST_KILL_EXCEPTION);
}
void ValueMap::kill_map(ValueMap* map) {
assert(is_global_value_numbering(), "only for global value numbering");
_killed_values.set_union(&map->_killed_values);
@ -274,6 +265,8 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
GlobalValueNumbering* _gvn;
BlockList _loop_blocks;
bool _too_complicated_loop;
bool _has_field_store[T_ARRAY + 1];
bool _has_indexed_store[T_ARRAY + 1];
// simplified access to methods of GlobalValueNumbering
ValueMap* current_map() { return _gvn->current_map(); }
@ -281,8 +274,16 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
// implementation for abstract methods of ValueNumberingVisitor
void kill_memory() { _too_complicated_loop = true; }
void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); };
void kill_array(ValueType* type) { current_map()->kill_array(type); };
void kill_field(ciField* field, bool all_offsets) {
current_map()->kill_field(field, all_offsets);
assert(field->type()->basic_type() >= 0 && field->type()->basic_type() <= T_ARRAY, "Invalid type");
_has_field_store[field->type()->basic_type()] = true;
}
void kill_array(ValueType* type) {
current_map()->kill_array(type);
BasicType basic_type = as_BasicType(type); assert(basic_type >= 0 && basic_type <= T_ARRAY, "Invalid type");
_has_indexed_store[basic_type] = true;
}
public:
ShortLoopOptimizer(GlobalValueNumbering* gvn)
@ -290,11 +291,141 @@ class ShortLoopOptimizer : public ValueNumberingVisitor {
, _loop_blocks(ValueMapMaxLoopSize)
, _too_complicated_loop(false)
{
for (int i=0; i<= T_ARRAY; i++){
_has_field_store[i] = false;
_has_indexed_store[i] = false;
}
}
bool has_field_store(BasicType type) {
assert(type >= 0 && type <= T_ARRAY, "Invalid type");
return _has_field_store[type];
}
bool has_indexed_store(BasicType type) {
assert(type >= 0 && type <= T_ARRAY, "Invalid type");
return _has_indexed_store[type];
}
bool process(BlockBegin* loop_header);
};
// Simple loop invariant code motion for short loops: instructions proven
// invariant are spliced out of the loop body and inserted into the block
// dominating the loop header. All work happens in the constructor.
class LoopInvariantCodeMotion : public StackObj  {
 private:
  GlobalValueNumbering* _gvn;                // value numbering driver; also tracks which values are processed/invariant
  ShortLoopOptimizer* _short_loop_optimizer; // supplies field/indexed store summaries for the loop
  Instruction* _insertion_point;             // instruction after which hoisted code is appended
  ValueStack * _state;                       // state installed on hoisted instructions (state_before/exception_state)

  void set_invariant(Value v) const    { _gvn->set_processed(v); }
  bool is_invariant(Value v) const     { return _gvn->is_processed(v); }

  // Hoist every invariant instruction of one loop block.
  void process_block(BlockBegin* block);

 public:
  LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks);
};
// Set up the insertion point in the block dominating the loop header and the
// ValueStack to attach to hoisted instructions, then process all loop blocks.
LoopInvariantCodeMotion::LoopInvariantCodeMotion(ShortLoopOptimizer *slo, GlobalValueNumbering* gvn, BlockBegin* loop_header, BlockList* loop_blocks)
  : _gvn(gvn), _short_loop_optimizer(slo) {

  TRACE_VALUE_NUMBERING(tty->print_cr("using loop invariant code motion loop_header = %d", loop_header->block_id()));
  TRACE_VALUE_NUMBERING(tty->print_cr("** loop invariant code motion for short loop B%d", loop_header->block_id()));

  BlockBegin* insertion_block = loop_header->dominator();
  if (insertion_block->number_of_preds() == 0) {
    return;  // only the entry block does not have a predecessor
  }

  assert(insertion_block->end()->as_Base() == NULL, "cannot insert into entry block");
  // Hoisted instructions go between the dominator's last ordinary
  // instruction and its BlockEnd.
  _insertion_point = insertion_block->end()->prev();

  BlockEnd *block_end = insertion_block->end();
  _state = block_end->state_before();

  if (!_state) {
    // If, TableSwitch and LookupSwitch always have state_before when
    // loop invariant code motion happens..
    assert(block_end->as_Goto(), "Block has to be goto");
    _state = block_end->state();
  }

  // the loop_blocks are filled by going backward from the loop header, so this processing order is best
  assert(loop_blocks->at(0) == loop_header, "loop header must be first loop block");
  process_block(loop_header);
  for (int i = loop_blocks->length() - 1; i >= 1; i--) {
    process_block(loop_blocks->at(i));
  }
}
// Walk the instruction list of one loop block; every instruction proven loop
// invariant is unlinked from the block and appended at the insertion point in
// front of the loop. Only a small set of side-effect-free instruction kinds
// is considered.
void LoopInvariantCodeMotion::process_block(BlockBegin* block) {
  TRACE_VALUE_NUMBERING(tty->print_cr("processing block B%d", block->block_id()));

  Instruction* prev = block;
  Instruction* cur = block->next();

  while (cur != NULL) {

    // determine if cur instruction is loop invariant
    // only selected instruction types are processed here
    bool cur_invariant = false;

    if (cur->as_Constant() != NULL) {
      cur_invariant = !cur->can_trap();
    } else if (cur->as_ArithmeticOp() != NULL || cur->as_LogicOp() != NULL || cur->as_ShiftOp() != NULL) {
      assert(cur->as_Op2() != NULL, "must be Op2");
      Op2* op2 = (Op2*)cur;
      // binary op is invariant when it cannot trap and both inputs are invariant
      cur_invariant = !op2->can_trap() && is_invariant(op2->x()) && is_invariant(op2->y());
    } else if (cur->as_LoadField() != NULL) {
      LoadField* lf = (LoadField*)cur;
      // deoptimizes on NullPointerException
      // must also be sure no store to a field of this type happens in the loop
      cur_invariant = !lf->needs_patching() && !lf->field()->is_volatile() && !_short_loop_optimizer->has_field_store(lf->field()->type()->basic_type()) && is_invariant(lf->obj());
    } else if (cur->as_ArrayLength() != NULL) {
      ArrayLength *length = cur->as_ArrayLength();
      cur_invariant = is_invariant(length->array());
    } else if (cur->as_LoadIndexed() != NULL) {
      LoadIndexed *li = (LoadIndexed *)cur->as_LoadIndexed();
      // no indexed store of this element type in the loop, and array + index invariant
      cur_invariant = !_short_loop_optimizer->has_indexed_store(as_BasicType(cur->type())) && is_invariant(li->array()) && is_invariant(li->index());
    }

    if (cur_invariant) {
      // perform value numbering and mark instruction as loop-invariant
      _gvn->substitute(cur);

      if (cur->as_Constant() == NULL) {
        // ensure that code for non-constant instructions is always generated
        cur->pin();
      }

      // remove cur instruction from loop block and append it to block before loop
      // (splice it into the list right after _insertion_point)
      Instruction* next = cur->next();
      Instruction* in = _insertion_point->next();
      _insertion_point = _insertion_point->set_next(cur);
      cur->set_next(in);

      //  Deoptimize on exception
      cur->set_flag(Instruction::DeoptimizeOnException, true);

      //  Clear exception handlers
      cur->set_exception_handlers(NULL);

      TRACE_VALUE_NUMBERING(tty->print_cr("Instruction %c%d is loop invariant", cur->type()->tchar(), cur->id()));

      // re-home the instruction's debug states to the pre-loop state
      if (cur->state_before() != NULL) {
        cur->set_state_before(_state->copy());
      }
      if (cur->exception_state() != NULL) {
        cur->set_exception_state(_state->copy());
      }

      // unlink cur from the loop block; prev stays, continue after cur's old position
      cur = prev->set_next(next);

    } else {
      prev = cur;
      cur = cur->next();
    }
  }
}
bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
TRACE_VALUE_NUMBERING(tty->print_cr("** loop header block"));
@ -316,6 +447,10 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
for (int j = block->number_of_preds() - 1; j >= 0; j--) {
BlockBegin* pred = block->pred_at(j);
if (pred->is_set(BlockBegin::osr_entry_flag)) {
return false;
}
ValueMap* pred_map = value_map_of(pred);
if (pred_map != NULL) {
current_map()->kill_map(pred_map);
@ -336,6 +471,12 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
}
}
bool optimistic = this->_gvn->compilation()->is_optimistic();
if (UseLoopInvariantCodeMotion && optimistic) {
LoopInvariantCodeMotion code_motion(this, _gvn, loop_header, &_loop_blocks);
}
TRACE_VALUE_NUMBERING(tty->print_cr("** loop successfully optimized"));
return true;
}
@ -344,11 +485,11 @@ bool ShortLoopOptimizer::process(BlockBegin* loop_header) {
GlobalValueNumbering::GlobalValueNumbering(IR* ir)
: _current_map(NULL)
, _value_maps(ir->linear_scan_order()->length(), NULL)
, _compilation(ir->compilation())
{
TRACE_VALUE_NUMBERING(tty->print_cr("****** start of global value numbering"));
ShortLoopOptimizer short_loop_optimizer(this);
int subst_count = 0;
BlockList* blocks = ir->linear_scan_order();
int num_blocks = blocks->length();
@ -357,6 +498,12 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
assert(start_block == ir->start() && start_block->number_of_preds() == 0 && start_block->dominator() == NULL, "must be start block");
assert(start_block->next()->as_Base() != NULL && start_block->next()->next() == NULL, "start block must not have instructions");
// method parameters are not linked in instructions list, so process them separateley
for_each_state_value(start_block->state(), value,
assert(value->as_Local() != NULL, "only method parameters allowed");
set_processed(value);
);
// initial, empty value map with nesting 0
set_value_map_of(start_block, new ValueMap());
@ -374,7 +521,7 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
// create new value map with increased nesting
_current_map = new ValueMap(value_map_of(dominator));
if (num_preds == 1) {
if (num_preds == 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
assert(dominator == block->pred_at(0), "dominator must be equal to predecessor");
// nothing to do here
@ -403,36 +550,41 @@ GlobalValueNumbering::GlobalValueNumbering(IR* ir)
}
}
if (block->is_set(BlockBegin::exception_entry_flag)) {
current_map()->kill_exception();
}
// phi functions are not linked in instructions list, so process them separateley
for_each_phi_fun(block, phi,
set_processed(phi);
);
TRACE_VALUE_NUMBERING(tty->print("value map before processing block: "); current_map()->print());
// visit all instructions of this block
for (Value instr = block->next(); instr != NULL; instr = instr->next()) {
assert(!instr->has_subst(), "substitution already set");
// check if instruction kills any values
instr->visit(this);
if (instr->hash() != 0) {
Value f = current_map()->find_insert(instr);
if (f != instr) {
assert(!f->has_subst(), "can't have a substitution");
instr->set_subst(f);
subst_count++;
}
}
// perform actual value numbering
substitute(instr);
}
// remember value map for successors
set_value_map_of(block, current_map());
}
if (subst_count != 0) {
if (_has_substitutions) {
SubstitutionResolver resolver(ir);
}
TRACE_VALUE_NUMBERING(tty->print("****** end of global value numbering. "); ValueMap::print_statistics());
}
// Value-number one instruction: look it up in (or insert it into) the current
// value map. If an equivalent value already exists, record it as the
// substitution for instr and remember that substitutions must be resolved.
// In either case instr counts as processed afterwards.
void GlobalValueNumbering::substitute(Instruction* instr) {
  assert(!instr->has_subst(), "substitution already set");
  Value replacement = current_map()->find_insert(instr);
  if (replacement == instr) {
    // No equivalent value was known; instr is now its own canonical representative.
    set_processed(instr);
    return;
  }
  assert(!replacement->has_subst(), "can't have a substitution");
  TRACE_VALUE_NUMBERING(tty->print_cr("substitution for %d set to %d", instr->id(), replacement->id()));
  instr->set_subst(replacement);
  _has_substitutions = true;
  set_processed(instr);
}

View File

@ -206,6 +206,8 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_ProfileInvoke (ProfileInvoke* x) { /* nothing to do */ };
void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ };
void do_MemBar (MemBar* x) { /* nothing to do */ };
void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
void do_Assert (Assert* x) { /* nothing to do */ };
};
@ -225,15 +227,22 @@ class ValueNumberingEffects: public ValueNumberingVisitor {
class GlobalValueNumbering: public ValueNumberingVisitor {
private:
Compilation* _compilation; // compilation data
ValueMap* _current_map; // value map of current block
ValueMapArray _value_maps; // list of value maps for all blocks
ValueSet _processed_values; // marker for instructions that were already processed
bool _has_substitutions; // set to true when substitutions must be resolved
public:
// accessors
Compilation* compilation() const { return _compilation; }
ValueMap* current_map() { return _current_map; }
ValueMap* value_map_of(BlockBegin* block) { return _value_maps.at(block->linear_scan_number()); }
void set_value_map_of(BlockBegin* block, ValueMap* map) { assert(value_map_of(block) == NULL, ""); _value_maps.at_put(block->linear_scan_number(), map); }
bool is_processed(Value v) { return _processed_values.contains(v); }
void set_processed(Value v) { _processed_values.put(v); }
// implementation for abstract methods of ValueNumberingVisitor
void kill_memory() { current_map()->kill_memory(); }
void kill_field(ciField* field, bool all_offsets) { current_map()->kill_field(field, all_offsets); }
@ -241,6 +250,7 @@ class GlobalValueNumbering: public ValueNumberingVisitor {
// main entry point that performs global value numbering
GlobalValueNumbering(IR* ir);
void substitute(Instruction* instr); // substitute instruction if it is contained in current value map
};
#endif // SHARE_VM_C1_C1_VALUEMAP_HPP

View File

@ -119,6 +119,24 @@
develop(bool, UseGlobalValueNumbering, true, \
"Use Global Value Numbering (separate phase)") \
\
product(bool, UseLoopInvariantCodeMotion, true, \
"Simple loop invariant code motion for short loops during GVN") \
\
develop(bool, TracePredicateFailedTraps, false, \
"trace runtime traps caused by predicate failure") \
\
develop(bool, StressLoopInvariantCodeMotion, false, \
"stress loop invariant code motion") \
\
develop(bool, TraceRangeCheckElimination, false, \
"Trace Range Check Elimination") \
\
develop(bool, AssertRangeCheckElimination, false, \
"Assert Range Check Elimination") \
\
develop(bool, StressRangeCheckElimination, false, \
"stress Range Check Elimination") \
\
develop(bool, PrintValueNumbering, false, \
"Print Value Numbering") \
\

View File

@ -790,6 +790,17 @@ int ciMethod::scale_count(int count, float prof_factor) {
return count;
}
// ------------------------------------------------------------------
// ciMethod::is_ignored_by_security_stack_walk
//
// True if this method should be skipped by security stack walks.
// Requires the method to be loaded; transitions into the VM and delegates
// to the VM-side Method::is_ignored_by_security_stack_walk().
// NOTE(review): the original header comment named a different function
// ("is_special_get_caller_class_method"); corrected to match the definition.
bool ciMethod::is_ignored_by_security_stack_walk() const {
  check_is_loaded();
  VM_ENTRY_MARK;
  return get_Method()->is_ignored_by_security_stack_walk();
}
// ------------------------------------------------------------------
// invokedynamic support

View File

@ -166,8 +166,9 @@ class ciMethod : public ciMetadata {
// Code size for inlining decisions.
int code_size_for_inlining();
bool force_inline() { return get_Method()->force_inline(); }
bool dont_inline() { return get_Method()->dont_inline(); }
bool caller_sensitive() { return get_Method()->caller_sensitive(); }
bool force_inline() { return get_Method()->force_inline(); }
bool dont_inline() { return get_Method()->dont_inline(); }
int comp_level();
int highest_osr_comp_level();
@ -264,6 +265,9 @@ class ciMethod : public ciMetadata {
int instructions_size();
int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC
// Stack walking support
bool is_ignored_by_security_stack_walk() const;
// JSR 292 support
bool is_method_handle_intrinsic() const;
bool is_compiled_lambda_form() const;

View File

@ -1735,9 +1735,14 @@ ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_d
Symbol* name) {
vmSymbols::SID sid = vmSymbols::find_sid(name);
// Privileged code can use all annotations. Other code silently drops some.
bool privileged = loader_data->is_the_null_class_loader_data() ||
loader_data->is_anonymous();
const bool privileged = loader_data->is_the_null_class_loader_data() ||
loader_data->is_ext_class_loader_data() ||
loader_data->is_anonymous();
switch (sid) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_reflect_CallerSensitive_signature):
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
return _method_CallerSensitive;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature):
if (_location != _in_method) break; // only allow for methods
if (!privileged) break; // only allow in privileged code
@ -1775,6 +1780,8 @@ ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() {
}
void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
if (has_annotation(_method_CallerSensitive))
m->set_caller_sensitive(true);
if (has_annotation(_method_ForceInline))
m->set_force_inline(true);
if (has_annotation(_method_DontInline))

View File

@ -119,6 +119,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
enum Location { _in_field, _in_method, _in_class };
enum ID {
_unknown = 0,
_method_CallerSensitive,
_method_ForceInline,
_method_DontInline,
_method_LambdaForm_Compiled,

View File

@ -321,6 +321,13 @@ ClassLoaderData::~ClassLoaderData() {
}
}
/**
* Returns true if this class loader data is for the extension class loader.
*/
bool ClassLoaderData::is_ext_class_loader_data() const {
return SystemDictionary::is_ext_class_loader(class_loader());
}
Metaspace* ClassLoaderData::metaspace_non_null() {
assert(!DumpSharedSpaces, "wrong metaspace!");
// If the metaspace has not been allocated, create a new one. Might want

View File

@ -191,6 +191,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
bool is_the_null_class_loader_data() const {
return this == _the_null_class_loader_data;
}
bool is_ext_class_loader_data() const;
// The Metaspace is created lazily so may be NULL. This
// method will allocate a Metaspace if needed.

View File

@ -1050,15 +1050,16 @@ class java_lang_invoke_MemberName: AllStatic {
// Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants):
enum {
MN_IS_METHOD = 0x00010000, // method (not constructor)
MN_IS_CONSTRUCTOR = 0x00020000, // constructor
MN_IS_FIELD = 0x00040000, // field
MN_IS_TYPE = 0x00080000, // nested type
MN_IS_METHOD = 0x00010000, // method (not constructor)
MN_IS_CONSTRUCTOR = 0x00020000, // constructor
MN_IS_FIELD = 0x00040000, // field
MN_IS_TYPE = 0x00080000, // nested type
MN_CALLER_SENSITIVE = 0x00100000, // @CallerSensitive annotation detected
MN_REFERENCE_KIND_SHIFT = 24, // refKind
MN_REFERENCE_KIND_MASK = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT,
MN_REFERENCE_KIND_MASK = 0x0F000000 >> MN_REFERENCE_KIND_SHIFT,
// The SEARCH_* bits are not for MN.flags but for the matchFlags argument of MHN.getMembers:
MN_SEARCH_SUPERCLASSES = 0x00100000, // walk super classes
MN_SEARCH_INTERFACES = 0x00200000 // walk implemented interfaces
MN_SEARCH_SUPERCLASSES = 0x00100000, // walk super classes
MN_SEARCH_INTERFACES = 0x00200000 // walk implemented interfaces
};
// Accessors for code generation:

View File

@ -677,9 +677,14 @@ oop StringTable::lookup(Symbol* symbol) {
ResourceMark rm;
int length;
jchar* chars = symbol->as_unicode(length);
unsigned int hashValue = hash_string(chars, length);
int index = the_table()->hash_to_index(hashValue);
return the_table()->lookup(index, chars, length, hashValue);
return lookup(chars, length);
}
oop StringTable::lookup(jchar* name, int len) {
unsigned int hash = hash_string(name, len);
int index = the_table()->hash_to_index(hash);
return the_table()->lookup(index, name, len, hash);
}

View File

@ -287,6 +287,7 @@ public:
// Probing
static oop lookup(Symbol* symbol);
static oop lookup(jchar* chars, int length);
// Interning
static oop intern(Symbol* symbol, TRAPS);

View File

@ -146,6 +146,17 @@ bool SystemDictionary::is_parallelDefine(Handle class_loader) {
}
return false;
}
/**
* Returns true if the passed class loader is the extension class loader.
*/
bool SystemDictionary::is_ext_class_loader(Handle class_loader) {
if (class_loader.is_null()) {
return false;
}
return (class_loader->klass()->name() == vmSymbols::sun_misc_Launcher_ExtClassLoader());
}
// ----------------------------------------------------------------------------
// Resolving of classes
@ -816,13 +827,28 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle cla
// We didn't go as far as Klass::restore_unshareable_info(),
// so nothing to clean up.
} else {
MutexLocker mu(SystemDictionary_lock, THREAD);
Klass* kk = find_class(name, ik->class_loader_data());
Klass *kk;
{
MutexLocker mu(SystemDictionary_lock, THREAD);
kk = find_class(name, ik->class_loader_data());
}
if (kk != NULL) {
// No clean up is needed if the shared class has been entered
// into system dictionary, as load_shared_class() won't be called
// again.
} else {
// This must be done outside of the SystemDictionary_lock to
// avoid deadlock.
//
// Note that Klass::restore_unshareable_info (called via
// load_instance_class above) is also called outside
// of SystemDictionary_lock. Other threads are blocked from
// loading this class because they are waiting on the
// SystemDictionary_lock until this thread removes
// the placeholder below.
//
// This need to be re-thought when parallel-capable non-boot
// classloaders are supported by CDS (today they're not).
clean_up_shared_class(ik, class_loader, THREAD);
}
}
@ -2185,10 +2211,9 @@ Symbol* SystemDictionary::find_resolution_error(constantPoolHandle pool, int whi
// Make sure all class components (including arrays) in the given
// signature will be resolved to the same class in both loaders.
// Returns the name of the type that failed a loader constraint check, or
// NULL if no constraint failed. The returned C string needs cleaning up
// with a ResourceMark in the caller. No exception except OOME is thrown.
// NULL if no constraint failed. No exception except OOME is thrown.
// Arrays are not added to the loader constraint table, their elements are.
char* SystemDictionary::check_signature_loaders(Symbol* signature,
Symbol* SystemDictionary::check_signature_loaders(Symbol* signature,
Handle loader1, Handle loader2,
bool is_method, TRAPS) {
// Nothing to do if loaders are the same.
@ -2196,14 +2221,12 @@ char* SystemDictionary::check_signature_loaders(Symbol* signature,
return NULL;
}
ResourceMark rm(THREAD);
SignatureStream sig_strm(signature, is_method);
while (!sig_strm.is_done()) {
if (sig_strm.is_object()) {
Symbol* s = sig_strm.as_symbol(CHECK_NULL);
Symbol* sig = s;
Symbol* sig = sig_strm.as_symbol(CHECK_NULL);
if (!add_loader_constraint(sig, loader1, loader2, THREAD)) {
return sig->as_C_string();
return sig;
}
}
sig_strm.next();

View File

@ -106,6 +106,7 @@ class SymbolPropertyTable;
do_klass(ThreadDeath_klass, java_lang_ThreadDeath, Pre ) \
do_klass(Exception_klass, java_lang_Exception, Pre ) \
do_klass(RuntimeException_klass, java_lang_RuntimeException, Pre ) \
do_klass(SecurityManager_klass, java_lang_SecurityManager, Pre ) \
do_klass(ProtectionDomain_klass, java_security_ProtectionDomain, Pre ) \
do_klass(AccessControlContext_klass, java_security_AccessControlContext, Pre ) \
do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre ) \
@ -138,13 +139,14 @@ class SymbolPropertyTable;
/* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
do_klass(lambda_MagicLambdaImpl_klass, java_lang_invoke_MagicLambdaImpl, Opt ) \
do_klass(lambda_MagicLambdaImpl_klass, java_lang_invoke_MagicLambdaImpl, Opt ) \
do_klass(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt ) \
do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
do_klass(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt ) \
do_klass(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15 ) \
do_klass(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15 ) \
do_klass(reflect_CallerSensitive_klass, sun_reflect_CallerSensitive, Opt ) \
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
do_klass(MethodHandle_klass, java_lang_invoke_MethodHandle, Pre_JSR292 ) \
@ -483,8 +485,8 @@ public:
// Check class loader constraints
static bool add_loader_constraint(Symbol* name, Handle loader1,
Handle loader2, TRAPS);
static char* check_signature_loaders(Symbol* signature, Handle loader1,
Handle loader2, bool is_method, TRAPS);
static Symbol* check_signature_loaders(Symbol* signature, Handle loader1,
Handle loader2, bool is_method, TRAPS);
// JSR 292
// find a java.lang.invoke.MethodHandle.invoke* method for a given signature
@ -628,12 +630,15 @@ private:
static bool is_parallelCapable(Handle class_loader);
static bool is_parallelDefine(Handle class_loader);
public:
static bool is_ext_class_loader(Handle class_loader);
private:
static Klass* find_shared_class(Symbol* class_name);
// Setup link to hierarchy
static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
private:
// We pass in the hashtable index so we can calculate it outside of
// the SystemDictionary_lock.

View File

@ -91,6 +91,7 @@
template(java_lang_StringBuffer, "java/lang/StringBuffer") \
template(java_lang_StringBuilder, "java/lang/StringBuilder") \
template(java_lang_CharSequence, "java/lang/CharSequence") \
template(java_lang_SecurityManager, "java/lang/SecurityManager") \
template(java_security_AccessControlContext, "java/security/AccessControlContext") \
template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \
template(java_io_OutputStream, "java/io/OutputStream") \
@ -211,6 +212,8 @@
template(sun_reflect_SerializationConstructorAccessorImpl, "sun/reflect/SerializationConstructorAccessorImpl") \
template(sun_reflect_DelegatingClassLoader, "sun/reflect/DelegatingClassLoader") \
template(sun_reflect_Reflection, "sun/reflect/Reflection") \
template(sun_reflect_CallerSensitive, "sun/reflect/CallerSensitive") \
template(sun_reflect_CallerSensitive_signature, "Lsun/reflect/CallerSensitive;") \
template(checkedExceptions_name, "checkedExceptions") \
template(clazz_name, "clazz") \
template(exceptionTypes_name, "exceptionTypes") \
@ -343,6 +346,7 @@
template(contextClassLoader_name, "contextClassLoader") \
template(inheritedAccessControlContext_name, "inheritedAccessControlContext") \
template(isPrivileged_name, "isPrivileged") \
template(getClassContext_name, "getClassContext") \
template(wait_name, "wait") \
template(checkPackageAccess_name, "checkPackageAccess") \
template(stackSize_name, "stackSize") \
@ -463,6 +467,7 @@
template(void_classloader_signature, "()Ljava/lang/ClassLoader;") \
template(void_object_signature, "()Ljava/lang/Object;") \
template(void_class_signature, "()Ljava/lang/Class;") \
template(void_class_array_signature, "()[Ljava/lang/Class;") \
template(void_string_signature, "()Ljava/lang/String;") \
template(object_array_object_signature, "([Ljava/lang/Object;)Ljava/lang/Object;") \
template(object_object_array_object_signature, "(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;")\
@ -705,9 +710,8 @@
do_intrinsic(_getLength, java_lang_reflect_Array, getLength_name, object_int_signature, F_SN) \
do_name( getLength_name, "getLength") \
\
do_intrinsic(_getCallerClass, sun_reflect_Reflection, getCallerClass_name, getCallerClass_signature, F_SN) \
do_intrinsic(_getCallerClass, sun_reflect_Reflection, getCallerClass_name, void_class_signature, F_SN) \
do_name( getCallerClass_name, "getCallerClass") \
do_signature(getCallerClass_signature, "(I)Ljava/lang/Class;") \
\
do_intrinsic(_newArray, java_lang_reflect_Array, newArray_name, newArray_signature, F_SN) \
do_name( newArray_name, "newArray") \

Some files were not shown because too many files have changed in this diff Show More