commit 021359923f
Merge
@@ -290,3 +290,4 @@ f7c11da0b0481d49cc7a65a453336c108191e821 jdk9-b42
3dd628fde2086218d548841022ee8436b6b88185 jdk9-b45
12f1e276447bcc81516e85367d53e4f08897049d jdk9-b46
b6cca3e6175a69f39e5799b7349ddb0176630291 jdk9-b47
0064e246d83f6f9fc245c19b6d05041ecaf4b6d4 jdk9-b48

@@ -987,3 +987,26 @@ AC_DEFUN_ONCE([BASIC_TEST_USABILITY_ISSUES],
    IS_RECONFIGURE=no
  fi
])

# Check for support for specific options in bash
AC_DEFUN_ONCE([BASIC_CHECK_BASH_OPTIONS],
[
  # Test if bash supports pipefail.
  AC_MSG_CHECKING([if bash supports pipefail])
  if ${BASH} -c 'set -o pipefail'; then
    BASH_ARGS="$BASH_ARGS -o pipefail"
    AC_MSG_RESULT([yes])
  else
    AC_MSG_RESULT([no])
  fi

  AC_MSG_CHECKING([if bash supports errexit (-e)])
  if ${BASH} -e -c 'true'; then
    BASH_ARGS="$BASH_ARGS -e"
    AC_MSG_RESULT([yes])
  else
    AC_MSG_RESULT([no])
  fi

  AC_SUBST(BASH_ARGS)
])

@@ -46,8 +46,12 @@ endif
BOOT_JDK := $(JDK_IMAGE_DIR)

# The bootcycle build has a different output directory
BUILD_OUTPUT:=@BUILD_OUTPUT@/bootcycle-build
SJAVAC_SERVER_DIR:=$(subst @BUILD_OUTPUT@,$(BUILD_OUTPUT),$(SJAVAC_SERVER_DIR))
OLD_BUILD_OUTPUT:=@BUILD_OUTPUT@
BUILD_OUTPUT:=$(OLD_BUILD_OUTPUT)/bootcycle-build
# The HOTSPOT_DIST dir is not defined relative to BUILD_OUTPUT in spec.gmk. Must not
# use space in this patsubst to avoid leading space in HOTSPOT_DIST.
HOTSPOT_DIST:=$(patsubst $(OLD_BUILD_OUTPUT)%,$(BUILD_OUTPUT)%,$(HOTSPOT_DIST))
SJAVAC_SERVER_DIR:=$(patsubst $(OLD_BUILD_OUTPUT)%, $(BUILD_OUTPUT)%, $(SJAVAC_SERVER_DIR))

JAVA_CMD:=$(BOOT_JDK)/bin/java
JAVAC_CMD:=$(BOOT_JDK)/bin/javac

@@ -113,6 +113,7 @@ HELP_SETUP_DEPENDENCY_HELP

# Setup tools that require more complex handling, or that are not needed by the configure script.
BASIC_SETUP_COMPLEX_TOOLS
BASIC_CHECK_BASH_OPTIONS

# Check if pkg-config is available.
PKG_PROG_PKG_CONFIG

@@ -853,6 +853,7 @@ OS_VERSION_MICRO
OS_VERSION_MINOR
OS_VERSION_MAJOR
PKG_CONFIG
BASH_ARGS
CODESIGN
XATTR
DSYMUTIL
@@ -3522,6 +3523,9 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.



# Check for support for specific options in bash


#
# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -4329,7 +4333,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE

# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1420811523
DATE_WHEN_GENERATED=1421247827

###############################################################################
#
@@ -19609,6 +19613,32 @@ $as_echo "yes" >&6; }
fi


  # Test if bash supports pipefail.
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if bash supports pipefail" >&5
$as_echo_n "checking if bash supports pipefail... " >&6; }
  if ${BASH} -c 'set -o pipefail'; then
    BASH_ARGS="$BASH_ARGS -o pipefail"
    { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
  else
    { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  fi

  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if bash supports errexit (-e)" >&5
$as_echo_n "checking if bash supports errexit (-e)... " >&6; }
  if ${BASH} -e -c 'true'; then
    BASH_ARGS="$BASH_ARGS -e"
    { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
  else
    { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
  fi




  # Check if pkg-config is available.


@@ -27408,8 +27438,8 @@ $as_echo "$as_me: Trying to extract Visual Studio environment variables" >&6;}
  # The trailing space for everyone except PATH is no typo, but is needed due
  # to trailing \ in the Windows paths. These will be stripped later.
  $ECHO "$WINPATH_BASH -c 'echo VS_PATH="'\"$PATH\" > set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
  $ECHO "$WINPATH_BASH -c 'echo VS_INCLUDE="'\"$INCLUDE\;$include \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
  $ECHO "$WINPATH_BASH -c 'echo VS_LIB="'\"$LIB\;$lib \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
  $ECHO "$WINPATH_BASH -c 'echo VS_INCLUDE="'\"$INCLUDE \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
  $ECHO "$WINPATH_BASH -c 'echo VS_LIB="'\"$LIB \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
  $ECHO "$WINPATH_BASH -c 'echo VCINSTALLDIR="'\"$VCINSTALLDIR \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
  $ECHO "$WINPATH_BASH -c 'echo WindowsSdkDir="'\"$WindowsSdkDir \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE
  $ECHO "$WINPATH_BASH -c 'echo WINDOWSSDKDIR="'\"$WINDOWSSDKDIR \" >> set-vs-env.sh' >> $EXTRACT_VC_ENV_BAT_FILE

@@ -78,6 +78,11 @@ endif
OUTPUT_SYNC_SUPPORTED:=@OUTPUT_SYNC_SUPPORTED@
OUTPUT_SYNC:=@OUTPUT_SYNC@

# Override the shell with bash
BASH:=@BASH@
BASH_ARGS:=@BASH_ARGS@
SHELL:=$(BASH) $(BASH_ARGS)

# The "human readable" name of this configuration
CONF_NAME:=@CONF_NAME@

@@ -243,7 +248,7 @@ MAKESUPPORT_OUTPUTDIR=$(BUILD_OUTPUT)/makesupport
HOTSPOT_OUTPUTDIR=$(BUILD_OUTPUT)/hotspot
JDK_OUTPUTDIR=$(BUILD_OUTPUT)/jdk
IMAGES_OUTPUTDIR=$(BUILD_OUTPUT)/images
TESTMAKE_OUTPUTDIR=$(BUILD_OUTPUT)/testmake
TESTMAKE_OUTPUTDIR=$(BUILD_OUTPUT)/test-make
MAKESUPPORT_OUTPUTDIR=$(BUILD_OUTPUT)/make-support

HOTSPOT_DIST=@HOTSPOT_DIST@
@@ -495,7 +500,6 @@ endif
# Tools adhering to a minimal and common standard of posix compliance.
AWK:=@AWK@
BASENAME:=@BASENAME@
BASH:=@BASH@
CAT:=@CAT@
CCACHE:=@CCACHE@
# CD is going away, but remains to cater for legacy makefiles.

@@ -450,3 +450,4 @@ c363a8b87e477ee45d6d3cb2a36cb365141bc596 jdk9-b38
5dc8184af1e2bb30b0103113d1f1a58a21a80c37 jdk9-b45
a184ee1d717297bd35b7c3e35393e137921a3ed2 jdk9-b46
3b241fb72b8925b75941d612db762a6d5da66d02 jdk9-b47
cc775a4a24c7f5d9e624b4205e9fbd48a17331f6 jdk9-b48

@@ -246,8 +246,7 @@ endif
XSLT_CHECK = $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
	$(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
	if [ $$? -ne 0 ]; then \
	$(QUIETLY) if ! $(XSLT_CHECK) > /dev/null 2>&1; then \
	  $(REMOTE) $(RUN.JAVA) -version; \
	  echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
	  "to bootstrap this build" 1>&2; \

@@ -74,6 +74,12 @@ CFLAGS += -D_REENTRANT
# no xlc counterpart for -fcheck-new
# CFLAGS += -fcheck-new

# We need to define this on the command line if we want to use the
# predefined format specifiers from "inttypes.h". Otherwise system headers
# can indirectly include inttypes.h before we define __STDC_FORMAT_MACROS
# in globalDefinitions.hpp
CFLAGS += -D__STDC_FORMAT_MACROS

ARCHFLAG = -q64

CFLAGS += $(ARCHFLAG)

@@ -240,8 +240,7 @@ endif
XSLT_CHECK = $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
	$(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
	if [ $$? -ne 0 ]; then \
	$(QUIETLY) if ! $(XSLT_CHECK) > /dev/null 2>&1; then \
	  $(REMOTE) $(RUN.JAVA) -version; \
	  echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
	  "to bootstrap this build" 1>&2; \

@@ -179,23 +179,23 @@ $(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).dylib
# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
$(JVMOFFS).h: $(GENOFFS)
	$(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -header > $@.tmp; touch $@; \
	if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
	then rm -f $@; mv $@.tmp $@; \
	else rm -f $@.tmp; \
	if diff $@.tmp $@ > /dev/null 2>&1 ; \
	then rm -f $@.tmp; \
	else rm -f $@; mv $@.tmp $@; \
	fi

$(JVMOFFS)Index.h: $(GENOFFS)
	$(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -index > $@.tmp; touch $@; \
	if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
	then rm -f $@; mv $@.tmp $@; \
	else rm -f $@.tmp; \
	if diff $@.tmp $@ > /dev/null 2>&1 ; \
	then rm -f $@.tmp; \
	else rm -f $@; mv $@.tmp $@; \
	fi

$(JVMOFFS).cpp: $(GENOFFS) $(JVMOFFS).h $(JVMOFFS)Index.h
	$(QUIETLY) DYLD_LIBRARY_PATH=.:$(DYLD_LIBRARY_PATH) ./$(GENOFFS) -table > $@.tmp; touch $@; \
	if [ `diff $@.tmp $@ > /dev/null 2>&1; echo $$?` -ne 0 ] ; \
	then rm -f $@; mv $@.tmp $@; \
	else rm -f $@.tmp; \
	if diff $@.tmp $@ > /dev/null 2>&1; \
	then rm -f $@.tmp; \
	else rm -f $@; mv $@.tmp $@; \
	fi

$(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp

@@ -59,7 +59,7 @@ universalize: $(UNIVERSAL_LIPO_LIST) $(UNIVERSAL_COPY_LIST)

# Package built libraries in a universal binary
$(UNIVERSAL_LIPO_LIST):
	BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
	BUILT_LIPO_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`" || test $$? = "1"; \
	if [ -n "$${BUILT_LIPO_FILES}" ]; then \
	  $(MKDIR) -p $(shell dirname $@); \
	  lipo -create -output $@ $${BUILT_LIPO_FILES}; \
@@ -70,7 +70,7 @@ $(UNIVERSAL_LIPO_LIST):
# - copies directories; including empty dirs
# - copies files, symlinks, other non-directory files
$(UNIVERSAL_COPY_LIST):
	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`"; \
	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`" || test $$? = "1"; \
	if [ -n "$${BUILT_COPY_FILES}" ]; then \
	  for i in $${BUILT_COPY_FILES}; do \
	    $(MKDIR) -p $(shell dirname $@); \

@@ -246,8 +246,7 @@ endif
XSLT_CHECK = $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
	$(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
	if [ $$? -ne 0 ]; then \
	$(QUIETLY) if ! $(XSLT_CHECK) > /dev/null 2>&1; then \
	  $(REMOTE) $(RUN.JAVA) -version; \
	  echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
	  "to bootstrap this build" 1>&2; \

@@ -334,10 +334,8 @@ $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
	rm -f $@.1; ln -s $@ $@.1; \
	if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then \
	  if [ -x /usr/sbin/selinuxenabled ] ; then \
	    /usr/sbin/selinuxenabled; \
	    if [ $$? = 0 ] ; then \
	      /usr/bin/chcon -t textrel_shlib_t $@; \
	      if [ $$? != 0 ]; then \
	    if /usr/sbin/selinuxenabled; then \
	      if ! /usr/bin/chcon -t textrel_shlib_t $@; then \
	        echo "ERROR: Cannot chcon $@"; \
	      fi \
	    fi \

@@ -39,6 +39,7 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/c1/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/ci/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/classfile/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/code/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \
@@ -49,8 +50,10 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/bsd/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dummy/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/ppc64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/sparc/*.java \
@@ -71,6 +74,7 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/coff/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windows/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windows/amd64/*.java \
@@ -101,6 +105,8 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/ppc64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/jcore/*.java \

@@ -190,8 +190,7 @@ endif
XSLT_CHECK = $(RUN.JAVAP) javax.xml.transform.TransformerFactory
# If not found then fail fast.
check_j2se_version:
	$(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
	if [ $$? -ne 0 ]; then \
	$(QUIETLY) if ! $(XSLT_CHECK) > /dev/null 2>&1; then \
	  $(RUN.JAVA) -version; \
	  echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
	  "to bootstrap this build" 1>&2; \

@@ -171,11 +171,11 @@ $(GENOFFS): $(DTRACE_SRCDIR)/$(GENOFFS)Main.c lib$(GENOFFS).so
	./lib$(GENOFFS).so

CONDITIONALLY_UPDATE_JVMOFFS_TARGET = \
	cmp -s $@ $@.tmp; \
	case $$? in \
	0) rm -f $@.tmp;; \
	*) rm -f $@ && mv $@.tmp $@ && echo Updated $@;; \
	esac
	if cmp -s $@ $@.tmp; then \
	  rm -f $@.tmp; \
	else \
	  rm -f $@ && mv $@.tmp $@ && echo Updated $@; \
	fi

# $@.tmp is created first to avoid an empty $(JVMOFFS).h if an error occurs.
$(JVMOFFS).h: $(GENOFFS)

@@ -567,16 +567,21 @@ class MacroAssembler: public Assembler {
  inline void load_with_trap_null_check(Register d, int si16, Register s1);

  // Load heap oop and decompress. Loaded oop may not be null.
  inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
  // Specify tmp to save one cycle.
  inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg,
                                     Register tmp = noreg);
  // Store heap oop and decompress. Decompressed oop may not be null.
  // Specify tmp register if d should not be changed.
  inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
                                      /*specify if d must stay uncompressed*/ Register tmp = noreg);
                                      Register tmp = noreg);

  // Null allowed.
  inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);

  // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
  // src == d allowed.
  inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
  inline void decode_heap_oop_not_null(Register d);
  inline Register decode_heap_oop_not_null(Register d, Register src = noreg);

  // Null allowed.
  inline void decode_heap_oop(Register d);

@@ -311,11 +311,14 @@ inline void MacroAssembler::load_with_trap_null_check(Register d, int si16, Regi
  ld(d, si16, s1);
}

inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1) {
inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    // In disjoint mode decoding can save a cycle if src != dst.
    Register narrowOop = (tmp != noreg && Universe::narrow_oop_base_disjoint()) ? tmp : d;
    lwz(narrowOop, offs, s1);
    // Attention: no null check here!
    decode_heap_oop_not_null(d);
    Register res = decode_heap_oop_not_null(d, narrowOop);
    assert(res == d, "caller will not consume loaded value");
  } else {
    ld(d, offs, s1);
  }
@@ -340,26 +343,36 @@ inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, R
}

inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register src) {
  Register current = (src!=noreg) ? src : d; // Compressed oop is in d if no src provided.
  if (Universe::narrow_oop_base() != NULL) {
  Register current = (src != noreg) ? src : d; // Oop to be compressed is in d if no src provided.
  if (Universe::narrow_oop_base_overlaps()) {
    sub(d, current, R30);
    current = d;
  }
  if (Universe::narrow_oop_shift() != 0) {
    srdi(d, current, LogMinObjAlignmentInBytes);
    rldicl(d, current, 64-Universe::narrow_oop_shift(), 32); // Clears the upper bits.
    current = d;
  }
  return current; // Encoded oop is in this register.
}

inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
inline Register MacroAssembler::decode_heap_oop_not_null(Register d, Register src) {
  if (Universe::narrow_oop_base_disjoint() && src != noreg && src != d &&
      Universe::narrow_oop_shift() != 0) {
    mr(d, R30);
    rldimi(d, src, Universe::narrow_oop_shift(), 32-Universe::narrow_oop_shift());
    return d;
  }

  Register current = (src != noreg) ? src : d; // Compressed oop is in d if no src provided.
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
    sldi(d, current, Universe::narrow_oop_shift());
    current = d;
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
    add(d, current, R30);
    current = d;
  }
  return current; // Decoded oop is in this register.
}

inline void MacroAssembler::decode_heap_oop(Register d) {
@@ -368,13 +381,7 @@ inline void MacroAssembler::decode_heap_oop(Register d) {
    cmpwi(CCR0, d, 0);
    beq(CCR0, isNull);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
  decode_heap_oop_not_null(d);
  bind(isNull);
}

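For orientation, here is a minimal, self-contained C++ sketch of the disjoint-base arithmetic the inline functions above implement. The base value and shift below are illustrative assumptions, not values taken from this patch; "disjoint" means the base has no bits set in the range covered by the shifted narrow oop, which is why the rldimi merge (an OR) is equivalent to an add, and why encoding needs no subtraction.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative values only: a 32G-aligned (disjoint) heap base and a
// 3-bit shift for 8-byte object alignment. Real values come from the VM.
constexpr uint64_t kHeapBase = UINT64_C(0x0000000800000000); // 2^35, a multiple of 2^(32+3)
constexpr unsigned kShift    = 3;

// Encode: with a disjoint base no subtraction is needed; shifting right and
// keeping the low 32 bits is enough (what rldicl/EXTRDI does above).
uint32_t encode(uint64_t oop) {
  return static_cast<uint32_t>(oop >> kShift);
}

// Decode: insert (narrow << shift) into a value already holding the base
// (what rldimi does above). Because the bit ranges are disjoint, OR == ADD.
uint64_t decode(uint32_t narrow) {
  uint64_t shifted = static_cast<uint64_t>(narrow) << kShift;
  assert((kHeapBase & shifted) == 0 &&
         (kHeapBase | shifted) == kHeapBase + shifted);
  return kHeapBase | shifted;
}

int main() {
  uint64_t oop = kHeapBase + 0x12340;  // some 8-byte-aligned offset into the heap
  printf("round trip ok: %d\n", decode(encode(oop)) == oop);
}
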
@@ -172,15 +172,15 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv, temp2);
  __ verify_oop(method_temp);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  // The following assumes that a Method* is normally compressed in the vmtarget field:
  __ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    // Make sure recv is already on stack.
    __ ld(temp2, in_bytes(Method::const_offset()), method_temp);
    __ load_sized_value(temp2, in_bytes(ConstMethod::size_of_parameters_offset()), temp2,
                        sizeof(u2), /*is_signed*/ false);
@@ -259,8 +259,9 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
  }

  if (TraceMethodHandles) {
    if (tmp_mh != noreg)
    if (tmp_mh != noreg) {
      __ mr(R23_method_handle, tmp_mh); // make stub happy
    }
    trace_method_handle_interpreter_entry(_masm, iid);
  }

@@ -332,7 +333,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
    if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
      Label L_ok;
      Register temp2_defc = temp2;
      __ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
      __ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg, temp3);
      load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
      __ verify_klass_ptr(temp2_defc);
      __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
@@ -407,7 +408,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
      }

      Register temp2_intf = temp2;
      __ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
      __ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg, temp3);
      load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
      __ verify_klass_ptr(temp2_intf);

@@ -464,7 +465,7 @@ void trace_method_handle_stub(const char* adaptername,
                    strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23";
  tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT,
                adaptername, mh_reg_name, (intptr_t) mh, (intptr_t) entry_sp);
                adaptername, mh_reg_name, (intptr_t) mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
@@ -535,23 +536,22 @@ void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adapt

  BLOCK_COMMENT("trace_method_handle {");

  int nbytes_save = 10 * 8; // 10 volatile gprs
  __ save_LR_CR(R0);
  __ mr(R0, R1_SP); // saved_sp
  assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
  // Push_frame_reg_args only uses R0 if nbytes_save is wider than 16 bit.
  __ push_frame_reg_args(nbytes_save, R0);
  __ save_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0.
  const Register tmp = R11; // Will be preserved.
  const int nbytes_save = 11*8; // volatile gprs except R0
  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ save_LR_CR(tmp); // save in old frame

  __ load_const(R3_ARG1, (address)adaptername);
  __ mr(R5_ARG3, R1_SP); // saved_sp
  __ push_frame_reg_args(nbytes_save, tmp);

  __ load_const_optimized(R3_ARG1, (address)adaptername, tmp);
  __ mr(R4_ARG2, R23_method_handle);
  __ mr(R5_ARG3, R0); // saved_sp
  __ mr(R6_ARG4, R1_SP);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ restore_volatile_gprs(R1_SP, 112); // Except R0.
  __ pop_frame();
  __ restore_LR_CR(R0);
  __ restore_LR_CR(tmp);
  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0

  BLOCK_COMMENT("} trace_method_handle");
}

@@ -1,6 +1,6 @@
//
// Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
// Copyright 2012, 2014 SAP AG. All rights reserved.
// Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
// Copyright 2012, 2015 SAP AG. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -2698,7 +2698,7 @@ encode %{
      const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
      __ relocate(a.rspec());
    } else if (constant_reloc == relocInfo::metadata_type) {
      AddressLiteral a = __ allocate_metadata_address((Metadata *)val);
      AddressLiteral a = __ constant_metadata_address((Metadata *)val);
      const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
      __ relocate(a.rspec());
    } else {
@@ -2727,7 +2727,7 @@ encode %{
      const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
      __ relocate(a.rspec());
    } else if (constant_reloc == relocInfo::metadata_type) {
      AddressLiteral a = __ allocate_metadata_address((Metadata *)val);
      AddressLiteral a = __ constant_metadata_address((Metadata *)val);
      const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
      __ relocate(a.rspec());
    } else { // non-oop pointers, e.g. card mark base, heap top
@@ -6029,6 +6029,20 @@ instruct clearMs32b(iRegNdst dst, iRegNsrc src) %{
  ins_pipe(pipe_class_default);
%}

// Optimize DecodeN for disjoint base.
// Load base of compressed oops into a register.
instruct loadBase(iRegLdst dst) %{
  effect(DEF dst);

  format %{ "MR      $dst, r30_heapbase" %}
  size(4);
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_or);
    __ mr($dst$$Register, R30);
  %}
  ins_pipe(pipe_class_default);
%}

// Loading ConN must be postalloc expanded so that edges between
// the nodes are safe. They may not interfere with a safepoint.
// GL TODO: This needs three instructions: better put this into the constant pool.
@@ -6724,13 +6738,12 @@ instruct cond_set_0_oop(iRegNdst dst, flagsReg crx, iRegPsrc src1) %{
  ins_pipe(pipe_class_default);
%}

// base != 0
// 32G aligned narrow oop base.
instruct encodeP_32GAligned(iRegNdst dst, iRegPsrc src) %{
// Disjoint narrow oop base.
instruct encodeP_Disjoint(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(false /* TODO: PPC port Universe::narrow_oop_base_disjoint()*/);
  predicate(Universe::narrow_oop_base_disjoint());

  format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with 32G aligned base" %}
  format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with disjoint base" %}
  size(4);
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
@@ -6745,7 +6758,7 @@ instruct encodeP_Ex(iRegNdst dst, flagsReg crx, iRegPsrc src) %{
  effect(TEMP crx);
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull &&
            Universe::narrow_oop_shift() != 0 &&
            true /* TODO: PPC port Universe::narrow_oop_base_overlaps()*/);
            Universe::narrow_oop_base_overlaps());

  format %{ "EncodeP $dst, $crx, $src \t// postalloc expanded" %}
  postalloc_expand( postalloc_expand_encode_oop(dst, src, crx));
@@ -6756,7 +6769,7 @@ instruct encodeP_not_null_Ex(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull &&
            Universe::narrow_oop_shift() != 0 &&
            true /* TODO: PPC port Universe::narrow_oop_base_overlaps()*/);
            Universe::narrow_oop_base_overlaps());

  format %{ "EncodeP $dst, $src\t// $src != Null, postalloc expanded" %}
  postalloc_expand( postalloc_expand_encode_oop_not_null(dst, src) );
@@ -6876,6 +6889,7 @@ instruct decodeN_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
            Universe::narrow_oop_shift() != 0 &&
            Universe::narrow_oop_base() != 0);
  ins_cost(4 * DEFAULT_COST); // Should be more expensive than decodeN_Disjoint_isel_Ex.
  effect(TEMP crx);

  format %{ "DecodeN $dst, $src \t// Kills $crx, postalloc expanded" %}
@@ -6897,6 +6911,106 @@ instruct decodeN_nullBase(iRegPdst dst, iRegNsrc src) %{
  ins_pipe(pipe_class_default);
%}

// Optimize DecodeN for disjoint base.
// Shift narrow oop and or it into register that already contains the heap base.
// Base == dst must hold, and is assured by construction in postalloc_expand.
instruct decodeN_mergeDisjoint(iRegPdst dst, iRegNsrc src, iRegLsrc base) %{
  match(Set dst (DecodeN src));
  effect(TEMP base);
  predicate(false);

  format %{ "RLDIMI  $dst, $src, shift, 32-shift \t// DecodeN (disjoint base)" %}
  size(4);
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rldimi);
    __ rldimi($dst$$Register, $src$$Register, Universe::narrow_oop_shift(), 32-Universe::narrow_oop_shift());
  %}
  ins_pipe(pipe_class_default);
%}

// Optimize DecodeN for disjoint base.
// This node requires only one cycle on the critical path.
// We must postalloc_expand as we can not express use_def effects where
// the used register is L and the def'ed register P.
instruct decodeN_Disjoint_notNull_Ex(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
  effect(TEMP_DEF dst);
  predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
             n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
            Universe::narrow_oop_base_disjoint());
  ins_cost(DEFAULT_COST);

  format %{ "MOV     $dst, R30 \t\n"
            "RLDIMI  $dst, $src, shift, 32-shift \t// decode with disjoint base" %}
  postalloc_expand %{
    loadBaseNode *n1 = new loadBaseNode();
    n1->add_req(NULL);
    n1->_opnds[0] = op_dst;

    decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
    n2->add_req(n_region, n_src, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_src;
    n2->_opnds[2] = op_dst;
    n2->_bottom_type = _bottom_type;

    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n1);
    nodes->push(n2);
  %}
%}

instruct decodeN_Disjoint_isel_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
  match(Set dst (DecodeN src));
  effect(TEMP_DEF dst, TEMP crx);
  predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
             n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
            Universe::narrow_oop_base_disjoint() && VM_Version::has_isel());
  ins_cost(3 * DEFAULT_COST);

  format %{ "DecodeN $dst, $src \t// decode with disjoint base using isel" %}
  postalloc_expand %{
    loadBaseNode *n1 = new loadBaseNode();
    n1->add_req(NULL);
    n1->_opnds[0] = op_dst;

    cmpN_reg_imm0Node *n_compare = new cmpN_reg_imm0Node();
    n_compare->add_req(n_region, n_src);
    n_compare->_opnds[0] = op_crx;
    n_compare->_opnds[1] = op_src;
    n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);

    decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
    n2->add_req(n_region, n_src, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_src;
    n2->_opnds[2] = op_dst;
    n2->_bottom_type = _bottom_type;

    cond_set_0_ptrNode *n_cond_set = new cond_set_0_ptrNode();
    n_cond_set->add_req(n_region, n_compare, n2);
    n_cond_set->_opnds[0] = op_dst;
    n_cond_set->_opnds[1] = op_crx;
    n_cond_set->_opnds[2] = op_dst;
    n_cond_set->_bottom_type = _bottom_type;

    assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
    ra_->set_oop(n_cond_set, true);

    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n1);
    nodes->push(n_compare);
    nodes->push(n2);
    nodes->push(n_cond_set);
  %}
%}

// src != 0, shift != 0, base != 0
instruct decodeN_notNull_addBase_Ex(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
@@ -6904,6 +7018,7 @@ instruct decodeN_notNull_addBase_Ex(iRegPdst dst, iRegNsrc src) %{
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
            Universe::narrow_oop_shift() != 0 &&
            Universe::narrow_oop_base() != 0);
  ins_cost(2 * DEFAULT_COST);

  format %{ "DecodeN $dst, $src \t// $src != NULL, postalloc expanded" %}
  postalloc_expand( postalloc_expand_decode_oop_not_null(dst, src));
@@ -6973,13 +7088,12 @@ instruct encodePKlass_sub_base(iRegPdst dst, iRegLsrc base, iRegPdst src) %{
  ins_pipe(pipe_class_default);
%}

// base != 0
// 32G aligned narrow oop base.
instruct encodePKlass_32GAligned(iRegNdst dst, iRegPsrc src) %{
// Disjoint narrow oop base.
instruct encodePKlass_Disjoint(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodePKlass src));
  predicate(false /* TODO: PPC port Universe::narrow_klass_base_disjoint()*/);

  format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with 32G aligned base" %}
  format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with disjoint base" %}
  size(4);
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
@@ -7486,7 +7600,7 @@ instruct storeLConditional_regP_regL_regL(flagsReg crx, indirect mem_ptr, iRegLs
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    __ cmpxchgd($crx$$CondRegister, R0, $oldVal$$Register, $newVal$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                MacroAssembler::MemBarAcq, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, NULL, true);
  %}
  ins_pipe(pipe_class_default);
@@ -10476,7 +10590,7 @@ instruct cmpN_reg_reg(flagsReg crx, iRegNsrc src1, iRegNsrc src2) %{
  match(Set crx (CmpN src1 src2));

  size(4);
  ins_cost(DEFAULT_COST);
  ins_cost(2);
  format %{ "CMPLW   $crx, $src1, $src2 \t// compressed ptr" %}
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmpl);
@@ -10488,7 +10602,7 @@ instruct cmpN_reg_reg(flagsReg crx, iRegNsrc src1, iRegNsrc src2) %{
instruct cmpN_reg_imm0(flagsReg crx, iRegNsrc src1, immN_0 src2) %{
  match(Set crx (CmpN src1 src2));
  // Make this more expensive than zeroCheckN_iReg_imm0.
  ins_cost(DEFAULT_COST);
  ins_cost(2);

  format %{ "CMPLWI  $crx, $src1, $src2 \t// compressed ptr" %}
  size(4);
@@ -10508,6 +10622,7 @@ instruct zeroCheckP_reg_imm0(cmpOp cmp, iRegP_N2P value, immP_0 zero, label labl
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
            _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
            Matcher::branches_to_uncommon_trap(_leaf));
  ins_cost(1); // Should not be cheaper than zeroCheckN.

  ins_is_TrapBasedCheckNode(true);

@@ -10889,7 +11004,7 @@ instruct branchLoopEndSched(cmpOp cmp, flagsReg crx, label labl) %{
instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P superklass,
                             iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
  match(Set result (PartialSubtypeCheck subklass superklass));
  effect(TEMP result, TEMP tmp_klass, TEMP tmp_arrayptr);
  effect(TEMP_DEF result, TEMP tmp_klass, TEMP tmp_arrayptr);
  ins_cost(DEFAULT_COST*10);

  format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
@@ -11000,7 +11115,7 @@ instruct string_indexOf_imm1_char(iRegIdst result, iRegPsrc haystack, iRegIsrc h
  predicate(SpecialStringIndexOf); // type check implicit by parameter type, See Matcher::match_rule_supported
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));

  effect(TEMP result, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1);
  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1);

  ins_cost(150);
  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
@@ -11037,7 +11152,7 @@ instruct string_indexOf_imm1(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt
                             iRegIdst tmp1, iRegIdst tmp2,
                             flagsRegCR0 cr0, flagsRegCR1 cr1) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL needle, /* TDEF needle, */ TEMP result,
  effect(USE_KILL needle, /* TDEF needle, */ TEMP_DEF result,
         TEMP tmp1, TEMP tmp2);
  // Required for EA: check if it is still a type_array.
  predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
@@ -11084,7 +11199,7 @@ instruct string_indexOf_imm(iRegIdst result, iRegPsrc haystack, rscratch1RegI ha
                            iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
                            flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP result,
  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6);
  // Required for EA: check if it is still a type_array.
  predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
@@ -11118,7 +11233,7 @@ instruct string_indexOf(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt
                        flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
  effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
         TEMP result,
         TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6);
  predicate(SpecialStringIndexOf); // See Matcher::match_rule_supported.
  ins_cost(300);
@@ -11142,7 +11257,7 @@ instruct string_equals_imm(iRegPsrc str1, iRegPsrc str2, uimmI15 cntImm, iRegIds
                           iRegPdst tmp1, iRegPdst tmp2,
                           flagsRegCR0 cr0, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrEquals (Binary str1 str2) cntImm));
  effect(TEMP result, TEMP tmp1, TEMP tmp2,
  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2,
         KILL cr0, KILL cr6, KILL ctr);
  predicate(SpecialStringEquals); // See Matcher::match_rule_supported.
  ins_cost(250);
@@ -11165,7 +11280,7 @@ instruct string_equals(iRegPsrc str1, iRegPsrc str2, iRegIsrc cnt, iRegIdst resu
                       iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, iRegPdst tmp4, iRegPdst tmp5,
                       flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(TEMP result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         KILL cr0, KILL cr1, KILL cr6, KILL ctr);
  predicate(SpecialStringEquals); // See Matcher::match_rule_supported.
  ins_cost(300);
@@ -11188,7 +11303,7 @@ instruct string_equals(iRegPsrc str1, iRegPsrc str2, iRegIsrc cnt, iRegIdst resu
instruct string_compare(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
                        iRegPdst tmp, flagsRegCR0 cr0, regCTR ctr) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL cnt1, USE_KILL cnt2, USE_KILL str1, USE_KILL str2, TEMP result, TEMP tmp, KILL cr0, KILL ctr);
  effect(USE_KILL cnt1, USE_KILL cnt2, USE_KILL str1, USE_KILL str2, TEMP_DEF result, TEMP tmp, KILL cr0, KILL ctr);
  ins_cost(300);

  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.

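Note on the storeLConditional change above: passing MacroAssembler::MemBarAcq instead of MemBarNone gives the conditional store acquire semantics. As a rough analogy only (standard C++ atomics, not HotSpot code), the difference resembles strengthening the memory order of a compare-exchange:

#include <atomic>
#include <cstdint>

// Sketch: before the change the compare-exchange carried no ordering
// (like memory_order_relaxed); after it, the operation acquires, so later
// loads cannot be reordered ahead of it. Names here are illustrative.
bool try_update(std::atomic<int64_t>& slot, int64_t expected, int64_t desired) {
  return slot.compare_exchange_strong(expected, desired,
                                      std::memory_order_acquire,  // was: memory_order_relaxed
                                      std::memory_order_acquire);
}
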
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -483,15 +483,6 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {

}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

@@ -1374,6 +1374,7 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register method_counters,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1386,9 +1387,8 @@ void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocat
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
  sethi(profile_limit, Rtmp);
  ld(Rtmp, profile_limit.low10(), Rtmp);
  Address profile_limit(method_counters, MethodCounters::interpreter_profile_limit_offset());
  ld(profile_limit, Rtmp);
  cmp(invocation_count, Rtmp);
  // Use long branches because call_VM() code and following code generated by
  // test_backedge_count_for_osr() is large in debug VM.
@@ -2375,6 +2375,7 @@ void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters,

#ifndef CC_INTERP
void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                             Register method_counters,
                                                             Register branch_bcp,
                                                             Register Rtmp ) {
  Label did_not_overflow;
@@ -2382,8 +2383,8 @@ void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_c
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");

  AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
  load_contents(limit, Rtmp);
  Address limit(method_counters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
  ld(limit, Rtmp);
  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
@@ -2500,17 +2501,13 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        int increment, Address mask_addr,
                                                        Register scratch1, Register scratch2,
                                                        Condition cond, Label *where) {
  ld(counter_addr, scratch1);
  add(scratch1, increment, scratch1);
  if (is_simm13(mask)) {
    andcc(scratch1, mask, G0);
  } else {
    set(mask, scratch2);
    andcc(scratch1, scratch2, G0);
  }
  ld(mask_addr, scratch2);
  andcc(scratch1, scratch2, G0);
  br(cond, false, Assembler::pn, *where);
  delayed()->st(scratch1, counter_addr);
}

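The reworked increment_mask_and_jump above no longer takes the mask as an immediate; it loads it from a field, so the notification frequency can vary per method via the MethodCounters/MethodData mask fields referenced in the hunks that follow. A hedged C++ sketch of the computed condition, illustrative structure only and not HotSpot code:

#include <cstdint>

struct Counters {    // stand-in for a MethodCounters / MethodData record
  uint32_t counter;
  uint32_t mask;     // e.g. ((1 << NotifyFreqLog) - 1) << count_shift
};

// Equivalent of: jump if ((*counter_addr += increment) & mask) == 0,
// with the mask now read from memory instead of being an immediate.
bool increment_mask_and_test(Counters& c, uint32_t increment) {
  c.counter += increment;            // ld / add / st in the emitted code
  return (c.counter & c.mask) == 0;  // "zero" condition means take the jump
}
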
||||
|
@ -267,7 +267,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
void increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
|
||||
void increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
|
||||
#ifndef CC_INTERP
|
||||
void test_backedge_count_for_osr( Register backedge_count, Register branch_bcp, Register Rtmp );
|
||||
void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register branch_bcp, Register Rtmp );
|
||||
|
||||
#endif /* CC_INTERP */
|
||||
// Object locking
|
||||
@ -280,7 +280,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
void set_method_data_pointer_for_bcp();
|
||||
void test_method_data_pointer(Label& zero_continue);
|
||||
void verify_method_data_pointer();
|
||||
void test_invocation_counter_for_mdp(Register invocation_count, Register Rtmp, Label &profile_continue);
|
||||
void test_invocation_counter_for_mdp(Register invocation_count, Register method_counters, Register Rtmp, Label &profile_continue);
|
||||
|
||||
void set_mdp_data_at(int constant, Register value);
|
||||
void increment_mdp_data_at(Address counter, Register bumped_count,
|
||||
@ -291,7 +291,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
Register bumped_count, Register scratch2,
|
||||
bool decrement = false);
|
||||
void increment_mask_and_jump(Address counter_addr,
|
||||
int increment, int mask,
|
||||
int increment, Address mask_addr,
|
||||
Register scratch1, Register scratch2,
|
||||
Condition cond, Label *where);
|
||||
void set_mdp_flag_at(int flag_constant, Register scratch);
|
||||
|
@ -282,12 +282,11 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
|
||||
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
|
||||
// Note: In tiered we increment either counters in MethodCounters* or in
|
||||
// MDO depending if we're profiling or not.
|
||||
const Register Rcounters = G3_scratch;
|
||||
const Register G3_method_counters = G3_scratch;
|
||||
Label done;
|
||||
|
||||
if (TieredCompilation) {
|
||||
const int increment = InvocationCounter::count_increment;
|
||||
const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
|
||||
Label no_mdo;
|
||||
if (ProfileInterpreter) {
|
||||
// If no method data exists, go to profile_continue.
|
||||
@ -297,6 +296,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
|
||||
Address mdo_invocation_counter(G4_scratch,
|
||||
in_bytes(MethodData::invocation_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
|
||||
G3_scratch, Lscratch,
|
||||
Assembler::zero, overflow);
|
||||
@ -305,20 +305,21 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
|
||||
|
||||
// Increment counter in MethodCounters*
|
||||
__ bind(no_mdo);
|
||||
Address invocation_counter(Rcounters,
|
||||
Address invocation_counter(G3_method_counters,
|
||||
in_bytes(MethodCounters::invocation_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
__ get_method_counters(Lmethod, Rcounters, done);
|
||||
__ get_method_counters(Lmethod, G3_method_counters, done);
|
||||
Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(invocation_counter, increment, mask,
|
||||
G4_scratch, Lscratch,
|
||||
Assembler::zero, overflow);
|
||||
__ bind(done);
|
||||
} else {
|
||||
} else { // not TieredCompilation
|
||||
// Update standard invocation counters
|
||||
__ get_method_counters(Lmethod, Rcounters, done);
|
||||
__ increment_invocation_counter(Rcounters, O0, G4_scratch);
|
||||
__ get_method_counters(Lmethod, G3_method_counters, done);
|
||||
__ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
|
||||
if (ProfileInterpreter) {
|
||||
Address interpreter_invocation_counter(Rcounters,
|
||||
Address interpreter_invocation_counter(G3_method_counters,
|
||||
in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
|
||||
__ ld(interpreter_invocation_counter, G4_scratch);
|
||||
__ inc(G4_scratch);
|
||||
@ -327,16 +328,16 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
|
||||
|
||||
if (ProfileInterpreter && profile_method != NULL) {
|
||||
// Test to see if we should create a method data oop
|
||||
AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
|
||||
__ load_contents(profile_limit, G3_scratch);
|
||||
__ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
|
||||
Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
|
||||
__ ld(profile_limit, G1_scratch);
|
||||
__ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
|
||||
|
||||
// if no method data exists, go to profile_method
|
||||
__ test_method_data_pointer(*profile_method);
|
||||
}
|
||||
|
||||
AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
|
||||
__ load_contents(invocation_limit, G3_scratch);
|
||||
Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
|
||||
__ ld(invocation_limit, G3_scratch);
|
||||
__ cmp(O0, G3_scratch);
|
||||
__ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
|
||||
__ delayed()->nop();
|
||||
|
@ -1599,13 +1599,12 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
// Bump bytecode pointer by displacement (take the branch)
|
||||
__ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
|
||||
|
||||
const Register Rcounters = G3_scratch;
|
||||
__ get_method_counters(Lmethod, Rcounters, Lforward);
|
||||
const Register G3_method_counters = G3_scratch;
|
||||
__ get_method_counters(Lmethod, G3_method_counters, Lforward);
|
||||
|
||||
if (TieredCompilation) {
|
||||
Label Lno_mdo, Loverflow;
|
||||
int increment = InvocationCounter::count_increment;
|
||||
int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
|
||||
if (ProfileInterpreter) {
|
||||
// If no method data exists, go to profile_continue.
|
||||
__ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
|
||||
@ -1614,6 +1613,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
// Increment backedge counter in the MDO
|
||||
Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
|
||||
Assembler::notZero, &Lforward);
|
||||
__ ba_short(Loverflow);
|
||||
@ -1621,9 +1621,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
|
||||
// If there's no MDO, increment counter in MethodCounters*
|
||||
__ bind(Lno_mdo);
|
||||
Address backedge_counter(Rcounters,
|
||||
Address backedge_counter(G3_method_counters,
|
||||
in_bytes(MethodCounters::backedge_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
|
||||
Assembler::notZero, &Lforward);
|
||||
__ bind(Loverflow);
|
||||
@ -1663,18 +1664,19 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ jmp(O2, G0);
|
||||
__ delayed()->nop();
|
||||
|
||||
} else {
|
||||
} else { // not TieredCompilation
|
||||
// Update Backedge branch separately from invocations
|
||||
const Register G4_invoke_ctr = G4;
|
||||
__ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
|
||||
__ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
|
||||
if (ProfileInterpreter) {
|
||||
__ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
|
||||
__ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
|
||||
if (UseOnStackReplacement) {
|
||||
__ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
|
||||
|
||||
__ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
|
||||
}
|
||||
} else {
|
||||
if (UseOnStackReplacement) {
|
||||
__ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
|
||||
__ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -541,15 +541,6 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
}
|
||||
|
||||
jbyte* G1PostBarrierStub::_byte_map_base = NULL;
|
||||
|
||||
jbyte* G1PostBarrierStub::byte_map_base_slow() {
|
||||
BarrierSet* bs = Universe::heap()->barrier_set();
|
||||
assert(bs->is_a(BarrierSet::G1SATBCTLogging),
|
||||
"Must be if we're using this.");
|
||||
return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
|
||||
}
|
||||
|
||||
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
__ bind(_entry);
|
||||
assert(addr()->is_register(), "Precondition.");
|
||||
|
@ -1360,7 +1360,7 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
|
||||
|
||||
// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
|
||||
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
|
||||
int increment, int mask,
|
||||
int increment, Address mask,
|
||||
Register scratch, bool preloaded,
|
||||
Condition cond, Label* where) {
|
||||
if (!preloaded) {
|
||||
|
@ -182,7 +182,7 @@
|
||||
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
|
||||
bool decrement = false);
|
||||
void increment_mask_and_jump(Address counter_addr,
|
||||
int increment, int mask,
|
||||
int increment, Address mask,
|
||||
Register scratch, bool preloaded,
|
||||
Condition cond, Label* where);
|
||||
void set_mdp_flag_at(Register mdp_in, int flag_constant);
|
||||
|
@ -1426,7 +1426,7 @@ void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        int increment, Address mask,
                                                        Register scratch, bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {

@ -191,7 +191,7 @@
  void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                             bool decrement = false);
  void increment_mask_and_jump(Address counter_addr,
                               int increment, int mask,
                               int increment, Address mask,
                               Register scratch, bool preloaded,
                               Condition cond, Label* where);
  void set_mdp_flag_at(Register mdp_in, int flag_constant);
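These interp_masm hunks are the heart of the change: the notification mask passed to increment_mask_and_jump() turns from an int constant baked into the generated code into an Address, i.e. a value loaded from the MethodCounters or MethodData object at run time, so each method can carry its own notification frequency. A minimal C++ sketch of the semantics (hypothetical names; this models the emitted code, it is not the VM's assembly):

    #include <cstdio>

    // Models what the emitted code does after this patch: both the counter
    // and the mask live in memory, so the mask can differ per method.
    static bool increment_mask_and_jump(int* counter_addr, int increment,
                                        const int* mask_addr) {
      *counter_addr += increment;                // bump the counter in memory
      return (*counter_addr & *mask_addr) == 0;  // "jump" when masked bits wrap
    }

    int main() {
      const int count_shift = 3;                 // low bits hold state/carry
      int counter = 0;
      int mask = ((1 << 4) - 1) << count_shift;  // notify every 16 events
      for (int i = 1; i <= 32; i++) {
        if (increment_mask_and_jump(&counter, 1 << count_shift, &mask)) {
          std::printf("notify at event %d\n", i);
        }
      }
      return 0;
    }

With a per-method mask a scaled method simply wraps sooner or later than its neighbors; nothing else in the counting machinery changes.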
@ -346,7 +346,6 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
  // depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?

@ -356,6 +355,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }

@ -366,11 +366,12 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               rcx, false, Assembler::zero, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter (rax,
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,

@ -400,16 +401,16 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx,
               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx,
             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
@ -299,7 +299,6 @@ void InterpreterGenerator::generate_counter_incr(
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?

@ -309,6 +308,7 @@ void InterpreterGenerator::generate_counter_incr(
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }

@ -318,10 +318,11 @@ void InterpreterGenerator::generate_counter_incr(
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else {
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());

@ -350,14 +351,16 @@ void InterpreterGenerator::generate_counter_incr(

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
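In both templateInterpreter variants the interpreter now loads its limits from MethodCounters fields instead of the global InterpreterInvocationLimit/InterpreterProfileLimit externals, which is what makes a per-method CompileThresholdScaling possible at all. A rough sketch of the data movement (the struct, count_shift value, and percentages are illustrative stand-ins, not the VM's actual layout or defaults):

    #include <cstdio>

    // Illustrative stand-in for the per-method limits the interpreter
    // now compares against; the real fields live in MethodCounters.
    struct MethodCountersSketch {
      int interpreter_invocation_limit;
      int interpreter_profile_limit;
    };

    int main() {
      const int count_shift = 3;
      const int CompileThreshold = 10000;   // representative default
      const double scale = 0.5;             // per-method CompileThresholdScaling
      const int threshold = (int)(CompileThreshold * scale);

      MethodCountersSketch mc;
      mc.interpreter_invocation_limit = threshold << count_shift;
      mc.interpreter_profile_limit = ((threshold * 33) / 100) << count_shift;  // 33% assumed

      std::printf("invocation limit %d, profile limit %d\n",
                  mc.interpreter_invocation_limit, mc.interpreter_profile_limit);
      return 0;
    }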
@ -1621,7 +1621,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));

@ -1630,6 +1629,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);

@ -1637,9 +1637,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
      __ bind(no_mdo);
      // Increment backedge counter in MethodCounters*
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
    } else { // not TieredCompilation
      // increment counter
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ movl(rax, Address(rcx, be_offset));    // load backedge counter

@ -1653,8 +1654,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method

@ -1662,8 +1662,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {

        if (UseOnStackReplacement) {
          // check for overflow against rbx, which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes from the

@ -1678,8 +1677,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against rax, which is the sum of the counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
@ -1642,7 +1642,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));

@ -1651,6 +1650,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);

@ -1658,9 +1658,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
      __ bind(no_mdo);
      // Increment backedge counter in MethodCounters*
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
    } else { // not TieredCompilation
      // increment counter
      __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
      __ movl(rax, Address(rcx, be_offset));    // load backedge counter

@ -1674,8 +1675,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method

@ -1683,8 +1683,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes

@ -1702,8 +1701,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
@ -31,6 +31,7 @@
#include "os_aix.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"

// put OS-includes here

@ -196,12 +197,37 @@ static pid_t filename_to_pid(const char* filename) {
  return pid;
}

// Check if the given statbuf is considered a secure directory for
// the backing store files. Returns true if the directory is considered
// a secure location. Returns false if the statbuf is a symbolic link or
// if an error occurred.
static bool is_statbuf_secure(struct stat *statp) {
  if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
    // The path represents a link or some non-directory file type,
    // which is not what we expected. Declare it insecure.
    //
    return false;
  }
  // We have an existing directory, check if the permissions are safe.
  if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
    // The directory is open for writing and could be subjected
    // to a symlink or a hard link attack. Declare it insecure.
    return false;
  }
  // See if the uid of the directory matches the effective uid of the process.
  //
  if (statp->st_uid != geteuid()) {
    // The directory was not created by this user, declare it insecure.
    return false;
  }
  return true;
}

// check if the given path is considered a secure directory for

// Check if the given path is considered a secure directory for
// the backing store files. Returns true if the directory exists
// and is considered a secure location. Returns false if the path
// is a symbolic link or if an error occurred.
//
static bool is_directory_secure(const char* path) {
  struct stat statbuf;
  int result = 0;

@ -211,38 +237,276 @@ static bool is_directory_secure(const char* path) {
    return false;
  }

  // the path exists, now check it's mode
  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
    // the path represents a link or some non-directory file type,
    // which is not what we expected. declare it insecure.
    //
  // The path exists, see if it is secure.
  return is_statbuf_secure(&statbuf);
}

// (Taken over from Solaris to support the O_NOFOLLOW case on AIX.)
// Check if the given directory file descriptor is considered a secure
// directory for the backing store files. Returns true if the directory
// exists and is considered a secure location. Returns false if the path
// is a symbolic link or if an error occurred.
static bool is_dirfd_secure(int dir_fd) {
  struct stat statbuf;
  int result = 0;

  RESTARTABLE(::fstat(dir_fd, &statbuf), result);
  if (result == OS_ERR) {
    return false;
  }
  else {
    // we have an existing directory, check if the permissions are safe.
    //
    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
      // the directory is open for writing and could be subjected
      // to a symlnk attack. declare it insecure.
      //
      return false;

  // The path exists, now check its mode.
  return is_statbuf_secure(&statbuf);
}


// Check to make sure fd1 and fd2 are referencing the same file system object.
static bool is_same_fsobject(int fd1, int fd2) {
  struct stat statbuf1;
  struct stat statbuf2;
  int result = 0;

  RESTARTABLE(::fstat(fd1, &statbuf1), result);
  if (result == OS_ERR) {
    return false;
  }
  RESTARTABLE(::fstat(fd2, &statbuf2), result);
  if (result == OS_ERR) {
    return false;
  }

  if ((statbuf1.st_ino == statbuf2.st_ino) &&
      (statbuf1.st_dev == statbuf2.st_dev)) {
    return true;
  } else {
    return false;
  }
}

// Helper functions for open without O_NOFOLLOW which is not present on AIX 5.3/6.1.
// We use the jdk6 implementation here.
#ifndef O_NOFOLLOW
// The O_NOFOLLOW oflag doesn't exist before solaris 5.10, this is to simulate that behaviour
// was done in jdk 5/6 hotspot by Oracle this way
static int open_o_nofollow_impl(const char* path, int oflag, mode_t mode, bool use_mode) {
  struct stat orig_st;
  struct stat new_st;
  bool create;
  int error;
  int fd;

  create = false;

  if (lstat(path, &orig_st) != 0) {
    if (errno == ENOENT && (oflag & O_CREAT) != 0) {
      // File doesn't exist, but we want to create it, add O_EXCL flag
      // to make sure no-one creates it (or a symlink) before us
      // This works as we expect with symlinks, from posix man page:
      // 'If O_EXCL and O_CREAT are set, and path names a symbolic
      // link, open() shall fail and set errno to [EEXIST]'.
      oflag |= O_EXCL;
      create = true;
    } else {
      // File doesn't exist, and we are not creating it.
      return OS_ERR;
    }
  } else {
    // Lstat success, check if existing file is a link.
    if ((orig_st.st_mode & S_IFMT) == S_IFLNK) {
      // File is a symlink.
      errno = ELOOP;
      return OS_ERR;
    }
  }

  if (use_mode == true) {
    fd = open(path, oflag, mode);
  } else {
    fd = open(path, oflag);
  }

  if (fd == OS_ERR) {
    return fd;
  }

  // Can't do inode checks on before/after if we created the file.
  if (create == false) {
    if (fstat(fd, &new_st) != 0) {
      // Keep errno from fstat, in case close also fails.
      error = errno;
      ::close(fd);
      errno = error;
      return OS_ERR;
    }

    if (orig_st.st_dev != new_st.st_dev || orig_st.st_ino != new_st.st_ino) {
      // File was tampered with during race window.
      ::close(fd);
      errno = EEXIST;
      if (PrintMiscellaneous && Verbose) {
        warning("possible file tampering attempt detected when opening %s", path);
      }
      return OS_ERR;
    }
  }

  return fd;
}

static int open_o_nofollow(const char* path, int oflag, mode_t mode) {
  return open_o_nofollow_impl(path, oflag, mode, true);
}

static int open_o_nofollow(const char* path, int oflag) {
  return open_o_nofollow_impl(path, oflag, 0, false);
}
#endif
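The workaround above closes the symlink race by snapshotting the inode before open() and re-checking it through the returned descriptor. The same lstat/open/fstat pattern in a standalone, compilable sketch (error handling reduced to return codes; path in main() is just a placeholder):

    #include <cerrno>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    // Symlink-safe open for platforms without O_NOFOLLOW: refuse symlinks
    // up front, then verify the opened fd still names the same dev/inode
    // pair that lstat() saw, closing the race window.
    static int open_nofollow_sketch(const char* path, int oflag) {
      struct stat before;
      if (lstat(path, &before) != 0) return -1;
      if (S_ISLNK(before.st_mode)) { errno = ELOOP; return -1; }
      int fd = open(path, oflag);
      if (fd < 0) return -1;
      struct stat after;
      if (fstat(fd, &after) != 0 ||
          before.st_dev != after.st_dev || before.st_ino != after.st_ino) {
        close(fd);  // the file was swapped during the race window
        errno = EEXIST;
        return -1;
      }
      return fd;
    }

    int main() {
      return open_nofollow_sketch("/etc/hosts", O_RDONLY) >= 0 ? 0 : 1;
    }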
// Open the directory of the given path and validate it.
// Return a DIR * of the open directory.
static DIR *open_directory_secure(const char* dirname) {
  // Open the directory using open() so that it can be verified
  // to be secure by calling is_dirfd_secure(), opendir() and then check
  // to see if they are the same file system object. This method does not
  // introduce a window of opportunity for the directory to be attacked that
  // calling opendir() and is_directory_secure() does.
  int result;
  DIR *dirp = NULL;

  // No O_NOFOLLOW defined at buildtime, and it is not documented for open;
  // so provide a workaround in this case.
#ifdef O_NOFOLLOW
  RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
#else
  // workaround (jdk6 coding)
  RESTARTABLE(::open_o_nofollow(dirname, O_RDONLY), result);
#endif

  if (result == OS_ERR) {
    // Directory doesn't exist or is a symlink, so there is nothing to cleanup.
    if (PrintMiscellaneous && Verbose) {
      if (errno == ELOOP) {
        warning("directory %s is a symlink and is not secure\n", dirname);
      } else {
        warning("could not open directory %s: %s\n", dirname, strerror(errno));
      }
    }
    return dirp;
  }
  int fd = result;

  // Determine if the open directory is secure.
  if (!is_dirfd_secure(fd)) {
    // The directory is not a secure directory.
    os::close(fd);
    return dirp;
  }

  // Open the directory.
  dirp = ::opendir(dirname);
  if (dirp == NULL) {
    // The directory doesn't exist, close fd and return.
    os::close(fd);
    return dirp;
  }

  // Check to make sure fd and dirp are referencing the same file system object.
  if (!is_same_fsobject(fd, dirp->dd_fd)) {
    // The directory is not secure.
    os::close(fd);
    os::closedir(dirp);
    dirp = NULL;
    return dirp;
  }

  // Close initial open now that we know directory is secure
  os::close(fd);

  return dirp;
}

// NOTE: The code below uses fchdir(), open() and unlink() because
// fdopendir(), openat() and unlinkat() are not supported on all
// versions. Once the support for fdopendir(), openat() and unlinkat()
// is available on all supported versions the code can be changed
// to use these functions.

// Open the directory of the given path, validate it and set the
// current working directory to it.
// Return a DIR * of the open directory and the saved cwd fd.
//
static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {

  // Open the directory.
  DIR* dirp = open_directory_secure(dirname);
  if (dirp == NULL) {
    // Directory doesn't exist or is insecure, so there is nothing to cleanup.
    return dirp;
  }
  int fd = dirp->dd_fd;

  // Open a fd to the cwd and save it off.
  int result;
  RESTARTABLE(::open(".", O_RDONLY), result);
  if (result == OS_ERR) {
    *saved_cwd_fd = -1;
  } else {
    *saved_cwd_fd = result;
  }

  // Set the current directory to dirname by using the fd of the directory.
  result = fchdir(fd);

  return dirp;
}

// Close the directory and restore the current working directory.
static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {

  int result;
  // If we have a saved cwd change back to it and close the fd.
  if (saved_cwd_fd != -1) {
    result = fchdir(saved_cwd_fd);
    ::close(saved_cwd_fd);
  }

  // Close the directory.
  os::closedir(dirp);
}
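open_directory_secure_cwd() and close_directory_secure_cwd() bracket a save/fchdir/restore sequence so that later unlink() calls can use names relative to the already-verified directory. The pattern in isolation (POSIX; the directory path is a placeholder):

    #include <fcntl.h>
    #include <unistd.h>

    int main() {
      // Save the current working directory as an fd, not a path.
      int saved_cwd = open(".", O_RDONLY);
      if (saved_cwd < 0) return 1;

      int dir_fd = open("/tmp", O_RDONLY);   // placeholder directory
      if (dir_fd >= 0 && fchdir(dir_fd) == 0) {
        // ... operate on entries with paths relative to /tmp here ...
      }
      if (dir_fd >= 0) close(dir_fd);

      fchdir(saved_cwd);                     // restore the original cwd
      close(saved_cwd);
      return 0;
    }

Saving the cwd as a descriptor rather than a path matters: restoring via a path could itself be redirected by a symlink, while fchdir() on a saved fd cannot.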
// Check if the given file descriptor is considered a secure.
static bool is_file_secure(int fd, const char *filename) {

  int result;
  struct stat statbuf;

  // Determine if the file is secure.
  RESTARTABLE(::fstat(fd, &statbuf), result);
  if (result == OS_ERR) {
    if (PrintMiscellaneous && Verbose) {
      warning("fstat failed on %s: %s\n", filename, strerror(errno));
    }
    return false;
  }
  if (statbuf.st_nlink > 1) {
    // A file with multiple links is not expected.
    if (PrintMiscellaneous && Verbose) {
      warning("file %s has multiple links\n", filename);
    }
    return false;
  }
  return true;
}


// return the user name for the given user id
//
// the caller is expected to free the allocated memory.
// Return the user name for the given user id.
//
// The caller is expected to free the allocated memory.
static char* get_user_name(uid_t uid) {

  struct passwd pwent;

  // determine the max pwbuf size from sysconf, and hardcode
  // Determine the max pwbuf size from sysconf, and hardcode
  // a default if this not available through sysconf.
  //
  long bufsize = sysconf(_SC_GETPW_R_SIZE_MAX);
  if (bufsize == -1)
    bufsize = 1024;

@ -344,7 +608,8 @@ static char* get_user_name_slow(int vmid, TRAPS) {
    strcat(usrdir_name, "/");
    strcat(usrdir_name, dentry->d_name);

    DIR* subdirp = os::opendir(usrdir_name);
    // Open the user directory.
    DIR* subdirp = open_directory_secure(usrdir_name);

    if (subdirp == NULL) {
      FREE_C_HEAP_ARRAY(char, usrdir_name);

@ -464,28 +729,7 @@ static void remove_file(const char* path) {
  }
}


// remove file
//
// this method removes the file with the given file name in the
// named directory.
//
static void remove_file(const char* dirname, const char* filename) {

  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);

  strcpy(path, dirname);
  strcat(path, "/");
  strcat(path, filename);

  remove_file(path);

  FREE_C_HEAP_ARRAY(char, path);
}


// cleanup stale shared memory resources
// Cleanup stale shared memory resources
//
// This method attempts to remove all stale shared memory files in
// the named user temporary directory. It scans the named directory

@ -493,33 +737,26 @@
// process id is extracted from the file name and a test is run to
// determine if the process is alive. If the process is not alive,
// any stale file resources are removed.
//
static void cleanup_sharedmem_resources(const char* dirname) {

  // open the user temp directory
  DIR* dirp = os::opendir(dirname);

  int saved_cwd_fd;
  // Open the directory.
  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
  if (dirp == NULL) {
    // directory doesn't exist, so there is nothing to cleanup
    // Directory doesn't exist or is insecure, so there is nothing to cleanup.
    return;
  }

  if (!is_directory_secure(dirname)) {
    // the directory is not a secure directory
    os::closedir(dirp);
    return;
  }

  // for each entry in the directory that matches the expected file
  // For each entry in the directory that matches the expected file
  // name pattern, determine if the file resources are stale and if
  // so, remove the file resources. Note, instrumented HotSpot processes
  // for this user may start and/or terminate during this search and
  // remove or create new files in this directory. The behavior of this
  // loop under these conditions is dependent upon the implementation of
  // opendir/readdir.
  //
  struct dirent* entry;
  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);

  errno = 0;
  while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {

@ -529,56 +766,55 @@ static void cleanup_sharedmem_resources(const char* dirname) {

    if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {

      // attempt to remove all unexpected files, except "." and ".."
      remove_file(dirname, entry->d_name);
      // Attempt to remove all unexpected files, except "." and "..".
      unlink(entry->d_name);
    }

    errno = 0;
    continue;
  }

  // we now have a file name that converts to a valid integer
  // We now have a file name that converts to a valid integer
  // that could represent a process id . if this process id
  // matches the current process id or the process is not running,
  // then remove the stale file resources.
  //
  // process liveness is detected by sending signal number 0 to
  // Process liveness is detected by sending signal number 0 to
  // the process id (see kill(2)). if kill determines that the
  // process does not exist, then the file resources are removed.
  // if kill determines that that we don't have permission to
  // signal the process, then the file resources are assumed to
  // be stale and are removed because the resources for such a
  // process should be in a different user specific directory.
  //
  if ((pid == os::current_process_id()) ||
      (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {

    remove_file(dirname, entry->d_name);
    unlink(entry->d_name);
  }
  errno = 0;
  }
  os::closedir(dirp);
  FREE_C_HEAP_ARRAY(char, dbuf);

  // Close the directory and reset the current working directory.
  close_directory_secure_cwd(dirp, saved_cwd_fd);

  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
}

// make the user specific temporary directory. Returns true if
// Make the user specific temporary directory. Returns true if
// the directory exists and is secure upon return. Returns false
// if the directory exists but is either a symlink, is otherwise
// insecure, or if an error occurred.
//
static bool make_user_tmp_dir(const char* dirname) {

  // create the directory with 0755 permissions. note that the directory
  // Create the directory with 0755 permissions. note that the directory
  // will be owned by euid::egid, which may not be the same as uid::gid.
  //
  if (mkdir(dirname, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) == OS_ERR) {
    if (errno == EEXIST) {
      // The directory already exists and was probably created by another
      // JVM instance. However, this could also be the result of a
      // deliberate symlink. Verify that the existing directory is safe.
      //
      if (!is_directory_secure(dirname)) {
        // directory is not secure
        // Directory is not secure.
        if (PrintMiscellaneous && Verbose) {
          warning("%s directory is insecure\n", dirname);
        }

@ -614,19 +850,63 @@ static int create_sharedmem_resources(const char* dirname, const char* filename,
    return -1;
  }

  int result;

  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
  if (result == OS_ERR) {
    if (PrintMiscellaneous && Verbose) {
      warning("could not create file %s: %s\n", filename, strerror(errno));
    }
  int saved_cwd_fd;
  // Open the directory and set the current working directory to it.
  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
  if (dirp == NULL) {
    // Directory doesn't exist or is insecure, so cannot create shared
    // memory file.
    return -1;
  }

  // Open the filename in the current directory.
  // Cannot use O_TRUNC here; truncation of an existing file has to happen
  // after the is_file_secure() check below.
  int result;

  // No O_NOFOLLOW defined at buildtime, and it is not documented for open;
  // so provide a workaround in this case.
#ifdef O_NOFOLLOW
  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
#else
  // workaround function (jdk6 code)
  RESTARTABLE(::open_o_nofollow(filename, O_RDWR|O_CREAT, S_IREAD|S_IWRITE), result);
#endif

  if (result == OS_ERR) {
    if (PrintMiscellaneous && Verbose) {
      if (errno == ELOOP) {
        warning("file %s is a symlink and is not secure\n", filename);
      } else {
        warning("could not create file %s: %s\n", filename, strerror(errno));
      }
    }
    // Close the directory and reset the current working directory.
    close_directory_secure_cwd(dirp, saved_cwd_fd);

    return -1;
  }
  // Close the directory and reset the current working directory.
  close_directory_secure_cwd(dirp, saved_cwd_fd);

  // save the file descriptor
  int fd = result;

  // Check to see if the file is secure.
  if (!is_file_secure(fd, filename)) {
    ::close(fd);
    return -1;
  }

  // Truncate the file to get rid of any existing data.
  RESTARTABLE(::ftruncate(fd, (off_t)0), result);
  if (result == OS_ERR) {
    if (PrintMiscellaneous && Verbose) {
      warning("could not truncate shared memory file: %s\n", strerror(errno));
    }
    ::close(fd);
    return -1;
  }
  // set the file size
  RESTARTABLE(::ftruncate(fd, (off_t)size), result);
  if (result == OS_ERR) {

@ -648,7 +928,14 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {

  // open the file
  int result;
  // No O_NOFOLLOW defined at buildtime, and it is not documented for open;
  // so provide a workaround in this case
#ifdef O_NOFOLLOW
  RESTARTABLE(::open(filename, oflags), result);
#else
  RESTARTABLE(::open_o_nofollow(filename, oflags), result);
#endif

  if (result == OS_ERR) {
    if (errno == ENOENT) {
      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),

@ -662,8 +949,15 @@ static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
      THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
    }
  }
  int fd = result;

  return result;
  // Check to see if the file is secure.
  if (!is_file_secure(fd, filename)) {
    ::close(fd);
    return -1;
  }

  return fd;
}

// create a named shared memory region. returns the address of the

@ -695,13 +989,21 @@ static char* mmap_create_shared(size_t size) {
  char* dirname = get_user_tmp_dir(user_name);
  char* filename = get_sharedmem_filename(dirname, vmid);

  // Get the short filename.
  char* short_filename = strrchr(filename, '/');
  if (short_filename == NULL) {
    short_filename = filename;
  } else {
    short_filename++;
  }

  // cleanup any stale shared memory files
  cleanup_sharedmem_resources(dirname);

  assert(((size > 0) && (size % os::vm_page_size() == 0)),
         "unexpected PerfMemory region size");

  fd = create_sharedmem_resources(dirname, filename, size);
  fd = create_sharedmem_resources(dirname, short_filename, size);

  FREE_C_HEAP_ARRAY(char, user_name);
  FREE_C_HEAP_ARRAY(char, dirname);

@ -733,6 +1035,9 @@ static char* mmap_create_shared(size_t size) {
  // clear the shared memory region
  (void)::memset((void*) mapAddress, 0, size);

  // It does not go through os api, the operation has to record from here.
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC, mtInternal);

  return mapAddress;
}

@ -807,7 +1112,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
  char* mapAddress;
  int result;
  int fd;
  size_t size;
  size_t size = 0;
  const char* luser = NULL;

  int mmap_prot;

@ -819,12 +1124,18 @@
  // constructs for the file and the shared memory mapping.
  if (mode == PerfMemory::PERF_MODE_RO) {
    mmap_prot = PROT_READ;

    // No O_NOFOLLOW defined at buildtime, and it is not documented for open.
#ifdef O_NOFOLLOW
    file_flags = O_RDONLY | O_NOFOLLOW;
#else
    file_flags = O_RDONLY;
#endif
  }
  else if (mode == PerfMemory::PERF_MODE_RW) {
#ifdef LATER
    mmap_prot = PROT_READ | PROT_WRITE;
    file_flags = O_RDWR;
    file_flags = O_RDWR | O_NOFOLLOW;
#else
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "Unsupported access mode");

@ -853,9 +1164,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
  // store file, we don't follow them when attaching either.
  //
  if (!is_directory_secure(dirname)) {
    FREE_C_HEAP_ARRAY(char, dirname);
    FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
    if (luser != user) {
      FREE_C_HEAP_ARRAY(char, luser);
      FREE_C_HEAP_ARRAY(char, luser, mtInternal);
    }
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
              "Process not found");

@ -901,6 +1212,9 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
              "Could not map PerfMemory");
  }

  // It does not go through os api, the operation has to record from here.
  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC, mtInternal);

  *addr = mapAddress;
  *sizep = size;
@ -428,9 +428,9 @@ static unsigned __stdcall java_start(Thread* thread) {
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 50115;  // non-java thread
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 40115;    // java thread
    res = 20115;    // java thread
  }

  // Install a win32 structured exception handler around every thread created

@ -3791,6 +3791,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile jint process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

@ -3798,10 +3799,10 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else {
    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD) {
      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);

@ -3856,7 +3857,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
        // The current exiting thread has stored its handle in the array, and now
        // should leave the critical section before calling _endthreadex().

      } else { // what != EPT_THREAD
      } else if (what != EPT_THREAD) {
        if (handle_count > 0) {
          // Before ending the process, make sure all the threads that had called
          // _endthreadex() completed.

@ -3882,24 +3883,28 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
          handle_count = 0;
        }

        // End the process, not leaving critical section.
        // This makes sure no other thread executes exit-related code at the same
        // time, thus a race is avoided.
        if (what == EPT_PROCESS) {
          ::exit(exit_code);
        } else {
          _exit(exit_code);
        }
        OrderAccess::release_store(&process_exiting, 1);
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (what == EPT_THREAD) {
      while (OrderAccess::load_acquire(&process_exiting) != 0) {
        // Some other thread is about to call exit(), so we
        // don't let the current thread proceed to _endthreadex()
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has stored its handle and left the critical section.
  // - the current thread has stored its handle and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
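The os_windows change is essentially an acquire/release handshake on process_exiting: the process-exiting thread publishes the flag before leaving the critical section, and exiting worker threads observe it and park instead of racing into the exit path. The same handshake expressed with standard C++11 atomics (a sketch only; HotSpot uses its own OrderAccess primitives, not <atomic>):

    #include <atomic>
    #include <cstdio>

    std::atomic<int> process_exiting{0};

    // Process-exit path: publish the flag with release semantics.
    void process_exit_path() {
      process_exiting.store(1, std::memory_order_release);
      // ::exit(code) would follow in the real code
    }

    // Thread-exit path: a thread may proceed only while the flag is clear.
    bool thread_may_proceed() {
      return process_exiting.load(std::memory_order_acquire) == 0;
    }

    int main() {
      std::printf("before: %d\n", thread_may_proceed());
      process_exit_path();
      std::printf("after: %d\n", thread_may_proceed());
      return 0;
    }

The release store pairs with the acquire loads so that everything the exiting thread did before raising the flag is visible to any thread that sees the flag set.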
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -601,15 +601,6 @@ class G1PostBarrierStub: public CodeStub {
  LIR_Opr _addr;
  LIR_Opr _new_val;

  static jbyte* _byte_map_base;
  static jbyte* byte_map_base_slow();
  static jbyte* byte_map_base() {
    if (_byte_map_base == NULL) {
      _byte_map_base = byte_map_base_slow();
    }
    return _byte_map_base;
  }

 public:
  // addr (the address of the object head) and new_val must be registers.
  G1PostBarrierStub(LIR_Opr addr, LIR_Opr new_val): _addr(addr), _new_val(new_val) { }
@ -32,6 +32,7 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"

@ -3351,7 +3352,12 @@ void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    // Notify the runtime very infrequently only to take care of counter overflows
    increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
    int freq_log = Tier23InlineeNotifyFreqLog;
    double scale;
    if (_method->has_option_value("CompileThresholdScaling", scale)) {
      freq_log = Arguments::scaled_freq_log(freq_log, scale);
    }
    increment_event_counter_impl(info, x->inlinee(), right_n_bits(freq_log), InvocationEntryBci, false, true);
  }
}

@ -3366,7 +3372,11 @@ void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool bac
    ShouldNotReachHere();
  }
  // Increment the appropriate invocation/backedge counter and notify the runtime.
  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
  double scale;
  if (_method->has_option_value("CompileThresholdScaling", scale)) {
    freq_log = Arguments::scaled_freq_log(freq_log, scale);
  }
  increment_event_counter_impl(info, info->scope()->method(), right_n_bits(freq_log), bci, backedge, true);
}

void LIRGenerator::decrement_age(CodeEmitInfo* info) {
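In both LIRGenerator hunks the constant (1 << freq_log) - 1 becomes right_n_bits(freq_log) computed from a possibly rescaled freq_log, so a CompileThresholdScaling below 1.0 shortens the notification period. The arithmetic in isolation (scaled_freq_log itself lives in Arguments; the rounding below is only an assumption about the idea, not its exact implementation):

    #include <cmath>
    #include <cstdio>

    static int right_n_bits_sketch(int n) { return (1 << n) - 1; }

    // Rescale a log2 frequency: 2^freq_log events scaled by 'scale',
    // rounded back to a power-of-two exponent (assumed rounding).
    static int scaled_freq_log_sketch(int freq_log, double scale) {
      double scaled = (1 << freq_log) * scale;
      int log = (int)std::ceil(std::log2(scaled));
      return log < 0 ? 0 : log;
    }

    int main() {
      int freq_log = 10;     // notify every 1024 events by default
      double scale = 0.25;   // per-method CompileThresholdScaling
      std::printf("mask 0x%x -> 0x%x\n",
                  right_n_bits_sketch(freq_log),
                  right_n_bits_sketch(scaled_freq_log_sketch(freq_log, scale)));
      return 0;
    }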
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -221,6 +221,30 @@ template <class T, class N> const char* CompactHashtable<T, N>::init(const char*
  return (const char*)end;
}

template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosure *cl) {
  assert(!DumpSharedSpaces, "run-time only");
  for (juint i = 0; i < _bucket_count; i ++) {
    juint bucket_info = _buckets[i];
    juint bucket_offset = BUCKET_OFFSET(bucket_info);
    int   bucket_type = BUCKET_TYPE(bucket_info);
    juint* bucket = _buckets + bucket_offset;
    juint* bucket_end = _buckets;

    Symbol* sym;
    if (bucket_type == COMPACT_BUCKET_TYPE) {
      sym = (Symbol*)((void*)(_base_address + bucket[0]));
      cl->do_symbol(&sym);
    } else {
      bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
      while (bucket < bucket_end) {
        sym = (Symbol*)((void*)(_base_address + bucket[1]));
        cl->do_symbol(&sym);
        bucket += 2;
      }
    }
  }
}

// Explicitly instantiate these types
template class CompactHashtable<Symbol*, char>;
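symbols_do() distinguishes two bucket encodings: a compact bucket stores a single value offset in-line, while a regular bucket holds (hash, offset) pairs that run up to the next bucket's start. A toy walk over the same shape (the packing, constants, and data here are illustrative, not the shared-archive encoding):

    #include <cstdio>
    #include <vector>

    // Toy bucket table: entry = offset | (type << 31), type 1 = compact.
    static unsigned BUCKET_OFFSET(unsigned info) { return info & 0x7fffffffu; }
    static unsigned BUCKET_TYPE(unsigned info)   { return info >> 31; }

    int main() {
      // One regular bucket (two hash/offset pairs), one compact bucket,
      // then an end marker giving the final offset.
      std::vector<unsigned> buckets = {2u, 6u | (1u << 31), 7u};
      std::vector<unsigned> entries = {0, 0, 0x11, 100, 0x22, 200, 300};
      for (size_t i = 0; i + 1 < buckets.size(); i++) {
        unsigned off = BUCKET_OFFSET(buckets[i]);
        unsigned end = BUCKET_OFFSET(buckets[i + 1]);
        if (BUCKET_TYPE(buckets[i]) == 1) {
          std::printf("compact bucket: symbol at base + %u\n", entries[off]);
        } else {
          for (unsigned j = off; j < end; j += 2) {   // skip over the hash
            std::printf("regular bucket: symbol at base + %u\n", entries[j + 1]);
          }
        }
      }
      return 0;
    }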
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -249,6 +249,9 @@ public:
    }
    return NULL;
  }

  // iterate over symbols
  void symbols_do(SymbolClosure *cl);
};

////////////////////////////////////////////////////////////////////////
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -82,6 +82,10 @@ void SymbolTable::initialize_symbols(int arena_alloc_size) {

// Call function for all symbols in the symbol table.
void SymbolTable::symbols_do(SymbolClosure *cl) {
  // all symbols from shared table
  _shared_table.symbols_do(cl);

  // all symbols from the dynamic table
  const int n = the_table()->table_size();
  for (int i = 0; i < n; i++) {
    for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -1546,10 +1546,15 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
        no_control_flow = true; break;
      case Bytecodes::_getstatic :
      case Bytecodes::_putstatic :
        // pass TRUE, operand can be an array type for getstatic/putstatic.
        verify_field_instructions(
          &bcs, &current_frame, cp, true, CHECK_VERIFY(this));
        no_control_flow = false; break;
      case Bytecodes::_getfield :
      case Bytecodes::_putfield :
        // pass FALSE, operand can't be an array type for getfield/putfield.
        verify_field_instructions(
          &bcs, &current_frame, cp, CHECK_VERIFY(this));
          &bcs, &current_frame, cp, false, CHECK_VERIFY(this));
        no_control_flow = false; break;
      case Bytecodes::_invokevirtual :
      case Bytecodes::_invokespecial :

@ -2107,6 +2112,7 @@ bool ClassVerifier::name_in_supers(
void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
                                              StackMapFrame* current_frame,
                                              constantPoolHandle cp,
                                              bool allow_arrays,
                                              TRAPS) {
  u2 index = bcs->get_index_u2();
  verify_cp_type(bcs->bci(), index, cp,

@ -2126,8 +2132,8 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
  // Get referenced class type
  VerificationType ref_class_type = cp_ref_index_to_type(
    index, cp, CHECK_VERIFY(this));
  if (!ref_class_type.is_object()) {
    /* Unreachable? Class file parser verifies Fieldref contents */
  if (!ref_class_type.is_object() &&
      (!allow_arrays || !ref_class_type.is_array())) {
    verify_error(ErrorContext::bad_type(bcs->bci(),
        TypeOrigin::cp(index, ref_class_type)),
        "Expecting reference to class in class %s at constant pool index %d",
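The new allow_arrays parameter relaxes the verifier's field-instruction check just enough for getstatic/putstatic to name an array type while keeping getfield/putfield strict. The accept/reject rule reduced to a predicate (a sketch of the condition only, not the verifier API):

    #include <cstdio>

    // Mirrors the patched condition: accept when the type is an object,
    // or when it is an array and arrays are allowed for this bytecode.
    static bool field_ref_ok(bool is_object, bool is_array, bool allow_arrays) {
      return is_object || (allow_arrays && is_array);
    }

    int main() {
      // getstatic/putstatic pass allow_arrays = true, getfield/putfield false.
      std::printf("getstatic on array: %d\n", field_ref_ok(false, true, true));
      std::printf("getfield on array:  %d\n", field_ref_ok(false, true, false));
      std::printf("getfield on object: %d\n", field_ref_ok(true, false, false));
      return 0;
    }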
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -297,7 +297,7 @@ class ClassVerifier : public StackObj {

  void verify_field_instructions(
    RawBytecodeStream* bcs, StackMapFrame* current_frame,
    constantPoolHandle cp, TRAPS);
    constantPoolHandle cp, bool allow_arrays, TRAPS);

  void verify_invoke_init(
    RawBytecodeStream* bcs, u2 ref_index, VerificationType ref_class_type,
@ -233,8 +233,8 @@ void CodeCache::initialize_heaps() {
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
      MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
           os::page_size_for_region(size, 8)) :
      MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
           os::page_size_for_region_aligned(size, 8)) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
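This and the following GC hunks switch several callers to page_size_for_region_aligned(), which, in addition to requiring room for min_pages pages, requires the region size to be an exact multiple of the chosen page size. A small model of that selection rule (the page sizes are examples, not what the VM probes):

    #include <cstdio>

    // Pick the largest page size that yields at least min_pages pages
    // and divides the region evenly -- the "aligned" variant's contract.
    static long pick_page_size(long region, long min_pages) {
      const long page_sizes[] = {2 * 1024 * 1024, 4096};  // large, then small
      for (long ps : page_sizes) {
        if (region / ps >= min_pages && region % ps == 0) return ps;
      }
      return 4096;
    }

    int main() {
      std::printf("%ld\n", pick_page_size(64L * 1024 * 1024, 8)); // 2M pages fit
      std::printf("%ld\n", pick_page_size(6L * 1024 * 1024, 8));  // falls back to 4K
      return 0;
    }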
@ -1470,7 +1470,9 @@ bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci,

  // The method may be explicitly excluded by the user.
  bool quietly;
  if (CompilerOracle::should_exclude(method, quietly)) {
  double scale;
  if (CompilerOracle::should_exclude(method, quietly)
      || (CompilerOracle::has_option_value(method, "CompileThresholdScaling", scale) && scale == 0)) {
    if (!quietly) {
      // This does not happen quietly...
      ResourceMark rm;
@ -162,8 +162,8 @@ void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
         "we should have already filtered out humongous regions");
  assert(_end == orig_end(),
         "we should have already filtered out humongous regions");

  _in_collection_set = false;
  assert(!_in_collection_set,
         err_msg("Should not clear heap region %u in the collection set", hrm_index()));

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
@ -1194,8 +1194,10 @@ oop ParNewGeneration::copy_to_survivor_space(
    return real_forwardee(old);
  }

  new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                   old, m, sz);
  if (!_promotion_failed) {
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                     old, m, sz);
  }

  if (new_obj == NULL) {
    // promotion failed, forward to self
@ -61,9 +61,9 @@ void GenerationSizer::initialize_flags() {

void GenerationSizer::initialize_size_info() {
  trace_gen_sizes("ps heap raw");
  const size_t max_page_sz = os::page_size_for_region(_max_heap_byte_size, 8);
  const size_t max_page_sz = os::page_size_for_region_aligned(_max_heap_byte_size, 8);
  const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
  const size_t min_page_sz = os::page_size_for_region(_min_heap_byte_size, min_pages);
  const size_t min_page_sz = os::page_size_for_region_aligned(_min_heap_byte_size, min_pages);
  const size_t page_sz = MIN2(max_page_sz, min_page_sz);

  // Can a page size be something else than a power of two?
@ -41,7 +41,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)

  const size_t words = bits / BitsPerWord;
  const size_t raw_bytes = words * sizeof(idx_t);
  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
@ -403,7 +403,7 @@ PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
@ -53,7 +53,7 @@
/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather -then a switch statement. This improves performance because it
 * gives us the oportunity to have the instructions that calculate the
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
@ -36,7 +36,7 @@
// Implementation notes: For space reasons, state & counter are both encoded in one word,
// The state is encoded using some of the least significant bits, the counter is using the
// more significant bits. The counter is incremented before a method is activated and an
// action is triggered when when count() > limit().
// action is triggered when count() > limit().

class InvocationCounter VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

@ -48,7 +48,6 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
    number_of_state_bits = 2,
    number_of_carry_bits = 1,
    number_of_noncount_bits = number_of_state_bits + number_of_carry_bits,
    number_of_count_bits = BitsPerInt - number_of_noncount_bits,
    state_limit = nth_bit(number_of_state_bits),
    count_grain = nth_bit(number_of_state_bits + number_of_carry_bits),
    carry_mask = right_n_bits(number_of_carry_bits) << number_of_state_bits,

@ -68,6 +67,7 @@ class InvocationCounter VALUE_OBJ_CLASS_SPEC {
    count_increment = count_grain, // use this value to increment the 32bit _counter word
    count_mask_value = count_mask, // use this value to mask the backedge counter
    count_shift = number_of_noncount_bits,
    number_of_count_bits = BitsPerInt - number_of_noncount_bits,
    count_limit = nth_bit(number_of_count_bits - 1)
  };
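The counter word described here packs two state bits and one carry bit below the count, which is why callers increment by count_grain rather than by one and mask with values shifted by count_shift. The arithmetic spelled out (values recomputed from the enum above, purely for illustration):

    #include <cstdio>

    int main() {
      const int number_of_state_bits = 2;
      const int number_of_carry_bits = 1;
      const int number_of_noncount_bits =
          number_of_state_bits + number_of_carry_bits;
      const int count_shift = number_of_noncount_bits;     // 3
      const int count_increment = 1 << count_shift;        // count_grain
      int counter = 0;
      for (int i = 0; i < 5; i++) counter += count_increment;  // 5 invocations
      std::printf("raw word 0x%x, decoded count %d\n",
                  counter, counter >> count_shift);
      return 0;
    }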
@ -104,8 +104,8 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
                     os::page_size_for_region(rs.size(), min_pages));
    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
                     os::page_size_for_region_aligned(rs.size(), min_pages));
  }

  const size_t granularity = os::vm_allocation_granularity();
@ -412,15 +412,14 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
  }

  methodHandle mh(m);
  ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
  MethodCounters* counters = MethodCounters::allocate(loader_data, THREAD);
  MethodCounters* counters = MethodCounters::allocate(mh, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CompileBroker::log_metaspace_failure();
    ClassLoaderDataGraph::set_metaspace_oom(true);
    return NULL;   // return the exception (which is cleared)
  }
  if (!mh->init_method_counters(counters)) {
    MetadataFactory::free_metadata(loader_data, counters);
    MetadataFactory::free_metadata(mh->method_holder()->class_loader_data(), counters);
  }
  return mh->method_counters();
}
@ -23,10 +23,11 @@
 */
#include "precompiled.hpp"
#include "oops/methodCounters.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/handles.inline.hpp"

MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) {
  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters();
MethodCounters* MethodCounters::allocate(methodHandle mh, TRAPS) {
  ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
}

void MethodCounters::clear_counters() {
@ -26,7 +26,9 @@
#define SHARE_VM_OOPS_METHODCOUNTERS_HPP

#include "oops/metadata.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/invocationCounter.hpp"
#include "runtime/arguments.hpp"

class MethodCounters: public MetaspaceObj {
  friend class VMStructs;
@ -45,7 +47,11 @@ class MethodCounters: public MetaspaceObj {
  //   3. (INT_MIN..0]  - method is hot and will deopt and get
  //                      recompiled without the counters
  int _nmethod_age;

  int _interpreter_invocation_limit;        // per-method InterpreterInvocationLimit
  int _interpreter_backward_branch_limit;   // per-method InterpreterBackwardBranchLimit
  int _interpreter_profile_limit;           // per-method InterpreterProfileLimit
  int _invoke_mask;                         // per-method Tier0InvokeNotifyFreqLog
  int _backedge_mask;                       // per-method Tier0BackedgeNotifyFreqLog
#ifdef TIERED
  float _rate;            // Events (invocation and backedge counter increments) per millisecond
  jlong _prev_time;       // Previous time the rate was acquired
@ -53,15 +59,15 @@ class MethodCounters: public MetaspaceObj {
  u1    _highest_osr_comp_level;      // Same for OSR level
#endif

  MethodCounters() : _interpreter_invocation_count(0),
                     _interpreter_throwout_count(0),
                     _number_of_breakpoints(0),
                     _nmethod_age(INT_MAX)
  MethodCounters(methodHandle mh) : _interpreter_invocation_count(0),
                                    _interpreter_throwout_count(0),
                                    _number_of_breakpoints(0),
                                    _nmethod_age(INT_MAX)
#ifdef TIERED
                   , _rate(0),
                     _prev_time(0),
                     _highest_comp_level(0),
                     _highest_osr_comp_level(0)
                                  , _rate(0),
                                    _prev_time(0),
                                    _highest_comp_level(0),
                                    _highest_osr_comp_level(0)
#endif
  {
    invocation_counter()->init();
@ -70,10 +76,28 @@ class MethodCounters: public MetaspaceObj {
    if (StressCodeAging) {
      set_nmethod_age(HotMethodDetectionLimit);
    }

    // Set per-method thresholds.
    double scale = 1.0;
    CompilerOracle::has_option_value(mh, "CompileThresholdScaling", scale);

    int compile_threshold = Arguments::scaled_compile_threshold(CompileThreshold, scale);
    _interpreter_invocation_limit = compile_threshold << InvocationCounter::count_shift;
    if (ProfileInterpreter) {
      // If interpreter profiling is enabled, the backward branch limit
      // is compared against the method data counter rather than an invocation
      // counter, therefore no shifting of bits is required.
      _interpreter_backward_branch_limit = (compile_threshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
    } else {
      _interpreter_backward_branch_limit = ((compile_threshold * OnStackReplacePercentage) / 100) << InvocationCounter::count_shift;
    }
    _interpreter_profile_limit = ((compile_threshold * InterpreterProfilePercentage) / 100) << InvocationCounter::count_shift;
    _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
    _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  }

 public:
  static MethodCounters* allocate(ClassLoaderData* loader_data, TRAPS);
  static MethodCounters* allocate(methodHandle mh, TRAPS);

  void deallocate_contents(ClassLoaderData* loader_data) {}
  DEBUG_ONLY(bool on_stack() { return false; })  // for template
@ -161,5 +185,24 @@ class MethodCounters: public MetaspaceObj {
    return offset_of(MethodCounters, _interpreter_invocation_count);
  }

  static ByteSize interpreter_invocation_limit_offset() {
    return byte_offset_of(MethodCounters, _interpreter_invocation_limit);
  }

  static ByteSize interpreter_backward_branch_limit_offset() {
    return byte_offset_of(MethodCounters, _interpreter_backward_branch_limit);
  }

  static ByteSize interpreter_profile_limit_offset() {
    return byte_offset_of(MethodCounters, _interpreter_profile_limit);
  }

  static ByteSize invoke_mask_offset() {
    return byte_offset_of(MethodCounters, _invoke_mask);
  }

  static ByteSize backedge_mask_offset() {
    return byte_offset_of(MethodCounters, _backedge_mask);
  }
};
#endif //SHARE_VM_OOPS_METHODCOUNTERS_HPP
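To see what the new constructor computes, here is the arithmetic traced with illustrative values (not necessarily the VM defaults): CompileThreshold = 10000, per-method CompileThresholdScaling = 0.5, OnStackReplacePercentage = 140, InterpreterProfilePercentage = 33, count_shift = 3:

    compile_threshold                  = 10000 * 0.5              = 5000
    _interpreter_invocation_limit      = 5000 << 3                = 40000
    _interpreter_backward_branch_limit = 5000 * (140 - 33) / 100  = 5350   (ProfileInterpreter on)
    _interpreter_profile_limit         = (5000 * 33 / 100) << 3   = 13200

The invocation and profile limits are pre-shifted because they are compared against the raw counter word described in the InvocationCounter hunk above; the backward branch limit stays unshifted when interpreter profiling is on, as its comment explains.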
@ -31,6 +31,7 @@
#include "memory/heapInspection.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
@ -1131,6 +1132,13 @@ void MethodData::init() {
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  double scale = 1.0;
  CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
  _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
@ -2088,6 +2088,8 @@ private:
  int _invocation_counter_start;
  int _backedge_counter_start;
  uint _tenure_traps;
  int _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
  int _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog

#if INCLUDE_RTM_OPT
  // State of RTM code generation during compilation of the method
@ -2447,10 +2449,19 @@ public:
  static ByteSize invocation_counter_offset() {
    return byte_offset_of(MethodData, _invocation_counter);
  }

  static ByteSize backedge_counter_offset() {
    return byte_offset_of(MethodData, _backedge_counter);
  }

  static ByteSize invoke_mask_offset() {
    return byte_offset_of(MethodData, _invoke_mask);
  }

  static ByteSize backedge_mask_offset() {
    return byte_offset_of(MethodData, _backedge_mask);
  }

  static ByteSize parameters_type_data_di_offset() {
    return byte_offset_of(MethodData, _parameters_type_data_di);
  }
@ -582,6 +582,9 @@ void PhaseChaitin::Register_Allocate() {
  // Peephole remove copies
  post_allocate_copy_removal();

  // Merge multidefs if multiple defs representing the same value are used in a single block.
  merge_multidefs();

#ifdef ASSERT
  // Verify the graph after RA.
  verify(&live_arena);
@ -681,6 +681,32 @@ private:
  // Extend the node to LRG mapping
  void add_reference( const Node *node, const Node *old_node);

  // Record the first use of a def in the block for a register.
  class RegDefUse {
    Node* _def;
    Node* _first_use;
   public:
    RegDefUse() : _def(NULL), _first_use(NULL) { }
    Node* def() const       { return _def;       }
    Node* first_use() const { return _first_use; }

    void update(Node* def, Node* use) {
      if (_def != def) {
        _def = def;
        _first_use = use;
      }
    }
    void clear() {
      _def = NULL;
      _first_use = NULL;
    }
  };
  typedef GrowableArray<RegDefUse> RegToDefUseMap;
  int possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse);

  // Merge nodes that are a part of a multidef lrg and produce the same value within a block.
  void merge_multidefs();

 private:

  static int _final_loads, _final_stores, _final_copies, _final_memoves;
@ -94,7 +94,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
    log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)        log->print(" inline='1'");
@ -2010,14 +2010,9 @@ bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
      bt = field->layout_type();
    } else {
      // Check for unsafe oop field access
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        int opcode = n->fast_out(i)->Opcode();
        if (opcode == Op_StoreP || opcode == Op_LoadP ||
            opcode == Op_StoreN || opcode == Op_LoadN) {
          bt = T_OBJECT;
          (*unsafe) = true;
          break;
        }
      if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
        bt = T_OBJECT;
        (*unsafe) = true;
      }
    }
  } else if (adr_type->isa_aryptr()) {
@ -2031,13 +2026,8 @@ bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
    }
  } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
    // Allocation initialization, ThreadLocal field access, unsafe access
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      int opcode = n->fast_out(i)->Opcode();
      if (opcode == Op_StoreP || opcode == Op_LoadP ||
          opcode == Op_StoreN || opcode == Op_LoadN) {
        bt = T_OBJECT;
        break;
      }
    if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
      bt = T_OBJECT;
    }
  }
@ -3092,13 +3082,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
        continue;
      } else if (n->Opcode() == Op_EncodeISOArray) {
        // get the memory projection
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node *use = n->fast_out(i);
          if (use->Opcode() == Op_SCMemProj) {
            n = use;
            break;
          }
        }
        n = n->find_out_with(Op_SCMemProj);
        assert(n->Opcode() == Op_SCMemProj, "memory projection required");
      } else {
        assert(n->is_Mem(), "memory node required.");
@ -3122,13 +3106,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
          continue;  // don't push users
        } else if (n->is_LoadStore()) {
          // get the memory projection
          for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
            Node *use = n->fast_out(i);
            if (use->Opcode() == Op_SCMemProj) {
              n = use;
              break;
            }
          }
          n = n->find_out_with(Op_SCMemProj);
          assert(n->Opcode() == Op_SCMemProj, "memory projection required");
        }
      }
@ -535,12 +535,8 @@ bool PhaseChaitin::remove_node_if_not_used(Block* b, uint location, Node* n, uin
  // The method add_input_to_liveout() keeps such nodes alive (put them on liveout list)
  // when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed
  // in block in such order that KILL MachProj nodes are processed first.
  uint cnt = def->outcnt();
  for (uint i = 0; i < cnt; i++) {
    Node* proj = def->raw_out(i);
    if (proj->Opcode() == Op_SCMemProj) {
      return false;
    }
  if (def->has_out_with(Op_SCMemProj)) {
    return false;
  }
  b->remove_node(location);
@ -2057,10 +2057,9 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
  }
  Node *main_cmp = main_bol->in(1);
  if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
    _igvn.hash_delete(main_bol);
    main_cmp = main_cmp->clone();// Clone a private CmpNode
    register_new_node( main_cmp, main_cle->in(0) );
    main_bol->set_req(1,main_cmp);
    _igvn.replace_input_of(main_bol, 1, main_cmp);
  }
  // Hack the now-private loop bounds
  _igvn.replace_input_of(main_cmp, 2, main_limit);
@ -616,6 +616,29 @@ public:
#endif
};

// MachMergeNode is similar to a PhiNode in a sense it merges multiple values,
// however it doesn't have a control input and is more like a MergeMem.
// It is inserted after the register allocation is done to ensure that nodes use single
// definition of a multidef lrg in a block.
class MachMergeNode : public MachIdealNode {
 public:
  MachMergeNode(Node *n1) {
    init_class_id(Class_MachMerge);
    add_req(NULL);
    add_req(n1);
  }
  virtual const RegMask &out_RegMask() const { return in(1)->out_RegMask(); }
  virtual const RegMask &in_RegMask(uint idx) const { return in(1)->in_RegMask(idx); }
  virtual const class Type *bottom_type() const { return in(1)->bottom_type(); }
  virtual uint ideal_reg() const { return bottom_type()->ideal_reg(); }
  virtual uint oper_input_base() const { return 1; }
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { }
  virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
#ifndef PRODUCT
  virtual const char *Name() const { return "MachMerge"; }
#endif
};

//------------------------------MachBranchNode--------------------------------
// Abstract machine branch Node
class MachBranchNode : public MachIdealNode {
@ -258,14 +258,7 @@ void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
  // Search for CastP2X->Xor->URShift->Cmp path which
  // checks if the store done to a different from the value's region.
  // And replace Cmp with #0 (false) to collapse G1 post barrier.
  Node* xorx = NULL;
  for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
    Node* u = p2x->fast_out(i);
    if (u->Opcode() == Op_XorX) {
      xorx = u;
      break;
    }
  }
  Node* xorx = p2x->find_out_with(Op_XorX);
  assert(xorx != NULL, "missing G1 post barrier");
  Node* shift = xorx->unique_out();
  Node* cmpx = shift->unique_out();
@ -2609,7 +2609,6 @@ bool StoreNode::value_never_loaded( PhaseTransform *phase) const {
    return false; // if not a distinct instance, there may be aliases of the address
  for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
    Node *use = adr->fast_out(i);
    int opc = use->Opcode();
    if (use->is_Load() || use->is_LoadStore()) {
      return false;
    }
@ -881,6 +881,34 @@ Node* Node::uncast() const {
  return (Node*) this;
}

// Find out of current node that matches opcode.
Node* Node::find_out_with(int opcode) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    Node* use = fast_out(i);
    if (use->Opcode() == opcode) {
      return use;
    }
  }
  return NULL;
}

// Return true if the current node has an out that matches opcode.
bool Node::has_out_with(int opcode) {
  return (find_out_with(opcode) != NULL);
}

// Return true if the current node has an out that matches any of the opcodes.
bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    int opcode = fast_out(i)->Opcode();
    if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
      return true;
    }
  }
  return false;
}


//---------------------------uncast_helper-------------------------------------
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
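The new helpers let the call sites shown earlier collapse from an explicit DU-iterator loop to a single call; the G1 post-barrier hunk above is representative:

    // Before: hand-rolled search over p2x's uses.
    Node* xorx = NULL;
    for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
      Node* u = p2x->fast_out(i);
      if (u->Opcode() == Op_XorX) {
        xorx = u;   // first use with the wanted opcode
        break;
      }
    }

    // After: the same search expressed with the new helper.
    Node* xorx = p2x->find_out_with(Op_XorX);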
@ -98,6 +98,7 @@ class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class MachMergeNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
@ -436,6 +437,13 @@ protected:
    return (this->uncast() == n->uncast());
  }

  // Find out of current node that matches opcode.
  Node* find_out_with(int opcode);
  // Return true if the current node has an out that matches opcode.
  bool has_out_with(int opcode);
  // Return true if the current node has an out that matches any of the opcodes.
  bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);

 private:
  static Node* uncast_helper(const Node* n);

@ -507,18 +515,25 @@ public:

  //----------------- Other Node Properties

  // Generate class id for some ideal nodes to avoid virtual query
  // methods is_<Node>().
  // Class id is the set of bits corresponded to the node class and all its
  // super classes so that queries for super classes are also valid.
  // Subclasses of the same super class have different assigned bit
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  // Generate class IDs for (some) ideal nodes so that it is possible to determine
  // the type of a node using a non-virtual method call (the method is_<Node>() below).
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compare the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
  // the type of the node the ID represents; another subset of an ID's bits are reserved
  // for the superclasses of the node represented by the ID.
  //
  // By design, if A is a supertype of B, A.is_B() returns true and B.is_A()
  // returns false. A.is_A() returns true.
  //
  // If two classes, A and B, have the same superclass, a different bit of A's class id
  // is reserved for A's type than for B's type. That bit is specified by the third
  // parameter in the macro DEFINE_CLASS_ID.
  //
  // By convention, classes with deeper hierarchy are declared first. Moreover,
  // classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses and then compares
  // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
  //
  // Class_MachCall=30, ClassMask_MachCall=31
  // 12              8               4               0
|
||||
DEFINE_CLASS_ID(MachTemp, Mach, 3)
|
||||
DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
|
||||
DEFINE_CLASS_ID(MachConstant, Mach, 5)
|
||||
DEFINE_CLASS_ID(MachMerge, Mach, 6)
|
||||
|
||||
DEFINE_CLASS_ID(Type, Node, 2)
|
||||
DEFINE_CLASS_ID(Phi, Type, 0)
|
||||
@ -763,6 +779,7 @@ public:
|
||||
DEFINE_CLASS_QUERY(MachSafePoint)
|
||||
DEFINE_CLASS_QUERY(MachSpillCopy)
|
||||
DEFINE_CLASS_QUERY(MachTemp)
|
||||
DEFINE_CLASS_QUERY(MachMerge)
|
||||
DEFINE_CLASS_QUERY(Mem)
|
||||
DEFINE_CLASS_QUERY(MemBar)
|
||||
DEFINE_CLASS_QUERY(MemBarStoreStore)
|
||||
|
@ -441,7 +441,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)

  CompileLog* log = C->log();
  if (log != NULL) {
    log->begin_head("parse method='%d' uses='%g'",
    log->begin_head("parse method='%d' uses='%f'",
                    log->identify(parse_method), expected_uses);
    if (depth() == 1 && C->is_osr_compilation()) {
      log->print(" osr_bci='%d'", C->entry_bci());
@ -832,7 +832,7 @@ float Parse::dynamic_branch_prediction(float &cnt) {
      sprintf(prob_str_buf, "%g", prob);
      prob_str = prob_str_buf;
    }
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
                   iter().get_dest(), taken, not_taken, cnt, prob_str);
  }
  return prob;
@ -110,6 +110,7 @@ void Phase::print_timers() {
      tty->print_cr ("      Compute Liveness:   %7.3f s", timers[_t_computeLive].seconds());
      tty->print_cr ("      Regalloc Split:     %7.3f s", timers[_t_regAllocSplit].seconds());
      tty->print_cr ("      Postalloc Copy Rem: %7.3f s", timers[_t_postAllocCopyRemoval].seconds());
      tty->print_cr ("      Merge multidefs:    %7.3f s", timers[_t_mergeMultidefs].seconds());
      tty->print_cr ("      Fixup Spills:       %7.3f s", timers[_t_fixupSpills].seconds());
      tty->print_cr ("      Compact:            %7.3f s", timers[_t_chaitinCompact].seconds());
      tty->print_cr ("      Coalesce 1:         %7.3f s", timers[_t_chaitinCoalesce1].seconds());
@ -126,6 +127,7 @@ void Phase::print_timers() {
                     timers[_t_computeLive].seconds() +
                     timers[_t_regAllocSplit].seconds() +
                     timers[_t_postAllocCopyRemoval].seconds() +
                     timers[_t_mergeMultidefs].seconds() +
                     timers[_t_fixupSpills].seconds() +
                     timers[_t_chaitinCompact].seconds() +
                     timers[_t_chaitinCoalesce1].seconds() +
@ -88,6 +88,7 @@ public:
    _t_computeLive,
    _t_regAllocSplit,
    _t_postAllocCopyRemoval,
    _t_mergeMultidefs,
    _t_fixupSpills,
    _t_chaitinCompact,
    _t_chaitinCoalesce1,
@ -263,20 +263,6 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
  // intermediate copies might be illegal, i.e., value is stored down to stack
  // then reloaded BUT survives in a register the whole way.
  Node *val = skip_copies(n->in(k));

  if (val == x && nk_idx != 0 &&
      regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
      _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) {
    // When rematerialzing nodes and stretching lifetimes, the
    // allocator will reuse the original def for multidef LRG instead
    // of the current reaching def because it can't know it's safe to
    // do so. After allocation completes if they are in the same LRG
    // then it should use the current reaching def instead.
    n->set_req(k, regnd[nk_reg]);
    blk_adjust += yank_if_dead(val, current_block, &value, &regnd);
    val = skip_copies(n->in(k));
  }

  if (val == x) return blk_adjust; // No progress?

  int n_regs = RegMask::num_registers(val->ideal_reg());
@ -382,6 +368,94 @@ bool PhaseChaitin::eliminate_copy_of_constant(Node* val, Node* n,
  return false;
}

// The algorithm works as follows:
// We traverse the block top to bottom. possibly_merge_multidef() is invoked for every input edge k
// of the instruction n. We check to see if the input is a multidef lrg. If it is, we record the fact that we've
// seen a definition (coming as an input) and add that fact to the reg2defuse array. The array maps registers to their
// current reaching definitions (we track only multidefs though). With each definition we also associate the first
// instruction we saw use it. If we encounter the situation when we observe a def (an input) that is a part of the
// same lrg but is different from the previously seen def we merge the two with a MachMerge node and substitute
// all the uses that we've seen so far to use the merge. After that we keep replacing the new defs in the same lrg
// as they get encountered with the merge node and keep adding these defs to the merge inputs.
void PhaseChaitin::merge_multidefs() {
  Compile::TracePhase tp("mergeMultidefs", &timers[_t_mergeMultidefs]);
  ResourceMark rm;
  // Keep track of the defs seen in registers and collect their uses in the block.
  RegToDefUseMap reg2defuse(_max_reg, _max_reg, RegDefUse());
  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* block = _cfg.get_block(i);
    for (uint j = 1; j < block->number_of_nodes(); j++) {
      Node* n = block->get_node(j);
      if (n->is_Phi()) continue;
      for (uint k = 1; k < n->req(); k++) {
        j += possibly_merge_multidef(n, k, block, reg2defuse);
      }
      // Null out the value produced by the instruction itself, since we're only interested in defs
      // implicitly defined by the uses. We are actually interested in tracking only redefinitions
      // of the multidef lrgs in the same register. For that matter it's enough to track changes in
      // the base register only and ignore other effects of multi-register lrgs and fat projections.
      // It is also ok to ignore defs coming from singledefs. After an implicit overwrite by one of
      // those our register is guaranteed to be used by another lrg and we won't attempt to merge it.
      uint lrg = _lrg_map.live_range_id(n);
      if (lrg > 0 && lrgs(lrg).is_multidef()) {
        OptoReg::Name reg = lrgs(lrg).reg();
        reg2defuse.at(reg).clear();
      }
    }
    // Clear reg->def->use tracking for the next block
    for (int j = 0; j < reg2defuse.length(); j++) {
      reg2defuse.at(j).clear();
    }
  }
}

int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse) {
  int blk_adjust = 0;

  uint lrg = _lrg_map.live_range_id(n->in(k));
  if (lrg > 0 && lrgs(lrg).is_multidef()) {
    OptoReg::Name reg = lrgs(lrg).reg();

    Node* def = reg2defuse.at(reg).def();
    if (def != NULL && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) {
      // Same lrg but different node, we have to merge.
      MachMergeNode* merge;
      if (def->is_MachMerge()) { // is it already a merge?
        merge = def->as_MachMerge();
      } else {
        merge = new MachMergeNode(def);

        // Insert the merge node into the block before the first use.
        uint use_index = block->find_node(reg2defuse.at(reg).first_use());
        block->insert_node(merge, use_index++);

        // Let the allocator know about the new node, use the same lrg
        _lrg_map.extend(merge->_idx, lrg);
        blk_adjust++;

        // Fixup all the uses (there is at least one) that happened between the first
        // use and before the current one.
        for (; use_index < block->number_of_nodes(); use_index++) {
          Node* use = block->get_node(use_index);
          if (use == n) {
            break;
          }
          use->replace_edge(def, merge);
        }
      }
      if (merge->find_edge(n->in(k)) == -1) {
        merge->add_req(n->in(k));
      }
      n->set_req(k, merge);
    }

    // update the uses
    reg2defuse.at(reg).update(n->in(k), n);
  }

  return blk_adjust;
}


//------------------------------post_allocate_copy_removal---------------------
// Post-Allocation peephole copy removal. We do this in 1 pass over the
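A schematic of what merge_multidefs() does to one block may help; assume d1 and d2 are two defs of the same multidef live range allocated to register R (names illustrative, not from the source):

    // Block before:   d1  use1(d1)  use2(d1)  d2  use3(d2)
    // Processing use3, reg2defuse[R] still records d1 (first use: use1),
    // but the incoming def is d2, so a merge is created before use1 and
    // every use of the live range is redirected to it:
    // Block after:    d1  m=MachMerge(d1,d2)  use1(m)  use2(m)  d2  use3(m)

Later defs of the same live range are appended as extra inputs to the existing MachMergeNode rather than triggering a new merge.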
@ -1507,10 +1507,12 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
      }
      case StringConcat::StringMode: {
        const Type* type = kit.gvn().type(arg);
        Node* count = NULL;
        if (type == TypePtr::NULL_PTR) {
          // replace the argument with the null checked version
          arg = null_string;
          sc->set_argument(argi, arg);
          count = kit.load_String_length(kit.control(), arg);
        } else if (!type->higher_equal(TypeInstPtr::NOTNULL)) {
          // s = s != null ? s : "null";
          // length = length + (s.count - s.offset);
@ -1533,10 +1535,13 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
          // replace the argument with the null checked version
          arg = phi;
          sc->set_argument(argi, arg);
          count = kit.load_String_length(kit.control(), arg);
        } else {
          // A corresponding nullcheck will be connected during IGVN MemNode::Ideal_common_DU_postCCP
          // kit.control might be a different test, that can be hoisted above the actual nullcheck
          // in case, that the control input is not null, Ideal_common_DU_postCCP will not look for a nullcheck.
          count = kit.load_String_length(NULL, arg);
        }

        Node* count = kit.load_String_length(kit.control(), arg);

        length = __ AddI(length, count);
        string_sizes->init_req(argi, NULL);
        break;
@ -68,11 +68,11 @@ class JvmtiConstantPoolReconstituter : public StackObj {

  ~JvmtiConstantPoolReconstituter() {
    if (_symmap != NULL) {
      os::free(_symmap);
      delete _symmap;
      _symmap = NULL;
    }
    if (_classmap != NULL) {
      os::free(_classmap);
      delete _classmap;
      _classmap = NULL;
    }
  }
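The destructor fix above is an allocator-pairing correction: the maps are C++ objects created with new, so releasing them with os::free would reclaim the memory without running their destructors. The rule in miniature (types hypothetical):

    struct Map { ~Map() { /* frees owned entries */ } };

    Map* m = new Map();   // allocated with C++ new; constructor runs
    delete m;             // correct: runs ~Map(), matches the allocator
    // os::free(m);       // wrong: skips ~Map() and pairs free with new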
@ -1046,10 +1046,16 @@ static jint invoke_string_value_callback(jvmtiStringPrimitiveValueCallback cb,
{
  assert(str->klass() == SystemDictionary::String_klass(), "not a string");

  typeArrayOop s_value = java_lang_String::value(str);

  // JDK-6584008: the value field may be null if a String instance is
  // partially constructed.
  if (s_value == NULL) {
    return 0;
  }
  // get the string value and length
  // (string value may be offset from the base)
  int s_len = java_lang_String::length(str);
  typeArrayOop s_value = java_lang_String::value(str);
  int s_offset = java_lang_String::offset(str);
  jchar* value;
  if (s_len > 0) {
@ -100,6 +100,11 @@ PERF_ENTRY(void, Perf_Detach(JNIEnv *env, jobject unused, jobject buffer))

  PerfWrapper("Perf_Detach");

  if (!UsePerfData) {
    // With -XX:-UsePerfData, detach is just a NOP
    return;
  }

  void* address = 0;
  jlong capacity = 0;

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -1104,43 +1104,6 @@ UNSAFE_END


UNSAFE_ENTRY(void, Unsafe_MonitorEnter(JNIEnv *env, jobject unsafe, jobject jobj))
  UnsafeWrapper("Unsafe_MonitorEnter");
  {
    if (jobj == NULL) {
      THROW(vmSymbols::java_lang_NullPointerException());
    }
    Handle obj(thread, JNIHandles::resolve_non_null(jobj));
    ObjectSynchronizer::jni_enter(obj, CHECK);
  }
UNSAFE_END


UNSAFE_ENTRY(jboolean, Unsafe_TryMonitorEnter(JNIEnv *env, jobject unsafe, jobject jobj))
  UnsafeWrapper("Unsafe_TryMonitorEnter");
  {
    if (jobj == NULL) {
      THROW_(vmSymbols::java_lang_NullPointerException(), JNI_FALSE);
    }
    Handle obj(thread, JNIHandles::resolve_non_null(jobj));
    bool res = ObjectSynchronizer::jni_try_enter(obj, CHECK_0);
    return (res ? JNI_TRUE : JNI_FALSE);
  }
UNSAFE_END


UNSAFE_ENTRY(void, Unsafe_MonitorExit(JNIEnv *env, jobject unsafe, jobject jobj))
  UnsafeWrapper("Unsafe_MonitorExit");
  {
    if (jobj == NULL) {
      THROW(vmSymbols::java_lang_NullPointerException());
    }
    Handle obj(THREAD, JNIHandles::resolve_non_null(jobj));
    ObjectSynchronizer::jni_exit(obj(), CHECK);
  }
UNSAFE_END


UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr))
  UnsafeWrapper("Unsafe_ThrowException");
  {
@ -1365,8 +1328,6 @@ static JNINativeMethod methods_140[] = {
    {CC"defineClass",        CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
    {CC"defineClass",        CC"("DC_Args")"CLS,  FN_PTR(Unsafe_DefineClass)},
    {CC"allocateInstance",   CC"("CLS")"OBJ,      FN_PTR(Unsafe_AllocateInstance)},
    {CC"monitorEnter",       CC"("OBJ")V",        FN_PTR(Unsafe_MonitorEnter)},
    {CC"monitorExit",        CC"("OBJ")V",        FN_PTR(Unsafe_MonitorExit)},
    {CC"throwException",     CC"("THR")V",        FN_PTR(Unsafe_ThrowException)}
};

@ -1411,8 +1372,6 @@ static JNINativeMethod methods_141[] = {
    {CC"defineClass",        CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
    {CC"defineClass",        CC"("DC_Args")"CLS,  FN_PTR(Unsafe_DefineClass)},
    {CC"allocateInstance",   CC"("CLS")"OBJ,      FN_PTR(Unsafe_AllocateInstance)},
    {CC"monitorEnter",       CC"("OBJ")V",        FN_PTR(Unsafe_MonitorEnter)},
    {CC"monitorExit",        CC"("OBJ")V",        FN_PTR(Unsafe_MonitorExit)},
    {CC"throwException",     CC"("THR")V",        FN_PTR(Unsafe_ThrowException)}

};
@ -1461,8 +1420,6 @@ static JNINativeMethod methods_15[] = {
    {CC"defineClass",        CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
    {CC"defineClass",        CC"("DC_Args")"CLS,  FN_PTR(Unsafe_DefineClass)},
    {CC"allocateInstance",   CC"("CLS")"OBJ,      FN_PTR(Unsafe_AllocateInstance)},
    {CC"monitorEnter",       CC"("OBJ")V",        FN_PTR(Unsafe_MonitorEnter)},
    {CC"monitorExit",        CC"("OBJ")V",        FN_PTR(Unsafe_MonitorExit)},
    {CC"throwException",     CC"("THR")V",        FN_PTR(Unsafe_ThrowException)},
    {CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
    {CC"compareAndSwapInt",  CC"("OBJ"J""I""I"")Z",     FN_PTR(Unsafe_CompareAndSwapInt)},
@ -1515,9 +1472,6 @@ static JNINativeMethod methods_16[] = {
    {CC"defineClass",        CC"("DC0_Args")"CLS, FN_PTR(Unsafe_DefineClass0)},
    {CC"defineClass",        CC"("DC_Args")"CLS,  FN_PTR(Unsafe_DefineClass)},
    {CC"allocateInstance",   CC"("CLS")"OBJ,      FN_PTR(Unsafe_AllocateInstance)},
    {CC"monitorEnter",       CC"("OBJ")V",        FN_PTR(Unsafe_MonitorEnter)},
    {CC"monitorExit",        CC"("OBJ")V",        FN_PTR(Unsafe_MonitorExit)},
    {CC"tryMonitorEnter",    CC"("OBJ")Z",        FN_PTR(Unsafe_TryMonitorEnter)},
    {CC"throwException",     CC"("THR")V",        FN_PTR(Unsafe_ThrowException)},
    {CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
    {CC"compareAndSwapInt",  CC"("OBJ"J""I""I"")Z",     FN_PTR(Unsafe_CompareAndSwapInt)},
@ -1571,9 +1525,6 @@ static JNINativeMethod methods_18[] = {

    {CC"defineClass",        CC"("DC_Args")"CLS,  FN_PTR(Unsafe_DefineClass)},
    {CC"allocateInstance",   CC"("CLS")"OBJ,      FN_PTR(Unsafe_AllocateInstance)},
    {CC"monitorEnter",       CC"("OBJ")V",        FN_PTR(Unsafe_MonitorEnter)},
    {CC"monitorExit",        CC"("OBJ")V",        FN_PTR(Unsafe_MonitorExit)},
    {CC"tryMonitorEnter",    CC"("OBJ")Z",        FN_PTR(Unsafe_TryMonitorEnter)},
    {CC"throwException",     CC"("THR")V",        FN_PTR(Unsafe_ThrowException)},
    {CC"compareAndSwapObject", CC"("OBJ"J"OBJ""OBJ")Z", FN_PTR(Unsafe_CompareAndSwapObject)},
    {CC"compareAndSwapInt",  CC"("OBJ"J""I""I"")Z",     FN_PTR(Unsafe_CompareAndSwapInt)},
@ -155,7 +155,7 @@ bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
  }
  return false;
}
@ -229,32 +229,32 @@ double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k)
// Tier?LoadFeedback is basically a coefficient that determines of
// how many methods per compiler thread can be in the queue before
// the threshold values double.
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k);
    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k);
    return call_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  default:
    return true;
@ -271,7 +271,7 @@ bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_le
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
  }
  return false;
}
@ -348,7 +348,7 @@ CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
      } else if ((this->*p)(i, b, cur_level, method)) {
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if C2 queue is large enough we can spend too much time in the fully profiled code
@ -374,7 +374,7 @@ CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel
        if (mdo->would_profile()) {
          if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                   Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                   (this->*p)(i, b, cur_level))) {
                                   (this->*p)(i, b, cur_level, method))) {
            next_level = CompLevel_full_profile;
          }
        } else {
@ -390,7 +390,7 @@ CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel
      if (mdo->would_profile()) {
        int mdo_i = mdo->invocation_count_delta();
        int mdo_b = mdo->backedge_count_delta();
        if ((this->*p)(mdo_i, mdo_b, cur_level)) {
        if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
          next_level = CompLevel_full_optimization;
        }
      } else {
@ -84,7 +84,7 @@ class CompileQueue;
 *   invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
 *   makes a call into the runtime.
 *
 * - Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
 * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
 *   compilation thresholds.
 *   Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
 *   Other thresholds work as follows:
@ -100,7 +100,9 @@ class CompileQueue;
 * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
 * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
 * from Method* and for 3->4 transition they come from MDO (since profiled invocations are
 * counted separately).
 * counted separately). Finally, if a method does not contain anything worth profiling, a transition
 * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
 * what is specified by Tier4InvocationThreshold).
 *
 * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
 *
@ -164,9 +166,9 @@ class AdvancedThresholdPolicy : public SimpleThresholdPolicy {
  // Call and loop predicates determine whether a transition to a higher compilation
  // level should be performed (pointers to predicate functions are passed to common()).
  // Predicates also take compiler load into account.
  typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
  bool call_predicate(int i, int b, CompLevel cur_level);
  bool loop_predicate(int i, int b, CompLevel cur_level);
  typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
  bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
  bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
  // Common transition function. Given a predicate determines if a method should transition to another level.
  CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false);
  // Transition functions.
@ -1126,16 +1126,35 @@ static void no_shared_spaces(const char* message) {
}
#endif

// Returns threshold scaled with CompileThresholdScaling
intx Arguments::get_scaled_compile_threshold(intx threshold) {
  return (intx)(threshold * CompileThresholdScaling);
intx Arguments::scaled_compile_threshold(intx threshold, double scale) {
  if (scale == 1.0 || scale < 0.0) {
    return threshold;
  } else {
    return (intx)(threshold * scale);
  }
}

// Returns freq_log scaled with CompileThresholdScaling
intx Arguments::get_scaled_freq_log(intx freq_log) {
  intx scaled_freq = get_scaled_compile_threshold((intx)1 << freq_log);
  if (scaled_freq == 0) {
    return 0;
intx Arguments::scaled_freq_log(intx freq_log, double scale) {
  // Check if scaling is necessary or a negative value was specified.
  if (scale == 1.0 || scale < 0.0) {
    return freq_log;
  }

  // Check value to avoid calculating log2 of 0.
  if (scale == 0.0) {
    return 1;
  }

  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
  // Determine the maximum notification frequency value currently supported.
  // The largest mask value that the interpreter/C1 can handle is
  // of length InvocationCounter::number_of_count_bits. Mask values are always
  // one bit shorter than the value of the notification frequency. Set
  // max_freq_bits accordingly.
  intx max_freq_bits = InvocationCounter::number_of_count_bits + 1;
  if (scaled_freq > nth_bit(max_freq_bits)) {
    return max_freq_bits;
  } else {
    return log2_intptr(scaled_freq);
  }
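A worked instance of the new scaling path (numbers only, using the count-bit width implied by the InvocationCounter change above, BitsPerInt - 3 = 29):

    scaled_freq_log(7, 0.5):
      scaled_freq   = scaled_compile_threshold(1 << 7, 0.5) = 64
      max_freq_bits = 29 + 1 = 30, and 64 <= nth_bit(30)
      result        = log2(64) = 6     // notifications fire twice as often
    scaled_freq_log(7, 1.0)  -> 7      // scale of 1.0 is a no-op
    scaled_freq_log(7, 0.0)  -> 1      // avoids computing log2(0)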
@ -1180,31 +1199,36 @@ void Arguments::set_tiered_flags() {
    Tier3InvokeNotifyFreqLog = 0;
    Tier4InvocationThreshold = 0;
  }

  if (CompileThresholdScaling < 0) {
    vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
  }

  // Scale tiered compilation thresholds
  if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
    FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, get_scaled_freq_log(Tier0InvokeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, get_scaled_freq_log(Tier0BackedgeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(intx, Tier3InvocationThreshold, get_scaled_compile_threshold(Tier3InvocationThreshold));
    FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, get_scaled_compile_threshold(Tier3MinInvocationThreshold));
    FLAG_SET_ERGO(intx, Tier3CompileThreshold, get_scaled_compile_threshold(Tier3CompileThreshold));
    FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, get_scaled_compile_threshold(Tier3BackEdgeThreshold));
    FLAG_SET_ERGO(intx, Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
    FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
    FLAG_SET_ERGO(intx, Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
    FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));

    // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
    // once these thresholds become supported.

    FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, get_scaled_freq_log(Tier2InvokeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, get_scaled_freq_log(Tier2BackedgeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, get_scaled_freq_log(Tier3InvokeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, get_scaled_freq_log(Tier3BackedgeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, get_scaled_freq_log(Tier23InlineeNotifyFreqLog));
    FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));

    FLAG_SET_ERGO(intx, Tier4InvocationThreshold, get_scaled_compile_threshold(Tier4InvocationThreshold));
    FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, get_scaled_compile_threshold(Tier4MinInvocationThreshold));
    FLAG_SET_ERGO(intx, Tier4CompileThreshold, get_scaled_compile_threshold(Tier4CompileThreshold));
    FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, get_scaled_compile_threshold(Tier4BackEdgeThreshold));
    FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
    FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
    FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
    FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
  }
}

@ -3456,7 +3480,7 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
  }

  if ((TieredCompilation && CompileThresholdScaling == 0)
      || (!TieredCompilation && get_scaled_compile_threshold(CompileThreshold) == 0)) {
      || (!TieredCompilation && scaled_compile_threshold(CompileThreshold) == 0)) {
    set_mode_flags(_int);
  }

@ -3896,7 +3920,7 @@ jint Arguments::apply_ergo() {
  }
  // Scale CompileThreshold
  if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
    FLAG_SET_ERGO(intx, CompileThreshold, get_scaled_compile_threshold(CompileThreshold));
    FLAG_SET_ERGO(intx, CompileThreshold, scaled_compile_threshold(CompileThreshold));
  }
}
@ -328,9 +328,6 @@ class Arguments : AllStatic {
  static bool _ClipInlining;
  static bool _CIDynamicCompilePriority;

  // Scale compile thresholds
  static intx get_scaled_compile_threshold(intx threshold);
  static intx get_scaled_freq_log(intx freq_log);
  // Tiered
  static void set_tiered_flags();
  static int  get_min_number_of_compiler_threads();
@ -452,6 +449,18 @@ class Arguments : AllStatic {
  static char* SharedArchivePath;

 public:
  // Scale compile thresholds
  // Returns threshold scaled with CompileThresholdScaling
  static intx scaled_compile_threshold(intx threshold, double scale);
  static intx scaled_compile_threshold(intx threshold) {
    return scaled_compile_threshold(threshold, CompileThresholdScaling);
  }
  // Returns freq_log scaled with CompileThresholdScaling
  static intx scaled_freq_log(intx freq_log, double scale);
  static intx scaled_freq_log(intx freq_log) {
    return scaled_freq_log(freq_log, CompileThresholdScaling);
  }

  // Parses the arguments, first phase
  static jint parse(const JavaVMInitArgs* args);
  // Apply ergonomics
@ -2477,7 +2477,7 @@ class CommandLineFlags {
          "Number of compiler threads to run")                              \
                                                                            \
  product(intx, CompilationPolicyChoice, 0,                                 \
          "which compilation policy (0/1)")                                 \
          "which compilation policy (0-3)")                                 \
                                                                            \
  develop(bool, UseStackBanging, true,                                      \
          "use stack banging for stack overflow checks (required for "      \
@ -3528,7 +3528,16 @@ class CommandLineFlags {
                                                                            \
  product(double, CompileThresholdScaling, 1.0,                             \
          "Factor to control when first compilation happens "               \
          "(both with and without tiered compilation)")                     \
          "(both with and without tiered compilation): "                    \
          "values greater than 1.0 delay counter overflow, "                \
          "values between 0 and 1.0 rush counter overflow, "                \
          "a value of 1.0 leaves compilation thresholds unchanged, "        \
          "a value of 0.0 is equivalent to -Xint. "                         \
          ""                                                                \
          "Flag can be set as per-method option. "                          \
          "If a value is specified for a method, compilation thresholds "   \
          "for that method are scaled by both the value of the global flag "\
          "and the value of the per-method flag.")                          \
                                                                            \
  product(intx, Tier0InvokeNotifyFreqLog, 7,                                \
          "Interpreter (tier 0) invocation notification frequency")         \
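The expanded help text can be exercised directly from the command line; illustrative invocations (the per-method form follows CompilerOracle's typed-option syntax consumed by has_option_value in the hunks above — exact spelling may vary by JDK build):

    # Halve every compilation threshold VM-wide:
    java -XX:CompileThresholdScaling=0.5 ...

    # Scale one method only; per the flag text, the global and
    # per-method factors are both applied:
    java -XX:CompileCommand=option,MyClass::hotMethod,double,CompileThresholdScaling,0.25 ...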
@ -1401,15 +1401,17 @@ bool os::stack_shadow_pages_available(Thread *thread, methodHandle method) {
  return (sp > (stack_limit + reserved_area));
}

size_t os::page_size_for_region(size_t region_size, size_t min_pages) {
size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
  assert(min_pages > 0, "sanity");
  if (UseLargePages) {
    const size_t max_page_size = region_size / min_pages;

    for (size_t i = 0; _page_sizes[i] != 0; ++i) {
      const size_t page_size = _page_sizes[i];
      if (page_size <= max_page_size && is_size_aligned(region_size, page_size)) {
        return page_size;
      if (page_size <= max_page_size) {
        if (!must_be_aligned || is_size_aligned(region_size, page_size)) {
          return page_size;
        }
      }
    }
  }
@ -1417,6 +1419,14 @@ size_t os::page_size_for_region(size_t region_size, size_t min_pages) {
  return vm_page_size();
}

size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
  return page_size_for_region(region_size, min_pages, true);
}

size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
  return page_size_for_region(region_size, min_pages, false);
}

#ifndef PRODUCT
void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
{
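Concretely, suppose _page_sizes = { 2M, 4K, 0 } and UseLargePages is on (values illustrative):

    page_size_for_region_aligned(8M, 4):
      max_page_size = 8M / 4 = 2M; 2M fits and divides 8M     -> 2M
    page_size_for_region_aligned(8M + 4K, 4):
      2M fails the divisibility check; 4K divides the size    -> 4K
    page_size_for_region_unaligned(8M + 4K, 4):
      divisibility is not required, so 2M is acceptable       -> 2M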
@ -1665,17 +1675,17 @@ class TestOS : AllStatic {

  static size_t large_page_size() {
    const size_t large_page_size_example = 4 * M;
    return os::page_size_for_region(large_page_size_example, 1);
    return os::page_size_for_region_aligned(large_page_size_example, 1);
  }

  static void test_page_size_for_region() {
  static void test_page_size_for_region_aligned() {
    if (UseLargePages) {
      const size_t small_page = small_page_size();
      const size_t large_page = large_page_size();

      if (large_page > small_page) {
        size_t num_small_pages_in_large = large_page / small_page;
        size_t page = os::page_size_for_region(large_page, num_small_pages_in_large);
        size_t page = os::page_size_for_region_aligned(large_page, num_small_pages_in_large);

        assert_eq(page, small_page);
      }
@ -1688,21 +1698,53 @@ class TestOS : AllStatic {
      const size_t large_page = large_page_size();
      if (large_page > small_page) {
        const size_t unaligned_region = large_page + 17;
        size_t page = os::page_size_for_region(unaligned_region, 1);
        size_t page = os::page_size_for_region_aligned(unaligned_region, 1);
        assert_eq(page, small_page);

        const size_t num_pages = 5;
        const size_t aligned_region = large_page * num_pages;
        page = os::page_size_for_region(aligned_region, num_pages);
        page = os::page_size_for_region_aligned(aligned_region, num_pages);
        assert_eq(page, large_page);
      }
    }
  }

  static void test_page_size_for_region_unaligned() {
    if (UseLargePages) {
      // Given exact page size, should return that page size.
      for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
        size_t expected = os::_page_sizes[i];
        size_t actual = os::page_size_for_region_unaligned(expected, 1);
        assert_eq(expected, actual);
      }

      // Given slightly larger size than a page size, return the page size.
      for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
        size_t expected = os::_page_sizes[i];
        size_t actual = os::page_size_for_region_unaligned(expected + 17, 1);
        assert_eq(expected, actual);
      }

      // Given a slightly smaller size than a page size,
      // return the next smaller page size.
      if (os::_page_sizes[1] > os::_page_sizes[0]) {
        size_t expected = os::_page_sizes[0];
        size_t actual = os::page_size_for_region_unaligned(os::_page_sizes[1] - 17, 1);
        assert_eq(actual, expected);
      }

      // Return small page size for values less than a small page.
      size_t small_page = small_page_size();
      size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
      assert_eq(small_page, actual);
    }
  }

 public:
  static void run_tests() {
    test_page_size_for_region();
    test_page_size_for_region_aligned();
    test_page_size_for_region_alignment();
    test_page_size_for_region_unaligned();
  }
};
@ -148,6 +148,7 @@ class os: AllStatic {
|
||||
static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
|
||||
static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
|
||||
|
||||
static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
|
||||
|
||||
public:
|
||||
static void init(void); // Called before command line parsing
|
||||
@@ -267,8 +268,13 @@ class os: AllStatic {

  // Returns the page size to use for a region of memory.
  // region_size / min_pages will always be greater than or equal to the
  // returned value.
  static size_t page_size_for_region(size_t region_size, size_t min_pages);
  // returned value. The returned value will divide region_size.
  static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);

  // Returns the page size to use for a region of memory.
  // region_size / min_pages will always be greater than or equal to the
  // returned value. The returned value might not divide region_size.
  static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);

  // Return the largest page size that can be used
  static size_t max_page_size() {

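To make the contract split above concrete, here is a minimal sketch — not the HotSpot implementation; the page sizes and helper name are invented — of how an aligned lookup must fall back to a smaller page when the region size is not a multiple of the larger page, while an unaligned lookup may keep the larger page:

// pick_page_size.cpp -- illustrative sketch only, not HotSpot code.
// Assumed page sizes: 2 MB "large" and 4 KB "small", largest first.
#include <cstddef>
#include <cstdio>

static const size_t page_sizes[] = { 2 * 1024 * 1024, 4 * 1024 };

static size_t pick_page_size(size_t region_size, size_t min_pages, bool must_divide) {
  for (size_t ps : page_sizes) {
    if (region_size / ps < min_pages) continue;         // region must hold >= min_pages pages
    if (must_divide && region_size % ps != 0) continue; // "aligned" contract: ps divides region_size
    return ps;
  }
  return page_sizes[1];  // fall back to the smallest page size
}

int main() {
  const size_t large = page_sizes[0], small = page_sizes[1];
  printf("%zu\n", pick_page_size(large + 17, 1, false));        // 2097152: unaligned keeps large pages
  printf("%zu\n", pick_page_size(large + 17, 1, true));         // 4096: nothing divides large+17, fall back
  printf("%zu\n", pick_page_size(large, large / small, true));  // 4096: min_pages forces small pages
  return 0;
}

This mirrors what the test cases above assert: large_page + 17 yields the small page size under the aligned contract, and a region asked to hold num_small_pages_in_large pages can never use a large page.
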
@@ -257,28 +257,28 @@ void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel l
// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common() transition function).
bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    return loop_predicate_helper<CompLevel_none>(i, b, 1.0);
    return loop_predicate_helper<CompLevel_none>(i, b, 1.0, method);
  }
  case CompLevel_full_profile: {
    return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0, method);
  }
  default:
    return true;
  }
}

bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    return call_predicate_helper<CompLevel_none>(i, b, 1.0);
    return call_predicate_helper<CompLevel_none>(i, b, 1.0, method);
  }
  case CompLevel_full_profile: {
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0, method);
  }
  default:
    return true;
@@ -293,8 +293,8 @@ bool SimpleThresholdPolicy::is_mature(Method* method) {
    int i = mdo->invocation_count();
    int b = mdo->backedge_count();
    double k = ProfileMaturityPercentage / 100.0;
    return call_predicate_helper<CompLevel_full_profile>(i, b, k) ||
           loop_predicate_helper<CompLevel_full_profile>(i, b, k);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
           loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
  }
  return false;
}
@@ -313,7 +313,7 @@ CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel c
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
      } else if ((this->*p)(i, b, cur_level, method)) {
        next_level = CompLevel_full_profile;
      }
      break;
@@ -325,7 +325,7 @@ CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel c
        if (mdo->would_profile()) {
          int mdo_i = mdo->invocation_count_delta();
          int mdo_b = mdo->backedge_count_delta();
          if ((this->*p)(mdo_i, mdo_b, cur_level)) {
          if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
            next_level = CompLevel_full_optimization;
          }
        } else {

@@ -43,9 +43,9 @@ class SimpleThresholdPolicy : public CompilationPolicy {
  // Call and loop predicates determine whether a transition to a higher compilation
  // level should be performed (pointers to predicate functions are passed to common_TF()).
  // Predicates also take compiler load into account.
  typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
  bool call_predicate(int i, int b, CompLevel cur_level);
  bool loop_predicate(int i, int b, CompLevel cur_level);
  typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
  bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
  bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
  // Common transition function. Given a predicate, determines if a method should transition to another level.
  CompLevel common(Predicate p, Method* method, CompLevel cur_level);
  // Transition functions.
@@ -76,8 +76,8 @@ protected:

  // Predicate helpers are used by .*_predicate() methods as well as others.
  // They check the given counter values, multiplied by the scale, against the thresholds.
  template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale);
  template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale);
  template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method);
  template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method);

  // Get a compilation level for a given method.
  static CompLevel comp_level(Method* method) {

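Because the Predicate typedef above is a pointer to a member function, widening its signature with Method* also forces every (this->*p)(...) call site in common() to change, as seen earlier. A self-contained sketch of that dispatch pattern, with invented types and thresholds:

// member_predicate.cpp -- illustrative sketch only, not HotSpot code.
#include <cstdio>

struct Method {};  // stand-in for HotSpot's Method

class Policy {
  typedef bool (Policy::*Predicate)(int i, int b, Method* m);

  bool call_predicate(int i, int b, Method* m) { return i >= 100; }   // invented threshold
  bool loop_predicate(int i, int b, Method* m) { return b >= 1000; }  // invented threshold

  bool common(Predicate p, Method* m, int i, int b) {
    return (this->*p)(i, b, m);  // dispatch through the member-function pointer
  }

 public:
  void run(Method* m) {
    printf("%d\n", common(&Policy::call_predicate, m, 150, 0));  // 1
    printf("%d\n", common(&Policy::loop_predicate, m, 150, 0));  // 0
  }
};

int main() {
  Method m;
  Policy().run(&m);
  return 0;
}
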
@@ -25,8 +25,14 @@
#ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
#define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP

#include "compiler/compilerOracle.hpp"

template<CompLevel level>
bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
  double threshold_scaling;
  if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
    scale *= threshold_scaling;
  }
  switch(level) {
  case CompLevel_none:
  case CompLevel_limited_profile:
@@ -40,7 +46,11 @@ bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
}

template<CompLevel level>
bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) {
bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) {
  double threshold_scaling;
  if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
    scale *= threshold_scaling;
  }
  switch(level) {
  case CompLevel_none:
  case CompLevel_limited_profile:

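The point of threading Method* down to the helpers is visible here: each helper asks CompilerOracle for a per-method CompileThresholdScaling value and folds it into the scale factor before the counters are compared against the tier thresholds. A rough standalone sketch of that arithmetic (the threshold constant and function are invented for illustration):

// threshold_scaling.cpp -- illustrative sketch only, not HotSpot code.
#include <cstdio>

// Invented constant; the real tier thresholds are HotSpot flags.
static const int kInvocationThreshold = 200;

static bool threshold_reached(int invocations, double scale, double per_method_scaling) {
  // per_method_scaling plays the role of the CompilerOracle-provided
  // CompileThresholdScaling value (1.0 when the option is absent).
  double effective = kInvocationThreshold * scale * per_method_scaling;
  return invocations >= effective;
}

int main() {
  printf("%d\n", threshold_reached(150, 1.0, 0.5));  // 1: scaled threshold is 100
  printf("%d\n", threshold_reached(150, 1.0, 1.0));  // 0: unscaled threshold is 200
  return 0;
}

A scaling value below 1.0 therefore makes a method reach its compile threshold earlier; presumably it is set per method through the usual typed CompileCommand option syntax, along the lines of -XX:CompileCommand=option,Klass::method,double,CompileThresholdScaling,0.5.
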
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -276,18 +276,6 @@ void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}


// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);

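For readers tracking the removal: jni_try_enter() was the VM half of a non-blocking monitor acquisition (the header change below ties it to Unsafe.tryMonitorEnter), i.e. enter the monitor if it is free, otherwise return false instead of parking the thread. As a loose analogy only — std::mutex, not a HotSpot ObjectMonitor:

// try_enter_analogy.cpp -- loose analogy only, not HotSpot code.
#include <cstdio>
#include <mutex>

int main() {
  std::mutex m;
  if (m.try_lock()) {  // acquire without blocking, like the removed try_enter path
    std::puts("entered monitor");
    m.unlock();
  } else {
    std::puts("monitor busy; caller does not block");
  }
  return 0;
}
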
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,6 @@ class ObjectSynchronizer : AllStatic {
  // Used only to handle jni locks or other unmatched monitor enter/exit
  // Internally they will use heavy weight monitor.
  static void jni_enter(Handle obj, TRAPS);
  static bool jni_try_enter(Handle obj, Thread* THREAD); // Implements Unsafe.tryMonitorEnter
  static void jni_exit(oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and jni cases

@@ -38,7 +38,8 @@ ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, 1);
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
@@ -617,7 +618,7 @@ VirtualSpace::VirtualSpace() {


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

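The new comment states the policy: take the page size from an unaligned lookup so most of the region can be covered by large pages, and let the remainder be padded with small pages. A toy calculation under assumed 2 MB / 4 KB page sizes:

// large_page_padding.cpp -- illustrative sketch only, not HotSpot code.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t large = 2 * 1024 * 1024, small = 4 * 1024;  // assumed page sizes
  const size_t region = 5 * large + 3 * small;             // not large-page aligned

  size_t large_pages = region / large;              // span covered by large pages
  size_t tail        = region % large;              // remainder padded with small pages
  size_t small_pages = (tail + small - 1) / small;

  printf("%zu large pages + %zu small pages\n", large_pages, small_pages);  // 5 + 3
  return 0;
}
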
@@ -1239,7 +1240,7 @@ class TestVirtualSpace : AllStatic {
      case Disable:
        return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
      case Commit:
        return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
        return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

@@ -351,11 +351,18 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
  nonstatic_field(MethodData, _arg_stack, intx) \
  nonstatic_field(MethodData, _arg_returned, intx) \
  nonstatic_field(MethodData, _tenure_traps, uint) \
  nonstatic_field(MethodData, _invoke_mask, int) \
  nonstatic_field(MethodData, _backedge_mask, int) \
  nonstatic_field(DataLayout, _header._struct._tag, u1) \
  nonstatic_field(DataLayout, _header._struct._flags, u1) \
  nonstatic_field(DataLayout, _header._struct._bci, u2) \
  nonstatic_field(DataLayout, _cells[0], intptr_t) \
  nonstatic_field(MethodCounters, _nmethod_age, int) \
  nonstatic_field(MethodCounters, _interpreter_invocation_limit, int) \
  nonstatic_field(MethodCounters, _interpreter_backward_branch_limit, int) \
  nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \
  nonstatic_field(MethodCounters, _invoke_mask, int) \
  nonstatic_field(MethodCounters, _backedge_mask, int) \
  nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \
  nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \
  nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \

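For context on what these nonstatic_field entries buy: vmStructs-style tables record the offsets of VM fields so out-of-process tools such as the Serviceability Agent can locate them, and the new _invoke_mask and _backedge_mask entries extend that table. A minimal sketch of the idea, with all names invented:

// field_offsets.cpp -- illustrative sketch only, not HotSpot's vmStructs machinery.
#include <cstddef>
#include <cstdio>

struct MethodCountersLike {  // hypothetical stand-in for MethodCounters
  int _invoke_mask;
  int _backedge_mask;
};

struct FieldEntry { const char* type; const char* field; size_t offset; };

static const FieldEntry table[] = {
  { "MethodCountersLike", "_invoke_mask",   offsetof(MethodCountersLike, _invoke_mask) },
  { "MethodCountersLike", "_backedge_mask", offsetof(MethodCountersLike, _backedge_mask) },
};

int main() {
  // An external tool could use these offsets to read fields out of a target process.
  for (const FieldEntry& e : table) {
    printf("%s::%s @ offset %zu\n", e.type, e.field, e.offset);
  }
  return 0;
}
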
@@ -1142,17 +1142,18 @@ inline bool is_power_of_2_long(jlong x) {
  return ((x != NoLongBits) && (mask_long_bits(x, x - 1) == NoLongBits));
}

//* largest i such that 2^i <= x
// A negative value of 'x' will return '31'
// Returns largest i such that 2^i <= x.
// If x < 0, the function returns 31 on a 32-bit machine and 63 on a 64-bit machine.
// If x == 0, the function returns -1.
inline int log2_intptr(intptr_t x) {
  int i = -1;
  uintptr_t p = 1;
  uintptr_t p = 1;
  while (p != 0 && p <= (uintptr_t)x) {
    // p = 2^(i+1) && p <= x (i.e., 2^(i+1) <= x)
    i++; p *= 2;
  }
  // p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1))
  // (if p = 0 then overflow occurred and i = 31)
  // If p = 0, overflow has occurred and i = 31 or i = 63 (depending on the machine word size).
  return i;
}

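The rewritten comment now covers both word sizes and the zero case. A standalone copy of the loop (assuming a 64-bit intptr_t) demonstrates each documented edge case:

// log2_demo.cpp -- standalone copy of the loop above, for illustration.
#include <cstdint>
#include <cstdio>

inline int log2_intptr(intptr_t x) {
  int i = -1;
  uintptr_t p = 1;
  while (p != 0 && p <= (uintptr_t)x) {
    i++;
    p *= 2;
  }
  // If p == 0, the doubling overflowed: i is 31 (32-bit) or 63 (64-bit).
  return i;
}

int main() {
  printf("%d\n", log2_intptr(1));     // 0
  printf("%d\n", log2_intptr(1000));  // 9, since 2^9 = 512 <= 1000 < 1024
  printf("%d\n", log2_intptr(0));     // -1: the loop never runs
  printf("%d\n", log2_intptr(-5));    // 63 on a 64-bit machine: the cast wraps to a huge value
  return 0;
}
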
@@ -54,7 +54,7 @@ public class CheckCompileThresholdScaling {
    //
    // Tier0InvokeNotifyFreqLog, Tier0BackedgeNotifyFreqLog,
    // Tier3InvocationThreshold, Tier3MinInvocationThreshold,
    // Tier3CompileThreshold, and Tier3BackEdgeThreshold,
    // Tier3CompileThreshold, Tier3BackEdgeThreshold,
    // Tier2InvokeNotifyFreqLog, Tier2BackedgeNotifyFreqLog,
    // Tier3InvokeNotifyFreqLog, Tier3BackedgeNotifyFreqLog,
    // Tier23InlineeNotifyFreqLog, Tier4InvocationThreshold,

@@ -98,11 +98,13 @@ public class PoolsIndependenceTest implements NotificationListener {
            return false;
        });
        for (BlobType bt : BlobType.getAvailable()) {
            int expectedNotificationsAmount = bt.equals(btype) ? 1 : 0;
            Asserts.assertEQ(counters.get(bt.getMemoryPool().getName()).get(),
                    expectedNotificationsAmount, String.format("Unexpected "
                            + "amount of notifications for pool: %s",
                            bt.getMemoryPool().getName()));
            if (CodeCacheUtils.isCodeHeapPredictable(bt)) {
                int expectedNotificationsAmount = bt.equals(btype) ? 1 : 0;
                Asserts.assertEQ(counters.get(bt.getMemoryPool().getName()).get(),
                        expectedNotificationsAmount, String.format("Unexpected "
                                + "amount of notifications for pool: %s",
                                bt.getMemoryPool().getName()));
            }
        }
        try {
            ((NotificationEmitter) ManagementFactory.getMemoryMXBean()).

@@ -52,7 +52,9 @@ public class ThresholdNotificationsTest implements NotificationListener {

    public static void main(String[] args) {
        for (BlobType bt : BlobType.getAvailable()) {
            new ThresholdNotificationsTest(bt).runTest();
            if (CodeCacheUtils.isCodeHeapPredictable(bt)) {
                new ThresholdNotificationsTest(bt).runTest();
            }
        }
    }

@@ -25,7 +25,6 @@
/**
 * @test
 * @bug 7052494
 * @ignore 7154567
 * @summary Eclipse test fails on JDK 7 b142
 *
 * @run main/othervm -Xbatch Test7052494

@@ -0,0 +1,89 @@
/*
 * Copyright 2015 SAP AG. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8068909
 * @key regression
 * @summary Test that string optimizations produce code that doesn't lead to a crash.
 * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestOptimizeStringConcat
 * @author axel.siebenborn@sap.com
 */
public class TestOptimizeStringConcat {

    static boolean checkArgumentSyntax(String value, String allowedchars, String notallowedchars, String logmsg) {
        String rc = null;

        int maxchar = 99999;
        int minchar = 1;
        if ((allowedchars != null && notallowedchars != null) || minchar > maxchar) {
            rc = "internal error";
        } else {
            if (value == null) {
                rc = "the value null is not allowed, it is missing";
            } else if (value != null && minchar > 0 && value.trim().equals("")) {
                rc = "the value must not be empty";
            } else if (value != null) {
                if (value.length() < minchar || value.length() > maxchar) {
                    if (rc == null) {
                        rc = "the value length must be between +minchar+ and +maxchar";
                    }
                }
                char[] _value = value.toCharArray();
                boolean dotfound = false;
                int i = 1;
                if (_value[i] == '.' && !dotfound) {
                    dotfound = true;
                } else if (allowedchars != null && allowedchars.indexOf(_value[i]) == -1) {
                    if (rc == null) {
                        rc = "the value contains an illegal character: '" + _value[i] + "', only following characters are allowed: '+allowedchars+'";
                    } else {
                        rc += " / the value contains an illegal character: '" + _value[i] + "', only following characters are allowed: '+allowedchars+'";
                    }
                } else if (notallowedchars != null && notallowedchars.indexOf(_value[i]) != -1) {
                    if (rc == null) {
                        rc = "the value contains an illegal character: '" + _value[i] + "', following characters are not allowed '+notallowedchars+'";
                    } else {
                        rc += " / the value contains an illegal character: '" + _value[i] + "', following characters are not allowed '+notallowedchars+'";
                    }
                }
            }
        }

        if (rc != null) {
            System.out.println(logmsg + " ==> " + rc);
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        boolean failed = false;
        for (int i = 0; i < 10000; i++) {
            failed |= !checkArgumentSyntax("theName", null, "\"<&", "Error consistencyCheck: name in component definition");
            failed |= !checkArgumentSyntax(null, null, "\"<&", "Error consistencyCheck: name in component definition");
            failed |= !checkArgumentSyntax("42", "0123456789.", null, "Error consistencyCheck: counter in component definition");
        }
        System.out.println(failed);
    }
}
Some files were not shown because too many files have changed in this diff.