commit 911becc4a0
Merge
@@ -355,3 +355,4 @@ c7be2a78c31b3b6132f2f5e9e4b3d3bb1c20245c jdk-9+108
925be13b3740d07a5958ccb5ab3c0ae1baba7055 jdk-9+110
f900d5afd9c83a0df8f36161c27c5e4c86a66f4c jdk-9+111
03543a758cd5890f2266e4b9678378a925dde22a jdk-9+112
55b6d550828d1223b364e6ead4a56e56411c56df jdk-9+113

@@ -34,6 +34,7 @@ export LEGACY_BUILD_DIR=@OPENJDK_TARGET_OS@-@OPENJDK_TARGET_CPU_LEGACY@
export OPENJDK_TARGET_OS="@OPENJDK_TARGET_OS@"
export OPENJDK_TARGET_CPU="@OPENJDK_TARGET_CPU@"
export OPENJDK_TARGET_CPU_LIBDIR="@OPENJDK_TARGET_CPU_LIBDIR@"
export DEBUG_LEVEL="@DEBUG_LEVEL@"

export AWK="@AWK@"
export BASH="@BASH@"
@@ -47,8 +48,6 @@ export EXPR="@EXPR@"
export FILE="@FILE@"
export FIND="@FIND@"
export GREP="@GREP@"
export JAVAP="@FIXPATH@ @BOOT_JDK@/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
export JIMAGE="@FIXPATH@ @BUILD_OUTPUT@/jdk/bin/jimage"
export LDD="@LDD@"
export LN="@LN@"
export MKDIR="@MKDIR@"
@@ -72,6 +71,17 @@ export UNARCHIVE="@UNZIP@ -q"
export SRC_ROOT="@TOPDIR@"
export OUTPUT_ROOT="@OUTPUT_ROOT@"

if [ "@COMPILE_TYPE@" != "cross" ]; then
export JAVAP="@FIXPATH@ $OUTPUT_ROOT/jdk/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
export JIMAGE="@FIXPATH@ $OUTPUT_ROOT/jdk/bin/jimage"
elif [ "@CREATE_BUILDJDK@" = "true" ]; then
export JAVAP="@FIXPATH@ $OUTPUT_ROOT/buildjdk/jdk/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
export JIMAGE="@FIXPATH@ $OUTPUT_ROOT/buildjdk/jdk/bin/jimage"
else
export JAVAP="@FIXPATH@ @BUILD_JDK@/bin/javap @JAVA_TOOL_FLAGS_SMALL@"
export JIMAGE="@FIXPATH@ @BUILD_JDK@/bin/jimage"
fi

if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
export PATH="@VS_PATH@"
fi

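Note on the hunk above: in a cross-compiling build, javap and jimage from the freshly built JDK cannot run on the build host, so the script now picks binaries from a JDK that is runnable where the comparison runs. A minimal sketch of the same fail-fast idea, with hypothetical variable values, would be:

# Hypothetical sketch: pick a javap that can actually run on this host,
# mirroring the COMPILE_TYPE/CREATE_BUILDJDK selection above.
if [ "$COMPILE_TYPE" != "cross" ]; then
  JAVAP="$OUTPUT_ROOT/jdk/bin/javap"
else
  JAVAP="$BUILD_JDK/bin/javap"
fi
"$JAVAP" -version > /dev/null 2>&1 || { echo "javap not runnable: $JAVAP" >&2; exit 1; }
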
@@ -75,6 +75,7 @@ diff_text() {
THIS_FILE=$2

SUFFIX="${THIS_FILE##*.}"
NAME="${THIS_FILE##*/}"

TMP=1

@@ -92,6 +93,7 @@ diff_text() {
$GREP '^[<>]' | \
$SED -e '/[<>] \* from.*\.idl/d' \
-e '/[<>] .*[0-9]\{4\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}-b[0-9]\{2\}.*/d' \
-e '/[<>] .*[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}-[0-9]\{6\}.*/d' \
-e '/[<>] \*.*[0-9]\{4\} [0-9][0-9]*:[0-9]\{2\}:[0-9]\{2\}.*/d' \
-e '/\/\/ Generated from input file.*/d' \
-e '/\/\/ This file was generated AUTOMATICALLY from a template file.*/d' \
@@ -100,12 +102,34 @@ diff_text() {
# Ignore date strings in class files.
# Anonymous lambda classes get randomly assigned counters in their names.
if test "x$SUFFIX" = "xclass"; then
if [ "$NAME" = "module-info.class" ] || [ "$NAME" = "SystemModules.class" ]
then
# The SystemModules.class and module-info.class have several issues
# with random ordering of elements in HashSets.
MODULES_CLASS_FILTER="$SED \
-e 's/,$//' \
-e 's/;$//' \
-e 's/^ *[0-9]*://' \
-e 's/#[0-9]* */#/' \
-e 's/ *\/\// \/\//' \
-e 's/aload *[0-9]*/aload X/' \
-e 's/ldc_w/ldc /' \
| $SORT \
"
$JAVAP -c -constants -l -p "${OTHER_FILE}" \
| eval "$MODULES_CLASS_FILTER" > ${OTHER_FILE}.javap &
$JAVAP -c -constants -l -p "${THIS_FILE}" \
| eval "$MODULES_CLASS_FILTER" > ${THIS_FILE}.javap &
wait
TMP=$($DIFF ${OTHER_FILE}.javap ${THIS_FILE}.javap)
# To improve performance when large diffs are found, do a rough filtering of classes
# eligible for these exceptions
if $GREP -R -e '[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}-[0-9]\{6\}' \
-e 'lambda\$[a-zA-Z0-9]*\$[0-9]' ${THIS_FILE} > /dev/null; then
$JAVAP -c -constants -l -p "${OTHER_FILE}" > ${OTHER_FILE}.javap
$JAVAP -c -constants -l -p "${THIS_FILE}" > ${THIS_FILE}.javap
elif $GREP -R -e '[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}-[0-9]\{6\}' \
-e 'lambda\$[a-zA-Z0-9]*\$[0-9]' ${THIS_FILE} > /dev/null
then
$JAVAP -c -constants -l -p "${OTHER_FILE}" > ${OTHER_FILE}.javap &
$JAVAP -c -constants -l -p "${THIS_FILE}" > ${THIS_FILE}.javap &
wait
TMP=$($DIFF ${OTHER_FILE}.javap ${THIS_FILE}.javap | \
$GREP '^[<>]' | \
$SED -e '/[<>].*[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}-[0-9]\{6\}.*/d' \
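The idea in the hunk above is to compare class files semantically: disassemble both with javap, strip details that legitimately vary between builds (constant-pool indices, lambda counters), and diff what remains. A standalone sketch of the same pipeline, with hypothetical file names:

# Compare two class files while ignoring constant-pool numbering.
javap -c -constants -l -p Other.class \
  | sed -e 's/#[0-9]* */#/' -e 's/^ *[0-9]*://' | sort > other.javap
javap -c -constants -l -p This.class \
  | sed -e 's/#[0-9]* */#/' -e 's/^ *[0-9]*://' | sort > this.javap
diff other.javap this.javap && echo "equivalent bytecode"
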
@@ -305,14 +329,19 @@ compare_general_files() {
if [ -e $OTHER_DIR/$f ]; then
SUFFIX="${f##*.}"
if [ "$(basename $f)" = "release" ]; then
# Ignore differences in change numbers in release file.
# In release file, ignore differences in change numbers and order
# of modules in list.
OTHER_FILE=$WORK_DIR/$f.other
THIS_FILE=$WORK_DIR/$f.this
$MKDIR -p $(dirname $OTHER_FILE)
$MKDIR -p $(dirname $THIS_FILE)
RELEASE_FILTER="$SED \
-e 's/\:[0-9a-f]\{12,12\}/:CHANGE/g' \
-e 's/[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}-[0-9]\{6\}/<DATE>/g'
-e 's/[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}-[0-9]\{6\}/<DATE>/g' \
-e 's/^#.*/#COMMENT/g' \
-e 's/MODULES=/MODULES=\'$'\n/' \
-e 's/,/\'$'\n/g' \
| $SORT
"
$CAT $OTHER_DIR/$f | eval "$RELEASE_FILTER" > $OTHER_FILE
$CAT $THIS_DIR/$f | eval "$RELEASE_FILTER" > $THIS_FILE
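To make the comparison order-insensitive, the filter above splits the MODULES= line into one module per line and sorts, so two release files that differ only in change numbers and module order compare equal. Roughly the same thing, as a sketch assuming GNU sed (which accepts \n in the replacement) and hypothetical paths:

# Normalize a release file: one module per line, comments and hashes masked.
normalize_release() {
  sed -e 's/:[0-9a-f]\{12\}/:CHANGE/g' \
      -e 's/^#.*/#COMMENT/' \
      -e 's/MODULES=/MODULES=\n/' \
      -e 's/,/\n/g' "$1" | sort
}
diff <(normalize_release other/release) <(normalize_release this/release)
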
@@ -330,8 +359,9 @@ compare_general_files() {
-e 's/\(-- Generated by javadoc \).*\( --\)/\1(removed)\2/' \
-e 's/[A-Z][a-z]*, [A-Z][a-z]* [0-9][0-9]*, [0-9]\{4\} [0-9][0-9:]* [AMP]\{2,2\} [A-Z][A-Z]*/<DATE>/'
"
$CAT $OTHER_DIR/$f | eval "$HTML_FILTER" > $OTHER_FILE
$CAT $THIS_DIR/$f | eval "$HTML_FILTER" > $THIS_FILE
$CAT $OTHER_DIR/$f | eval "$HTML_FILTER" > $OTHER_FILE &
$CAT $THIS_DIR/$f | eval "$HTML_FILTER" > $THIS_FILE &
wait
else
OTHER_FILE=$OTHER_DIR/$f
THIS_FILE=$THIS_DIR/$f
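This hunk (and several later ones) applies the same speed-up: the two filter pipelines are independent, so they are pushed into the background with & and joined with wait, roughly halving the wall-clock time of each comparison. The pattern in isolation, with an invented filter name:

# Run two independent filter jobs concurrently, then join them.
expensive_filter < other.html > other.filtered &
expensive_filter < this.html  > this.filtered &
wait   # blocks until both background jobs have finished
diff other.filtered this.filtered
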
@@ -389,7 +419,7 @@ compare_zip_file() {
$RM -rf $THIS_UNZIPDIR $OTHER_UNZIPDIR
$MKDIR -p $THIS_UNZIPDIR
$MKDIR -p $OTHER_UNZIPDIR
if [ "$TYPE" = "jar" || "$TYPE" = "war" || "$TYPE" = "zip" || "$TYPE" = "jmod"]
if [ "$TYPE" = "jar" -o "$TYPE" = "war" -o "$TYPE" = "zip" -o "$TYPE" = "jmod" ]
then
(cd $THIS_UNZIPDIR && $UNARCHIVE $THIS_ZIP)
(cd $OTHER_UNZIPDIR && $UNARCHIVE $OTHER_ZIP)
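The replaced line above was a genuine bug: || is not a valid operator inside a single [ ... ] test (and the missing space before ] is itself a syntax error), so that branch could never run. For illustration, two valid alternatives:

# Broken: '||' is a shell list operator, not a test operator.
#   [ "$TYPE" = "jar" || "$TYPE" = "zip" ]
# Valid POSIX test with -o, as the fix uses:
[ "$TYPE" = "jar" -o "$TYPE" = "zip" ] && echo archive
# Equally valid: separate tests joined by the shell's ||
[ "$TYPE" = "jar" ] || [ "$TYPE" = "zip" ] && echo archive
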
@@ -526,7 +556,7 @@ compare_all_jar_files() {

# TODO filter?
ZIPS=$(cd $THIS_DIR && $FIND . -type f -name "*.jar" -o -name "*.war" \
-o -name "modules" -o -name "*.jmod" | $SORT | $FILTER)
-o -name "modules" | $SORT | $FILTER)

if [ -n "$ZIPS" ]; then
echo Jar files...
@@ -600,8 +630,8 @@ compare_bin_file() {
&& [ -f "$OTHER/support/native/java.base/java_objs/java.diz" ]; then
OTHER_DIZ_FILE="$OTHER/support/native/java.base/java_objs/java.diz"
elif [ "$NAME" = "jimage.exe" ] \
&& [ -f "$OTHER/support/native/jdk.dev/jimage_objs/jimage.diz" ]; then
OTHER_DIZ_FILE="$OTHER/support/native/jdk.dev/jimage_objs/jimage.diz"
&& [ -f "$OTHER/support/native/jdk.jlink/jimage_objs/jimage.diz" ]; then
OTHER_DIZ_FILE="$OTHER/support/native/jdk.jlink/jimage_objs/jimage.diz"
elif [ "$NAME" = "javacpl.exe" ] \
&& [ -f "$OTHER/support/native/jdk.plugin/javacpl/javacpl.diz" ]; then
OTHER_DIZ_FILE="$OTHER/support/native/jdk.plugin/javacpl/javacpl.diz"
@@ -632,8 +662,8 @@ compare_bin_file() {
&& [ -f "$THIS/support/native/java.base/java_objs/java.diz" ]; then
THIS_DIZ_FILE="$THIS/support/native/java.base/java_objs/java.diz"
elif [ "$NAME" = "jimage.exe" ] \
&& [ -f "$THIS/support/native/jdk.dev/jimage_objs/jimage.diz" ]; then
THIS_DIZ_FILE="$THIS/support/native/jdk.dev/jimage_objs/jimage.diz"
&& [ -f "$THIS/support/native/jdk.jlink/jimage_objs/jimage.diz" ]; then
THIS_DIZ_FILE="$THIS/support/native/jdk.jlink/jimage_objs/jimage.diz"
elif [ "$NAME" = "javacpl.exe" ] \
&& [ -f "$THIS/support/native/jdk.plugin/javacpl/javacpl.diz" ]; then
THIS_DIZ_FILE="$THIS/support/native/jdk.plugin/javacpl/javacpl.diz"
@@ -732,6 +762,13 @@ compare_bin_file() {
SYM_SORT_CMD="cat"
fi

if [ -n "$SYMBOLS_DIFF_FILTER" ] && [ -z "$NEED_SYMBOLS_DIFF_FILTER" ] \
|| [[ "$NEED_SYMBOLS_DIFF_FILTER" = *"$BIN_FILE"* ]]; then
this_SYMBOLS_DIFF_FILTER="$SYMBOLS_DIFF_FILTER"
else
this_SYMBOLS_DIFF_FILTER="$CAT"
fi

# Check symbols
if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
# The output from dumpbin on windows differs depending on if the debug symbol
@@ -750,8 +787,16 @@ compare_bin_file() {
$NM -j $ORIG_OTHER_FILE 2> /dev/null | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.other
$NM -j $ORIG_THIS_FILE 2> /dev/null | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this
else
$NM -a $ORIG_OTHER_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.other
$NM -a $ORIG_THIS_FILE 2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this
$NM -a $ORIG_OTHER_FILE 2> /dev/null | $GREP -v $NAME \
| $AWK '{print $2, $3, $4, $5}' \
| eval "$this_SYMBOLS_DIFF_FILTER" \
| $SYM_SORT_CMD \
> $WORK_FILE_BASE.symbols.other
$NM -a $ORIG_THIS_FILE 2> /dev/null | $GREP -v $NAME \
| $AWK '{print $2, $3, $4, $5}' \
| eval "$this_SYMBOLS_DIFF_FILTER" \
| $SYM_SORT_CMD \
> $WORK_FILE_BASE.symbols.this
fi

LC_ALL=C $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff
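The symbol check above reduces each binary to a normalized symbol list before diffing: nm dumps the symbol table, awk drops the always-different address column, an optional per-file filter masks randomized name suffixes, and sort makes the order canonical. As a standalone sketch with hypothetical library paths:

# Compare exported symbols of two builds of the same shared object.
nm -a other/libfoo.so | awk '{print $2, $3}' | sort > other.symbols
nm -a this/libfoo.so  | awk '{print $2, $3}' | sort > this.symbols
LC_ALL=C diff other.symbols this.symbols
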
@@ -828,9 +873,10 @@ compare_bin_file() {
FULLDUMP_DIFF_FILTER="$CAT"
fi
$FULLDUMP_CMD $OTHER_FILE | eval "$BUILD_ID_FILTER" | eval "$FULLDUMP_DIFF_FILTER" \
> $WORK_FILE_BASE.fulldump.other 2>&1
> $WORK_FILE_BASE.fulldump.other 2>&1 &
$FULLDUMP_CMD $THIS_FILE | eval "$BUILD_ID_FILTER" | eval "$FULLDUMP_DIFF_FILTER" \
> $WORK_FILE_BASE.fulldump.this 2>&1
> $WORK_FILE_BASE.fulldump.this 2>&1 &
wait

LC_ALL=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this \
> $WORK_FILE_BASE.fulldump.diff
@@ -854,18 +900,19 @@ compare_bin_file() {
FULLDUMP_MSG=" "
DIFF_FULLDUMP=
if [[ "$KNOWN_FULLDUMP_DIFF $ACCEPTED_FULLDUMP_DIFF" = *"$BIN_FILE"* ]]; then
FULLDUMP_MSG=" ! "
FULLDUMP_MSG=" ! "
fi
fi
fi

# Compare disassembly output
if [ -n "$DIS_CMD" ] && [ -z "$SKIP_DIS_DIFF" ]; then
# By default we filter out differences that include references to symbols.
# To get a raw diff with the complete disassembly, set
# DIS_DIFF_FILTER="$CAT"
if [ -z "$DIS_DIFF_FILTER" ]; then
DIS_DIFF_FILTER="$GREP -v ' # .* <.*>$' | $SED -r -e 's/(\b|x)([0-9a-fA-F]+)(\b|:|>)/X/g'"
this_DIS_DIFF_FILTER="$CAT"
if [ -n "$DIS_DIFF_FILTER" ]; then
if [ -z "$NEED_DIS_DIFF_FILTER" ] \
|| [[ "$NEED_DIS_DIFF_FILTER" = *"$BIN_FILE"* ]]; then
this_DIS_DIFF_FILTER="$DIS_DIFF_FILTER"
fi
fi
if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
|
||||
DIS_GREP_ARG=-a
|
||||
@ -873,9 +920,10 @@ compare_bin_file() {
|
||||
DIS_GREP_ARG=
|
||||
fi
|
||||
$DIS_CMD $OTHER_FILE | $GREP $DIS_GREP_ARG -v $NAME \
|
||||
| eval "$DIS_DIFF_FILTER" > $WORK_FILE_BASE.dis.other 2>&1
|
||||
| eval "$this_DIS_DIFF_FILTER" > $WORK_FILE_BASE.dis.other 2>&1 &
|
||||
$DIS_CMD $THIS_FILE | $GREP $DIS_GREP_ARG -v $NAME \
|
||||
| eval "$DIS_DIFF_FILTER" > $WORK_FILE_BASE.dis.this 2>&1
|
||||
| eval "$this_DIS_DIFF_FILTER" > $WORK_FILE_BASE.dis.this 2>&1 &
|
||||
wait
|
||||
|
||||
LC_ALL=C $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff
|
||||
|
||||
@@ -884,11 +932,15 @@ compare_bin_file() {
DIS_MSG=$($PRINTF "%8d" $DIS_DIFF_SIZE)
if [[ "$ACCEPTED_DIS_DIFF" != *"$BIN_FILE"* ]]; then
DIFF_DIS=true
if [[ "$KNOWN_DIS_DIFF" != *"$BIN_FILE"* ]]; then
if [ "$MAX_KNOWN_DIS_DIFF_SIZE" = "" ]; then
MAX_KNOWN_DIS_DIFF_SIZE="0"
fi
if [[ "$KNOWN_DIS_DIFF" = *"$BIN_FILE"* ]] \
&& [ "$DIS_DIFF_SIZE" -lt "$MAX_KNOWN_DIS_DIFF_SIZE" ]; then
DIS_MSG=" $DIS_MSG "
else
DIS_MSG="*$DIS_MSG*"
REGRESSIONS=true
else
DIS_MSG=" $DIS_MSG "
fi
else
DIS_MSG="($DIS_MSG)"

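The KNOWN_/ACCEPTED_ lists used above are plain newline-separated strings, and membership is tested with a bash substring match; after this change a known diff is tolerated only while its size stays under MAX_KNOWN_DIS_DIFF_SIZE. The gating logic in miniature, with invented values:

KNOWN_DIS_DIFF="
./bin/server/jvm.dll
"
MAX_KNOWN_DIS_DIFF_SIZE=2000
BIN_FILE=./bin/server/jvm.dll
DIS_DIFF_SIZE=1500
if [[ "$KNOWN_DIS_DIFF" = *"$BIN_FILE"* ]] \
    && [ "$DIS_DIFF_SIZE" -lt "$MAX_KNOWN_DIS_DIFF_SIZE" ]; then
  echo "tolerated known diff"
else
  echo "regression"
fi
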
@@ -89,7 +89,9 @@ if [ "$OPENJDK_TARGET_OS" = "linux" ]; then
./bin/jimage
./bin/jinfo
./bin/jjs
./bin/jlink
./bin/jmap
./bin/jmod
./bin/jps
./bin/jrunscript
./bin/jsadebugd
@@ -113,21 +115,36 @@ if [ "$OPENJDK_TARGET_OS" = "linux" ]; then
./bin/xjc
"

# Issues with __FILE__ usage in generated header files prevent a clean fulldump diff of
# the server jvm with the old hotspot build.
KNOWN_FULLDUMP_DIFF="
./lib$OPENJDK_TARGET_CPU_LIBDIR/client/libjvm.so
./lib$OPENJDK_TARGET_CPU_LIBDIR/server/libjvm.so
./lib$OPENJDK_TARGET_CPU_LIBDIR/minimal/libjvm.so
"

if [ "$OPENJDK_TARGET_CPU" = "x86" ]; then
KNOWN_DIS_DIFF="
./lib$OPENJDK_TARGET_CPU_LIBDIR/server/libjvm.so
if [ "$OPENJDK_TARGET_CPU" = "arm" ]; then
# NOTE: When comparing the old and new hotspot builds, the link time
# optimization makes good comparisons impossible. Fulldump compare always
# fails and disassembly can end up with some functions in different order.
# So for now, accept the difference but put a limit on the size. The
# different order of functions shouldn't result in a very big diff.
KNOWN_FULLDUMP_DIFF="
./lib$OPENJDK_TARGET_CPU_LIBDIR/minimal/libjvm.so
"

# Link time optimization adds random numbers to symbol names
NEED_DIS_DIFF_FILTER="
./lib$OPENJDK_TARGET_CPU_LIBDIR/minimal/libjvm.so
"
DIS_DIFF_FILTER="$SED -r \
-e 's/\.[0-9]+/.X/g' \
-e 's/\t[0-9a-f]{4} [0-9a-f]{4} /\tXXXX XXXX /' \
-e 's/\t[0-9a-f]{5,} /\t<HEX> /' \
"
KNOWN_DIS_DIFF="
./lib$OPENJDK_TARGET_CPU_LIBDIR/minimal/libjvm.so
"
MAX_KNOWN_DIS_DIFF_SIZE="3000"

NEED_SYMBOLS_DIFF_FILTER="
./lib$OPENJDK_TARGET_CPU_LIBDIR/minimal/libjvm.so
"
SYMBOLS_DIFF_FILTER="$SED -r \
-e 's/\.[0-9]+/.X/g'
"
DIS_DIFF_FILTER="$SED \
-e 's/\(:\t\)\([0-9a-z]\{2,2\} \)\{1,7\}/\1<hex>/g' \
-e 's/0x[0-9a-z]\{2,9\}/<hex>/g'"
fi
fi

@@ -224,7 +241,9 @@ if [ "$OPENJDK_TARGET_OS" = "solaris" ] && [ "$OPENJDK_TARGET_CPU" = "x86_64" ];
./bin/jimage
./bin/jinfo
./bin/jjs
./bin/jlink
./bin/jmap
./bin/jmod
./bin/jps
./bin/jrunscript
./bin/jsadebugd
@@ -250,15 +269,10 @@ if [ "$OPENJDK_TARGET_OS" = "solaris" ] && [ "$OPENJDK_TARGET_CPU" = "x86_64" ];

SKIP_FULLDUMP_DIFF="true"

# Filter random C++ symbol strings.
# Some numbers differ randomly.
# Random strings looking like this differ: <.XAKoKoPIac2W0OA.
DIS_DIFF_FILTER="$SED \
-e 's/\.[a-zA-Z0-9_\$]\{15\}/<SYM>/g' \
-e 's/\(\# \)[0-9a-f]*\( <\)/\1<HEX>\2/g' \
-e 's/0x[0-9a-f]*$/<HEX>/g' \
-e 's/0x[0-9a-f]*\([,(>]\)/<HEX>\1/g' \
-e 's/: [0-9a-f][0-9a-f]\( [0-9a-f][0-9a-f]\)\{2,10\}/: <NUMS>/g' \
-e 's/ [\.A-Za-z0-9%@]\{16\}$/ <BIN>/g'"
-e 's/<\.[A-Za-z0-9]\{15\}\./<.SYM./' \
"

fi

@@ -356,7 +370,9 @@ if [ "$OPENJDK_TARGET_OS" = "solaris" ] && [ "$OPENJDK_TARGET_CPU" = "sparcv9" ]
./bin/jimage
./bin/jinfo
./bin/jjs
./bin/jlink
./bin/jmap
./bin/jmod
./bin/jps
./bin/jrunscript
./bin/jsadebugd
@@ -380,26 +396,32 @@ if [ "$OPENJDK_TARGET_OS" = "solaris" ] && [ "$OPENJDK_TARGET_CPU" = "sparcv9" ]
./bin/xjc
"

# Some numbers differ randomly.
DIS_DIFF_FILTER="$SED \
-e 's/\$[a-zA-Z0-9_\$]\{15\}/<SYM>/g' \
-e 's/: [0-9a-f][0-9a-f]\( [0-9a-f][0-9a-f]\)\{2,10\}/: <NUMS>/g' \
-e 's/, [0-9a-fx\-]\{1,8\}/, <CONST>/g' \
-e 's/call [0-9a-f]\{7\}/call <ADDR>/g' \
-e 's/^[0-9a-f]\{16\}/<ADDR>:/' \
-e 's/^ *[0-9a-f]\{3,8\}:/ <ADDR>:/' \
-e 's/: [0-9a-f][0-9a-f]\( [0-9a-f][0-9a-f]\)\{2,10\}/: <NUMS>/' \
-e 's/\$[a-zA-Z0-9_\$]\{15\}\./<SYM>./' \
-e 's/, [0-9a-fx\-]\{1,8\}/, <ADDR>/g' \
-e 's/0x[0-9a-f]\{1,8\}/<HEX>/g' \
-e 's/\! [0-9a-f]\{1,8\} /! <ADDR> /g'"
-e 's/\! [0-9a-f]\{1,8\} /! <ADDR> /' \
-e 's/call [0-9a-f]\{4,7\}/call <ADDR>/' \
-e 's/%hi(0),/%hi(<HEX>),/' \
"

# libjvm.so
# __FILE__ macro usage in debug.hpp causes differences between old and new
# hotspot builds in ad_sparc.o and ad_sparc_clone.o. The .o files compare
# equal when stripped, but at link time differences appear. Removing
# __FILE__ from ShouldNotCallThis() and ShouldNotReachHere() removes
# the differences.
KNOWN_DIS_DIFF="
./lib/sparcv9/server/libjvm.so
./lib/sparcv9/libsaproc.so
"

MAX_KNOWN_DIS_DIFF_SIZE="3000"

# On slowdebug the disassembly can differ randomly.
if [ "$DEBUG_LEVEL" = "slowdebug" ]; then
ACCEPTED_DIS_DIFF="
./lib/sparcv9/libfontmanager.so
./lib/sparcv9/server/libjvm.so
"
fi

SKIP_FULLDUMP_DIFF="true"

fi
@@ -419,6 +441,7 @@ if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
./demo/jvmti/minst/lib/minst.dll
./bin/attach.dll
./bin/jsoundds.dll
./bin/client/jvm.dll
./bin/server/jvm.dll
./bin/appletviewer.exe
./bin/idlj.exe
@@ -438,7 +461,9 @@ if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
./bin/jimage.exe
./bin/jinfo.exe
./bin/jjs.exe
./bin/jlink.exe
./bin/jmap.exe
./bin/jmod.exe
./bin/jps.exe
./bin/jrunscript.exe
./bin/jsadebugd.exe
@@ -469,22 +494,39 @@ if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
./bin/jabswitch.exe
"

# On windows, there are unavoidable alignment issues making
# a perfect disasm diff impossible. Filter out the following:
# * Random parts of C++ symbols (this is a bit greedy, but does the trick)
#   @XXXXX
# * Hexadecimal addresses that are sometimes aligned differently.
# * Dates in version strings XXXX_XX_XX.
DIS_DIFF_FILTER="$SED \
-e 's/^  [0-9A-F]\{16\}: //g' \
-e 's/[@?][A-Za-z0-9_]\{1,25\}/<SYM>/g' \
-e 's/\([\[+]\)[0-9A-F]\{4,16\}h\]/\1<HEXSTR>]/g' \
-e 's/_[0-9]\{4\}_[0-9]\{2\}_[0-9]\{2\}/_<DATE>/g'"
#DIS_DIFF_FILTER="$CAT"
if [ "$OPENJDK_TARGET_CPU" = "x86" ]; then
DIS_DIFF_FILTER="$SED -r \
-e 's/^  [0-9A-F]{16}: //' \
-e 's/^  [0-9A-F]{8}: /  <ADDR>: /' \
-e 's/(offset \?\?)_C@_.*/\1<SYM>/' \
-e 's/[@?][A-Za-z0-9_]{1,25}/<SYM>/' \
-e 's/([-,+])[0-9A-F]{2,16}/\1<HEXSTR>/g' \
-e 's/\[[0-9A-F]{4,16}h\]/[<HEXSTR>]/' \
-e 's/: ([a-z]{2}[a-z ]{2}) [0-9A-F]{2,16}h?$/: \1 <HEXSTR>/' \
-e 's/_20[0-9]{2}_[0-1][0-9]_[0-9]{2}/_<DATE>/' \
"
elif [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then
DIS_DIFF_FILTER="$SED -r \
-e 's/^  [0-9A-F]{16}: //' \
-e 's/\[[0-9A-F]{4,16}h\]/[<HEXSTR>]/' \
-e 's/([,+])[0-9A-F]{2,16}h/\1<HEXSTR>/' \
-e 's/([a-z]{2}[a-z ]{2}) [0-9A-F]{4,16}$/\1 <HEXSTR>/' \
-e 's/\[\?\?_C@_.*/[<SYM>]/' \
"
fi

SKIP_BIN_DIFF="true"
SKIP_FULLDUMP_DIFF="true"

# NOTE: When comparing the old and new hotspot builds, the server jvm.dll
# cannot be made equal in disassembly. Some functions just always end up
# in different order. So for now, accept the difference but put a limit
# on the size. The different order of functions shouldn't result in a very
# big diff.
KNOWN_DIS_DIFF="
./bin/server/jvm.dll
"
MAX_KNOWN_DIS_DIFF_SIZE="2000000"
fi

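All of these per-platform DIS_DIFF_FILTER values do the same job: rewrite the parts of a disassembly line that can never be stable between two builds (addresses, hex immediates, dates, mangled-name suffixes) to fixed placeholders, so diff only reports real code changes. For example, on an invented dumpbin-style line:

# Mask addresses and hex constants before diffing disassembly.
echo '  0000000140001000: mov rax,7FFD1234h' \
  | sed -r -e 's/^  [0-9A-F]{16}: //' \
           -e 's/([,+])[0-9A-F]{2,16}h/\1<HEXSTR>/'
# prints: mov rax,<HEXSTR>
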
@@ -512,7 +554,9 @@ if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
./bin/jimage
./bin/jinfo
./bin/jjs
./bin/jlink
./bin/jmap
./bin/jmod
./bin/jps
./bin/jrunscript
./bin/jsadebugd
@@ -565,6 +609,7 @@ if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
./Contents/Home/lib/libverify.dylib
./Contents/Home/lib/libsaproc.dylib
./Contents/Home/lib/libsplashscreen.dylib
./Contents/Home/lib/server/libjsig.dylib
./Contents/Home/lib/server/libjvm.dylib
./Contents/Home/lib/deploy/JavaControlPanel.prefPane/Contents/MacOS/JavaControlPanel
./Contents/Resources/JavaControlPanelHelper
@@ -590,6 +635,7 @@ if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
./lib/libverify.dylib
./lib/libsaproc.dylib
./lib/libsplashscreen.dylib
./lib/server/libjsig.dylib
./lib/server/libjvm.dylib
./lib/deploy/JavaControlPanel.prefPane/Contents/MacOS/JavaControlPanel
./Versions/A/Resources/finish_installation.app/Contents/MacOS/finish_installation
@@ -606,7 +652,8 @@ if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then

DIS_DIFF_FILTER="LANG=C $SED \
-e 's/0x[0-9a-f]\{3,16\}/<HEXSTR>/g' -e 's/^[0-9a-f]\{12,20\}/<ADDR>/' \
-e 's/## literal pool for: .Java HotSpot(TM) 64-Bit Server VM.*/<COMMENT>/g'
-e 's/-20[0-9][0-9]-[0-1][0-9]-[0-3][0-9]-[0-2][0-9]\{5\}/<DATE>/g' \
-e 's/), built on .*/), <DATE>/' \
"

fi

@@ -355,3 +355,4 @@ b75afa17aefe480c23c616a6a2497063312f7189 jdk-9+109
9666775734fb6028ee86df9972626b3667b6a318 jdk-9+110
2bb92dd44275679edb29fdbffc3b7cbebc9a6bf0 jdk-9+111
780d0620add32bf545471cf65038c9ac6d9c036d jdk-9+112
cc30faa2da498c478e89ab062ff160653ca1b170 jdk-9+113

@@ -29,6 +29,8 @@ module java.corba {
requires java.logging;
requires java.naming;
requires java.transaction;
// 8148863
requires jdk.unsupported;

exports javax.activity;
exports javax.rmi;

@@ -515,3 +515,4 @@ c5146d4da417f76edfc43097d2e2ced042a65b4e jdk-9+107
2f5d1578b24060ea06bd1f340a124db95d1475b2 jdk-9+110
c558850fac5750d8ca98a45180121980f57cdd28 jdk-9+111
76582e8dc9e6374e4f99ab797c8d364b6e9449b4 jdk-9+112
c569f8d89269fb6205b90f727581eb8cc04132f9 jdk-9+113

@@ -32,24 +32,9 @@ suite = {

"libraries" : {

"HCFDIS" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/hcfdis-3.jar"],
"sha1" : "a71247c6ddb90aad4abf7c77e501acc60674ef57",
},

"C1VISUALIZER_DIST" : {
"urls" : ["https://java.net/downloads/c1visualizer/c1visualizer_2015-07-22.zip"],
"sha1" : "7ead6b2f7ed4643ef4d3343a5562e3d3f39564ac",
},

"JOL_INTERNALS" : {
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/truffle/jol/jol-internals.jar"],
"sha1" : "508bcd26a4d7c4c44048990c6ea789a3b11a62dc",
},

"BATIK" : {
"sha1" : "122b87ca88e41a415cf8b523fd3d03b4325134a3",
"urls" : ["https://lafo.ssw.uni-linz.ac.at/pub/graal-external-deps/batik-all-1.7.jar"],
"TESTNG" : {
"urls" : ["http://central.maven.org/maven2/org/testng/testng/6.9.10/testng-6.9.10.jar"],
"sha1" : "6feb3e964aeb7097aff30c372aac3ec0f8d87ede",
},

# Stubs for classes introduced in JDK9 that allow compilation with a JDK8 javac and Eclipse.
@@ -175,6 +160,18 @@ suite = {
"workingSets" : "JVMCI",
},

"jdk.vm.ci.hotspot.test" : {
"subDir" : "test/compiler/jvmci",
"sourceDirs" : ["src"],
"dependencies" : [
"mx:TESTNG",
"jdk.vm.ci.hotspot",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "1.8",
"workingSets" : "API,JVMCI",
},

"jdk.vm.ci.hotspotvmconfig" : {
"subDir" : "src/jdk.vm.ci/share/classes",
"sourceDirs" : ["src"],

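Each library entry in the suite file above pins its download to a sha1, so a fetched jar can be verified before use. A sketch of the equivalent check from a shell, reusing the URL and hash from the TESTNG entry:

# Download and verify a pinned dependency.
curl -fsSL -o testng.jar \
  http://central.maven.org/maven2/org/testng/testng/6.9.10/testng-6.9.10.jar
echo "6feb3e964aeb7097aff30c372aac3ec0f8d87ede  testng.jar" | sha1sum -c -
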
@@ -65,7 +65,7 @@ void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, T
Unimplemented();
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
Metadata* reference = record_metadata_reference(constant, CHECK);
void* reference = record_metadata_reference(constant, CHECK);
move->set_data((intptr_t) reference);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(reference));
}

@@ -211,10 +211,6 @@ static int reg2offset_out(VMReg r) {
return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

template <class T> static const T& min (const T& a, const T& b) {
return (a > b) ? b : a;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte

@@ -79,7 +79,7 @@ void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, T
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
Metadata* reference = record_metadata_reference(constant, CHECK);
void* reference = record_metadata_reference(constant, CHECK);
move->set_data((intptr_t)reference);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(reference));
}

@@ -4666,8 +4666,109 @@ void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary
bind(Ldone);
}

void MacroAssembler::has_negatives(Register inp, Register size, Register result, Register t2, Register t3, Register t4, Register t5) {

// test for negative bytes in input string of a given size
// result 1 if found, 0 otherwise.

Label Lcore, Ltail, Lreturn, Lcore_rpt;

assert_different_registers(inp, size, t2, t3, t4, t5, result);

Register i = result; // result used as integer index i until very end
Register lmask = t2; // t2 is aliased to lmask

// INITIALIZATION
// ===========================================================
// initialize highbits mask -> lmask = 0x8080808080808080 (8B/64b)
// compute unaligned offset -> i
// compute core end index -> t5
Assembler::sethi(0x80808000, t2); //! sethi macro fails to emit optimal
add(t2, 0x80, t2);
sllx(t2, 32, t3);
or3(t3, t2, lmask); // 0x8080808080808080 -> lmask
sra(size,0,size);
andcc(inp, 0x7, i); // unaligned offset -> i
br(Assembler::zero, true, Assembler::pn, Lcore); // starts 8B aligned?
delayed()->add(size, -8, t5); // (annulled) core end index -> t5

// ===========================================================

// UNALIGNED HEAD
// ===========================================================
// * unaligned head handling: grab aligned 8B containing unaligned inp(ut)
// * obliterate (ignore) bytes outside string by shifting off reg ends
// * compare with bitmask, short circuit return true if one or more high
//   bits set.
cmp(size, 0);
br(Assembler::zero, true, Assembler::pn, Lreturn); // short-circuit?
delayed()->mov(0,result); // annulled so i not clobbered for following
neg(i, t4);
add(i, size, t5);
ldx(inp, t4, t3); // raw aligned 8B containing unaligned head -> t3
mov(8, t4);
sub(t4, t5, t4);
sra(t4, 31, t5);
andn(t4, t5, t5);
add(i, t5, t4);
sll(t5, 3, t5);
sll(t4, 3, t4); // # bits to shift right, left -> t5,t4
srlx(t3, t5, t3);
sllx(t3, t4, t3); // bytes outside string in 8B header obliterated -> t3
andcc(lmask, t3, G0);
brx(Assembler::notZero, true, Assembler::pn, Lreturn); // short circuit?
delayed()->mov(1,result); // annulled so i not clobbered for following
add(size, -8, t5); // core end index -> t5
mov(8, t4);
sub(t4, i, i); // # bytes examined in unaligned head (<8) -> i
// ===========================================================

// ALIGNED CORE
// ===========================================================
// * iterate index i over aligned 8B sections of core, comparing with
//   bitmask, short circuit return true if one or more high bits set
// t5 contains core end index/loop limit which is the index
// of the MSB of last (unaligned) 8B fully contained in the string.
// inp contains address of first byte in string/array
// lmask contains 8B high bit mask for comparison
// i contains next index to be processed (adr. inp+i is on 8B boundary)
bind(Lcore);
cmp_and_br_short(i, t5, Assembler::greater, Assembler::pn, Ltail);
bind(Lcore_rpt);
ldx(inp, i, t3);
andcc(t3, lmask, G0);
brx(Assembler::notZero, true, Assembler::pn, Lreturn);
delayed()->mov(1, result); // annulled so i not clobbered for following
add(i, 8, i);
cmp_and_br_short(i, t5, Assembler::lessEqual, Assembler::pn, Lcore_rpt);
// ===========================================================

// ALIGNED TAIL (<8B)
// ===========================================================
// handle aligned tail of 7B or less as complete 8B, obliterating end of
// string bytes by shifting them off end, compare what's left with bitmask
// inp contains address of first byte in string/array
// lmask contains 8B high bit mask for comparison
// i contains next index to be processed (adr. inp+i is on 8B boundary)
bind(Ltail);
subcc(size, i, t4); // # of remaining bytes in string -> t4
// return 0 if no more remaining bytes
br(Assembler::lessEqual, true, Assembler::pn, Lreturn);
delayed()->mov(0, result); // annulled so i not clobbered for following
ldx(inp, i, t3); // load final 8B (aligned) containing tail -> t3
mov(8, t5);
sub(t5, t4, t4);
mov(0, result); // ** i clobbered at this point
sll(t4, 3, t4); // bits beyond end of string -> t4
srlx(t3, t4, t3); // bytes beyond end now obliterated -> t3
andcc(lmask, t3, G0);
movcc(Assembler::notZero, false, xcc, 1, result);
bind(Lreturn);
}

#endif

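The core trick in has_negatives above is branch-free: load 8 bytes at a time and AND them against the mask 0x8080808080808080; any byte with its high bit set (a "negative" byte) leaves a non-zero result. The same test, sketched in shell arithmetic on an invented 64-bit word (bash wraps the mask to a signed value, but the bitwise AND is unaffected):

# High-bit mask test over 8 packed bytes (word value invented).
lmask=$(( 0x8080808080808080 ))
word=$((  0x0041007F00C00000 ))   # the byte 0xC0 has its top bit set
if (( (word & lmask) != 0 )); then
  echo "has a negative byte"
else
  echo "all bytes are 0x00..0x7F"
fi
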

// Use BIS for zeroing (count is in bytes).
void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1392,6 +1392,11 @@ public:

void array_equals(bool is_array_equ, Register ary1, Register ary2,
Register limit, Register tmp, Register result, bool is_byte);
// test for negative bytes in input string of a given size, result 0 if none
void has_negatives(Register inp, Register size, Register result,
Register t2, Register t3, Register t4,
Register t5);

#endif

// Use BIS for zeroing

@@ -10168,6 +10168,22 @@ instruct array_equalsC(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI resul
ins_pipe(long_memory_op);
%}

instruct has_negatives(o0RegP pAryR, g3RegI iSizeR, notemp_iRegI resultR,
iRegL tmp1L, iRegL tmp2L, iRegL tmp3L, iRegL tmp4L,
flagsReg ccr)
%{
match(Set resultR (HasNegatives pAryR iSizeR));
effect(TEMP resultR, TEMP tmp1L, TEMP tmp2L, TEMP tmp3L, TEMP tmp4L, USE pAryR, USE iSizeR, KILL ccr);
format %{ "has negatives byte[] $pAryR,$iSizeR -> $resultR // KILL $tmp1L,$tmp2L,$tmp3L,$tmp4L" %}
ins_encode %{
__ has_negatives($pAryR$$Register, $iSizeR$$Register,
$resultR$$Register,
$tmp1L$$Register, $tmp2L$$Register,
$tmp3L$$Register, $tmp4L$$Register);
%}
ins_pipe(long_memory_op);
%}

// char[] to byte[] compression
instruct string_compress(o0RegP src, o1RegP dst, g3RegI len, notemp_iRegI result, iRegL tmp, flagsReg ccr) %{
predicate(UseVIS < 3);

@@ -394,7 +394,7 @@ bool Assembler::emit_compressed_disp_byte(int &disp) {
int mod_idx = 0;
// We will test if the displacement fits the compressed format and if so
// apply the compression to the displacement iff the result is 8-bit.
if (VM_Version::supports_evex() && (_attributes != NULL) && _attributes->is_evex_instruction()) {
if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) {
int evex_encoding = _attributes->get_evex_encoding();
int tuple_type = _attributes->get_tuple_type();
switch (tuple_type) {
@@ -2154,7 +2154,7 @@ void Assembler::movb(Register dst, Address src) {

void Assembler::movddup(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse3(), ""));
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_128bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x12);
@@ -2423,7 +2423,8 @@ void Assembler::vmovdqu(Address dst, XMMRegister src) {
void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
emit_int8((unsigned char)(0xC0 | encode));
}
@@ -2432,8 +2433,9 @@ void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
emit_operand(dst, src);
}
@@ -2443,8 +2445,9 @@ void Assembler::evmovdqub(Address dst, XMMRegister src, int vector_len) {
assert(src != xnoreg, "sanity");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
emit_int8(0x7F);
emit_operand(src, dst);
}
@@ -2452,7 +2455,8 @@ void Assembler::evmovdqub(Address dst, XMMRegister src, int vector_len) {
void Assembler::evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
emit_int8((unsigned char)(0xC0 | encode));
}
@@ -2462,7 +2466,8 @@ void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) {
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
emit_operand(dst, src);
}
@@ -2473,13 +2478,16 @@ void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) {
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
emit_int8(0x7F);
emit_operand(src, dst);
}

void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
emit_int8((unsigned char)(0xC0 | encode));
@@ -2490,6 +2498,7 @@ void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false , /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.set_is_evex_instruction();
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
emit_operand(dst, src);
@@ -2500,6 +2509,7 @@ void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
assert(src != xnoreg, "sanity");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x7F);
@@ -2509,6 +2519,7 @@ void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
emit_int8((unsigned char)(0xC0 | encode));
@@ -2518,6 +2529,7 @@ void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
@@ -2529,6 +2541,7 @@ void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
assert(src != xnoreg, "sanity");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
emit_int8(0x7F);
@@ -3196,6 +3209,7 @@ void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_avx512bw(), "");
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x74);
@@ -3206,6 +3220,7 @@ void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vect
assert(VM_Version::supports_avx512bw(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int dst_enc = kdst->encoding();
@@ -3237,6 +3252,7 @@ void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int
void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_avx512bw(), "");
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x75);
@@ -3248,6 +3264,7 @@ void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vect
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.set_is_evex_instruction();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int dst_enc = kdst->encoding();
vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -3278,6 +3295,7 @@ void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int
void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x76);
@@ -3289,6 +3307,7 @@ void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vect
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
attributes.set_is_evex_instruction();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int dst_enc = kdst->encoding();
vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -3319,6 +3338,7 @@ void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(kdst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x29);
@@ -3330,6 +3350,7 @@ void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vect
assert(VM_Version::supports_evex(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_is_evex_instruction();
attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int dst_enc = kdst->encoding();
@@ -3634,7 +3655,7 @@ void Assembler::pshufb(XMMRegister dst, Address src) {
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
assert(isByte(mode), "invalid value");
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_128bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x70);
@@ -5702,7 +5723,7 @@ void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_
void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -5782,7 +5803,7 @@ void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8
assert(VM_Version::supports_avx(), "");
assert(dst != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit;
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
@@ -5799,7 +5820,7 @@ void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8
void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x19);
@@ -5813,7 +5834,7 @@ void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), "");
assert(src != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit;
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
@@ -5828,7 +5849,7 @@ void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx2(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -5856,7 +5877,7 @@ void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8
assert(VM_Version::supports_avx2(), "");
assert(dst != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit;
int nds_enc = nds->is_valid() ? nds->encoding() : 0;
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
@@ -5873,7 +5894,7 @@ void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8
void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx(), "");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit;
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x39);
@@ -5887,7 +5908,7 @@ void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
assert(VM_Version::supports_avx2(), "");
assert(src != xnoreg, "sanity");
assert(imm8 <= 0x01, "imm8: %u", imm8);
int vector_len = VM_Version::supports_evex() ? AVX_512bit : AVX_256bit;
int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_256bit;
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
@@ -6147,7 +6168,11 @@ void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x7A);
if (attributes.is_evex_instruction()) {
emit_int8(0x7A);
} else {
emit_int8(0x78);
}
emit_int8((unsigned char)(0xC0 | encode));
}

@@ -6156,7 +6181,11 @@ void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x7B);
if (attributes.is_evex_instruction()) {
emit_int8(0x7B);
} else {
emit_int8(0x79);
}
emit_int8((unsigned char)(0xC0 | encode));
}

@@ -6165,7 +6194,11 @@ void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x7C);
if (attributes.is_evex_instruction()) {
emit_int8(0x7C);
} else {
emit_int8(0x58);
}
emit_int8((unsigned char)(0xC0 | encode));
}

@@ -6174,7 +6207,11 @@ void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x7C);
if (attributes.is_evex_instruction()) {
emit_int8(0x7C);
} else {
emit_int8(0x59);
}
emit_int8((unsigned char)(0xC0 | encode));
}

@@ -6793,7 +6830,7 @@ void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix
attributes->set_current_assembler(this);

// if vector length is turned off, revert to AVX for vectors smaller than 512-bit
if ((UseAVX > 2) && _legacy_mode_vl && attributes->uses_vl()) {
if (UseAVX > 2 && _legacy_mode_vl && attributes->uses_vl()) {
switch (attributes->get_vector_len()) {
case AVX_128bit:
case AVX_256bit:
@@ -6802,7 +6839,27 @@ void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix
}
}

if ((UseAVX > 2) && !attributes->is_legacy_mode())
// For pure EVEX check and see if this instruction
// is allowed in legacy mode and has resources which will
// fit in it. Pure EVEX instructions will use set_is_evex_instruction in their definition,
// else that field is set when we encode to EVEX
if (UseAVX > 2 && !attributes->is_legacy_mode() &&
!_is_managed && !attributes->is_evex_instruction()) {
if (!_legacy_mode_vl && attributes->get_vector_len() != AVX_512bit) {
bool check_register_bank = NOT_IA32(true) IA32_ONLY(false);
if (check_register_bank) {
// check nds_enc and xreg_enc for upper bank usage
if (nds_enc < 16 && xreg_enc < 16) {
attributes->set_is_legacy_mode();
}
} else {
attributes->set_is_legacy_mode();
}
}
}

_is_managed = false;
if (UseAVX > 2 && !attributes->is_legacy_mode())
{
bool evex_r = (xreg_enc >= 16);
bool evex_v = (nds_enc >= 16);
@ -6819,15 +6876,20 @@ int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexS
|
||||
bool vex_x = false;
|
||||
set_attributes(attributes);
|
||||
attributes->set_current_assembler(this);
|
||||
bool check_register_bank = NOT_IA32(true) IA32_ONLY(false);
|
||||
|
||||
// if vector length is turned off, revert to AVX for vectors smaller than 512-bit
|
||||
if ((UseAVX > 2) && _legacy_mode_vl && attributes->uses_vl()) {
|
||||
if (UseAVX > 2 && _legacy_mode_vl && attributes->uses_vl()) {
|
||||
switch (attributes->get_vector_len()) {
|
||||
case AVX_128bit:
|
||||
case AVX_256bit:
|
||||
if ((dst_enc >= 16) | (nds_enc >= 16) | (src_enc >= 16)) {
|
||||
// up propagate arithmetic instructions to meet RA requirements
|
||||
attributes->set_vector_len(AVX_512bit);
|
||||
if (check_register_bank) {
|
||||
if (dst_enc >= 16 || nds_enc >= 16 || src_enc >= 16) {
|
||||
// up propagate arithmetic instructions to meet RA requirements
|
||||
attributes->set_vector_len(AVX_512bit);
|
||||
} else {
|
||||
attributes->set_is_legacy_mode();
|
||||
}
|
||||
} else {
|
||||
attributes->set_is_legacy_mode();
|
||||
}
|
||||
@ -6835,7 +6897,26 @@ int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexS
|
||||
}
|
||||
}
|
||||
|
||||
if ((UseAVX > 2) && !attributes->is_legacy_mode())
|
||||
// For pure EVEX check and see if this instruction
|
||||
// is allowed in legacy mode and has resources which will
|
||||
// fit in it. Pure EVEX instructions will use set_is_evex_instruction in their definition,
|
||||
// else that field is set when we encode to EVEX
|
||||
if (UseAVX > 2 && !attributes->is_legacy_mode() &&
|
||||
!_is_managed && !attributes->is_evex_instruction()) {
|
||||
if (!_legacy_mode_vl && attributes->get_vector_len() != AVX_512bit) {
|
||||
if (check_register_bank) {
|
||||
// check dst_enc, nds_enc and src_enc for upper bank usage
|
||||
if (dst_enc < 16 && nds_enc < 16 && src_enc < 16) {
|
||||
attributes->set_is_legacy_mode();
|
||||
}
|
||||
} else {
|
||||
attributes->set_is_legacy_mode();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_is_managed = false;
|
||||
if (UseAVX > 2 && !attributes->is_legacy_mode())
|
||||
{
|
||||
bool evex_r = (dst_enc >= 16);
|
||||
bool evex_v = (nds_enc >= 16);
|
||||
|
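Both prefix emitters now apply the same demotion rule before choosing a prefix: an instruction that is not managed spill code and not a pure-EVEX instruction may be encoded with a legacy VEX prefix when its vector length is under 512 bits and, on 64-bit (where the upper register bank exists), none of its operands live in registers 16-31. A standalone Java model of that decision, with all names hypothetical:

// Hypothetical model of the VEX-vs-EVEX demotion in the two hunks above.
public class EvexDemotion {
    // Returns true when the encoder may fall back to a legacy VEX prefix.
    static boolean demoteToVex(boolean managed, boolean pureEvex, boolean supportsAvx512vl,
                               int vectorLenBits, boolean hasUpperBank, int... regEncodings) {
        if (managed || pureEvex) {
            return false; // must keep the EVEX form
        }
        if (supportsAvx512vl && vectorLenBits != 512) {
            if (!hasUpperBank) {
                return true; // 32-bit: xmm16-xmm31 cannot occur
            }
            for (int enc : regEncodings) {
                if (enc >= 16) {
                    return false; // operand is only reachable with EVEX
                }
            }
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(demoteToVex(false, false, true, 256, true, 0, 5, 7));  // true
        System.out.println(demoteToVex(false, false, true, 256, true, 0, 17, 7)); // false
    }
}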
@ -605,6 +605,7 @@ private:
bool _legacy_mode_dq;
bool _legacy_mode_vl;
bool _legacy_mode_vlbw;
bool _is_managed;

class InstructionAttr *_attributes;

@ -811,12 +812,17 @@ private:
_legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
_legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
_legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
_is_managed = false;
_attributes = NULL;
}

void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
void clear_attributes(void) { _attributes = NULL; }

void set_managed(void) { _is_managed = true; }
void clear_managed(void) { _is_managed = false; }
bool is_managed(void) { return _is_managed; }

void lea(Register dst, Address src);

void mov(Register dst, Register src);

@ -1646,31 +1646,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ jmp(done);

__ bind(runtime);
__ push(rcx);
#ifdef _LP64
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
# ifndef _WIN64
__ push(rdi);
__ push(rsi);
# endif
#endif

save_live_registers(sasm, 3);

// load the pre-value
f.load_argument(0, rcx);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
#ifdef _LP64
# ifndef _WIN64
__ pop(rsi);
__ pop(rdi);
# endif
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
#endif
__ pop(rcx);

restore_live_registers(sasm);

__ bind(done);

__ pop(rdx);
@ -1744,27 +1728,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ jmp(enqueued);

__ bind(runtime);
#ifdef _LP64
__ push(r8);
__ push(r9);
__ push(r10);
__ push(r11);
# ifndef _WIN64
__ push(rdi);
__ push(rsi);
# endif
#endif

save_live_registers(sasm, 3);

__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
#ifdef _LP64
# ifndef _WIN64
__ pop(rsi);
__ pop(rdi);
# endif
__ pop(r11);
__ pop(r10);
__ pop(r9);
__ pop(r8);
#endif

restore_live_registers(sasm);

__ bind(enqueued);
__ pop(rdx);

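These two hunks replace hand-rolled push/pop sequences around the calls into the G1 barrier runtime (g1_wb_pre, g1_wb_post) with save_live_registers / restore_live_registers. For the barrier semantics rather than the register bookkeeping, here is a rough conceptual model in Java; every name is invented, and this is only a sketch of what the two slow paths log, not JVM code:

import java.util.ArrayDeque;
import java.util.Deque;

// Conceptual model only: G1's pre-barrier records the old value of a field
// (snapshot-at-the-beginning marking); the post-barrier records the card of
// the updated location for remembered-set refinement.
public class G1BarrierModel {
    static final Deque<Object> satbQueue = new ArrayDeque<>();
    static final Deque<Long> dirtyCardQueue = new ArrayDeque<>();

    static void preBarrier(Object previousValue) {   // g1_wb_pre analogue
        if (previousValue != null) {
            satbQueue.push(previousValue);
        }
    }

    static void postBarrier(long cardAddress) {      // g1_wb_post analogue
        dirtyCardQueue.push(cardAddress);
    }
}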
@ -96,7 +96,7 @@ void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, T
#endif
} else {
address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
*((Metadata**) operand) = record_metadata_reference(constant, CHECK);
*((void**) operand) = record_metadata_reference(constant, CHECK);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
}
}

@ -2600,15 +2600,12 @@ class StubGenerator: public StubCodeGenerator {
// rax - input length
//

address generate_cipherBlockChaining_decryptAESCrypt() {
address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
address start = __ pc();

Label L_exit, L_key_192_256, L_key_256;
Label L_singleBlock_loopTop_128;
Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
const Register from = rsi; // source array address
const Register to = rdx; // destination array address
const Register key = rcx; // key array address
@ -2617,14 +2614,24 @@ class StubGenerator: public StubCodeGenerator {
const Register len_reg = rbx; // src len (must be multiple of blocksize 16)
const Register pos = rax;

// xmm register assignments for the loops below
const XMMRegister xmm_result = xmm0;
const XMMRegister xmm_temp = xmm1;
// first 6 keys preloaded into xmm2-xmm7
const int XMM_REG_NUM_KEY_FIRST = 2;
const int XMM_REG_NUM_KEY_LAST = 7;
const int FIRST_NON_REG_KEY_offset = 0x70;
const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
const int PARALLEL_FACTOR = 4;
const int ROUNDS[3] = { 10, 12, 14 }; //aes rounds for key128, key192, key256

Label L_exit;
Label L_singleBlock_loopTop[3]; //128, 192, 256
Label L_multiBlock_loopTop[3]; //128, 192, 256

const XMMRegister xmm_prev_block_cipher = xmm0; // holds cipher of previous block
const XMMRegister xmm_key_shuf_mask = xmm1;

const XMMRegister xmm_key_tmp0 = xmm2;
const XMMRegister xmm_key_tmp1 = xmm3;

// registers holding the four results in the parallelized loop
const XMMRegister xmm_result0 = xmm4;
const XMMRegister xmm_result1 = xmm5;
const XMMRegister xmm_result2 = xmm6;
const XMMRegister xmm_result3 = xmm7;

__ enter(); // required for proper stackwalking of RuntimeStub frame
handleSOERegisters(true /*saving*/);
@ -2643,126 +2650,123 @@ class StubGenerator: public StubCodeGenerator {
const Address key_param (rbp, 8+8);
const Address rvec_param (rbp, 8+12);
const Address len_param (rbp, 8+16);

__ movptr(from , from_param);
__ movptr(to , to_param);
__ movptr(key , key_param);
__ movptr(rvec , rvec_param);
__ movptr(len_reg , len_param);

// the java expanded key ordering is rotated one position from what we want
// so we start from 0x10 here and hit 0x00 last
const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
__ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
// load up xmm regs 2 thru 6 with first 5 keys
for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
offset += 0x10;
}
__ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec

// inside here, use the rvec register to point to previous block cipher
// with which we xor at the end of each newly decrypted block
const Register prev_block_cipher_ptr = rvec;
__ xorptr(pos, pos);

// now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
__ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
__ cmpl(rax, 44);
__ jcc(Assembler::notEqual, L_key_192_256);
// rvec is reused
__ movl(rvec, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
__ cmpl(rvec, 52);
__ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
__ cmpl(rvec, 60);
__ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define DoFour(opc, src_reg) \
__ opc(xmm_result0, src_reg); \
__ opc(xmm_result1, src_reg); \
__ opc(xmm_result2, src_reg); \
__ opc(xmm_result3, src_reg); \

// 128-bit code follows here, parallelized
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop_128);
__ cmpptr(len_reg, 0); // any blocks left??
__ jcc(Assembler::equal, L_exit);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
__ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
__ aesdec(xmm_result, as_XMMRegister(rnum));
}
for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xa0; key_offset += 0x10) { // 128-bit runs up to key offset a0
aes_dec_key(xmm_result, xmm_temp, key, key_offset);
}
load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0
__ aesdeclast(xmm_result, xmm_temp);
__ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
__ pxor (xmm_result, xmm_temp); // xor with the current r vector
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
// no need to store r to memory until we exit
__ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr
__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jmp(L_singleBlock_loopTop_128);
for (int k = 0; k < 3; ++k) {
__ align(OptoLoopAlignment);
__ BIND(L_multiBlock_loopTop[k]);
__ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
__ jcc(Assembler::less, L_singleBlock_loopTop[k]);

__ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers
__ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
__ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
__ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

// the java expanded key ordering is rotated one position from what we want
// so we start from 0x10 here and hit 0x00 last
load_key(xmm_key_tmp0, key, 0x10, xmm_key_shuf_mask);
DoFour(pxor, xmm_key_tmp0); //xor with first key
// do the aes dec rounds
for (int rnum = 1; rnum <= ROUNDS[k];) {
//load two keys at a time
//k1->0x20, ..., k9->0xa0, k10->0x00
load_key(xmm_key_tmp1, key, (rnum + 1) * 0x10, xmm_key_shuf_mask);
load_key(xmm_key_tmp0, key, ((rnum + 2) % (ROUNDS[k] + 1)) * 0x10, xmm_key_shuf_mask); // hit 0x00 last!
DoFour(aesdec, xmm_key_tmp1);
rnum++;
if (rnum != ROUNDS[k]) {
DoFour(aesdec, xmm_key_tmp0);
}
else {
DoFour(aesdeclast, xmm_key_tmp0);
}
rnum++;
}

// for each result, xor with the r vector of previous cipher block
__ pxor(xmm_result0, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize));
__ pxor(xmm_result1, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize));
__ pxor(xmm_result2, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize));
__ pxor(xmm_result3, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks

// store 4 results into the next 64 bytes of output
__ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
__ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
__ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
__ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);

__ addptr(pos, 4 * AESBlockSize);
__ subptr(len_reg, 4 * AESBlockSize);
__ jmp(L_multiBlock_loopTop[k]);

//singleBlock starts here
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop[k]);
__ cmpptr(len_reg, 0); // any blocks left?
__ jcc(Assembler::equal, L_exit);
__ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
__ movdqa(xmm_result1, xmm_result0);

load_key(xmm_key_tmp0, key, 0x10, xmm_key_shuf_mask);
__ pxor(xmm_result0, xmm_key_tmp0);
// do the aes dec rounds
for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
// the java expanded key ordering is rotated one position from what we want
load_key(xmm_key_tmp0, key, (rnum + 1) * 0x10, xmm_key_shuf_mask);
__ aesdec(xmm_result0, xmm_key_tmp0);
}
load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
__ aesdeclast(xmm_result0, xmm_key_tmp0);
__ pxor(xmm_result0, xmm_prev_block_cipher); // xor with the current r vector
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result0); // store into the next 16 bytes of output
// no need to store r to memory until we exit
__ movdqa(xmm_prev_block_cipher, xmm_result1); // set up next r vector with cipher input from this block

__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jmp(L_singleBlock_loopTop[k]);
}//for 128/192/256

__ BIND(L_exit);
__ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
__ movptr(rvec , rvec_param); // restore this since used in loop
__ movdqu(Address(rvec, 0), xmm_temp); // final value of r stored in rvec of CipherBlockChaining object
__ movptr(rvec, rvec_param); // restore this since reused earlier
__ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
handleSOERegisters(false /*restoring*/);
__ movptr(rax, len_param); // return length
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ movptr(rax, len_param); // return length
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);


__ BIND(L_key_192_256);
// here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
__ cmpl(rax, 52);
__ jcc(Assembler::notEqual, L_key_256);

// 192-bit code follows here (could be optimized to use parallelism)
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop_192);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
__ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
__ aesdec(xmm_result, as_XMMRegister(rnum));
}
for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xc0; key_offset += 0x10) { // 192-bit runs up to key offset c0
aes_dec_key(xmm_result, xmm_temp, key, key_offset);
}
load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0
__ aesdeclast(xmm_result, xmm_temp);
__ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
__ pxor (xmm_result, xmm_temp); // xor with the current r vector
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
// no need to store r to memory until we exit
__ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr
__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jcc(Assembler::notEqual,L_singleBlock_loopTop_192);
__ jmp(L_exit);

__ BIND(L_key_256);
// 256-bit code follows here (could be optimized to use parallelism)
__ movl(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop_256);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
__ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
__ aesdec(xmm_result, as_XMMRegister(rnum));
}
for (int key_offset = FIRST_NON_REG_KEY_offset; key_offset <= 0xe0; key_offset += 0x10) { // 256-bit runs up to key offset e0
aes_dec_key(xmm_result, xmm_temp, key, key_offset);
}
load_key(xmm_temp, key, 0x00); // final key is stored in java expanded array at offset 0
__ aesdeclast(xmm_result, xmm_temp);
__ movdqu(xmm_temp, Address(prev_block_cipher_ptr, 0x00));
__ pxor (xmm_result, xmm_temp); // xor with the current r vector
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
// no need to store r to memory until we exit
__ lea(prev_block_cipher_ptr, Address(from, pos, Address::times_1, 0)); // set up new ptr
__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jcc(Assembler::notEqual,L_singleBlock_loopTop_256);
__ jmp(L_exit);

return start;
}


// CTR AES crypt.
// In 32-bit stub, parallelize 4 blocks at a time
// Arguments:
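The rewrite above works because CBC decryption, unlike CBC encryption, has no serial dependence between output blocks: plaintext block i is AESDEC(cipher[i]) XOR cipher[i-1], so four blocks can be decrypted in flight before the single-block tail loop finishes the remainder. A plain-Java reference for that block-level relationship, using only the standard javax.crypto API (key and IV here are throwaway demo values, nothing HotSpot-specific):

import java.util.Arrays;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

// Demonstrates the data dependence the stub exploits: each CBC plaintext
// block depends on exactly two ciphertext blocks, so decryption parallelizes.
public class CbcDecryptDependence {
    public static void main(String[] args) throws Exception {
        byte[] key = new byte[16];   // all-zero demo key (AES-128 -> 10 rounds)
        byte[] iv = new byte[16];
        byte[] plain = new byte[64]; // 4 blocks, like the stub's PARALLEL_FACTOR
        Arrays.fill(plain, (byte) 7);

        Cipher enc = Cipher.getInstance("AES/CBC/NoPadding");
        enc.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
        byte[] cipherText = enc.doFinal(plain);

        // Decrypt block 2 in isolation: only cipher blocks 1 and 2 are needed.
        Cipher dec = Cipher.getInstance("AES/CBC/NoPadding");
        byte[] prev = Arrays.copyOfRange(cipherText, 16, 32); // acts as the IV
        dec.init(Cipher.DECRYPT_MODE, new SecretKeySpec(key, "AES"), new IvParameterSpec(prev));
        byte[] block2 = dec.doFinal(Arrays.copyOfRange(cipherText, 32, 48));
        System.out.println(Arrays.equals(block2, Arrays.copyOfRange(plain, 32, 48))); // true
    }
}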
@ -3894,7 +3898,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
}

if (UseAESCTRIntrinsics) {

@ -3469,16 +3469,12 @@ class StubGenerator: public StubCodeGenerator {
// Output:
// rax - input length
//

address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
address start = __ pc();

Label L_exit, L_key_192_256, L_key_256;
Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128;
Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
const Register from = c_rarg0; // source array address
const Register to = c_rarg1; // destination array address
const Register key = c_rarg2; // key array address
@ -3492,7 +3488,17 @@ class StubGenerator: public StubCodeGenerator {
#endif
const Register pos = rax;

// keys 0-10 preloaded into xmm2-xmm12
const int PARALLEL_FACTOR = 4;
const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256

Label L_exit;
Label L_singleBlock_loopTopHead[3]; // 128, 192, 256
Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256
Label L_singleBlock_loopTop[3]; // 128, 192, 256
Label L_multiBlock_loopTopHead[3]; // 128, 192, 256
Label L_multiBlock_loopTop[3]; // 128, 192, 256

// keys 0-10 preloaded into xmm5-xmm15
const int XMM_REG_NUM_KEY_FIRST = 5;
const int XMM_REG_NUM_KEY_LAST = 15;
const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
@ -3519,7 +3525,7 @@ class StubGenerator: public StubCodeGenerator {
#else
__ push(len_reg); // Save
#endif

__ push(rbx);
// the java expanded key ordering is rotated one position from what we want
// so we start from 0x10 here and hit 0x00 last
const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
@ -3541,85 +3547,173 @@ class StubGenerator: public StubCodeGenerator {

__ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec

__ xorptr(pos, pos);

// now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
__ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
__ cmpl(rax, 44);
__ jcc(Assembler::notEqual, L_key_192_256);
__ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
__ cmpl(rbx, 52);
__ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
__ cmpl(rbx, 60);
__ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);

#define DoFour(opc, src_reg) \
__ opc(xmm_result0, src_reg); \
__ opc(xmm_result1, src_reg); \
__ opc(xmm_result2, src_reg); \
__ opc(xmm_result3, src_reg); \

// 128-bit code follows here, parallelized
__ movptr(pos, 0);
__ align(OptoLoopAlignment);
__ BIND(L_multiBlock_loopTop_128);
__ cmpptr(len_reg, 4*AESBlockSize); // see if at least 4 blocks left
__ jcc(Assembler::less, L_singleBlock_loopTop_128);
for (int k = 0; k < 3; ++k) {
__ BIND(L_multiBlock_loopTopHead[k]);
if (k != 0) {
__ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
__ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
}
if (k == 1) {
__ subptr(rsp, 6 * wordSize);
__ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15
load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
__ movdqu(Address(rsp, 2 * wordSize), xmm15);
load_key(xmm1, key, 0xc0); // 0xc0;
__ movdqu(Address(rsp, 4 * wordSize), xmm1);
} else if (k == 2) {
__ subptr(rsp, 10 * wordSize);
__ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15
load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes upto 0xe0
__ movdqu(Address(rsp, 6 * wordSize), xmm15);
load_key(xmm1, key, 0xe0); // 0xe0;
__ movdqu(Address(rsp, 8 * wordSize), xmm1);
load_key(xmm15, key, 0xb0); // 0xb0;
__ movdqu(Address(rsp, 2 * wordSize), xmm15);
load_key(xmm1, key, 0xc0); // 0xc0;
__ movdqu(Address(rsp, 4 * wordSize), xmm1);
}
__ align(OptoLoopAlignment);
__ BIND(L_multiBlock_loopTop[k]);
__ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
__ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

__ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize)); // get next 4 blocks into xmmresult registers
__ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize));
__ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize));
__ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize));
if (k != 0) {
__ movdqu(xmm15, Address(rsp, 2 * wordSize));
__ movdqu(xmm1, Address(rsp, 4 * wordSize));
}

#define DoFour(opc, src_reg) \
__ opc(xmm_result0, src_reg); \
__ opc(xmm_result1, src_reg); \
__ opc(xmm_result2, src_reg); \
__ opc(xmm_result3, src_reg);
__ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers
__ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
__ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
__ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

DoFour(pxor, xmm_key_first);
for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
DoFour(aesdec, as_XMMRegister(rnum));
}
DoFour(aesdeclast, xmm_key_last);
// for each result, xor with the r vector of previous cipher block
__ pxor(xmm_result0, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize));
__ pxor(xmm_result1, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize));
__ pxor(xmm_result2, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize));
__ pxor(xmm_result3, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize)); // this will carry over to next set of blocks
DoFour(pxor, xmm_key_first);
if (k == 0) {
for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
}
DoFour(aesdeclast, xmm_key_last);
} else if (k == 1) {
for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) {
DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
}
__ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
DoFour(aesdec, xmm1); // key : 0xc0
__ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
DoFour(aesdeclast, xmm_key_last);
} else if (k == 2) {
for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
}
DoFour(aesdec, xmm1); // key : 0xc0
__ movdqu(xmm15, Address(rsp, 6 * wordSize));
__ movdqu(xmm1, Address(rsp, 8 * wordSize));
DoFour(aesdec, xmm15); // key : 0xd0
__ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
DoFour(aesdec, xmm1); // key : 0xe0
__ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
DoFour(aesdeclast, xmm_key_last);
}

__ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output
__ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1);
__ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2);
__ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3);
// for each result, xor with the r vector of previous cipher block
__ pxor(xmm_result0, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize));
__ pxor(xmm_result1, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize));
__ pxor(xmm_result2, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize));
__ pxor(xmm_result3, xmm_prev_block_cipher);
__ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks
if (k != 0) {
__ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher);
}

__ addptr(pos, 4*AESBlockSize);
__ subptr(len_reg, 4*AESBlockSize);
__ jmp(L_multiBlock_loopTop_128);
__ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output
__ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
__ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
__ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);

// registers used in the non-parallelized loops
// xmm register assignments for the loops below
const XMMRegister xmm_result = xmm0;
const XMMRegister xmm_prev_block_cipher_save = xmm2;
const XMMRegister xmm_key11 = xmm3;
const XMMRegister xmm_key12 = xmm4;
const XMMRegister xmm_temp = xmm4;
__ addptr(pos, PARALLEL_FACTOR * AESBlockSize);
__ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);
__ jmp(L_multiBlock_loopTop[k]);

__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop_128);
__ cmpptr(len_reg, 0); // any blocks left??
__ jcc(Assembler::equal, L_exit);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
__ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
__ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
__ aesdec(xmm_result, as_XMMRegister(rnum));
}
__ aesdeclast(xmm_result, xmm_key_last);
__ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
// no need to store r to memory until we exit
__ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
// registers used in the non-parallelized loops
// xmm register assignments for the loops below
const XMMRegister xmm_result = xmm0;
const XMMRegister xmm_prev_block_cipher_save = xmm2;
const XMMRegister xmm_key11 = xmm3;
const XMMRegister xmm_key12 = xmm4;
const XMMRegister key_tmp = xmm4;

__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jmp(L_singleBlock_loopTop_128);
__ BIND(L_singleBlock_loopTopHead[k]);
if (k == 1) {
__ addptr(rsp, 6 * wordSize);
} else if (k == 2) {
__ addptr(rsp, 10 * wordSize);
}
__ cmpptr(len_reg, 0); // any blocks left??
__ jcc(Assembler::equal, L_exit);
__ BIND(L_singleBlock_loopTopHead2[k]);
if (k == 1) {
load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes upto 0xc0
load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes upto 0xc0
}
if (k == 2) {
load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes upto 0xe0
}
__ align(OptoLoopAlignment);
__ BIND(L_singleBlock_loopTop[k]);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
__ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
__ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
for (int rnum = 1; rnum <= 9 ; rnum++) {
__ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
}
if (k == 1) {
__ aesdec(xmm_result, xmm_key11);
__ aesdec(xmm_result, xmm_key12);
}
if (k == 2) {
__ aesdec(xmm_result, xmm_key11);
load_key(key_tmp, key, 0xc0);
__ aesdec(xmm_result, key_tmp);
load_key(key_tmp, key, 0xd0);
__ aesdec(xmm_result, key_tmp);
load_key(key_tmp, key, 0xe0);
__ aesdec(xmm_result, key_tmp);
}

__ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
__ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
// no need to store r to memory until we exit
__ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
if (k != 2) {
__ jmp(L_exit);
}
} //for 128/192/256

__ BIND(L_exit);
__ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
__ pop(rbx);
#ifdef _WIN64
// restore regs belonging to calling function
for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
@ -3631,69 +3725,8 @@ class StubGenerator: public StubCodeGenerator {
#endif
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);


__ BIND(L_key_192_256);
// here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
load_key(xmm_key11, key, 0xb0);
__ cmpl(rax, 52);
__ jcc(Assembler::notEqual, L_key_256);

// 192-bit code follows here (could be optimized to use parallelism)
load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0
__ movptr(pos, 0);
__ align(OptoLoopAlignment);

__ BIND(L_singleBlock_loopTop_192);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
__ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
__ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
__ aesdec(xmm_result, as_XMMRegister(rnum));
}
__ aesdec(xmm_result, xmm_key11);
__ aesdec(xmm_result, xmm_key12);
__ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
__ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
// no need to store r to memory until we exit
__ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jcc(Assembler::notEqual,L_singleBlock_loopTop_192);
__ jmp(L_exit);

__ BIND(L_key_256);
// 256-bit code follows here (could be optimized to use parallelism)
__ movptr(pos, 0);
__ align(OptoLoopAlignment);

__ BIND(L_singleBlock_loopTop_256);
__ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
__ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
__ pxor (xmm_result, xmm_key_first); // do the aes dec rounds
for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
__ aesdec(xmm_result, as_XMMRegister(rnum));
}
__ aesdec(xmm_result, xmm_key11);
load_key(xmm_temp, key, 0xc0);
__ aesdec(xmm_result, xmm_temp);
load_key(xmm_temp, key, 0xd0);
__ aesdec(xmm_result, xmm_temp);
load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0
__ aesdec(xmm_result, xmm_temp);
__ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0
__ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector
__ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
// no need to store r to memory until we exit
__ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
__ addptr(pos, AESBlockSize);
__ subptr(len_reg, AESBlockSize);
__ jcc(Assembler::notEqual,L_singleBlock_loopTop_256);
__ jmp(L_exit);

return start;
}
}

address generate_upper_word_mask() {
__ align(64);

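Throughout both stubs, the branch on 44/52/60 is a test of the expanded-key length: AES expands a 128/192/256-bit key into 4 * (rounds + 1) 32-bit words, i.e. 44, 52 or 60 ints for 10, 12 or 14 rounds, which is exactly the ROUNDS[3] = {10, 12, 14} table. A tiny self-contained check of that arithmetic:

// The stubs dispatch on the AESCrypt expanded-key length (in ints).
// Expanded key length = 4 * (rounds + 1), per the FIPS-197 key expansion.
public class AesRounds {
    static int roundsForExpandedKeyInts(int kleInts) {
        switch (kleInts) {
            case 44: return 10; // 128-bit key
            case 52: return 12; // 192-bit key
            case 60: return 14; // 256-bit key
            default: throw new IllegalArgumentException("bad key length: " + kleInts);
        }
    }

    public static void main(String[] args) {
        for (int rounds : new int[] {10, 12, 14}) {
            assert roundsForExpandedKeyInts(4 * (rounds + 1)) == rounds;
        }
        System.out.println("44/52/60 ints -> 10/12/14 rounds");
    }
}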
@ -795,6 +795,9 @@ static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
}
if (cbuf) {
MacroAssembler _masm(cbuf);
// EVEX spills remain EVEX: Compressed displacement is better than AVX on spill mem operations,
// it maps more cases to single byte displacement
_masm.set_managed();
if (reg_lo+1 == reg_hi) { // double move?
if (is_load) {
__ movdbl(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
@ -845,6 +848,8 @@ static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst
int src_hi, int dst_hi, int size, outputStream* st ) {
if (cbuf) {
MacroAssembler _masm(cbuf);
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
_masm.set_managed();
if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
__ movdbl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
as_XMMRegister(Matcher::_regEncode[src_lo]));
@ -883,6 +888,8 @@ static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
// 32-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
_masm.set_managed();
__ movdl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
#ifndef PRODUCT
@ -899,6 +906,8 @@ static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
// 32-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
_masm.set_managed();
__ movdl(as_Register(Matcher::_regEncode[dst_lo]),
as_XMMRegister(Matcher::_regEncode[src_lo]));
#ifndef PRODUCT

@ -42,7 +42,8 @@ define_pd_global(bool, UncommonNullCast, true);
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000 );
define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(intx, InitArrayShortSize, -1); // not used

#define DEFAULT_STACK_YELLOW_PAGES (2)
#define DEFAULT_STACK_RED_PAGES (1)

@ -249,7 +249,6 @@ public class NMethod extends CodeBlob {
// public int age();
// public boolean isMarkedForDeoptimization();
// public boolean isMarkedForUnloading();
// public boolean isMarkedForReclamation();
// public int level();
// public int version();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -479,6 +479,11 @@ final class CompilerToVM {
*/
native String getSymbol(long metaspaceSymbol);

/**
* Looks up a VMSymbol from a String.
*/
native long lookupSymbol(String symbol);

/**
* Looks for the next Java stack frame matching an entry in {@code methods}.
*

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,4 +39,6 @@ public interface HotSpotMemoryAccessProvider extends MemoryAccessProvider {
Constant readNarrowKlassPointerConstant(Constant base, long displacement, CompressEncoding encoding);

Constant readMethodPointerConstant(Constant base, long displacement);

Constant readSymbolConstant(Constant base, long displacement);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -82,13 +82,13 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
Object base = asObject(baseConstant);
if (base != null) {
switch (bits) {
case 8:
case Byte.SIZE:
return UNSAFE.getByte(base, displacement);
case 16:
case Short.SIZE:
return UNSAFE.getShort(base, displacement);
case 32:
case Integer.SIZE:
return UNSAFE.getInt(base, displacement);
case 64:
case Long.SIZE:
return UNSAFE.getLong(base, displacement);
default:
throw new JVMCIError("%d", bits);
@ -96,13 +96,13 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
} else {
long pointer = asRawPointer(baseConstant);
switch (bits) {
case 8:
case Byte.SIZE:
return UNSAFE.getByte(pointer + displacement);
case 16:
case Short.SIZE:
return UNSAFE.getShort(pointer + displacement);
case 32:
case Integer.SIZE:
return UNSAFE.getInt(pointer + displacement);
case 64:
case Long.SIZE:
return UNSAFE.getLong(pointer + displacement);
default:
throw new JVMCIError("%d", bits);
@ -151,7 +151,8 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
Object o = readRawObject(baseConstant, displacement, runtime.getConfig().useCompressedOops);
return HotSpotObjectConstantImpl.forObject(o);
} else {
return readPrimitiveConstant(kind, baseConstant, displacement, kind.getByteCount() * 8);
int bits = kind.getByteCount() * Byte.SIZE;
return readPrimitiveConstant(kind, baseConstant, displacement, bits);
}
}

@ -229,4 +230,16 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider, Ho
HotSpotResolvedJavaMethodImpl method = runtime.getCompilerToVM().getResolvedJavaMethod(baseObject, displacement);
return HotSpotMetaspaceConstantImpl.forMetaspaceObject(method, false);
}

@Override
public Constant readSymbolConstant(Constant base, long displacement) {
int bits = runtime.getConfig().symbolPointerSize * Byte.SIZE;
long pointer = readRawValue(base, displacement, bits);
if (pointer == 0) {
return JavaConstant.NULL_POINTER;
} else {
String symbol = runtime.getCompilerToVM().getSymbol(pointer);
return new HotSpotSymbol(symbol, pointer).asConstant();
}
}
}

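The cleanup above replaces magic bit widths with the JDK's SIZE constants; the two spellings are interchangeable, since Byte.SIZE, Short.SIZE, Integer.SIZE and Long.SIZE are defined as 8, 16, 32 and 64. A trivial standalone illustration of the refactored switch shape:

// Same dispatch shape as the refactored readRawValue switch; standalone.
public class BitWidthSwitch {
    static String name(int bits) {
        switch (bits) {
            case Byte.SIZE:    return "byte";   // 8
            case Short.SIZE:   return "short";  // 16
            case Integer.SIZE: return "int";    // 32
            case Long.SIZE:    return "long";   // 64
            default: throw new IllegalArgumentException("unsupported width: " + bits);
        }
    }

    public static void main(String[] args) {
        System.out.println(name(Byte.SIZE) + " " + name(64));
    }
}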
@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,6 +78,15 @@ public class HotSpotMetaAccessProvider implements MetaAccessProvider, HotSpotPro
return new HotSpotSignature(runtime, signature);
}

public HotSpotSymbol lookupSymbol(String symbol) {
long pointer = runtime.getCompilerToVM().lookupSymbol(symbol);
if (pointer == 0) {
return null;
} else {
return new HotSpotSymbol(symbol, pointer);
}
}

/**
* {@link Field} object of {@link Method#slot}.
*/

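Taken together with the CompilerToVM and HotSpotSymbol hunks, the intended call path appears to be: look the string up in the VM's symbol table, wrap the raw Symbol* in a HotSpotSymbol, and turn it into a metaspace Constant a compiler can embed. A hedged usage sketch — it only compiles and runs inside a JVMCI-enabled VM, and metaAccess is assumed to come from the running JVMCI runtime:

import jdk.vm.ci.hotspot.HotSpotMetaAccessProvider;
import jdk.vm.ci.hotspot.HotSpotSymbol;
import jdk.vm.ci.meta.Constant;

// Sketch of the new symbol-lookup path shown in the hunks above.
public class SymbolLookupSketch {
    static Constant symbolConstant(HotSpotMetaAccessProvider metaAccess, String name) {
        HotSpotSymbol symbol = metaAccess.lookupSymbol(name); // null if not in the VM symbol table
        return symbol == null ? null : symbol.asConstant();
    }
}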
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,4 +29,6 @@ public interface HotSpotMetaspaceConstant extends HotSpotConstant, VMConstant {
HotSpotResolvedObjectType asResolvedJavaType();

HotSpotResolvedJavaMethod asResolvedJavaMethod();

HotSpotSymbol asSymbol();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -108,4 +108,11 @@ final class HotSpotMetaspaceConstantImpl implements HotSpotMetaspaceConstant, VM
}
return null;
}

public HotSpotSymbol asSymbol() {
if (metaspaceObject instanceof HotSpotSymbol) {
return (HotSpotSymbol) metaspaceObject;
}
return null;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -20,28 +20,38 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.vm.ci.hotspot;

import java.net.InetAddress;
import java.net.UnknownHostException;
import sun.net.spi.nameservice.NameService;
import jdk.vm.ci.meta.Constant;

public class ThrowingNameService implements NameService {
static boolean firstCall = true;
/**
* Class to access the C++ {@code vmSymbols} table.
*/
public final class HotSpotSymbol implements MetaspaceWrapperObject {

@Override
public InetAddress[] lookupAllHostAddr(String host) throws UnknownHostException {
if (firstCall) {
firstCall = false;
// throw unchecked exception first time round
throw new IllegalStateException();
}
private final String symbol;
private final long pointer;

// return any valid address
return new InetAddress[] { InetAddress.getLoopbackAddress() };
HotSpotSymbol(String symbol, long pointer) {
this.symbol = symbol;
this.pointer = pointer;
}

public String getSymbol() {
return symbol;
}

public Constant asConstant() {
return HotSpotMetaspaceConstantImpl.forMetaspaceObject(this, false);
}

@Override
public String getHostByAddr(byte[] addr) throws UnknownHostException {
throw new IllegalStateException();
public long getMetaspacePointer() {
return pointer;
}

@Override
public String toString() {
return "Symbol<" + symbol + ">";
}
}

@ -1001,6 +1001,7 @@ public class HotSpotVMConfig {
@HotSpotVMField(name = "Klass::_modifier_flags", type = "jint", get = HotSpotVMField.Type.OFFSET) @Stable public int klassModifierFlagsOffset;
@HotSpotVMField(name = "Klass::_access_flags", type = "AccessFlags", get = HotSpotVMField.Type.OFFSET) @Stable public int klassAccessFlagsOffset;
@HotSpotVMField(name = "Klass::_layout_helper", type = "jint", get = HotSpotVMField.Type.OFFSET) @Stable public int klassLayoutHelperOffset;
@HotSpotVMField(name = "Klass::_name", type = "Symbol*", get = HotSpotVMField.Type.OFFSET) @Stable public int klassNameOffset;

@HotSpotVMConstant(name = "Klass::_lh_neutral_value") @Stable public int klassLayoutHelperNeutralValue;
@HotSpotVMConstant(name = "Klass::_lh_instance_slow_path_bit") @Stable public int klassLayoutHelperInstanceSlowPathBit;
@ -1536,8 +1537,9 @@ public class HotSpotVMConfig {
@HotSpotVMAddress(name = "JVMCIRuntime::exception_handler_for_pc") @Stable public long exceptionHandlerForPcAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::monitorenter") @Stable public long monitorenterAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::monitorexit") @Stable public long monitorexitAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::create_null_exception") @Stable public long createNullPointerExceptionAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::create_out_of_bounds_exception") @Stable public long createOutOfBoundsExceptionAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::throw_and_post_jvmti_exception") @Stable public long throwAndPostJvmtiExceptionAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::throw_klass_external_name_exception") @Stable public long throwKlassExternalNameExceptionAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::throw_class_cast_exception") @Stable public long throwClassCastExceptionAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::log_primitive") @Stable public long logPrimitiveAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::log_object") @Stable public long logObjectAddress;
@HotSpotVMAddress(name = "JVMCIRuntime::log_printf") @Stable public long logPrintfAddress;

@ -28,8 +28,7 @@ package jdk.vm.ci.runtime;
public interface JVMCICompilerFactory {

/**
* Get the name of this compiler. The compiler will be selected when the jvmci.compiler system
* property is equal to this name.
* Get the name of this compiler.
*/
String getCompilerName();

@ -24,6 +24,9 @@
*/

module jdk.vm.ci {
// 8153756
requires jdk.unsupported;

uses jdk.vm.ci.hotspot.HotSpotVMEventListener;
uses jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory;
uses jdk.vm.ci.runtime.JVMCICompilerFactory;

@ -277,7 +277,7 @@ bool os::Solaris::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_
return false;
} else {
*fr = os::fetch_frame_from_ucontext(thread, uc);
*fr = frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc());
*fr = frame(fr->sender_sp(), fr->sp());
if (!fr->is_java_frame()) {
assert(fr->safe_for_sender(thread), "Safety check");
*fr = fr->java_sender();

@ -209,9 +209,6 @@ bool Compiler::is_intrinsic_supported(const methodHandle& method) {
case vmIntrinsics::_putLong_raw:
case vmIntrinsics::_putFloat_raw:
case vmIntrinsics::_putDouble_raw:
case vmIntrinsics::_putOrderedObject:
case vmIntrinsics::_putOrderedInt:
case vmIntrinsics::_putOrderedLong:
case vmIntrinsics::_getShortUnaligned:
case vmIntrinsics::_getCharUnaligned:
case vmIntrinsics::_getIntUnaligned:

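This hunk and the matching GraphBuilder hunk below drop the C1 intrinsics for Unsafe.putOrdered*, whose release-ordered "lazy set" behaviour is expressible in Java 9 through VarHandle access modes. A hedged sketch of the modern equivalent, using only the standard java.lang.invoke API (it illustrates the semantics, not the C1 code itself):

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

// setRelease is the VarHandle counterpart of the old putOrderedInt-style
// store: earlier loads/stores may not be reordered past it.
public class ReleaseStore {
    private volatile int value;
    private static final VarHandle VALUE;
    static {
        try {
            VALUE = MethodHandles.lookup().findVarHandle(ReleaseStore.class, "value", int.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    void lazySet(int v) {
        VALUE.setRelease(this, v);
    }

    public static void main(String[] args) {
        ReleaseStore s = new ReleaseStore();
        s.lazySet(42);
        System.out.println(s.value);
    }
}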
@ -3450,9 +3450,6 @@ void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee) {
case vmIntrinsics::_putLong_raw : append_unsafe_put_raw(callee, T_LONG ); return;
case vmIntrinsics::_putFloat_raw : append_unsafe_put_raw(callee, T_FLOAT ); return;
case vmIntrinsics::_putDouble_raw : append_unsafe_put_raw(callee, T_DOUBLE); return;
case vmIntrinsics::_putOrderedObject : append_unsafe_put_obj(callee, T_OBJECT, true); return;
case vmIntrinsics::_putOrderedInt : append_unsafe_put_obj(callee, T_INT, true); return;
case vmIntrinsics::_putOrderedLong : append_unsafe_put_obj(callee, T_LONG, true); return;
case vmIntrinsics::_compareAndSwapLong:
case vmIntrinsics::_compareAndSwapInt:
case vmIntrinsics::_compareAndSwapObject: append_unsafe_CAS(callee); return;

@ -999,8 +999,16 @@ void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_v
Phi* phi = sux_val->as_Phi();
// cur_val can be null without phi being null in conjunction with inlining
if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
Phi* cur_phi = cur_val->as_Phi();
if (cur_phi != NULL && cur_phi->is_illegal()) {
// Phi and local would need to get invalidated
// (which is unexpected for Linear Scan).
// But this case is very rare so we simply bail out.
bailout("propagation of illegal phi");
return;
}
LIR_Opr operand = cur_val->operand();
if (cur_val->operand()->is_illegal()) {
if (operand->is_illegal()) {
assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
"these can be produced lazily");
operand = operand_for_instruction(cur_val);

@ -99,14 +99,14 @@ class ValueStack: public CompilationResourceObj {
  void clear_locals();  // sets all locals to NULL;

  void invalidate_local(int i) {
    assert(_locals.at(i)->type()->is_single_word() ||
    assert(!_locals.at(i)->type()->is_double_word() ||
           _locals.at(i + 1) == NULL, "hi-word of doubleword value must be NULL");
    _locals.at_put(i, NULL);
  }

  Value local_at(int i) const {
    Value x = _locals.at(i);
    assert(x == NULL || x->type()->is_single_word() ||
    assert(x == NULL || !x->type()->is_double_word() ||
           _locals.at(i + 1) == NULL, "hi-word of doubleword value must be NULL");
    return x;
  }
@ -131,7 +131,7 @@ class ValueStack: public CompilationResourceObj {
  // stack access
  Value stack_at(int i) const {
    Value x = _stack.at(i);
    assert(x->type()->is_single_word() ||
    assert(!x->type()->is_double_word() ||
           _stack.at(i + 1) == NULL, "hi-word of doubleword value must be NULL");
    return x;
  }
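
Context for the three assertion changes above: long and double values occupy two consecutive slots in the locals and expression stack, and HotSpot keeps the high slot NULL so stray reads of it fail fast. The old asserts demanded a single-word type outright; the new ones only reject a double-word value whose hi slot is occupied, which also admits NULL entries. A minimal sketch of the two-slot convention (hypothetical names, plain C++, not HotSpot's API):

    #include <cassert>
    #include <cstddef>

    struct Value { bool is_double_word; };

    // Store a value into an interpreter-style locals array. A double-word
    // value (long/double) owns slots i and i + 1; the hi slot is kept NULL
    // on purpose, which is exactly the invariant the asserts above check.
    void put_local(Value* locals[], std::size_t n, std::size_t i, Value* v) {
      locals[i] = v;
      if (v != NULL && v->is_double_word) {
        assert(i + 1 < n);
        locals[i + 1] = NULL;  // hi-word of a doubleword value must be NULL
      }
    }
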
@ -176,7 +176,7 @@
  product(bool, InlineSynchronizedMethods, true,                           \
          "Inline synchronized methods")                                   \
                                                                           \
  develop(bool, InlineNIOCheckIndex, true,                                 \
  diagnostic(bool, InlineNIOCheckIndex, true,                              \
          "Intrinsify java.nio.Buffer.checkIndex")                         \
                                                                           \
  develop(bool, CanonicalizeNodes, true,                                   \
@ -773,7 +773,7 @@ ciMethod* ciEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
  Symbol* sig_sym = cpool->signature_ref_at(index);

  if (cpool->has_preresolution()
      || (holder == ciEnv::MethodHandle_klass() &&
      || ((holder == ciEnv::MethodHandle_klass() || holder == ciEnv::VarHandle_klass()) &&
          MethodHandles::is_signature_polymorphic_name(holder->get_Klass(), name_sym))) {
    // Short-circuit lookups for JSR 292-related call sites.
    // That is, do not rely only on name-based lookups, because they may fail
@ -1,4 +1,4 @@
/* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
/* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -1057,8 +1057,6 @@ void* ciReplay::load_inline_data(ciMethod* method, int entry_bci, int comp_level
int ciReplay::replay_impl(TRAPS) {
  HandleMark hm;
  ResourceMark rm;
  // Make sure we don't run with background compilation
  BackgroundCompilation = false;

  if (ReplaySuppressInitializers > 2) {
    // ReplaySuppressInitializers > 2 means that we want to allow
@ -1455,8 +1455,7 @@ void ClassLoader::compile_the_world() {
  EXCEPTION_MARK;
  HandleMark hm(THREAD);
  ResourceMark rm(THREAD);
  // Make sure we don't run with background compilation
  BackgroundCompilation = false;

  // Find bootstrap loader
  Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
  // Iterate over all bootstrap class path entries
@ -2563,7 +2563,8 @@ static methodHandle unpack_method_and_appendix(Handle mname,
  return empty;
}

methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name,
methodHandle SystemDictionary::find_method_handle_invoker(KlassHandle klass,
                                                          Symbol* name,
                                                          Symbol* signature,
                                                          KlassHandle accessing_klass,
                                                          Handle *appendix_result,
@ -2574,7 +2575,6 @@ methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name,
  Handle method_type =
    SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_(empty));

  KlassHandle mh_klass = SystemDictionary::MethodHandle_klass();
  int ref_kind = JVM_REF_invokeVirtual;
  Handle name_str = StringTable::intern(name, CHECK_(empty));
  objArrayHandle appendix_box = oopFactory::new_objArray(SystemDictionary::Object_klass(), 1, CHECK_(empty));
@ -2589,7 +2589,7 @@ methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name,
  JavaCallArguments args;
  args.push_oop(accessing_klass()->java_mirror());
  args.push_int(ref_kind);
  args.push_oop(mh_klass()->java_mirror());
  args.push_oop(klass()->java_mirror());
  args.push_oop(name_str());
  args.push_oop(method_type());
  args.push_oop(appendix_box());
@ -153,6 +153,7 @@ class SymbolPropertyTable;
  /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
  do_klass(DirectMethodHandle_klass,    java_lang_invoke_DirectMethodHandle,  Opt ) \
  do_klass(MethodHandle_klass,          java_lang_invoke_MethodHandle,        Pre ) \
  do_klass(VarHandle_klass,             java_lang_invoke_VarHandle,           Pre ) \
  do_klass(MemberName_klass,            java_lang_invoke_MemberName,          Pre ) \
  do_klass(MethodHandleNatives_klass,   java_lang_invoke_MethodHandleNatives, Pre ) \
  do_klass(LambdaForm_klass,            java_lang_invoke_LambdaForm,          Opt ) \
@ -518,7 +519,8 @@ public:
  // JSR 292
  // find a java.lang.invoke.MethodHandle.invoke* method for a given signature
  // (asks Java to compute it if necessary, except in a compiler thread)
  static methodHandle find_method_handle_invoker(Symbol* name,
  static methodHandle find_method_handle_invoker(KlassHandle klass,
                                                 Symbol* name,
                                                 Symbol* signature,
                                                 KlassHandle accessing_klass,
                                                 Handle *appendix_result,
@ -592,9 +592,6 @@ bool vmIntrinsics::is_disabled_by_flags(const methodHandle& method) {
  case vmIntrinsics::_putLong_raw:
  case vmIntrinsics::_putFloat_raw:
  case vmIntrinsics::_putDouble_raw:
  case vmIntrinsics::_putOrderedObject:
  case vmIntrinsics::_putOrderedLong:
  case vmIntrinsics::_putOrderedInt:
  case vmIntrinsics::_getAndAddInt:
  case vmIntrinsics::_getAndAddLong:
  case vmIntrinsics::_getAndSetInt:
@ -120,7 +120,7 @@
  template(jdk_internal_misc_Signal,                  "jdk/internal/misc/Signal")                 \
  template(java_lang_AssertionStatusDirectives,       "java/lang/AssertionStatusDirectives")      \
  template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
  template(sun_misc_PostVMInitHook,                   "sun/misc/PostVMInitHook")                  \
  template(jdk_internal_vm_PostVMInitHook,            "jdk/internal/vm/PostVMInitHook")           \
  template(sun_net_www_ParseUtil,                     "sun/net/www/ParseUtil")                    \
                                                                                                  \
  template(jdk_internal_loader_ClassLoaders_AppClassLoader, "jdk/internal/loader/ClassLoaders$AppClassLoader") \
@ -289,6 +289,7 @@
  template(java_lang_invoke_MutableCallSite,          "java/lang/invoke/MutableCallSite")         \
  template(java_lang_invoke_VolatileCallSite,         "java/lang/invoke/VolatileCallSite")        \
  template(java_lang_invoke_MethodHandle,             "java/lang/invoke/MethodHandle")            \
  template(java_lang_invoke_VarHandle,                "java/lang/invoke/VarHandle")               \
  template(java_lang_invoke_MethodType,               "java/lang/invoke/MethodType")              \
  template(java_lang_invoke_MethodType_signature,     "Ljava/lang/invoke/MethodType;")            \
  template(java_lang_invoke_MemberName_signature,     "Ljava/lang/invoke/MemberName;")            \
@ -655,7 +656,7 @@
                                                                                                  \
  /* JVMTI/java.lang.instrument support and VM Attach mechanism */                                \
  template(jdk_internal_module_Modules,               "jdk/internal/module/Modules")              \
  template(sun_misc_VMSupport,                        "sun/misc/VMSupport")                       \
  template(jdk_internal_vm_VMSupport,                 "jdk/internal/vm/VMSupport")                \
  template(transformedByAgent_name,                   "transformedByAgent")                       \
  template(transformedByAgent_signature,              "(Ljava/lang/reflect/Module;)V")            \
  template(appendToClassPathForInstrumentation_name,  "appendToClassPathForInstrumentation")      \
@ -1329,16 +1330,6 @@
  do_intrinsic(_weakCompareAndSwapIntAcquire, jdk_internal_misc_Unsafe, weakCompareAndSwapIntAcquire_name, compareAndSwapInt_signature, F_R) \
  do_intrinsic(_weakCompareAndSwapIntRelease, jdk_internal_misc_Unsafe, weakCompareAndSwapIntRelease_name, compareAndSwapInt_signature, F_R) \
                                                                                                  \
  do_intrinsic(_putOrderedObject, jdk_internal_misc_Unsafe, putOrderedObject_name, putOrderedObject_signature, F_RN) \
   do_name(     putOrderedObject_name,      "putOrderedObject")                                   \
   do_alias(    putOrderedObject_signature, /*(LObject;JLObject;)V*/ putObject_signature)         \
  do_intrinsic(_putOrderedLong,   jdk_internal_misc_Unsafe, putOrderedLong_name,   putOrderedLong_signature,   F_RN) \
   do_name(     putOrderedLong_name,        "putOrderedLong")                                     \
   do_alias(    putOrderedLong_signature,   /*(Ljava/lang/Object;JJ)V*/ putLong_signature)        \
  do_intrinsic(_putOrderedInt,    jdk_internal_misc_Unsafe, putOrderedInt_name,    putOrderedInt_signature,    F_RN) \
   do_name(     putOrderedInt_name,         "putOrderedInt")                                      \
   do_alias(    putOrderedInt_signature,    /*(Ljava/lang/Object;JI)V*/ putInt_signature)         \
                                                                                                  \
  do_intrinsic(_getAndAddInt,     jdk_internal_misc_Unsafe, getAndAddInt_name,     getAndAddInt_signature,     F_R) \
   do_name(     getAndAddInt_name,          "getAndAddInt")                                       \
   do_signature(getAndAddInt_signature,     "(Ljava/lang/Object;JI)I" )                           \
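
The putOrdered* intrinsics deleted above performed an "ordered" store: a store that may not be reordered with earlier memory writes, i.e. release semantics (the C1 and C2 hunks in this commit mapped them to release-ordered unsafe stores before removing them). A hedged, standalone C++11 illustration of the same ordering contract, not HotSpot code:

    #include <atomic>

    std::atomic<int> ready(0);
    int payload = 0;

    void publish(int v) {
      payload = v;                                // ordinary store
      ready.store(1, std::memory_order_release);  // "ordered"/release store:
                                                  // the payload write cannot
                                                  // be reordered after it
    }

    int consume() {
      // pairing acquire load; if it observes 1, payload is visible
      if (ready.load(std::memory_order_acquire) == 1) {
        return payload;
      }
      return -1;
    }
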
@ -530,7 +530,6 @@ const char* nmethod::compile_kind() const {
void nmethod::init_defaults() {
  _state                      = in_use;
  _unloading_clock            = 0;
  _marked_for_reclamation     = 0;
  _has_flushed_dependencies   = 0;
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
@ -1332,8 +1331,19 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
  }
  // Unlink the osr method, so we do not look this up again
  if (is_osr_method()) {
    invalidate_osr_method();
    // Invalidate the osr nmethod only once
    if (is_in_use()) {
      invalidate_osr_method();
    }
#ifdef ASSERT
    if (method() != NULL) {
      // Make sure osr nmethod is invalidated, i.e. not on the list
      bool found = method()->method_holder()->remove_osr_nmethod(this);
      assert(!found, "osr nmethod should have been invalidated");
    }
#endif
  }

  // If _method is already NULL the Method* is about to be unloaded,
  // so we don't have to break the cycle. Note that it is possible to
  // have the Method* live here, in case we unload the nmethod because
@ -1387,8 +1397,9 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
void nmethod::invalidate_osr_method() {
  assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
  // Remove from list of active nmethods
  if (method() != NULL)
  if (method() != NULL) {
    method()->method_holder()->remove_osr_nmethod(this);
  }
}

void nmethod::log_state_change() const {
@ -1436,8 +1447,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
  // invalidate osr nmethod before acquiring the patching lock since
  // they both acquire leaf locks and we don't want a deadlock.
  // This logic is equivalent to the logic below for patching the
  // verified entry point of regular methods.
  if (is_osr_method()) {
  // verified entry point of regular methods. We check that the
  // nmethod is in use to ensure that it is invalidated only once.
  if (is_osr_method() && is_in_use()) {
    // this effectively makes the osr nmethod not entrant
    invalidate_osr_method();
  }
@ -1503,13 +1515,21 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
    }
  } // leave critical region under Patching_lock

#ifdef ASSERT
  if (is_osr_method() && method() != NULL) {
    // Make sure osr nmethod is invalidated, i.e. not on the list
    bool found = method()->method_holder()->remove_osr_nmethod(this);
    assert(!found, "osr nmethod should have been invalidated");
  }
#endif

  // When the nmethod becomes zombie it is no longer alive so the
  // dependencies must be flushed. nmethods in the not_entrant
  // state will be flushed later when the transition to zombie
  // happens or they get unloaded.
  if (state == zombie) {
    {
      // Flushing dependecies must be done before any possible
      // Flushing dependencies must be done before any possible
      // safepoint can sneak in, otherwise the oops used by the
      // dependency logic could have become stale.
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -1525,7 +1545,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {

  // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
  // event and it hasn't already been reported for this nmethod then
  // report it now. The event may have been reported earilier if the GC
  // report it now. The event may have been reported earlier if the GC
  // marked it for unloading). JvmtiDeferredEventQueue support means
  // we no longer go to a safepoint here.
  post_compiled_method_unload();
@ -1553,18 +1573,18 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {

void nmethod::flush() {
  // Note that there are no valid oops in the nmethod anymore.
  assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
  assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

  assert(!is_osr_method() || is_unloaded() || is_zombie(),
         "osr nmethod must be unloaded or zombie before flushing");
  assert(is_zombie() || is_osr_method(), "must be a zombie method");
  assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
  assert_locked_or_safepoint(CodeCache_lock);

  // completely deallocate this method
  Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
  if (PrintMethodFlushing) {
    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
    tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
                  "/Free CodeCache:" SIZE_FORMAT "Kb",
                  _compile_id, p2i(this), CodeCache::blob_count(),
                  is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(),
                  CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
  }
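
A recurring pattern in the hunks above: invalidate_osr_method() is now called only while the nmethod is still in use, and a debug-only block re-runs the removal afterwards to assert it finds nothing. A hedged sketch of this make-it-idempotent-then-verify idiom (hypothetical names, plain C++, not HotSpot code):

    #include <cassert>

    struct OsrEntry {
      bool in_use;
      bool on_osr_list;
      OsrEntry() : in_use(true), on_osr_list(true) {}

      // Returns whether the entry was still on the holder's OSR list.
      bool remove_from_osr_list() {
        bool found = on_osr_list;
        on_osr_list = false;
        return found;
      }

      void invalidate() {
        if (in_use) {              // guard: do the unlink at most once
          remove_from_osr_list();
          in_use = false;
        }
        // verification pass: a second removal must find nothing
        bool found = remove_from_osr_list();
        assert(!found);
        (void)found;
      }
    };
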
@ -2916,10 +2936,7 @@ void nmethod::print() const {
  tty->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
  tty->print(" for method " INTPTR_FORMAT , p2i(method()));
  tty->print(" { ");
  if (is_in_use())      tty->print("in_use ");
  if (is_not_entrant()) tty->print("not_entrant ");
  if (is_zombie())      tty->print("zombie ");
  if (is_unloaded())    tty->print("unloaded ");
  tty->print_cr("%s ", state());
  if (on_scavenge_root_list())  tty->print("scavenge_root ");
  tty->print_cr("}:");
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -188,8 +188,6 @@ class nmethod : public CodeBlob {
  // protected by CodeCache_lock
  bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)

  bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)

  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
@ -207,7 +205,7 @@ class nmethod : public CodeBlob {
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  // Protected by Patching_lock
  volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
  volatile unsigned char _state;             // {in_use, not_entrant, zombie, unloaded}

  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod

@ -438,7 +436,20 @@ class nmethod : public CodeBlob {
  bool  is_alive() const                          { return _state == in_use || _state == not_entrant; }
  bool  is_not_entrant() const                    { return _state == not_entrant; }
  bool  is_zombie() const                         { return _state == zombie; }
  bool  is_unloaded() const                       { return _state == unloaded; }
  bool  is_unloaded() const                       { return _state == unloaded; }

  // returns a string version of the nmethod state
  const char* state() const {
    switch(_state) {
      case in_use:      return "in use";
      case not_entrant: return "not_entrant";
      case zombie:      return "zombie";
      case unloaded:    return "unloaded";
      default:
        fatal("unexpected nmethod state: %d", _state);
        return NULL;
    }
  }

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
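
The renamed state comment (alive becomes in_use) and the new state() accessor make the nmethod lifecycle explicit: in_use, not_entrant, zombie, unloaded, with transitions moving only forward. A hedged sketch of that one-way state machine (plain C++, not HotSpot code):

    #include <cassert>

    enum NMethodState { in_use = 0, not_entrant = 1, zombie = 2, unloaded = 3 };

    struct FakeNMethod {
      NMethodState state;
      FakeNMethod() : state(in_use) {}

      // States are ordered and an nmethod never becomes "more alive" again,
      // which is what lets callers guard one-shot work with a state check.
      void transition(NMethodState next) {
        assert(next > state);
        state = next;
      }
    };
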
@ -490,9 +501,6 @@ class nmethod : public CodeBlob {
    _has_flushed_dependencies = 1;
  }

  bool  is_marked_for_reclamation() const         { return _marked_for_reclamation; }
  void  mark_for_reclamation()                    { _marked_for_reclamation = 1; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

@ -773,7 +773,8 @@ void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_
#endif // !ZERO && !SHARK
  // Initialize the compilation queue
  if (c2_compiler_count > 0) {
    _c2_compile_queue  = new CompileQueue("C2 compile queue");
    const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue";
    _c2_compile_queue  = new CompileQueue(name);
    _compilers[1]->set_num_compiler_threads(c2_compiler_count);
  }
  if (c1_compiler_count > 0) {
@ -1169,7 +1170,8 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
      CompilationPolicy::policy()->delay_compilation(method());
      return NULL;
    }
    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, !directive->BackgroundCompilationOption, THREAD);
    bool is_blocking = !directive->BackgroundCompilationOption || CompileTheWorld || ReplayCompiles;
    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, comment, is_blocking, THREAD);
  }

  // return requested nmethod
@ -1649,6 +1651,10 @@ void CompileBroker::init_compiler_thread_log() {
      tty->print_cr("Opening compilation log %s", file_name);
    }
    CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file_name, fp, thread_id);
    if (log == NULL) {
      fclose(fp);
      return;
    }
    thread->init_log(log);

    if (xtty != NULL) {
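
Two behavioral notes on the CompileBroker hunks above: the C2 queue is renamed when the JVMCI compiler is active, and the blocking decision is now computed explicitly as is_blocking = !directive->BackgroundCompilationOption || CompileTheWorld || ReplayCompiles, so batch modes such as -XX:+CompileTheWorld and compilation replay always get foreground (blocking) compiles instead of relying on background compilation having been disabled globally. The new NULL check on the CompileLog allocation also closes the file handle rather than continuing with a missing log.
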
@ -90,10 +90,16 @@ void CallInfo::set_virtual(KlassHandle resolved_klass,
void CallInfo::set_handle(const methodHandle& resolved_method,
                          Handle resolved_appendix,
                          Handle resolved_method_type, TRAPS) {
  set_handle(SystemDictionary::MethodHandle_klass(), resolved_method, resolved_appendix, resolved_method_type, CHECK);
}

void CallInfo::set_handle(KlassHandle resolved_klass,
                          const methodHandle& resolved_method,
                          Handle resolved_appendix,
                          Handle resolved_method_type, TRAPS) {
  if (resolved_method.is_null()) {
    THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null");
  }
  KlassHandle resolved_klass = SystemDictionary::MethodHandle_klass();
  assert(resolved_method->intrinsic_id() == vmIntrinsics::_invokeBasic ||
         resolved_method->is_compiled_lambda_form(),
         "linkMethod must return one of these");
@ -433,7 +439,8 @@ methodHandle LinkResolver::lookup_polymorphic_method(
                  vmIntrinsics::name_at(iid), klass->external_name(),
                  name->as_C_string(), full_signature->as_C_string());
  }
  if (klass() == SystemDictionary::MethodHandle_klass() &&
  if ((klass() == SystemDictionary::MethodHandle_klass() ||
       klass() == SystemDictionary::VarHandle_klass()) &&
      iid != vmIntrinsics::_none) {
    if (MethodHandles::is_signature_polymorphic_intrinsic(iid)) {
      // Most of these do not need an up-call to Java to resolve, so can be done anywhere.
@ -482,6 +489,7 @@ methodHandle LinkResolver::lookup_polymorphic_method(
      Handle appendix;
      Handle method_type;
      methodHandle result = SystemDictionary::find_method_handle_invoker(
                                                            klass,
                                                            name,
                                                            full_signature,
                                                            link_info.current_klass(),
@ -1561,13 +1569,15 @@ void LinkResolver::resolve_handle_call(CallInfo& result,
                                       const LinkInfo& link_info,
                                       TRAPS) {
  // JSR 292:  this must be an implicitly generated method MethodHandle.invokeExact(*...) or similar
  assert(link_info.resolved_klass()() == SystemDictionary::MethodHandle_klass(), "");
  KlassHandle resolved_klass = link_info.resolved_klass();
  assert(resolved_klass() == SystemDictionary::MethodHandle_klass() ||
         resolved_klass() == SystemDictionary::VarHandle_klass(), "");
  assert(MethodHandles::is_signature_polymorphic_name(link_info.name()), "");
  Handle resolved_appendix;
  Handle resolved_method_type;
  methodHandle resolved_method = lookup_polymorphic_method(link_info,
                                                           &resolved_appendix, &resolved_method_type, CHECK);
  result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK);
  result.set_handle(resolved_klass, resolved_method, resolved_appendix, resolved_method_type, CHECK);
}

static void wrap_invokedynamic_exception(TRAPS) {
@ -69,6 +69,9 @@ class CallInfo : public StackObj {
                   int vtable_index, TRAPS);
  void set_handle(const methodHandle& resolved_method,
                  Handle resolved_appendix, Handle resolved_method_type, TRAPS);
  void set_handle(KlassHandle resolved_klass,
                  const methodHandle& resolved_method,
                  Handle resolved_appendix, Handle resolved_method_type, TRAPS);
  void set_common(KlassHandle resolved_klass, KlassHandle selected_klass,
                  const methodHandle& resolved_method,
                  const methodHandle& selected_method,
@ -54,8 +54,10 @@ void Rewriter::compute_index_maps() {
        add_resolved_references_entry(i);
        break;
      case JVM_CONSTANT_Utf8:
        if (_pool->symbol_at(i) == vmSymbols::java_lang_invoke_MethodHandle())
        if (_pool->symbol_at(i) == vmSymbols::java_lang_invoke_MethodHandle() ||
            _pool->symbol_at(i) == vmSymbols::java_lang_invoke_VarHandle()) {
          saw_mh_symbol = true;
        }
        break;
    }
  }
@ -200,6 +202,12 @@ void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_i
    // we may need a resolved_refs entry for the appendix
    add_invokedynamic_resolved_references_entries(cp_index, cache_index);
    status = +1;
  } else if (_pool->klass_ref_at_noresolve(cp_index) == vmSymbols::java_lang_invoke_VarHandle() &&
             MethodHandles::is_signature_polymorphic_name(SystemDictionary::VarHandle_klass(),
                                                          _pool->name_ref_at(cp_index))) {
    // we may need a resolved_refs entry for the appendix
    add_invokedynamic_resolved_references_entries(cp_index, cache_index);
    status = +1;
  } else {
    status = -1;
  }
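
Taken together, these linker hunks extend the JSR 292 signature-polymorphic machinery from java.lang.invoke.MethodHandle to java.lang.invoke.VarHandle (the JEP 193 variable handles work): method resolution, CallInfo, constant-pool rewriting of invokevirtual to invokehandle, and, in later hunks of this commit, intrinsic-id assignment all now accept either holder class.
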
@ -177,7 +177,13 @@ OopMap* CodeInstaller::create_oop_map(Handle debug_info, TRAPS) {
  return map;
}

Metadata* CodeInstaller::record_metadata_reference(Handle constant, TRAPS) {
void* CodeInstaller::record_metadata_reference(Handle constant, TRAPS) {
  /*
   * This method needs to return a raw (untyped) pointer, since the value of a pointer to the base
   * class is in general not equal to the pointer of the subclass. When patching metaspace pointers,
   * the compiler expects a direct pointer to the subclass (Klass*, Method* or Symbol*), not a
   * pointer to the base class (Metadata* or MetaspaceObj*).
   */
  oop obj = HotSpotMetaspaceConstantImpl::metaspaceObject(constant);
  if (obj->is_a(HotSpotResolvedObjectTypeImpl::klass())) {
    Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj));
@ -191,6 +197,11 @@ Metadata* CodeInstaller::record_metadata_reference(Handle constant, TRAPS) {
    int index = _oop_recorder->find_index(method);
    TRACE_jvmci_3("metadata[%d of %d] = %s", index, _oop_recorder->metadata_count(), method->name()->as_C_string());
    return method;
  } else if (obj->is_a(HotSpotSymbol::klass())) {
    Symbol* symbol = (Symbol*) (address) HotSpotSymbol::pointer(obj);
    assert(!HotSpotMetaspaceConstantImpl::compressed(constant), "unexpected compressed symbol pointer %s @ " INTPTR_FORMAT, symbol->as_C_string(), p2i(symbol));
    TRACE_jvmci_3("symbol = %s", symbol->as_C_string());
    return symbol;
  } else {
    JVMCI_ERROR_NULL("unexpected metadata reference for constant of type %s", obj->klass()->signature_name());
  }
@ -706,7 +717,7 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer,
        JVMCI_ERROR_OK("unexpected compressed Klass* in 32-bit mode");
#endif
      } else {
        *((Metadata**) dest) = record_metadata_reference(constant, CHECK_OK);
        *((void**) dest) = record_metadata_reference(constant, CHECK_OK);
      }
    } else if (constant->is_a(HotSpotObjectConstantImpl::klass())) {
      Handle obj = HotSpotObjectConstantImpl::object(constant);
@ -189,7 +189,7 @@ protected:
  ScopeValue* get_scope_value(Handle value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second, TRAPS);
  MonitorValue* get_monitor_value(Handle value, GrowableArray<ScopeValue*>* objects, TRAPS);

  Metadata* record_metadata_reference(Handle constant, TRAPS);
  void* record_metadata_reference(Handle constant, TRAPS);
#ifdef _LP64
  narrowKlass record_narrow_metadata_reference(Handle constant, TRAPS);
#endif
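
The comment added in the hunk above rests on a general C++ fact worth demonstrating: converting a derived-class pointer to a base-class pointer can change its numeric value, so a slot patched through Metadata** could receive an adjusted address rather than the Klass*/Method*/Symbol* the compiled code expects. A standalone demonstration (plain C++, not HotSpot code):

    #include <cstdio>

    struct A { int a; };
    struct B { int b; };
    struct C : A, B { int c; };

    int main() {
      C obj;
      C* derived = &obj;
      B* base = derived;  // implicit upcast: the pointer is adjusted to the
                          // B subobject, typically derived + sizeof(A)
      std::printf("derived=%p base=%p\n",
                  static_cast<void*>(derived), static_cast<void*>(base));
      return 0;
    }
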
@ -448,7 +448,10 @@ C2V_END

C2V_VMENTRY(jboolean, canInlineMethod,(JNIEnv *, jobject, jobject jvmci_method))
  methodHandle method = CompilerToVM::asMethod(jvmci_method);
  return !method->is_not_compilable() && !CompilerOracle::should_not_inline(method) && !method->dont_inline();
  // In hosted mode ignore the not_compilable flags since they are never set by
  // the JVMCI compiler.
  bool is_compilable = UseJVMCICompiler ? !method->is_not_compilable(CompLevel_full_optimization) : true;
  return is_compilable && !CompilerOracle::should_not_inline(method) && !method->dont_inline();
C2V_END

C2V_VMENTRY(jboolean, shouldInlineMethod,(JNIEnv *, jobject, jobject jvmci_method))
@ -1048,6 +1051,11 @@ C2V_VMENTRY(jobject, getSymbol, (JNIEnv*, jobject, jlong symbol))
  return JNIHandles::make_local(THREAD, sym());
C2V_END

C2V_VMENTRY(jlong, lookupSymbol, (JNIEnv*, jobject, jobject string))
  Symbol* symbol = java_lang_String::as_symbol_or_null(JNIHandles::resolve(string));
  return (jlong) symbol;
C2V_END

bool matches(jobjectArray methods, Method* method) {
  objArrayOop methods_oop = (objArrayOop) JNIHandles::resolve(methods);

@ -1475,6 +1483,7 @@ JNINativeMethod CompilerToVM::methods[] = {
  {CC"isMature",                    CC"("METASPACE_METHOD_DATA")Z",                                    FN_PTR(isMature)},
  {CC"hasCompiledCodeForOSR",       CC"("HS_RESOLVED_METHOD"II)Z",                                     FN_PTR(hasCompiledCodeForOSR)},
  {CC"getSymbol",                   CC"(J)"STRING,                                                     FN_PTR(getSymbol)},
  {CC"lookupSymbol",                CC"("STRING")J",                                                   FN_PTR(lookupSymbol)},
  {CC"getNextStackFrame",           CC"("HS_STACK_FRAME_REF "["RESOLVED_METHOD"I)"HS_STACK_FRAME_REF,  FN_PTR(getNextStackFrame)},
  {CC"materializeVirtualObjects",   CC"("HS_STACK_FRAME_REF"Z)V",                                      FN_PTR(materializeVirtualObjects)},
  {CC"shouldDebugNonSafepoints",    CC"()Z",                                                           FN_PTR(shouldDebugNonSafepoints)},
@ -64,6 +64,9 @@ class JVMCIJavaClasses : AllStatic {
  start_class(HotSpotResolvedJavaMethodImpl)                   \
    long_field(HotSpotResolvedJavaMethodImpl, metaspaceMethod) \
  end_class                                                    \
  start_class(HotSpotSymbol)                                   \
    long_field(HotSpotSymbol, pointer)                         \
  end_class                                                    \
  start_class(InstalledCode)                                   \
    long_field(InstalledCode, address)                         \
    long_field(InstalledCode, entryPoint)                      \
@ -363,20 +363,6 @@ address JVMCIRuntime::exception_handler_for_pc(JavaThread* thread) {
  return continuation;
}

JRT_ENTRY(void, JVMCIRuntime::create_null_exception(JavaThread* thread))
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
  thread->set_vm_result(PENDING_EXCEPTION);
  CLEAR_PENDING_EXCEPTION;
JRT_END

JRT_ENTRY(void, JVMCIRuntime::create_out_of_bounds_exception(JavaThread* thread, jint index))
  char message[jintAsStringSize];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
  thread->set_vm_result(PENDING_EXCEPTION);
  CLEAR_PENDING_EXCEPTION;
JRT_END

JRT_ENTRY_NO_ASYNC(void, JVMCIRuntime::monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock))
  IF_TRACE_jvmci_3 {
    char type[O_BUFLEN];
@ -438,6 +424,21 @@ JRT_LEAF(void, JVMCIRuntime::monitorexit(JavaThread* thread, oopDesc* obj, Basic
  }
JRT_END

JRT_ENTRY(void, JVMCIRuntime::throw_and_post_jvmti_exception(JavaThread* thread, Symbol* name, const char* message))
  SharedRuntime::throw_and_post_jvmti_exception(thread, name, message);
JRT_END

JRT_ENTRY(void, JVMCIRuntime::throw_klass_external_name_exception(JavaThread* thread, Symbol* exception, Klass* klass))
  ResourceMark rm(thread);
  SharedRuntime::throw_and_post_jvmti_exception(thread, exception, klass->external_name());
JRT_END

JRT_ENTRY(void, JVMCIRuntime::throw_class_cast_exception(JavaThread* thread, Symbol* exception, Klass* caster_klass, Klass* target_klass))
  ResourceMark rm(thread);
  const char* message = SharedRuntime::generate_class_cast_message(caster_klass, target_klass);
  SharedRuntime::throw_and_post_jvmti_exception(thread, exception, message);
JRT_END

JRT_LEAF(void, JVMCIRuntime::log_object(JavaThread* thread, oopDesc* obj, bool as_string, bool newline))
  ttyLocker ttyl;

@ -800,12 +801,9 @@ void JVMCIRuntime::shutdown(TRAPS) {

bool JVMCIRuntime::treat_as_trivial(Method* method) {
  if (_HotSpotJVMCIRuntime_initialized) {
    oop loader = method->method_holder()->class_loader();
    if (loader == NULL) {
      for (int i = 0; i < _trivial_prefixes_count; i++) {
        if (method->method_holder()->name()->starts_with(_trivial_prefixes[i])) {
          return true;
        }
    for (int i = 0; i < _trivial_prefixes_count; i++) {
      if (method->method_holder()->name()->starts_with(_trivial_prefixes[i])) {
        return true;
      }
    }
  }
@ -141,8 +141,6 @@ class JVMCIRuntime: public AllStatic {
  static address exception_handler_for_pc(JavaThread* thread);
  static void monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock);
  static void monitorexit (JavaThread* thread, oopDesc* obj, BasicLock* lock);
  static void create_null_exception(JavaThread* thread);
  static void create_out_of_bounds_exception(JavaThread* thread, jint index);
  static void vm_error(JavaThread* thread, jlong where, jlong format, jlong value);
  static oopDesc* load_and_clear_exception(JavaThread* thread);
  static void log_printf(JavaThread* thread, oopDesc* format, jlong v1, jlong v2, jlong v3);
@ -157,6 +155,12 @@ class JVMCIRuntime: public AllStatic {
  static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child);
  static void new_store_pre_barrier(JavaThread* thread);

  // used to throw exceptions from compiled JVMCI code
  static void throw_and_post_jvmti_exception(JavaThread* thread, Symbol* exception, const char* message);
  // helper methods to throw exception with complex messages
  static void throw_klass_external_name_exception(JavaThread* thread, Symbol* exception, Klass* klass);
  static void throw_class_cast_exception(JavaThread* thread, Symbol* exception, Klass* caster_klass, Klass* target_klass);

  // Test only function
  static int test_deoptimize_call_int(JavaThread* thread, int value);
};
@ -49,6 +49,7 @@
  do_klass(HotSpotJVMCIMetaAccessContext_klass,     jdk_vm_ci_hotspot_HotSpotJVMCIMetaAccessContext,  Jvmci) \
  do_klass(HotSpotJVMCIRuntime_klass,               jdk_vm_ci_hotspot_HotSpotJVMCIRuntime,            Jvmci) \
  do_klass(HotSpotSpeculationLog_klass,             jdk_vm_ci_hotspot_HotSpotSpeculationLog,          Jvmci) \
  do_klass(HotSpotSymbol_klass,                     jdk_vm_ci_hotspot_HotSpotSymbol,                  Jvmci) \
  do_klass(Assumptions_ConcreteMethod_klass,        jdk_vm_ci_meta_Assumptions_ConcreteMethod,        Jvmci) \
  do_klass(Assumptions_NoFinalizableSubclass_klass, jdk_vm_ci_meta_Assumptions_NoFinalizableSubclass, Jvmci) \
  do_klass(Assumptions_ConcreteSubtype_klass,       jdk_vm_ci_meta_Assumptions_ConcreteSubtype,       Jvmci) \
@ -165,6 +165,7 @@
  nonstatic_field(Klass, _super_check_offset, juint)   \
  nonstatic_field(Klass, _subklass, Klass*)            \
  nonstatic_field(Klass, _layout_helper, jint)         \
  nonstatic_field(Klass, _name, Symbol*)               \
  nonstatic_field(Klass, _prototype_header, markOop)   \
  nonstatic_field(Klass, _next_sibling, Klass*)        \
  nonstatic_field(Klass, _java_mirror, oop)            \
@ -551,8 +552,9 @@
  declare_function(JVMCIRuntime::exception_handler_for_pc)            \
  declare_function(JVMCIRuntime::monitorenter)                        \
  declare_function(JVMCIRuntime::monitorexit)                         \
  declare_function(JVMCIRuntime::create_null_exception)               \
  declare_function(JVMCIRuntime::create_out_of_bounds_exception)      \
  declare_function(JVMCIRuntime::throw_and_post_jvmti_exception)      \
  declare_function(JVMCIRuntime::throw_klass_external_name_exception) \
  declare_function(JVMCIRuntime::throw_class_cast_exception)          \
  declare_function(JVMCIRuntime::log_primitive)                       \
  declare_function(JVMCIRuntime::log_object)                          \
  declare_function(JVMCIRuntime::log_printf)                          \
@ -50,6 +50,7 @@
  template(jdk_vm_ci_hotspot_HotSpotJVMCIMetaAccessContext, "jdk/vm/ci/hotspot/HotSpotJVMCIMetaAccessContext") \
  template(jdk_vm_ci_hotspot_HotSpotJVMCIRuntime,           "jdk/vm/ci/hotspot/HotSpotJVMCIRuntime")           \
  template(jdk_vm_ci_hotspot_HotSpotSpeculationLog,         "jdk/vm/ci/hotspot/HotSpotSpeculationLog")         \
  template(jdk_vm_ci_hotspot_HotSpotSymbol,                 "jdk/vm/ci/hotspot/HotSpotSymbol")                 \
  template(jdk_vm_ci_meta_JavaConstant,                     "jdk/vm/ci/meta/JavaConstant")                     \
  template(jdk_vm_ci_meta_PrimitiveConstant,                "jdk/vm/ci/meta/PrimitiveConstant")                \
  template(jdk_vm_ci_meta_RawConstant,                      "jdk/vm/ci/meta/RawConstant")                      \
@ -30,8 +30,6 @@

class VirtualSpaceNode;

const size_t metadata_chunk_initialize = 0xf7f7f7f7;

size_t Metachunk::object_alignment() {
  // Must align pointers and sizes to 8,
  // so that 64 bit types get correctly aligned.
@ -58,12 +56,7 @@ Metachunk::Metachunk(size_t word_size,
  _top = initial_top();
#ifdef ASSERT
  set_is_tagged_free(false);
  size_t data_word_size = pointer_delta(end(),
                                        _top,
                                        sizeof(MetaWord));
  Copy::fill_to_words((HeapWord*)_top,
                      data_word_size,
                      metadata_chunk_initialize);
  mangle(uninitMetaWordVal);
#endif
}

@ -98,12 +91,12 @@ void Metachunk::print_on(outputStream* st) const {
}

#ifndef PRODUCT
void Metachunk::mangle() {
  // Mangle the payload of the chunk and not the links that
void Metachunk::mangle(juint word_value) {
  // Overwrite the payload of the chunk and not the links that
  // maintain list of chunks.
  HeapWord* start = (HeapWord*)(bottom() + overhead());
  HeapWord* start = (HeapWord*)initial_top();
  size_t size = word_size() - overhead();
  Copy::fill_to_words(start, size, metadata_chunk_initialize);
  Copy::fill_to_words(start, size, word_value);
}
#endif // PRODUCT

@ -145,7 +145,9 @@ class Metachunk : public Metabase<Metachunk> {

  bool contains(const void* ptr) { return bottom() <= ptr && ptr < _top; }

  NOT_PRODUCT(void mangle();)
#ifndef PRODUCT
  void mangle(juint word_value);
#endif

  void print_on(outputStream* st) const;
  void verify();
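
With mangle() now taking the fill pattern as a parameter, debug builds can stamp freshly allocated chunks (uninitMetaWordVal, in the constructor) and returned chunks (badMetaWordVal, in the metaspace.cpp hunks below) with different values, so a crash dump distinguishes use-before-init from use-after-free at a glance. A hedged sketch of the idea with made-up pattern values (plain C++, not HotSpot's Copy::fill_to_words):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    const uint32_t kUninitPattern = 0xf1f1f1f1;  // assumed "fresh" marker
    const uint32_t kFreedPattern  = 0xf5f5f5f5;  // assumed "freed" marker

    // Fill a chunk's payload (not its list links) with a recognizable
    // pattern; the caller picks the pattern that matches the event.
    void mangle(uint32_t* payload, std::size_t words, uint32_t pattern) {
      std::fill(payload, payload + words, pattern);
    }
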
@ -811,11 +811,6 @@ void VirtualSpaceNode::verify_container_count() {
BlockFreelist::BlockFreelist() : _dictionary(new BlockTreeDictionary()) {}

BlockFreelist::~BlockFreelist() {
  LogHandle(gc, metaspace, freelist) log;
  if (log.is_trace()) {
    ResourceMark rm;
    dictionary()->print_free_lists(log.trace_stream());
  }
  delete _dictionary;
}

@ -2145,6 +2140,7 @@ void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
    // by the call to return_chunk_at_head();
    Metachunk* next = cur->next();
    DEBUG_ONLY(cur->set_is_tagged_free(true);)
    NOT_PRODUCT(cur->mangle(badMetaWordVal);)
    list->return_chunk_at_head(cur);
    cur = next;
  }
@ -2169,11 +2165,9 @@ SpaceManager::~SpaceManager() {
    log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
    ResourceMark rm;
    locked_print_chunks_in_use_on(log.trace_stream());
    block_freelists()->print_on(log.trace_stream());
  }

  // Do not mangle freed Metachunks. The chunk size inside Metachunks
  // is during the freeing of a VirtualSpaceNodes.

  // Have to update before the chunks_in_use lists are emptied
  // below.
  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
@ -2206,9 +2200,8 @@ SpaceManager::~SpaceManager() {
  Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);

  while (humongous_chunks != NULL) {
#ifdef ASSERT
    humongous_chunks->set_is_tagged_free(true);
#endif
    DEBUG_ONLY(humongous_chunks->set_is_tagged_free(true);)
    NOT_PRODUCT(humongous_chunks->mangle(badMetaWordVal);)
    log.trace(PTR_FORMAT " (" SIZE_FORMAT ") ", p2i(humongous_chunks), humongous_chunks->word_size());
    assert(humongous_chunks->word_size() == (size_t)
           align_size_up(humongous_chunks->word_size(),
@ -2527,7 +2520,7 @@ void SpaceManager::mangle_freed_chunks() {
    for (Metachunk* curr = chunks_in_use(index);
         curr != NULL;
         curr = curr->next()) {
      curr->mangle();
      curr->mangle(uninitMetaWordVal);
    }
  }
}
@ -2614,8 +2614,8 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) {
  }
}

void InstanceKlass::remove_osr_nmethod(nmethod* n) {
// Remove osr nmethod from the list. Return true if found and removed.
bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
  // This is a short non-blocking critical region, so the no safepoint check is ok.
  MutexLockerEx ml(OsrList_lock, Mutex::_no_safepoint_check_flag);
  assert(n->is_osr_method(), "wrong kind of nmethod");
@ -2624,6 +2624,7 @@ void InstanceKlass::remove_osr_nmethod(nmethod* n) {
  int max_level = CompLevel_none;  // Find the max comp level excluding n
  Method* m = n->method();
  // Search for match
  bool found = false;
  while(cur != NULL && cur != n) {
    if (TieredCompilation && m == cur->method()) {
      // Find max level before n
@ -2634,6 +2635,7 @@ void InstanceKlass::remove_osr_nmethod(nmethod* n) {
  }
  nmethod* next = NULL;
  if (cur == n) {
    found = true;
    next = cur->osr_link();
    if (last == NULL) {
      // Remove first element
@ -2654,6 +2656,7 @@ void InstanceKlass::remove_osr_nmethod(nmethod* n) {
    }
    m->set_highest_osr_comp_level(max_level);
  }
  return found;
}

int InstanceKlass::mark_osr_nmethods(const Method* m) {
@ -878,7 +878,7 @@ public:
  nmethod* osr_nmethods_head() const         { return _osr_nmethods_head; };
  void set_osr_nmethods_head(nmethod* h)     { _osr_nmethods_head = h; };
  void add_osr_nmethod(nmethod* n);
  void remove_osr_nmethod(nmethod* n);
  bool remove_osr_nmethod(nmethod* n);
  int mark_osr_nmethods(const Method* m);
  nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;
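
remove_osr_nmethod() now reports whether the nmethod was actually found on the holder's OSR list, which is what lets the nmethod.cpp hunks earlier in this commit assert that invalidation already ran (a second removal must find nothing). A hedged sketch of the same signature change on a generic singly linked list (hypothetical types, not HotSpot code):

    struct Node { Node* next; };

    // Unlink n from the list headed by head; return whether it was present.
    bool remove_from_list(Node*& head, Node* n) {
      for (Node** cur = &head; *cur != 0; cur = &(*cur)->next) {
        if (*cur == n) {
          *cur = n->next;  // splice the node out
          return true;     // found and removed
        }
      }
      return false;        // not on the list (already removed)
    }
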
@ -1351,11 +1351,16 @@ void Method::init_intrinsic_id() {
  // ditto for method and signature:
  vmSymbols::SID name_id = vmSymbols::find_sid(name());
  if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle)
      && name_id == vmSymbols::NO_SID)
      && klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_VarHandle)
      && name_id == vmSymbols::NO_SID) {
    return;
  }
  vmSymbols::SID sig_id = vmSymbols::find_sid(signature());
  if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle)
      && sig_id == vmSymbols::NO_SID) return;
      && klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_VarHandle)
      && sig_id == vmSymbols::NO_SID) {
    return;
  }
  jshort flags = access_flags().as_short();

  vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
@ -1383,8 +1388,9 @@ void Method::init_intrinsic_id() {
  }
  break;

  // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*.
  // Signature-polymorphic methods: MethodHandle.invoke*, InvokeDynamic.*., VarHandle
  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_MethodHandle):
  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_VarHandle):
    if (!is_native())  break;
    id = MethodHandles::signature_polymorphic_name_id(method_holder(), name());
    if (is_static() != MethodHandles::is_signature_polymorphic_static(id))
@ -595,26 +595,26 @@
  product(bool, BlockLayoutRotateLoops, true,                              \
          "Allow back branches to be fall throughs in the block layour")   \
                                                                           \
  develop(bool, InlineReflectionGetCallerClass, true,                      \
  diagnostic(bool, InlineReflectionGetCallerClass, true,                   \
          "inline sun.reflect.Reflection.getCallerClass(), known to be "   \
          "part of base library DLL")                                      \
                                                                           \
  develop(bool, InlineObjectCopy, true,                                    \
  diagnostic(bool, InlineObjectCopy, true,                                 \
          "inline Object.clone and Arrays.copyOf[Range] intrinsics")       \
                                                                           \
  develop(bool, SpecialStringCompareTo, true,                              \
  diagnostic(bool, SpecialStringCompareTo, true,                           \
          "special version of string compareTo")                           \
                                                                           \
  develop(bool, SpecialStringIndexOf, true,                                \
  diagnostic(bool, SpecialStringIndexOf, true,                             \
          "special version of string indexOf")                             \
                                                                           \
  develop(bool, SpecialStringEquals, true,                                 \
  diagnostic(bool, SpecialStringEquals, true,                              \
          "special version of string equals")                              \
                                                                           \
  develop(bool, SpecialArraysEquals, true,                                 \
  diagnostic(bool, SpecialArraysEquals, true,                              \
          "special version of Arrays.equals(char[],char[])")               \
                                                                           \
  product(bool, SpecialEncodeISOArray, true,                               \
  diagnostic(bool, SpecialEncodeISOArray, true,                            \
          "special version of ISO_8859_1$Encoder.encodeISOArray")          \
                                                                           \
  develop(bool, BailoutToInterpreterForThrows, false,                      \
@ -716,22 +716,22 @@
  diagnostic(bool, OptimizeExpensiveOps, true,                             \
          "Find best control for expensive operations")                    \
                                                                           \
  product(bool, UseMathExactIntrinsics, true,                              \
  diagnostic(bool, UseMathExactIntrinsics, true,                           \
          "Enables intrinsification of various java.lang.Math functions")  \
                                                                           \
  product(bool, UseMultiplyToLenIntrinsic, false,                          \
  diagnostic(bool, UseMultiplyToLenIntrinsic, false,                       \
          "Enables intrinsification of BigInteger.multiplyToLen()")        \
                                                                           \
  product(bool, UseSquareToLenIntrinsic, false,                            \
  diagnostic(bool, UseSquareToLenIntrinsic, false,                         \
          "Enables intrinsification of BigInteger.squareToLen()")          \
                                                                           \
  product(bool, UseMulAddIntrinsic, false,                                 \
  diagnostic(bool, UseMulAddIntrinsic, false,                              \
          "Enables intrinsification of BigInteger.mulAdd()")               \
                                                                           \
  product(bool, UseMontgomeryMultiplyIntrinsic, false,                     \
  diagnostic(bool, UseMontgomeryMultiplyIntrinsic, false,                  \
          "Enables intrinsification of BigInteger.montgomeryMultiply()")   \
                                                                           \
  product(bool, UseMontgomerySquareIntrinsic, false,                       \
  diagnostic(bool, UseMontgomerySquareIntrinsic, false,                    \
          "Enables intrinsification of BigInteger.montgomerySquare()")     \
                                                                           \
  product(bool, UseTypeSpeculation, true,                                  \
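
A practical consequence of the develop/product to diagnostic conversions above: develop flags are constant-folded into product builds and cannot be changed at run time, while diagnostic flags remain settable in a production JDK once diagnostic options are unlocked (e.g. -XX:+UnlockDiagnosticVMOptions -XX:-SpecialStringEquals). That makes it possible to switch individual intrinsics off when hunting a suspected miscompile without a debug build.
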
@ -484,9 +484,6 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
  case vmIntrinsics::_putCharUnaligned:
  case vmIntrinsics::_putIntUnaligned:
  case vmIntrinsics::_putLongUnaligned:
  case vmIntrinsics::_putOrderedObject:
  case vmIntrinsics::_putOrderedInt:
  case vmIntrinsics::_putOrderedLong:
  case vmIntrinsics::_loadFence:
  case vmIntrinsics::_storeFence:
  case vmIntrinsics::_fullFence:
@ -621,10 +621,6 @@ bool LibraryCallKit::try_to_inline(int predicate) {
  case vmIntrinsics::_putIntUnaligned:   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,    Relaxed, true);
  case vmIntrinsics::_putLongUnaligned:  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,   Relaxed, true);

  case vmIntrinsics::_putOrderedObject:  return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT, Release, false);
  case vmIntrinsics::_putOrderedInt:     return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,    Release, false);
  case vmIntrinsics::_putOrderedLong:    return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,   Release, false);

  case vmIntrinsics::_getObjectAcquire:  return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  Acquire, false);
  case vmIntrinsics::_getBooleanAcquire: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, Acquire, false);
  case vmIntrinsics::_getByteAcquire:    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    Acquire, false);
@ -1453,20 +1453,14 @@ void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool ad
  Node *opaq = NULL;
  if (adjust_min_trip) { // If not maximally unrolling, need adjustment
    // Search for zero-trip guard.
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a zero-trip guard Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We can not optimize this particular loop in that case.
    if (opaq->Opcode() != Op_Opaque1)
      return; // Cannot find zero-trip guard!  Bail out!

    // Check the shape of the graph at the loop entry. If an inappropriate
    // graph shape is encountered, the compiler bails out loop unrolling;
    // compilation of the method will still succeed.
    if (!is_canonical_main_loop_entry(loop_head)) {
      return;
    }
    opaq = ctrl->in(0)->in(1)->in(1)->in(2);
    // Zero-trip test uses an 'opaque' node which is not shared.
    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
  }
@ -2109,7 +2103,6 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
#endif
  assert(RangeCheckElimination, "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->is_main_loop(), "");

  // protect against stride not being a constant
  if (!cl->stride_is_con())
@ -2121,20 +2114,17 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
  // to not ever trip end tests
  Node *main_limit = cl->limit();

  // Check graph shape. Cannot optimize a loop if zero-trip
  // Opaque1 node is optimized away and then another round
  // of loop opts attempted.
  if (!is_canonical_main_loop_entry(cl)) {
    return;
  }

  // Need to find the main-loop zero-trip guard
  Node *ctrl = cl->in(LoopNode::EntryControl);
  assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
  Node *iffm = ctrl->in(0);
  assert(iffm->Opcode() == Op_If, "");
  Node *bolzm = iffm->in(1);
  assert(bolzm->Opcode() == Op_Bool, "");
  Node *cmpzm = bolzm->in(1);
  assert(cmpzm->is_Cmp(), "");
  Node *opqzm = cmpzm->in(2);
  // Can not optimize a loop if zero-trip Opaque1 node is optimized
  // away and then another round of loop opts attempted.
  if (opqzm->Opcode() != Op_Opaque1)
    return;
  Node *opqzm = iffm->in(1)->in(1)->in(2);
  assert(opqzm->in(1) == main_limit, "do not understand situation");

  // Find the pre-loop limit; we will expand its iterations to
@ -3275,6 +3275,41 @@ Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) {
  return LCA;
}

// Check the shape of the graph at the loop entry. In some cases,
// the shape of the graph does not match the shape outlined below.
// That is caused by the Opaque1 node "protecting" the shape of
// the graph being removed by, for example, the IGVN performed
// in PhaseIdealLoop::build_and_optimize().
//
// After the Opaque1 node has been removed, optimizations (e.g., split-if,
// loop unswitching, and IGVN, or a combination of them) can freely change
// the graph's shape. As a result, the graph shape outlined below cannot
// be guaranteed anymore.
bool PhaseIdealLoop::is_canonical_main_loop_entry(CountedLoopNode* cl) {
  assert(cl->is_main_loop(), "check should be applied to main loops");
  Node* ctrl = cl->in(LoopNode::EntryControl);
  if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) {
    return false;
  }
  Node* iffm = ctrl->in(0);
  if (iffm == NULL || !iffm->is_If()) {
    return false;
  }
  Node* bolzm = iffm->in(1);
  if (bolzm == NULL || !bolzm->is_Bool()) {
    return false;
  }
  Node* cmpzm = bolzm->in(1);
  if (cmpzm == NULL || !cmpzm->is_Cmp()) {
    return false;
  }
  Node* opqzm = cmpzm->in(2);
  if (opqzm == NULL || opqzm->Opcode() != Op_Opaque1) {
    return false;
  }
  return true;
}

//------------------------------get_late_ctrl----------------------------------
// Compute latest legal control.
Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
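
The new is_canonical_main_loop_entry() centralizes the zero-trip-guard shape check that do_unroll and do_range_check previously open-coded with a chain of asserts: both call sites in the loopTransform.cpp hunks above now bail out through it before reaching into the fixed input path ctrl->in(0)->in(1)->in(1)->in(2) for the Opaque1 node, so a graph reshaped after the Opaque1 node was removed is rejected cleanly instead of tripping asserts or crashing on an unexpected node.
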
@ -656,6 +656,9 @@ class PhaseIdealLoop : public PhaseTransform {
  bool cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);

public:

  static bool is_canonical_main_loop_entry(CountedLoopNode* cl);

  bool has_node( Node* n ) const {
    guarantee(n != NULL, "No Node.");
    return _nodes[n->_idx] != NULL;
@ -742,7 +742,7 @@ void LoadNode::dump_spec(outputStream *st) const {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }
  if (!_depends_only_on_test) {
  if (!depends_only_on_test()) {
    st->print(" (does not depend only on test)");
  }
}
@ -914,7 +914,7 @@ Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseTransform* phase) const {
      }
    }
    // load depends on the tests that validate the arraycopy
    ld->as_Load()->_depends_only_on_test = Pinned;
    ld->as_Load()->_control_dependency = Pinned;
    return ld;
  }
  return NULL;
@ -1118,6 +1118,44 @@ Node* LoadNode::Identity(PhaseGVN* phase) {
  return this;
}

// Construct an equivalent unsigned load.
Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) {
  BasicType bt = T_ILLEGAL;
  const Type* rt = NULL;
  switch (Opcode()) {
    case Op_LoadUB: return this;
    case Op_LoadUS: return this;
    case Op_LoadB: bt = T_BOOLEAN; rt = TypeInt::UBYTE; break;
    case Op_LoadS: bt = T_CHAR;    rt = TypeInt::CHAR;  break;
    default:
      assert(false, "no unsigned variant: %s", Name());
      return NULL;
  }
  return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
                        raw_adr_type(), rt, bt, _mo, _control_dependency,
                        is_unaligned_access(), is_mismatched_access());
}

// Construct an equivalent signed load.
Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) {
  BasicType bt = T_ILLEGAL;
  const Type* rt = NULL;
  switch (Opcode()) {
    case Op_LoadUB: bt = T_BYTE;  rt = TypeInt::BYTE;  break;
    case Op_LoadUS: bt = T_SHORT; rt = TypeInt::SHORT; break;
    case Op_LoadB: // fall through
    case Op_LoadS: // fall through
    case Op_LoadI: // fall through
    case Op_LoadL: return this;
    default:
      assert(false, "no signed variant: %s", Name());
      return NULL;
  }
  return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
                        raw_adr_type(), rt, bt, _mo, _control_dependency,
                        is_unaligned_access(), is_mismatched_access());
}

// We're loading from an object which has autobox behaviour.
// If this object is result of a valueOf call we'll have a phi
// merging a newly allocated object and a load from the cache.
@ -1582,7 +1620,7 @@ LoadNode::load_array_final_field(const TypeKlassPtr *tkls,
  return NULL;
}

static bool is_mismatched_access(ciConstant con, BasicType loadbt) {
static ciConstant check_mismatched_access(ciConstant con, BasicType loadbt, bool is_unsigned) {
  BasicType conbt = con.basic_type();
  switch (conbt) {
    case T_BOOLEAN: conbt = T_BYTE;   break;
@ -1594,23 +1632,40 @@ static bool is_mismatched_access(ciConstant con, BasicType loadbt) {
    case T_ARRAY:   loadbt = T_OBJECT; break;
    case T_ADDRESS: loadbt = T_OBJECT; break;
  }
  return (conbt != loadbt);
  if (conbt == loadbt) {
    if (is_unsigned && conbt == T_BYTE) {
      // LoadB (T_BYTE) with a small mask (<=8-bit) is converted to LoadUB (T_BYTE).
      return ciConstant(T_INT, con.as_int() & 0xFF);
    } else {
      return con;
    }
  }
  if (conbt == T_SHORT && loadbt == T_CHAR) {
    // LoadS (T_SHORT) with a small mask (<=16-bit) is converted to LoadUS (T_CHAR).
    return ciConstant(T_INT, con.as_int() & 0xFFFF);
  }
  return ciConstant(); // T_ILLEGAL
}

// Try to constant-fold a stable array element.
static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, bool is_unsigned_load, BasicType loadbt) {
  assert(ary->const_oop(), "array should be constant");
  assert(ary->is_stable(), "array should be stable");

  // Decode the results of GraphKit::array_element_address.
  ciArray* aobj = ary->const_oop()->as_array();
  ciConstant con = aobj->element_value_by_offset(off);
  if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
    bool is_mismatched = is_mismatched_access(con, loadbt);
    assert(!is_mismatched, "conbt=%s; loadbt=%s", type2name(con.basic_type()), type2name(loadbt));
  ciConstant element_value = aobj->element_value_by_offset(off);
  if (element_value.basic_type() == T_ILLEGAL) {
    return NULL; // wrong offset
  }
  ciConstant con = check_mismatched_access(element_value, loadbt, is_unsigned_load);
  assert(con.basic_type() != T_ILLEGAL, "elembt=%s; loadbt=%s; unsigned=%d",
         type2name(element_value.basic_type()), type2name(loadbt), is_unsigned_load);

  if (con.basic_type() != T_ILLEGAL && // not a mismatched access
      !con.is_null_or_zero()) { // not a default value
    const Type* con_type = Type::make_from_constant(con);
    // Guard against erroneous constant folding.
    if (!is_mismatched && con_type != NULL) {
    if (con_type != NULL) {
      if (con_type->isa_aryptr()) {
        // Join with the array element type, in case it is also stable.
        int dim = ary->stable_dimension();
@ -1662,7 +1717,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
    if (FoldStableValues && !is_mismatched_access() && ary->is_stable() && ary->const_oop() != NULL) {
      // Make sure the reference is not into the header and the offset is constant
      if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
        const Type* con_type = fold_stable_ary_elem(ary, off, is_unsigned(), memory_type());
        if (con_type != NULL) {
          return con_type;
        }
|
||||
|
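For context, the masking performed in check_mismatched_access above mirrors plain Java semantics: reading a constant byte element through a small mask must fold to the zero-extended value, not the sign-extended one. A minimal, self-contained Java sketch of those semantics (the arrays and values are illustrative, not from this patch):

    public class UnsignedLoadSemantics {
        static final byte[] BYTES = { (byte) 0xFF };      // -1 as a signed byte
        static final short[] SHORTS = { (short) 0xBEEF };

        public static void main(String[] args) {
            int signed   = BYTES[0];            // LoadB: sign-extended, prints -1
            int unsigned = BYTES[0] & 0xFF;     // LoadB + 8-bit mask, folded like a LoadUB, prints 255
            int charLike = SHORTS[0] & 0xFFFF;  // LoadS + 16-bit mask, folded like a LoadUS, prints 48879
            System.out.println(signed + " " + unsigned + " " + charLike);
        }
    }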
@ -148,9 +148,8 @@ public:
class LoadNode : public MemNode {
public:
// Some loads (from unsafe) should be pinned: they don't depend only
// on the dominating test. The boolean field _depends_only_on_test
// below records whether that node depends only on the dominating
// test.
// on the dominating test. The field _control_dependency below records
// whether that node depends only on the dominating test.
// Methods used to build LoadNodes pass an argument of type enum
// ControlDependency instead of a boolean because those methods
// typically have multiple boolean parameters with default values:
@ -162,7 +161,7 @@ public:
DependsOnlyOnTest
};
private:
// LoadNode::hash() doesn't take the _depends_only_on_test field
// LoadNode::hash() doesn't take the _control_dependency field
// into account: If the graph already has a non-pinned LoadNode and
// we add a pinned LoadNode with the same inputs, it's safe for GVN
// to replace the pinned LoadNode with the non-pinned LoadNode,
@ -171,7 +170,7 @@ private:
// pinned LoadNode and we add a non pinned LoadNode with the same
// inputs, it's safe (but suboptimal) for GVN to replace the
// non-pinned LoadNode by the pinned LoadNode.
bool _depends_only_on_test;
ControlDependency _control_dependency;

// On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
// loads that can be reordered, and such requiring acquire semantics to
@ -190,7 +189,7 @@ protected:
public:

LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
: MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
: MemNode(c,mem,adr,at), _type(rt), _mo(mo), _control_dependency(control_dependency) {
init_class_id(Class_Load);
}
inline bool is_unordered() const { return !is_acquire(); }
@ -198,6 +197,10 @@ public:
assert(_mo == unordered || _mo == acquire, "unexpected");
return _mo == acquire;
}
inline bool is_unsigned() const {
int lop = Opcode();
return (lop == Op_LoadUB) || (lop == Op_LoadUS);
}

// Polymorphic factory method:
static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
@ -252,6 +255,9 @@ public:
// Check if the load's memory input is a Phi node with the same control.
bool is_instance_field_load_with_local_phi(Node* ctrl);

Node* convert_to_unsigned_load(PhaseGVN& gvn);
Node* convert_to_signed_load(PhaseGVN& gvn);

#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
@ -274,7 +280,9 @@ protected:
// which produce results (new raw memory state) inside of loops preventing all
// manner of other optimizations). Basically, it's ugly but so is the alternative.
// See comment in macro.cpp, around line 125 expand_allocate_common().
virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
virtual bool depends_only_on_test() const {
return adr_type() != TypeRawPtr::BOTTOM && _control_dependency == DependsOnlyOnTest;
}
};

//------------------------------LoadBNode--------------------------------------
@ -483,11 +483,7 @@ Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (can_reshape &&
load->outcnt() == 1 && load->unique_out() == this) {
if (lop == Op_LoadS && (mask & 0xFFFF0000) == 0 ) {
Node *ldus = new LoadUSNode(load->in(MemNode::Control),
load->in(MemNode::Memory),
load->in(MemNode::Address),
load->adr_type(),
TypeInt::CHAR, MemNode::unordered);
Node* ldus = load->as_Load()->convert_to_unsigned_load(*phase);
ldus = phase->transform(ldus);
return new AndINode(ldus, phase->intcon(mask & 0xFFFF));
}
@ -495,11 +491,7 @@ Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Masking sign bits off of a Byte? Do an unsigned byte load plus
// an and.
if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) {
Node* ldub = new LoadUBNode(load->in(MemNode::Control),
load->in(MemNode::Memory),
load->in(MemNode::Address),
load->adr_type(),
TypeInt::UBYTE, MemNode::unordered);
Node* ldub = load->as_Load()->convert_to_unsigned_load(*phase);
ldub = phase->transform(ldub);
return new AndINode(ldub, phase->intcon(mask));
}
@ -934,11 +926,7 @@ Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
ld->Opcode() == Op_LoadUS &&
ld->outcnt() == 1 && ld->unique_out() == shl)
// Replace zero-extension-load with sign-extension-load
return new LoadSNode( ld->in(MemNode::Control),
ld->in(MemNode::Memory),
ld->in(MemNode::Address),
ld->adr_type(), TypeInt::SHORT,
MemNode::unordered);
return ld->as_Load()->convert_to_signed_load(*phase);
}

// Check for "(byte[i] <<24)>>24" which simply sign-extends
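The Java-level idioms that these Ideal() rewrites target are masks and shift pairs over small integer loads. A short sketch of the patterns (method names are illustrative):

    public class LoadConversionIdioms {
        static int maskShort(short[] a) {
            return a[0] & 0xFFFF;          // short load + 16-bit mask: candidate for an unsigned (char-sized) load
        }

        static int maskByte(byte[] a) {
            return a[0] & 0x7F;            // byte load + sub-8-bit mask: candidate for an unsigned byte load
        }

        static int signExtendChar(char[] a) {
            return (a[0] << 16) >> 16;     // zero-extending char load + shifts: foldable to a signed short load
        }

        public static void main(String[] args) {
            System.out.println(maskShort(new short[] { -1 }));          // 65535
            System.out.println(maskByte(new byte[] { -1 }));            // 127
            System.out.println(signExtendChar(new char[] { 0xFFFF }));  // -1
        }
    }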
@ -287,7 +287,7 @@ uint PhaseChaitin::split_USE(MachSpillCopyNode::SpillType spill_type, Node *def,
Node* clone_node(Node* def, Block *b, Compile* C) {
if (def->needs_anti_dependence_check()) {
#ifdef ASSERT
if (Verbose) {
if (PrintOpto && WizardMode) {
tty->print_cr("RA attempts to clone node with anti_dependence:");
def->dump(-1); tty->cr();
tty->print_cr("into block:");

@ -3074,21 +3074,13 @@ void SuperWord::align_initial_loop_index(MemNode* align_to_ref) {
//----------------------------get_pre_loop_end---------------------------
// Find pre loop end from main loop. Returns null if none.
CountedLoopEndNode* SuperWord::get_pre_loop_end(CountedLoopNode* cl) {
Node* ctrl = cl->in(LoopNode::EntryControl);
if (!ctrl->is_IfTrue() && !ctrl->is_IfFalse()) return NULL;
Node* iffm = ctrl->in(0);
if (!iffm->is_If()) return NULL;
Node* bolzm = iffm->in(1);
if (!bolzm->is_Bool()) return NULL;
Node* cmpzm = bolzm->in(1);
if (!cmpzm->is_Cmp()) return NULL;
Node* opqzm = cmpzm->in(2);
// Can not optimize a loop if zero-trip Opaque1 node is optimized
// away and then another round of loop opts attempted.
if (opqzm->Opcode() != Op_Opaque1) {
// The loop cannot be optimized if the graph shape at
// the loop entry is inappropriate.
if (!PhaseIdealLoop::is_canonical_main_loop_entry(cl)) {
return NULL;
}
Node* p_f = iffm->in(0);

Node* p_f = cl->in(LoopNode::EntryControl)->in(0)->in(0);
if (!p_f->is_IfFalse()) return NULL;
if (!p_f->in(0)->is_CountedLoopEnd()) return NULL;
CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd();
@ -318,9 +318,9 @@ oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool

// JVM 2.9 Special Methods:
// A method is signature polymorphic if and only if all of the following conditions hold:
// * It is declared in the java.lang.invoke.MethodHandle class.
// * It is declared in the java.lang.invoke.MethodHandle/VarHandle classes.
// * It has a single formal parameter of type Object[].
// * It has a return type of Object.
// * It has a return type of Object for a polymorphic return type, otherwise a fixed return type.
// * It has the ACC_VARARGS and ACC_NATIVE flags set.
bool MethodHandles::is_method_handle_invoke_name(Klass* klass, Symbol* name) {
if (klass == NULL)
@ -328,14 +328,36 @@ bool MethodHandles::is_method_handle_invoke_name(Klass* klass, Symbol* name) {
// The following test will fail spuriously during bootstrap of MethodHandle itself:
// if (klass != SystemDictionary::MethodHandle_klass())
// Test the name instead:
if (klass->name() != vmSymbols::java_lang_invoke_MethodHandle())
if (klass->name() != vmSymbols::java_lang_invoke_MethodHandle() &&
klass->name() != vmSymbols::java_lang_invoke_VarHandle()) {
return false;
}

// Look up signature polymorphic method with polymorphic return type
Symbol* poly_sig = vmSymbols::object_array_object_signature();
Method* m = InstanceKlass::cast(klass)->find_method(name, poly_sig);
if (m == NULL) return false;
int required = JVM_ACC_NATIVE | JVM_ACC_VARARGS;
int flags = m->access_flags().as_int();
return (flags & required) == required;
InstanceKlass* iklass = InstanceKlass::cast(klass);
Method* m = iklass->find_method(name, poly_sig);
if (m != NULL) {
int required = JVM_ACC_NATIVE | JVM_ACC_VARARGS;
int flags = m->access_flags().as_int();
if ((flags & required) == required) {
return true;
}
}

// Look up signature polymorphic method with non-polymorphic (non Object) return type
int me;
int ms = iklass->find_method_by_name(name, &me);
if (ms == -1) return false;
for (; ms < me; ms++) {
Method* m = iklass->methods()->at(ms);
int required = JVM_ACC_NATIVE | JVM_ACC_VARARGS;
int flags = m->access_flags().as_int();
if ((flags & required) == required && ArgumentCount(m->signature()).size() == 1) {
return true;
}
}
return false;
}

@ -395,8 +417,16 @@ vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(Symbol* name) {
// Cover the case of invokeExact and any future variants of invokeFoo.
Klass* mh_klass = SystemDictionary::well_known_klass(
SystemDictionary::WK_KLASS_ENUM_NAME(MethodHandle_klass) );
if (mh_klass != NULL && is_method_handle_invoke_name(mh_klass, name))
if (mh_klass != NULL && is_method_handle_invoke_name(mh_klass, name)) {
return vmIntrinsics::_invokeGeneric;
}

// Cover the case of methods on VarHandle.
Klass* vh_klass = SystemDictionary::well_known_klass(
SystemDictionary::WK_KLASS_ENUM_NAME(VarHandle_klass) );
if (vh_klass != NULL && is_method_handle_invoke_name(vh_klass, name)) {
return vmIntrinsics::_invokeGeneric;
}

// Note: The pseudo-intrinsic _compiledLambdaForm is never linked against.
// Instead it is used to mark lambda forms bound to invokehandle or invokedynamic.
@ -405,7 +435,8 @@ vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(Symbol* name) {

vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(Klass* klass, Symbol* name) {
if (klass != NULL &&
klass->name() == vmSymbols::java_lang_invoke_MethodHandle()) {
(klass->name() == vmSymbols::java_lang_invoke_MethodHandle() ||
klass->name() == vmSymbols::java_lang_invoke_VarHandle())) {
vmIntrinsics::ID iid = signature_polymorphic_name_id(name);
if (iid != vmIntrinsics::_none)
return iid;
@ -1197,10 +1228,10 @@ JVM_ENTRY(jobject, MHN_resolve_Mem(JNIEnv *env, jobject igcls, jobject mname_jh,
THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "obsolete MemberName format");
}
if ((flags & ALL_KINDS) == IS_FIELD) {
THROW_MSG_NULL(vmSymbols::java_lang_NoSuchMethodError(), "field resolution failed");
THROW_MSG_NULL(vmSymbols::java_lang_NoSuchFieldError(), "field resolution failed");
} else if ((flags & ALL_KINDS) == IS_METHOD ||
(flags & ALL_KINDS) == IS_CONSTRUCTOR) {
THROW_MSG_NULL(vmSymbols::java_lang_NoSuchFieldError(), "method resolution failed");
THROW_MSG_NULL(vmSymbols::java_lang_NoSuchMethodError(), "method resolution failed");
} else {
THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "resolution failed");
}
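On the library side, the VarHandle methods recognized by this change are signature polymorphic in the same sense as MethodHandle.invokeExact: the descriptor is derived from the call site rather than from the declared (Object[])Object signature. A minimal usage sketch (class and field names are illustrative):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    public class VarHandlePolymorphism {
        static volatile int counter;

        public static void main(String[] args) throws Exception {
            VarHandle vh = MethodHandles.lookup()
                    .findStaticVarHandle(VarHandlePolymorphism.class, "counter", int.class);
            vh.setVolatile(42);               // linked against the (int)void shape of this call site
            int v = (int) vh.getVolatile();   // linked against the ()int shape of this call site
            System.out.println(v);            // 42
        }
    }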
@ -120,7 +120,8 @@ class MethodHandles: AllStatic {
iid <= vmIntrinsics::_linkToInterface);
}
static bool has_member_arg(Symbol* klass, Symbol* name) {
if ((klass == vmSymbols::java_lang_invoke_MethodHandle()) &&
if ((klass == vmSymbols::java_lang_invoke_MethodHandle() ||
klass == vmSymbols::java_lang_invoke_VarHandle()) &&
is_signature_polymorphic_name(name)) {
vmIntrinsics::ID iid = signature_polymorphic_name_id(name);
return has_member_arg(iid);

@ -378,44 +378,6 @@ DEFINE_GETSETOOP_VOLATILE(jlong, Long);

#undef DEFINE_GETSETOOP_VOLATILE

// The non-intrinsified versions of setOrdered just use setVolatile

UNSAFE_ENTRY(void, Unsafe_SetOrderedInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint x)) {
SET_FIELD_VOLATILE(obj, offset, jint, x);
} UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_SetOrderedObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h)) {
oop x = JNIHandles::resolve(x_h);
oop p = JNIHandles::resolve(obj);
void* addr = index_oop_from_field_offset_long(p, offset);
OrderAccess::release();

if (UseCompressedOops) {
oop_store((narrowOop*)addr, x);
} else {
oop_store((oop*)addr, x);
}

OrderAccess::fence();
} UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_SetOrderedLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong x)) {
#ifdef SUPPORTS_NATIVE_CX8
SET_FIELD_VOLATILE(obj, offset, jlong, x);
#else

// Keep old code for platforms which may not have atomic long (8 bytes) instructions
if (VM_Version::supports_cx8()) {
SET_FIELD_VOLATILE(obj, offset, jlong, x);
} else {
Handle p(THREAD, JNIHandles::resolve(obj));
jlong* addr = (jlong*)(index_oop_from_field_offset_long(p(), offset));
MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
Atomic::store(x, addr);
}
#endif
} UNSAFE_END

UNSAFE_LEAF(void, Unsafe_LoadFence(JNIEnv *env, jobject unsafe)) {
OrderAccess::acquire();
} UNSAFE_END
@ -1230,9 +1192,6 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
{CC "compareAndExchangeIntVolatile", CC "(" OBJ "J""I""I"")I", FN_PTR(Unsafe_CompareAndExchangeInt)},
{CC "compareAndExchangeLongVolatile", CC "(" OBJ "J""J""J"")J", FN_PTR(Unsafe_CompareAndExchangeLong)},

{CC "putOrderedObject", CC "(" OBJ "J" OBJ ")V", FN_PTR(Unsafe_SetOrderedObject)},
{CC "putOrderedInt", CC "(" OBJ "JI)V", FN_PTR(Unsafe_SetOrderedInt)},
{CC "putOrderedLong", CC "(" OBJ "JJ)V", FN_PTR(Unsafe_SetOrderedLong)},
{CC "park", CC "(ZJ)V", FN_PTR(Unsafe_Park)},
{CC "unpark", CC "(" OBJ ")V", FN_PTR(Unsafe_Unpark)},
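With the putOrderedObject/putOrderedInt/putOrderedLong entry points gone from jdk.internal.misc.Unsafe, release-ordered ("lazy") stores are expressed through the public APIs instead. A small sketch of the two common replacements (field and class names are illustrative):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;
    import java.util.concurrent.atomic.AtomicInteger;

    public class ReleaseStoreExamples {
        static int field;
        static final VarHandle FIELD;
        static {
            try {
                FIELD = MethodHandles.lookup()
                        .findStaticVarHandle(ReleaseStoreExamples.class, "field", int.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        public static void main(String[] args) {
            AtomicInteger counter = new AtomicInteger();
            counter.lazySet(1);          // release-store semantics, like the old putOrderedInt
            FIELD.setRelease(2);         // VarHandle spelling of the same ordering (JDK 9+)
            System.out.println(counter.get() + " " + (int) FIELD.getAcquire());
        }
    }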
@ -2550,6 +2550,13 @@ bool Arguments::check_vm_args_consistency() {
warning("Reserved Stack Area not supported on this platform");
}
#endif

if (BackgroundCompilation && (CompileTheWorld || ReplayCompiles)) {
if (!FLAG_IS_DEFAULT(BackgroundCompilation)) {
warning("BackgroundCompilation disabled due to CompileTheWorld or ReplayCompiles options.");
}
FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
}
return status;
}
@ -727,7 +727,7 @@ public:
"Control whether SHA instructions can be used " \
"on SPARC, on ARM and on x86") \
\
product(bool, UseGHASHIntrinsics, false, \
diagnostic(bool, UseGHASHIntrinsics, false, \
"Use intrinsics for GHASH versions of crypto") \
\
product(size_t, LargePageSizeInBytes, 0, \
@ -797,27 +797,27 @@ public:
product(bool, UseInlineCaches, true, \
"Use Inline Caches for virtual calls ") \
\
develop(bool, InlineArrayCopy, true, \
diagnostic(bool, InlineArrayCopy, true, \
"Inline arraycopy native that is known to be part of " \
"base library DLL") \
\
develop(bool, InlineObjectHash, true, \
diagnostic(bool, InlineObjectHash, true, \
"Inline Object::hashCode() native that is known to be part " \
"of base library DLL") \
\
develop(bool, InlineNatives, true, \
diagnostic(bool, InlineNatives, true, \
"Inline natives that are known to be part of base library DLL") \
\
develop(bool, InlineMathNatives, true, \
diagnostic(bool, InlineMathNatives, true, \
"Inline SinD, CosD, etc.") \
\
develop(bool, InlineClassNatives, true, \
diagnostic(bool, InlineClassNatives, true, \
"Inline Class.isInstance, etc") \
\
develop(bool, InlineThreadNatives, true, \
diagnostic(bool, InlineThreadNatives, true, \
"Inline Thread.currentThread, etc") \
\
develop(bool, InlineUnsafeOps, true, \
diagnostic(bool, InlineUnsafeOps, true, \
"Inline memory ops (native methods) from Unsafe") \
\
product(bool, CriticalJNINatives, true, \
@ -826,34 +826,34 @@ public:
notproduct(bool, StressCriticalJNINatives, false, \
"Exercise register saving code in critical natives") \
\
product(bool, UseAESIntrinsics, false, \
diagnostic(bool, UseAESIntrinsics, false, \
"Use intrinsics for AES versions of crypto") \
\
product(bool, UseAESCTRIntrinsics, false, \
diagnostic(bool, UseAESCTRIntrinsics, false, \
"Use intrinsics for the paralleled version of AES/CTR crypto") \
\
product(bool, UseSHA1Intrinsics, false, \
diagnostic(bool, UseSHA1Intrinsics, false, \
"Use intrinsics for SHA-1 crypto hash function. " \
"Requires that UseSHA is enabled.") \
\
product(bool, UseSHA256Intrinsics, false, \
diagnostic(bool, UseSHA256Intrinsics, false, \
"Use intrinsics for SHA-224 and SHA-256 crypto hash functions. " \
"Requires that UseSHA is enabled.") \
\
product(bool, UseSHA512Intrinsics, false, \
diagnostic(bool, UseSHA512Intrinsics, false, \
"Use intrinsics for SHA-384 and SHA-512 crypto hash functions. " \
"Requires that UseSHA is enabled.") \
\
product(bool, UseCRC32Intrinsics, false, \
diagnostic(bool, UseCRC32Intrinsics, false, \
"use intrinsics for java.util.zip.CRC32") \
\
product(bool, UseCRC32CIntrinsics, false, \
diagnostic(bool, UseCRC32CIntrinsics, false, \
"use intrinsics for java.util.zip.CRC32C") \
\
product(bool, UseAdler32Intrinsics, false, \
diagnostic(bool, UseAdler32Intrinsics, false, \
"use intrinsics for java.util.zip.Adler32") \
\
product(bool, UseVectorizedMismatchIntrinsic, false, \
diagnostic(bool, UseVectorizedMismatchIntrinsic, false, \
"Enables intrinsification of ArraysSupport.vectorizedMismatch()") \
\
diagnostic(ccstrlist, DisableIntrinsic, "", \
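One practical consequence of demoting these flags from product/develop to diagnostic is that enabling them in a product build now also requires -XX:+UnlockDiagnosticVMOptions. A small sketch for inspecting such a flag at runtime through the standard diagnostic MXBean (the flag name is taken from the list above):

    import com.sun.management.HotSpotDiagnosticMXBean;
    import java.lang.management.ManagementFactory;

    public class CheckDiagnosticFlag {
        public static void main(String[] args) {
            HotSpotDiagnosticMXBean bean =
                    ManagementFactory.getPlatformMXBean(HotSpotDiagnosticMXBean.class);
            // Reading a diagnostic flag needs no unlocking; setting it on the command
            // line does: java -XX:+UnlockDiagnosticVMOptions -XX:+UseAESIntrinsics ...
            System.out.println(bean.getVMOption("UseAESIntrinsics"));
        }
    }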
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -146,7 +146,6 @@ volatile bool NMethodSweeper::_force_sweep = false;// Indicates if w
volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
// 3) zombie -> marked_for_reclamation
int NMethodSweeper::_hotness_counter_reset_val = 0;

long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
@ -355,8 +354,8 @@ void NMethodSweeper::possibly_sweep() {
bool forced = _force_sweep;
// Force stack scanning if there is only 10% free space in the code cache.
// We force stack scanning only non-profiled code heap gets full, since critical
// allocation go to the non-profiled heap and we must be make sure that there is
// We force stack scanning only if the non-profiled code heap gets full, since critical
// allocations go to the non-profiled heap and we must make sure that there is
// enough space.
double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
if (free_percent <= StartAggressiveSweepingAt) {
@ -397,7 +396,6 @@ void NMethodSweeper::sweep_code_cache() {

int flushed_count = 0;
int zombified_count = 0;
int marked_for_reclamation_count = 0;
int flushed_c2_count = 0;

if (PrintMethodFlushing && Verbose) {
@ -423,22 +421,27 @@ void NMethodSweeper::sweep_code_cache() {
// Now ready to process nmethod and give up CodeCache_lock
{
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// Save information before potentially flushing the nmethod
int size = nm->total_size();
bool is_c2_method = nm->is_compiled_by_c2();
bool is_osr = nm->is_osr_method();
int compile_id = nm->compile_id();
intptr_t address = p2i(nm);
const char* state_before = nm->state();
const char* state_after = "";

MethodStateChange type = process_nmethod(nm);
switch (type) {
case Flushed:
state_after = "flushed";
freed_memory += size;
++flushed_count;
if (is_c2_method) {
++flushed_c2_count;
}
break;
case MarkedForReclamation:
++marked_for_reclamation_count;
break;
case MadeZombie:
state_after = "made zombie";
++zombified_count;
break;
case None:
@ -446,7 +449,11 @@ void NMethodSweeper::sweep_code_cache() {
default:
ShouldNotReachHere();
}
if (PrintMethodFlushing && Verbose && type != None) {
tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ? "osr" : "", compile_id, address, state_before, state_after);
}
}

_seen++;
handle_safepoint_request();
}
@ -473,7 +480,6 @@ void NMethodSweeper::sweep_code_cache() {
event.set_sweepIndex(_traversals);
event.set_sweptCount(swept_count);
event.set_flushedCount(flushed_count);
event.set_markedCount(marked_for_reclamation_count);
event.set_zombifiedCount(zombified_count);
event.commit();
}
@ -533,7 +539,7 @@ class NMethodMarker: public StackObj {
NMethodMarker(nmethod* nm) {
JavaThread* current = JavaThread::current();
assert (current->is_Code_cache_sweeper_thread(), "Must be");
_thread = (CodeCacheSweeperThread*)JavaThread::current();
_thread = (CodeCacheSweeperThread*)current;
if (!nm->is_zombie() && !nm->is_unloaded()) {
// Only expose live nmethods for scanning
_thread->set_scanned_nmethod(nm);
@ -545,6 +551,10 @@ class NMethodMarker: public StackObj {
};

void NMethodSweeper::release_nmethod(nmethod* nm) {
// Make sure the released nmethod is no longer referenced by the sweeper thread
CodeCacheSweeperThread* thread = (CodeCacheSweeperThread*)JavaThread::current();
thread->set_scanned_nmethod(NULL);

// Clean up any CompiledICHolders
{
ResourceMark rm;
@ -575,7 +585,7 @@ NMethodSweeper::MethodStateChange NMethodSweeper::process_nmethod(nmethod* nm) {
if (nm->is_locked_by_vm()) {
// But still remember to clean-up inline caches for alive nmethods
if (nm->is_alive()) {
// Clean inline caches that point to zombie/non-entrant methods
// Clean inline caches that point to zombie/non-entrant/unloaded nmethods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
@ -584,47 +594,41 @@ NMethodSweeper::MethodStateChange NMethodSweeper::process_nmethod(nmethod* nm) {
}

if (nm->is_zombie()) {
// If it is the first time we see nmethod then we mark it. Otherwise,
// we reclaim it. When we have seen a zombie method twice, we know that
// there are no inline caches that refer to it.
if (nm->is_marked_for_reclamation()) {
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), p2i(nm));
}
release_nmethod(nm);
assert(result == None, "sanity");
result = Flushed;
} else {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), p2i(nm));
}
nm->mark_for_reclamation();
// Keep track of code cache state change
_bytes_changed += nm->total_size();
SWEEP(nm);
assert(result == None, "sanity");
result = MarkedForReclamation;
}
// All inline caches that referred to this nmethod were cleaned in the
// previous sweeper cycle. Now flush the nmethod from the code cache.
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
release_nmethod(nm);
assert(result == None, "sanity");
result = Flushed;
} else if (nm->is_not_entrant()) {
// If there are no current activations of this method on the
// stack we can safely convert it to a zombie method
if (nm->can_convert_to_zombie()) {
// Clear ICStubs to prevent back patching stubs of zombie or unloaded
// Clear ICStubs to prevent back patching stubs of zombie or flushed
// nmethods during the next safepoint (see ICStub::finalize).
{
MutexLocker cl(CompiledIC_lock);
nm->clear_ic_stubs();
}
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), p2i(nm));
}
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
SWEEP(nm);
assert(result == None, "sanity");
result = MadeZombie;
assert(nm->is_zombie(), "nmethod must be zombie");
// The nmethod may have been locked by JVMTI after being made zombie (see
// JvmtiDeferredEvent::compiled_method_unload_event()). If so, we cannot
// flush the osr nmethod directly but have to wait for a later sweeper cycle.
if (nm->is_osr_method() && !nm->is_locked_by_vm()) {
// No inline caches will ever point to osr methods, so we can just remove it.
// Make sure that we unregistered the nmethod with the heap and flushed all
// dependencies before removing the nmethod (done in make_zombie()).
assert(nm->is_zombie(), "nmethod must be unregistered");
release_nmethod(nm);
assert(result == None, "sanity");
result = Flushed;
} else {
assert(result == None, "sanity");
result = MadeZombie;
assert(nm->is_zombie(), "nmethod must be zombie");
}
} else {
// Still alive, clean up its inline caches
MutexLocker cl(CompiledIC_lock);
@ -632,9 +636,13 @@ NMethodSweeper::MethodStateChange NMethodSweeper::process_nmethod(nmethod* nm) {
SWEEP(nm);
}
} else if (nm->is_unloaded()) {
// Unloaded code, just make it a zombie
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), p2i(nm));
// Code is unloaded, so there are no activations on the stack.
// Convert the nmethod to zombie or flush it directly in the OSR case.
{
// Clean ICs of unloaded nmethods as well because they may reference other
// unloaded nmethods that may be flushed earlier in the sweeper cycle.
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
}
if (nm->is_osr_method()) {
SWEEP(nm);
@ -643,12 +651,6 @@ NMethodSweeper::MethodStateChange NMethodSweeper::process_nmethod(nmethod* nm) {
assert(result == None, "sanity");
result = Flushed;
} else {
{
// Clean ICs of unloaded nmethods as well because they may reference other
// unloaded nmethods that may be flushed earlier in the sweeper cycle.
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
}
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
SWEEP(nm);
@ -657,7 +659,7 @@ NMethodSweeper::MethodStateChange NMethodSweeper::process_nmethod(nmethod* nm) {
}
} else {
possibly_flush(nm);
// Clean-up all inline caches that point to zombie/non-reentrant methods
// Clean inline caches that point to zombie/non-entrant/unloaded nmethods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
@ -668,10 +670,10 @@ NMethodSweeper::MethodStateChange NMethodSweeper::process_nmethod(nmethod* nm) {

void NMethodSweeper::possibly_flush(nmethod* nm) {
if (UseCodeCacheFlushing) {
if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
if (!nm->is_locked_by_vm() && !nm->is_native_method()) {
bool make_not_entrant = false;

// Do not make native methods and OSR-methods not-entrant
// Do not make native methods not-entrant
nm->dec_hotness_counter();
// Get the initial value of the hotness counter. This value depends on the
// ReservedCodeCacheSize
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,12 +45,12 @@ class WhiteBox;
// and sweep_code_cache() cannot execute at the same time.
// To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
// be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
// invalidation, and (iv) being replaced be a different method version (tiered
// compilation). Not-entrant nmethod cannot be called by Java threads, but they
// can still be active on the stack. To ensure that active nmethod are not reclaimed,
// invalidation, and (iv) being replaced by a different method version (tiered
// compilation). Not-entrant nmethods cannot be called by Java threads, but they
// can still be active on the stack. To ensure that active nmethods are not reclaimed,
// we have to wait until the next marking phase has completed. If a not-entrant
// nmethod was NOT marked as active, it can be converted to 'zombie' state. To safely
// remove the nmethod, all inline caches (IC) that point to the the nmethod must be
// remove the nmethod, all inline caches (IC) that point to the nmethod must be
// cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
// state change happens during separate sweeps. It may take at least 3 sweeps before an
// nmethod's space is freed.
@ -60,7 +60,6 @@ class NMethodSweeper : public AllStatic {
enum MethodStateChange {
None,
MadeZombie,
MarkedForReclamation,
Flushed
};
static long _traversals; // Stack scan count, also sweep ID.
@ -76,7 +75,6 @@ class NMethodSweeper : public AllStatic {
static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
// 3) zombie -> marked_for_reclamation
// Stat counters
static long _total_nof_methods_reclaimed; // Accumulated nof methods flushed
static long _total_nof_c2_methods_reclaimed; // Accumulated nof C2-compiled methods flushed
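Read together with the sweeper.cpp hunks above, the nmethod lifecycle loses its separate "marked for reclamation" stage: a zombie whose inline caches were cleaned in the previous cycle is flushed directly. An illustrative model of the resulting transitions, as a reader's aid rather than VM code:

    enum NMethodState {
        ALIVE, NOT_ENTRANT, ZOMBIE, FLUSHED;

        boolean canTransitionTo(NMethodState next) {
            switch (this) {
                case ALIVE:       return next == NOT_ENTRANT;  // sweeper, deopt, dependencies, tiered replacement
                case NOT_ENTRANT: return next == ZOMBIE;       // once no activations remain on any stack
                case ZOMBIE:      return next == FLUSHED;      // once inline caches were cleaned in a prior sweep
                default:          return false;
            }
        }
    }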
@ -1051,7 +1051,7 @@ static const char* get_java_runtime_version(TRAPS) {
// General purpose hook into Java code, run once when the VM is initialized.
// The Java library method itself may be changed independently from the VM.
static void call_postVMInitHook(TRAPS) {
Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_PostVMInitHook(), THREAD);
Klass* k = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_vm_PostVMInitHook(), THREAD);
instanceKlassHandle klass (THREAD, k);
if (klass.not_null()) {
JavaValue result(T_VOID);

@ -43,7 +43,7 @@ volatile bool AttachListener::_initialized;

// Implementation of "properties" command.
//
// Invokes sun.misc.VMSupport.serializePropertiesToByteArray to serialize
// Invokes VMSupport.serializePropertiesToByteArray to serialize
// the system properties into a byte array.

static Klass* load_and_initialize_klass(Symbol* sh, TRAPS) {
@ -59,8 +59,8 @@ static jint get_properties(AttachOperation* op, outputStream* out, Symbol* seria
Thread* THREAD = Thread::current();
HandleMark hm;

// load sun.misc.VMSupport
Symbol* klass = vmSymbols::sun_misc_VMSupport();
// load VMSupport
Symbol* klass = vmSymbols::jdk_internal_vm_VMSupport();
Klass* k = load_and_initialize_klass(klass, THREAD);
if (HAS_PENDING_EXCEPTION) {
java_lang_Throwable::print(PENDING_EXCEPTION, out);

@ -316,8 +316,8 @@ int JVMTIAgentLoadDCmd::num_arguments() {
}

void PrintSystemPropertiesDCmd::execute(DCmdSource source, TRAPS) {
// load sun.misc.VMSupport
Symbol* klass = vmSymbols::sun_misc_VMSupport();
// load VMSupport
Symbol* klass = vmSymbols::jdk_internal_vm_VMSupport();
Klass* k = SystemDictionary::resolve_or_fail(klass, true, CHECK);
instanceKlassHandle ik (THREAD, k);
if (ik->should_be_initialized()) {
@ -550,7 +550,6 @@ Declares a structure type that can be used in other events.
<value type="INTEGER" field="sweepIndex" label="Sweep Index" relation="SWEEP_ID"/>
<value type="UINT" field="sweptCount" label="Methods Swept"/>
<value type="UINT" field="flushedCount" label="Methods Flushed"/>
<value type="UINT" field="markedCount" label="Methods Reclaimed"/>
<value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
</event>

@ -1056,6 +1056,7 @@ const int badHandleValue = 0xBC; // value used to zap
const int badResourceValue = 0xAB; // value used to zap resource area
const int freeBlockPad = 0xBA; // value used to pad freed blocks.
const int uninitBlockPad = 0xF1; // value used to zap newly malloc'd blocks.
const juint uninitMetaWordVal= 0xf7f7f7f7; // value used to zap newly allocated metachunk
const intptr_t badJNIHandleVal = (intptr_t) UCONST64(0xFEFEFEFEFEFEFEFE); // value used to zap jni handle area
const juint badHeapWordVal = 0xBAADBABE; // value used to zap heap after GC
const juint badMetaWordVal = 0xBAADFADE; // value used to zap metadata heap after GC
@ -98,7 +98,7 @@ needs_jdk = \
serviceability/attach/AttachWithStalePidFile.java \
serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java \
serviceability/dcmd/vm/DynLibsTest.java \
serviceability/tmtools
serviceability/tmtools

# JRE adds further tests to compact3
@ -248,7 +248,7 @@ needs_g1gc = \
gc/metaspace/G1AddMetaspaceDependency.java \
gc/metaspace/TestMetaspacePerfCounters.java \
gc/startup_warnings/TestG1.java \
gc/whitebox/TestConcMarkCycleWB.java
gc/whitebox/TestConcMarkCycleWB.java

hotspot_native_sanity = \
native_sanity
@ -267,7 +267,7 @@ hotspot_compiler_1 = \
-compiler/c2/6792161 \
-compiler/c2/7070134 \
-compiler/c2/8004867

hotspot_compiler_2 = \
compiler/classUnloading/ \
compiler/codecache/ \
@ -284,8 +284,9 @@ hotspot_compiler_2 = \
compiler/interpreter/ \
compiler/jvmci/ \
-compiler/codegen/7184394 \
-compiler/codecache/stress
-compiler/codecache/stress \
-compiler/gcbarriers/PreserveFPRegistersTest.java

hotspot_compiler_3 = \
compiler/intrinsics/ \
compiler/jsr292/ \
@ -370,4 +371,4 @@ not_needs_nashorn = \
-:needs_nashorn

hotspot_tmtools = \
serviceability/tmtools
serviceability/tmtools
@ -29,7 +29,7 @@ import jdk.test.lib.*;
* @bug 8132525
* @summary Check that correct range of values for CICompilerCount are allowed depending on whether tiered is enabled or not
* @library /testlibrary
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @run main CheckCICompilerCount
*/
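The remaining test updates below follow the same pattern, tracking the move of internal classes from sun.misc to jdk.internal.misc. In jtreg, a test gains access to such an internal package through the @modules tag; a hypothetical minimal test showing the shape:

    /*
     * @test
     * @summary Hypothetical example: exporting an internal package to a jtreg test
     * @modules java.base/jdk.internal.misc
     * @run main InternalMiscExample
     */
    public class InternalMiscExample {
        public static void main(String[] args) {
            // Accessible because @modules exports java.base/jdk.internal.misc to the test.
            System.out.println(jdk.internal.misc.Unsafe.getUnsafe() != null);
        }
    }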
@ -28,7 +28,7 @@ import jdk.test.lib.*;
* @bug 8059604
* @summary "Add CompileThresholdScaling flag to control when methods are first compiled (with +/-TieredCompilation)"
* @library /testlibrary
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @run main CheckCompileThresholdScaling
*/

@ -27,7 +27,7 @@
* @summary Verify processing of UseBMI1Instructions option on CPU with
* BMI1 feature support.
* @library /testlibrary /test/lib
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseBMI1InstructionsOnSupportedCPU
* BMISupportedCPUTest

@ -27,7 +27,7 @@
* @summary Verify processing of UseBMI1Instructions option on CPU without
* BMI1 feature support.
* @library /testlibrary /test/lib
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseBMI1InstructionsOnUnsupportedCPU
* BMIUnsupportedCPUTest

@ -27,7 +27,7 @@
* @summary Verify processing of UseCountLeadingZerosInstruction option
* on CPU with LZCNT support.
* @library /testlibrary /test/lib
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseCountLeadingZerosInstructionOnSupportedCPU
* BMISupportedCPUTest

@ -27,7 +27,7 @@
* @summary Verify processing of UseCountLeadingZerosInstruction option
* on CPU without LZCNT support.
* @library /testlibrary /test/lib
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseCountLeadingZerosInstructionOnUnsupportedCPU
* BMIUnsupportedCPUTest

@ -27,7 +27,7 @@
* @summary Verify processing of UseCountTrailingZerosInstruction option
* on CPU with TZCNT (BMI1 feature) support.
* @library /testlibrary /test/lib
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseCountTrailingZerosInstructionOnSupportedCPU
* BMISupportedCPUTest

@ -27,7 +27,7 @@
* @summary Verify processing of UseCountTrailingZerosInstruction option
* on CPU without TZCNT instruction (BMI1 feature) support.
* @library /testlibrary /test/lib
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @build TestUseCountTrailingZerosInstructionOnUnsupportedCPU
* BMIUnsupportedCPUTest

@ -26,7 +26,7 @@
* @bug 8072016
* @summary Infinite deoptimization/recompilation cycles in case of arraycopy with tightly coupled allocation
* @library /testlibrary /test/lib /compiler/whitebox /
* @modules java.base/sun.misc
* @modules java.base/jdk.internal.misc
* java.management
* @build TestArrayCopyNoInitDeopt
* @run main ClassFileInstaller sun.hotspot.WhiteBox
Some files were not shown because too many files have changed in this diff