Lana Steuck 2014-09-04 14:45:42 -07:00
commit c14ade213d
1602 changed files with 47728 additions and 46703 deletions

View File

@ -269,3 +269,5 @@ c5495e25c7258ab5f96a1ae14610887d76d2be63 jdk9-b18
d9ce05f36ffec3e5e8af62a92455c1c66a63c320 jdk9-b24
13a5c76976fe48e55c9727c25fae2d2ce7c05da0 jdk9-b25
cd6f4557e7fea5799ff3762ed7a80a743e75d5fd jdk9-b26
d06a6d3c66c08293b2a9650f3cc01fd55c620e65 jdk9-b27
f4269e8f454eb77763ecee228a88ae102a9aef6e jdk9-b28

View File

@ -269,3 +269,5 @@ ee4fd72b2ec3d92497f37163352f294aa695c6fb jdk9-b20
1d4a293fbec19dc2d5790bbb2c7dd0ed8f265484 jdk9-b24
aefd8899a8d6615fb34ba99b2e38996a7145baa8 jdk9-b25
d3ec8d048e6c3c46b6e0ee011cc551ad386dfba5 jdk9-b26
ba5645f2735b41ed085d07ba20fa7b322afff318 jdk9-b27
ea2f7981236f3812436958748ab3d26e80a35130 jdk9-b28

View File

@ -136,10 +136,12 @@ help:
$(info . make docs # Create all docs)
$(info . make docs-javadoc # Create just javadocs, depends on less than full docs)
$(info . make profiles # Create complete j2re compact profile images)
$(info . make bootcycle-images # Build images twice, second time with newly build JDK)
$(info . make bootcycle-images # Build images twice, second time with newly built JDK)
$(info . make install # Install the generated images locally)
$(info . make clean # Remove all files generated by make, but not those)
$(info . # generated by configure)
$(info . # generated by configure. Do not run clean and other)
$(info . # targets together as that might behave in an)
$(info . # unexpected way.)
$(info . make dist-clean # Remove all files, including configuration)
$(info . make help # Give some help on using make)
$(info . make test # Run tests, default is all tests (see TEST below))

View File

@ -377,7 +377,6 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
BASIC_REQUIRE_PROGS(CMP, cmp)
BASIC_REQUIRE_PROGS(COMM, comm)
BASIC_REQUIRE_PROGS(CP, cp)
BASIC_REQUIRE_PROGS(CPIO, cpio)
BASIC_REQUIRE_PROGS(CUT, cut)
BASIC_REQUIRE_PROGS(DATE, date)
BASIC_REQUIRE_PROGS(DIFF, [gdiff diff])
@ -427,6 +426,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
BASIC_PATH_PROGS(READLINK, [greadlink readlink])
BASIC_PATH_PROGS(DF, df)
BASIC_PATH_PROGS(SETFILE, SetFile)
BASIC_PATH_PROGS(CPIO, [cpio bsdcpio])
])
# Setup basic configuration paths, and platform-specific stuff related to PATHs.
@ -849,7 +849,12 @@ AC_DEFUN([BASIC_CHECK_FIND_DELETE],
if test -f $DELETEDIR/TestIfFindSupportsDelete; then
# No, it does not.
rm $DELETEDIR/TestIfFindSupportsDelete
FIND_DELETE="-exec rm \{\} \+"
if test "x$OPENJDK_TARGET_OS" = "xaix"; then
# AIX 'find' is buggy if called with '-exec {} \+' and an empty file list
FIND_DELETE="-print | xargs rm"
else
FIND_DELETE="-exec rm \{\} \+"
fi
AC_MSG_RESULT([no])
else
AC_MSG_RESULT([yes])
@ -954,7 +959,7 @@ AC_DEFUN([BASIC_CHECK_DIR_ON_LOCAL_DISK],
# not be the case in cygwin in certain conditions.
AC_DEFUN_ONCE([BASIC_CHECK_SRC_PERMS],
[
if test x"$OPENJDK_BUILD_OS" = xwindows; then
if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
file_to_test="$SRC_ROOT/LICENSE"
if test `$STAT -c '%a' "$file_to_test"` -lt 400; then
AC_MSG_ERROR([Bad file permissions on src files. This is usually caused by cloning the repositories with a non cygwin hg in a directory not created in cygwin.])

View File

@ -266,6 +266,14 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE_MSYS],
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(new_path)
new_path=`$WHICH "$new_path" 2> /dev/null`
# bat and cmd files are not always considered executable in MSYS causing which
# to not find them
if test "x$new_path" = x \
&& test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
&& test "x`$LS \"$path\" 2>/dev/null`" != x; then
new_path="$path"
BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(new_path)
fi
if test "x$new_path" = x; then
# It's still not found. Now this is an unrecoverable error.

View File

@ -370,18 +370,27 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
# Maximum amount of heap memory.
# Maximum stack size.
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BUILD_NUM_BITS" = x32; then
JVM_MAX_HEAP=1100M
if test "$JVM_MAX_HEAP" -gt "1100"; then
JVM_MAX_HEAP=1100
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=768
else
# Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit
# pointers are used. Apparently, we need to increase the heap and stack
# space for the jvm. More specifically, when running javac to build huge
# jdk batch
JVM_MAX_HEAP=1600M
if test "$JVM_MAX_HEAP" -gt "1600"; then
JVM_MAX_HEAP=1600
elif test "$JVM_MAX_HEAP" -lt "512"; then
JVM_MAX_HEAP=512
fi
STACK_SIZE=1536
fi
ADD_JVM_ARG_IF_OK([-Xmx$JVM_MAX_HEAP],boot_jdk_jvmargs_big,[$JAVA])
ADD_JVM_ARG_IF_OK([-Xmx${JVM_MAX_HEAP}M],boot_jdk_jvmargs_big,[$JAVA])
ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs_big,[$JAVA])
AC_MSG_RESULT([$boot_jdk_jvmargs_big])
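Note: the change above replaces the fixed -Xmx values (1100M / 1600M) with half of physical memory clamped into a per-bitness window, and moves the unit suffix into the ADD_JVM_ARG_IF_OK call (-Xmx${JVM_MAX_HEAP}M), since JVM_MAX_HEAP now holds a plain megabyte count. A minimal C++ sketch of the sizing rule (function and parameter names are mine, not from the build system):

#include <algorithm>
#include <cstdio>

// Clamp half of physical memory (MB) into a per-bitness window,
// mirroring the shell logic in the hunk above.
static int boot_jdk_max_heap_mb(int memory_size_mb, int build_num_bits) {
  int heap = memory_size_mb / 2;
  int cap = (build_num_bits == 32) ? 1100 : 1600;  // 64-bit javac needs more room
  return std::min(std::max(heap, 512), cap);
}

int main() {
  // e.g. an 8 GB 64-bit build machine hits the 1600 MB cap
  std::printf("-Xmx%dM\n", boot_jdk_max_heap_mb(8192, 64));
}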

View File

@ -131,8 +131,8 @@ AC_DEFUN_ONCE([BPERF_SETUP_BUILD_JOBS],
if test "x$with_jobs" = x; then
# Number of jobs was not specified, calculate.
AC_MSG_CHECKING([for appropriate number of jobs to run in parallel])
# Approximate memory in GB, rounding up a bit.
memory_gb=`expr $MEMORY_SIZE / 1100`
# Approximate memory in GB.
memory_gb=`expr $MEMORY_SIZE / 1024`
# Pick the lowest of memory in gb and number of cores.
if test "$memory_gb" -lt "$NUM_CORES"; then
JOBS="$memory_gb"
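Note: the divisor fix (1100 to 1024) turns memory_gb into a plain MB-to-GB conversion; the default job count is then the smaller of memory in GB and core count, so each parallel job has roughly a gigabyte available. The heuristic as a C++ sketch (names are illustrative):

#include <algorithm>

// Default 'make -j' heuristic from the hunk above: min(GB of RAM, cores).
static int default_jobs(int memory_size_mb, int num_cores) {
  int memory_gb = memory_size_mb / 1024;
  return std::min(memory_gb, num_cores);
}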
@ -291,16 +291,11 @@ AC_DEFUN_ONCE([BPERF_SETUP_SMART_JAVAC],
AC_MSG_ERROR([Could not execute server java: $SJAVAC_SERVER_JAVA])
fi
else
SJAVAC_SERVER_JAVA=""
# Hotspot specific options.
ADD_JVM_ARG_IF_OK([-verbosegc],SJAVAC_SERVER_JAVA,[$JAVA])
# JRockit specific options.
ADD_JVM_ARG_IF_OK([-Xverbose:gc],SJAVAC_SERVER_JAVA,[$JAVA])
SJAVAC_SERVER_JAVA="$JAVA $SJAVAC_SERVER_JAVA"
SJAVAC_SERVER_JAVA="$JAVA"
fi
AC_SUBST(SJAVAC_SERVER_JAVA)
if test "$MEMORY_SIZE" -gt "2500"; then
if test "$MEMORY_SIZE" -gt "3000"; then
ADD_JVM_ARG_IF_OK([-d64],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
if test "$JVM_ARG_OK" = true; then
JVM_64BIT=true
@ -308,34 +303,33 @@ AC_DEFUN_ONCE([BPERF_SETUP_SMART_JAVAC],
fi
fi
MX_VALUE=`expr $MEMORY_SIZE / 2`
if test "$JVM_64BIT" = true; then
if test "$MEMORY_SIZE" -gt "17000"; then
ADD_JVM_ARG_IF_OK([-Xms10G -Xmx10G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
# Set ms lower than mx since more than one instance of the server might
# get launched at the same time before they figure out which instance won.
MS_VALUE=512
if test "$MX_VALUE" -gt "2048"; then
MX_VALUE=2048
fi
if test "$MEMORY_SIZE" -gt "10000" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms6G -Xmx6G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
fi
if test "$MEMORY_SIZE" -gt "5000" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms1G -Xmx3G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
fi
if test "$MEMORY_SIZE" -gt "3800" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms1G -Xmx2500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
else
MS_VALUE=256
if test "$MX_VALUE" -gt "1500"; then
MX_VALUE=1500
fi
fi
if test "$MEMORY_SIZE" -gt "2500" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms1000M -Xmx1500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
fi
if test "$MEMORY_SIZE" -gt "1000" && test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms400M -Xmx1100M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
fi
if test "$JVM_ARG_OK" = false; then
ADD_JVM_ARG_IF_OK([-Xms256M -Xmx512M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
if test "$MX_VALUE" -lt "512"; then
MX_VALUE=512
fi
ADD_JVM_ARG_IF_OK([-Xms${MS_VALUE}M -Xmx${MX_VALUE}M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA])
AC_MSG_CHECKING([whether to use sjavac])
AC_ARG_ENABLE([sjavac], [AS_HELP_STRING([--enable-sjavac],
[use sjavac to do fast incremental compiles @<:@disabled@:>@])],
[ENABLE_SJAVAC="${enableval}"], [ENABLE_SJAVAC='no'])
if test "x$JVM_ARG_OK" = "xfalse"; then
AC_MSG_WARN([Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac])
ENABLE_SJAVAC=no;
fi
AC_MSG_CHECKING([whether to use sjavac])
AC_MSG_RESULT([$ENABLE_SJAVAC])
AC_SUBST(ENABLE_SJAVAC)
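Note: the rewritten block replaces the ladder of hard-coded -Xms/-Xmx pairs with one computed pair. -Xms stays small (512 MB on a 64-bit JVM, 256 MB on 32-bit) because, per the comment, several server instances may race to start before one wins; -Xmx is half of physical memory clamped to [512, 2048] MB on 64-bit or [512, 1500] MB on 32-bit. A hedged C++ sketch of that arithmetic (struct and names are mine):

#include <algorithm>

struct SjavacHeap { int ms_mb; int mx_mb; };

// Mirrors the BPERF_SETUP_SMART_JAVAC sizing above (illustrative only).
static SjavacHeap sjavac_server_heap(int memory_size_mb, bool jvm_64bit) {
  int mx = memory_size_mb / 2;
  mx = std::min(mx, jvm_64bit ? 2048 : 1500);
  mx = std::max(mx, 512);          // never below 512 MB
  int ms = jvm_64bit ? 512 : 256;  // low -Xms: multiple servers may launch
  return { ms, mx };
}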

View File

@ -142,7 +142,6 @@ JDKOPT_SETUP_JDK_VERSION_NUMBERS
###############################################################################
BOOTJDK_SETUP_BOOT_JDK
BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS
###############################################################################
#
@ -233,6 +232,9 @@ BPERF_SETUP_BUILD_CORES
BPERF_SETUP_BUILD_MEMORY
BPERF_SETUP_BUILD_JOBS
# Setup arguments for the boot jdk (after cores and memory have been setup)
BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS
# Setup smart javac (after cores and memory have been setup)
BPERF_SETUP_SMART_JAVAC

View File

@ -342,17 +342,15 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_FOR_OPTIMIZATION],
# no adjustment
;;
fastdebug )
# Add compile time bounds checks.
CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1"
# no adjustment
;;
slowdebug )
# Add runtime bounds checks and symbol info.
CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1"
# Add runtime stack smashing and undefined behavior checks
CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1"
if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then
CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG"
fi
;;
esac
@ -900,7 +898,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_MISC],
case "${TOOLCHAIN_TYPE}" in
microsoft)
CFLAGS_WARNINGS_ARE_ERRORS="/WX"
CFLAGS_WARNINGS_ARE_ERRORS="-WX"
;;
solstudio)
CFLAGS_WARNINGS_ARE_ERRORS="-errtags -errwarn=%all"

File diff suppressed because it is too large

View File

@ -173,6 +173,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
OPENJDK_BUILD_CPU_ENDIAN="$VAR_CPU_ENDIAN"
AC_SUBST(OPENJDK_BUILD_OS)
AC_SUBST(OPENJDK_BUILD_OS_API)
AC_SUBST(OPENJDK_BUILD_OS_ENV)
AC_SUBST(OPENJDK_BUILD_CPU)
AC_SUBST(OPENJDK_BUILD_CPU_ARCH)
AC_SUBST(OPENJDK_BUILD_CPU_BITS)
@ -194,6 +195,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
OPENJDK_TARGET_CPU_ENDIAN="$VAR_CPU_ENDIAN"
AC_SUBST(OPENJDK_TARGET_OS)
AC_SUBST(OPENJDK_TARGET_OS_API)
AC_SUBST(OPENJDK_TARGET_OS_ENV)
AC_SUBST(OPENJDK_TARGET_CPU)
AC_SUBST(OPENJDK_TARGET_CPU_ARCH)
AC_SUBST(OPENJDK_TARGET_CPU_BITS)

View File

@ -106,6 +106,7 @@ OPENJDK_TARGET_OS_EXPORT_DIR:=@OPENJDK_TARGET_OS_EXPORT_DIR@
# When not cross-compiling, it is the same as the target.
OPENJDK_BUILD_OS:=@OPENJDK_BUILD_OS@
OPENJDK_BUILD_OS_API:=@OPENJDK_BUILD_OS_API@
OPENJDK_BUILD_OS_ENV:=@OPENJDK_BUILD_OS_ENV@
OPENJDK_BUILD_CPU:=@OPENJDK_BUILD_CPU@
OPENJDK_BUILD_CPU_ARCH:=@OPENJDK_BUILD_CPU_ARCH@

View File

@ -244,12 +244,22 @@ AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_MSVCR_DLL],
# Need to check if the found msvcr is correct architecture
AC_MSG_CHECKING([found msvcr100.dll architecture])
MSVCR_DLL_FILETYPE=`$FILE -b "$POSSIBLE_MSVCR_DLL"`
if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
CORRECT_MSVCR_ARCH=386
if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
# The MSYS 'file' command returns "PE32 executable for MS Windows (DLL) (GUI) Intel 80386 32-bit"
# on x32 and "PE32+ executable for MS Windows (DLL) (GUI) Mono/.Net assembly" on x64 systems.
if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
CORRECT_MSVCR_ARCH="PE32 executable"
else
CORRECT_MSVCR_ARCH="PE32+ executable"
fi
else
CORRECT_MSVCR_ARCH=x86-64
if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
CORRECT_MSVCR_ARCH=386
else
CORRECT_MSVCR_ARCH=x86-64
fi
fi
if $ECHO "$MSVCR_DLL_FILETYPE" | $GREP $CORRECT_MSVCR_ARCH 2>&1 > /dev/null; then
if $ECHO "$MSVCR_DLL_FILETYPE" | $GREP "$CORRECT_MSVCR_ARCH" 2>&1 > /dev/null; then
AC_MSG_RESULT([ok])
MSVCR_DLL="$POSSIBLE_MSVCR_DLL"
AC_MSG_CHECKING([for msvcr100.dll])
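Note: MSYS file(1) reports "PE32 executable ..." / "PE32+ executable ..." rather than Cygwin's "386" / "x86-64", so the expected pattern now depends on the build environment, and $CORRECT_MSVCR_ARCH must be quoted in the grep since the MSYS patterns contain a space. The same classification as a small C++ sketch over a file(1) output string (helper name is mine):

#include <cstring>

// True if file(1) output matches the DLL architecture we expect,
// as in TOOLCHAIN_CHECK_POSSIBLE_MSVCR_DLL above.
static bool msvcr_arch_ok(const char* file_output, bool msys, bool bits32) {
  const char* expected = msys ? (bits32 ? "PE32 executable" : "PE32+ executable")
                              : (bits32 ? "386" : "x86-64");
  return std::strstr(file_output, expected) != nullptr;
}

Note that "PE32 executable" (with the trailing space before "executable") does not match inside "PE32+ executable", so the two MSYS cases stay distinct.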

View File

@ -77,6 +77,11 @@ do
shift
done
# debug mode
if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then
global_opts="${global_opts} --debug"
fi
# silence standard output?
if [ ${qflag} = "true" ] ; then
global_opts="${global_opts} -q"
@ -89,14 +94,26 @@ if [ ${vflag} = "true" ] ; then
fi
# Make sure we have a command.
if [ $# -lt 1 -o -z "${1:-}" ] ; then
echo "ERROR: No command to hg supplied!"
usage
if [ ${#} -lt 1 -o -z "${1:-}" ] ; then
echo "ERROR: No command to hg supplied!" > ${status_output}
usage > ${status_output}
fi
command="$1"; shift
# grab command
command="${1}"; shift
if [ ${vflag} = "true" ] ; then
echo "# Mercurial command: ${command}" > ${status_output}
fi
# capture command options and arguments (if any)
command_args="${@:-}"
if [ ${vflag} = "true" ] ; then
echo "# Mercurial command arguments: ${command_args}" > ${status_output}
fi
# Clean out the temporary directory that stores the pid files.
tmp=/tmp/forest.$$
rm -f -r ${tmp}
@ -104,7 +121,8 @@ mkdir -p ${tmp}
if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then
echo "DEBUG: temp files are in: ${tmp}"
# ignores redirection.
echo "DEBUG: temp files are in: ${tmp}" >&2
fi
# Check if we can use fifos for monitoring sub-process completion.
@ -377,21 +395,33 @@ else
fi
fi
done
if [ ${have_fifos} = "true" ]; then
# done with the fifo
exec 3>&-
fi
fi
# Wait for all subprocesses to complete
wait
# Terminate with exit 0 only if all subprocesses were successful
# Terminate with highest exit code of subprocesses
ec=0
if [ -d ${tmp} ]; then
rcfiles="`(ls -a ${tmp}/*.pid.rc 2> /dev/null) || echo ''`"
for rc in ${rcfiles} ; do
exit_code=`cat ${rc} | tr -d ' \n\r'`
if [ "${exit_code}" != "0" ] ; then
if [ ${exit_code} -gt 1 ]; then
# mercurial exit codes greater than "1" signal errors.
repo="`echo ${rc} | sed -e 's@^'${tmp}'@@' -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`"
echo "WARNING: ${repo} exited abnormally (${exit_code})" > ${status_output}
ec=1
fi
if [ ${exit_code} -gt ${ec} ]; then
# assume that larger exit codes are more significant
ec=${exit_code}
fi
fi
done
fi
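Note: the loop now propagates the largest child exit code instead of flattening everything to 1, and warns only for codes greater than 1, since Mercurial reserves exit code 1 for benign "nothing to do" results. The aggregation in a C++ sketch (hypothetical names):

#include <algorithm>
#include <cstdio>
#include <vector>

// Combine per-repo hg exit codes as the updated hgforest.sh loop does:
// warn on codes > 1 (hg errors) and return the maximum code seen.
static int combine_exit_codes(const std::vector<int>& codes) {
  int ec = 0;
  for (int code : codes) {
    if (code > 1)
      std::fprintf(stderr, "WARNING: a repo exited abnormally (%d)\n", code);
    ec = std::max(ec, code);
  }
  return ec;
}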

View File

@ -269,3 +269,5 @@ ddc07abf4307855c0dc904cc5c96cc764023a930 jdk9-b22
8a44142bb7fc8118f70f91a1b97c12dfc50563ee jdk9-b24
da08cca6b97f41b7081a3e176dcb400af6e4bb26 jdk9-b25
6c777df597bbf5abba3488d44c401edfe73c74af jdk9-b26
7e06bf1dcb0907b80ddf59315426ce9ce775e56d jdk9-b27
a00b04ef067e39f50b9a0fea6f1904e35d632a73 jdk9-b28

View File

@ -50,8 +50,9 @@ $(eval $(call SetupJavaCompilation,BUILD_IDLJ, \
INCLUDES := com/sun/tools/corba/se/idl, \
EXCLUDE_FILES := ResourceBundleUtil.java))
# Force the language to english for predictable source code generation.
TOOL_IDLJ_CMD := $(JAVA) -cp $(CORBA_OUTPUTDIR)/idlj_classes \
com.sun.tools.corba.se.idl.toJavaPortable.Compile
-Duser.language=en com.sun.tools.corba.se.idl.toJavaPortable.Compile
################################################################################

View File

@ -38,11 +38,11 @@ package org.omg.CORBA;
* OMG specifications :
* <ul>
* <li> ORB core as defined by CORBA 2.3.1
* (<a href="http://cgi.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* (<a href="http://www.omg.org/cgi-bin/doc?formal/99-10-07">formal/99-10-07</a>)
* </li>
*
* <li> IDL/Java Language Mapping as defined in
* <a href="http://cgi.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* <a href="http://www.omg.org/cgi-bin/doc?ptc/00-01-08">ptc/00-01-08</a>
* </li>
* </ul>
*/

View File

@ -67,7 +67,7 @@ if [ "x$hgwhere" = "x" ]; then
error "Could not locate Mercurial command"
fi
hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`"
hgversion="`LANGUAGE=en hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`"
if [ "x${hgversion}" = "x" ] ; then
error "Could not determine Mercurial version of $hgwhere"
fi

View File

@ -429,3 +429,5 @@ dd472cdacc32e3afc7c5bfa7ef16ea0e0befb7fa jdk9-b23
dde2d03b0ea46a27650839e3a1d212c7c1f7b4c8 jdk9-b24
6de94e8693240cec8aae11f6b42f43433456a733 jdk9-b25
48b95a073d752d6891cc0d1d2836b321ecf3ce0c jdk9-b26
f95347244306affc32ce3056f27ceff7b2100810 jdk9-b27
657294869d7ff063e055f5492cab7ce5612ca851 jdk9-b28

View File

@ -314,7 +314,7 @@ static void * pathmap_dlopen(const char * name, int mode) {
handle = dlopen(name, mode);
}
if (_libsaproc_debug) {
printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%x\n", name, handle);
printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%lx\n", name, (unsigned long) handle);
}
return handle;
}
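Note: the old %x conversion reads only 32 bits of the handle, truncating (and technically mismatching) a 64-bit pointer on LP64 platforms; the fix casts to unsigned long and prints with %lx. The conventional portable form is %p. A minimal sketch of the difference (illustrative only; assumes a 64-bit host):

#include <cstdio>

int main() {
  void* handle = (void*) 0x12345678abcdULL;
  // %x would show only the low 32 bits on LP64; these two do not:
  std::printf("as ulong: 0x%lx\n", (unsigned long) handle);
  std::printf("as %%p:    %p\n", handle);
}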
@ -661,30 +661,30 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
// read FileMapHeader
size_t n = read(fd, pheader, sizeof(struct FileMapHeader));
if (n != sizeof(struct FileMapHeader)) {
free(pheader);
close(fd);
char errMsg[ERR_MSG_SIZE];
sprintf(errMsg, "unable to read shared archive file map header from %s", classes_jsa);
close(fd);
free(pheader);
THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
}
// check file magic
if (pheader->_magic != 0xf00baba2) {
free(pheader);
close(fd);
char errMsg[ERR_MSG_SIZE];
sprintf(errMsg, "%s has bad shared archive magic 0x%x, expecting 0xf00baba2",
classes_jsa, pheader->_magic);
close(fd);
free(pheader);
THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
}
// check version
if (pheader->_version != CURRENT_ARCHIVE_VERSION) {
free(pheader);
close(fd);
char errMsg[ERR_MSG_SIZE];
sprintf(errMsg, "%s has wrong shared archive version %d, expecting %d",
classes_jsa, pheader->_version, CURRENT_ARCHIVE_VERSION);
close(fd);
free(pheader);
THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
}
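Note: each error path previously ran free(pheader) before the sprintf that still dereferences pheader (e.g. pheader->_magic and pheader->_version), a use-after-free; the fix formats the message first, then releases the descriptor and the buffer. The hazard in miniature (hypothetical struct, not the VM's):

#include <cstdio>
#include <cstdlib>

struct FileMapHeader { unsigned _magic; };

static void report_bad_magic(FileMapHeader* pheader, const char* path) {
  char errMsg[256];
  // Format while pheader is still live...
  std::snprintf(errMsg, sizeof(errMsg),
                "%s has bad shared archive magic 0x%x, expecting 0xf00baba2",
                path, pheader->_magic);
  // ...and only then free it. Freeing first, as the old code did,
  // makes the snprintf above read freed memory.
  std::free(pheader);
  std::fprintf(stderr, "%s\n", errMsg);
}

int main() {
  FileMapHeader* h = static_cast<FileMapHeader*>(std::malloc(sizeof(FileMapHeader)));
  h->_magic = 0xdeadbeef;
  report_bad_magic(h, "classes.jsa");
}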

View File

@ -45,8 +45,8 @@ import sun.jvm.hotspot.types.TypeDataBase;
public class G1CollectedHeap extends SharedHeap {
// HeapRegionSeq _seq;
static private long hrsFieldOffset;
// MemRegion _g1_committed;
static private long g1CommittedFieldOffset;
// MemRegion _g1_reserved;
static private long g1ReservedFieldOffset;
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _g1mm;
@ -68,7 +68,6 @@ public class G1CollectedHeap extends SharedHeap {
Type type = db.lookupType("G1CollectedHeap");
hrsFieldOffset = type.getField("_hrs").getOffset();
g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
@ -76,9 +75,7 @@ public class G1CollectedHeap extends SharedHeap {
}
public long capacity() {
Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
MemRegion g1Committed = new MemRegion(g1CommittedAddr);
return g1Committed.byteSize();
return hrs().capacity();
}
public long used() {

View File

@ -93,19 +93,35 @@ public class G1HeapRegionTable extends VMObject {
private class HeapRegionIterator implements Iterator<HeapRegion> {
private long index;
private long length;
private HeapRegion next;
public HeapRegion positionToNext() {
HeapRegion result = next;
while (index < length && at(index) == null) {
index++;
}
if (index < length) {
next = at(index);
index++; // restart search at next element
} else {
next = null;
}
return result;
}
@Override
public boolean hasNext() { return index < length; }
public boolean hasNext() { return next != null; }
@Override
public HeapRegion next() { return at(index++); }
public HeapRegion next() { return positionToNext(); }
@Override
public void remove() { /* not supported */ }
public void remove() { /* not supported */ }
HeapRegionIterator(long committedLength) {
HeapRegionIterator(long totalLength) {
index = 0;
length = committedLength;
length = totalLength;
positionToNext();
}
}
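Note: because the region table may now contain null slots (committed regions are no longer a dense prefix), hasNext() can no longer be "index < length"; the iterator instead keeps a one-element look-ahead that positionToNext() advances past nulls. The same pattern in a C++ sketch (the Java original is above; names here are illustrative):

#include <cstddef>
#include <vector>

// Look-ahead iterator over a table that may contain null entries.
template <typename T>
class SparseTableIterator {
  const std::vector<T*>& table_;
  std::size_t index_ = 0;
  T* next_ = nullptr;

  void position_to_next() {
    next_ = nullptr;
    while (index_ < table_.size() && table_[index_] == nullptr) index_++;
    if (index_ < table_.size()) next_ = table_[index_++];
  }

 public:
  explicit SparseTableIterator(const std::vector<T*>& t) : table_(t) {
    position_to_next();  // prime the look-ahead, as the constructor above does
  }
  bool has_next() const { return next_ != nullptr; }
  T* next() { T* result = next_; position_to_next(); return result; }
};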

View File

@ -43,7 +43,7 @@ public class HeapRegionSeq extends VMObject {
// G1HeapRegionTable _regions
static private long regionsFieldOffset;
// uint _committed_length
static private CIntegerField committedLengthField;
static private CIntegerField numCommittedField;
static {
VM.registerVMInitializedObserver(new Observer() {
@ -57,7 +57,7 @@ public class HeapRegionSeq extends VMObject {
Type type = db.lookupType("HeapRegionSeq");
regionsFieldOffset = type.getField("_regions").getOffset();
committedLengthField = type.getCIntegerField("_committed_length");
numCommittedField = type.getCIntegerField("_num_committed");
}
private G1HeapRegionTable regions() {
@ -66,16 +66,20 @@ public class HeapRegionSeq extends VMObject {
regionsAddr);
}
public long capacity() {
return length() * HeapRegion.grainBytes();
}
public long length() {
return regions().length();
}
public long committedLength() {
return committedLengthField.getValue(addr);
return numCommittedField.getValue(addr);
}
public Iterator<HeapRegion> heapRegionIterator() {
return regions().heapRegionIterator(committedLength());
return regions().heapRegionIterator(length());
}
public HeapRegionSeq(Address addr) {

View File

@ -508,13 +508,9 @@ endif
ifeq ($(USE_CLANG),)
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif
endif

View File

@ -70,7 +70,8 @@ ifeq ($(INCLUDE_CDS), false)
CXXFLAGS += -DINCLUDE_CDS=0
CFLAGS += -DINCLUDE_CDS=0
Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp
Src_Files_EXCLUDE += filemap.cpp metaspaceShared*.cpp sharedPathsMiscInfo.cpp \
systemDictionaryShared.cpp classLoaderExt.cpp sharedClassUtil.cpp
endif
ifeq ($(INCLUDE_ALL_GCS), false)
@ -119,8 +120,8 @@ ifeq ($(INCLUDE_NMT), false)
CFLAGS += -DINCLUDE_NMT=0
Src_Files_EXCLUDE += \
memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
memTracker.cpp nmtDCmd.cpp
memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
endif
-include $(HS_ALT_MAKE)/excludeSrc.make

View File

@ -356,14 +356,15 @@ jprt.make.rule.test.targets.standard.internalvmtests = \
${jprt.my.windows.x64}-fastdebug-c2-internalvmtests
jprt.make.rule.test.targets.standard.reg.group = \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GROUP, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GROUP, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-GROUP, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GROUP, \
${jprt.my.windows.i586}-{product|fastdebug}-c2-GROUP, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GROUP, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-GROUP, \
${jprt.my.windows.i586}-{product|fastdebug}-c1-GROUP
${jprt.my.solaris.sparcv9}-fastdebug-c2-GROUP, \
${jprt.my.solaris.x64}-fastdebug-c2-GROUP, \
${jprt.my.linux.i586}-fastdebug-c2-GROUP, \
${jprt.my.linux.x64}-fastdebug-c2-GROUP, \
${jprt.my.macosx.x64}-fastdebug-c2-GROUP, \
${jprt.my.windows.i586}-fastdebug-c2-GROUP, \
${jprt.my.windows.x64}-fastdebug-c2-GROUP, \
${jprt.my.linux.i586}-fastdebug-c1-GROUP, \
${jprt.my.windows.i586}-fastdebug-c1-GROUP
jprt.make.rule.test.targets.standard = \
${jprt.make.rule.test.targets.standard.client}, \
@ -373,6 +374,7 @@ jprt.make.rule.test.targets.standard = \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime_closed}, \
${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability}
jprt.make.rule.test.targets.embedded = \

View File

@ -365,16 +365,13 @@ endif
ifeq ($(USE_CLANG),)
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif
endif
# If we are building HEADLESS, pass on to VM
# so it can set the java.awt.headless property
ifdef HEADLESS

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -58,6 +58,8 @@ DTRACE_SRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/dtrace
DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
DTRACE = dtrace
DTRACE.o = $(DTRACE).o
DTRACE_JHELPER = dtrace_jhelper
DTRACE_JHELPER.o = $(DTRACE_JHELPER).o
# to remove '-g' option which causes link problems
# also '-z nodefs' is used as workaround
@ -255,7 +257,10 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
endif
$(DTRACE).d: $(DTRACE_COMMON_SRCDIR)/hotspot.d $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d \
$(DTRACE_COMMON_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
$(DTRACE_COMMON_SRCDIR)/hs_private.d
$(QUIETLY) cat $^ > $@
$(DTRACE_JHELPER).d: $(DTRACE_SRCDIR)/jhelper.d
$(QUIETLY) cat $^ > $@
DTraced_Files = ciEnv.o \
@ -280,7 +285,7 @@ DTraced_Files = ciEnv.o \
vmGCOperations.o \
# Dtrace is available, so we build $(DTRACE.o)
$(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files)
$(DTRACE.o): $(DTRACE).d $(DTraced_Files)
@echo Compiling $(DTRACE).d
$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
@ -344,6 +349,11 @@ $(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOut
dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h
# The jhelper.d and hotspot probes are separated into two different SUNW_dof sections.
# Now the jhelper.d is built without the -Xlazyload flag.
$(DTRACE_JHELPER.o) : $(DTRACE_JHELPER).d $(JVMOFFS).h $(JVMOFFS)Index.h
@echo Compiling $(DTRACE_JHELPER).d
$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -o $@ -s $(DTRACE_JHELPER).d
.PHONY: dtraceCheck
@ -372,7 +382,7 @@ endif # ifneq ("$(patchDtraceFound)", "")
ifneq ("${DTRACE_PROG}", "")
ifeq ("${HOTSPOT_DISABLE_DTRACE_PROBES}", "")
DTRACE_OBJS = $(DTRACE.o) $(JVMOFFS.o)
DTRACE_OBJS = $(DTRACE.o) $(JVMOFFS.o) $(DTRACE_JHELPER.o)
CFLAGS += $(DTRACE_INCL) -DDTRACE_ENABLED
MAPFILE_DTRACE_OPT = $(MAPFILE_DTRACE)

View File

@ -240,11 +240,7 @@ ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
endif
# Enable bounds checking.
# _FORTIFY_SOURCE appears in GCC 4.0+
ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1"
# compile time size bounds checks
FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1
# and runtime size bounds checks and paranoid stack smashing checks.
DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1
# stack smashing checks.
DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1
endif

View File

@ -298,6 +298,7 @@ class Assembler : public AbstractAssembler {
LWZ_OPCODE = (32u << OPCODE_SHIFT),
LWZX_OPCODE = (31u << OPCODE_SHIFT | 23u << 1),
LWZU_OPCODE = (33u << OPCODE_SHIFT),
LWBRX_OPCODE = (31u << OPCODE_SHIFT | 534 << 1),
LHA_OPCODE = (42u << OPCODE_SHIFT),
LHAX_OPCODE = (31u << OPCODE_SHIFT | 343u << 1),
@ -306,6 +307,7 @@ class Assembler : public AbstractAssembler {
LHZ_OPCODE = (40u << OPCODE_SHIFT),
LHZX_OPCODE = (31u << OPCODE_SHIFT | 279u << 1),
LHZU_OPCODE = (41u << OPCODE_SHIFT),
LHBRX_OPCODE = (31u << OPCODE_SHIFT | 790 << 1),
LBZ_OPCODE = (34u << OPCODE_SHIFT),
LBZX_OPCODE = (31u << OPCODE_SHIFT | 87u << 1),
@ -1364,11 +1366,17 @@ class Assembler : public AbstractAssembler {
inline void lwax( Register d, Register s1, Register s2);
inline void lwa( Register d, int si16, Register s1);
// 4 bytes reversed
inline void lwbrx( Register d, Register s1, Register s2);
// 2 bytes
inline void lhzx( Register d, Register s1, Register s2);
inline void lhz( Register d, int si16, Register s1);
inline void lhzu( Register d, int si16, Register s1);
// 2 bytes reversed
inline void lhbrx( Register d, Register s1, Register s2);
// 2 bytes
inline void lhax( Register d, Register s1, Register s2);
inline void lha( Register d, int si16, Register s1);
@ -1858,10 +1866,12 @@ class Assembler : public AbstractAssembler {
inline void lwz( Register d, int si16);
inline void lwax( Register d, Register s2);
inline void lwa( Register d, int si16);
inline void lwbrx(Register d, Register s2);
inline void lhzx( Register d, Register s2);
inline void lhz( Register d, int si16);
inline void lhax( Register d, Register s2);
inline void lha( Register d, int si16);
inline void lhbrx(Register d, Register s2);
inline void lbzx( Register d, Register s2);
inline void lbz( Register d, int si16);
inline void ldx( Register d, Register s2);

View File

@ -263,10 +263,14 @@ inline void Assembler::lwzu( Register d, int si16, Register s1) { assert(d !=
inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lwa( Register d, int si16, Register s1) { emit_int32(LWA_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}
inline void Assembler::lwbrx( Register d, Register s1, Register s2) { emit_int32(LWBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lhz( Register d, int si16, Register s1) { emit_int32(LHZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lhzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
inline void Assembler::lhbrx( Register d, Register s1, Register s2) { emit_int32(LHBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
inline void Assembler::lha( Register d, int si16, Register s1) { emit_int32(LHA_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
inline void Assembler::lhau( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
@ -736,10 +740,12 @@ inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE
inline void Assembler::lwz( Register d, int si16 ) { emit_int32( LWZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lwa( Register d, int si16 ) { emit_int32( LWA_OPCODE | rt(d) | ds(si16));}
inline void Assembler::lwbrx(Register d, Register s2) { emit_int32( LWBRX_OPCODE| rt(d) | rb(s2));}
inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lhz( Register d, int si16 ) { emit_int32( LHZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lha( Register d, int si16 ) { emit_int32( LHA_OPCODE | rt(d) | d1(si16));}
inline void Assembler::lhbrx(Register d, Register s2) { emit_int32( LHBRX_OPCODE| rt(d) | rb(s2));}
inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lbz( Register d, int si16 ) { emit_int32( LBZ_OPCODE | rt(d) | d1(si16));}
inline void Assembler::ld( Register d, int si16 ) { emit_int32( LD_OPCODE | rt(d) | ds(si16));}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,8 +26,9 @@
#ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
address generate_normal_entry(void);
address generate_native_entry(void);
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
void lock_method(void);
void unlock_method(void);

View File

@ -938,8 +938,9 @@ void CppInterpreterGenerator::generate_counter_incr(Label& overflow) {
// Interpreter stub for calling a native method. (C++ interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
// The synchronized parameter is ignored.
//
address CppInterpreterGenerator::generate_native_entry(void) {
address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
if (native_entry != NULL) return native_entry;
address entry = __ pc();
@ -1729,7 +1730,8 @@ void CppInterpreterGenerator::generate_more_monitors() {
__ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base); // Mark lock as unused
}
address CppInterpreterGenerator::generate_normal_entry(void) {
// The synchronized parameter is ignored
address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
if (interpreter_frame_manager != NULL) return interpreter_frame_manager;
address entry = __ pc();
@ -2789,38 +2791,6 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
return interpreter_frame_manager;
}
// Generate code for various sorts of method entries
//
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized : break;
case Interpreter::native : // Fall thru
case Interpreter::native_synchronized : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry(); break;
case Interpreter::empty : break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
// These are special interpreter intrinsics which we don't support so far.
case Interpreter::java_lang_math_sin : break;
case Interpreter::java_lang_math_cos : break;
case Interpreter::java_lang_math_tan : break;
case Interpreter::java_lang_math_abs : break;
case Interpreter::java_lang_math_log : break;
case Interpreter::java_lang_math_log10 : break;
case Interpreter::java_lang_math_sqrt : break;
case Interpreter::java_lang_math_pow : break;
case Interpreter::java_lang_math_exp : break;
case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}
if (entry_point) {
return entry_point;
}
return ((InterpreterGenerator*)this)->generate_normal_entry();
}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {

View File

@ -119,9 +119,15 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
// Call the Interpreter::remove_activation_preserving_args_entry()
// func to get the address of the same-named entrypoint in the
// generated interpreter code.
#if defined(ABI_ELFv2)
call_c(CAST_FROM_FN_PTR(address,
Interpreter::remove_activation_preserving_args_entry),
relocInfo::none);
#else
call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
Interpreter::remove_activation_preserving_args_entry),
relocInfo::none);
#endif
// Jump to Interpreter::_remove_activation_preserving_args_entry.
mtctr(R3_RET);
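Note: under the older PPC64 ELFv1 ABI a function symbol names a three-word descriptor rather than code, which is why call_c takes a FunctionDescriptor*; ELFv2 dropped descriptors, so the target is the entry address itself, hence the new #if. A sketch of the ELFv1 layout (field names follow the usual ABI convention, not this file):

#include <cstdint>

// PPC64 ELFv1: a "function pointer" really points at this descriptor.
// ELFv2 removed it; the pointer is the entry address directly.
struct FunctionDescriptor {
  uintptr_t entry;  // address of the first instruction
  uintptr_t toc;    // TOC base the callee expects in r2
  uintptr_t env;    // environment pointer (unused by C)
};

inline uintptr_t call_target(const void* fn, bool elfv2) {
  return elfv2 ? reinterpret_cast<uintptr_t>(fn)
               : static_cast<const FunctionDescriptor*>(fn)->entry;
}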
@ -331,29 +337,40 @@ void InterpreterMacroAssembler::empty_expression_stack() {
void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int bcp_offset,
Register Rdst,
signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
if (bcp_offset) {
load_const_optimized(Rdst, bcp_offset);
lhbrx(Rdst, R14_bcp, Rdst);
} else {
lhbrx(Rdst, R14_bcp);
}
if (is_signed == Signed) {
extsh(Rdst, Rdst);
}
#else
// Read Java big endian format.
if (is_signed == Signed) {
lha(Rdst, bcp_offset, R14_bcp);
} else {
lhz(Rdst, bcp_offset, R14_bcp);
}
#if 0
assert(Rtmp != Rdst, "need separate temp register");
Register Rfirst = Rtmp;
lbz(Rfirst, bcp_offset, R14_bcp); // first byte
lbz(Rdst, bcp_offset+1, R14_bcp); // second byte
// Rdst = ((Rfirst<<8) & 0xFF00) | (Rdst &~ 0xFF00)
rldimi(/*RA=*/Rdst, /*RS=*/Rfirst, /*sh=*/8, /*mb=*/48);
if (is_signed == Signed) {
extsh(Rdst, Rdst);
}
#endif
}
void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int bcp_offset,
Register Rdst,
signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
if (bcp_offset) {
load_const_optimized(Rdst, bcp_offset);
lwbrx(Rdst, R14_bcp, Rdst);
} else {
lwbrx(Rdst, R14_bcp);
}
if (is_signed == Signed) {
extsw(Rdst, Rdst);
}
#else
// Read Java big endian format.
if (bcp_offset & 3) { // Offset unaligned?
load_const_optimized(Rdst, bcp_offset);
@ -369,18 +386,26 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int bcp_offset
lwz(Rdst, bcp_offset, R14_bcp);
}
}
#endif
}
// Load the constant pool cache index from the bytecode stream.
//
// Kills / writes:
// - Rdst, Rscratch
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
// Cache index is always in the native format, courtesy of Rewriter.
if (index_size == sizeof(u2)) {
get_2_byte_integer_at_bcp(bcp_offset, Rdst, Unsigned);
lhz(Rdst, bcp_offset, R14_bcp);
} else if (index_size == sizeof(u4)) {
get_4_byte_integer_at_bcp(bcp_offset, Rdst, Signed);
if (bcp_offset & 3) {
load_const_optimized(Rdst, bcp_offset);
lwax(Rdst, R14_bcp, Rdst);
} else {
lwa(Rdst, bcp_offset, R14_bcp);
}
assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
nand(Rdst, Rdst, Rdst); // convert to plain index
} else if (index_size == sizeof(u1)) {
@ -397,6 +422,29 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, int b
add(cache, R27_constPoolCache, cache);
}
// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
// from (Rsrc)+offset.
void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
if (offset) {
load_const_optimized(Rdst, offset);
lwbrx(Rdst, Rdst, Rsrc);
} else {
lwbrx(Rdst, Rsrc);
}
if (is_signed == Signed) {
extsw(Rdst, Rdst);
}
#else
if (is_signed == Signed) {
lwa(Rdst, offset, Rsrc);
} else {
lwz(Rdst, offset, Rsrc);
}
#endif
}
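Note: get_u4 hides the endianness split. Java bytecode stores multi-byte operands big-endian, so a little-endian PPC64 host uses the byte-reversing lwbrx load (plus extsw for the signed case), while a big-endian host can use plain lwa/lwz. A portable C++ equivalent of the same read, as a sketch:

#include <cstdint>

// Read a big-endian u4 from a bytecode stream regardless of host order,
// the portable counterpart of get_u4 above.
inline uint32_t get_u4_be(const uint8_t* p) {
  return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
         (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
}

// 'Signed' variant: reinterpret the 32-bit pattern as a signed value.
inline int32_t get_s4_be(const uint8_t* p) {
  return static_cast<int32_t>(get_u4_be(p));
}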
// Load object from cpool->resolved_references(index).
void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
assert_different_registers(result, index);

View File

@ -130,6 +130,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);
// common code

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -31,7 +31,12 @@
private:
address generate_abstract_entry(void);
address generate_accessor_entry(void);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry(void);
// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP

View File

@ -428,6 +428,19 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type
return entry;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry = __ pc();
address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
assert(normal_entry != NULL, "should already be generated.");
__ branch_to_entry(normal_entry, R11_scratch1);
__ flush();
return entry;
}
// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {
@ -485,203 +498,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
return entry;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_accessor_entry(void) {
if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
return NULL;
}
Label Lslow_path, Lacquire;
const Register
Rclass_or_obj = R3_ARG1,
Rconst_method = R4_ARG2,
Rcodes = Rconst_method,
Rcpool_cache = R5_ARG3,
Rscratch = R11_scratch1,
Rjvmti_mode = Rscratch,
Roffset = R12_scratch2,
Rflags = R6_ARG4,
Rbtable = R7_ARG5;
static address branch_table[number_of_states];
address entry = __ pc();
// Check for safepoint:
// Ditch this, real men don't need safepoint checks.
// Also check for JVMTI mode
// Check for null obj, take slow path if so.
__ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
__ lwz(Rjvmti_mode, thread_(interp_only_mode));
__ cmpdi(CCR1, Rclass_or_obj, 0);
__ cmpwi(CCR0, Rjvmti_mode, 0);
__ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
__ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
// Do 2 things in parallel:
// 1. Load the index out of the first instruction word, which looks like this:
// <0x2a><0xb4><index (2 byte, native endianness)>.
// 2. Load constant pool cache base.
__ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
__ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
__ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
__ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
// Get the const pool entry by means of <index>.
const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
__ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
__ add(Rcpool_cache, Rscratch, Rcpool_cache);
// Check if cpool cache entry is resolved.
// We are resolved if the indices offset contains the current bytecode.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
// Big Endian:
__ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
__ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
__ bne(CCR0, Lslow_path);
__ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
// Finally, start loading the value: Get cp cache entry into regs.
__ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
__ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
// Following code is from templateTable::getfield_or_static
// Load pointer to branch table
__ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
// Get volatile flag
__ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
// note: sync is needed before volatile load on PPC64
// Check field type
__ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
#ifdef ASSERT
Label LFlagInvalid;
__ cmpldi(CCR0, Rflags, number_of_states);
__ bge(CCR0, LFlagInvalid);
__ ld(R9_ARG7, 0, R1_SP);
__ ld(R10_ARG8, 0, R21_sender_SP);
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
__ asm_assert_eq("backlink", 0x543);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
// Load from branch table and dispatch (volatile case: one instruction ahead)
__ sldi(Rflags, Rflags, LogBytesPerWord);
__ cmpwi(CCR6, Rscratch, 1); // volatile?
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
}
__ ldx(Rbtable, Rbtable, Rflags);
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
}
__ mtctr(Rbtable);
__ bctr();
#ifdef ASSERT
__ bind(LFlagInvalid);
__ stop("got invalid flag", 0x6541);
bool all_uninitialized = true,
all_initialized = true;
for (int i = 0; i<number_of_states; ++i) {
all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
all_initialized = all_initialized && (branch_table[i] != NULL);
}
assert(all_uninitialized != all_initialized, "consistency"); // either or
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
__ stop("unexpected type", 0x6551);
#endif
if (branch_table[itos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[itos] = __ pc(); // non-volatile_entry point
__ lwax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[ltos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ltos] = __ pc(); // non-volatile_entry point
__ ldx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[btos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[btos] = __ pc(); // non-volatile_entry point
__ lbzx(R3_RET, Rclass_or_obj, Roffset);
__ extsb(R3_RET, R3_RET);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[ctos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ctos] = __ pc(); // non-volatile_entry point
__ lhzx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[stos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[stos] = __ pc(); // non-volatile_entry point
__ lhax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
__ blr();
}
if (branch_table[atos] == 0) { // generate only once
__ align(32, 28, 28); // align load
__ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[atos] = __ pc(); // non-volatile_entry point
__ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
__ verify_oop(R3_RET);
//__ dcbt(R3_RET); // prefetch
__ beq(CCR6, Lacquire);
__ blr();
}
__ align(32, 12);
__ bind(Lacquire);
__ twi_0(R3_RET);
__ isync(); // acquire
__ blr();
#ifdef ASSERT
for (int i = 0; i<number_of_states; ++i) {
assert(branch_table[i], "accessor_entry initialization");
//tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
}
#endif
__ bind(Lslow_path);
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
__ flush();
return entry;
}
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
// into R8 and return quickly
@ -713,7 +529,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
address entry = __ pc();
@ -768,7 +583,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
return entry;
} else {
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
}

View File

@ -1283,8 +1283,6 @@ int Compile::ConstantTable::calculate_table_base_offset() const {
bool MachConstantBaseNode::requires_postalloc_expand() const { return true; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
Compile *C = ra_->C;
iRegPdstOper *op_dst = new iRegPdstOper();
MachNode *m1 = new loadToc_hiNode();
MachNode *m2 = new loadToc_loNode();
@ -2229,7 +2227,7 @@ const bool Matcher::isSimpleConstant64(jlong value) {
}
/* TODO: PPC port
// Make a new machine dependent decode node (with its operands).
MachTypeNode *Matcher::make_decode_node(Compile *C) {
MachTypeNode *Matcher::make_decode_node() {
assert(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0,
"This method is only implemented for unscaled cOops mode so far");
MachTypeNode *decode = new decodeN_unscaledNode();
@ -2593,7 +2591,7 @@ typedef struct {
MachNode *_last;
} loadConLNodesTuple;
loadConLNodesTuple loadConLNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
loadConLNodesTuple loadConLNodesTuple_create(PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
OptoReg::Name reg_second, OptoReg::Name reg_first) {
loadConLNodesTuple nodes;
@ -2669,7 +2667,7 @@ encode %{
enc_class postalloc_expand_load_long_constant(iRegLdst dst, immL src, iRegLdst toc) %{
// Create new nodes.
loadConLNodesTuple loadConLNodes =
loadConLNodesTuple_create(C, ra_, n_toc, op_src,
loadConLNodesTuple_create(ra_, n_toc, op_src,
ra_->get_reg_second(this), ra_->get_reg_first(this));
// Push new nodes.
@ -3391,7 +3389,7 @@ encode %{
immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));
loadConLNodesTuple loadConLNodes =
loadConLNodesTuple_create(C, ra_, n_toc, op_repl,
loadConLNodesTuple_create(ra_, n_toc, op_repl,
ra_->get_reg_second(this), ra_->get_reg_first(this));
// Push new nodes.
@ -3611,7 +3609,7 @@ encode %{
// Create the nodes for loading the IC from the TOC.
loadConLNodesTuple loadConLNodes_IC =
loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));
// Create the call node.
@ -3765,7 +3763,7 @@ encode %{
#if defined(ABI_ELFv2)
jlong entry_address = (jlong) this->entry_point();
assert(entry_address, "need address here");
loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address),
loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
#else
// Get the struct that describes the function we are about to call.
@ -3777,13 +3775,13 @@ encode %{
loadConLNodesTuple loadConLNodes_Toc;
// Create nodes and operands for loading the entry point.
loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address),
loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
// Create nodes and operands for loading the env pointer.
if (fd->env() != NULL) {
loadConLNodes_Env = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->env()),
loadConLNodes_Env = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->env()),
OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
} else {
loadConLNodes_Env._large_hi = NULL;
@ -3796,7 +3794,7 @@ encode %{
}
// Create nodes and operands for loading the Toc point.
loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->toc()),
loadConLNodes_Toc = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->toc()),
OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
#endif // ABI_ELFv2
// mtctr node

View File

@ -30,7 +30,6 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
void unlock_method(bool check_exceptions = true);

View File

@ -176,8 +176,12 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
const Register size = R12_scratch2;
__ get_cache_and_index_at_bcp(cache, 1, index_size);
// Big Endian (get least significant byte of 64 bit value):
// Get least significant byte of 64 bit value:
#if defined(VM_LITTLE_ENDIAN)
__ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
#else
__ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
#endif
__ sldi(size, size, Interpreter::logStackElementSize);
__ add(R15_esp, R15_esp, size);
__ dispatch_next(state, step);
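Note: the lbz offset now depends on byte order. The least significant byte of the 8-byte flags field sits at offset +7 on big-endian PPC but at offset +0 on little-endian, which is what the new #if selects. Illustrated in C++ (a sketch, not VM code; the fully portable form is simply *field & 0xff):

#include <cstdint>

// Least significant byte of a 64-bit field via its base address, as the
// interpreter's lbz above: offset 0 on little-endian, 7 on big-endian.
inline uint8_t lsb_of_u64(const uint64_t* field) {
  const unsigned char* p = reinterpret_cast<const unsigned char*>(field);
  const uint16_t probe = 1;  // runtime endianness check for the sketch
  bool little = *reinterpret_cast<const unsigned char*>(&probe) == 1;
  return little ? p[0] : p[7];
}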
@ -598,48 +602,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
// End of helpers
// ============================================================================
// Various method entries
//
// Empty method, generate a very fast return. We must skip this entry if
// someone's debugging, indicated by the flag
// "interp_mode" in the Thread obj.
// Note: empty methods are generated mostly for methods that do assertions, which are
// disabled in the "java opt build".
address TemplateInterpreterGenerator::generate_empty_entry(void) {
if (!UseFastEmptyMethods) {
NOT_PRODUCT(__ should_not_reach_here();)
return Interpreter::entry_for_kind(Interpreter::zerolocals);
}
Label Lslow_path;
const Register Rjvmti_mode = R11_scratch1;
address entry = __ pc();
__ lwz(Rjvmti_mode, thread_(interp_only_mode));
__ cmpwi(CCR0, Rjvmti_mode, 0);
__ bne(CCR0, Lslow_path); // jvmti_mode!=0
// No one's debugging: Simply return.
// Pop c2i arguments (if any) off when we return.
#ifdef ASSERT
__ ld(R9_ARG7, 0, R1_SP);
__ ld(R10_ARG8, 0, R21_sender_SP);
__ cmpd(CCR0, R9_ARG7, R10_ARG8);
__ asm_assert_eq("backlink", 0x545);
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
// And we're done.
__ blr();
__ bind(Lslow_path);
__ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
__ flush();
return entry;
}
// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.
@ -858,7 +820,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// Our signature handlers copy required arguments to the C stack
// (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
__ mr(R3_ARG1, R18_locals);
#if !defined(ABI_ELFv2)
__ ld(signature_handler_fd, 0, signature_handler_fd);
#endif
__ call_stub(signature_handler_fd);
@ -1020,8 +984,13 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// native result across the call. No oop is present.
__ mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
__ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
relocInfo::none);
#endif
__ bind(sync_check_done);
@ -1278,45 +1247,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry;
}
// =============================================================================
// Entry points
address AbstractInterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
// Determine code generation flags.
bool synchronized = false;
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}
if (entry_point) {
return entry_point;
}
return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
@ -1344,7 +1274,7 @@ int AbstractInterpreter::size_activation(int max_stack,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// in InterpreterGenerator::generate_fixed_frame.
assert(Interpreter::stackElementWords == 1, "sanity");
const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
View File
@ -189,8 +189,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Reg
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
assert(load_bc_into_bc_reg, "we use bc_reg as temp");
__ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
// Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF
// ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
__ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
__ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
__ cmpwi(CCR0, Rnew_bc, 0);
__ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
__ beq(CCR0, L_patch_done);
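Editor's sketch of the offset arithmetic in the #if above: the interpreter wants the byte ((*(cache+indices)) >> ((1+byte_no)*8)) & 0xFF of an 8-byte field, and the two lbz displacements are simply that byte's position in memory on each endianness. A self-contained illustration:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t indices = 0x1122334455667788ULL;
  for (int byte_no = 1; byte_no <= 2; byte_no++) {
    // Logical byte the interpreter wants out of the 8-byte _indices field:
    uint8_t want = (uint8_t)((indices >> ((1 + byte_no) * 8)) & 0xFF);
    const uint8_t* p = (const uint8_t*)&indices;   // the field as raw bytes
    int off_le = 1 + byte_no;                      // little-endian: LSB at offset 0
    int off_be = 7 - (1 + byte_no);                // big-endian: MSB at offset 0
    // On a little-endian host p[off_le] == want; on a big-endian host p[off_be] == want.
    printf("byte_no=%d want=0x%02x p[%d]=0x%02x p[%d]=0x%02x\n",
           byte_no, want, off_le, p[off_le], off_be, p[off_be]);
  }
  return 0;
}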
@ -1839,8 +1843,8 @@ void TemplateTable::tableswitch() {
__ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
// Load lo & hi.
__ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr);
__ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr);
__ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
__ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
// Check for default case (=index outside [low,high]).
__ cmpw(CCR0, R17_tos, Rlow_byte);
@ -1854,12 +1858,17 @@ void TemplateTable::tableswitch() {
__ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
__ sldi(Rindex, Rindex, LogBytesPerInt);
__ addi(Rindex, Rindex, 3 * BytesPerInt);
#if defined(VM_LITTLE_ENDIAN)
__ lwbrx(Roffset, Rdef_offset_addr, Rindex);
__ extsw(Roffset, Roffset);
#else
__ lwax(Roffset, Rdef_offset_addr, Rindex);
#endif
__ b(Ldispatch);
__ bind(Ldefault_case);
__ profile_switch_default(Rhigh_byte, Rscratch1);
__ lwa(Roffset, 0, Rdef_offset_addr);
__ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
__ bind(Ldispatch);
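The rewrites in this and the following switch hunks replace raw lwz/lwa loads with a new get_u4 helper. A hedged host-side sketch of its assumed semantics (a 4-byte big-endian load from the bytecode stream with a signed or unsigned result, which on little-endian PPC becomes lwbrx, plus extsw for the signed case):

#include <cstdint>

enum Signedness { Signed, Unsigned };

int64_t get_u4(const uint8_t* base, int offset, Signedness s) {
  const uint8_t* p = base + offset;
  uint32_t be = (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
                (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);   // big-endian byte order
  return (s == Signed) ? int64_t(int32_t(be)) : int64_t(be);
}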
@ -1875,12 +1884,11 @@ void TemplateTable::lookupswitch() {
// Table switch using linear search through cases.
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here. So on little endian machines, we have to reverse the offset, count, and compare value.
// Note: Everything is big-endian format here.
void TemplateTable::fast_linearswitch() {
transition(itos, vtos);
Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;
Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
Register Rcount = R3_ARG1,
Rcurrent_pair = R4_ARG2,
Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
@ -1894,47 +1902,40 @@ void TemplateTable::fast_linearswitch() {
__ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
// Setup loop counter and limit.
__ lwz(Rcount, BytesPerInt, Rdef_offset_addr); // Load count.
__ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
__ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
// Set up search loop.
__ cmpwi(CCR0, Rcount, 0);
__ beq(CCR0, Ldefault_case);
__ mtctr(Rcount);
__ cmpwi(CCR0, Rcount, 0);
__ bne(CCR0, Lloop_entry);
// linear table search
__ bind(Lsearch_loop);
__ lwz(Rvalue, 0, Rcurrent_pair);
__ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);
__ cmpw(CCR0, Rvalue, Rcmp_value);
__ beq(CCR0, Lfound);
__ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
__ bdnz(Lsearch_loop);
// default case
// Default case
__ bind(Ldefault_case);
__ lwa(Roffset, 0, Rdef_offset_addr);
__ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
if (ProfileInterpreter) {
__ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
__ b(Lcontinue_execution);
}
__ b(Lcontinue_execution);
// Next iteration
__ bind(Lsearch_loop);
__ bdz(Ldefault_case);
__ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
__ bind(Lloop_entry);
__ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
__ cmpw(CCR0, Rvalue, Rcmp_value);
__ bne(CCR0, Lsearch_loop);
// Found, load offset.
__ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
// Calculate case index and profile
__ mfctr(Rcurrent_pair);
if (ProfileInterpreter) {
__ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
__ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
}
// Entry found, skip Roffset bytecodes and continue.
__ bind(Lfound);
if (ProfileInterpreter) {
// Calc the num of the pair we hit. Careful, Rcurrent_pair points 2 ints
// beyond the actual current pair due to the auto update load above!
__ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
__ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt);
__ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
__ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
__ bind(Lcontinue_execution);
}
__ bind(Lcontinue_execution);
__ add(R14_bcp, Roffset, R14_bcp);
__ dispatch_next(vtos);
}
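As a reading aid for the restructured loop above, here is an editor's C++ sketch of what fast_linearswitch computes (illustrative types; in the real bytecode stream every 4-byte value is big-endian, which is what get_u4 handles):

#include <cstdint>

struct Pair { int32_t match; int32_t offset; };  // a value/offset pair

int32_t lookupswitch_linear(int32_t key, int32_t default_off,
                            const Pair* pairs, uint32_t count) {
  for (uint32_t i = 0; i < count; i++) {   // generated code counts down via CTR (bdz)
    if (pairs[i].match == key) {
      return pairs[i].offset;              // found: dispatch by this offset
    }
  }
  return default_off;                      // default case
}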
@ -1990,7 +1991,7 @@ void TemplateTable::fast_binaryswitch() {
// initialize i & j
__ li(Ri,0);
__ lwz(Rj, -BytesPerInt, Rarray);
__ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
// and start.
Label entry;
@ -2007,7 +2008,11 @@ void TemplateTable::fast_binaryswitch() {
// i = h;
// }
__ sldi(Rscratch, Rh, log_entry_size);
#if defined(VM_LITTLE_ENDIAN)
__ lwbrx(Rscratch, Rscratch, Rarray);
#else
__ lwzx(Rscratch, Rscratch, Rarray);
#endif
// if (key < current value)
// Rh = Rj
@ -2039,20 +2044,20 @@ void TemplateTable::fast_binaryswitch() {
// Ri = value offset
__ sldi(Ri, Ri, log_entry_size);
__ add(Ri, Ri, Rarray);
__ lwz(Rscratch, 0, Ri);
__ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
Label not_found;
// Ri = offset offset
__ cmpw(CCR0, Rkey, Rscratch);
__ beq(CCR0, not_found);
// entry not found -> j = default offset
__ lwz(Rj, -2 * BytesPerInt, Rarray);
__ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
__ b(default_case);
__ bind(not_found);
// entry found -> j = offset
__ profile_switch_case(Rh, Rj, Rscratch, Rkey);
__ lwz(Rj, BytesPerInt, Ri);
__ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
if (ProfileInterpreter) {
__ b(continue_execution);
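The fast_binaryswitch hunks above apply the same big-endian fix to the binary-search variant. An editor's sketch of the search it implements (illustrative, host-endian types):

#include <cstdint>

struct Pair { int32_t match; int32_t offset; };

int32_t binaryswitch(int32_t key, int32_t default_off,
                     const Pair* a, int32_t n) {
  int32_t i = 0, j = n;
  while (i + 1 < j) {                 // invariant kept: a[i].match <= key < a[j].match
    int32_t h = (i + j) >> 1;         // i < h < j
    if (key < a[h].match) j = h; else i = h;
  }
  if (n > 0 && a[i].match == key) return a[i].offset;  // entry found
  return default_off;                                  // entry not found
}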
@ -2147,8 +2152,11 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
// We are resolved if the indices offset contains the current bytecode.
// Big Endian:
#if defined(VM_LITTLE_ENDIAN)
__ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
__ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
// Acquire by cmp-br-isync (see below).
__ cmpdi(CCR0, Rscratch, (int)bytecode());
__ beq(CCR0, Lresolved);
View File
@ -29,6 +29,7 @@
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/defaultStream.hpp"
#include "vm_version_ppc.hpp"
@ -108,7 +109,7 @@ void VM_Version::initialize() {
(has_vand() ? " vand" : "")
// Make sure number of %s matches num_features!
);
_features_str = strdup(buf);
_features_str = os::strdup(buf);
NOT_PRODUCT(if (Verbose) print_features(););
// PPC64 supports 8-byte compare-exchange operations (see
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
@ -68,9 +69,7 @@ bool CppInterpreter::contains(address pc) {
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#define __ _masm->
Label frame_manager_entry;
Label fast_accessor_slow_entry_path; // Fast accessor methods need to be able to jump to the unsynchronized
// C++ interpreter entry point; this label holds that entry point.
Label frame_manager_entry; // C++ interpreter entry point; this label holds that entry point.
static address unctrap_frame_manager_entry = NULL;
@ -452,110 +451,6 @@ address InterpreterGenerator::generate_empty_entry(void) {
return NULL;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry
// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
// parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
address entry = __ pc();
Label slow_path;
if ( UseFastAccessorMethods) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
__ delayed()->nop();
// Check if local 0 != NULL
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
__ tst(Otos_i); // check if local 0 == NULL and go to the slow path
__ brx(Assembler::zero, false, Assembler::pn, slow_path);
__ delayed()->nop();
// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
__ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
// get constant pool cache
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
__ ld_ptr(G3_scratch, in_bytes(ConstMethod::constants_offset()), G3_scratch);
__ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
// get specific constant pool cache entry
__ add(G3_scratch, G1_scratch, G3_scratch);
// Check the constant Pool cache entry to see if it has been resolved.
// If not, need the slow path.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
__ and3(G1_scratch, 0xFF, G1_scratch);
__ cmp(G1_scratch, Bytecodes::_getfield);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
__ delayed()->nop();
// Get the type and return field offset from the constant pool cache
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);
Label xreturn_path;
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Get the type from the constant pool cache
__ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
// Make sure we don't need to mask G1_scratch after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmp(G1_scratch, atos );
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, itos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, stos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, ctos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
__ cmp(G1_scratch, btos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
__ should_not_reach_here();
#endif
__ ldsb(Otos_i, G3_scratch, Otos_i);
__ bind(xreturn_path);
// _ireturn/_areturn
__ retl(); // return from leaf routine
__ delayed()->mov(O5_savedSP, SP);
// Generate regular method entry
__ bind(slow_path);
__ ba(fast_accessor_slow_entry_path);
__ delayed()->nop();
return entry;
}
return NULL;
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@ -573,7 +468,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
//
@ -1870,23 +1765,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ ba(call_interpreter_2);
__ delayed()->st_ptr(O1, STATE(_stack));
// Fast accessor methods share this entry point.
// This works because frame manager is in the same codelet
// This can either be an entry via call_stub/c1/c2 or a recursive interpreter call
// we need to do a little register fixup here once we distinguish the two of them
if (UseFastAccessorMethods && !synchronized) {
// Call stub_return address still in O7
__ bind(fast_accessor_slow_entry_path);
__ set((intptr_t)return_from_native_method - 8, Gtmp1);
__ cmp(Gtmp1, O7); // returning to interpreter?
__ brx(Assembler::equal, true, Assembler::pt, re_dispatch); // yep
__ delayed()->nop();
__ ba(re_dispatch);
__ delayed()->mov(G0, prevState); // initial entry
}
// interpreter returning to native code (call_stub/c1/c2)
// convert result and unwind initial activation
// L2_scratch - scaled result type index
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,9 +32,11 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
// there are no math intrinsics on sparc
address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry(void);
void lock_method(void);
void save_native_result(void);
@ -43,4 +45,7 @@
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label& Lcontinue);
// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
View File
@ -241,6 +241,15 @@ void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
// Various method entries
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry = __ pc();
assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
__ jump_to(al, G3_scratch);
__ delayed()->nop();
return entry;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
@ -255,159 +264,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
}
//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry type are generic bytecode methods and native call method.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accomadate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code
// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.
//-----------------------------------------------------------------------------------------------
// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : break;
case Interpreter::java_lang_math_cos : break;
case Interpreter::java_lang_math_tan : break;
case Interpreter::java_lang_math_sqrt : break;
case Interpreter::java_lang_math_abs : break;
case Interpreter::java_lang_math_log : break;
case Interpreter::java_lang_math_log10 : break;
case Interpreter::java_lang_math_pow : break;
case Interpreter::java_lang_math_exp : break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}
if (entry_point) return entry_point;
return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
}
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
// No special entry points that preclude compilation
return true;
View File
@ -6184,7 +6184,11 @@ instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
ins_cost(DEFAULT_COST * 3/2);
format %{ "SET $con,$dst\t! non-oop ptr" %}
ins_encode %{
__ set($con$$constant, $dst$$Register);
if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
__ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
} else {
__ set($con$$constant, $dst$$Register);
}
%}
ins_pipe(loadConP);
%}
View File
@ -456,6 +456,115 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.
//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accommodate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code
// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.
//-----------------------------------------------------------------------------------------------
// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//
//
@ -599,136 +708,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
}
// Empty method, generate a very fast return.
address InterpreterGenerator::generate_empty_entry(void) {
// A method that does nothing but return...
address entry = __ pc();
Label slow_path;
// do nothing for empty methods (do not even increment invocation counter)
if ( UseFastEmptyMethods) {
// If we need a safepoint check, generate full interpreter entry.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ set(sync_state, G3_scratch);
__ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
// Code: _return
__ retl();
__ delayed()->mov(O5_savedSP, SP);
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
return NULL;
}
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry
// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
// parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
address entry = __ pc();
Label slow_path;
// XXX: for compressed oops pointer loading and decoding doesn't fit in
// delay slot and damages G1
if ( UseFastAccessorMethods && !UseCompressedOops ) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
// Check if local 0 != NULL
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
// check if local 0 == NULL and go to the slow path
__ br_null_short(Otos_i, Assembler::pn, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
__ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);
// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
// get constant pool cache
__ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
__ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
__ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
// get specific constant pool cache entry
__ add(G3_scratch, G1_scratch, G3_scratch);
// Check the constant Pool cache entry to see if it has been resolved.
// If not, need the slow path.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
__ and3(G1_scratch, 0xFF, G1_scratch);
__ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
// Get the type and return field offset from the constant pool cache
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
Label xreturn_path;
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Get the type from the constant pool cache
__ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
// Make sure we don't need to mask G1_scratch after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmp(G1_scratch, atos );
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, itos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, stos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, ctos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
__ cmp(G1_scratch, btos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
__ should_not_reach_here();
#endif
__ ldsb(Otos_i, G3_scratch, Otos_i);
__ bind(xreturn_path);
// _ireturn/_areturn
__ retl(); // return from leaf routine
__ delayed()->mov(O5_savedSP, SP);
// Generate regular method entry
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
return NULL;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@ -806,7 +785,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
//
@ -1242,8 +1221,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
address entry = __ pc();
@ -1410,123 +1387,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry;
}
//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry type are generic bytecode methods and native call method.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accommodate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code
// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.
//-----------------------------------------------------------------------------------------------
// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |
static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
// Figure out the size of an interpreter frame (in words) given that we have a fully allocated
View File
@ -26,6 +26,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_sparc.hpp"
@ -249,7 +250,7 @@ void VM_Version::initialize() {
(!has_hardware_fsmuld() ? ", no-fsmuld" : ""));
// buf is started with ", " or is empty
_features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);
_features_str = os::strdup(strlen(buf) > 2 ? buf + 2 : buf);
// There are three 64-bit SPARC families that do not overlap, e.g.,
// both is_ultra3() and is_sparc64() cannot be true at the same time.
View File
@ -3853,6 +3853,15 @@ void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
emit_int8((unsigned char)(0xC0 | encode));
}
// Carry-Less Multiplication Quadword
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
assert(VM_Version::supports_clmul(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
emit_int8(0x44);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8((unsigned char)mask);
}
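For readers unfamiliar with the instruction being added: PCLMULQDQ forms a carry-less (GF(2) polynomial) product of two 64-bit operands selected by the immediate. A plain-C++ reference for one 64x64 -> 128-bit carry-less multiply (editor's illustration, not VM code):

#include <cstdint>

void clmul64(uint64_t a, uint64_t b, uint64_t* lo, uint64_t* hi) {
  uint64_t l = 0, h = 0;
  for (int i = 0; i < 64; i++) {
    if ((b >> i) & 1) {                // XOR in a shifted copy of a: no carries
      l ^= a << i;                     // bits 0..63 of (a << i)
      if (i > 0) h ^= a >> (64 - i);   // bits 64..127 of (a << i)
    }
  }
  *lo = l; *hi = h;
}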
// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
View File
@ -1837,6 +1837,7 @@ private:
void vpbroadcastd(XMMRegister dst, XMMRegister src);
// Carry-Less Multiplication Quadword
void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
// AVX instruction which is used to clear upper 128 bits of YMM registers and
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,21 +27,6 @@
protected:
#if 0
address generate_asm_interpreter_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
void lock_method(void);
void generate_stack_overflow_check(void);
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label* do_continue);
#endif
void generate_more_monitors();
void generate_deopt_handling();
address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,9 +66,6 @@ extern "C" void RecursiveInterpreterActivation(interpreterState istate )
#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
Label fast_accessor_slow_entry_path; // Fast accessor methods need to be able to jump to the unsynchronized
// C++ interpreter entry point; this label holds that entry point.
// default registers for state and sender_sp
// state and sender_sp are the same on 32bit because we have no choice.
// state could be rsi on 64bit but it is an arg reg and not callee save
@ -660,7 +657,6 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// generate_method_entry) so the guard should work for them too.
//
// monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@ -794,156 +790,6 @@ void InterpreterGenerator::lock_method(void) {
__ lock_object(monitor);
}
// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx: Method*
// rsi/r13: senderSP must preserved for slow path, set SP to it on fast path
Label xreturn_path;
// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
address entry_point = __ pc();
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// ASM/C++ Interpreter
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
__ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax,: local 0
// rbx,: method
// rcx: receiver - do not destroy since it is needed for slow path!
// rcx: scratch
// rdx: constant pool cache index
// rdi: constant pool cache
// rsi/r13: sender sp
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
Label notObj;
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ movptr(rax, field_address);
__ jmp(xreturn_path);
__ bind(notObj);
#endif // _LP64
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);
__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notShort);
__ cmpl(rdx, ctos);
__ jcc(Assembler::notEqual, notChar);
__ load_unsigned_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notChar);
#ifdef ASSERT
Label okay;
#ifndef _LP64
__ cmpl(rdx, atos);
__ jcc(Assembler::equal, okay);
#endif // _LP64
__ cmpl(rdx, itos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif // ASSERT
// All the rest are a 32 bit wordsize
__ movl(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
__ pop(rdi); // get return address
__ mov(rsp, sender_sp_on_entry); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
// We will enter c++ interpreter looking like it was
// called by the call_stub this will cause it to return
// a tosca result to the invoker which might have been
// the c++ interpreter itself.
__ jmp(fast_accessor_slow_entry_path);
return entry_point;
} else {
return NULL;
}
}
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@ -961,7 +807,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
//
@ -1670,10 +1516,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
address entry_point = __ pc();
// Fast accessor methods share this entry point.
// This works because frame manager is in the same codelet
if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
Label dispatch_entry_2;
__ movptr(rcx, sender_sp_on_entry);
__ movptr(state, (int32_t)NULL_WORD); // no current activation
@ -2212,40 +2054,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : // fall thru
entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}
if (entry_point) return entry_point;
return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {
View File
@ -0,0 +1,66 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#define __ _masm->
// Jump into the normal path: the accessor and empty entries simply jump to the normal entry.
// The "fast" optimizations don't update the compilation count, and can therefore disable
// inlining of these methods, which should in fact be inlined.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry_point = __ pc();
assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
__ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
return entry_point;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
address entry_point = __ pc();
// abstract method entry
#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,8 +36,9 @@
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
View File
@ -67,45 +67,6 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
}
//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//
// Empty method, generate a very fast return.
address InterpreterGenerator::generate_empty_entry(void) {
// rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must preserve
// rsi: sender sp must set sp to this value on return
if (!UseFastEmptyMethods) return NULL;
address entry_point = __ pc();
// If we need a safepoint check, generate full interpreter entry.
Label slow_path;
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// do nothing for empty methods (do not even increment invocation counter)
// Code: _return
// _return
// return w/o popping parameters
__ pop(rax);
__ mov(rsp, rsi);
__ jmp(rax);
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;
}
address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rbx,: Method*
@ -216,36 +177,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
// rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must preserve
// rsi: sender SP
address entry_point = __ pc();
// abstract method entry
#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
View File
@ -301,66 +301,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
return entry_point;
}
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
// rbx: Method*
// r13: sender SP
address entry_point = __ pc();
// abstract method entry
#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry_point;
}
// Empty method, generate a very fast return.
address InterpreterGenerator::generate_empty_entry(void) {
// rbx: Method*
// r13: sender sp must set sp to this value on return
if (!UseFastEmptyMethods) {
return NULL;
}
address entry_point = __ pc();
// If we need a safepoint check, generate full interpreter entry.
Label slow_path;
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// do nothing for empty methods (do not even increment invocation counter)
// Code: _return
// _return
// return w/o popping parameters
__ pop(rax);
__ mov(rsp, r13);
__ jmp(rax);
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;
}
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
View File
@ -7316,17 +7316,34 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* Fold 128-bit data chunk
*/
void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
vpclmulhdq(xtmp, xK, xcrc); // [123:64]
vpclmulldq(xcrc, xK, xcrc); // [63:0]
vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
pxor(xcrc, xtmp);
if (UseAVX > 0) {
vpclmulhdq(xtmp, xK, xcrc); // [123:64]
vpclmulldq(xcrc, xK, xcrc); // [63:0]
vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
pxor(xcrc, xtmp);
} else {
movdqa(xtmp, xcrc);
pclmulhdq(xtmp, xK); // [123:64]
pclmulldq(xcrc, xK); // [63:0]
pxor(xcrc, xtmp);
movdqu(xtmp, Address(buf, offset));
pxor(xcrc, xtmp);
}
}
void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
vpclmulhdq(xtmp, xK, xcrc);
vpclmulldq(xcrc, xK, xcrc);
pxor(xcrc, xbuf);
pxor(xcrc, xtmp);
if (UseAVX > 0) {
vpclmulhdq(xtmp, xK, xcrc);
vpclmulldq(xcrc, xK, xcrc);
pxor(xcrc, xbuf);
pxor(xcrc, xtmp);
} else {
movdqa(xtmp, xcrc);
pclmulhdq(xtmp, xK);
pclmulldq(xcrc, xK);
pxor(xcrc, xbuf);
pxor(xcrc, xtmp);
}
}
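Editor's note on the shape of the fallback above: the legacy SSE encoding of PCLMULQDQ is destructive (dst = dst * src), while the AVX VEX encoding is three-operand, so the non-AVX path must first copy xcrc with movdqa before clobbering it. A toy model of that register discipline (XOR stands in for the carry-less product; this illustrates operand handling only, not the folding math):

#include <cstdint>

struct Xmm { uint64_t v; };                                  // stand-in register

void pclmul_sse(Xmm& dst, const Xmm& src)  { dst.v ^= src.v; }          // destructive
Xmm  pclmul_avx(const Xmm& a, const Xmm& b) { return Xmm{a.v ^ b.v}; }  // 3-operand

void fold_step(Xmm& xcrc, const Xmm& xK, Xmm& xtmp, bool use_avx) {
  if (use_avx) {
    xtmp = pclmul_avx(xK, xcrc);    // both products read the original xcrc
    xcrc = pclmul_avx(xK, xcrc);
  } else {
    xtmp = xcrc;                    // movdqa: save xcrc before clobbering it
    pclmul_sse(xtmp, xK);           // xtmp = xtmp * xK
    pclmul_sse(xcrc, xK);           // xcrc = xcrc * xK
  }
}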
/**
@ -7444,9 +7461,17 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
// Fold 128 bits in xmm1 down into 32 bits in crc register.
BIND(L_fold_128b);
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
vpand(xmm3, xmm0, xmm2, false /* vector256 */);
vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
if (UseAVX > 0) {
vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
vpand(xmm3, xmm0, xmm2, false /* vector256 */);
vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
} else {
movdqa(xmm2, xmm0);
pclmulqdq(xmm2, xmm1, 0x1);
movdqa(xmm3, xmm0);
pand(xmm3, xmm2);
pclmulqdq(xmm0, xmm3, 0x1);
}
psrldq(xmm1, 8);
psrldq(xmm2, 4);
pxor(xmm0, xmm1);

View File

@ -966,6 +966,16 @@ public:
void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
void mulss(XMMRegister dst, AddressLiteral src);
// Carry-Less Multiplication Quadword
void pclmulldq(XMMRegister dst, XMMRegister src) {
// 0x00 - multiply lower 64 bits [0:63]
Assembler::pclmulqdq(dst, src, 0x00);
}
void pclmulhdq(XMMRegister dst, XMMRegister src) {
// 0x11 - multiply upper 64 bits [64:127]
Assembler::pclmulqdq(dst, src, 0x11);
}
void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
void sqrtsd(XMMRegister dst, AddressLiteral src);
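
As a cross-check on the pclmulqdq selector bytes documented above (0x00 pairs the two low quadwords, 0x11 the two high ones), here is a minimal stand-alone model of the 64x64 -> 128-bit carry-less multiply the instruction performs; this is an illustration, not code from these sources:

#include <cstdint>

struct u128 { uint64_t lo, hi; };

// Carry-less multiply: partial products are combined with XOR rather than
// ADD, i.e. polynomial multiplication over GF(2).
static u128 clmul64(uint64_t a, uint64_t b) {
  u128 r = {0, 0};
  for (int i = 0; i < 64; i++) {
    if ((b >> i) & 1) {
      r.lo ^= a << i;
      if (i > 0) r.hi ^= a >> (64 - i);  // guard: shifting by 64 is undefined
    }
  }
  return r;
}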

View File

@ -38,7 +38,7 @@ int AbstractInterpreter::size_activation(int max_stack,
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// in InterpreterGenerator::generate_fixed_frame.
// fixed size of an interpreter frame:
int overhead = frame::sender_sp_offset -

View File

@ -468,10 +468,10 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// rax,
// NOTE: the additional locals are also always pushed (wasn't obvious in
// generate_method_entry) so the guard should work for them too.
// generate_fixed_frame) so the guard should work for them too.
//
// monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
// monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@ -633,145 +633,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
// End of helpers
//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//
// Call an accessor method (assuming it is resolved; otherwise drop into the vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx,: Method*
// rcx: receiver (preserve for slow entry into asm interpreter)
// rsi: senderSP must be preserved for slow path, set SP to it on fast path
address entry_point = __ pc();
Label xreturn_path;
// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// ASM/C++ Interpreter
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
__ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax,: local 0
// rbx,: method
// rcx: receiver - do not destroy since it is needed for slow path!
// rcx: scratch
// rdx: constant pool cache index
// rdi: constant pool cache
// rsi: sender sp
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);
__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notShort);
__ cmpl(rdx, ctos);
__ jcc(Assembler::notEqual, notChar);
__ load_unsigned_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notChar);
#ifdef ASSERT
Label okay;
__ cmpl(rdx, atos);
__ jcc(Assembler::equal, okay);
__ cmpl(rdx, itos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif // ASSERT
// All the rest are a 32 bit wordsize
// This is ok for now, since fast accessors should be going away
__ movptr(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
__ pop(rdi); // get return address
__ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;
}
return NULL;
}
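To make the shift arithmetic above concrete, a small stand-alone model (the layout mirrors the comments; the index value is made up for illustration):

#include <cassert>
#include <cstdint>

int main() {
  const int BitsPerByte = 8;
  // A resolved accessor begins _aload_0 (0x2a), _getfield (0xb4), then a
  // two-byte constant pool cache index, so the 32-bit little-endian load
  // of the code stream looks like <index><0xb4><0x2a>.
  uint32_t codes = (7u << 16) | (0xb4u << 8) | 0x2au;  // index 7
  uint32_t index = codes >> (2 * BitsPerByte);         // discard both bytecodes
  assert(index == 7);
  // The index is then scaled by the cache entry size (asserted above to be
  // 4 words) before addressing the ConstantPoolCache.
  assert((index << 2) == 28);
  return 0;
}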
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
@ -862,7 +723,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
/**
@ -1557,100 +1418,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}
//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are special-purpose entries that combine entry
// and interpretation in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx,: Method*
// rcx: receiver
//
//
// Stack layout immediately at entry
//
// [ return address ] <--- rsp
// [ parameter n ]
// ...
// [ parameter 1 ]
// [ expression stack ] (caller's java expression stack)
// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see interpreter_x86.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry ] <--- rsp
// ...
// [ monitor entry ]
// [ expr. stack bottom ]
// [ saved rsi ]
// [ current rdi ]
// [ Method* ]
// [ saved rbp, ] <--- rbp,
// [ return address ]
// [ local variable m ]
// ...
// [ local variable 1 ]
// [ parameter n ]
// ...
// [ parameter 1 ] <--- rdi
address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}
if (entry_point) return entry_point;
return ig_this->generate_normal_entry(synchronized);
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.

View File

@ -400,7 +400,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (wasn't
// obvious in generate_method_entry) so the guard should work for them
// obvious in generate_fixed_frame) so the guard should work for them
// too.
//
// Args:
@ -411,8 +411,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// rax
void InterpreterGenerator::generate_stack_overflow_check(void) {
// monitor entry size: see picture of stack set
// (generate_method_entry) and frame_amd64.hpp
// monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp through expr stack
@ -600,153 +599,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// End of helpers
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//
// Call an accessor method (assuming it is resolved; otherwise drop
// into the vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx: Method*
// r13: senderSP must be preserved for slow path, set SP to it on fast path
address entry_point = __ pc();
Label xreturn_path;
// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
// thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// rbx: method
__ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2 * BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
// rax: local 0
// rbx: method
// rdx: constant pool cache index
// rdi: constant pool cache
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4,
"adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2 * BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset()));
// edx: flags
__ movl(rdx,
Address(rdi,
rdx,
Address::times_8,
ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
Label notObj, notInt, notByte, notShort;
const Address field_address(rax, rcx, Address::times_1);
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask edx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ load_heap_oop(rax, field_address);
__ jmp(xreturn_path);
__ bind(notObj);
__ cmpl(rdx, itos);
__ jcc(Assembler::notEqual, notInt);
// itos
__ movl(rax, field_address);
__ jmp(xreturn_path);
__ bind(notInt);
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
// btos
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);
__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
// stos
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);
__ bind(notShort);
#ifdef ASSERT
Label okay;
__ cmpl(rdx, ctos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif
// ctos
__ load_unsigned_short(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
__ pop(rdi);
__ mov(rsp, r13);
__ jmp(rdi);
__ ret(0);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
} else {
(void) generate_normal_entry(false);
}
return entry_point;
}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@ -773,8 +625,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
//
// rbx: Method*
// r13: senderSP must be preserved for slow path, set SP to it on fast path
@ -832,7 +682,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}
/**
@ -1566,100 +1416,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}
// Entry points
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are special-purpose entries
// that combine entry and interpretation in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: Method*
//
// Stack layout immediately at entry
//
// [ return address ] <--- rsp
// [ parameter n ]
// ...
// [ parameter 1 ]
// [ expression stack ] (caller's java expression stack)
// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry ] <--- rsp
// ...
// [ monitor entry ]
// [ expr. stack bottom ]
// [ saved r13 ]
// [ current r14 ]
// [ Method* ]
// [ saved ebp ] <--- rbp
// [ return address ]
// [ local variable m ]
// ...
// [ local variable 1 ]
// [ parameter n ]
// ...
// [ parameter 1 ] <--- r14
address AbstractInterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
: entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
: // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
: entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}
if (entry_point) {
return entry_point;
}
return ig_this->generate_normal_entry(synchronized);
}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.

View File

@ -27,6 +27,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"
@ -514,7 +515,7 @@ void VM_Version::get_processor_features() {
(supports_tscinv() ? ", tscinv": ""),
(supports_bmi1() ? ", bmi1" : ""),
(supports_bmi2() ? ", bmi2" : ""));
_features_str = strdup(buf);
_features_str = os::strdup(buf);
// UseSSE is set to the smaller of what hardware supports and what
// the command line requires. I.e., you cannot set UseSSE to 2 on
@ -559,7 +560,7 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseCLMUL, false);
}
if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) {
if (UseCLMUL && (UseSSE > 2)) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
UseCRC32Intrinsics = true;
}
@ -805,6 +806,21 @@ void VM_Version::get_processor_features() {
}
}
}
if ((cpu_family() == 0x06) &&
((extended_cpu_model() == 0x36) || // Centerton
(extended_cpu_model() == 0x37) || // Silvermont
(extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
if (FLAG_IS_DEFAULT(OptoScheduling)) {
OptoScheduling = true;
}
#endif
if (supports_sse4_2()) { // Silvermont
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
}
}
}
}
// Use count leading zeros count instruction if available.
@ -892,23 +908,25 @@ void VM_Version::get_processor_features() {
AllocatePrefetchDistance = allocate_prefetch_distance();
AllocatePrefetchStyle = allocate_prefetch_style();
if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
if (is_intel() && cpu_family() == 6 && supports_sse3()) {
if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
AllocatePrefetchDistance = 384;
#else
AllocatePrefetchDistance = 320;
#endif
}
if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
AllocatePrefetchDistance = 192;
AllocatePrefetchLines = 4;
}
#ifdef COMPILER2
if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
if (supports_sse4_2()) {
if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
FLAG_SET_DEFAULT(UseFPUForSpilling, true);
}
#endif
}
#endif
}
assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -731,7 +731,7 @@ InterpreterFrame *InterpreterFrame::build(Method* const method, TRAPS) {
if (method->is_static())
object = method->constants()->pool_holder()->java_mirror();
else
object = (oop) locals[0];
object = (oop) (void*)locals[0];
monitor->set_obj(object);
}
@ -831,60 +831,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return generate_entry((address) CppInterpreter::normal_entry);
}
address AbstractInterpreterGenerator::generate_method_entry(
AbstractInterpreter::MethodKind kind) {
address entry_point = NULL;
switch (kind) {
case Interpreter::zerolocals:
case Interpreter::zerolocals_synchronized:
break;
case Interpreter::native:
entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
break;
case Interpreter::native_synchronized:
entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
break;
case Interpreter::empty:
entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();
break;
case Interpreter::accessor:
entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();
break;
case Interpreter::abstract:
entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
break;
case Interpreter::java_lang_math_sin:
case Interpreter::java_lang_math_cos:
case Interpreter::java_lang_math_tan:
case Interpreter::java_lang_math_abs:
case Interpreter::java_lang_math_log:
case Interpreter::java_lang_math_log10:
case Interpreter::java_lang_math_sqrt:
case Interpreter::java_lang_math_pow:
case Interpreter::java_lang_math_exp:
entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
break;
case Interpreter::java_lang_ref_reference_get:
entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
break;
default:
ShouldNotReachHere();
}
if (entry_point == NULL)
entry_point = ((InterpreterGenerator*) this)->generate_normal_entry(false);
return entry_point;
}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {

View File

@ -26,6 +26,8 @@
#ifndef CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
#define CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP
#include "code/codeCache.hpp"
// Constructors
inline frame::frame() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -61,6 +61,12 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M); // default max size of CMS
define_pd_global(uintx, TypeProfileLevel, 0);
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
product(bool, UseFastEmptyMethods, true, \
"Use fast method entry code for empty methods") \
\
product(bool, UseFastAccessorMethods, true, \
"Use fast method entry code for accessor methods") \
\
#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,4 +39,7 @@
address generate_accessor_entry();
address generate_Reference_get_entry();
// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP

View File

@ -58,6 +58,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
@ -378,10 +379,10 @@ void os::Aix::query_multipage_support() {
// default should be 4K.
size_t data_page_size = SIZE_4K;
{
void* p = ::malloc(SIZE_16M);
void* p = os::malloc(SIZE_16M, mtInternal);
guarantee(p != NULL, "malloc failed");
data_page_size = os::Aix::query_pagesize(p);
::free(p);
os::free(p);
}
// query default shm page size (LDR_CNTRL SHMPSIZE)

View File

@ -24,6 +24,8 @@
#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "loadlib_aix.hpp"
#include "porting_aix.hpp"
#include "utilities/debug.hpp"
@ -83,7 +85,7 @@ class fixed_strings {
while (n) {
node* p = n;
n = n->next;
free(p->v);
os::free(p->v);
delete p;
}
}
@ -95,7 +97,7 @@ class fixed_strings {
}
}
node* p = new node;
p->v = strdup(s);
p->v = os::strdup_check_oom(s);
p->next = first;
first = p;
return p->v;

View File

@ -2439,23 +2439,25 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}
// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
return addr;
}
bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
} else {
return false;
}
} else {
tkr.discard();
return false;
return shmdt(base) == 0;
}
}
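
The same shape recurs in the Linux and Windows hunks below: construct a release tracker only when NMT is past the minimal level, and record only releases that actually succeeded. A distilled stand-alone sketch of the pattern (every name here is a stand-in, not the real NMT API):

#include <cstddef>

enum TrackingLevel { NMT_off, NMT_minimal, NMT_summary };

struct Tracker {
  void record(void* addr, size_t bytes) { (void)addr; (void)bytes; }  // bookkeeping stand-in
};

static TrackingLevel tracking_level() { return NMT_off; }  // stand-in
static bool release_impl(void* base, size_t bytes) { (void)base; (void)bytes; return true; }  // stand-in

bool release_tracked(void* base, size_t bytes) {
  if (tracking_level() > NMT_minimal) {
    Tracker tkr;                      // tracker lives only for this release
    bool res = release_impl(base, bytes);
    if (res) {
      tkr.record(base, bytes);        // record only a release that happened
    }
    return res;
  }
  return release_impl(base, bytes);   // tracking off: no bookkeeping at all
}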
size_t os::large_page_size() {

View File

@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
return mapAddress;
}
@ -918,7 +918,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;

View File

@ -2246,7 +2246,7 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
const siginfo_t* si = (const siginfo_t*)siginfo;
os::Posix::print_siginfo_brief(st, si);
#if INCLUDE_CDS
if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
UseSharedSpaces) {
FileMapInfo* mapinfo = FileMapInfo::current_info();
@ -2256,6 +2256,7 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
" possible disk/network problem.");
}
}
#endif
st->cr();
}
@ -3504,9 +3505,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
assert(is_ptr_aligned(start, alignment), "Must be");
// os::reserve_memory_special will record this memory area.
// Need to release it here to prevent overlapping reservations.
MemTracker::record_virtual_memory_release((address)start, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
// os::reserve_memory_special will record this memory area.
// Need to release it here to prevent overlapping reservations.
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
tkr.record((address)start, bytes);
}
char* end = start + bytes;
@ -3601,7 +3605,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}
// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
}
return addr;
@ -3617,24 +3621,30 @@ bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
}
bool os::release_memory_special(char* base, size_t bytes) {
assert(UseLargePages, "only for large pages");
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
res = os::Linux::release_memory_special_impl(base, bytes);
if (res) {
tkr.record((address)base, bytes);
}
} else {
res = os::Linux::release_memory_special_impl(base, bytes);
}
return res;
}
bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
assert(UseLargePages, "only for large pages");
bool res;
if (UseSHM) {
res = os::Linux::release_memory_special_shm(base, bytes);
} else {
assert(UseHugeTLBFS, "must be");
res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
}
if (res) {
tkr.record((address)base, bytes);
} else {
tkr.discard();
}
return res;
}

View File

@ -108,6 +108,7 @@ class Linux {
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
static bool release_memory_special_impl(char* base, size_t bytes);
static bool release_memory_special_shm(char* base, size_t bytes);
static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);

View File

@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
return mapAddress;
}
@ -924,7 +924,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;

View File

@ -75,21 +75,41 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
VMError::report_coredump_status(buffer, success);
}
address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n ++;
toSkip++;
#endif
int frame_idx = 0;
int num_of_frames; // number of frames captured
frame fr = os::current_frame();
while (n > 0 && fr.pc() &&
!os::is_first_C_frame(&fr) && fr.sender_pc()) {
fr = os::get_sender_for_C_frame(&fr);
n --;
while (fr.pc() && frame_idx < frames) {
if (toSkip > 0) {
toSkip --;
} else {
stack[frame_idx ++] = fr.pc();
}
if (fr.fp() == NULL || os::is_first_C_frame(&fr)
    || fr.sender_pc() == NULL || fr.cb() != NULL) break;
if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
fr = os::get_sender_for_C_frame(&fr);
} else {
break;
}
}
if (n == 0) {
return fr.pc();
} else {
return NULL;
num_of_frames = frame_idx;
for (; frame_idx < frames; frame_idx ++) {
stack[frame_idx] = NULL;
}
return num_of_frames;
}
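For comparison only: glibc's backtrace(3) exercises the same contract as get_native_stack (fill an array of PCs, leave unused slots NULL, return the captured count). HotSpot walks the frames itself rather than depending on libc, so this is an analogue, not what the code above does:

#include <execinfo.h>
#include <cstdio>

int main() {
  void* stack[8] = {0};              // unused slots stay NULL, as above
  int depth = backtrace(stack, 8);   // capture up to 8 return addresses
  for (int i = 0; i < depth; i++) {
    std::printf("frame %d: %p\n", i, stack[i]);
  }
  return 0;
}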
bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (::unsetenv(name) == 0);
}
int os::get_last_error() {

View File

@ -199,23 +199,29 @@ class ArgumentIterator : public StackObj {
// Calls from the door function to check that the client credentials
// match this process. Returns 0 if credentials okay, otherwise -1.
static int check_credentials() {
door_cred_t cred_info;
ucred_t *cred_info = NULL;
int ret = -1; // deny by default
// get client credentials
if (door_cred(&cred_info) == -1) {
return -1; // unable to get them
if (door_ucred(&cred_info) == -1) {
return -1; // unable to get them, deny
}
// get our euid/egid (probably could cache these)
uid_t euid = geteuid();
gid_t egid = getegid();
// check that the effective uid/gid matches - discuss this with Jeff.
if (cred_info.dc_euid == euid && cred_info.dc_egid == egid) {
return 0; // okay
} else {
return -1; // denied
// get euid/egid from the ucred
uid_t ucred_euid = ucred_geteuid(cred_info);
gid_t ucred_egid = ucred_getegid(cred_info);
// check that the effective uid/gid matches
if (ucred_euid == euid && ucred_egid == egid) {
ret = 0; // allow
}
ucred_free(cred_info);
return ret;
}

View File

@ -770,7 +770,8 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
return mapAddress;
}
@ -941,7 +942,8 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;

View File

@ -138,9 +138,8 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
// Workaround for an issue where a custom launcher doesn't call
// DestroyJavaVM and NMT is trying to track memory when free is
// called from a static destructor
if (MemTracker::is_on()) {
MemTracker::shutdown(MemTracker::NMT_normal);
}
MemTracker::shutdown();
break;
default:
break;
@ -163,6 +162,10 @@ bool os::getenv(const char* name, char* buffer, int len) {
return result > 0 && result < len;
}
bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (SetEnvironmentVariable(name, NULL) == TRUE);
}
// No setuid programs under Windows.
bool os::have_special_privileges() {
@ -319,15 +322,16 @@ extern "C" void breakpoint() {
* So far, this method is only used by Native Memory Tracking, which is
* only supported on Windows XP or later.
*/
address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n++;
toSkip ++;
#endif
address pc;
if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
return pc;
int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
(PVOID*)stack, NULL);
for (int index = captured; index < frames; index ++) {
stack[index] = NULL;
}
return NULL;
return captured;
}
@ -2901,7 +2905,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
os::release_memory(p_buf, bytes + chunk_size);
// we still need to round up to a page boundary (in case we are using large pages)
@ -2967,7 +2971,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// need to create a dummy 'reserve' record to match
// the release.
MemTracker::record_virtual_memory_reserve((address)p_buf,
bytes_to_release, mtNone, CALLER_PC);
bytes_to_release, CALLER_PC);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
@ -2986,11 +2990,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
}
// Although the memory is allocated individually, it is returned as one.
// NMT records it as one block.
address pc = CALLER_PC;
if ((flags & MEM_COMMIT) != 0) {
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
} else {
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
}
// made it this far, success
@ -3188,8 +3191,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, boo
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
if (res != NULL) {
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
}
return res;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "os_windows.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
@ -1388,7 +1389,7 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
// the file has been successfully created and the file mapping
// object has been created.
sharedmem_fileHandle = fh;
sharedmem_fileName = strdup(filename);
sharedmem_fileName = os::strdup(filename);
return fmh;
}
@ -1498,7 +1499,8 @@ static char* mapping_create_shared(size_t size) {
(void)memset(mapAddress, '\0', size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
return (char*) mapAddress;
}
@ -1680,7 +1682,8 @@ static void open_file_mapping(const char* user, int vmid,
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
CURRENT_PC, mtInternal);
*addrp = (char*)mapAddress;
@ -1834,10 +1837,14 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
return;
}
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
// it does not go through os api, the operation has to record from here
tkr.record((address)addr, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
// it does not go through os api, the operation has to record from here
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
tkr.record((address)addr, bytes);
} else {
remove_file_mapping(addr);
}
}
char* PerfMemory::backing_store_filename() {

View File

@ -23,6 +23,8 @@
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "vm_version_sparc.hpp"
@ -48,7 +50,7 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
// All SI defines used below must be supported.
guarantee(bufsize != -1, "must be supported");
char* buf = (char*) malloc(bufsize);
char* buf = (char*) os::malloc(bufsize, mtInternal);
if (buf == NULL)
return;
@ -60,133 +62,96 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
}
}
free(buf);
os::free(buf);
}
int VM_Version::platform_features(int features) {
// getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
// supported on Solaris 10 and later.
if (os::Solaris::supports_getisax()) {
assert(os::Solaris::supports_getisax(), "getisax() must be available");
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
// Check 64-bit architecture.
do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
// Check 64-bit architecture.
do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or less av's");
uint_t av = avs[0];
// Extract valid instruction set extensions.
uint_t avs[2];
uint_t avn = os::Solaris::getisax(avs, 2);
assert(avn <= 2, "should return two or less av's");
uint_t av = avs[0];
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
if (PrintMiscellaneous && Verbose) {
tty->print("getisax(2) returned: " PTR32_FORMAT, av);
if (avn > 1) {
tty->print(", " PTR32_FORMAT, avs[1]);
}
tty->cr();
}
#endif
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
if (avn > 1) {
uint_t av2 = avs[1];
#ifndef AV2_SPARC_SPARC5
#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
#endif
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
if (av2 & AV2_SPARC_SPARC5) features |= sparc5_instructions_m;
}
// Next values are not defined before Solaris 10
// but Solaris 8 is used for jdk6 update builds.
// We only build on Solaris 10 and up, but some of the values below
// are not defined on all versions of Solaris 10, so we define them,
// if necessary.
#ifndef AV_SPARC_ASI_BLK_INIT
#define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */
#endif
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
#ifndef AV_SPARC_FMAF
#define AV_SPARC_FMAF 0x0100 /* Fused Multiply-Add */
#endif
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
#ifndef AV_SPARC_FMAU
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#endif
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;
#ifndef AV_SPARC_VIS3
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#endif
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;
#ifndef AV_SPARC_CBCOND
#define AV_SPARC_CBCOND 0x10000000 /* compare and branch instrs supported */
#endif
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
if (av & AV_SPARC_CBCOND) features |= cbcond_instructions_m;
#ifndef AV_SPARC_AES
#define AV_SPARC_AES 0x00020000 /* aes instrs supported */
#endif
if (av & AV_SPARC_AES) features |= aes_instructions_m;
if (av & AV_SPARC_AES) features |= aes_instructions_m;
#ifndef AV_SPARC_SHA1
#define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */
#endif
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
if (av & AV_SPARC_SHA1) features |= sha1_instruction_m;
#ifndef AV_SPARC_SHA256
#define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */
#endif
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
if (av & AV_SPARC_SHA256) features |= sha256_instruction_m;
#ifndef AV_SPARC_SHA512
#define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */
#endif
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) is not supported.");
#endif
char tmp;
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
char* buf = (char*) malloc(bufsize);
if (buf != NULL) {
if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
// Figure out what kind of sparc we have
char *sparc_string = strstr(buf, "sparc");
if (sparc_string != NULL) { features |= v8_instructions_m;
if (sparc_string[5] == 'v') {
if (sparc_string[6] == '8') {
if (sparc_string[7] == '-') { features |= hardware_mul32_m;
features |= hardware_div32_m;
} else if (sparc_string[7] == 'p') features |= generic_v9_m;
else features |= generic_v8_m;
} else if (sparc_string[6] == '9') features |= generic_v9_m;
}
}
// Check for visualization instructions
char *vis = strstr(buf, "vis");
if (vis != NULL) { features |= vis1_instructions_m;
if (vis[3] == '2') features |= vis2_instructions_m;
}
}
free(buf);
}
}
if (av & AV_SPARC_SHA512) features |= sha512_instruction_m;
// Determine the machine type.
do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
@ -201,37 +166,18 @@ int VM_Version::platform_features(int features) {
kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
for (int i = 0; i < ksp->ks_ndata; i++) {
if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
#ifndef KSTAT_DATA_STRING
#define KSTAT_DATA_STRING 9
#endif
if (knm[i].data_type == KSTAT_DATA_CHAR) {
// VM is running on Solaris 8 which does not have value.str.
implementation = &(knm[i].value.c[0]);
} else if (knm[i].data_type == KSTAT_DATA_STRING) {
// VM is running on Solaris 10.
#ifndef KSTAT_NAMED_STR_PTR
// Solaris 8 was used to build VM, define the structure it misses.
struct str_t {
union {
char *ptr; /* NULL-term string */
char __pad[8]; /* 64-bit padding */
} addr;
uint32_t len; /* # bytes for strlen + '\0' */
};
#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr)
#endif
implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
}
implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("cpu_info.implementation: %s", implementation);
}
#endif
// Convert to UPPER case before compare.
char* impl = strdup(implementation);
char* impl = os::strdup_check_oom(implementation);
for (int i = 0; impl[i] != 0; i++)
impl[i] = (char)toupper((uint)impl[i]);
if (strstr(impl, "SPARC64") != NULL) {
features |= sparc64_family_m;
} else if (strstr(impl, "SPARC-M") != NULL) {
@ -246,13 +192,15 @@ int VM_Version::platform_features(int features) {
if (strstr(impl, "SPARC") == NULL) {
#ifndef PRODUCT
// kstat on Solaris 8 virtual machines (branded zones)
// returns "(unsupported)" implementation.
warning("kstat cpu_info implementation = '%s', should contain SPARC", impl);
// returns "(unsupported)" implementation. Solaris 8 is not
// supported anymore, but include this check to be on the
// safe side.
warning("kstat cpu_info implementation = '%s', assume generic SPARC", impl);
#endif
implementation = "SPARC";
}
}
free((void*)impl);
os::free((void*)impl);
break;
}
} // for(

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -997,7 +997,7 @@ void ArchDesc::build_pipe_classes(FILE *fp_cpp) {
int nopcnt = 0;
for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; nopcnt++ );
fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d], Compile *C) {\n", nopcnt);
fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d]) {\n", nopcnt);
int i = 0;
for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; i++ ) {
fprintf(fp_cpp, " nop_list[%d] = (MachNode *) new %sNode();\n", i, nop);
@ -1369,7 +1369,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
fprintf(fp, " ra_->add_reference(root, inst%d);\n", inst_num);
fprintf(fp, " ra_->set_oop (root, ra_->is_oop(inst%d));\n", inst_num);
fprintf(fp, " ra_->set_pair(root->_idx, ra_->get_reg_second(inst%d), ra_->get_reg_first(inst%d));\n", inst_num, inst_num);
fprintf(fp, " root->_opnds[0] = inst%d->_opnds[0]->clone(C); // result\n", inst_num);
fprintf(fp, " root->_opnds[0] = inst%d->_opnds[0]->clone(); // result\n", inst_num);
fprintf(fp, " // ----- Done with initial setup -----\n");
} else {
if( (op_form == NULL) || (op_form->is_base_constant(globals) == Form::none) ) {
@ -1382,7 +1382,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
} else {
fprintf(fp, " // no ideal edge for constants after matching\n");
}
fprintf(fp, " root->_opnds[%d] = inst%d->_opnds[%d]->clone(C);\n",
fprintf(fp, " root->_opnds[%d] = inst%d->_opnds[%d]->clone();\n",
opnds_index, inst_num, inst_op_num );
}
++opnds_index;
@ -1402,7 +1402,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
// Define the Peephole method for an instruction node
void ArchDesc::definePeephole(FILE *fp, InstructForm *node) {
// Generate Peephole function header
fprintf(fp, "MachNode *%sNode::peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C ) {\n", node->_ident);
fprintf(fp, "MachNode *%sNode::peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted) {\n", node->_ident);
fprintf(fp, " bool matches = true;\n");
// Identify the maximum instruction position,
@ -1593,7 +1593,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
}
const char *resultOper = new_inst->reduce_result();
fprintf(fp," n%d->set_opnd_array(0, state->MachOperGenerator( %s, C ));\n",
fprintf(fp," n%d->set_opnd_array(0, state->MachOperGenerator(%s));\n",
cnt, machOperEnum(resultOper));
// get the formal operand NameList
@ -1634,7 +1634,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
// If there is no use of the created operand, just skip it
if (new_pos != NameList::Not_in_list) {
//Copy the operand from the original made above
fprintf(fp," n%d->set_opnd_array(%d, op%d->clone(C)); // %s\n",
fprintf(fp," n%d->set_opnd_array(%d, op%d->clone()); // %s\n",
cnt, new_pos, exp_pos-node->num_opnds(), opid);
// Check for who defines this operand & add edge if needed
fprintf(fp," if(tmp%d != NULL)\n", exp_pos);
@ -1662,7 +1662,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
new_pos = new_inst->operand_position(parameter,Component::USE);
if (new_pos != -1) {
// Copy the operand from the ExpandNode to the new node
fprintf(fp," n%d->set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
fprintf(fp," n%d->set_opnd_array(%d, opnd_array(%d)->clone()); // %s\n",
cnt, new_pos, exp_pos, opid);
// For each operand add appropriate input edges by looking at tmp's
fprintf(fp," if(tmp%d == this) {\n", exp_pos);
@ -1729,14 +1729,14 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
declared_def = true;
}
if (op && op->_interface && op->_interface->is_RegInterface()) {
fprintf(fp," def = new MachTempNode(state->MachOperGenerator( %s, C ));\n",
fprintf(fp," def = new MachTempNode(state->MachOperGenerator(%s));\n",
machOperEnum(op->_ident));
fprintf(fp," add_req(def);\n");
// The operand for TEMP is already constructed during
// this mach node construction, see buildMachNode().
//
// int idx = node->operand_position_format(comp->_name);
// fprintf(fp," set_opnd_array(%d, state->MachOperGenerator( %s, C ));\n",
// fprintf(fp," set_opnd_array(%d, state->MachOperGenerator(%s));\n",
// idx, machOperEnum(op->_ident));
} else {
assert(false, "can't have temps which aren't registers");
@ -1802,7 +1802,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
uint j = node->unique_opnds_idx(i);
// unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique.
if( j != node->unique_opnds_idx(j) ) {
fprintf(fp," set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
fprintf(fp," set_opnd_array(%d, opnd_array(%d)->clone()); // %s\n",
new_num_opnds, i, comp->_name);
// delete not unique edges here
fprintf(fp," for(unsigned i = 0; i < num%d; i++) {\n", i);
@ -2839,12 +2839,12 @@ static void defineIn_RegMask(FILE *fp, FormDict &globals, OperandForm &oper) {
// generate code to create a clone for a class derived from MachOper
//
// (0) MachOper *MachOperXOper::clone(Compile* C) const {
// (0) MachOper *MachOperXOper::clone() const {
// (1) return new MachXOper( _ccode, _c0, _c1, ..., _cn);
// (2) }
//
static void defineClone(FILE *fp, FormDict &globalNames, OperandForm &oper) {
fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper._ident);
fprintf(fp,"MachOper *%sOper::clone() const {\n", oper._ident);
// Check for constants that need to be copied over
const int num_consts = oper.num_consts(globalNames);
const bool is_ideal_bool = oper.is_ideal_bool();
@ -3043,7 +3043,7 @@ void ArchDesc::define_oper_interface(FILE *fp, OperandForm &oper, FormDict &glob
static void define_fill_new_machnode(bool used, FILE *fp_cpp) {
fprintf(fp_cpp, "\n");
fprintf(fp_cpp, "// Copy _idx, inputs and operands to new node\n");
fprintf(fp_cpp, "void MachNode::fill_new_machnode( MachNode* node, Compile* C) const {\n");
fprintf(fp_cpp, "void MachNode::fill_new_machnode(MachNode* node) const {\n");
if( !used ) {
fprintf(fp_cpp, " // This architecture does not have cisc or short branch instructions\n");
fprintf(fp_cpp, " ShouldNotCallThis();\n");
@ -3064,7 +3064,7 @@ static void define_fill_new_machnode(bool used, FILE *fp_cpp) {
fprintf(fp_cpp, " MachOper **to = node->_opnds;\n");
fprintf(fp_cpp, " for( int i = 0; i < nopnds; i++ ) {\n");
fprintf(fp_cpp, " if( i != cisc_operand() ) \n");
fprintf(fp_cpp, " to[i] = _opnds[i]->clone(C);\n");
fprintf(fp_cpp, " to[i] = _opnds[i]->clone();\n");
fprintf(fp_cpp, " }\n");
fprintf(fp_cpp, "}\n");
}
@ -3105,7 +3105,7 @@ void ArchDesc::defineClasses(FILE *fp) {
if ( strcmp(oper->_ident,"label") == 0 ) {
defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper);
fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper->_ident);
fprintf(fp,"MachOper *%sOper::clone() const {\n", oper->_ident);
fprintf(fp," return new %sOper(_label, _block_num);\n", oper->_ident);
fprintf(fp,"}\n");
@ -3124,7 +3124,7 @@ void ArchDesc::defineClasses(FILE *fp) {
if ( strcmp(oper->_ident,"method") == 0 ) {
defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper);
fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper->_ident);
fprintf(fp,"MachOper *%sOper::clone() const {\n", oper->_ident);
fprintf(fp," return new %sOper(_method);\n", oper->_ident);
fprintf(fp,"}\n");
@ -3845,7 +3845,7 @@ void ArchDesc::buildMachOperGenerator(FILE *fp_cpp) {
"// that invokes 'new' on the corresponding class constructor.\n");
fprintf(fp_cpp, "\n");
fprintf(fp_cpp, "MachOper *State::MachOperGenerator");
fprintf(fp_cpp, "(int opcode, Compile* C)");
fprintf(fp_cpp, "(int opcode)");
fprintf(fp_cpp, "{\n");
fprintf(fp_cpp, "\n");
fprintf(fp_cpp, " switch(opcode) {\n");
@ -3921,7 +3921,7 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
int index = clist.operand_position(comp->_name, comp->_usedef, inst);
const char *opcode = machOperEnum(comp->_type);
fprintf(fp_cpp, "%s node->set_opnd_array(%d, ", indent, index);
fprintf(fp_cpp, "MachOperGenerator(%s, C));\n", opcode);
fprintf(fp_cpp, "MachOperGenerator(%s));\n", opcode);
}
}
else if ( inst->is_chain_of_constant(_globalNames, opType) ) {
@ -3978,7 +3978,7 @@ void InstructForm::declare_cisc_version(ArchDesc &AD, FILE *fp_hpp) {
InstructForm *inst_cisc = cisc_spill_alternate();
if (inst_cisc != NULL) {
fprintf(fp_hpp, " virtual int cisc_operand() const { return %d; }\n", cisc_spill_operand());
fprintf(fp_hpp, " virtual MachNode *cisc_version(int offset, Compile* C);\n");
fprintf(fp_hpp, " virtual MachNode *cisc_version(int offset);\n");
fprintf(fp_hpp, " virtual void use_cisc_RegMask();\n");
fprintf(fp_hpp, " virtual const RegMask *cisc_RegMask() const { return _cisc_RegMask; }\n");
}
@ -4008,7 +4008,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
// Construct CISC version of this instruction
fprintf(fp_cpp, "\n");
fprintf(fp_cpp, "// Build CISC version of this instruction\n");
fprintf(fp_cpp, "MachNode *%sNode::cisc_version( int offset, Compile* C ) {\n", this->_ident);
fprintf(fp_cpp, "MachNode *%sNode::cisc_version(int offset) {\n", this->_ident);
// Create the MachNode object
fprintf(fp_cpp, " %sNode *node = new %sNode();\n", name, name);
// Fill in the bottom_type where requested
@ -4023,7 +4023,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
fprintf(fp_cpp, "\n");
fprintf(fp_cpp, " // Copy _idx, inputs and operands to new node\n");
fprintf(fp_cpp, " fill_new_machnode(node, C);\n");
fprintf(fp_cpp, " fill_new_machnode(node);\n");
// Construct operand to access [stack_pointer + offset]
fprintf(fp_cpp, " // Construct operand to access [stack_pointer + offset]\n");
fprintf(fp_cpp, " node->set_opnd_array(cisc_operand(), new %sOper(offset));\n", cisc_oper_name);
@ -4042,7 +4042,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
// Build prototypes for short branch methods
void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {
if (has_short_branch_form()) {
fprintf(fp_hpp, " virtual MachNode *short_branch_version(Compile* C);\n");
fprintf(fp_hpp, " virtual MachNode *short_branch_version();\n");
}
}
@ -4055,7 +4055,7 @@ bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
// Construct short_branch_version() method.
fprintf(fp_cpp, "// Build short branch version of this instruction\n");
fprintf(fp_cpp, "MachNode *%sNode::short_branch_version(Compile* C) {\n", this->_ident);
fprintf(fp_cpp, "MachNode *%sNode::short_branch_version() {\n", this->_ident);
// Create the MachNode object
fprintf(fp_cpp, " %sNode *node = new %sNode();\n", name, name);
if( is_ideal_if() ) {
@ -4071,7 +4071,7 @@ bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
// Short branch version must use same node index for access
// through allocator's tables
fprintf(fp_cpp, " // Copy _idx, inputs and operands to new node\n");
fprintf(fp_cpp, " fill_new_machnode(node, C);\n");
fprintf(fp_cpp, " fill_new_machnode(node);\n");
// Return result and exit scope
fprintf(fp_cpp, " return node;\n");
@ -4097,7 +4097,7 @@ void ArchDesc::buildMachNodeGenerator(FILE *fp_cpp) {
"// that invokes 'new' on the corresponding class constructor.\n");
fprintf(fp_cpp, "\n");
fprintf(fp_cpp, "MachNode *State::MachNodeGenerator");
fprintf(fp_cpp, "(int opcode, Compile* C)");
fprintf(fp_cpp, "(int opcode)");
fprintf(fp_cpp, "{\n");
fprintf(fp_cpp, " switch(opcode) {\n");


@ -1119,7 +1119,7 @@ void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
fprintf(fp_hpp, " _nop_count = %d\n",
_pipeline->_nopcnt);
fprintf(fp_hpp, " };\n\n");
fprintf(fp_hpp, " static void initialize_nops(MachNode *nop_list[%d], Compile* C);\n\n",
fprintf(fp_hpp, " static void initialize_nops(MachNode *nop_list[%d]);\n\n",
_pipeline->_nopcnt);
fprintf(fp_hpp, "#ifndef PRODUCT\n");
fprintf(fp_hpp, " void dump(outputStream *st = tty) const;\n");
@ -1240,7 +1240,7 @@ void ArchDesc::declareClasses(FILE *fp) {
constant_type, _globalNames);
// Clone function
fprintf(fp," virtual MachOper *clone(Compile* C) const;\n");
fprintf(fp," virtual MachOper *clone() const;\n");
// Support setting a spill offset into a constant operand.
// We only support setting an 'int' offset, while in the
@ -1718,7 +1718,7 @@ void ArchDesc::declareClasses(FILE *fp) {
// If there is an explicit peephole rule, build it
if ( instr->peepholes() != NULL ) {
fprintf(fp," virtual MachNode *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile *C);\n");
fprintf(fp," virtual MachNode *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted);\n");
}
// Output the declaration for number of relocation entries
@ -1863,7 +1863,7 @@ void ArchDesc::declareClasses(FILE *fp) {
}
if ( instr->num_post_match_opnds() != 0
|| instr->is_chain_of_constant(_globalNames) ) {
fprintf(fp," friend MachNode *State::MachNodeGenerator(int opcode, Compile* C);\n");
fprintf(fp," friend MachNode *State::MachNodeGenerator(int opcode);\n");
}
if ( instr->rematerialize(_globalNames, get_registers()) ) {
fprintf(fp," // Rematerialize %s\n", instr->_ident);
@ -2071,8 +2071,8 @@ void ArchDesc::defineStateClass(FILE *fp) {
fprintf(fp," DEBUG_ONLY( ~State(void); ) // Destructor\n");
fprintf(fp,"\n");
fprintf(fp," // Methods created by ADLC and invoked by Reduce\n");
fprintf(fp," MachOper *MachOperGenerator( int opcode, Compile* C );\n");
fprintf(fp," MachNode *MachNodeGenerator( int opcode, Compile* C );\n");
fprintf(fp," MachOper *MachOperGenerator(int opcode);\n");
fprintf(fp," MachNode *MachNodeGenerator(int opcode);\n");
fprintf(fp,"\n");
fprintf(fp," // Assign a state to a node, definition of method produced by ADLC\n");
fprintf(fp," bool DFA( int opcode, const Node *ideal );\n");


@ -269,7 +269,7 @@ address CodeBuffer::decode_begin() {
GrowableArray<int>* CodeBuffer::create_patch_overflow() {
if (_overflow_arena == NULL) {
_overflow_arena = new (mtCode) Arena();
_overflow_arena = new (mtCode) Arena(mtCode);
}
return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}


@ -48,7 +48,7 @@ Compiler::Compiler() : AbstractCompiler(c1) {
void Compiler::init_c1_runtime() {
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
Runtime1::initialize(buffer_blob);
FrameMap::initialize();
// initialize data structures


@ -30,6 +30,7 @@
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/os.hpp"
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
// we must have enough patching space so that call can be inserted
@ -848,7 +849,7 @@ void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
stringStream st;
st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
_masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
_masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
_masm->verify_oop(r->as_Register());
#endif


@ -1613,25 +1613,22 @@ void LinearScan::allocate_registers() {
Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;
create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval);
if (has_fpu_registers()) {
create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
#ifdef ASSERT
} else {
// fpu register allocation is omitted because no virtual fpu registers are present
// just check this again...
create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
#endif
}
// allocate cpu registers
create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals,
is_precolored_cpu_interval, is_virtual_cpu_interval);
// allocate fpu registers
create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals,
is_precolored_fpu_interval, is_virtual_fpu_interval);
// The fpu interval allocation cannot be moved down into the fpu section below,
// as cpu_lsw.walk() changes interval positions.
LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
cpu_lsw.walk();
cpu_lsw.finish_allocation();
if (has_fpu_registers()) {
// allocate fpu registers
LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
fpu_lsw.walk();
fpu_lsw.finish_allocation();


@ -546,13 +546,18 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// normal bytecode execution.
thread->clear_exception_oop_and_pc();
Handle original_exception(thread, exception());
continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
// If an exception was thrown during exception dispatch, the exception oop may have changed
thread->set_exception_oop(exception());
thread->set_exception_pc(pc);
// the exception cache is used only by non-implicit exceptions
if (continuation != NULL) {
// Update the exception cache only if no other exception occurred
// during the computation of the compiled exception handler.
if (continuation != NULL && original_exception() == exception()) {
nm->add_handler_for_exception_and_pc(exception, pc, continuation);
}
}
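
The guard added here follows a general pattern worth spelling out: a computed result is published to a cache only if the input it was derived from is verified unchanged afterwards. A minimal standalone sketch of that pattern, with hypothetical names (this is not the HotSpot runtime):

#include <cstdio>

// A result is cached only if the input it was computed from is still
// current afterwards. Illustrative types and values only.
struct Thread { const void* exception_oop; };

static const void* compute_handler(Thread* t, bool dispatch_throws) {
  if (dispatch_throws) {
    t->exception_oop = (const void*)0x2;  // dispatch replaced the exception
  }
  return (const void*)0xbeef;             // the continuation address
}

int main() {
  Thread t = { (const void*)0x1 };
  const void* original_exception = t.exception_oop;
  const void* continuation = compute_handler(&t, /*dispatch_throws=*/true);
  if (continuation != NULL && original_exception == t.exception_oop) {
    printf("update exception cache\n");   // safe: same exception as before
  } else {
    printf("skip cache update\n");        // a new exception appeared meanwhile
  }
  return 0;
}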


@ -86,7 +86,8 @@ static bool firstEnv = true;
// ------------------------------------------------------------------
// ciEnv::ciEnv
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
: _ciEnv_arena(mtCompiler) {
VM_ENTRY_MARK;
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
@ -144,7 +145,7 @@ ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
_jvmti_can_pop_frame = false;
}
ciEnv::ciEnv(Arena* arena) {
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
ASSERT_IN_VM;
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.


@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,7 +112,7 @@ void ciObjectFactory::initialize() {
// This Arena is long lived and exists in the resource mark of the
// compiler thread that initializes the initial ciObjectFactory which
// creates the shared ciObjects that all later ciObjectFactories use.
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
ciEnv initial(arena);
ciEnv* env = ciEnv::current();
env->_factory->init_shared_objects();


@ -31,6 +31,9 @@
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#if INCLUDE_CDS
#include "classfile/systemDictionaryShared.hpp"
#endif
#include "classfile/verificationType.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
@ -60,6 +63,7 @@
#include "services/threadService.hpp"
#include "utilities/array.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
// We generally try to create the oops directly when parsing, rather than
// allocating temporary data structures and copying the bytes twice. A
@ -3786,7 +3790,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
instanceKlassHandle nullHandle;
// Figure out whether we can skip format checking (matching classic VM behavior)
_need_verify = Verifier::should_verify_for(class_loader(), verify);
if (DumpSharedSpaces) {
// verify == true means it's a 'remote' class (i.e., non-boot class)
// Verification decision is based on BytecodeVerificationRemote flag
// for those classes.
_need_verify = (verify) ? BytecodeVerificationRemote :
BytecodeVerificationLocal;
} else {
_need_verify = Verifier::should_verify_for(class_loader(), verify);
}
// Set the verify flag in stream
cfs->set_verify(_need_verify);
@ -3805,6 +3817,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
u2 minor_version = cfs->get_u2_fast();
u2 major_version = cfs->get_u2_fast();
if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) {
ResourceMark rm;
warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
major_version, minor_version, name->as_C_string());
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"Unsupported major.minor version for dump time %u.%u",
major_version,
minor_version);
}
// Check version numbers - we check this even with verifier off
if (!is_supported_version(major_version, minor_version)) {
if (name == NULL) {
@ -3912,6 +3936,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
tty->print_cr("]");
}
#if INCLUDE_CDS
if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into the CDS archive
if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
if (name != NULL) {
ResourceMark rm(THREAD);
classlist_file->print_cr("%s", name->as_C_string());
classlist_file->flush();
}
}
}
#endif
u2 super_class_index = cfs->get_u2_fast();
instanceKlassHandle super_klass = parse_super_class(super_class_index,


@ -26,8 +26,13 @@
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.hpp"
#if INCLUDE_CDS
#include "classfile/sharedPathsMiscInfo.hpp"
#include "classfile/sharedClassUtil.hpp"
#endif
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
@ -35,6 +40,7 @@
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/generation.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.inline.hpp"
@ -114,8 +120,12 @@ PerfCounter* ClassLoader::_load_instance_class_failCounter = NULL;
ClassPathEntry* ClassLoader::_first_entry = NULL;
ClassPathEntry* ClassLoader::_last_entry = NULL;
int ClassLoader::_num_entries = 0;
PackageHashtable* ClassLoader::_package_hash_table = NULL;
#if INCLUDE_CDS
SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
#endif
// helper routines
bool string_starts_with(const char* str, const char* str_to_find) {
size_t str_len = strlen(str);
@ -194,6 +204,14 @@ ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
// check if file exists
struct stat st;
if (os::stat(path, &st) == 0) {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
// We have already checked in ClassLoader::check_shared_classpath() that the directory is empty, so
// we should never find a file underneath it -- unless the user has added a new file while we are running
// the dump, in which case let's quit!
ShouldNotReachHere();
}
#endif
// found file, open it
int file_handle = os::open(path, 0, 0);
if (file_handle != -1) {
@ -228,13 +246,13 @@ ClassPathZipEntry::~ClassPathZipEntry() {
FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
}
ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
// enable call to C land
u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
// enable call to C land
JavaThread* thread = JavaThread::current();
ThreadToNativeFromVM ttn(thread);
// check whether zip archive contains name
jint filesize, name_len;
jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len);
jint name_len;
jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len);
if (entry == NULL) return NULL;
u1* buffer;
char name_buf[128];
@ -245,19 +263,33 @@ ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
}
// file found, get pointer to class in mmaped jar file.
// file found, get pointer to the entry in mmapped jar file.
if (ReadMappedEntry == NULL ||
!(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
// mmaped access not available, perhaps due to compression,
// mmapped access not available, perhaps due to compression,
// read contents into resource array
buffer = NEW_RESOURCE_ARRAY(u1, filesize);
int size = (*filesize) + ((nul_terminate) ? 1 : 0);
buffer = NEW_RESOURCE_ARRAY(u1, size);
if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
}
// return result
if (nul_terminate) {
buffer[*filesize] = 0;
}
return buffer;
}
ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
jint filesize;
u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
if (buffer == NULL) {
return NULL;
}
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);
}
// return result
return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
}
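
The nul_terminate contract introduced by open_entry() above can be sketched in isolation, with stdio standing in for the zip callbacks: the caller asks for one extra byte so the entry bytes can be handed to C string functions. read_entry is a hypothetical helper, not a HotSpot API:

#include <cstdio>
#include <cstdlib>

// Allocate one extra byte and terminate the buffer so callers may treat
// the contents as a C string. Illustrative only.
static unsigned char* read_entry(const char* path, long* filesize, bool nul_terminate) {
  FILE* f = fopen(path, "rb");
  if (f == NULL) return NULL;
  fseek(f, 0, SEEK_END);
  *filesize = ftell(f);
  fseek(f, 0, SEEK_SET);
  long size = *filesize + (nul_terminate ? 1 : 0);
  unsigned char* buffer = (unsigned char*)malloc((size_t)size);
  if (buffer != NULL && fread(buffer, 1, (size_t)*filesize, f) != (size_t)*filesize) {
    free(buffer);
    buffer = NULL;
  }
  if (buffer != NULL && nul_terminate) {
    buffer[*filesize] = 0;  // mirrors buffer[*filesize] = 0 in open_entry()
  }
  fclose(f);
  return buffer;
}

int main() {
  long n = 0;
  unsigned char* text = read_entry("META-INF/MANIFEST.MF", &n, true);
  if (text != NULL) {
    printf("%s", (const char*)text);  // safe: buffer is NUL-terminated
    free(text);
  }
  return 0;
}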
// invoke function for each entry in the zip file
@ -272,12 +304,17 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
}
}
LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
_path = strdup(path);
LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
_path = os::strdup_check_oom(path);
_st = *st;
_meta_index = NULL;
_resolved_entry = NULL;
_has_error = false;
_throw_exception = throw_exception;
}
LazyClassPathEntry::~LazyClassPathEntry() {
os::free(_path);
}
bool LazyClassPathEntry::is_jar_file() {
@ -289,7 +326,11 @@ ClassPathEntry* LazyClassPathEntry::resolve_entry(TRAPS) {
return (ClassPathEntry*) _resolved_entry;
}
ClassPathEntry* new_entry = NULL;
new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, _throw_exception, CHECK_NULL);
if (!_throw_exception && new_entry == NULL) {
assert(!HAS_PENDING_EXCEPTION, "must be");
return NULL;
}
{
ThreadCritical tc;
if (_resolved_entry == NULL) {
@ -323,6 +364,23 @@ bool LazyClassPathEntry::is_lazy() {
return true;
}
u1* LazyClassPathEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
if (_has_error) {
return NULL;
}
ClassPathEntry* cpe = resolve_entry(THREAD);
if (cpe == NULL) {
_has_error = true;
return NULL;
} else if (cpe->is_jar_file()) {
return ((ClassPathZipEntry*)cpe)->open_entry(name, filesize, nul_terminate, THREAD);
} else {
ShouldNotReachHere();
*filesize = 0;
return NULL;
}
}
static void print_meta_index(LazyClassPathEntry* entry,
GrowableArray<char*>& meta_packages) {
tty->print("[Meta index for %s=", entry->name());
@ -333,15 +391,62 @@ static void print_meta_index(LazyClassPathEntry* entry,
tty->print_cr("]");
}
#if INCLUDE_CDS
void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
assert(DumpSharedSpaces, "only called at dump time");
tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure");
vm_exit_during_initialization(error, message);
}
#endif
void ClassLoader::setup_meta_index() {
void ClassLoader::trace_class_path(const char* msg, const char* name) {
if (!TraceClassPaths) {
return;
}
if (msg) {
tty->print("%s", msg);
}
if (name) {
if (strlen(name) < 256) {
tty->print("%s", name);
} else {
// For very long paths, we need to print each character separately,
// as print_cr() has a length limit
while (name[0] != '\0') {
tty->print("%c", name[0]);
name++;
}
}
}
if (msg && msg[0] == '[') {
tty->print_cr("]");
} else {
tty->cr();
}
}
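
A standalone analogue of the per-character fallback above, assuming only that the formatted path has a bounded internal buffer (as tty->print_cr() does); print_path and buffer_limit are hypothetical, not the outputStream API:

#include <cstdio>
#include <cstring>

// A formatted print call with a bounded internal buffer, bypassed one
// character at a time for long input.
static void print_path(const char* name, size_t buffer_limit) {
  if (strlen(name) < buffer_limit) {
    printf("%s", name);        // short enough for one formatted call
  } else {
    for (; *name != '\0'; name++) {
      putchar(*name);          // char-by-char sidesteps the buffer limit
    }
  }
}

int main() {
  print_path("/a/very/long/boot/class/path.jar", 16);
  printf("\n");
  return 0;
}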
void ClassLoader::setup_bootstrap_meta_index() {
// Set up meta index which allows us to open boot jars lazily if
// class data sharing is enabled
const char* meta_index_path = Arguments::get_meta_index_path();
const char* meta_index_dir = Arguments::get_meta_index_dir();
setup_meta_index(meta_index_path, meta_index_dir, 0);
}
void ClassLoader::setup_meta_index(const char* meta_index_path, const char* meta_index_dir, int start_index) {
const char* known_version = "% VERSION 2";
char* meta_index_path = Arguments::get_meta_index_path();
char* meta_index_dir = Arguments::get_meta_index_dir();
FILE* file = fopen(meta_index_path, "r");
int line_no = 0;
#if INCLUDE_CDS
if (DumpSharedSpaces) {
if (file != NULL) {
_shared_paths_misc_info->add_required_file(meta_index_path);
} else {
_shared_paths_misc_info->add_nonexist_path(meta_index_path);
}
}
#endif
if (file != NULL) {
ResourceMark rm;
LazyClassPathEntry* cur_entry = NULL;
@ -376,7 +481,7 @@ void ClassLoader::setup_meta_index() {
// Hand off current packages to current lazy entry (if any)
if ((cur_entry != NULL) &&
(boot_class_path_packages.length() > 0)) {
if (TraceClassLoading && Verbose) {
if ((TraceClassLoading || TraceClassPaths) && Verbose) {
print_meta_index(cur_entry, boot_class_path_packages);
}
MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@ -387,8 +492,10 @@ void ClassLoader::setup_meta_index() {
boot_class_path_packages.clear();
// Find lazy entry corresponding to this jar file
for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) {
if (entry->is_lazy() &&
int count = 0;
for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next(), count++) {
if (count >= start_index &&
entry->is_lazy() &&
string_starts_with(entry->name(), meta_index_dir) &&
string_ends_with(entry->name(), &package_name[2])) {
cur_entry = (LazyClassPathEntry*) entry;
@ -416,7 +523,7 @@ void ClassLoader::setup_meta_index() {
default:
{
if (!skipCurrentJar && cur_entry != NULL) {
char* new_name = strdup(package_name);
char* new_name = os::strdup_check_oom(package_name);
boot_class_path_packages.append(new_name);
}
}
@ -425,7 +532,7 @@ void ClassLoader::setup_meta_index() {
// Hand off current packages to current lazy entry (if any)
if ((cur_entry != NULL) &&
(boot_class_path_packages.length() > 0)) {
if (TraceClassLoading && Verbose) {
if ((TraceClassLoading || TraceClassPaths) && Verbose) {
print_meta_index(cur_entry, boot_class_path_packages);
}
MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@ -436,36 +543,88 @@ void ClassLoader::setup_meta_index() {
}
}
void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
if (TraceClassLoading && Verbose) {
tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
#if INCLUDE_CDS
void ClassLoader::check_shared_classpath(const char *path) {
if (strcmp(path, "") == 0) {
exit_with_path_failure("Cannot have empty path in archived classpaths", NULL);
}
int len = (int)strlen(sys_class_path);
struct stat st;
if (os::stat(path, &st) == 0) {
if ((st.st_mode & S_IFREG) != S_IFREG) { // is directory
if (!os::dir_is_empty(path)) {
tty->print_cr("Error: non-empty directory '%s'", path);
exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL);
}
}
}
}
#endif
void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
if (!PrintSharedArchiveAndExit) {
trace_class_path("[Bootstrap loader class path=", sys_class_path);
}
#if INCLUDE_CDS
if (DumpSharedSpaces) {
_shared_paths_misc_info->add_boot_classpath(Arguments::get_sysclasspath());
}
#endif
setup_search_path(sys_class_path);
os::free(sys_class_path);
}
#if INCLUDE_CDS
int ClassLoader::get_shared_paths_misc_info_size() {
return _shared_paths_misc_info->get_used_bytes();
}
void* ClassLoader::get_shared_paths_misc_info() {
return _shared_paths_misc_info->buffer();
}
bool ClassLoader::check_shared_paths_misc_info(void *buf, int size) {
SharedPathsMiscInfo* checker = SharedClassUtil::allocate_shared_paths_misc_info((char*)buf, size);
bool result = checker->check();
delete checker;
return result;
}
#endif
void ClassLoader::setup_search_path(char *class_path) {
int offset = 0;
int len = (int)strlen(class_path);
int end = 0;
// Iterate over class path entries
for (int start = 0; start < len; start = end) {
while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
while (class_path[end] && class_path[end] != os::path_separator()[0]) {
end++;
}
char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass);
strncpy(path, &sys_class_path[start], end-start);
path[end-start] = '\0';
EXCEPTION_MARK;
ResourceMark rm(THREAD);
char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
strncpy(path, &class_path[start], end - start);
path[end - start] = '\0';
update_class_path_entry_list(path, false);
FREE_C_HEAP_ARRAY(char, path, mtClass);
while (sys_class_path[end] == os::path_separator()[0]) {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
check_shared_classpath(path);
}
#endif
while (class_path[end] == os::path_separator()[0]) {
end++;
}
}
}
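
The separator scan in setup_search_path() above is a self-contained technique: split a separator-joined path list without modifying the source string. A minimal sketch, with ':' standing in for os::path_separator()[0] (for_each_entry is an illustrative name, not a HotSpot API):

#include <cstdio>
#include <cstring>

// Walk a class path string entry by entry, skipping runs of separators
// just as the loop above does.
static void for_each_entry(const char* class_path, char sep) {
  int len = (int)strlen(class_path);
  for (int start = 0, end = 0; start < len; start = end) {
    while (class_path[end] && class_path[end] != sep) {
      end++;
    }
    printf("entry: %.*s\n", end - start, &class_path[start]);
    while (class_path[end] == sep) {
      end++;                   // skip empty entries between separators
    }
  }
}

int main() {
  for_each_entry("/a.jar::/b/classes:/c.jar", ':');
  return 0;
}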
ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st,
bool lazy, bool throw_exception, TRAPS) {
JavaThread* thread = JavaThread::current();
if (lazy) {
return new LazyClassPathEntry(path, st);
return new LazyClassPathEntry(path, st, throw_exception);
}
ClassPathEntry* new_entry = NULL;
if ((st->st_mode & S_IFREG) == S_IFREG) {
@ -474,7 +633,11 @@ ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct st
char canonical_path[JVM_MAXPATHLEN];
if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
// This matches the classic VM
THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
if (throw_exception) {
THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
} else {
return NULL;
}
}
char* error_msg = NULL;
jzfile* zip;
@ -486,7 +649,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct st
}
if (zip != NULL && error_msg == NULL) {
new_entry = new ClassPathZipEntry(zip, path);
if (TraceClassLoading) {
if (TraceClassLoading || TraceClassPaths) {
tty->print_cr("[Opened %s]", path);
}
} else {
@ -500,12 +663,16 @@ ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct st
msg = NEW_RESOURCE_ARRAY(char, len); ;
jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
}
THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
if (throw_exception) {
THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
} else {
return NULL;
}
}
} else {
// Directory
new_entry = new ClassPathDirEntry(path);
if (TraceClassLoading) {
if (TraceClassLoading || TraceClassPaths) {
tty->print_cr("[Path %s]", path);
}
}
@ -566,23 +733,37 @@ void ClassLoader::add_to_list(ClassPathEntry *new_entry) {
_last_entry = new_entry;
}
}
_num_entries ++;
}
void ClassLoader::update_class_path_entry_list(char *path,
bool check_for_duplicates) {
// Returns true IFF the file/dir exists and the entry was successfully created.
bool ClassLoader::update_class_path_entry_list(char *path,
bool check_for_duplicates,
bool throw_exception) {
struct stat st;
if (os::stat(path, &st) == 0) {
// File or directory found
ClassPathEntry* new_entry = NULL;
Thread* THREAD = Thread::current();
new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, throw_exception, CHECK_(false));
if (new_entry == NULL) {
return false;
}
// The kernel VM adds dynamically to the end of the classloader path and
// doesn't reorder the bootclasspath which would break java.lang.Package
// (see PackageInfo).
// Add new entry to linked list
if (!check_for_duplicates || !contains_entry(new_entry)) {
add_to_list(new_entry);
ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
}
return true;
} else {
#if INCLUDE_CDS
if (DumpSharedSpaces) {
_shared_paths_misc_info->add_nonexist_path(path);
}
return false;
#endif
}
}
@ -734,10 +915,10 @@ public:
assert(n == number_of_entries(), "just checking");
}
void copy_table(char** top, char* end, PackageHashtable* table);
CDS_ONLY(void copy_table(char** top, char* end, PackageHashtable* table);)
};
#if INCLUDE_CDS
void PackageHashtable::copy_table(char** top, char* end,
PackageHashtable* table) {
// Copy (relocate) the table to the shared space.
@ -745,33 +926,30 @@ void PackageHashtable::copy_table(char** top, char* end,
// Calculate the space needed for the package name strings.
int i;
int n = 0;
for (i = 0; i < table_size(); ++i) {
for (PackageInfo* pp = table->bucket(i);
pp != NULL;
pp = pp->next()) {
n += (int)(strlen(pp->pkgname()) + 1);
}
}
if (*top + n + sizeof(intptr_t) >= end) {
report_out_of_shared_space(SharedMiscData);
}
// Copy the table data (the strings) to the shared space.
n = align_size_up(n, sizeof(HeapWord));
*(intptr_t*)(*top) = n;
*top += sizeof(intptr_t);
intptr_t* tableSize = (intptr_t*)(*top);
*top += sizeof(intptr_t); // For table size
char* tableStart = *top;
for (i = 0; i < table_size(); ++i) {
for (PackageInfo* pp = table->bucket(i);
pp != NULL;
pp = pp->next()) {
int n1 = (int)(strlen(pp->pkgname()) + 1);
if (*top + n1 >= end) {
report_out_of_shared_space(SharedMiscData);
}
pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1));
*top += n1;
}
}
*top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord));
if (*top >= end) {
report_out_of_shared_space(SharedMiscData);
}
// Write table size
intptr_t len = *top - (char*)tableStart;
*tableSize = len;
}
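
The rewritten copy_table() uses a reserve-then-backpatch layout: leave a slot for the size, append the data, then write the final length into the slot. A minimal sketch of that layout under those assumptions (append_with_size is illustrative, not the HotSpot shared-space writer):

#include <cassert>
#include <cstdio>
#include <cstring>
#include <cstdint>

// Reserve a size slot, append the payload, then backpatch the slot with
// the actual length, as the table copy above does.
static char* append_with_size(char* top, char* end, const char* payload) {
  intptr_t* size_slot = (intptr_t*)top;        // placeholder for the size
  top += sizeof(intptr_t);
  char* data_start = top;
  size_t n = strlen(payload) + 1;
  assert(top + n < end);                       // would be report_out_of_shared_space()
  memcpy(top, payload, n);
  top += n;
  *size_slot = (intptr_t)(top - data_start);   // backpatch the real length
  return top;
}

int main() {
  alignas(intptr_t) char buf[64];
  append_with_size(buf, buf + sizeof(buf), "java/lang");
  printf("stored %ld bytes\n", (long)*(intptr_t*)buf);
  return 0;
}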
@ -782,7 +960,7 @@ void ClassLoader::copy_package_info_buckets(char** top, char* end) {
void ClassLoader::copy_package_info_table(char** top, char* end) {
_package_hash_table->copy_table(top, end, _package_hash_table);
}
#endif
PackageInfo* ClassLoader::lookup_package(const char *pkgname) {
const char *cp = strrchr(pkgname, '/');
@ -875,7 +1053,8 @@ objArrayOop ClassLoader::get_system_packages(TRAPS) {
instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
ResourceMark rm(THREAD);
EventMark m("loading class %s", h_name->as_C_string());
const char* class_name = h_name->as_C_string();
EventMark m("loading class %s", class_name);
ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
stringStream st;
@ -883,18 +1062,24 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
// st.print("%s.class", h_name->as_utf8());
st.print_raw(h_name->as_utf8());
st.print_raw(".class");
char* name = st.as_string();
const char* file_name = st.as_string();
ClassLoaderExt::Context context(class_name, file_name, THREAD);
// Lookup stream for parsing .class file
ClassFileStream* stream = NULL;
int classpath_index = 0;
ClassPathEntry* e = NULL;
instanceKlassHandle h;
{
PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_LOAD);
ClassPathEntry* e = _first_entry;
e = _first_entry;
while (e != NULL) {
stream = e->open_stream(name, CHECK_NULL);
stream = e->open_stream(file_name, CHECK_NULL);
if (!context.check(stream, classpath_index)) {
return h; // NULL
}
if (stream != NULL) {
break;
}
@ -903,9 +1088,7 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
}
}
instanceKlassHandle h;
if (stream != NULL) {
// class file found, parse it
ClassFileParser parser(stream);
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
@ -915,12 +1098,19 @@ instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
loader_data,
protection_domain,
parsed_name,
false,
CHECK_(h));
// add to package table
if (add_package(name, classpath_index, THREAD)) {
h = result;
context.should_verify(classpath_index),
THREAD);
if (HAS_PENDING_EXCEPTION) {
ResourceMark rm;
if (DumpSharedSpaces) {
tty->print_cr("Preload Error: Failed to load %s", class_name);
}
return h;
}
h = context.record_result(classpath_index, e, result, THREAD);
} else {
if (DumpSharedSpaces) {
tty->print_cr("Preload Error: Cannot find %s", class_name);
}
}
@ -1015,14 +1205,27 @@ void ClassLoader::initialize() {
// lookup zip library entry points
load_zip_library();
#if INCLUDE_CDS
// initialize search path
if (DumpSharedSpaces) {
_shared_paths_misc_info = SharedClassUtil::allocate_shared_paths_misc_info();
}
#endif
setup_bootstrap_search_path();
if (LazyBootClassLoader) {
// set up meta index which makes boot classpath initialization lazier
setup_meta_index();
setup_bootstrap_meta_index();
}
}
#if INCLUDE_CDS
void ClassLoader::initialize_shared_path() {
if (DumpSharedSpaces) {
ClassLoaderExt::setup_search_paths();
_shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check()
}
}
#endif
jlong ClassLoader::classloader_time_ms() {
return UsePerfData ?


@ -107,6 +107,7 @@ class ClassPathZipEntry: public ClassPathEntry {
const char* name() { return _zip_name; }
ClassPathZipEntry(jzfile* zip, const char* zip_name);
~ClassPathZipEntry();
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
ClassFileStream* open_stream(const char* name, TRAPS);
void contents_do(void f(const char* name, void* context), void* context);
// Debugging
@ -122,12 +123,16 @@ class LazyClassPathEntry: public ClassPathEntry {
struct stat _st;
MetaIndex* _meta_index;
bool _has_error;
bool _throw_exception;
volatile ClassPathEntry* _resolved_entry;
ClassPathEntry* resolve_entry(TRAPS);
public:
bool is_jar_file();
const char* name() { return _path; }
LazyClassPathEntry(char* path, const struct stat* st);
LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception);
virtual ~LazyClassPathEntry();
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
ClassFileStream* open_stream(const char* name, TRAPS);
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
virtual bool is_lazy();
@ -138,6 +143,7 @@ class LazyClassPathEntry: public ClassPathEntry {
class PackageHashtable;
class PackageInfo;
class SharedPathsMiscInfo;
template <MEMFLAGS F> class HashtableBucket;
class ClassLoader: AllStatic {
@ -145,7 +151,7 @@ class ClassLoader: AllStatic {
enum SomeConstants {
package_hash_table_size = 31 // Number of buckets
};
private:
protected:
friend class LazyClassPathEntry;
// Performance counters
@ -187,10 +193,15 @@ class ClassLoader: AllStatic {
static ClassPathEntry* _first_entry;
// Last entry in linked list of ClassPathEntry instances
static ClassPathEntry* _last_entry;
static int _num_entries;
// Hash table used to keep track of loaded packages
static PackageHashtable* _package_hash_table;
static const char* _shared_archive;
// Info used by CDS
CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
// Hash function
static unsigned int hash(const char *s, int n);
// Returns the package file name corresponding to the specified package
@ -201,19 +212,23 @@ class ClassLoader: AllStatic {
static bool add_package(const char *pkgname, int classpath_index, TRAPS);
// Initialization
static void setup_meta_index();
static void setup_bootstrap_meta_index();
static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
int start_index);
static void setup_bootstrap_search_path();
static void setup_search_path(char *class_path);
static void load_zip_library();
static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
bool lazy, TRAPS);
bool lazy, bool throw_exception, TRAPS);
// Canonicalizes path names, so strcmp will work properly. This is mainly
// to avoid confusing the zip library
static bool get_canonical_path(char* orig, char* out, int len);
public:
// Used by the kernel jvm.
static void update_class_path_entry_list(char *path,
bool check_for_duplicates);
static bool update_class_path_entry_list(char *path,
bool check_for_duplicates,
bool throw_exception=true);
static void print_bootclasspath();
// Timing
@ -296,6 +311,7 @@ class ClassLoader: AllStatic {
// Initialization
static void initialize();
CDS_ONLY(static void initialize_shared_path();)
static void create_package_info_table();
static void create_package_info_table(HashtableBucket<mtClass> *t, int length,
int number_of_entries);
@ -310,10 +326,21 @@ class ClassLoader: AllStatic {
return e;
}
#if INCLUDE_CDS
// Sharing dump and restore
static void copy_package_info_buckets(char** top, char* end);
static void copy_package_info_table(char** top, char* end);
static void check_shared_classpath(const char *path);
static void finalize_shared_paths_misc_info();
static int get_shared_paths_misc_info_size();
static void* get_shared_paths_misc_info();
static bool check_shared_paths_misc_info(void* info, int size);
static void exit_with_path_failure(const char* error, const char* message);
#endif
static void trace_class_path(const char* msg, const char* name = NULL);
// VM monitoring and management support
static jlong classloader_time_ms();
static jlong class_method_total_size();
@ -337,7 +364,7 @@ class ClassLoader: AllStatic {
// Force compilation of all methods in all classes in bootstrap class path (stress test)
#ifndef PRODUCT
private:
protected:
static int _compile_the_world_class_counter;
static int _compile_the_world_method_counter;
public:


@ -0,0 +1,69 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
#include "classfile/classLoader.hpp"
class ClassLoaderExt: public ClassLoader { // AllStatic
public:
class Context {
const char* _file_name;
public:
Context(const char* class_name, const char* file_name, TRAPS) {
_file_name = file_name;
}
bool check(ClassFileStream* stream, const int classpath_index) {
return true;
}
bool should_verify(int classpath_index) {
return false;
}
instanceKlassHandle record_result(const int classpath_index,
ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
if (DumpSharedSpaces) {
result->set_shared_classpath_index(classpath_index);
}
return result;
} else {
return instanceKlassHandle(); // NULL
}
}
};
static void add_class_path_entry(char* path, bool check_for_duplicates,
ClassPathEntry* new_entry) {
ClassLoader::add_to_list(new_entry);
}
static void setup_search_paths() {}
};
#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP


@ -130,15 +130,13 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_dom
}
bool Dictionary::do_unloading() {
void Dictionary::do_unloading() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
bool class_was_unloaded = false;
int index = 0; // Defined here for portability! Do not move
// Remove unloadable entries and classes from system dictionary
// The placeholder array has been handled in always_strong_oops_do.
DictionaryEntry* probe = NULL;
for (index = 0; index < table_size(); index++) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
Klass* e = probe->klass();
@ -158,16 +156,8 @@ bool Dictionary::do_unloading() {
// Do we need to delete this system dictionary entry?
if (loader_data->is_unloading()) {
// If the loader is not live this entry should always be
// removed (will never be looked up again). Note that this is
// not the same as unloading the referred class.
if (k_def_class_loader_data == loader_data) {
// This is the defining entry, so the referred class is about
// to be unloaded.
class_was_unloaded = true;
}
// Also remove this system dictionary entry.
// removed (will never be looked up again).
purge_entry = true;
} else {
// The loader in this entry is alive. If the klass is dead,
// (determined by checking the defining class loader)
@ -196,7 +186,6 @@ bool Dictionary::do_unloading() {
p = probe->next_addr();
}
}
return class_was_unloaded;
}
void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
@ -220,6 +209,29 @@ void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
_pd_cache_table->roots_oops_do(strong, weak);
}
void Dictionary::remove_classes_in_error_state() {
assert(DumpSharedSpaces, "supported only when dumping");
DictionaryEntry* probe = NULL;
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
InstanceKlass* ik = InstanceKlass::cast(probe->klass());
if (ik->is_in_error_state()) { // purge this entry
*p = probe->next();
if (probe == _current_class_entry) {
_current_class_entry = NULL;
}
free_entry(probe);
ResourceMark rm;
tty->print_cr("Removed error class: %s", ik->external_name());
continue;
}
p = probe->next_addr();
}
}
}
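
Both do_unloading() and the new remove_classes_in_error_state() unlink entries mid-iteration by walking each bucket through a pointer-to-pointer, which removes nodes without tracking a separate "previous" pointer. A standalone sketch of the pattern with a hypothetical node type:

#include <cstdio>

// Walk a singly linked bucket through a pointer-to-pointer so a removal
// is a single splice and iteration continues from the same link.
struct Node {
  int value;
  Node* next;
};

static void remove_if_negative(Node** bucket) {
  for (Node** p = bucket; *p != NULL; ) {
    Node* probe = *p;
    if (probe->value < 0) {
      *p = probe->next;       // splice the node out; p itself stays put
      delete probe;
      continue;               // *p is already the next candidate
    }
    p = &probe->next;         // keep the node, advance to its link
  }
}

int main() {
  Node* c = new Node{3, NULL};
  Node* b = new Node{-2, c};
  Node* head = new Node{1, b};
  remove_if_negative(&head);
  for (Node* n = head; n != NULL; n = n->next) {
    printf("%d ", n->value);  // prints: 1 3
  }
  printf("\n");
  return 0;
}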
void Dictionary::always_strong_oops_do(OopClosure* blk) {
// Follow all system classes and temporary placeholders in dictionary; only
// protection domain oops contain references into the heap. In a first
@ -693,16 +705,17 @@ void SymbolPropertyTable::methods_do(void f(Method*)) {
// ----------------------------------------------------------------------------
#ifndef PRODUCT
void Dictionary::print() {
void Dictionary::print(bool details) {
ResourceMark rm;
HandleMark hm;
tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");
if (details) {
tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");
}
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
@ -713,21 +726,28 @@ void Dictionary::print() {
ClassLoaderData* loader_data = probe->loader_data();
bool is_defining_class =
(loader_data == InstanceKlass::cast(e)->class_loader_data());
tty->print("%s%s", is_defining_class ? " " : "^",
tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
e->external_name());
if (details) {
tty->print(", loader ");
loader_data->print_value();
if (loader_data != NULL) {
loader_data->print_value();
} else {
tty->print("NULL");
}
}
tty->cr();
}
}
tty->cr();
_pd_cache_table->print();
if (details) {
tty->cr();
_pd_cache_table->print();
}
tty->cr();
}
#endif
void Dictionary::verify() {
guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");


@ -100,6 +100,7 @@ public:
void methods_do(void f(Method*));
void unlink(BoolObjectClosure* is_alive);
void remove_classes_in_error_state();
// Classes loaded by the bootstrap loader are always strongly reachable.
// If we're not doing class unloading, all classes are strongly reachable.
@ -108,9 +109,8 @@ public:
return (loader_data->is_the_null_class_loader_data() || !ClassUnloading);
}
// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
bool do_unloading();
// Unload (that is, break root links to) all unmarked classes and loaders.
void do_unloading();
// Protection domains
Klass* find(int index, unsigned int hash, Symbol* name,
@ -127,9 +127,7 @@ public:
ProtectionDomainCacheEntry* cache_get(oop protection_domain);
#ifndef PRODUCT
void print();
#endif
void print(bool details = true);
void verify();
};


@ -0,0 +1,69 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
#define SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
#include "classfile/sharedPathsMiscInfo.hpp"
#include "memory/filemap.hpp"
class SharedClassUtil : AllStatic {
public:
static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
return new SharedPathsMiscInfo();
}
static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
return new SharedPathsMiscInfo(buf, size);
}
static FileMapInfo::FileMapHeader* allocate_file_map_header() {
return new FileMapInfo::FileMapHeader();
}
static size_t file_map_header_size() {
return sizeof(FileMapInfo::FileMapHeader);
}
static size_t shared_class_path_entry_size() {
return sizeof(SharedClassPathEntry);
}
static void update_shared_classpath(ClassPathEntry *cpe,
SharedClassPathEntry* ent,
time_t timestamp,
long filesize, TRAPS) {
ent->_timestamp = timestamp;
ent->_filesize = filesize;
}
static void initialize(TRAPS) {}
inline static bool is_shared_boot_class(Klass* klass) {
return (klass->_shared_class_path_index >= 0);
}
};
#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP


@ -0,0 +1,154 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/sharedPathsMiscInfo.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/arguments.hpp"
void SharedPathsMiscInfo::add_path(const char* path, int type) {
if (TraceClassPaths) {
tty->print("[type=%s] ", type_name(type));
trace_class_path("[Add misc shared path ", path);
}
write(path, strlen(path) + 1);
write_jint(jint(type));
}
void SharedPathsMiscInfo::ensure_size(size_t needed_bytes) {
assert(_allocated, "cannot modify buffer during validation.");
int used = get_used_bytes();
int target = used + int(needed_bytes);
if (target > _buf_size) {
_buf_size = _buf_size * 2 + (int)needed_bytes;
_buf_start = REALLOC_C_HEAP_ARRAY(char, _buf_start, _buf_size, mtClass);
_cur_ptr = _buf_start + used;
_end_ptr = _buf_start + _buf_size;
}
}
void SharedPathsMiscInfo::write(const void* ptr, size_t size) {
ensure_size(size);
memcpy(_cur_ptr, ptr, size);
_cur_ptr += size;
}
bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
if (_cur_ptr + size <= _end_ptr) {
memcpy(ptr, _cur_ptr, size);
_cur_ptr += size;
return true;
}
return false;
}
bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
ClassLoader::trace_class_path(msg, name);
MetaspaceShared::set_archive_loading_failed();
return false;
}
bool SharedPathsMiscInfo::check() {
// The whole buffer must be 0 terminated so that we can use strlen and strcmp
// without fear.
_end_ptr -= sizeof(jint);
if (_cur_ptr >= _end_ptr) {
return fail("Truncated archive file header");
}
if (*_end_ptr != 0) {
return fail("Corrupted archive file header");
}
while (_cur_ptr < _end_ptr) {
jint type;
const char* path = _cur_ptr;
_cur_ptr += strlen(path) + 1;
if (!read_jint(&type)) {
return fail("Corrupted archive file header");
}
if (TraceClassPaths) {
tty->print("[type=%s ", type_name(type));
print_path(tty, type, path);
tty->print_cr("]");
}
if (!check(type, path)) {
if (!PrintSharedArchiveAndExit) {
return false;
}
} else {
trace_class_path("[ok");
}
}
return true;
}
bool SharedPathsMiscInfo::check(jint type, const char* path) {
switch (type) {
case BOOT:
if (strcmp(path, Arguments::get_sysclasspath()) != 0) {
return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath());
}
break;
case NON_EXIST: // fall-through
case REQUIRED:
{
struct stat st;
if (os::stat(path, &st) != 0) {
// The file does not actually exist
if (type == REQUIRED) {
// but we require it to exist -> fail
return fail("Required file doesn't exist");
}
} else {
// The file actually exists
if (type == NON_EXIST) {
// But we want it to not exist -> fail
return fail("File must not exist");
}
time_t timestamp;
long filesize;
if (!read_time(&timestamp) || !read_long(&filesize)) {
return fail("Corrupted archive file header");
}
if (timestamp != st.st_mtime) {
return fail("Timestamp mismatch");
}
if (filesize != st.st_size) {
return fail("File size mismatch");
}
}
}
break;
default:
return fail("Corrupted archive file header");
}
return true;
}


@ -0,0 +1,187 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
#define SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
#include "runtime/os.hpp"
// During dumping time, when processing class paths, we build up the dump-time
// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry.
// However, we need to store other "misc" information for run-time checking, such as
//
// + The values of Arguments::get_sysclasspath() used during dumping.
//
// + The meta-index file(s) used during dumping (incl modification time and size)
//
// + The class path elements that were specified during dumping but did not exist --
// these elements must also be specified at run time, and they also must not
// exist at run time.
//
// These misc items are stored in a linear buffer in SharedPathsMiscInfo.
// The storage format is stream oriented to minimize its size.
//
// When writing the information to the archive file, SharedPathsMiscInfo is stored in
// the archive file header. At run-time, this information is used only during initialization
// (accessed using read() instead of mmap()), and is deallocated afterwards to save space.
//
// The SharedPathsMiscInfo class is used both for creating the information (at
// dump time) and for validating it (at run time). Different constructors are used in the
// two situations. See below.
class SharedPathsMiscInfo : public CHeapObj<mtClass> {
protected:
char* _buf_start;
char* _cur_ptr;
char* _end_ptr;
int _buf_size;
bool _allocated; // was _buf_start allocated by me?
void ensure_size(size_t needed_bytes);
void add_path(const char* path, int type);
void write(const void* ptr, size_t size);
bool read(void* ptr, size_t size);
static void trace_class_path(const char* msg, const char* name = NULL) {
ClassLoader::trace_class_path(msg, name);
}
protected:
static bool fail(const char* msg, const char* name = NULL);
virtual bool check(jint type, const char* path);
public:
enum {
INITIAL_BUF_SIZE = 128
};
// This constructor is used when creating the misc information (during dump)
SharedPathsMiscInfo() {
_buf_size = INITIAL_BUF_SIZE;
_cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
_allocated = true;
}
// This constructor is used when validating the misc info (during run time)
SharedPathsMiscInfo(char *buff, int size) {
_cur_ptr = _buf_start = buff;
_end_ptr = _buf_start + size;
_buf_size = size;
_allocated = false;
}
~SharedPathsMiscInfo() {
if (_allocated) {
FREE_C_HEAP_ARRAY(char, _buf_start, mtClass);
}
}
int get_used_bytes() {
return _cur_ptr - _buf_start;
}
void* buffer() {
return _buf_start;
}
// writing --
// The path must not exist at run-time
void add_nonexist_path(const char* path) {
add_path(path, NON_EXIST);
}
// The path must exist and have the required size and modification time
void add_required_file(const char* path) {
add_path(path, REQUIRED);
struct stat st;
if (os::stat(path, &st) != 0) {
assert(0, "sanity");
ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen
}
write_time(st.st_mtime);
write_long(st.st_size);
}
// The path must exist, and must contain exactly <num_entries> files/dirs
void add_boot_classpath(const char* path) {
add_path(path, BOOT);
}
int write_jint(jint num) {
write(&num, sizeof(num));
return 0;
}
void write_time(time_t t) {
write(&t, sizeof(t));
}
void write_long(long l) {
write(&l, sizeof(l));
}
bool dump_to_file(int fd) {
int n = get_used_bytes();
return (os::write(fd, _buf_start, n) == (size_t)n);
}
// reading --
enum {
BOOT = 1,
NON_EXIST = 2,
REQUIRED = 3
};
virtual const char* type_name(int type) {
switch (type) {
case BOOT: return "BOOT";
case NON_EXIST: return "NON_EXIST";
case REQUIRED: return "REQUIRED";
default: ShouldNotReachHere(); return "?";
}
}
virtual void print_path(outputStream* out, int type, const char* path) {
switch (type) {
case BOOT:
out->print("Expecting -Dsun.boot.class.path=%s", path);
break;
case NON_EXIST:
out->print("Expecting that %s does not exist", path);
break;
case REQUIRED:
out->print("Expecting that file %s must exist and not altered", path);
break;
default:
ShouldNotReachHere();
}
}
bool check();
bool read_jint(jint *ptr) {
return read(ptr, sizeof(jint));
}
bool read_long(long *ptr) {
return read(ptr, sizeof(long));
}
bool read_time(time_t *ptr) {
return read(ptr, sizeof(time_t));
}
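// Sketch of how check() plausibly walks the stream (an assumption for
// illustration; the real loop lives in sharedPathsMiscInfo.cpp and the
// entry field order here is hypothetical):
//
//   while (_cur_ptr < _end_ptr) {
//     const char* path = _cur_ptr;          // NUL-terminated path bytes
//     _cur_ptr += strlen(path) + 1;
//     jint type;
//     if (!read_jint(&type)) {
//       return fail("Corrupted archive file header");
//     }
//     if (!check(type, path)) return false; // check() consumes the recorded
//   }                                       // time/size for REQUIRED entries
//   return true;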
};
#endif // SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,21 +54,6 @@ StackMapFrame* StackMapFrame::frame_in_exception_handler(u1 flags) {
return frame;
}
bool StackMapFrame::has_new_object() const {
int32_t i;
for (i = 0; i < _max_locals; i++) {
if (_locals[i].is_uninitialized()) {
return true;
}
}
for (i = 0; i < _stack_size; i++) {
if (_stack[i].is_uninitialized()) {
return true;
}
}
return false;
}
void StackMapFrame::initialize_object(
VerificationType old_object, VerificationType new_object) {
int32_t i;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -154,10 +154,6 @@ class StackMapFrame : public ResourceObj {
VerificationType set_locals_from_arg(
const methodHandle m, VerificationType thisKlass, TRAPS);
// Search local variable type array and stack type array.
// Return true if an uninitialized object is found.
bool has_new_object() const;
// Search local variable type array and stack type array.
// Set every element with type of old_object to new_object.
void initialize_object(

View File

@ -70,24 +70,26 @@ int StackMapTable::get_index_from_offset(int32_t offset) const {
bool StackMapTable::match_stackmap(
StackMapFrame* frame, int32_t target,
bool match, bool update, ErrorContext* ctx, TRAPS) const {
bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const {
int index = get_index_from_offset(target);
return match_stackmap(frame, target, index, match, update, ctx, THREAD);
return match_stackmap(frame, target, index, match, update, handler, ctx, THREAD);
}
// Match and/or update current_frame to the frame in stackmap table with
// specified offset and frame index. Return true if the two frames match.
// handler is true if the frame in stackmap_table is for an exception handler.
//
// The values of match and update are:            _match__update_
// The values of match and update are:            _match__update__handler
//
// checking a branch target/exception handler:     true    false
// checking a branch target:                       true    false   false
// checking an exception handler:                  true    false   true
// linear bytecode verification following an
//   unconditional branch:                         false   true
//   unconditional branch:                         false   true    false
// linear bytecode verification not following an
//   unconditional branch:                         true    true
//   unconditional branch:                         true    true    false
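// For example, the call shapes matching the table above (illustrative; the
// branch-target form appears in the updated check_jump_target() below):
//
//   match_stackmap(frame, target, true,  false, false, &ctx, THREAD); // branch target
//   match_stackmap(frame, target, true,  false, true,  &ctx, THREAD); // exception handler
//   match_stackmap(frame, target, false, true,  false, &ctx, THREAD); // after unconditional branch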
bool StackMapTable::match_stackmap(
StackMapFrame* frame, int32_t target, int32_t frame_index,
bool match, bool update, ErrorContext* ctx, TRAPS) const {
bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const {
if (frame_index < 0 || frame_index >= _frame_count) {
*ctx = ErrorContext::missing_stackmap(frame->offset());
frame->verifier()->verify_error(
@ -98,11 +100,9 @@ bool StackMapTable::match_stackmap(
StackMapFrame *stackmap_frame = _frame_array[frame_index];
bool result = true;
if (match) {
// when checking handler target, match == true && update == false
bool is_exception_handler = !update;
// Has direct control flow from last instruction, need to match the two
// frames.
result = frame->is_assignable_to(stackmap_frame, is_exception_handler,
result = frame->is_assignable_to(stackmap_frame, handler,
ctx, CHECK_VERIFY_(frame->verifier(), result));
}
if (update) {
@ -126,24 +126,10 @@ void StackMapTable::check_jump_target(
StackMapFrame* frame, int32_t target, TRAPS) const {
ErrorContext ctx;
bool match = match_stackmap(
frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier()));
frame, target, true, false, false, &ctx, CHECK_VERIFY(frame->verifier()));
if (!match || (target < 0 || target >= _code_length)) {
frame->verifier()->verify_error(ctx,
"Inconsistent stackmap frames at branch target %d", target);
return;
}
// check if uninitialized objects exist on backward branches
check_new_object(frame, target, CHECK_VERIFY(frame->verifier()));
frame->verifier()->update_furthest_jump(target);
}
void StackMapTable::check_new_object(
const StackMapFrame* frame, int32_t target, TRAPS) const {
if (frame->offset() > target && frame->has_new_object()) {
frame->verifier()->verify_error(
ErrorContext::bad_code(frame->offset()),
"Uninitialized object exists on backward branch %d", target);
return;
}
}

Some files were not shown because too many files have changed in this diff.