Merge

commit 2ee9645ccb

.hgtags | 1
.hgtags | 1
@@ -269,3 +269,4 @@ c5495e25c7258ab5f96a1ae14610887d76d2be63 jdk9-b18
 d9ce05f36ffec3e5e8af62a92455c1c66a63c320 jdk9-b24
 13a5c76976fe48e55c9727c25fae2d2ce7c05da0 jdk9-b25
 cd6f4557e7fea5799ff3762ed7a80a743e75d5fd jdk9-b26
+d06a6d3c66c08293b2a9650f3cc01fd55c620e65 jdk9-b27

@@ -269,3 +269,4 @@ ee4fd72b2ec3d92497f37163352f294aa695c6fb jdk9-b20
 1d4a293fbec19dc2d5790bbb2c7dd0ed8f265484 jdk9-b24
 aefd8899a8d6615fb34ba99b2e38996a7145baa8 jdk9-b25
 d3ec8d048e6c3c46b6e0ee011cc551ad386dfba5 jdk9-b26
+ba5645f2735b41ed085d07ba20fa7b322afff318 jdk9-b27
@@ -377,7 +377,6 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
   BASIC_REQUIRE_PROGS(CMP, cmp)
   BASIC_REQUIRE_PROGS(COMM, comm)
   BASIC_REQUIRE_PROGS(CP, cp)
-  BASIC_REQUIRE_PROGS(CPIO, cpio)
   BASIC_REQUIRE_PROGS(CUT, cut)
   BASIC_REQUIRE_PROGS(DATE, date)
   BASIC_REQUIRE_PROGS(DIFF, [gdiff diff])

@@ -427,6 +426,7 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS],
   BASIC_PATH_PROGS(READLINK, [greadlink readlink])
   BASIC_PATH_PROGS(DF, df)
   BASIC_PATH_PROGS(SETFILE, SetFile)
+  BASIC_PATH_PROGS(CPIO, [cpio bsdcpio])
 ])

 # Setup basic configuration paths, and platform-specific stuff related to PATHs.

@@ -954,7 +954,7 @@ AC_DEFUN([BASIC_CHECK_DIR_ON_LOCAL_DISK],
 # not be the case in cygwin in certain conditions.
 AC_DEFUN_ONCE([BASIC_CHECK_SRC_PERMS],
 [
-  if test x"$OPENJDK_BUILD_OS" = xwindows; then
+  if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.cygwin"; then
     file_to_test="$SRC_ROOT/LICENSE"
     if test `$STAT -c '%a' "$file_to_test"` -lt 400; then
       AC_MSG_ERROR([Bad file permissions on src files. This is usually caused by cloning the repositories with a non cygwin hg in a directory not created in cygwin.])
@@ -266,6 +266,14 @@ AC_DEFUN([BASIC_FIXUP_EXECUTABLE_MSYS],
     BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(new_path)

     new_path=`$WHICH "$new_path" 2> /dev/null`
+    # bat and cmd files are not always considered executable in MSYS causing which
+    # to not find them
+    if test "x$new_path" = x \
+        && test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \
+        && test "x`$LS \"$path\" 2>/dev/null`" != x; then
+      new_path="$path"
+      BASIC_WINDOWS_REWRITE_AS_UNIX_PATH(new_path)
+    fi

     if test "x$new_path" = x; then
       # It's still not found. Now this is an unrecoverable error.
@@ -900,7 +900,7 @@ AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_MISC],

   case "${TOOLCHAIN_TYPE}" in
     microsoft)
-      CFLAGS_WARNINGS_ARE_ERRORS="/WX"
+      CFLAGS_WARNINGS_ARE_ERRORS="-WX"
      ;;
    solstudio)
      CFLAGS_WARNINGS_ARE_ERRORS="-errtags -errwarn=%all"
(File diff suppressed because it is too large.)
@@ -173,6 +173,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
   OPENJDK_BUILD_CPU_ENDIAN="$VAR_CPU_ENDIAN"
   AC_SUBST(OPENJDK_BUILD_OS)
   AC_SUBST(OPENJDK_BUILD_OS_API)
+  AC_SUBST(OPENJDK_BUILD_OS_ENV)
   AC_SUBST(OPENJDK_BUILD_CPU)
   AC_SUBST(OPENJDK_BUILD_CPU_ARCH)
   AC_SUBST(OPENJDK_BUILD_CPU_BITS)

@@ -194,6 +195,7 @@ AC_DEFUN([PLATFORM_EXTRACT_TARGET_AND_BUILD],
   OPENJDK_TARGET_CPU_ENDIAN="$VAR_CPU_ENDIAN"
   AC_SUBST(OPENJDK_TARGET_OS)
   AC_SUBST(OPENJDK_TARGET_OS_API)
+  AC_SUBST(OPENJDK_TARGET_OS_ENV)
   AC_SUBST(OPENJDK_TARGET_CPU)
   AC_SUBST(OPENJDK_TARGET_CPU_ARCH)
   AC_SUBST(OPENJDK_TARGET_CPU_BITS)
@@ -106,6 +106,7 @@ OPENJDK_TARGET_OS_EXPORT_DIR:=@OPENJDK_TARGET_OS_EXPORT_DIR@
 # When not cross-compiling, it is the same as the target.
 OPENJDK_BUILD_OS:=@OPENJDK_BUILD_OS@
 OPENJDK_BUILD_OS_API:=@OPENJDK_BUILD_OS_API@
+OPENJDK_BUILD_OS_ENV:=@OPENJDK_BUILD_OS_ENV@

 OPENJDK_BUILD_CPU:=@OPENJDK_BUILD_CPU@
 OPENJDK_BUILD_CPU_ARCH:=@OPENJDK_BUILD_CPU_ARCH@
@@ -244,12 +244,22 @@ AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_MSVCR_DLL],
     # Need to check if the found msvcr is correct architecture
     AC_MSG_CHECKING([found msvcr100.dll architecture])
     MSVCR_DLL_FILETYPE=`$FILE -b "$POSSIBLE_MSVCR_DLL"`
-    if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
-      CORRECT_MSVCR_ARCH=386
+    if test "x$OPENJDK_BUILD_OS_ENV" = "xwindows.msys"; then
+      # The MSYS 'file' command returns "PE32 executable for MS Windows (DLL) (GUI) Intel 80386 32-bit"
+      # on x32 and "PE32+ executable for MS Windows (DLL) (GUI) Mono/.Net assembly" on x64 systems.
+      if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
+        CORRECT_MSVCR_ARCH="PE32 executable"
+      else
+        CORRECT_MSVCR_ARCH="PE32+ executable"
+      fi
     else
-      CORRECT_MSVCR_ARCH=x86-64
+      if test "x$OPENJDK_TARGET_CPU_BITS" = x32; then
+        CORRECT_MSVCR_ARCH=386
+      else
+        CORRECT_MSVCR_ARCH=x86-64
+      fi
     fi
-    if $ECHO "$MSVCR_DLL_FILETYPE" | $GREP $CORRECT_MSVCR_ARCH 2>&1 > /dev/null; then
+    if $ECHO "$MSVCR_DLL_FILETYPE" | $GREP "$CORRECT_MSVCR_ARCH" 2>&1 > /dev/null; then
       AC_MSG_RESULT([ok])
       MSVCR_DLL="$POSSIBLE_MSVCR_DLL"
       AC_MSG_CHECKING([for msvcr100.dll])
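
The hunk above distinguishes cygwin from MSYS by the exact wording `file -b` uses to describe a PE image. A rough standalone C++ equivalent of the check, for illustration only (file_type and msvcr_arch_ok are hypothetical names; a POSIX popen and a `file` binary on PATH are assumed):

    #include <cstdio>
    #include <string>

    // Run `file -b <path>` and capture its one-line description of the binary.
    static std::string file_type(const std::string& path) {
      std::string out;
      std::string cmd = "file -b \"" + path + "\"";
      if (FILE* p = popen(cmd.c_str(), "r")) {
        char buf[256];
        while (fgets(buf, sizeof buf, p) != nullptr) out += buf;
        pclose(p);
      }
      return out;
    }

    // Mirror of the m4 logic: MSYS `file` says "PE32 executable" / "PE32+
    // executable", while cygwin's says "... 80386 ..." / "... x86-64 ...".
    static bool msvcr_arch_ok(const std::string& dll, bool is_32bit, bool on_msys) {
      const char* expected = on_msys ? (is_32bit ? "PE32 executable" : "PE32+ executable")
                                     : (is_32bit ? "386" : "x86-64");
      return file_type(dll).find(expected) != std::string::npos;
    }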
@@ -269,3 +269,5 @@ ddc07abf4307855c0dc904cc5c96cc764023a930 jdk9-b22
 8a44142bb7fc8118f70f91a1b97c12dfc50563ee jdk9-b24
 da08cca6b97f41b7081a3e176dcb400af6e4bb26 jdk9-b25
 6c777df597bbf5abba3488d44c401edfe73c74af jdk9-b26
+7e06bf1dcb0907b80ddf59315426ce9ce775e56d jdk9-b27
+a00b04ef067e39f50b9a0fea6f1904e35d632a73 jdk9-b28

@@ -429,3 +429,4 @@ dd472cdacc32e3afc7c5bfa7ef16ea0e0befb7fa jdk9-b23
 dde2d03b0ea46a27650839e3a1d212c7c1f7b4c8 jdk9-b24
 6de94e8693240cec8aae11f6b42f43433456a733 jdk9-b25
 48b95a073d752d6891cc0d1d2836b321ecf3ce0c jdk9-b26
+f95347244306affc32ce3056f27ceff7b2100810 jdk9-b27
@@ -314,7 +314,7 @@ static void * pathmap_dlopen(const char * name, int mode) {
     handle = dlopen(name, mode);
   }
   if (_libsaproc_debug) {
-    printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%x\n", name, handle);
+    printf("libsaproc DEBUG: pathmap_dlopen %s return 0x%lx\n", name, (unsigned long) handle);
   }
   return handle;
 }
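
The printf fix above matters because "%x" expects an unsigned int, so passing a 64-bit pointer through it is undefined behavior and typically truncates the value. A minimal sketch of the portable alternatives (illustrative, not JDK code):

    #include <cstdio>
    #include <cinttypes>

    int main() {
      void* handle = reinterpret_cast<void*>(0x1122334455667788ULL);
      // Simplest portable spelling:
      printf("handle = %p\n", handle);
      // Fixed-width hex via uintptr_t:
      printf("handle = 0x%" PRIxPTR "\n", (uintptr_t) handle);
      // The patch's idiom: cast to unsigned long, which is pointer-sized
      // on the LP64 platforms this Solaris SA code targets.
      printf("handle = 0x%lx\n", (unsigned long) handle);
      return 0;
    }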
@@ -661,30 +661,30 @@ init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name
   // read FileMapHeader
   size_t n = read(fd, pheader, sizeof(struct FileMapHeader));
   if (n != sizeof(struct FileMapHeader)) {
-    free(pheader);
-    close(fd);
     char errMsg[ERR_MSG_SIZE];
     sprintf(errMsg, "unable to read shared archive file map header from %s", classes_jsa);
+    close(fd);
+    free(pheader);
     THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
   }

   // check file magic
   if (pheader->_magic != 0xf00baba2) {
-    free(pheader);
-    close(fd);
     char errMsg[ERR_MSG_SIZE];
     sprintf(errMsg, "%s has bad shared archive magic 0x%x, expecting 0xf00baba2",
             classes_jsa, pheader->_magic);
+    close(fd);
+    free(pheader);
     THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
   }

   // check version
   if (pheader->_version != CURRENT_ARCHIVE_VERSION) {
-    free(pheader);
-    close(fd);
     char errMsg[ERR_MSG_SIZE];
     sprintf(errMsg, "%s has wrong shared archive version %d, expecting %d",
             classes_jsa, pheader->_version, CURRENT_ARCHIVE_VERSION);
+    close(fd);
+    free(pheader);
     THROW_NEW_DEBUGGER_EXCEPTION_(errMsg, 1);
   }
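
The reordering above is a use-after-free fix: the old code released pheader (and closed fd) before sprintf read pheader->_magic or pheader->_version for the error message. A minimal sketch of the corrected pattern, with FileMapHeader reduced to the two fields used here (illustrative, not the real SA definitions):

    #include <cstdio>
    #include <cstdlib>
    #include <unistd.h>

    struct FileMapHeader { unsigned int _magic; int _version; };

    static void report_bad_magic(FileMapHeader* pheader, int fd,
                                 const char* classes_jsa, char* errMsg, size_t len) {
      // 1. Format the message while pheader is still valid...
      snprintf(errMsg, len, "%s has bad shared archive magic 0x%x, expecting 0xf00baba2",
               classes_jsa, pheader->_magic);
      // 2. ...and only then release the resources.
      close(fd);
      free(pheader);
    }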
@@ -119,8 +119,8 @@ ifeq ($(INCLUDE_NMT), false)
       CFLAGS += -DINCLUDE_NMT=0

       Src_Files_EXCLUDE += \
-         memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
-         memTracker.cpp nmtDCmd.cpp
+         memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
+         memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
 endif

 -include $(HS_ALT_MAKE)/excludeSrc.make
@@ -356,14 +356,15 @@ jprt.make.rule.test.targets.standard.internalvmtests = \
   ${jprt.my.windows.x64}-fastdebug-c2-internalvmtests

 jprt.make.rule.test.targets.standard.reg.group = \
-  ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.windows.x64}-{product|fastdebug}-c2-GROUP, \
-  ${jprt.my.linux.i586}-{product|fastdebug}-c1-GROUP, \
-  ${jprt.my.windows.i586}-{product|fastdebug}-c1-GROUP
+  ${jprt.my.solaris.sparcv9}-fastdebug-c2-GROUP, \
+  ${jprt.my.solaris.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.i586}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.macosx.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.windows.i586}-fastdebug-c2-GROUP, \
+  ${jprt.my.windows.x64}-fastdebug-c2-GROUP, \
+  ${jprt.my.linux.i586}-fastdebug-c1-GROUP, \
+  ${jprt.my.windows.i586}-fastdebug-c1-GROUP

 jprt.make.rule.test.targets.standard = \
   ${jprt.make.rule.test.targets.standard.client}, \
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it

@@ -58,6 +58,8 @@ DTRACE_SRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/dtrace
 DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
 DTRACE = dtrace
 DTRACE.o = $(DTRACE).o
+DTRACE_JHELPER = dtrace_jhelper
+DTRACE_JHELPER.o = $(DTRACE_JHELPER).o

 # to remove '-g' option which causes link problems
 # also '-z nodefs' is used as workaround

@@ -255,7 +257,10 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
 endif

 $(DTRACE).d: $(DTRACE_COMMON_SRCDIR)/hotspot.d $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d \
-             $(DTRACE_COMMON_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
+             $(DTRACE_COMMON_SRCDIR)/hs_private.d
         $(QUIETLY) cat $^ > $@

+$(DTRACE_JHELPER).d: $(DTRACE_SRCDIR)/jhelper.d
+        $(QUIETLY) cat $^ > $@
+
 DTraced_Files = ciEnv.o \

@@ -280,7 +285,7 @@ DTraced_Files = ciEnv.o \
                 vmGCOperations.o \

 # Dtrace is available, so we build $(DTRACE.o)
-$(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files)
+$(DTRACE.o): $(DTRACE).d $(DTraced_Files)
         @echo Compiling $(DTRACE).d

         $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \

@@ -344,6 +349,11 @@ $(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOut

 dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h

+# The jhelper.d and hotspot probes are separated into two different SUNW_dof sections.
+# Now the jhelper.d is built without the -Xlazyload flag.
+$(DTRACE_JHELPER.o) : $(DTRACE_JHELPER).d $(JVMOFFS).h $(JVMOFFS)Index.h
+        @echo Compiling $(DTRACE_JHELPER).d
+        $(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -o $@ -s $(DTRACE_JHELPER).d

 .PHONY: dtraceCheck

@@ -372,7 +382,7 @@ endif # ifneq ("$(patchDtraceFound)", "")
 ifneq ("${DTRACE_PROG}", "")
 ifeq ("${HOTSPOT_DISABLE_DTRACE_PROBES}", "")

-DTRACE_OBJS = $(DTRACE.o) $(JVMOFFS.o)
+DTRACE_OBJS = $(DTRACE.o) $(JVMOFFS.o) $(DTRACE_JHELPER.o)
 CFLAGS += $(DTRACE_INCL) -DDTRACE_ENABLED
 MAPFILE_DTRACE_OPT = $(MAPFILE_DTRACE)
@@ -298,6 +298,7 @@ class Assembler : public AbstractAssembler {
     LWZ_OPCODE   = (32u << OPCODE_SHIFT),
     LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
     LWZU_OPCODE  = (33u << OPCODE_SHIFT),
+    LWBRX_OPCODE = (31u << OPCODE_SHIFT | 534  << 1),

     LHA_OPCODE   = (42u << OPCODE_SHIFT),
     LHAX_OPCODE  = (31u << OPCODE_SHIFT | 343u << 1),

@@ -306,6 +307,7 @@ class Assembler : public AbstractAssembler {
     LHZ_OPCODE   = (40u << OPCODE_SHIFT),
     LHZX_OPCODE  = (31u << OPCODE_SHIFT | 279u << 1),
     LHZU_OPCODE  = (41u << OPCODE_SHIFT),
+    LHBRX_OPCODE = (31u << OPCODE_SHIFT | 790  << 1),

     LBZ_OPCODE   = (34u << OPCODE_SHIFT),
     LBZX_OPCODE  = (31u << OPCODE_SHIFT |  87u << 1),

@@ -1364,11 +1366,17 @@ class Assembler : public AbstractAssembler {
   inline void lwax( Register d, Register s1, Register s2);
   inline void lwa(  Register d, int si16,    Register s1);

+  // 4 bytes reversed
+  inline void lwbrx( Register d, Register s1, Register s2);
+
   // 2 bytes
   inline void lhzx( Register d, Register s1, Register s2);
   inline void lhz(  Register d, int si16,    Register s1);
   inline void lhzu( Register d, int si16,    Register s1);

+  // 2 bytes reversed
+  inline void lhbrx( Register d, Register s1, Register s2);
+
   // 2 bytes
   inline void lhax( Register d, Register s1, Register s2);
   inline void lha(  Register d, int si16,    Register s1);

@@ -1858,10 +1866,12 @@ class Assembler : public AbstractAssembler {
   inline void lwz(  Register d, int si16);
   inline void lwax( Register d, Register s2);
   inline void lwa(  Register d, int si16);
+  inline void lwbrx(Register d, Register s2);
   inline void lhzx( Register d, Register s2);
   inline void lhz(  Register d, int si16);
   inline void lhax( Register d, Register s2);
   inline void lha(  Register d, int si16);
+  inline void lhbrx(Register d, Register s2);
   inline void lbzx( Register d, Register s2);
   inline void lbz(  Register d, int si16);
   inline void ldx(  Register d, Register s2);
@@ -263,10 +263,14 @@ inline void Assembler::lwzu( Register d, int si16, Register s1) { assert(d !=
 inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lwa(  Register d, int si16, Register s1) { emit_int32(LWA_OPCODE | rt(d) | ds(si16) | ra0mem(s1));}

+inline void Assembler::lwbrx( Register d, Register s1, Register s2) { emit_int32(LWBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+
 inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lhz(  Register d, int si16, Register s1) { emit_int32(LHZ_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
 inline void Assembler::lhzu( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}

+inline void Assembler::lhbrx( Register d, Register s1, Register s2) { emit_int32(LHBRX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+
 inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
 inline void Assembler::lha(  Register d, int si16, Register s1) { emit_int32(LHA_OPCODE | rt(d) | d1(si16) | ra0mem(s1));}
 inline void Assembler::lhau( Register d, int si16, Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}

@@ -736,10 +740,12 @@ inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE
 inline void Assembler::lwz(  Register d, int si16   ) { emit_int32( LWZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lwa(  Register d, int si16   ) { emit_int32( LWA_OPCODE  | rt(d) | ds(si16));}
+inline void Assembler::lwbrx(Register d, Register s2) { emit_int32( LWBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lhz(  Register d, int si16   ) { emit_int32( LHZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lha(  Register d, int si16   ) { emit_int32( LHA_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lhbrx(Register d, Register s2) { emit_int32( LHBRX_OPCODE| rt(d) | rb(s2));}
 inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
 inline void Assembler::lbz(  Register d, int si16   ) { emit_int32( LBZ_OPCODE  | rt(d) | d1(si16));}
 inline void Assembler::ld(   Register d, int si16   ) { emit_int32( LD_OPCODE   | rt(d) | ds(si16));}
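
What the new lwbrx/lhbrx definitions provide: a load that byte-swaps as it reads, so a little-endian PPC64 host can fetch big-endian (Java order) data in one instruction. A C++ model of the same semantics, assuming a GCC-style compiler for the __builtin_bswap intrinsics (illustrative, not HotSpot code):

    #include <cstdint>
    #include <cstring>

    // Equivalent of lwbrx: load 4 bytes and reverse them.
    static inline uint32_t load_u4_byte_reversed(const void* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof v);
      return __builtin_bswap32(v);
    }

    // Equivalent of lhbrx: load 2 bytes and reverse them.
    static inline uint16_t load_u2_byte_reversed(const void* p) {
      uint16_t v;
      std::memcpy(&v, p, sizeof v);
      return __builtin_bswap16(v);
    }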
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -26,8 +26,9 @@
 #ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
 #define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP

-  address generate_normal_entry(void);
-  address generate_native_entry(void);
+  address generate_normal_entry(bool synchronized);
+  address generate_native_entry(bool synchronized);
+  address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

   void lock_method(void);
   void unlock_method(void);
@@ -938,8 +938,9 @@ void CppInterpreterGenerator::generate_counter_incr(Label& overflow) {
 // Interpreter stub for calling a native method. (C++ interpreter)
 // This sets up a somewhat different looking stack for calling the native method
 // than the typical interpreter frame setup.
+// The synchronized parameter is ignored.
 //
-address CppInterpreterGenerator::generate_native_entry(void) {
+address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
   if (native_entry != NULL) return native_entry;

   address entry = __ pc();

@@ -1729,7 +1730,8 @@ void CppInterpreterGenerator::generate_more_monitors() {
   __ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base); // Mark lock as unused
 }

-address CppInterpreterGenerator::generate_normal_entry(void) {
+// The synchronized parameter is ignored
+address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
   if (interpreter_frame_manager != NULL) return interpreter_frame_manager;

   address entry = __ pc();

@@ -2789,38 +2791,6 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   return interpreter_frame_manager;
 }

-// Generate code for various sorts of method entries
-//
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
-  address entry_point = NULL;
-
-  switch (kind) {
-    case Interpreter::zerolocals                 : break;
-    case Interpreter::zerolocals_synchronized    : break;
-    case Interpreter::native                     : // Fall thru
-    case Interpreter::native_synchronized        : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry(); break;
-    case Interpreter::empty                      : break;
-    case Interpreter::accessor                   : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
-    case Interpreter::abstract                   : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
-    // These are special interpreter intrinsics which we don't support so far.
-    case Interpreter::java_lang_math_sin         : break;
-    case Interpreter::java_lang_math_cos         : break;
-    case Interpreter::java_lang_math_tan         : break;
-    case Interpreter::java_lang_math_abs         : break;
-    case Interpreter::java_lang_math_log         : break;
-    case Interpreter::java_lang_math_log10       : break;
-    case Interpreter::java_lang_math_sqrt        : break;
-    case Interpreter::java_lang_math_pow         : break;
-    case Interpreter::java_lang_math_exp         : break;
-    case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-    default                                      : ShouldNotReachHere(); break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-  return ((InterpreterGenerator*)this)->generate_normal_entry();
-}
-
 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : CppInterpreterGenerator(code) {
@@ -119,9 +119,15 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
   // Call the Interpreter::remove_activation_preserving_args_entry()
   // func to get the address of the same-named entrypoint in the
   // generated interpreter code.
+#if defined(ABI_ELFv2)
+  call_c(CAST_FROM_FN_PTR(address,
+                          Interpreter::remove_activation_preserving_args_entry),
+         relocInfo::none);
+#else
   call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
                           Interpreter::remove_activation_preserving_args_entry),
          relocInfo::none);
+#endif

   // Jump to Interpreter::_remove_activation_preserving_args_entry.
   mtctr(R3_RET);
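
Background for the #if above: under the older PPC64 ELFv1 ABI a C function symbol names a function descriptor, not code, so the caller loads the real entry point (and TOC) from the descriptor; ELFv2 symbols point straight at the code, hence the plain `address` cast. A sketch of the descriptor layout the non-ELFv2 branch dereferences (field names illustrative):

    // ELFv1 function descriptor: three pointers in memory.
    struct FunctionDescriptorSketch {
      void* entry;  // address of the actual code
      void* toc;    // TOC (R2) value the callee expects
      void* env;    // environment pointer, unused by C/C++
    };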
@@ -331,29 +337,40 @@ void InterpreterMacroAssembler::empty_expression_stack() {
 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int         bcp_offset,
                                                           Register    Rdst,
                                                           signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (bcp_offset) {
+    load_const_optimized(Rdst, bcp_offset);
+    lhbrx(Rdst, R14_bcp, Rdst);
+  } else {
+    lhbrx(Rdst, R14_bcp);
+  }
+  if (is_signed == Signed) {
+    extsh(Rdst, Rdst);
+  }
+#else
   // Read Java big endian format.
   if (is_signed == Signed) {
     lha(Rdst, bcp_offset, R14_bcp);
   } else {
     lhz(Rdst, bcp_offset, R14_bcp);
   }
-#if 0
-  assert(Rtmp != Rdst, "need separate temp register");
-  Register Rfirst = Rtmp;
-  lbz(Rfirst, bcp_offset, R14_bcp); // first byte
-  lbz(Rdst, bcp_offset+1, R14_bcp); // second byte
-
-  // Rdst = ((Rfirst<<8) & 0xFF00) | (Rdst &~ 0xFF00)
-  rldimi(/*RA=*/Rdst, /*RS=*/Rfirst, /*sh=*/8, /*mb=*/48);
-  if (is_signed == Signed) {
-    extsh(Rdst, Rdst);
-  }
 #endif
 }

 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int         bcp_offset,
                                                           Register    Rdst,
                                                           signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (bcp_offset) {
+    load_const_optimized(Rdst, bcp_offset);
+    lwbrx(Rdst, R14_bcp, Rdst);
+  } else {
+    lwbrx(Rdst, R14_bcp);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
   // Read Java big endian format.
   if (bcp_offset & 3) { // Offset unaligned?
     load_const_optimized(Rdst, bcp_offset);

@@ -369,18 +386,26 @@ void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int bcp_offset
       lwz(Rdst, bcp_offset, R14_bcp);
     }
   }
+#endif
 }

 // Load the constant pool cache index from the bytecode stream.
 //
 // Kills / writes:
 //   - Rdst, Rscratch
 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) {
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  // Cache index is always in the native format, courtesy of Rewriter.
   if (index_size == sizeof(u2)) {
-    get_2_byte_integer_at_bcp(bcp_offset, Rdst, Unsigned);
+    lhz(Rdst, bcp_offset, R14_bcp);
   } else if (index_size == sizeof(u4)) {
-    get_4_byte_integer_at_bcp(bcp_offset, Rdst, Signed);
+    if (bcp_offset & 3) {
+      load_const_optimized(Rdst, bcp_offset);
+      lwax(Rdst, R14_bcp, Rdst);
+    } else {
+      lwa(Rdst, bcp_offset, R14_bcp);
+    }
     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
     nand(Rdst, Rdst, Rdst); // convert to plain index
   } else if (index_size == sizeof(u1)) {

@@ -397,6 +422,29 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, int b
   add(cache, R27_constPoolCache, cache);
 }

+// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
+// from (Rsrc)+offset.
+void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
+                                       signedOrNot is_signed) {
+#if defined(VM_LITTLE_ENDIAN)
+  if (offset) {
+    load_const_optimized(Rdst, offset);
+    lwbrx(Rdst, Rdst, Rsrc);
+  } else {
+    lwbrx(Rdst, Rsrc);
+  }
+  if (is_signed == Signed) {
+    extsw(Rdst, Rdst);
+  }
+#else
+  if (is_signed == Signed) {
+    lwa(Rdst, offset, Rsrc);
+  } else {
+    lwz(Rdst, offset, Rsrc);
+  }
+#endif
+}
+
 // Load object from cpool->resolved_references(index).
 void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
   assert_different_registers(result, index);
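
The new get_u4() always reads a 4-byte value stored in Java (big-endian) order: on little-endian hosts it uses the byte-reversing lwbrx, on big-endian hosts a plain lwa/lwz. Equivalent C++, assuming a GCC-style compiler (illustrative; bcp stands for the bytecode pointer):

    #include <cstdint>
    #include <cstring>

    static int32_t get_u4_signed(const uint8_t* bcp, int offset) {
      uint32_t v;
      std::memcpy(&v, bcp + offset, sizeof v);  // bytes are big-endian in the class file
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
      v = __builtin_bswap32(v);                 // the lwbrx path
    #endif                                      // big-endian hosts use the value as-is (lwa path)
      return (int32_t) v;
    }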
@@ -130,6 +130,7 @@ class InterpreterMacroAssembler: public MacroAssembler {

   void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));

+  void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);

   // common code
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -31,7 +31,12 @@
  private:

   address generate_abstract_entry(void);
-  address generate_accessor_entry(void);
+  address generate_jump_to_normal_entry(void);
+  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
   address generate_Reference_get_entry(void);

+  // Not supported
+  address generate_CRC32_update_entry() { return NULL; }
+  address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
 #endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
@@ -428,6 +428,19 @@ address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type
   return entry;
 }

+// Call an accessor method (assuming it is resolved, otherwise drop into
+// vanilla (slow path) entry.
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+  address entry = __ pc();
+  address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
+  assert(normal_entry != NULL, "should already be generated.");
+  __ branch_to_entry(normal_entry, R11_scratch1);
+  __ flush();
+
+  return entry;
+}
+
 // Abstract method entry.
 //
 address InterpreterGenerator::generate_abstract_entry(void) {

@@ -485,203 +498,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {
   return entry;
 }

-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry.
-address InterpreterGenerator::generate_accessor_entry(void) {
-  if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
-    return NULL;
-  }
-
-  Label Lslow_path, Lacquire;
-
-  const Register
-         Rclass_or_obj = R3_ARG1,
-         Rconst_method = R4_ARG2,
-         Rcodes        = Rconst_method,
-         Rcpool_cache  = R5_ARG3,
-         Rscratch      = R11_scratch1,
-         Rjvmti_mode   = Rscratch,
-         Roffset       = R12_scratch2,
-         Rflags        = R6_ARG4,
-         Rbtable       = R7_ARG5;
-
-  static address branch_table[number_of_states];
-
-  address entry = __ pc();
-
-  // Check for safepoint:
-  // Ditch this, real man don't need safepoint checks.
-
-  // Also check for JVMTI mode
-  // Check for null obj, take slow path if so.
-  __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
-  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
-  __ cmpdi(CCR1, Rclass_or_obj, 0);
-  __ cmpwi(CCR0, Rjvmti_mode, 0);
-  __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
-  __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
-
-  // Do 2 things in parallel:
-  // 1. Load the index out of the first instruction word, which looks like this:
-  //    <0x2a><0xb4><index (2 byte, native endianess)>.
-  // 2. Load constant pool cache base.
-  __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
-  __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
-
-  __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
-  __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
-
-  // Get the const pool entry by means of <index>.
-  const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
-  __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
-  __ add(Rcpool_cache, Rscratch, Rcpool_cache);
-
-  // Check if cpool cache entry is resolved.
-  // We are resolved if the indices offset contains the current bytecode.
-  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
-  // Big Endian:
-  __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
-  __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
-  __ bne(CCR0, Lslow_path);
-  __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
-
-  // Finally, start loading the value: Get cp cache entry into regs.
-  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
-  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
-
-  // Following code is from templateTable::getfield_or_static
-  // Load pointer to branch table
-  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
-
-  // Get volatile flag
-  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
-  // note: sync is needed before volatile load on PPC64
-
-  // Check field type
-  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
-
-#ifdef ASSERT
-  Label LFlagInvalid;
-  __ cmpldi(CCR0, Rflags, number_of_states);
-  __ bge(CCR0, LFlagInvalid);
-
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x543);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  // Load from branch table and dispatch (volatile case: one instruction ahead)
-  __ sldi(Rflags, Rflags, LogBytesPerWord);
-  __ cmpwi(CCR6, Rscratch, 1); // volatile?
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
-  }
-  __ ldx(Rbtable, Rbtable, Rflags);
-
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
-  }
-  __ mtctr(Rbtable);
-  __ bctr();
-
-#ifdef ASSERT
-  __ bind(LFlagInvalid);
-  __ stop("got invalid flag", 0x6541);
-
-  bool all_uninitialized = true,
-       all_initialized   = true;
-  for (int i = 0; i<number_of_states; ++i) {
-    all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
-    all_initialized   = all_initialized   && (branch_table[i] != NULL);
-  }
-  assert(all_uninitialized != all_initialized, "consistency"); // either or
-
-  __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-  if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
-  if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
-  if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
-  __ stop("unexpected type", 0x6551);
-#endif
-
-  if (branch_table[itos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[itos] = __ pc(); // non-volatile_entry point
-    __ lwax(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[ltos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[ltos] = __ pc(); // non-volatile_entry point
-    __ ldx(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[btos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[btos] = __ pc(); // non-volatile_entry point
-    __ lbzx(R3_RET, Rclass_or_obj, Roffset);
-    __ extsb(R3_RET, R3_RET);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[ctos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[ctos] = __ pc(); // non-volatile_entry point
-    __ lhzx(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[stos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[stos] = __ pc(); // non-volatile_entry point
-    __ lhax(R3_RET, Rclass_or_obj, Roffset);
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  if (branch_table[atos] == 0) { // generate only once
-    __ align(32, 28, 28); // align load
-    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
-    branch_table[atos] = __ pc(); // non-volatile_entry point
-    __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
-    __ verify_oop(R3_RET);
-    //__ dcbt(R3_RET); // prefetch
-    __ beq(CCR6, Lacquire);
-    __ blr();
-  }
-
-  __ align(32, 12);
-  __ bind(Lacquire);
-  __ twi_0(R3_RET);
-  __ isync(); // acquire
-  __ blr();
-
-#ifdef ASSERT
-  for (int i = 0; i<number_of_states; ++i) {
-    assert(branch_table[i], "accessor_entry initialization");
-    //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
-  }
-#endif
-
-  __ bind(Lslow_path);
-  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
-  __ flush();
-
-  return entry;
-}
-
 // Interpreter intrinsic for WeakReference.get().
 // 1. Don't push a full blown frame and go on dispatching, but fetch the value
 //    into R8 and return quickly

@@ -713,7 +529,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
   // and so we don't need to call the G1 pre-barrier. Thus we can use the
   // regular method entry code to generate the NPE.
   //
-  // This code is based on generate_accessor_enty.

   address entry = __ pc();

@@ -768,7 +583,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

     return entry;
   } else {
-    return generate_accessor_entry();
+    return generate_jump_to_normal_entry();
   }
 }
@@ -1283,8 +1283,6 @@ int Compile::ConstantTable::calculate_table_base_offset() const {

 bool MachConstantBaseNode::requires_postalloc_expand() const { return true; }
 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
-  Compile *C = ra_->C;
-
   iRegPdstOper *op_dst = new iRegPdstOper();
   MachNode *m1 = new loadToc_hiNode();
   MachNode *m2 = new loadToc_loNode();

@@ -2229,7 +2227,7 @@ const bool Matcher::isSimpleConstant64(jlong value) {
 }
 /* TODO: PPC port
 // Make a new machine dependent decode node (with its operands).
-MachTypeNode *Matcher::make_decode_node(Compile *C) {
+MachTypeNode *Matcher::make_decode_node() {
   assert(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0,
          "This method is only implemented for unscaled cOops mode so far");
   MachTypeNode *decode = new decodeN_unscaledNode();

@@ -2593,7 +2591,7 @@ typedef struct {
   MachNode *_last;
 } loadConLNodesTuple;

-loadConLNodesTuple loadConLNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
+loadConLNodesTuple loadConLNodesTuple_create(PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
                                              OptoReg::Name reg_second, OptoReg::Name reg_first) {
   loadConLNodesTuple nodes;

@@ -2669,7 +2667,7 @@ encode %{
   enc_class postalloc_expand_load_long_constant(iRegLdst dst, immL src, iRegLdst toc) %{
     // Create new nodes.
     loadConLNodesTuple loadConLNodes =
-      loadConLNodesTuple_create(C, ra_, n_toc, op_src,
+      loadConLNodesTuple_create(ra_, n_toc, op_src,
                                 ra_->get_reg_second(this), ra_->get_reg_first(this));

     // Push new nodes.

@@ -3391,7 +3389,7 @@ encode %{
     immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));

     loadConLNodesTuple loadConLNodes =
-      loadConLNodesTuple_create(C, ra_, n_toc, op_repl,
+      loadConLNodesTuple_create(ra_, n_toc, op_repl,
                                 ra_->get_reg_second(this), ra_->get_reg_first(this));

     // Push new nodes.

@@ -3611,7 +3609,7 @@ encode %{

     // Create the nodes for loading the IC from the TOC.
     loadConLNodesTuple loadConLNodes_IC =
-      loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
+      loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
                                 OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));

     // Create the call node.

@@ -3765,7 +3763,7 @@ encode %{
 #if defined(ABI_ELFv2)
     jlong entry_address = (jlong) this->entry_point();
     assert(entry_address, "need address here");
-    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address),
+    loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
                                                     OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
 #else
     // Get the struct that describes the function we are about to call.

@@ -3777,13 +3775,13 @@ encode %{
     loadConLNodesTuple loadConLNodes_Toc;

     // Create nodes and operands for loading the entry point.
-    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address),
+    loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
                                                     OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));

     // Create nodes and operands for loading the env pointer.
     if (fd->env() != NULL) {
-      loadConLNodes_Env = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->env()),
+      loadConLNodes_Env = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->env()),
                                                     OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
     } else {
       loadConLNodes_Env._large_hi = NULL;

@@ -3796,7 +3794,7 @@ encode %{
     }

     // Create nodes and operands for loading the Toc point.
-    loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->toc()),
+    loadConLNodes_Toc = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->toc()),
                                                   OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
 #endif // ABI_ELFv2
     // mtctr node
@@ -30,7 +30,6 @@
   address generate_normal_entry(bool synchronized);
   address generate_native_entry(bool synchronized);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_empty_entry(void);

   void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
   void unlock_method(bool check_exceptions = true);
@@ -176,8 +176,12 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
   const Register size  = R12_scratch2;
   __ get_cache_and_index_at_bcp(cache, 1, index_size);

-  // Big Endian (get least significant byte of 64 bit value):
+  // Get least significant byte of 64 bit value:
+#if defined(VM_LITTLE_ENDIAN)
+  __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()), cache);
+#else
   __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
+#endif
   __ sldi(size, size, Interpreter::logStackElementSize);
   __ add(R15_esp, R15_esp, size);
   __ dispatch_next(state, step);

@@ -598,48 +602,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist

 // End of helpers

-// ============================================================================
-// Various method entries
-//
-
-// Empty method, generate a very fast return. We must skip this entry if
-// someone's debugging, indicated by the flag
-// "interp_mode" in the Thread obj.
-// Note: empty methods are generated mostly methods that do assertions, which are
-// disabled in the "java opt build".
-address TemplateInterpreterGenerator::generate_empty_entry(void) {
-  if (!UseFastEmptyMethods) {
-    NOT_PRODUCT(__ should_not_reach_here();)
-    return Interpreter::entry_for_kind(Interpreter::zerolocals);
-  }
-
-  Label Lslow_path;
-  const Register Rjvmti_mode = R11_scratch1;
-  address entry = __ pc();
-
-  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
-  __ cmpwi(CCR0, Rjvmti_mode, 0);
-  __ bne(CCR0, Lslow_path); // jvmti_mode!=0
-
-  // Noone's debuggin: Simply return.
-  // Pop c2i arguments (if any) off when we return.
-#ifdef ASSERT
-  __ ld(R9_ARG7, 0, R1_SP);
-  __ ld(R10_ARG8, 0, R21_sender_SP);
-  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
-  __ asm_assert_eq("backlink", 0x545);
-#endif // ASSERT
-  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
-  // And we're done.
-  __ blr();
-
-  __ bind(Lslow_path);
-  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
-  __ flush();
-
-  return entry;
-}
-
 // Support abs and sqrt like in compiler.
 // For others we can use a normal (native) entry.

@@ -858,7 +820,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // Our signature handlers copy required arguments to the C stack
   // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
   __ mr(R3_ARG1, R18_locals);
+#if !defined(ABI_ELFv2)
   __ ld(signature_handler_fd, 0, signature_handler_fd);
+#endif

   __ call_stub(signature_handler_fd);

@@ -1020,8 +984,13 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // native result across the call. No oop is present.

   __ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+  __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+#else
   __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
             relocInfo::none);
+#endif

   __ bind(sync_check_done);

@@ -1278,45 +1247,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
   return entry;
 }

-// =============================================================================
-// Entry points
-
-address AbstractInterpreterGenerator::generate_method_entry(
-                                        AbstractInterpreter::MethodKind kind) {
-  // Determine code generation flags.
-  bool synchronized = false;
-  address entry_point = NULL;
-
-  switch (kind) {
-  case Interpreter::zerolocals             :                                                                             break;
-  case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
-  case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
-  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
-  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
-  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
-  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;
-
-  case Interpreter::java_lang_math_sin     : // fall thru
-  case Interpreter::java_lang_math_cos     : // fall thru
-  case Interpreter::java_lang_math_tan     : // fall thru
-  case Interpreter::java_lang_math_abs     : // fall thru
-  case Interpreter::java_lang_math_log     : // fall thru
-  case Interpreter::java_lang_math_log10   : // fall thru
-  case Interpreter::java_lang_math_sqrt    : // fall thru
-  case Interpreter::java_lang_math_pow     : // fall thru
-  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
-  case Interpreter::java_lang_ref_reference_get
-                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
-  default                                  : ShouldNotReachHere();                                                       break;
-  }
-
-  if (entry_point) {
-    return entry_point;
-  }
-
-  return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
-}
-
 // These should never be compiled since the interpreter will prefer
 // the compiled version to the intrinsic version.
 bool AbstractInterpreter::can_be_compiled(methodHandle m) {

@@ -1344,7 +1274,7 @@ int AbstractInterpreter::size_activation(int max_stack,
                                          int callee_locals,
                                          bool is_top_frame) {
   // Note: This calculation must exactly parallel the frame setup
-  // in AbstractInterpreterGenerator::generate_method_entry.
+  // in InterpreterGenerator::generate_fixed_frame.
   assert(Interpreter::stackElementWords == 1, "sanity");
   const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
   const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
@@ -189,8 +189,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Reg
     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
     assert(load_bc_into_bc_reg, "we use bc_reg as temp");
     __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
-    // Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF
+    // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
+#if defined(VM_LITTLE_ENDIAN)
+    __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
+#else
     __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
+#endif
     __ cmpwi(CCR0, Rnew_bc, 0);
     __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
     __ beq(CCR0, L_patch_done);
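
The two lbz offsets above select byte number (1 + byte_no), counted from the least significant end, out of the 8-byte _indices field: on little-endian that byte lives at offset 1 + byte_no from the field's address, on big-endian at 7 - (1 + byte_no). Standalone illustration of the indexing (not HotSpot code):

    #include <cstdint>

    static uint8_t byte_of_u8(const uint64_t* indices, int byte_no) {
      const uint8_t* p = (const uint8_t*) indices;
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
      return p[1 + byte_no];          // LSB first
    #else
      return p[7 - (1 + byte_no)];    // MSB first
    #endif
    }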
@@ -1839,8 +1843,8 @@ void TemplateTable::tableswitch() {
   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

   // Load lo & hi.
-  __ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr);
-  __ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr);
+  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
+  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned);

   // Check for default case (=index outside [low,high]).
   __ cmpw(CCR0, R17_tos, Rlow_byte);

@@ -1854,12 +1858,17 @@ void TemplateTable::tableswitch() {
   __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
   __ sldi(Rindex, Rindex, LogBytesPerInt);
   __ addi(Rindex, Rindex, 3 * BytesPerInt);
+#if defined(VM_LITTLE_ENDIAN)
+  __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
+  __ extsw(Roffset, Roffset);
+#else
   __ lwax(Roffset, Rdef_offset_addr, Rindex);
+#endif
   __ b(Ldispatch);

   __ bind(Ldefault_case);
   __ profile_switch_default(Rhigh_byte, Rscratch1);
-  __ lwa(Roffset, 0, Rdef_offset_addr);
+  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);

   __ bind(Ldispatch);

@@ -1875,12 +1884,11 @@ void TemplateTable::lookupswitch() {
 // Table switch using linear search through cases.
 // Bytecode stream format:
 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
-// Note: Everything is big-endian format here. So on little endian machines, we have to revers offset and count and cmp value.
+// Note: Everything is big-endian format here.
 void TemplateTable::fast_linearswitch() {
   transition(itos, vtos);

-  Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;
+  Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
   Register Rcount           = R3_ARG1,
            Rcurrent_pair    = R4_ARG2,
            Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.

@@ -1894,47 +1902,40 @@ void TemplateTable::fast_linearswitch() {
   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

   // Setup loop counter and limit.
-  __ lwz(Rcount, BytesPerInt, Rdef_offset_addr);    // Load count.
+  __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
   __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.

-  // Set up search loop.
-  __ cmpwi(CCR0, Rcount, 0);
-  __ beq(CCR0, Ldefault_case);
-
   __ mtctr(Rcount);
+  __ cmpwi(CCR0, Rcount, 0);
+  __ bne(CCR0, Lloop_entry);

-  // linear table search
-  __ bind(Lsearch_loop);
-
-  __ lwz(Rvalue, 0, Rcurrent_pair);
-  __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);
-
-  __ cmpw(CCR0, Rvalue, Rcmp_value);
-  __ beq(CCR0, Lfound);
-
-  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
-  __ bdnz(Lsearch_loop);
-
-  // default case
+  // Default case
   __ bind(Ldefault_case);
-  __ lwa(Roffset, 0, Rdef_offset_addr);
+  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
   if (ProfileInterpreter) {
     __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
-    __ b(Lcontinue_execution);
   }
+  __ b(Lcontinue_execution);

+  // Next iteration
+  __ bind(Lsearch_loop);
+  __ bdz(Ldefault_case);
+  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
+  __ bind(Lloop_entry);
+  __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
+  __ cmpw(CCR0, Rvalue, Rcmp_value);
+  __ bne(CCR0, Lsearch_loop);
+
+  // Found, load offset.
+  __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
+  // Calculate case index and profile
+  __ mfctr(Rcurrent_pair);
   if (ProfileInterpreter) {
+    __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
+    __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
+  }
-  // Entry found, skip Roffset bytecodes and continue.
-  __ bind(Lfound);
-  if (ProfileInterpreter) {
-    // Calc the num of the pair we hit. Careful, Rcurrent_pair points 2 ints
-    // beyond the actual current pair due to the auto update load above!
-    __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
-    __ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt);
-    __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
-    __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
-    __ bind(Lcontinue_execution);
-  }
+  __ bind(Lcontinue_execution);
   __ add(R14_bcp, Roffset, R14_bcp);
   __ dispatch_next(vtos);
 }
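
Shape of the data the rewritten fast_linearswitch() walks: after the lookupswitch opcode and alignment padding come a default offset (4 bytes), a pair count (4 bytes), then (match, offset) pairs of 8 bytes each, all big-endian. A C++ model of the search, reusing the get_u4_signed sketch from the interp_masm notes above (illustrative):

    static int32_t lookupswitch_target(const uint8_t* aligned_operands, int32_t key) {
      int32_t  def = get_u4_signed(aligned_operands, 0);
      uint32_t n   = (uint32_t) get_u4_signed(aligned_operands, 4);
      for (uint32_t i = 0; i < n; i++) {
        int32_t match  = get_u4_signed(aligned_operands, 8 + 8 * i);
        int32_t offset = get_u4_signed(aligned_operands, 8 + 8 * i + 4);
        if (match == key) return offset;  // branch by this offset
      }
      return def;                         // no pair matched: take the default
    }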
@ -1990,7 +1991,7 @@ void TemplateTable::fast_binaryswitch() {
|
||||
|
||||
// initialize i & j
|
||||
__ li(Ri,0);
|
||||
__ lwz(Rj, -BytesPerInt, Rarray);
|
||||
__ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
|
||||
|
||||
// and start.
|
||||
Label entry;
|
||||
@ -2007,7 +2008,11 @@ void TemplateTable::fast_binaryswitch() {
|
||||
// i = h;
|
||||
// }
|
||||
__ sldi(Rscratch, Rh, log_entry_size);
|
||||
#if defined(VM_LITTLE_ENDIAN)
|
||||
__ lwbrx(Rscratch, Rscratch, Rarray);
|
||||
#else
|
||||
__ lwzx(Rscratch, Rscratch, Rarray);
|
||||
#endif
|
||||
|
||||
// if (key < current value)
|
||||
// Rh = Rj
|
||||
@ -2039,20 +2044,20 @@ void TemplateTable::fast_binaryswitch() {
|
||||
// Ri = value offset
|
||||
__ sldi(Ri, Ri, log_entry_size);
|
||||
__ add(Ri, Ri, Rarray);
|
||||
__ lwz(Rscratch, 0, Ri);
|
||||
__ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
|
||||
|
||||
Label not_found;
|
||||
// Ri = offset offset
|
||||
__ cmpw(CCR0, Rkey, Rscratch);
|
||||
__ beq(CCR0, not_found);
|
||||
// entry not found -> j = default offset
|
||||
__ lwz(Rj, -2 * BytesPerInt, Rarray);
|
||||
__ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
|
||||
__ b(default_case);
|
||||
|
||||
__ bind(not_found);
|
||||
// entry found -> j = offset
|
||||
__ profile_switch_case(Rh, Rj, Rscratch, Rkey);
|
||||
__ lwz(Rj, BytesPerInt, Ri);
|
||||
__ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
__ b(continue_execution);
|
||||
@ -2147,8 +2152,11 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist

assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
// We are resolved if the indices offset contains the current bytecode.
// Big Endian:
#if defined(VM_LITTLE_ENDIAN)
__ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
#else
__ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
#endif
// Acquire by cmp-br-isync (see below).
__ cmpdi(CCR0, Rscratch, (int)bytecode());
__ beq(CCR0, Lresolved);

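The two lbz variants read the same logical byte of the 8-byte indices field: bytecode slot byte_no lives at byte byte_no + 1 counting from the least significant end, which becomes byte 7 - (byte_no + 1) when addressing from the most significant end of a big-endian layout. A one-line helper capturing that arithmetic (hypothetical, for illustration):

#include <cstdint>

// Pick the memory offset of logical byte 'byte_no + 1' inside an 8-byte
// field, matching the two lbz variants above.
static int indices_byte_offset(int byte_no, bool little_endian) {
  int logical = byte_no + 1;                    // slot within the word
  return little_endian ? logical : 7 - logical; // flip for big-endian layout
}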
@ -29,6 +29,7 @@
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/defaultStream.hpp"
#include "vm_version_ppc.hpp"
@ -108,7 +109,7 @@ void VM_Version::initialize() {
(has_vand() ? " vand" : "")
// Make sure number of %s matches num_features!
);
_features_str = strdup(buf);
_features_str = os::strdup(buf);
NOT_PRODUCT(if (Verbose) print_features(););

// PPC64 supports 8-byte compare-exchange operations (see

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
@ -68,9 +69,7 @@ bool CppInterpreter::contains(address pc) {
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#define __ _masm->

Label frame_manager_entry;
Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized
// c++ interpreter entry point this holds that entry point label.
Label frame_manager_entry; // c++ interpreter entry point this holds that entry point label.

static address unctrap_frame_manager_entry = NULL;

@ -452,110 +451,6 @@ address InterpreterGenerator::generate_empty_entry(void) {
return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
// parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
address entry = __ pc();
Label slow_path;

if ( UseFastAccessorMethods) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
__ delayed()->nop();

// Check if local 0 != NULL
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
__ tst(Otos_i); // check if local 0 == NULL and go the slow path
__ brx(Assembler::zero, false, Assembler::pn, slow_path);
__ delayed()->nop();


// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
__ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);

// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);

// get constant pool cache
__ ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
__ ld_ptr(G3_scratch, in_bytes(ConstMethod::constants_offset()), G3_scratch);
__ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);

// get specific constant pool cache entry
__ add(G3_scratch, G1_scratch, G3_scratch);

// Check the constant Pool cache entry to see if it has been resolved.
// If not, need the slow path.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
__ and3(G1_scratch, 0xFF, G1_scratch);
__ cmp(G1_scratch, Bytecodes::_getfield);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
__ delayed()->nop();

// Get the type and return field offset from the constant pool cache
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
__ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);

Label xreturn_path;
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Get the type from the constant pool cache
__ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
// Make sure we don't need to mask G1_scratch after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmp(G1_scratch, atos );
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, itos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, stos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, ctos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
__ cmp(G1_scratch, btos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
__ should_not_reach_here();
#endif
__ ldsb(Otos_i, G3_scratch, Otos_i);
__ bind(xreturn_path);

// _ireturn/_areturn
__ retl(); // return from leaf routine
__ delayed()->mov(O5_savedSP, SP);

// Generate regular method entry
__ bind(slow_path);
__ ba(fast_accessor_slow_entry_path);
__ delayed()->nop();
return entry;
}
return NULL;
}

address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@ -573,7 +468,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}

//
@ -1870,23 +1765,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ ba(call_interpreter_2);
__ delayed()->st_ptr(O1, STATE(_stack));


// Fast accessor methods share this entry point.
// This works because frame manager is in the same codelet
// This can either be an entry via call_stub/c1/c2 or a recursive interpreter call
// we need to do a little register fixup here once we distinguish the two of them
if (UseFastAccessorMethods && !synchronized) {
// Call stub_return address still in O7
__ bind(fast_accessor_slow_entry_path);
__ set((intptr_t)return_from_native_method - 8, Gtmp1);
__ cmp(Gtmp1, O7); // returning to interpreter?
__ brx(Assembler::equal, true, Assembler::pt, re_dispatch); // yep
__ delayed()->nop();
__ ba(re_dispatch);
__ delayed()->mov(G0, prevState); // initial entry

}

// interpreter returning to native code (call_stub/c1/c2)
// convert result and unwind initial activation
// L2_scratch - scaled result type index

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,9 +32,11 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
// there are no math intrinsics on sparc
address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry(void);
void lock_method(void);
void save_native_result(void);
@ -43,4 +45,7 @@
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label& Lcontinue);

// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP

@ -241,6 +241,15 @@ void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

// Various method entries

address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry = __ pc();
assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
__ jump_to(al, G3_scratch);
__ delayed()->nop();
return entry;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
@ -255,159 +264,6 @@ address InterpreterGenerator::generate_abstract_entry(void) {

}


//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accommodate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |

address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;

switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;

case Interpreter::java_lang_math_sin : break;
case Interpreter::java_lang_math_cos : break;
case Interpreter::java_lang_math_tan : break;
case Interpreter::java_lang_math_sqrt : break;
case Interpreter::java_lang_math_abs : break;
case Interpreter::java_lang_math_log : break;
case Interpreter::java_lang_math_log10 : break;
case Interpreter::java_lang_math_pow : break;
case Interpreter::java_lang_math_exp : break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default:
fatal(err_msg("unexpected method kind: %d", kind));
break;
}

if (entry_point) return entry_point;

return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
}


bool AbstractInterpreter::can_be_compiled(methodHandle m) {
// No special entry points that preclude compilation
return true;

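generate_method_entry above is the per-platform dispatch from method kind to generated stub; every kind it leaves NULL falls through to the normal interpreted entry, which is also how the removed accessor and empty fast paths are handled after this change. A condensed, self-contained sketch of that pattern (all names are stand-ins, not HotSpot API):

#include <cstddef>

// Condensed sketch of the kind -> entry-point dispatch; every name here is
// a stand-in, not the HotSpot API.
enum MethodKind { kZeroLocals, kNative, kEmpty, kAccessor, kAbstract };
typedef const char* address;

static address generate_native_entry(bool)   { return "native_entry"; }
static address generate_abstract_entry()     { return "abstract_entry"; }
static address generate_normal_entry(bool)   { return "normal_entry"; }

static address method_entry_for(MethodKind kind) {
  bool synchronized = false;
  address entry_point = NULL;
  switch (kind) {
    case kNative:   entry_point = generate_native_entry(synchronized); break;
    case kAbstract: entry_point = generate_abstract_entry();           break;
    case kEmpty:       // no dedicated stubs any more: fall through and
    case kAccessor:    // share the normal entry with zerolocals
    case kZeroLocals:
    default:        break;
  }
  // Anything the switch left NULL uses the normal interpreted entry.
  return entry_point != NULL ? entry_point : generate_normal_entry(synchronized);
}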
@ -6184,7 +6184,11 @@ instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
ins_cost(DEFAULT_COST * 3/2);
format %{ "SET $con,$dst\t! non-oop ptr" %}
ins_encode %{
__ set($con$$constant, $dst$$Register);
if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
__ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
} else {
__ set($con$$constant, $dst$$Register);
}
%}
ins_pipe(loadConP);
%}

@ -456,6 +456,115 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accommodate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//
//
@ -599,136 +708,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {

}

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

// A method that does nothing but return...

address entry = __ pc();
Label slow_path;

// do nothing for empty methods (do not even increment invocation counter)
if ( UseFastEmptyMethods) {
// If we need a safepoint check, generate full interpreter entry.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ set(sync_state, G3_scratch);
__ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

// Code: _return
__ retl();
__ delayed()->mov(O5_savedSP, SP);

__ bind(slow_path);
(void) generate_normal_entry(false);

return entry;
}
return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
// parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
address entry = __ pc();
Label slow_path;


// XXX: for compressed oops pointer loading and decoding doesn't fit in
// delay slot and damages G1
if ( UseFastAccessorMethods && !UseCompressedOops ) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);

// Check if local 0 != NULL
__ ld_ptr(Gargs, G0, Otos_i ); // get local 0
// check if local 0 == NULL and go the slow path
__ br_null_short(Otos_i, Assembler::pn, slow_path);


// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
__ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
__ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);

// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);

// get constant pool cache
__ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
__ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
__ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);

// get specific constant pool cache entry
__ add(G3_scratch, G1_scratch, G3_scratch);

// Check the constant Pool cache entry to see if it has been resolved.
// If not, need the slow path.
ByteSize cp_base_offset = ConstantPoolCache::base_offset();
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
__ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
__ and3(G1_scratch, 0xFF, G1_scratch);
__ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);

// Get the type and return field offset from the constant pool cache
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
__ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

Label xreturn_path;
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Get the type from the constant pool cache
__ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
// Make sure we don't need to mask G1_scratch after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
__ cmp(G1_scratch, atos );
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, itos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ld(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, stos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
__ cmp(G1_scratch, ctos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
__ cmp(G1_scratch, btos);
__ br(Assembler::equal, true, Assembler::pt, xreturn_path);
__ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
__ should_not_reach_here();
#endif
__ ldsb(Otos_i, G3_scratch, Otos_i);
__ bind(xreturn_path);

// _ireturn/_areturn
__ retl(); // return from leaf routine
__ delayed()->mov(O5_savedSP, SP);

// Generate regular method entry
__ bind(slow_path);
(void) generate_normal_entry(false);
return entry;
}
return NULL;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@ -806,7 +785,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}

//
@ -1242,8 +1221,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
address entry = __ pc();

@ -1410,123 +1387,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry;
}


//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
// G5_method: holds the Method* of the method to call
// Lesp: points to the TOS of the callers expression stack
// after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
// pop parameters from the callers stack by adjusting Lesp
// set O0 to Lesp
// compute X = (max_locals - num_parameters)
// bump SP up by X to accommodate the extra locals
// compute X = max_expression_stack
// + vm_local_words
// + 16 words of register save area
// save frame doing a save sp, -X, sp growing towards lower addresses
// set Lbcp, Lmethod, LcpoolCache
// set Llocals to i0
// set Lmonitors to FP - rounded_vm_local_words
// set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : free :
// | |
// +---------------+ <--- Gargs
// | |
// : arguments :
// | |
// +---------------+
// | |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// | |
// : reg save area :
// | |
// +---------------+ <--- sp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- sp + 0x5c
// | |
// : :
// | | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// | VM locals |
// +---------------+ <--- fp
// | |
// : reg save area :
// | |
// +---------------+ <--- fp + 0x40
// | |
// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
// | |
// +---------------+ <--- fp + 0x5c
// | |
// : free :
// | |
// +---------------+
// | |
// : nonarg locals :
// | |
// +---------------+
// | |
// : arguments :
// | | <--- Llocals
// +---------------+ <--- Gargs
// | |

static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {

// Figure out the size of an interpreter frame (in words) given that we have a fully allocated

@ -26,6 +26,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_sparc.hpp"

@ -249,7 +250,7 @@ void VM_Version::initialize() {
(!has_hardware_fsmuld() ? ", no-fsmuld" : ""));

// buf is started with ", " or is empty
_features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf);
_features_str = os::strdup(strlen(buf) > 2 ? buf + 2 : buf);

// There are three 64-bit SPARC families that do not overlap, e.g.,
// both is_ultra3() and is_sparc64() cannot be true at the same time.

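This hunk, like the PPC one earlier, replaces libc strdup with os::strdup, presumably so the copy is made through the VM's own tracked allocator rather than raw libc malloc. A hypothetical sketch of such a wrapper's shape (not the actual HotSpot implementation):

#include <cstring>
#include <cstdlib>

// Hypothetical sketch of an os::strdup-style wrapper: same contract as
// libc strdup, but the memory comes from the VM's tracked allocator.
static void* vm_tracked_malloc(size_t n) { return malloc(n); } // stand-in
static char* vm_strdup(const char* s) {
  size_t n = strlen(s) + 1;
  char* d = (char*)vm_tracked_malloc(n);
  if (d != NULL) memcpy(d, s, n);
  return d;
}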
@ -3853,6 +3853,15 @@ void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src) {
emit_int8((unsigned char)(0xC0 | encode));
}

// Carry-Less Multiplication Quadword
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
assert(VM_Version::supports_clmul(), "");
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
emit_int8(0x44);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8((unsigned char)mask);
}

// Carry-Less Multiplication Quadword
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");

@ -1837,6 +1837,7 @@ private:
void vpbroadcastd(XMMRegister dst, XMMRegister src);

// Carry-Less Multiplication Quadword
void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);

// AVX instruction which is used to clear upper 128 bits of YMM registers and

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,21 +27,6 @@

protected:

#if 0
address generate_asm_interpreter_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_Reference_get_entry(void);
void lock_method(void);
void generate_stack_overflow_check(void);

void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label* do_continue);
#endif

void generate_more_monitors();
void generate_deopt_handling();
address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,9 +66,6 @@ extern "C" void RecursiveInterpreterActivation(interpreterState istate )
#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))

Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized
// c++ interpreter entry point this holds that entry point label.

// default registers for state and sender_sp
// state and sender_sp are the same on 32bit because we have no choice.
// state could be rsi on 64bit but it is an arg reg and not callee save
@ -660,7 +657,6 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// generate_method_entry) so the guard should work for them too.
//

// monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@ -794,156 +790,6 @@ void InterpreterGenerator::lock_method(void) {
__ lock_object(monitor);
}

// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry

address InterpreterGenerator::generate_accessor_entry(void) {

// rbx: Method*

// rsi/r13: senderSP must be preserved for slow path, set SP to it on fast path

Label xreturn_path;

// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {

address entry_point = __ pc();

Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);

__ jcc(Assembler::notEqual, slow_path);
// ASM/C++ Interpreter
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
__ movptr(rax, Address(rsp, wordSize));

// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);

// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, Method::const_offset()));
__ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
__ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

// rax,: local 0
// rbx,: method
// rcx: receiver - do not destroy since it is needed for slow path!
// rcx: scratch
// rdx: constant pool cache index
// rdi: constant pool cache
// rsi/r13: sender sp

// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);

// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);

// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
// Make sure we don't need to mask rdx after the above shift
ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
Label notObj;
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ movptr(rax, field_address);
__ jmp(xreturn_path);

__ bind(notObj);
#endif // _LP64
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);

__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
__ load_signed_short(rax, field_address);
__ jmp(xreturn_path);

__ bind(notShort);
__ cmpl(rdx, ctos);
__ jcc(Assembler::notEqual, notChar);
__ load_unsigned_short(rax, field_address);
__ jmp(xreturn_path);

__ bind(notChar);
#ifdef ASSERT
Label okay;
#ifndef _LP64
__ cmpl(rdx, atos);
__ jcc(Assembler::equal, okay);
#endif // _LP64
__ cmpl(rdx, itos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif // ASSERT
// All the rest are a 32 bit wordsize
__ movl(rax, field_address);

__ bind(xreturn_path);

// _ireturn/_areturn
__ pop(rdi); // get return address
__ mov(rsp, sender_sp_on_entry); // set sp to sender sp
__ jmp(rdi);

// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
// We will enter c++ interpreter looking like it was
// called by the call_stub this will cause it to return
// a tosca result to the invoker which might have been
// the c++ interpreter itself.

__ jmp(fast_accessor_slow_entry_path);
return entry_point;

} else {
return NULL;
}

}

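The removed fast path keyed off the rewritten accessor shape aload_0 (0x2a), getfield (0xb4), two index bytes, fetched with a single 32-bit load so the word reads <index><0xb4><0x2a> on little-endian x86. A hedged C++ sketch of that recognition step (illustrative; assumes, as the shift code above implies, that the rewritten index is stored in native byte order):

#include <cstdint>

// Illustrative check for the rewritten accessor shape: aload_0 (0x2a),
// getfield (0xb4), two index bytes. If HotSpot stores the rewritten index
// in native byte order, the low byte comes first on x86.
static bool looks_like_accessor(const uint8_t* code, uint16_t* cp_cache_index) {
  if (code[0] != 0x2a || code[1] != 0xb4) return false;   // aload_0; getfield
  *cp_cache_index = (uint16_t)(code[2] | (code[3] << 8)); // native order
  return true;
}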
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@ -961,7 +807,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}

//
@ -1670,10 +1516,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {

address entry_point = __ pc();

// Fast accessor methods share this entry point.
// This works because frame manager is in the same codelet
if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);

Label dispatch_entry_2;
__ movptr(rcx, sender_sp_on_entry);
__ movptr(state, (int32_t)NULL_WORD); // no current activation
@ -2212,40 +2054,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}

address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;

switch (kind) {
case Interpreter::zerolocals : break;
case Interpreter::zerolocals_synchronized: synchronized = true; break;
case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;

case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
case Interpreter::java_lang_math_tan : // fall thru
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
case Interpreter::java_lang_math_sqrt : // fall thru
case Interpreter::java_lang_math_pow : // fall thru
case Interpreter::java_lang_math_exp : // fall thru
entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
}

if (entry_point) return entry_point;

return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);

}

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {

hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp (new file, 66 lines)
@ -0,0 +1,66 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"

#define __ _masm->

// Jump into normal path for accessor and empty entry to jump to normal entry
// The "fast" optimization doesn't update the compilation count, and can
// therefore disable inlining of functions that should be inlined.
address InterpreterGenerator::generate_jump_to_normal_entry(void) {
address entry_point = __ pc();

assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
__ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
return entry_point;
}

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {

address entry_point = __ pc();

// abstract method entry

#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif

// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();

return entry_point;
}
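The new stub replaces the old hand-rolled fast paths with a plain tail-jump to the zerolocals (normal) entry, so trivial methods still pass through invocation counting and remain eligible for compilation and inlining. A minimal sketch of the control flow in ordinary C++ (hypothetical names; the real code emits a jump, not a call):

#include <cassert>
#include <cstddef>

// Hypothetical stand-ins for Interpreter::entry_for_kind(zerolocals) and
// the generated stub; the real code emits a bare jump instruction.
typedef void (*entry_fn)();

static void normal_entry() { /* counts the invocation, then interprets */ }
static entry_fn zerolocals_entry = &normal_entry;

// Accessor and empty methods now share this trampoline, so every call is
// counted and the method can still be JIT-compiled and inlined.
static void jump_to_normal_entry() {
  assert(zerolocals_entry != NULL && "normal entry must be generated first");
  zerolocals_entry();  // tail-jump in the generated assembly
}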
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,8 +36,9 @@
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
address generate_empty_entry(void);
address generate_accessor_entry(void);
address generate_jump_to_normal_entry(void);
address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);

@ -67,45 +67,6 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
}


//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

// rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must preserve
// rsi: sender sp must set sp to this value on return

if (!UseFastEmptyMethods) return NULL;

address entry_point = __ pc();

// If we need a safepoint check, generate full interpreter entry.
Label slow_path;
ExternalAddress state(SafepointSynchronize::address_of_state());
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);

// do nothing for empty methods (do not even increment invocation counter)
// Code: _return
// _return
// return w/o popping parameters
__ pop(rax);
__ mov(rsp, rsi);
__ jmp(rax);

__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;
}

address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

// rbx,: Method*
@ -216,36 +177,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
}


// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {

// rbx,: Method*
// rcx: receiver (unused)
// rsi: previous interpreter state (C++ interpreter) must preserve

// rsi: sender SP

address entry_point = __ pc();

// abstract method entry

#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif

// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();

return entry_point;
}


void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in

@ -301,66 +301,6 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
return entry_point;
}


// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
// rbx: Method*
// r13: sender SP

address entry_point = __ pc();

// abstract method entry

#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
#endif

// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();

return entry_point;
}


// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {
// rbx: Method*
// r13: sender sp must set sp to this value on return

if (!UseFastEmptyMethods) {
return NULL;
}

address entry_point = __ pc();

// If we need a safepoint check, generate full interpreter entry.
Label slow_path;
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);

// do nothing for empty methods (do not even increment invocation counter)
// Code: _return
// _return
// return w/o popping parameters
__ pop(rax);
__ mov(rsp, r13);
__ jmp(rax);

__ bind(slow_path);
(void) generate_normal_entry(false);
return entry_point;

}

void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {

// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in

@ -7316,17 +7316,34 @@ void MacroAssembler::update_byte_crc32(Register crc, Register val, Register tabl
* Fold 128-bit data chunk
*/
void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
vpclmulhdq(xtmp, xK, xcrc); // [123:64]
vpclmulldq(xcrc, xK, xcrc); // [63:0]
vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
pxor(xcrc, xtmp);
if (UseAVX > 0) {
vpclmulhdq(xtmp, xK, xcrc); // [123:64]
vpclmulldq(xcrc, xK, xcrc); // [63:0]
vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
pxor(xcrc, xtmp);
} else {
movdqa(xtmp, xcrc);
pclmulhdq(xtmp, xK); // [123:64]
pclmulldq(xcrc, xK); // [63:0]
pxor(xcrc, xtmp);
movdqu(xtmp, Address(buf, offset));
pxor(xcrc, xtmp);
}
}

void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
vpclmulhdq(xtmp, xK, xcrc);
vpclmulldq(xcrc, xK, xcrc);
pxor(xcrc, xbuf);
pxor(xcrc, xtmp);
if (UseAVX > 0) {
vpclmulhdq(xtmp, xK, xcrc);
vpclmulldq(xcrc, xK, xcrc);
pxor(xcrc, xbuf);
pxor(xcrc, xtmp);
} else {
movdqa(xtmp, xcrc);
pclmulhdq(xtmp, xK);
pclmulldq(xcrc, xK);
pxor(xcrc, xbuf);
pxor(xcrc, xtmp);
}
}

/**
|
||||
@ -7444,9 +7461,17 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
|
||||
// Fold 128 bits in xmm1 down into 32 bits in crc register.
|
||||
BIND(L_fold_128b);
|
||||
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
|
||||
vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
|
||||
vpand(xmm3, xmm0, xmm2, false /* vector256 */);
|
||||
vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
|
||||
if (UseAVX > 0) {
|
||||
vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
|
||||
vpand(xmm3, xmm0, xmm2, false /* vector256 */);
|
||||
vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
|
||||
} else {
|
||||
movdqa(xmm2, xmm0);
|
||||
pclmulqdq(xmm2, xmm1, 0x1);
|
||||
movdqa(xmm3, xmm0);
|
||||
pand(xmm3, xmm2);
|
||||
pclmulqdq(xmm0, xmm3, 0x1);
|
||||
}
|
||||
psrldq(xmm1, 8);
|
||||
psrldq(xmm2, 4);
|
||||
pxor(xmm0, xmm1);
|
||||
|
@ -966,6 +966,16 @@ public:
|
||||
void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
|
||||
void mulss(XMMRegister dst, AddressLiteral src);
|
||||
|
||||
// Carry-Less Multiplication Quadword
|
||||
void pclmulldq(XMMRegister dst, XMMRegister src) {
|
||||
// 0x00 - multiply lower 64 bits [0:63]
|
||||
Assembler::pclmulqdq(dst, src, 0x00);
|
||||
}
|
||||
void pclmulhdq(XMMRegister dst, XMMRegister src) {
|
||||
// 0x11 - multiply upper 64 bits [64:127]
|
||||
Assembler::pclmulqdq(dst, src, 0x11);
|
||||
}
|
||||
|
||||
void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
|
||||
void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
|
||||
void sqrtsd(XMMRegister dst, AddressLiteral src);
|
||||
|
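The two helpers above only pin down the pclmulqdq immediate: bit 0 selects which quadword of the destination operand participates and bit 4 selects the quadword of the source, so 0x00 multiplies the two low halves and 0x11 the two high halves. A minimal standalone sketch of the same selection using the compiler intrinsic (not HotSpot code; assumes a CLMUL-capable CPU and GCC/Clang with -mpclmul):

#include <immintrin.h>
#include <stdio.h>

int main() {
  __m128i a = _mm_set_epi64x(0x3, 0x87);  // high qword 0x3, low qword 0x87
  __m128i b = _mm_set_epi64x(0x5, 0x02);
  __m128i lo = _mm_clmulepi64_si128(a, b, 0x00); // like pclmulldq: low x low
  __m128i hi = _mm_clmulepi64_si128(a, b, 0x11); // like pclmulhdq: high x high
  unsigned long long r[2];
  _mm_storeu_si128((__m128i*)r, lo);
  printf("low  x low : %llx %llx\n", r[1], r[0]);
  _mm_storeu_si128((__m128i*)r, hi);
  printf("high x high: %llx %llx\n", r[1], r[0]);
  return 0;
}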
@ -38,7 +38,7 @@ int AbstractInterpreter::size_activation(int max_stack,
                                         int callee_locals,
                                         bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// in InterpreterGenerator::generate_fixed_frame.

// fixed size of an interpreter frame:
int overhead = frame::sender_sp_offset -

@ -468,10 +468,10 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
//      rax,

// NOTE: since the additional locals are also always pushed (wasn't obvious in
// generate_method_entry) so the guard should work for them too.
// generate_fixed_frame) so the guard should work for them too.
//

// monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
// monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

// total overhead size: entry_size + (saved rbp, thru expr stack bottom).

@ -633,145 +633,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
__ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

//
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry

address InterpreterGenerator::generate_accessor_entry(void) {

// rbx,: Method*
// rcx: receiver (preserve for slow entry into asm interpreter)

// rsi: senderSP must be preserved for slow path, set SP to it on fast path

address entry_point = __ pc();
Label xreturn_path;

// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
  Label slow_path;
  // If we need a safepoint check, generate full interpreter entry.
  ExternalAddress state(SafepointSynchronize::address_of_state());
  __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
           SafepointSynchronize::_not_synchronized);

  __ jcc(Assembler::notEqual, slow_path);
  // ASM/C++ Interpreter
  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  // rbx,: method
  // rcx: receiver
  __ movptr(rax, Address(rsp, wordSize));

  // check if local 0 != NULL and read field
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // read first instruction word and extract bytecode @ 1 and index @ 2
  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
  __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
  // Shift codes right to get the index on the right.
  // The bytecode fetched looks like <index><0xb4><0x2a>
  __ shrl(rdx, 2*BitsPerByte);
  __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
  __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

  // rax,: local 0
  // rbx,: method
  // rcx: receiver - do not destroy since it is needed for slow path!
  // rcx: scratch
  // rdx: constant pool cache index
  // rdi: constant pool cache
  // rsi: sender sp

  // check if getfield has been resolved and read constant pool cache entry
  // check the validity of the cache entry by testing whether _indices field
  // contains Bytecode::_getfield in b1 byte.
  assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
  __ movl(rcx,
          Address(rdi,
                  rdx,
                  Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
  __ shrl(rcx, 2*BitsPerByte);
  __ andl(rcx, 0xFF);
  __ cmpl(rcx, Bytecodes::_getfield);
  __ jcc(Assembler::notEqual, slow_path);

  // Note: constant pool entry is not valid before bytecode is resolved
  __ movptr(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
  __ movl(rdx,
          Address(rdi,
                  rdx,
                  Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

  Label notByte, notShort, notChar;
  const Address field_address (rax, rcx, Address::times_1);

  // Need to differentiate between igetfield, agetfield, bgetfield etc.
  // because they are different sizes.
  // Use the type from the constant pool cache
  __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask rdx after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();
  __ cmpl(rdx, btos);
  __ jcc(Assembler::notEqual, notByte);
  __ load_signed_byte(rax, field_address);
  __ jmp(xreturn_path);

  __ bind(notByte);
  __ cmpl(rdx, stos);
  __ jcc(Assembler::notEqual, notShort);
  __ load_signed_short(rax, field_address);
  __ jmp(xreturn_path);

  __ bind(notShort);
  __ cmpl(rdx, ctos);
  __ jcc(Assembler::notEqual, notChar);
  __ load_unsigned_short(rax, field_address);
  __ jmp(xreturn_path);

  __ bind(notChar);
#ifdef ASSERT
  Label okay;
  __ cmpl(rdx, atos);
  __ jcc(Assembler::equal, okay);
  __ cmpl(rdx, itos);
  __ jcc(Assembler::equal, okay);
  __ stop("what type is this?");
  __ bind(okay);
#endif // ASSERT
  // All the rest are a 32 bit wordsize
  // This is ok for now. Since fast accessors should be going away
  __ movptr(rax, field_address);

  __ bind(xreturn_path);

  // _ireturn/_areturn
  __ pop(rdi);      // get return address
  __ mov(rsp, rsi); // set sp to sender sp
  __ jmp(rdi);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);

  (void) generate_normal_entry(false);
  return entry_point;
}
return NULL;

}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
@ -862,7 +723,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}

/**

@ -1557,100 +1418,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx,: Method*
// rcx: receiver
//
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see interpreter_x86.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved rsi          ]
// [ current rdi        ]
// [ Method*            ]
// [ saved rbp,         ] <--- rbp,
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- rdi

address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;

switch (kind) {
case Interpreter::zerolocals             :                                                       break;
case Interpreter::zerolocals_synchronized: synchronized = true;                                  break;
case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);  break;
case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();       break;
case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();    break;
case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();    break;

case Interpreter::java_lang_math_sin     : // fall thru
case Interpreter::java_lang_math_cos     : // fall thru
case Interpreter::java_lang_math_tan     : // fall thru
case Interpreter::java_lang_math_abs     : // fall thru
case Interpreter::java_lang_math_log     : // fall thru
case Interpreter::java_lang_math_log10   : // fall thru
case Interpreter::java_lang_math_sqrt    : // fall thru
case Interpreter::java_lang_math_pow     : // fall thru
case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);    break;
case Interpreter::java_lang_ref_reference_get
                                         : entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
                                         : entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
                                         : // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                         : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
  fatal(err_msg("unexpected method kind: %d", kind));
  break;
}

if (entry_point) return entry_point;

return ig_this->generate_normal_entry(synchronized);

}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
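The removed generate_method_entry above is essentially a dispatcher: a switch over method kinds that asks a specialized generator for an entry point and falls back to the normal interpreter entry when the specialist declines. A standalone C++ sketch of that dispatch shape, with all names hypothetical stand-ins rather than HotSpot APIs:

#include <cstdio>

enum MethodKind { kZeroLocals, kEmpty, kAccessor, kAbstract };

static const char* empty_entry()    { return "empty stub"; }
static const char* accessor_entry() { return "accessor stub"; }
static const char* normal_entry()   { return "normal stub"; }

// Specialized generators may decline (return nullptr), in which case
// the caller falls back to the normal interpreter entry.
static const char* generate_method_entry(MethodKind kind) {
  const char* entry = nullptr;
  switch (kind) {
    case kEmpty:    entry = empty_entry();    break;
    case kAccessor: entry = accessor_entry(); break;
    default:        break; // zerolocals, abstract, ... take the fallback
  }
  return entry ? entry : normal_entry();
}

int main() {
  std::printf("%s\n", generate_method_entry(kAccessor));   // accessor stub
  std::printf("%s\n", generate_method_entry(kZeroLocals)); // normal stub
}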
@ -400,7 +400,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (wasn't
// obvious in generate_method_entry) so the guard should work for them
// obvious in generate_fixed_frame) so the guard should work for them
// too.
//
// Args:

@ -411,8 +411,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
//      rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

// monitor entry size: see picture of stack set
// (generate_method_entry) and frame_amd64.hpp
// monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

// total overhead size: entry_size + (saved rbp through expr stack

@ -600,153 +599,6 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved, otherwise drop
// into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx: Method*

// r13: senderSP must be preserved for slow path, set SP to it on fast path

address entry_point = __ pc();
Label xreturn_path;

// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
  //       thereof; parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  Label slow_path;
  // If we need a safepoint check, generate full interpreter entry.
  __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
           SafepointSynchronize::_not_synchronized);

  __ jcc(Assembler::notEqual, slow_path);
  // rbx: method
  __ movptr(rax, Address(rsp, wordSize));

  // check if local 0 != NULL and read field
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, slow_path);

  // read first instruction word and extract bytecode @ 1 and index @ 2
  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
  __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
  // Shift codes right to get the index on the right.
  // The bytecode fetched looks like <index><0xb4><0x2a>
  __ shrl(rdx, 2 * BitsPerByte);
  __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
  __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

  // rax: local 0
  // rbx: method
  // rdx: constant pool cache index
  // rdi: constant pool cache

  // check if getfield has been resolved and read constant pool cache entry
  // check the validity of the cache entry by testing whether _indices field
  // contains Bytecode::_getfield in b1 byte.
  assert(in_words(ConstantPoolCacheEntry::size()) == 4,
         "adjust shift below");
  __ movl(rcx,
          Address(rdi,
                  rdx,
                  Address::times_8,
                  ConstantPoolCache::base_offset() +
                  ConstantPoolCacheEntry::indices_offset()));
  __ shrl(rcx, 2 * BitsPerByte);
  __ andl(rcx, 0xFF);
  __ cmpl(rcx, Bytecodes::_getfield);
  __ jcc(Assembler::notEqual, slow_path);

  // Note: constant pool entry is not valid before bytecode is resolved
  __ movptr(rcx,
            Address(rdi,
                    rdx,
                    Address::times_8,
                    ConstantPoolCache::base_offset() +
                    ConstantPoolCacheEntry::f2_offset()));
  // edx: flags
  __ movl(rdx,
          Address(rdi,
                  rdx,
                  Address::times_8,
                  ConstantPoolCache::base_offset() +
                  ConstantPoolCacheEntry::flags_offset()));

  Label notObj, notInt, notByte, notShort;
  const Address field_address(rax, rcx, Address::times_1);

  // Need to differentiate between igetfield, agetfield, bgetfield etc.
  // because they are different sizes.
  // Use the type from the constant pool cache
  __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
  // Make sure we don't need to mask edx after the above shift
  ConstantPoolCacheEntry::verify_tos_state_shift();

  __ cmpl(rdx, atos);
  __ jcc(Assembler::notEqual, notObj);
  // atos
  __ load_heap_oop(rax, field_address);
  __ jmp(xreturn_path);

  __ bind(notObj);
  __ cmpl(rdx, itos);
  __ jcc(Assembler::notEqual, notInt);
  // itos
  __ movl(rax, field_address);
  __ jmp(xreturn_path);

  __ bind(notInt);
  __ cmpl(rdx, btos);
  __ jcc(Assembler::notEqual, notByte);
  // btos
  __ load_signed_byte(rax, field_address);
  __ jmp(xreturn_path);

  __ bind(notByte);
  __ cmpl(rdx, stos);
  __ jcc(Assembler::notEqual, notShort);
  // stos
  __ load_signed_short(rax, field_address);
  __ jmp(xreturn_path);

  __ bind(notShort);
#ifdef ASSERT
  Label okay;
  __ cmpl(rdx, ctos);
  __ jcc(Assembler::equal, okay);
  __ stop("what type is this?");
  __ bind(okay);
#endif
  // ctos
  __ load_unsigned_short(rax, field_address);

  __ bind(xreturn_path);

  // _ireturn/_areturn
  __ pop(rdi);
  __ mov(rsp, r13);
  __ jmp(rdi);
  __ ret(0);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  (void) generate_normal_entry(false);
} else {
  (void) generate_normal_entry(false);
}

return entry_point;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS

@ -773,8 +625,6 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
// This code is based on generate_accessor_entry.
//
// rbx: Method*

// r13: senderSP must be preserved for slow path, set SP to it on fast path

@ -832,7 +682,7 @@ address InterpreterGenerator::generate_Reference_get_entry(void) {

// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
return generate_accessor_entry();
return generate_jump_to_normal_entry();
}

/**

@ -1566,100 +1416,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods. These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special purpose entries
// that are really entry and interpretation all in one. These are for
// trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rbx: Method*
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_amd64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- rsp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved r13          ]
// [ current r14        ]
// [ Method*            ]
// [ saved ebp          ] <--- rbp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- r14

address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
// determine code generation flags
bool synchronized = false;
address entry_point = NULL;
InterpreterGenerator* ig_this = (InterpreterGenerator*)this;

switch (kind) {
case Interpreter::zerolocals             :                                                       break;
case Interpreter::zerolocals_synchronized: synchronized = true;                                  break;
case Interpreter::native                 : entry_point = ig_this->generate_native_entry(false); break;
case Interpreter::native_synchronized    : entry_point = ig_this->generate_native_entry(true);  break;
case Interpreter::empty                  : entry_point = ig_this->generate_empty_entry();       break;
case Interpreter::accessor               : entry_point = ig_this->generate_accessor_entry();    break;
case Interpreter::abstract               : entry_point = ig_this->generate_abstract_entry();    break;

case Interpreter::java_lang_math_sin     : // fall thru
case Interpreter::java_lang_math_cos     : // fall thru
case Interpreter::java_lang_math_tan     : // fall thru
case Interpreter::java_lang_math_abs     : // fall thru
case Interpreter::java_lang_math_log     : // fall thru
case Interpreter::java_lang_math_log10   : // fall thru
case Interpreter::java_lang_math_sqrt    : // fall thru
case Interpreter::java_lang_math_pow     : // fall thru
case Interpreter::java_lang_math_exp     : entry_point = ig_this->generate_math_entry(kind);    break;
case Interpreter::java_lang_ref_reference_get
                                         : entry_point = ig_this->generate_Reference_get_entry(); break;
case Interpreter::java_util_zip_CRC32_update
                                         : entry_point = ig_this->generate_CRC32_update_entry(); break;
case Interpreter::java_util_zip_CRC32_updateBytes
                                         : // fall thru
case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                         : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
default:
  fatal(err_msg("unexpected method kind: %d", kind));
  break;
}

if (entry_point) {
  return entry_point;
}

return ig_this->generate_normal_entry(synchronized);
}

// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
@ -27,6 +27,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_x86.hpp"

@ -514,7 +515,7 @@ void VM_Version::get_processor_features() {
               (supports_tscinv() ? ", tscinv": ""),
               (supports_bmi1() ? ", bmi1" : ""),
               (supports_bmi2() ? ", bmi2" : ""));
_features_str = strdup(buf);
_features_str = os::strdup(buf);

// UseSSE is set to the smaller of what hardware supports and what
// the command line requires.  I.e., you cannot set UseSSE to 2 on

@ -559,7 +560,7 @@ void VM_Version::get_processor_features() {
  FLAG_SET_DEFAULT(UseCLMUL, false);
}

if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) {
if (UseCLMUL && (UseSSE > 2)) {
  if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
    UseCRC32Intrinsics = true;
  }

@ -805,6 +806,21 @@ void VM_Version::get_processor_features() {
      }
    }
  }
  if ((cpu_family() == 0x06) &&
      ((extended_cpu_model() == 0x36) || // Centerton
       (extended_cpu_model() == 0x37) || // Silvermont
       (extended_cpu_model() == 0x4D))) {
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(OptoScheduling)) {
      OptoScheduling = true;
    }
#endif
    if (supports_sse4_2()) { // Silvermont
      if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
        UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
      }
    }
  }
}

// Use count leading zeros count instruction if available.

@ -892,23 +908,25 @@ void VM_Version::get_processor_features() {
AllocatePrefetchDistance = allocate_prefetch_distance();
AllocatePrefetchStyle    = allocate_prefetch_style();

if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
  if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
if (is_intel() && cpu_family() == 6 && supports_sse3()) {
  if (AllocatePrefetchStyle == 2) { // watermark prefetching on Core
#ifdef _LP64
    AllocatePrefetchDistance = 384;
#else
    AllocatePrefetchDistance = 320;
#endif
  }
  if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
  if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
    AllocatePrefetchDistance = 192;
    AllocatePrefetchLines = 4;
  }
#ifdef COMPILER2
  if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
  if (supports_sse4_2()) {
    if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }
#endif
}
assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
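The Centerton/Silvermont check above keys off cpuid family 0x06 plus the extended model number. For reference, a minimal standalone sketch of how that family/extended-model pair is decoded from cpuid leaf 1 (GCC/Clang only; not HotSpot code, which wraps the same bit fields in its own accessors):

#include <cpuid.h>
#include <stdio.h>

int main() {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return 1;
  unsigned family = (eax >> 8) & 0xF;
  unsigned model  = (eax >> 4) & 0xF;
  // For families 0x6 and 0xF the extended model bits [19:16] are prepended.
  if (family == 0x6 || family == 0xF) {
    model |= ((eax >> 16) & 0xF) << 4;
  }
  printf("family 0x%02x, extended model 0x%02x\n", family, model);
  // e.g. family 0x06 with model 0x37 is the Silvermont case checked above
  return 0;
}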
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *

@ -831,60 +831,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
return generate_entry((address) CppInterpreter::normal_entry);
}

address AbstractInterpreterGenerator::generate_method_entry(
    AbstractInterpreter::MethodKind kind) {
address entry_point = NULL;

switch (kind) {
case Interpreter::zerolocals:
case Interpreter::zerolocals_synchronized:
  break;

case Interpreter::native:
  entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
  break;

case Interpreter::native_synchronized:
  entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
  break;

case Interpreter::empty:
  entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();
  break;

case Interpreter::accessor:
  entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();
  break;

case Interpreter::abstract:
  entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
  break;

case Interpreter::java_lang_math_sin:
case Interpreter::java_lang_math_cos:
case Interpreter::java_lang_math_tan:
case Interpreter::java_lang_math_abs:
case Interpreter::java_lang_math_log:
case Interpreter::java_lang_math_log10:
case Interpreter::java_lang_math_sqrt:
case Interpreter::java_lang_math_pow:
case Interpreter::java_lang_math_exp:
  entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
  break;

case Interpreter::java_lang_ref_reference_get:
  entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
  break;

default:
  ShouldNotReachHere();
}

if (entry_point == NULL)
  entry_point = ((InterpreterGenerator*) this)->generate_normal_entry(false);

return entry_point;
}

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : CppInterpreterGenerator(code) {

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *

@ -61,6 +61,12 @@ define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS

define_pd_global(uintx, TypeProfileLevel, 0);

#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
  product(bool, UseFastEmptyMethods, true,                                 \
          "Use fast method entry code for empty methods")                  \
                                                                           \
  product(bool, UseFastAccessorMethods, true,                              \
          "Use fast method entry code for accessor methods")               \
                                                                           \

#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *

@ -39,4 +39,7 @@
address generate_accessor_entry();
address generate_Reference_get_entry();

// Not supported
address generate_CRC32_update_entry() { return NULL; }
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP

@ -58,6 +58,7 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"

@ -378,10 +379,10 @@ void os::Aix::query_multipage_support() {
// default should be 4K.
size_t data_page_size = SIZE_4K;
{
  void* p = ::malloc(SIZE_16M);
  void* p = os::malloc(SIZE_16M, mtInternal);
  guarantee(p != NULL, "malloc failed");
  data_page_size = os::Aix::query_pagesize(p);
  ::free(p);
  os::free(p);
}

// query default shm page size (LDR_CNTRL SHMPSIZE)

@ -24,6 +24,8 @@

#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "loadlib_aix.hpp"
#include "porting_aix.hpp"
#include "utilities/debug.hpp"

@ -83,7 +85,7 @@ class fixed_strings {
  while (n) {
    node* p = n;
    n = n->next;
    free(p->v);
    os::free(p->v);
    delete p;
  }
}

@ -95,7 +97,7 @@ class fixed_strings {
    }
  }
  node* p = new node;
  p->v = strdup(s);
  p->v = os::strdup_check_oom(s);
  p->next = first;
  first = p;
  return p->v;

@ -2439,23 +2439,25 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}

// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);

return addr;
}

bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
  tkr.record((address)base, bytes);
  return true;
if (MemTracker::tracking_level() > NMT_minimal) {
  Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  int rslt = shmdt(base);
  if (rslt == 0) {
    tkr.record((address)base, bytes);
    return true;
  } else {
    return false;
  }
} else {
  tkr.discard();
  return false;
  return shmdt(base) == 0;
}

}

size_t os::large_page_size() {
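The reworked release path above only materializes a release tracker when NMT is actually recording, so the disabled case pays nothing and never touches tracker state. A standalone C++ sketch of that shape, with every name a hypothetical stand-in for the MemTracker API rather than the real thing:

#include <cstddef>
#include <cstdio>

enum TrackingLevel { NMT_off, NMT_minimal, NMT_detail };
static TrackingLevel g_level = NMT_off;  // set from VM flags in the real code

struct Tracker {
  // records a completed release with the tracking backend
  void record(void* base, std::size_t bytes) {
    std::printf("NMT: released %zu bytes at %p\n", bytes, base);
  }
};

static bool do_release(void* base, std::size_t bytes) {
  (void)base; (void)bytes;
  return true;  // stands in for shmdt()/munmap() etc.
}

bool release_memory_special(void* base, std::size_t bytes) {
  if (g_level > NMT_minimal) {
    Tracker tkr;                          // only constructed when tracking
    bool ok = do_release(base, bytes);
    if (ok) tkr.record(base, bytes);      // record only successful releases
    return ok;
  }
  return do_release(base, bytes);         // fast path: no tracker at all
}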
@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);

return mapAddress;
}

@ -918,7 +918,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);

*addr = mapAddress;
*sizep = size;

@ -3504,9 +3504,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al

assert(is_ptr_aligned(start, alignment), "Must be");

// os::reserve_memory_special will record this memory area.
// Need to release it here to prevent overlapping reservations.
MemTracker::record_virtual_memory_release((address)start, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
  // os::reserve_memory_special will record this memory area.
  // Need to release it here to prevent overlapping reservations.
  Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  tkr.record((address)start, bytes);
}

char* end = start + bytes;

@ -3601,7 +3604,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}

// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
}

return addr;

@ -3617,24 +3620,30 @@ bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
}

bool os::release_memory_special(char* base, size_t bytes) {
assert(UseLargePages, "only for large pages");

MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();

bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
  Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  res = os::Linux::release_memory_special_impl(base, bytes);
  if (res) {
    tkr.record((address)base, bytes);
  }

} else {
  res = os::Linux::release_memory_special_impl(base, bytes);
}
return res;
}

bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
assert(UseLargePages, "only for large pages");
bool res;

if (UseSHM) {
  res = os::Linux::release_memory_special_shm(base, bytes);
} else {
  assert(UseHugeTLBFS, "must be");
  res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
}

if (res) {
  tkr.record((address)base, bytes);
} else {
  tkr.discard();
}

return res;
}

@ -108,6 +108,7 @@ class Linux {
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);

static bool release_memory_special_impl(char* base, size_t bytes);
static bool release_memory_special_shm(char* base, size_t bytes);
static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);

@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);

return mapAddress;
}

@ -924,7 +924,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);

*addr = mapAddress;
*sizep = size;
@ -75,21 +75,41 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
VMError::report_coredump_status(buffer, success);
}

address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n ++;
toSkip++;
#endif

int frame_idx = 0;
int num_of_frames;  // number of frames captured
frame fr = os::current_frame();
while (n > 0 && fr.pc() &&
       !os::is_first_C_frame(&fr) && fr.sender_pc()) {
  fr = os::get_sender_for_C_frame(&fr);
  n --;
while (fr.pc() && frame_idx < frames) {
  if (toSkip > 0) {
    toSkip --;
  } else {
    stack[frame_idx ++] = fr.pc();
  }
  if (fr.fp() == NULL || os::is_first_C_frame(&fr)
      || fr.sender_pc() == NULL || fr.cb() != NULL) break;

  if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
    fr = os::get_sender_for_C_frame(&fr);
  } else {
    break;
  }
}
if (n == 0) {
  return fr.pc();
} else {
  return NULL;
num_of_frames = frame_idx;
for (; frame_idx < frames; frame_idx ++) {
  stack[frame_idx] = NULL;
}

return num_of_frames;
}

bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (::unsetenv(name) == 0);
}

int os::get_last_error() {
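The new get_native_stack contract replaces the old single-PC get_caller_pc: skip a number of innermost frames, fill the caller's buffer, NULL-pad the unused tail, and return how many frames were stored. A standalone sketch of the same contract using glibc's backtrace() instead of HotSpot's frame walker (the helper name mirrors the one above but is otherwise an illustration):

#include <execinfo.h>
#include <stdio.h>

// Skip 'to_skip' innermost frames, store up to 'frames' PCs in 'stack',
// NULL-pad the tail, and return the number of frames actually stored.
static int get_native_stack(void** stack, int frames, int to_skip) {
  void* raw[64];
  int captured = backtrace(raw, 64);  // glibc native stack walk
  int stored = 0;
  for (int i = to_skip; i < captured && stored < frames; i++) {
    stack[stored++] = raw[i];
  }
  for (int i = stored; i < frames; i++) {
    stack[i] = NULL;                  // NULL-terminate the unused slots
  }
  return stored;
}

int main() {
  void* pcs[8];
  int n = get_native_stack(pcs, 8, 1);  // skip the helper's own frame
  printf("captured %d frames, first pc %p\n", n, pcs[0]);
  return 0;
}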
@ -199,23 +199,29 @@ class ArgumentIterator : public StackObj {
// Calls from the door function to check that the client credentials
// match this process. Returns 0 if credentials okay, otherwise -1.
static int check_credentials() {
door_cred_t cred_info;
ucred_t *cred_info = NULL;
int ret = -1; // deny by default

// get client credentials
if (door_cred(&cred_info) == -1) {
  return -1; // unable to get them
if (door_ucred(&cred_info) == -1) {
  return -1; // unable to get them, deny
}

// get our euid/eguid (probably could cache these)
uid_t euid = geteuid();
gid_t egid = getegid();

// check that the effective uid/gid matches - discuss this with Jeff.
if (cred_info.dc_euid == euid && cred_info.dc_egid == egid) {
  return 0;  // okay
} else {
  return -1; // denied
// get euid/egid from the ucred
uid_t ucred_euid = ucred_geteuid(cred_info);
gid_t ucred_egid = ucred_getegid(cred_info);

// check that the effective uid/gid matches
if (ucred_euid == euid && ucred_egid == egid) {
  ret = 0; // allow
}

ucred_free(cred_info);
return ret;
}

@ -770,7 +770,8 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
                                                     size, CURRENT_PC, mtInternal);

return mapAddress;
}

@ -941,7 +942,8 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
                                                     size, CURRENT_PC, mtInternal);

*addr = mapAddress;
*sizep = size;

@ -138,9 +138,8 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
// Workaround for issue when a custom launcher doesn't call
// DestroyJavaVM and NMT is trying to track memory when free is
// called from a static destructor
if (MemTracker::is_on()) {
  MemTracker::shutdown(MemTracker::NMT_normal);
}
MemTracker::shutdown();

break;
default:
  break;

@ -163,6 +162,10 @@ bool os::getenv(const char* name, char* buffer, int len) {
return result > 0 && result < len;
}

bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {

@ -319,15 +322,16 @@ extern "C" void breakpoint() {
 * So far, this method is only used by Native Memory Tracking, which is
 * only supported on Windows XP or later.
 */
address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n++;
toSkip ++;
#endif
address pc;
if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
  return pc;
int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
                                                     (PVOID*)stack, NULL);
for (int index = captured; index < frames; index ++) {
  stack[index] = NULL;
}
return NULL;
return captured;
}

@ -2901,7 +2905,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                 PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
os::release_memory(p_buf, bytes + chunk_size);

// we still need to round up to a page boundary (in case we are using large pages)

@ -2967,7 +2971,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// need to create a dummy 'reserve' record to match
// the release.
MemTracker::record_virtual_memory_reserve((address)p_buf,
                                          bytes_to_release, mtNone, CALLER_PC);
                                          bytes_to_release, CALLER_PC);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT

@ -2986,11 +2990,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
}
// Although the memory is allocated individually, it is returned as one.
// NMT records it as one block.
address pc = CALLER_PC;
if ((flags & MEM_COMMIT) != 0) {
  MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
  MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
} else {
  MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
  MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
}

// made it this far, success

@ -3188,8 +3191,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, boo
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
if (res != NULL) {
  address pc = CALLER_PC;
  MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
  MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
}

return res;
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "os_windows.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"

@ -1388,7 +1389,7 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
// the file has been successfully created and the file mapping
// object has been created.
sharedmem_fileHandle = fh;
sharedmem_fileName = strdup(filename);
sharedmem_fileName = os::strdup(filename);

return fmh;
}

@ -1498,7 +1499,8 @@ static char* mapping_create_shared(size_t size) {
(void)memset(mapAddress, '\0', size);

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
                                                     size, CURRENT_PC, mtInternal);

return (char*) mapAddress;
}

@ -1680,7 +1682,8 @@ static void open_file_mapping(const char* user, int vmid,
}

// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
                                                     CURRENT_PC, mtInternal);

*addrp = (char*)mapAddress;

@ -1834,10 +1837,14 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
  return;
}

MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
// it does not go through os api, the operation has to record from here
tkr.record((address)addr, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
  // it does not go through os api, the operation has to record from here
  Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  remove_file_mapping(addr);
  tkr.record((address)addr, bytes);
} else {
  remove_file_mapping(addr);
}
}

char* PerfMemory::backing_store_filename() {

@ -23,6 +23,8 @@
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/os.hpp"
#include "vm_version_sparc.hpp"

@ -48,7 +50,7 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
// All SI defines used below must be supported.
guarantee(bufsize != -1, "must be supported");

char* buf = (char*) malloc(bufsize);
char* buf = (char*) os::malloc(bufsize, mtInternal);

if (buf == NULL)
  return;

@ -60,7 +62,7 @@ static void do_sysinfo(int si, const char* string, int* features, int mask) {
  }
}

free(buf);
os::free(buf);
}

int VM_Version::platform_features(int features) {

@ -161,7 +163,7 @@ int VM_Version::platform_features(int features) {

char tmp;
size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
char* buf = (char*) malloc(bufsize);
char* buf = (char*) os::malloc(bufsize, mtInternal);

if (buf != NULL) {
  if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {

@ -184,7 +186,7 @@ int VM_Version::platform_features(int features) {
    if (vis[3] == '2') features |= vis2_instructions_m;
  }
}
free(buf);
os::free(buf);
}
}

@ -228,7 +230,7 @@ int VM_Version::platform_features(int features) {
}
#endif
// Convert to UPPER case before compare.
char* impl = strdup(implementation);
char* impl = os::strdup_check_oom(implementation);

for (int i = 0; impl[i] != 0; i++)
  impl[i] = (char)toupper((uint)impl[i]);

@ -252,7 +254,7 @@ int VM_Version::platform_features(int features) {
    implementation = "SPARC";
  }
}
free((void*)impl);
os::free((void*)impl);
break;
}
} // for(

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -997,7 +997,7 @@ void ArchDesc::build_pipe_classes(FILE *fp_cpp) {
int nopcnt = 0;
for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; nopcnt++ );

fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d], Compile *C) {\n", nopcnt);
fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d]) {\n", nopcnt);
int i = 0;
for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; i++ ) {
  fprintf(fp_cpp, "  nop_list[%d] = (MachNode *) new %sNode();\n", i, nop);

@ -1369,7 +1369,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
fprintf(fp, "      ra_->add_reference(root, inst%d);\n", inst_num);
fprintf(fp, "      ra_->set_oop (root, ra_->is_oop(inst%d));\n", inst_num);
fprintf(fp, "      ra_->set_pair(root->_idx, ra_->get_reg_second(inst%d), ra_->get_reg_first(inst%d));\n", inst_num, inst_num);
fprintf(fp, "      root->_opnds[0] = inst%d->_opnds[0]->clone(C); // result\n", inst_num);
fprintf(fp, "      root->_opnds[0] = inst%d->_opnds[0]->clone(); // result\n", inst_num);
fprintf(fp, "      // ----- Done with initial setup -----\n");
} else {
  if( (op_form == NULL) || (op_form->is_base_constant(globals) == Form::none) ) {

@ -1382,7 +1382,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
} else {
  fprintf(fp, "        // no ideal edge for constants after matching\n");
}
fprintf(fp, "        root->_opnds[%d] = inst%d->_opnds[%d]->clone(C);\n",
fprintf(fp, "        root->_opnds[%d] = inst%d->_opnds[%d]->clone();\n",
        opnds_index, inst_num, inst_op_num );
}
++opnds_index;

@ -1402,7 +1402,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
// Define the Peephole method for an instruction node
void ArchDesc::definePeephole(FILE *fp, InstructForm *node) {
// Generate Peephole function header
fprintf(fp, "MachNode *%sNode::peephole( Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile* C ) {\n", node->_ident);
fprintf(fp, "MachNode *%sNode::peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted) {\n", node->_ident);
fprintf(fp, "  bool  matches = true;\n");

// Identify the maximum instruction position,

@ -1593,7 +1593,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
}

const char *resultOper = new_inst->reduce_result();
fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator( %s, C ));\n",
fprintf(fp,"  n%d->set_opnd_array(0, state->MachOperGenerator(%s));\n",
        cnt, machOperEnum(resultOper));

// get the formal operand NameList

@ -1634,7 +1634,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
// If there is no use of the created operand, just skip it
if (new_pos != NameList::Not_in_list) {
  // Copy the operand from the original made above
  fprintf(fp,"  n%d->set_opnd_array(%d, op%d->clone(C)); // %s\n",
  fprintf(fp,"  n%d->set_opnd_array(%d, op%d->clone()); // %s\n",
          cnt, new_pos, exp_pos-node->num_opnds(), opid);
  // Check for who defines this operand & add edge if needed
  fprintf(fp,"  if(tmp%d != NULL)\n", exp_pos);

@ -1662,7 +1662,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
new_pos = new_inst->operand_position(parameter,Component::USE);
if (new_pos != -1) {
  // Copy the operand from the ExpandNode to the new node
  fprintf(fp,"  n%d->set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
  fprintf(fp,"  n%d->set_opnd_array(%d, opnd_array(%d)->clone()); // %s\n",
          cnt, new_pos, exp_pos, opid);
  // For each operand add appropriate input edges by looking at tmp's
  fprintf(fp,"  if(tmp%d == this) {\n", exp_pos);

@ -1729,14 +1729,14 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
  declared_def = true;
}
if (op && op->_interface && op->_interface->is_RegInterface()) {
  fprintf(fp,"  def = new MachTempNode(state->MachOperGenerator( %s, C ));\n",
  fprintf(fp,"  def = new MachTempNode(state->MachOperGenerator(%s));\n",
          machOperEnum(op->_ident));
  fprintf(fp,"  add_req(def);\n");
  // The operand for TEMP is already constructed during
  // this mach node construction, see buildMachNode().
  //
  // int idx = node->operand_position_format(comp->_name);
  // fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator( %s, C ));\n",
  // fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator(%s));\n",
  //         idx, machOperEnum(op->_ident));
} else {
  assert(false, "can't have temps which aren't registers");

@ -1802,7 +1802,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
uint j = node->unique_opnds_idx(i);
// unique_opnds_idx(i) is unique if unique_opnds_idx(j) is not unique.
if( j != node->unique_opnds_idx(j) ) {
  fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone(C)); // %s\n",
  fprintf(fp,"  set_opnd_array(%d, opnd_array(%d)->clone()); // %s\n",
          new_num_opnds, i, comp->_name);
  // delete not unique edges here
  fprintf(fp,"  for(unsigned i = 0; i < num%d; i++) {\n", i);

@ -2839,12 +2839,12 @@ static void defineIn_RegMask(FILE *fp, FormDict &globals, OperandForm &oper) {

// generate code to create a clone for a class derived from MachOper
//
// (0)  MachOper  *MachOperXOper::clone(Compile* C) const {
// (0)  MachOper  *MachOperXOper::clone() const {
// (1)    return new MachXOper( _ccode, _c0, _c1, ..., _cn);
// (2)  }
//
static void defineClone(FILE *fp, FormDict &globalNames, OperandForm &oper) {
fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper._ident);
fprintf(fp,"MachOper *%sOper::clone() const {\n", oper._ident);
// Check for constants that need to be copied over
const int num_consts = oper.num_consts(globalNames);
const bool is_ideal_bool = oper.is_ideal_bool();

@ -3043,7 +3043,7 @@ void ArchDesc::define_oper_interface(FILE *fp, OperandForm &oper, FormDict &glob
static void define_fill_new_machnode(bool used, FILE *fp_cpp) {
fprintf(fp_cpp, "\n");
fprintf(fp_cpp, "// Copy _idx, inputs and operands to new node\n");
fprintf(fp_cpp, "void MachNode::fill_new_machnode( MachNode* node, Compile* C) const {\n");
fprintf(fp_cpp, "void MachNode::fill_new_machnode(MachNode* node) const {\n");
if( !used ) {
  fprintf(fp_cpp, "  // This architecture does not have cisc or short branch instructions\n");
  fprintf(fp_cpp, "  ShouldNotCallThis();\n");

@ -3064,7 +3064,7 @@ static void define_fill_new_machnode(bool used, FILE *fp_cpp) {
fprintf(fp_cpp, "  MachOper **to = node->_opnds;\n");
fprintf(fp_cpp, "  for( int i = 0; i < nopnds; i++ ) {\n");
fprintf(fp_cpp, "    if( i != cisc_operand() ) \n");
fprintf(fp_cpp, "      to[i] = _opnds[i]->clone(C);\n");
fprintf(fp_cpp, "      to[i] = _opnds[i]->clone();\n");
fprintf(fp_cpp, "  }\n");
fprintf(fp_cpp, "}\n");
}

@ -3105,7 +3105,7 @@ void ArchDesc::defineClasses(FILE *fp) {
if ( strcmp(oper->_ident,"label") == 0 ) {
  defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper);

  fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper->_ident);
|
||||
fprintf(fp,"MachOper *%sOper::clone() const {\n", oper->_ident);
|
||||
fprintf(fp," return new %sOper(_label, _block_num);\n", oper->_ident);
|
||||
fprintf(fp,"}\n");
|
||||
|
||||
@ -3124,7 +3124,7 @@ void ArchDesc::defineClasses(FILE *fp) {
|
||||
if ( strcmp(oper->_ident,"method") == 0 ) {
|
||||
defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper);
|
||||
|
||||
fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper->_ident);
|
||||
fprintf(fp,"MachOper *%sOper::clone() const {\n", oper->_ident);
|
||||
fprintf(fp," return new %sOper(_method);\n", oper->_ident);
|
||||
fprintf(fp,"}\n");
|
||||
|
||||
@ -3845,7 +3845,7 @@ void ArchDesc::buildMachOperGenerator(FILE *fp_cpp) {
|
||||
"// that invokes 'new' on the corresponding class constructor.\n");
|
||||
fprintf(fp_cpp, "\n");
|
||||
fprintf(fp_cpp, "MachOper *State::MachOperGenerator");
|
||||
fprintf(fp_cpp, "(int opcode, Compile* C)");
|
||||
fprintf(fp_cpp, "(int opcode)");
|
||||
fprintf(fp_cpp, "{\n");
|
||||
fprintf(fp_cpp, "\n");
|
||||
fprintf(fp_cpp, " switch(opcode) {\n");
|
||||
@ -3921,7 +3921,7 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
|
||||
int index = clist.operand_position(comp->_name, comp->_usedef, inst);
|
||||
const char *opcode = machOperEnum(comp->_type);
|
||||
fprintf(fp_cpp, "%s node->set_opnd_array(%d, ", indent, index);
|
||||
fprintf(fp_cpp, "MachOperGenerator(%s, C));\n", opcode);
|
||||
fprintf(fp_cpp, "MachOperGenerator(%s));\n", opcode);
|
||||
}
|
||||
}
|
||||
else if ( inst->is_chain_of_constant(_globalNames, opType) ) {
|
||||
@ -3978,7 +3978,7 @@ void InstructForm::declare_cisc_version(ArchDesc &AD, FILE *fp_hpp) {
|
||||
InstructForm *inst_cisc = cisc_spill_alternate();
|
||||
if (inst_cisc != NULL) {
|
||||
fprintf(fp_hpp, " virtual int cisc_operand() const { return %d; }\n", cisc_spill_operand());
|
||||
fprintf(fp_hpp, " virtual MachNode *cisc_version(int offset, Compile* C);\n");
|
||||
fprintf(fp_hpp, " virtual MachNode *cisc_version(int offset);\n");
|
||||
fprintf(fp_hpp, " virtual void use_cisc_RegMask();\n");
|
||||
fprintf(fp_hpp, " virtual const RegMask *cisc_RegMask() const { return _cisc_RegMask; }\n");
|
||||
}
|
||||
@ -4008,7 +4008,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
|
||||
// Construct CISC version of this instruction
|
||||
fprintf(fp_cpp, "\n");
|
||||
fprintf(fp_cpp, "// Build CISC version of this instruction\n");
|
||||
fprintf(fp_cpp, "MachNode *%sNode::cisc_version( int offset, Compile* C ) {\n", this->_ident);
|
||||
fprintf(fp_cpp, "MachNode *%sNode::cisc_version(int offset) {\n", this->_ident);
|
||||
// Create the MachNode object
|
||||
fprintf(fp_cpp, " %sNode *node = new %sNode();\n", name, name);
|
||||
// Fill in the bottom_type where requested
|
||||
@ -4023,7 +4023,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
|
||||
|
||||
fprintf(fp_cpp, "\n");
|
||||
fprintf(fp_cpp, " // Copy _idx, inputs and operands to new node\n");
|
||||
fprintf(fp_cpp, " fill_new_machnode(node, C);\n");
|
||||
fprintf(fp_cpp, " fill_new_machnode(node);\n");
|
||||
// Construct operand to access [stack_pointer + offset]
|
||||
fprintf(fp_cpp, " // Construct operand to access [stack_pointer + offset]\n");
|
||||
fprintf(fp_cpp, " node->set_opnd_array(cisc_operand(), new %sOper(offset));\n", cisc_oper_name);
|
||||
@ -4042,7 +4042,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
|
||||
// Build prototypes for short branch methods
|
||||
void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {
|
||||
if (has_short_branch_form()) {
|
||||
fprintf(fp_hpp, " virtual MachNode *short_branch_version(Compile* C);\n");
|
||||
fprintf(fp_hpp, " virtual MachNode *short_branch_version();\n");
|
||||
}
|
||||
}
|
||||
|
||||
@ -4055,7 +4055,7 @@ bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
|
||||
|
||||
// Construct short_branch_version() method.
|
||||
fprintf(fp_cpp, "// Build short branch version of this instruction\n");
|
||||
fprintf(fp_cpp, "MachNode *%sNode::short_branch_version(Compile* C) {\n", this->_ident);
|
||||
fprintf(fp_cpp, "MachNode *%sNode::short_branch_version() {\n", this->_ident);
|
||||
// Create the MachNode object
|
||||
fprintf(fp_cpp, " %sNode *node = new %sNode();\n", name, name);
|
||||
if( is_ideal_if() ) {
|
||||
@ -4071,7 +4071,7 @@ bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
|
||||
// Short branch version must use same node index for access
|
||||
// through allocator's tables
|
||||
fprintf(fp_cpp, " // Copy _idx, inputs and operands to new node\n");
|
||||
fprintf(fp_cpp, " fill_new_machnode(node, C);\n");
|
||||
fprintf(fp_cpp, " fill_new_machnode(node);\n");
|
||||
|
||||
// Return result and exit scope
|
||||
fprintf(fp_cpp, " return node;\n");
|
||||
@ -4097,7 +4097,7 @@ void ArchDesc::buildMachNodeGenerator(FILE *fp_cpp) {
|
||||
"// that invokes 'new' on the corresponding class constructor.\n");
|
||||
fprintf(fp_cpp, "\n");
|
||||
fprintf(fp_cpp, "MachNode *State::MachNodeGenerator");
|
||||
fprintf(fp_cpp, "(int opcode, Compile* C)");
|
||||
fprintf(fp_cpp, "(int opcode)");
|
||||
fprintf(fp_cpp, "{\n");
|
||||
fprintf(fp_cpp, " switch(opcode) {\n");
|
||||
|
||||
|
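The adlc hunks above all make the same API change: the Compile* context argument is dropped from clone(), peephole(), cisc_version(), short_branch_version() and the MachOper/MachNode generator methods. A minimal sketch of the idea, using invented stand-in types (HotSpot's real Compile object is far larger, but it is reachable through a Compile::current()-style accessor, so callees that still need it can fetch it instead of every signature threading it through):

#include <cstdio>

// Hypothetical stand-in for HotSpot's Compile object.
struct Compile {
    static Compile* current() { return &instance; }  // context is globally reachable
    static Compile instance;
};
Compile Compile::instance;

struct MachOper {
    // Before: virtual MachOper* clone(Compile* C) const;
    // After: the parameter disappears; a callee that needs the context
    // can call Compile::current() itself.
    virtual MachOper* clone() const { return new MachOper(*this); }
    virtual ~MachOper() {}
};

int main() {
    MachOper op;
    MachOper* copy = op.clone();   // no context argument at the call site
    std::printf("cloned: %p\n", static_cast<void*>(copy));
    delete copy;
    return 0;
}
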
@ -1119,7 +1119,7 @@ void ArchDesc::declare_pipe_classes(FILE *fp_hpp) {
fprintf(fp_hpp, "    _nop_count = %d\n",
_pipeline->_nopcnt);
fprintf(fp_hpp, "  };\n\n");
fprintf(fp_hpp, "  static void initialize_nops(MachNode *nop_list[%d], Compile* C);\n\n",
fprintf(fp_hpp, "  static void initialize_nops(MachNode *nop_list[%d]);\n\n",
_pipeline->_nopcnt);
fprintf(fp_hpp, "#ifndef PRODUCT\n");
fprintf(fp_hpp, "  void dump(outputStream *st = tty) const;\n");
@ -1240,7 +1240,7 @@ void ArchDesc::declareClasses(FILE *fp) {
constant_type, _globalNames);

// Clone function
fprintf(fp,"  virtual MachOper *clone(Compile* C) const;\n");
fprintf(fp,"  virtual MachOper *clone() const;\n");

// Support setting a spill offset into a constant operand.
// We only support setting an 'int' offset, while in the
@ -1718,7 +1718,7 @@ void ArchDesc::declareClasses(FILE *fp) {

// If there is an explicit peephole rule, build it
if ( instr->peepholes() != NULL ) {
fprintf(fp,"  virtual MachNode *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted, Compile *C);\n");
fprintf(fp,"  virtual MachNode *peephole(Block *block, int block_index, PhaseRegAlloc *ra_, int &deleted);\n");
}

// Output the declaration for number of relocation entries
@ -1863,7 +1863,7 @@ void ArchDesc::declareClasses(FILE *fp) {
}
if ( instr->num_post_match_opnds() != 0
|| instr->is_chain_of_constant(_globalNames) ) {
fprintf(fp,"  friend MachNode *State::MachNodeGenerator(int opcode, Compile* C);\n");
fprintf(fp,"  friend MachNode *State::MachNodeGenerator(int opcode);\n");
}
if ( instr->rematerialize(_globalNames, get_registers()) ) {
fprintf(fp,"  // Rematerialize %s\n", instr->_ident);
@ -2071,8 +2071,8 @@ void ArchDesc::defineStateClass(FILE *fp) {
fprintf(fp,"  DEBUG_ONLY( ~State(void); )   // Destructor\n");
fprintf(fp,"\n");
fprintf(fp,"  // Methods created by ADLC and invoked by Reduce\n");
fprintf(fp,"  MachOper *MachOperGenerator( int opcode, Compile* C );\n");
fprintf(fp,"  MachNode *MachNodeGenerator( int opcode, Compile* C );\n");
fprintf(fp,"  MachOper *MachOperGenerator(int opcode);\n");
fprintf(fp,"  MachNode *MachNodeGenerator(int opcode);\n");
fprintf(fp,"\n");
fprintf(fp,"  // Assign a state to a node, definition of method produced by ADLC\n");
fprintf(fp,"  bool DFA( int opcode, const Node *ideal );\n");

@ -269,7 +269,7 @@ address CodeBuffer::decode_begin() {

GrowableArray<int>* CodeBuffer::create_patch_overflow() {
if (_overflow_arena == NULL) {
_overflow_arena = new (mtCode) Arena();
_overflow_arena = new (mtCode) Arena(mtCode);
}
return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}

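The Arena change in this hunk recurs below (c1_Compiler, ciObjectFactory, symbolTable): the native-memory-tracking tag moves from the placement-new operator into the Arena constructor itself, so the attribution travels with the object. A rough, self-contained sketch of that API shape, assuming a toy Arena and tag enum rather than HotSpot's real ones:

#include <cstddef>
#include <cstdio>

// Illustrative memory tags, mirroring HotSpot's mtCode/mtCompiler/mtSymbol flags.
enum MemTag { mtCode, mtCompiler, mtSymbol };

// Toy arena: the tag is now a constructor argument, so every arena is
// attributed to a subsystem at the moment it is created.
class Arena {
public:
    explicit Arena(MemTag tag, size_t init_size = 1024)
        : _tag(tag), _size(init_size) {
        std::printf("arena created, tag=%d, size=%zu\n", (int)_tag, _size);
    }
private:
    MemTag _tag;
    size_t _size;
};

int main() {
    // Before the change: new (mtCode) Arena()  -- the tag lived only in the
    // placement-new expression. After: the constructor itself records it.
    Arena code_arena(mtCode);
    Arena symbol_arena(mtSymbol, 4096);
    return 0;
}
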
@ -48,7 +48,7 @@ Compiler::Compiler() : AbstractCompiler(c1) {

void Compiler::init_c1_runtime() {
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
Runtime1::initialize(buffer_blob);
FrameMap::initialize();
// initialize data structures

@ -30,6 +30,7 @@
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/os.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
// we must have enough patching space so that call can be inserted
@ -848,7 +849,7 @@ void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
stringStream st;
st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
_masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
_masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
_masm->verify_oop(r->as_Register());
#endif

@ -1613,25 +1613,22 @@ void LinearScan::allocate_registers() {
Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;

create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval);
if (has_fpu_registers()) {
create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
#ifdef ASSERT
} else {
// fpu register allocation is omitted because no virtual fpu registers are present
// just check this again...
create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
#endif
}

// allocate cpu registers
create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals,
is_precolored_cpu_interval, is_virtual_cpu_interval);

// allocate fpu registers
create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals,
is_precolored_fpu_interval, is_virtual_fpu_interval);

// the fpu interval allocation cannot be moved down below with the fpu section as
// the cpu_lsw.walk() changes interval positions.

LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
cpu_lsw.walk();
cpu_lsw.finish_allocation();

if (has_fpu_registers()) {
// allocate fpu registers
LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
fpu_lsw.walk();
fpu_lsw.finish_allocation();

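The comment in the rewritten hunk is the key constraint: cpu_lsw.walk() mutates interval positions, so both unhandled lists must be snapshotted before the first walk runs. A tiny illustrative sketch of the same snapshot-before-mutation rule, with invented names:

#include <cstdio>
#include <vector>

// Phase state that the first walk mutates and both list builders read.
struct State { int position = 0; };

static std::vector<int> build_list(const State& s, int base) {
    return std::vector<int>{base + s.position};  // snapshot depends on position
}

int main() {
    State s;
    // Correct order: build both worklists first...
    std::vector<int> cpu = build_list(s, 100);
    std::vector<int> fpu = build_list(s, 200);
    // ...then run the walk, which moves positions.
    s.position = 42;
    std::printf("cpu=%d fpu=%d\n", cpu[0], fpu[0]);  // fpu list unaffected by the walk
    return 0;
}
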
@ -86,7 +86,8 @@ static bool firstEnv = true;

// ------------------------------------------------------------------
// ciEnv::ciEnv
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
: _ciEnv_arena(mtCompiler) {
VM_ENTRY_MARK;

// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
@ -144,7 +145,7 @@ ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
_jvmti_can_pop_frame = false;
}

ciEnv::ciEnv(Arena* arena) {
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
ASSERT_IN_VM;

// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,7 +112,7 @@ void ciObjectFactory::initialize() {
// This Arena is long lived and exists in the resource mark of the
// compiler thread that initializes the initial ciObjectFactory which
// creates the shared ciObjects that all later ciObjectFactories use.
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
ciEnv initial(arena);
ciEnv* env = ciEnv::current();
env->_factory->init_shared_objects();

@ -273,13 +273,17 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi
}

LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
_path = strdup(path);
_path = os::strdup_check_oom(path);
_st = *st;
_meta_index = NULL;
_resolved_entry = NULL;
_has_error = false;
}

LazyClassPathEntry::~LazyClassPathEntry() {
os::free(_path);
}

bool LazyClassPathEntry::is_jar_file() {
return ((_st.st_mode & S_IFREG) == S_IFREG);
}
@ -416,7 +420,7 @@ void ClassLoader::setup_meta_index() {
default:
{
if (!skipCurrentJar && cur_entry != NULL) {
char* new_name = strdup(package_name);
char* new_name = os::strdup_check_oom(package_name);
boot_class_path_packages.append(new_name);
}
}
@ -438,7 +442,7 @@ void ClassLoader::setup_meta_index() {

void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
char* sys_class_path = os::strdup(Arguments::get_sysclasspath());
char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
if (TraceClassLoading && Verbose) {
tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
}
@ -460,6 +464,7 @@ void ClassLoader::setup_bootstrap_search_path() {
end++;
}
}
os::free(sys_class_path);
}

ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {

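The classLoader hunks swap bare strdup() for os::strdup_check_oom(), which never hands the caller a NULL to forget to test. A self-contained approximation of that helper, assuming plain abort() where the real one raises a VM out-of-memory error:

#include <cstdlib>
#include <cstring>
#include <cstdio>

// Hypothetical analogue of os::strdup_check_oom(): like strdup(), except
// that allocation failure terminates the process instead of returning NULL.
static char* strdup_check_oom(const char* s) {
    size_t len = std::strlen(s) + 1;
    char* copy = static_cast<char*>(std::malloc(len));
    if (copy == NULL) {
        std::fprintf(stderr, "native out-of-memory duplicating a string\n");
        std::abort();   // the real helper reports a VM OOM error instead
    }
    std::memcpy(copy, s, len);
    return copy;
}

int main() {
    char* path = strdup_check_oom("/some/boot/class/path");
    std::printf("%s\n", path);
    std::free(path);
    return 0;
}
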
@ -128,6 +128,8 @@ class LazyClassPathEntry: public ClassPathEntry {
bool is_jar_file();
const char* name()  { return _path; }
LazyClassPathEntry(char* path, const struct stat* st);
virtual ~LazyClassPathEntry();

ClassFileStream* open_stream(const char* name, TRAPS);
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
virtual bool is_lazy();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,21 +54,6 @@ StackMapFrame* StackMapFrame::frame_in_exception_handler(u1 flags) {
return frame;
}

bool StackMapFrame::has_new_object() const {
int32_t i;
for (i = 0; i < _max_locals; i++) {
if (_locals[i].is_uninitialized()) {
return true;
}
}
for (i = 0; i < _stack_size; i++) {
if (_stack[i].is_uninitialized()) {
return true;
}
}
return false;
}

void StackMapFrame::initialize_object(
VerificationType old_object, VerificationType new_object) {
int32_t i;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -154,10 +154,6 @@ class StackMapFrame : public ResourceObj {
VerificationType set_locals_from_arg(
const methodHandle m, VerificationType thisKlass, TRAPS);

// Search local variable type array and stack type array.
// Return true if an uninitialized object is found.
bool has_new_object() const;

// Search local variable type array and stack type array.
// Set every element with type of old_object to new_object.
void initialize_object(

@ -70,24 +70,26 @@ int StackMapTable::get_index_from_offset(int32_t offset) const {

bool StackMapTable::match_stackmap(
StackMapFrame* frame, int32_t target,
bool match, bool update, ErrorContext* ctx, TRAPS) const {
bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const {
int index = get_index_from_offset(target);
return match_stackmap(frame, target, index, match, update, ctx, THREAD);
return match_stackmap(frame, target, index, match, update, handler, ctx, THREAD);
}

// Match and/or update current_frame to the frame in stackmap table with
// specified offset and frame index. Return true if the two frames match.
// handler is true if the frame in stackmap_table is for an exception handler.
//
// The values of match and update are:  _match__update_
// The values of match and update are:  _match__update__handler
//
// checking a branch target/exception handler:  true   false
// checking a branch target:                    true   false   false
// checking an exception handler:               true   false   true
// linear bytecode verification following an
// unconditional branch:                        false  true
// unconditional branch:                        false  true    false
// linear bytecode verification not following an
// unconditional branch:                        true   true
// unconditional branch:                        true   true    false
bool StackMapTable::match_stackmap(
StackMapFrame* frame, int32_t target, int32_t frame_index,
bool match, bool update, ErrorContext* ctx, TRAPS) const {
bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const {
if (frame_index < 0 || frame_index >= _frame_count) {
*ctx = ErrorContext::missing_stackmap(frame->offset());
frame->verifier()->verify_error(
@ -98,11 +100,9 @@ bool StackMapTable::match_stackmap(
StackMapFrame *stackmap_frame = _frame_array[frame_index];
bool result = true;
if (match) {
// when checking handler target, match == true && update == false
bool is_exception_handler = !update;
// Has direct control flow from last instruction, need to match the two
// frames.
result = frame->is_assignable_to(stackmap_frame, is_exception_handler,
result = frame->is_assignable_to(stackmap_frame, handler,
ctx, CHECK_VERIFY_(frame->verifier(), result));
}
if (update) {
@ -126,24 +126,10 @@ void StackMapTable::check_jump_target(
StackMapFrame* frame, int32_t target, TRAPS) const {
ErrorContext ctx;
bool match = match_stackmap(
frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier()));
frame, target, true, false, false, &ctx, CHECK_VERIFY(frame->verifier()));
if (!match || (target < 0 || target >= _code_length)) {
frame->verifier()->verify_error(ctx,
"Inconsistent stackmap frames at branch target %d", target);
return;
}
// check if uninitialized objects exist on backward branches
check_new_object(frame, target, CHECK_VERIFY(frame->verifier()));
frame->verifier()->update_furthest_jump(target);
}

void StackMapTable::check_new_object(
const StackMapFrame* frame, int32_t target, TRAPS) const {
if (frame->offset() > target && frame->has_new_object()) {
frame->verifier()->verify_error(
ErrorContext::bad_code(frame->offset()),
"Uninitialized object exists on backward branch %d", target);
return;
}
}

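The refactoring above replaces an inferred flag (bool is_exception_handler = !update) with an explicit handler parameter, so the truth table in the comment can distinguish branch targets from exception handlers even though both use match=true, update=false. A toy illustration of the same call shapes, with invented bodies:

#include <cstdio>

// Previously "checking a handler" was deduced from (match && !update), which
// breaks once a caller needs match=true, update=false for a plain branch
// target. Passing the flag explicitly removes the inference.
static bool match_frames(bool match, bool update, bool handler) {
    if (match) {
        // 'handler' now says directly whether handler-specific assignability
        // rules apply; it is no longer derived from the other two flags.
        std::printf("matching frames, handler=%s\n", handler ? "true" : "false");
    }
    if (update) {
        std::printf("updating current frame\n");
    }
    return true;
}

int main() {
    match_frames(true, false, false);  // branch target
    match_frames(true, false, true);   // exception handler
    match_frames(false, true, false);  // after an unconditional branch
    return 0;
}
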
@ -60,12 +60,12 @@ class StackMapTable : public StackObj {
// specified offset. Return true if the two frames match.
bool match_stackmap(
StackMapFrame* current_frame, int32_t offset,
bool match, bool update, ErrorContext* ctx, TRAPS) const;
bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const;
// Match and/or update current_frame to the frame in stackmap table with
// specified offset and frame index. Return true if the two frames match.
bool match_stackmap(
StackMapFrame* current_frame, int32_t offset, int32_t frame_index,
bool match, bool update, ErrorContext* ctx, TRAPS) const;
bool match, bool update, bool handler, ErrorContext* ctx, TRAPS) const;

// Check jump instructions. Make sure there are no uninitialized
// instances on backward branch.
@ -76,10 +76,6 @@ class StackMapTable : public StackObj {
// Returns the frame array index where the frame with offset is stored.
int get_index_from_offset(int32_t offset) const;

// Make sure that there's no uninitialized object exist on backward branch.
void check_new_object(
const StackMapFrame* frame, int32_t target, TRAPS) const;

void print_on(outputStream* str) const;
};

@ -70,9 +70,9 @@ Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS
void SymbolTable::initialize_symbols(int arena_alloc_size) {
// Initialize the arena for global symbols, size passed in depends on CDS.
if (arena_alloc_size == 0) {
_arena = new (mtSymbol) Arena();
_arena = new (mtSymbol) Arena(mtSymbol);
} else {
_arena = new (mtSymbol) Arena(arena_alloc_size);
_arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size);
}
}

@ -620,8 +620,6 @@ void ClassVerifier::verify_method(methodHandle m, TRAPS) {
// flow from current instruction to the next
// instruction in sequence

set_furthest_jump(0);

Bytecodes::Code opcode;
while (!bcs.is_last_bytecode()) {
// Check for recursive re-verification before each bytecode.
@ -1780,7 +1778,7 @@ u2 ClassVerifier::verify_stackmap_table(u2 stackmap_index, u2 bci,
// If matched, current_frame will be updated by this method.
bool matches = stackmap_table->match_stackmap(
current_frame, this_offset, stackmap_index,
!no_control_flow, true, &ctx, CHECK_VERIFY_(this, 0));
!no_control_flow, true, false, &ctx, CHECK_VERIFY_(this, 0));
if (!matches) {
// report type error
verify_error(ctx, "Instruction type does not match stack map");
@ -1827,7 +1825,7 @@ void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, S
}
ErrorContext ctx;
bool matches = stackmap_table->match_stackmap(
new_frame, handler_pc, true, false, &ctx, CHECK_VERIFY(this));
new_frame, handler_pc, true, false, true, &ctx, CHECK_VERIFY(this));
if (!matches) {
verify_error(ctx, "Stack map does not match the one at "
"exception handler %d", handler_pc);
@ -2219,6 +2217,181 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
}
}

// Look at the method's handlers.  If the bci is in the handler's try block
// then check if the handler_pc is already on the stack.  If not, push it.
void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
GrowableArray<u4>* handler_stack,
u4 bci) {
int exlength = exhandlers->length();
for(int x = 0; x < exlength; x++) {
if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
handler_stack->append_if_missing(exhandlers->handler_pc(x));
}
}
}

// Return TRUE if all code paths starting with start_bc_offset end in
// bytecode athrow or loop.
bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
ResourceMark rm;
// Create bytecode stream.
RawBytecodeStream bcs(method());
u4 code_length = method()->code_size();
bcs.set_start(start_bc_offset);
u4 target;
// Create stack for storing bytecode start offsets for if* and *switch.
GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
// Create stack for handlers for try blocks containing this handler.
GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
// Create list of visited branch opcodes (goto* and if*).
GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
ExceptionTable exhandlers(_method());

while (true) {
if (bcs.is_last_bytecode()) {
// if no more starting offsets to parse or if at the end of the
// method then return false.
if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length))
return false;
// Pop a bytecode starting offset and scan from there.
bcs.set_start(bci_stack->pop());
}
Bytecodes::Code opcode = bcs.raw_next();
u4 bci = bcs.bci();

// If the bytecode is in a TRY block, push its handlers so they
// will get parsed.
push_handlers(&exhandlers, handler_stack, bci);

switch (opcode) {
case Bytecodes::_if_icmpeq:
case Bytecodes::_if_icmpne:
case Bytecodes::_if_icmplt:
case Bytecodes::_if_icmpge:
case Bytecodes::_if_icmpgt:
case Bytecodes::_if_icmple:
case Bytecodes::_ifeq:
case Bytecodes::_ifne:
case Bytecodes::_iflt:
case Bytecodes::_ifge:
case Bytecodes::_ifgt:
case Bytecodes::_ifle:
case Bytecodes::_if_acmpeq:
case Bytecodes::_if_acmpne:
case Bytecodes::_ifnull:
case Bytecodes::_ifnonnull:
target = bcs.dest();
if (visited_branches->contains(bci)) {
if (bci_stack->is_empty()) return true;
// Pop a bytecode starting offset and scan from there.
bcs.set_start(bci_stack->pop());
} else {
if (target > bci) { // forward branch
if (target >= code_length) return false;
// Push the branch target onto the stack.
bci_stack->push(target);
// then, scan bytecodes starting with next.
bcs.set_start(bcs.next_bci());
} else { // backward branch
// Push bytecode offset following backward branch onto the stack.
bci_stack->push(bcs.next_bci());
// Check bytecodes starting with branch target.
bcs.set_start(target);
}
// Record target so we don't branch here again.
visited_branches->append(bci);
}
break;

case Bytecodes::_goto:
case Bytecodes::_goto_w:
target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
if (visited_branches->contains(bci)) {
if (bci_stack->is_empty()) return true;
// Been here before, pop new starting offset from stack.
bcs.set_start(bci_stack->pop());
} else {
if (target >= code_length) return false;
// Continue scanning from the target onward.
bcs.set_start(target);
// Record target so we don't branch here again.
visited_branches->append(bci);
}
break;

// Check that all switch alternatives end in 'athrow' bytecodes. Since it
// is difficult to determine where each switch alternative ends, parse
// each switch alternative until either hit a 'return', 'athrow', or reach
// the end of the method's bytecodes.  This is gross but should be okay
// because:
// 1. tableswitch and lookupswitch byte codes in handlers for ctor explicit
//    constructor invocations should be rare.
// 2. if each switch alternative ends in an athrow then the parsing should be
//    short.  If there is no athrow then it is bogus code, anyway.
case Bytecodes::_lookupswitch:
case Bytecodes::_tableswitch:
{
address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
int keys, delta;
if (opcode == Bytecodes::_tableswitch) {
jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
// This is invalid, but let the regular bytecode verifier
// report this because the user will get a better error message.
if (low > high) return true;
keys = high - low + 1;
delta = 1;
} else {
keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
delta = 2;
}
// Invalid, let the regular bytecode verifier deal with it.
if (keys < 0) return true;

// Push the offset of the next bytecode onto the stack.
bci_stack->push(bcs.next_bci());

// Push the switch alternatives onto the stack.
for (int i = 0; i < keys; i++) {
u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
if (target > code_length) return false;
bci_stack->push(target);
}

// Start bytecode parsing for the switch at the default alternative.
if (default_offset > code_length) return false;
bcs.set_start(default_offset);
break;
}

case Bytecodes::_return:
return false;

case Bytecodes::_athrow:
{
if (bci_stack->is_empty()) {
if (handler_stack->is_empty()) {
return true;
} else {
// Parse the catch handlers for try blocks containing athrow.
bcs.set_start(handler_stack->pop());
}
} else {
// Pop a bytecode offset and start scanning from there.
bcs.set_start(bci_stack->pop());
}
}
break;

default:
;
} // end switch
} // end while loop

return false;
}

void ClassVerifier::verify_invoke_init(
RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
@ -2238,25 +2411,26 @@ void ClassVerifier::verify_invoke_init(
return;
}

// Make sure that this call is not jumped over.
if (bci < furthest_jump()) {
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from inside of a branch");
return;
}

// Make sure that this call is not done from within a TRY block because
// that can result in returning an incomplete object.  Simply checking
// (bci >= start_pc) also ensures that this call is not done after a TRY
// block.  That is also illegal because this call must be the first Java
// statement in the constructor.
// Check if this call is done from inside of a TRY block.  If so, make
// sure that all catch clause paths end in a throw.  Otherwise, this
// can result in returning an incomplete object.
ExceptionTable exhandlers(_method());
int exlength = exhandlers.length();
for(int i = 0; i < exlength; i++) {
if (bci >= exhandlers.start_pc(i)) {
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from after the start of a try block");
return;
u2 start_pc = exhandlers.start_pc(i);
u2 end_pc = exhandlers.end_pc(i);

if (bci >= start_pc && bci < end_pc) {
if (!ends_in_athrow(exhandlers.handler_pc(i))) {
verify_error(ErrorContext::bad_code(bci),
"Bad <init> method call from after the start of a try block");
return;
} else if (VerboseVerification) {
ResourceMark rm;
tty->print_cr(
"Survived call to ends_in_athrow(): %s",
current_class()->name()->as_C_string());
}
}
}

@ -30,6 +30,7 @@
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "runtime/handles.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/exceptions.hpp"

// The verifier class
@ -258,9 +259,6 @@ class ClassVerifier : public StackObj {

ErrorContext _error_context;  // contains information about an error

// Used to detect illegal jumps over calls to super() nd this() in ctors.
int32_t _furthest_jump;

void verify_method(methodHandle method, TRAPS);
char* generate_code_data(methodHandle m, u4 code_length, TRAPS);
void verify_exception_handler_table(u4 code_length, char* code_data,
@ -306,6 +304,16 @@ class ClassVerifier : public StackObj {
StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
constantPoolHandle cp, TRAPS);

// Used by ends_in_athrow() to push all handlers that contain bci onto
// the handler_stack, if the handler is not already on the stack.
void push_handlers(ExceptionTable* exhandlers,
GrowableArray<u4>* handler_stack,
u4 bci);

// Returns true if all paths starting with start_bc_offset end in athrow
// bytecode or loop.
bool ends_in_athrow(u4 start_bc_offset);

void verify_invoke_instructions(
RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
bool* this_uninit, VerificationType return_type,
@ -407,19 +415,6 @@ class ClassVerifier : public StackObj {

TypeOrigin ref_ctx(const char* str, TRAPS);

// Keep track of the furthest branch done in a method to make sure that
// there are no branches over calls to super() or this() from inside of
// a constructor.
int32_t furthest_jump() { return _furthest_jump; }

void set_furthest_jump(int32_t target) {
_furthest_jump = target;
}

void update_furthest_jump(int32_t target) {
if (target > _furthest_jump) _furthest_jump = target;
}

};

inline int ClassVerifier::change_sig_to_verificationType(

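The new ends_in_athrow() above is essentially an iterative depth-first scan over the bytecode, with an explicit worklist of start offsets and a visited set for branches. A compilable miniature of that control skeleton over an invented instruction map (real bytecode parsing is elided):

#include <map>
#include <set>
#include <vector>
#include <cstdio>

// Toy instruction set: straight-line flow is modeled as a GOTO edge.
enum Op { GOTO, ATHROW, RETURN };
struct Insn { Op op; int target; };   // target used only by GOTO

static bool ends_in_athrow(const std::map<int, Insn>& code, int start) {
    std::vector<int> worklist{start};  // pending start offsets to scan
    std::set<int> visited;             // branches already followed (loop detection)
    while (!worklist.empty()) {
        int bci = worklist.back(); worklist.pop_back();
        while (code.count(bci)) {
            const Insn& in = code.at(bci);
            if (in.op == RETURN) return false;  // a path escapes normally
            if (in.op == ATHROW) break;         // this path is fine, try the next
            // GOTO: follow the edge unless we have been here before (a loop).
            if (!visited.insert(bci).second) break;
            bci = in.target;
        }
    }
    return true;  // every scanned path ended in athrow or looped
}

int main() {
    std::map<int, Insn> code{{0, {GOTO, 2}}, {2, {ATHROW, -1}}};
    std::printf("%s\n", ends_in_athrow(code, 0) ? "all paths throw" : "a path returns");
    return 0;
}
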
@ -407,56 +407,66 @@ void Dependencies::check_valid_dependency_type(DepType dept) {
|
||||
// for the sake of the compiler log, print out current dependencies:
|
||||
void Dependencies::log_all_dependencies() {
|
||||
if (log() == NULL) return;
|
||||
ciBaseObject* args[max_arg_count];
|
||||
ResourceMark rm;
|
||||
for (int deptv = (int)FIRST_TYPE; deptv < (int)TYPE_LIMIT; deptv++) {
|
||||
DepType dept = (DepType)deptv;
|
||||
GrowableArray<ciBaseObject*>* deps = _deps[dept];
|
||||
if (deps->length() == 0) continue;
|
||||
int deplen = deps->length();
|
||||
if (deplen == 0) {
|
||||
continue;
|
||||
}
|
||||
int stride = dep_args(dept);
|
||||
GrowableArray<ciBaseObject*>* ciargs = new GrowableArray<ciBaseObject*>(stride);
|
||||
for (int i = 0; i < deps->length(); i += stride) {
|
||||
for (int j = 0; j < stride; j++) {
|
||||
// flush out the identities before printing
|
||||
args[j] = deps->at(i+j);
|
||||
ciargs->push(deps->at(i+j));
|
||||
}
|
||||
write_dependency_to(log(), dept, stride, args);
|
||||
write_dependency_to(log(), dept, ciargs);
|
||||
ciargs->clear();
|
||||
}
|
||||
guarantee(deplen == deps->length(), "deps array cannot grow inside nested ResoureMark scope");
|
||||
}
|
||||
}
|
||||
|
||||
void Dependencies::write_dependency_to(CompileLog* log,
|
||||
DepType dept,
|
||||
int nargs, DepArgument args[],
|
||||
GrowableArray<DepArgument>* args,
|
||||
Klass* witness) {
|
||||
if (log == NULL) {
|
||||
return;
|
||||
}
|
||||
ResourceMark rm;
|
||||
ciEnv* env = ciEnv::current();
|
||||
ciBaseObject* ciargs[max_arg_count];
|
||||
assert(nargs <= max_arg_count, "oob");
|
||||
for (int j = 0; j < nargs; j++) {
|
||||
if (args[j].is_oop()) {
|
||||
ciargs[j] = env->get_object(args[j].oop_value());
|
||||
GrowableArray<ciBaseObject*>* ciargs = new GrowableArray<ciBaseObject*>(args->length());
|
||||
for (GrowableArrayIterator<DepArgument> it = args->begin(); it != args->end(); ++it) {
|
||||
DepArgument arg = *it;
|
||||
if (arg.is_oop()) {
|
||||
ciargs->push(env->get_object(arg.oop_value()));
|
||||
} else {
|
||||
ciargs[j] = env->get_metadata(args[j].metadata_value());
|
||||
ciargs->push(env->get_metadata(arg.metadata_value()));
|
||||
}
|
||||
}
|
||||
Dependencies::write_dependency_to(log, dept, nargs, ciargs, witness);
|
||||
int argslen = ciargs->length();
|
||||
Dependencies::write_dependency_to(log, dept, ciargs, witness);
|
||||
guarantee(argslen == ciargs->length(), "ciargs array cannot grow inside nested ResoureMark scope");
|
||||
}
|
||||
|
||||
void Dependencies::write_dependency_to(CompileLog* log,
|
||||
DepType dept,
|
||||
int nargs, ciBaseObject* args[],
|
||||
GrowableArray<ciBaseObject*>* args,
|
||||
Klass* witness) {
|
||||
if (log == NULL) return;
|
||||
assert(nargs <= max_arg_count, "oob");
|
||||
int argids[max_arg_count];
|
||||
int ctxkj = dep_context_arg(dept); // -1 if no context arg
|
||||
int j;
|
||||
for (j = 0; j < nargs; j++) {
|
||||
if (args[j]->is_object()) {
|
||||
argids[j] = log->identify(args[j]->as_object());
|
||||
if (log == NULL) {
|
||||
return;
|
||||
}
|
||||
ResourceMark rm;
|
||||
GrowableArray<int>* argids = new GrowableArray<int>(args->length());
|
||||
for (GrowableArrayIterator<ciBaseObject*> it = args->begin(); it != args->end(); ++it) {
|
||||
ciBaseObject* obj = *it;
|
||||
if (obj->is_object()) {
|
||||
argids->push(log->identify(obj->as_object()));
|
||||
} else {
|
||||
argids[j] = log->identify(args[j]->as_metadata());
|
||||
argids->push(log->identify(obj->as_metadata()));
|
||||
}
|
||||
}
|
||||
if (witness != NULL) {
|
||||
@ -465,16 +475,17 @@ void Dependencies::write_dependency_to(CompileLog* log,
|
||||
log->begin_elem("dependency");
|
||||
}
|
||||
log->print(" type='%s'", dep_name(dept));
|
||||
if (ctxkj >= 0) {
|
||||
log->print(" ctxk='%d'", argids[ctxkj]);
|
||||
const int ctxkj = dep_context_arg(dept); // -1 if no context arg
|
||||
if (ctxkj >= 0 && ctxkj < argids->length()) {
|
||||
log->print(" ctxk='%d'", argids->at(ctxkj));
|
||||
}
|
||||
// write remaining arguments, if any.
|
||||
for (j = 0; j < nargs; j++) {
|
||||
for (int j = 0; j < argids->length(); j++) {
|
||||
if (j == ctxkj) continue; // already logged
|
||||
if (j == 1) {
|
||||
log->print( " x='%d'", argids[j]);
|
||||
log->print( " x='%d'", argids->at(j));
|
||||
} else {
|
||||
log->print(" x%d='%d'", j, argids[j]);
|
||||
log->print(" x%d='%d'", j, argids->at(j));
|
||||
}
|
||||
}
|
||||
if (witness != NULL) {
|
||||
@ -486,9 +497,12 @@ void Dependencies::write_dependency_to(CompileLog* log,
|
||||
|
||||
void Dependencies::write_dependency_to(xmlStream* xtty,
|
||||
DepType dept,
|
||||
int nargs, DepArgument args[],
|
||||
GrowableArray<DepArgument>* args,
|
||||
Klass* witness) {
|
||||
if (xtty == NULL) return;
|
||||
if (xtty == NULL) {
|
||||
return;
|
||||
}
|
||||
ResourceMark rm;
|
||||
ttyLocker ttyl;
|
||||
int ctxkj = dep_context_arg(dept); // -1 if no context arg
|
||||
if (witness != NULL) {
|
||||
@ -498,23 +512,24 @@ void Dependencies::write_dependency_to(xmlStream* xtty,
|
||||
}
|
||||
xtty->print(" type='%s'", dep_name(dept));
|
||||
if (ctxkj >= 0) {
|
||||
xtty->object("ctxk", args[ctxkj].metadata_value());
|
||||
xtty->object("ctxk", args->at(ctxkj).metadata_value());
|
||||
}
|
||||
// write remaining arguments, if any.
|
||||
for (int j = 0; j < nargs; j++) {
|
||||
for (int j = 0; j < args->length(); j++) {
|
||||
if (j == ctxkj) continue; // already logged
|
||||
DepArgument arg = args->at(j);
|
||||
if (j == 1) {
|
||||
if (args[j].is_oop()) {
|
||||
xtty->object("x", args[j].oop_value());
|
||||
if (arg.is_oop()) {
|
||||
xtty->object("x", arg.oop_value());
|
||||
} else {
|
||||
xtty->object("x", args[j].metadata_value());
|
||||
xtty->object("x", arg.metadata_value());
|
||||
}
|
||||
} else {
|
||||
char xn[10]; sprintf(xn, "x%d", j);
|
||||
if (args[j].is_oop()) {
|
||||
xtty->object(xn, args[j].oop_value());
|
||||
if (arg.is_oop()) {
|
||||
xtty->object(xn, arg.oop_value());
|
||||
} else {
|
||||
xtty->object(xn, args[j].metadata_value());
|
||||
xtty->object(xn, arg.metadata_value());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -525,7 +540,7 @@ void Dependencies::write_dependency_to(xmlStream* xtty,
|
||||
xtty->end_elem();
|
||||
}
|
||||
|
||||
void Dependencies::print_dependency(DepType dept, int nargs, DepArgument args[],
|
||||
void Dependencies::print_dependency(DepType dept, GrowableArray<DepArgument>* args,
|
||||
Klass* witness) {
|
||||
ResourceMark rm;
|
||||
ttyLocker ttyl; // keep the following output all in one block
|
||||
@ -534,8 +549,8 @@ void Dependencies::print_dependency(DepType dept, int nargs, DepArgument args[],
|
||||
dep_name(dept));
|
||||
// print arguments
|
||||
int ctxkj = dep_context_arg(dept); // -1 if no context arg
|
||||
for (int j = 0; j < nargs; j++) {
|
||||
DepArgument arg = args[j];
|
||||
for (int j = 0; j < args->length(); j++) {
|
||||
DepArgument arg = args->at(j);
|
||||
bool put_star = false;
|
||||
if (arg.is_null()) continue;
|
||||
const char* what;
|
||||
@ -571,31 +586,33 @@ void Dependencies::print_dependency(DepType dept, int nargs, DepArgument args[],
|
||||
void Dependencies::DepStream::log_dependency(Klass* witness) {
|
||||
if (_deps == NULL && xtty == NULL) return; // fast cutout for runtime
|
||||
ResourceMark rm;
|
||||
int nargs = argument_count();
|
||||
DepArgument args[max_arg_count];
|
||||
const int nargs = argument_count();
|
||||
GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
|
||||
for (int j = 0; j < nargs; j++) {
|
||||
if (type() == call_site_target_value) {
|
||||
args[j] = argument_oop(j);
|
||||
args->push(argument_oop(j));
|
||||
} else {
|
||||
args[j] = argument(j);
|
||||
args->push(argument(j));
|
||||
}
|
||||
}
|
||||
int argslen = args->length();
|
||||
if (_deps != NULL && _deps->log() != NULL) {
|
||||
Dependencies::write_dependency_to(_deps->log(),
|
||||
type(), nargs, args, witness);
|
||||
Dependencies::write_dependency_to(_deps->log(), type(), args, witness);
|
||||
} else {
|
||||
Dependencies::write_dependency_to(xtty,
|
||||
type(), nargs, args, witness);
|
||||
Dependencies::write_dependency_to(xtty, type(), args, witness);
|
||||
}
|
||||
guarantee(argslen == args->length(), "args array cannot grow inside nested ResoureMark scope");
|
||||
}
|
||||
|
||||
void Dependencies::DepStream::print_dependency(Klass* witness, bool verbose) {
|
||||
ResourceMark rm;
|
||||
int nargs = argument_count();
|
||||
DepArgument args[max_arg_count];
|
||||
GrowableArray<DepArgument>* args = new GrowableArray<DepArgument>(nargs);
|
||||
for (int j = 0; j < nargs; j++) {
|
||||
args[j] = argument(j);
|
||||
args->push(argument(j));
|
||||
}
|
||||
Dependencies::print_dependency(type(), nargs, args, witness);
|
||||
int argslen = args->length();
|
||||
Dependencies::print_dependency(type(), args, witness);
|
||||
if (verbose) {
|
||||
if (_code != NULL) {
|
||||
tty->print(" code: ");
|
||||
@ -603,6 +620,7 @@ void Dependencies::DepStream::print_dependency(Klass* witness, bool verbose) {
|
||||
tty->cr();
|
||||
}
|
||||
}
|
||||
guarantee(argslen == args->length(), "args array cannot grow inside nested ResoureMark scope");
|
||||
}
|
||||
|
||||
|
||||
|
@ -369,20 +369,36 @@ class Dependencies: public ResourceObj {
|
||||
void copy_to(nmethod* nm);
|
||||
|
||||
void log_all_dependencies();
|
||||
void log_dependency(DepType dept, int nargs, ciBaseObject* args[]) {
|
||||
write_dependency_to(log(), dept, nargs, args);
|
||||
|
||||
void log_dependency(DepType dept, GrowableArray<ciBaseObject*>* args) {
|
||||
ResourceMark rm;
|
||||
int argslen = args->length();
|
||||
write_dependency_to(log(), dept, args);
|
||||
guarantee(argslen == args->length(),
|
||||
"args array cannot grow inside nested ResoureMark scope");
|
||||
}
|
||||
|
||||
void log_dependency(DepType dept,
|
||||
ciBaseObject* x0,
|
||||
ciBaseObject* x1 = NULL,
|
||||
ciBaseObject* x2 = NULL) {
|
||||
if (log() == NULL) return;
|
||||
ciBaseObject* args[max_arg_count];
|
||||
args[0] = x0;
|
||||
args[1] = x1;
|
||||
args[2] = x2;
|
||||
assert(2 < max_arg_count, "");
|
||||
log_dependency(dept, dep_args(dept), args);
|
||||
if (log() == NULL) {
|
||||
return;
|
||||
}
|
||||
ResourceMark rm;
|
||||
GrowableArray<ciBaseObject*>* ciargs =
|
||||
new GrowableArray<ciBaseObject*>(dep_args(dept));
|
||||
assert (x0 != NULL, "no log x0");
|
||||
ciargs->push(x0);
|
||||
|
||||
if (x1 != NULL) {
|
||||
ciargs->push(x1);
|
||||
}
|
||||
if (x2 != NULL) {
|
||||
ciargs->push(x2);
|
||||
}
|
||||
assert(ciargs->length() == dep_args(dept), "");
|
||||
log_dependency(dept, ciargs);
|
||||
}
|
||||
|
||||
class DepArgument : public ResourceObj {
|
||||
@ -405,20 +421,8 @@ class Dependencies: public ResourceObj {
|
||||
Metadata* metadata_value() const { assert(!_is_oop && _valid, "must be"); return (Metadata*) _value; }
|
||||
};
|
||||
|
||||
static void write_dependency_to(CompileLog* log,
|
||||
DepType dept,
|
||||
int nargs, ciBaseObject* args[],
|
||||
Klass* witness = NULL);
|
||||
static void write_dependency_to(CompileLog* log,
|
||||
DepType dept,
|
||||
int nargs, DepArgument args[],
|
||||
Klass* witness = NULL);
|
||||
static void write_dependency_to(xmlStream* xtty,
|
||||
DepType dept,
|
||||
int nargs, DepArgument args[],
|
||||
Klass* witness = NULL);
|
||||
static void print_dependency(DepType dept,
|
||||
int nargs, DepArgument args[],
|
||||
GrowableArray<DepArgument>* args,
|
||||
Klass* witness = NULL);
|
||||
|
||||
private:
|
||||
@ -427,6 +431,18 @@ class Dependencies: public ResourceObj {
|
||||
|
||||
static Klass* ctxk_encoded_as_null(DepType dept, Metadata* x);
|
||||
|
||||
static void write_dependency_to(CompileLog* log,
|
||||
DepType dept,
|
||||
GrowableArray<ciBaseObject*>* args,
|
||||
Klass* witness = NULL);
|
||||
static void write_dependency_to(CompileLog* log,
|
||||
DepType dept,
|
||||
GrowableArray<DepArgument>* args,
|
||||
Klass* witness = NULL);
|
||||
static void write_dependency_to(xmlStream* xtty,
|
||||
DepType dept,
|
||||
GrowableArray<DepArgument>* args,
|
||||
Klass* witness = NULL);
|
||||
public:
|
||||
// Use this to iterate over an nmethod's dependency set.
|
||||
// Works on new and old dependency sets.
|
||||
|
@ -33,6 +33,7 @@
|
||||
#include "oops/symbol.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/jniHandles.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
|
||||
class MethodMatcher : public CHeapObj<mtCompiler> {
|
||||
public:
|
||||
@ -175,7 +176,11 @@ class MethodOptionMatcher: public MethodMatcher {
|
||||
Symbol* method_name, Mode method_mode,
|
||||
Symbol* signature, const char * opt, MethodMatcher* next):
|
||||
MethodMatcher(class_name, class_mode, method_name, method_mode, signature, next) {
|
||||
option = opt;
|
||||
option = os::strdup_check_oom(opt);
|
||||
}
|
||||
|
||||
virtual ~MethodOptionMatcher() {
|
||||
os::free((void*)option);
|
||||
}
|
||||
|
||||
bool match(methodHandle method, const char* opt) {
|
||||
@ -498,7 +503,7 @@ void CompilerOracle::parse_from_line(char* line) {
|
||||
tty->print("CompilerOracle: %s ", command_names[command]);
|
||||
match->print();
|
||||
}
|
||||
match = add_option_string(c_name, c_match, m_name, m_match, signature, strdup(option));
|
||||
match = add_option_string(c_name, c_match, m_name, m_match, signature, option);
|
||||
line += bytes_read;
|
||||
}
|
||||
} else {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -52,7 +52,8 @@ void ConcurrentMarkSweepPolicy::initialize_alignments() {
|
||||
}
|
||||
|
||||
void ConcurrentMarkSweepPolicy::initialize_generations() {
|
||||
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
|
||||
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
|
||||
CURRENT_PC, AllocFailStrategy::RETURN_NULL);
|
||||
if (_generations == NULL)
|
||||
vm_exit_during_initialization("Unable to allocate gen spec");
|
||||
|
||||
|
@ -5987,6 +5987,8 @@ public:
|
||||
};
|
||||
|
||||
void CMSRefProcTaskProxy::work(uint worker_id) {
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
assert(_collector->_span.equals(_span), "Inconsistency in _span");
|
||||
CMSParKeepAliveClosure par_keep_alive(_collector, _span,
|
||||
_mark_bit_map,
|
||||
|
@ -2167,7 +2167,9 @@ void ConcurrentMark::cleanup() {
|
||||
g1h->increment_total_collections();
|
||||
|
||||
// Clean out dead classes and update Metaspace sizes.
|
||||
ClassLoaderDataGraph::purge();
|
||||
if (ClassUnloadingWithConcurrentMark) {
|
||||
ClassLoaderDataGraph::purge();
|
||||
}
|
||||
MetaspaceGC::compute_new_size();
|
||||
|
||||
// We reclaimed old regions so we should calculate the sizes to make
|
||||
@ -2403,6 +2405,8 @@ public:
|
||||
}
|
||||
|
||||
virtual void work(uint worker_id) {
|
||||
ResourceMark rm;
|
||||
HandleMark hm;
|
||||
CMTask* task = _cm->task(worker_id);
|
||||
G1CMIsAliveClosure g1_is_alive(_g1h);
|
||||
G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
|
||||
@ -2595,24 +2599,27 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
|
||||
assert(_markStack.isEmpty(), "Marking should have completed");
|
||||
|
||||
// Unload Klasses, String, Symbols, Code Cache, etc.
|
||||
|
||||
G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
|
||||
|
||||
bool purged_classes;
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
|
||||
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
|
||||
}
|
||||
G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
|
||||
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
|
||||
}
|
||||
if (ClassUnloadingWithConcurrentMark) {
|
||||
bool purged_classes;
|
||||
|
||||
if (G1StringDedup::is_enabled()) {
|
||||
G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
|
||||
G1StringDedup::unlink(&g1_is_alive);
|
||||
{
|
||||
G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
|
||||
purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
|
||||
}
|
||||
|
||||
{
|
||||
G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
|
||||
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
|
||||
}
|
||||
}
|
||||
|
||||
if (G1StringDedup::is_enabled()) {
|
||||
G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
|
||||
G1StringDedup::unlink(&g1_is_alive);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1926,6 +1926,8 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
  _humongous_is_live(),
  _has_humongous_reclaim_candidates(false),
  _free_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),

@ -2082,6 +2084,7 @@ jint G1CollectedHeap::initialize() {
  _g1h = this;

  _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
  _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)

@ -2177,6 +2180,11 @@ void G1CollectedHeap::stop() {
  }
}

void G1CollectedHeap::clear_humongous_is_live_table() {
  guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true");
  _humongous_is_live.clear();
}

size_t G1CollectedHeap::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
}

@ -2574,15 +2582,12 @@ bool G1CollectedHeap::is_in(const void* p) const {

// Iteration functions.

// Iterates an OopClosure over all ref-containing fields of objects
// within a HeapRegion.
// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.

class IterateOopClosureRegionClosure: public HeapRegionClosure {
  MemRegion _mr;
  ExtendedOopClosure* _cl;
public:
  IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
    : _mr(mr), _cl(cl) {}
  IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->oop_iterate(_cl);

@ -2592,12 +2597,7 @@ public:
};

void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  IterateOopClosureRegionClosure blk(_g1_committed, cl);
  heap_region_iterate(&blk);
}

void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  IterateOopClosureRegionClosure blk(mr, cl);
  IterateOopClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}

@ -3771,6 +3771,61 @@ size_t G1CollectedHeap::cards_scanned() {
  return g1_rem_set()->cardsScanned();
}

bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
  HeapRegion* region = region_at(index);
  assert(region->startsHumongous(), "Must start a humongous object");
  return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
}

class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
 private:
  size_t _total_humongous;
  size_t _candidate_humongous;
 public:
  RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    if (!r->startsHumongous()) {
      return false;
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    uint region_idx = r->hrs_index();
    bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
    // Is_candidate already filters out humongous regions with some remembered set.
    // This will not lead to humongous objects that we mistakenly keep alive, because
    // during young collection the remembered sets will only be added to.
    if (is_candidate) {
      g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
      _candidate_humongous++;
    }
    _total_humongous++;

    return false;
  }

  size_t total_humongous() const { return _total_humongous; }
  size_t candidate_humongous() const { return _candidate_humongous; }
};

void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
  if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
    g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);
    return;
  }

  RegisterHumongousWithInCSetFastTestClosure cl;
  heap_region_iterate(&cl);
  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
                                                                  cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;

  if (_has_humongous_reclaim_candidates) {
    clear_humongous_is_live_table();
  }
}

void
G1CollectedHeap::setup_surviving_young_words() {
  assert(_surviving_young_words == NULL, "pre-condition");
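The candidate test in the hunk above composes two conservative liveness signals. A standalone sketch of that predicate, where the booleans is_obj_array and remset_empty are hypothetical stand-ins for the oop(region->bottom())->is_objArray() and rem_set()->is_empty() queries in the diff:

#include <cstdio>

// Sketch of the eager-reclaim candidate test: a humongous region is
// "always live" if it holds an object array or already has remembered-set
// entries; only the remaining regions become reclaim candidates.
static bool humongous_region_is_always_live(bool is_obj_array, bool remset_empty) {
  return is_obj_array || !remset_empty;
}

int main() {
  // A non-array humongous object with an empty remembered set is the
  // only kind of region that becomes a reclaim candidate.
  bool is_candidate = !humongous_region_is_always_live(false /* is_obj_array */,
                                                       true  /* remset_empty */);
  printf("candidate: %s\n", is_candidate ? "yes" : "no"); // prints "candidate: yes"
  return 0;
}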
@ -4058,6 +4113,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {

        g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);

        register_humongous_regions_with_in_cset_fast_test();

        _cm->note_start_of_gc();
        // We should not verify the per-thread SATB buffers given that
        // we have not filtered them yet (we'll do so during the

@ -4108,6 +4165,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
                                true /* verify_fingers */);

        free_collection_set(g1_policy()->collection_set(), evacuation_info);

        eagerly_reclaim_humongous_regions();

        g1_policy()->clear_collection_set();

        cleanup_surviving_young_words();

@ -4608,7 +4668,9 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {

  assert(_worker_id == _par_scan_state->queue_num(), "sanity");

  if (_g1->in_cset_fast_test(obj)) {
  G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);

  if (state == G1CollectedHeap::InCSet) {
    oop forwardee;
    if (obj->is_forwarded()) {
      forwardee = obj->forwardee();

@ -4627,6 +4689,9 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
      do_klass_barrier(p, forwardee);
    }
  } else {
    if (state == G1CollectedHeap::IsHumongous) {
      _g1->set_humongous_is_live(obj);
    }
    // The object is not in collection set. If we're a root scanning
    // closure during an initial mark pause then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
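The copy closures above now branch on a three-valued region state instead of the old boolean in-cset test. A minimal sketch of that dispatch, with a hypothetical State enum mirroring in_cset_state_t and placeholder handlers standing in for the closure bodies:

#include <cstdio>

// Mirrors the three-way region classification introduced in this change.
enum State { InNeither, InCSet, IsHumongous };

static void evacuate()            { printf("copy or forward the object\n"); }
static void mark_humongous_live() { printf("record the humongous region as live\n"); }

// Only collection-set objects are evacuated; a reference into a humongous
// region merely marks that region live; everything else is left alone.
static void handle_reference(State state) {
  if (state == InCSet) {
    evacuate();
  } else {
    if (state == IsHumongous) {
      mark_humongous_live();
    }
    // Not in the collection set: nothing to copy.
  }
}

int main() {
  handle_reference(IsHumongous); // prints the humongous path
  return 0;
}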
@ -4719,11 +4784,6 @@ protected:
  Mutex _stats_lock;
  Mutex* stats_lock() { return &_stats_lock; }

  size_t getNCards() {
    return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
      / G1BlockOffsetSharedArray::N_bytes;
  }

public:
  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
    : AbstractGangTask("G1 collection"),

@ -4847,10 +4907,15 @@ public:
    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      strong_root_cl = &scan_mark_root_cl;
      weak_root_cl = &scan_mark_weak_root_cl;
      strong_cld_cl = &scan_mark_cld_cl;
      weak_cld_cl = &scan_mark_weak_cld_cl;
      strong_code_cl = &scan_mark_code_cl;
      if (ClassUnloadingWithConcurrentMark) {
        weak_root_cl = &scan_mark_weak_root_cl;
        weak_cld_cl = &scan_mark_weak_cld_cl;
      } else {
        weak_root_cl = &scan_mark_root_cl;
        weak_cld_cl = &scan_mark_cld_cl;
      }
    } else {
      strong_root_cl = &scan_only_root_cl;
      weak_root_cl = &scan_only_root_cl;

@ -4921,6 +4986,7 @@ g1_process_roots(OopClosure* scan_non_heap_roots,
  double closure_app_time_sec = 0.0;

  bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
  bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;

  BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);

@ -4930,8 +4996,8 @@ g1_process_roots(OopClosure* scan_non_heap_roots,
                &buf_scan_non_heap_roots,
                &buf_scan_non_heap_weak_roots,
                scan_strong_clds,
                // Initial Mark handles the weak CLDs separately.
                (during_im ? NULL : scan_weak_clds),
                // Unloading Initial Marks handle the weak CLDs separately.
                (trace_metadata ? NULL : scan_weak_clds),
                scan_strong_code);

  // Now the CM ref_processor roots.

@ -4943,7 +5009,7 @@ g1_process_roots(OopClosure* scan_non_heap_roots,
    ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
  }

  if (during_im) {
  if (trace_metadata) {
    // Barrier to make sure all workers passed
    // the strong CLD and strong nmethods phases.
    active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());

@ -5450,12 +5516,21 @@ class G1KeepAliveClosure: public OopClosure {
public:
  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  void do_oop( oop* p) {
  void do_oop(oop* p) {
    oop obj = *p;

    if (_g1->obj_in_cs(obj)) {
    G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
    if (obj == NULL || cset_state == G1CollectedHeap::InNeither) {
      return;
    }
    if (cset_state == G1CollectedHeap::InCSet) {
      assert( obj->is_forwarded(), "invariant" );
      *p = obj->forwardee();
    } else {
      assert(!obj->is_forwarded(), "invariant" );
      assert(cset_state == G1CollectedHeap::IsHumongous,
             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
      _g1->set_humongous_is_live(obj);
    }
  }
};

@ -5485,7 +5560,7 @@ public:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);

    if (_g1h->obj_in_cs(obj)) {
    if (_g1h->is_in_cset_or_humongous(obj)) {
      // If the referent object has been forwarded (either copied
      // to a new location or to itself in the event of an
      // evacuation failure) then we need to update the reference

@ -5510,10 +5585,10 @@ public:
        assert(!Metaspace::contains((const void*)p),
               err_msg("Unexpectedly found a pointer from metadata: "
                       PTR_FORMAT, p));
        _copy_non_heap_obj_cl->do_oop(p);
      }
      _copy_non_heap_obj_cl->do_oop(p);
    }
  }
}
};

// Serial drain queue closure. Called as the 'complete_gc'

@ -6435,6 +6510,154 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& e
  policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
}

class G1FreeHumongousRegionClosure : public HeapRegionClosure {
 private:
  FreeRegionList* _free_region_list;
  HeapRegionSet* _proxy_set;
  HeapRegionSetCount _humongous_regions_removed;
  size_t _freed_bytes;
 public:

  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
    _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    if (!r->startsHumongous()) {
      return false;
    }

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    oop obj = (oop)r->bottom();
    CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();

    // The following checks whether the humongous object is live are sufficient.
    // The main additional check (in addition to having a reference from the roots
    // or the young gen) is whether the humongous object has a remembered set entry.
    //
    // A humongous object cannot be live if there is no remembered set for it
    // because:
    // - there can be no references from within humongous starts regions referencing
    //   the object because we never allocate other objects into them.
    //   (I.e. there are no intra-region references that may be missed by the
    //   remembered set)
    // - as soon as there is a remembered set entry to the humongous starts region
    //   (i.e. it has "escaped" to an old object) this remembered set entry will stay
    //   until the end of a concurrent mark.
    //
    // It is not required to check whether the object has been found dead by marking
    // or not, in fact it would prevent reclamation within a concurrent cycle, as
    // all objects allocated during that time are considered live.
    // SATB marking is even more conservative than the remembered set.
    // So if at this point in the collection there is no remembered set entry,
    // nobody has a reference to it.
    // At the start of collection we flush all refinement logs, and remembered sets
    // are completely up-to-date wrt references to the humongous object.
    //
    // Other implementation considerations:
    // - never consider object arrays: while they are a valid target, they have not
    //   been observed to be used as temporary objects.
    // - they would also pose considerable effort for cleaning up the remembered
    //   sets.
    // While this cleanup is not strictly necessary to be done (or done instantly),
    // given that their occurrence is very low, this saves us this additional
    // complexity.
    uint region_idx = r->hrs_index();
    if (g1h->humongous_is_live(region_idx) ||
        g1h->humongous_region_is_always_live(region_idx)) {

      if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
        gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
                               r->isHumongous(),
                               region_idx,
                               r->rem_set()->occupied(),
                               r->rem_set()->strong_code_roots_list_length(),
                               next_bitmap->isMarked(r->bottom()),
                               g1h->humongous_is_live(region_idx),
                               obj->is_objArray()
                              );
      }

      return false;
    }

    guarantee(!obj->is_objArray(),
              err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
                      r->bottom()));

    if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
      gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
                             r->isHumongous(),
                             r->bottom(),
                             region_idx,
                             r->region_num(),
                             r->rem_set()->occupied(),
                             r->rem_set()->strong_code_roots_list_length(),
                             next_bitmap->isMarked(r->bottom()),
                             g1h->humongous_is_live(region_idx),
                             obj->is_objArray()
                            );
    }
    // Need to clear mark bit of the humongous object if already set.
    if (next_bitmap->isMarked(r->bottom())) {
      next_bitmap->clear(r->bottom());
    }
    _freed_bytes += r->used();
    r->set_containing_set(NULL);
    _humongous_regions_removed.increment(1u, r->capacity());
    g1h->free_humongous_region(r, _free_region_list, false);

    return false;
  }

  HeapRegionSetCount& humongous_free_count() {
    return _humongous_regions_removed;
  }

  size_t bytes_freed() const {
    return _freed_bytes;
  }

  size_t humongous_reclaimed() const {
    return _humongous_regions_removed.length();
  }
};

void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
  assert_at_safepoint(true);

  if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) {
    g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
    return;
  }

  double start_time = os::elapsedTime();

  FreeRegionList local_cleanup_list("Local Humongous Cleanup List");

  G1FreeHumongousRegionClosure cl(&local_cleanup_list);
  heap_region_iterate(&cl);

  HeapRegionSetCount empty_set;
  remove_from_old_sets(empty_set, cl.humongous_free_count());

  G1HRPrinter* hr_printer = _g1h->hr_printer();
  if (hr_printer->is_active()) {
    FreeRegionListIterator iter(&local_cleanup_list);
    while (iter.more_available()) {
      HeapRegion* hr = iter.get_next();
      hr_printer->cleanup(hr);
    }
  }

  prepend_to_freelist(&local_cleanup_list);
  decrement_summary_bytes(cl.bytes_freed());

  g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
                                                                    cl.humongous_reclaimed());
}

// This routine is similar to the above but does not record
// any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation of a
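The long liveness comment in the hunk above reduces to a small decision procedure. A standalone sketch of that logic, with hypothetical booleans standing in for the bitmap, remembered-set, and object-type queries the closure makes:

#include <cstdio>

// Condenses the liveness argument from the comment above: a candidate
// humongous region may be reclaimed only if no evacuation-time reference
// marked it live and it is not "always live" (an object array, or a region
// whose remembered set is non-empty).
static bool can_eagerly_reclaim(bool marked_live_during_gc,
                                bool is_obj_array,
                                bool remset_empty) {
  bool always_live = is_obj_array || !remset_empty;
  return !marked_live_during_gc && !always_live;
}

int main() {
  printf("%d\n", can_eagerly_reclaim(false, false, true));  // 1: reclaimable
  printf("%d\n", can_eagerly_reclaim(true,  false, true));  // 0: referenced during GC
  printf("%d\n", can_eagerly_reclaim(false, false, false)); // 0: remset entry exists
  return 0;
}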
@ -197,16 +197,6 @@ public:
  bool do_object_b(oop p);
};

// Instances of this class are used for quick tests on whether a reference points
// into the collection set. Each of the array's elements denotes whether the
// corresponding region is in the collection set.
class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
 protected:
  bool default_value() const { return false; }
 public:
  void clear() { G1BiasedMappedArray<bool>::clear(); }
};

class RefineCardTableEntryClosure;

class G1CollectedHeap : public SharedHeap {

@ -237,6 +227,7 @@ class G1CollectedHeap : public SharedHeap {
  friend class EvacPopObjClosure;
  friend class G1ParCleanupCTTask;

  friend class G1FreeHumongousRegionClosure;
  // Other related classes.
  friend class G1MarkSweep;

@ -267,6 +258,9 @@ private:
  // It keeps track of the humongous regions.
  HeapRegionSet _humongous_set;

  void clear_humongous_is_live_table();
  void eagerly_reclaim_humongous_regions();

  // The number of regions we could create by expansion.
  uint _expansion_regions;

@ -367,10 +361,25 @@ private:
  // than the current allocation region.
  size_t _summary_bytes_used;

  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1FastCSetBiasedMappedArray _in_cset_fast_test;
  // Records whether the region at the given index is kept live by roots or
  // references from the young generation.
  class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
   protected:
    bool default_value() const { return false; }
   public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_live(uint region) {
      set_by_index(region, true);
    }
    bool is_live(uint region) {
      return get_by_index(region);
    }
  };

  HumongousIsLiveBiasedMappedArray _humongous_is_live;
  // Stores whether during humongous object registration we found candidate regions.
  // If not, we can skip a few steps.
  bool _has_humongous_reclaim_candidates;

  volatile unsigned _gc_time_stamp;

@ -690,10 +699,24 @@ public:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  inline void set_humongous_is_live(oop obj);

  bool humongous_is_live(uint region) {
    return _humongous_is_live.is_live(region);
  }

  // Returns whether the given region (which must be a humongous (start) region)
  // is to be considered conservatively live regardless of any other conditions.
  bool humongous_region_is_always_live(uint index);
  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_in_cset_fast_test(uint index);
  // Register regions with humongous objects (actually on the start region) in
  // the in_cset_fast_test table.
  void register_humongous_regions_with_in_cset_fast_test();
  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_region_with_in_cset_fast_test(HeapRegion* r) {
    _in_cset_fast_test.set_by_index(r->hrs_index(), true);
    _in_cset_fast_test.set_in_cset(r->hrs_index());
  }

  // This is a fast test on whether a reference points into the

@ -1283,9 +1306,61 @@ public:
  virtual bool is_in(const void* p) const;

  // Return "TRUE" iff the given object address is within the collection
  // set.
  // set. Slow implementation.
  inline bool obj_in_cs(oop obj);

  inline bool is_in_cset(oop obj);

  inline bool is_in_cset_or_humongous(const oop obj);

  enum in_cset_state_t {
    InNeither,   // neither in collection set nor humongous
    InCSet,      // region is in collection set only
    IsHumongous  // region is a humongous start region
  };
 private:
  // Instances of this class are used for quick tests on whether a reference points
  // into the collection set or is a humongous object (points into a humongous
  // object).
  // Each of the array's elements denotes whether the corresponding region is in
  // the collection set or a humongous region.
  // We use this to quickly reclaim humongous objects: by making a humongous region
  // succeed this test, we sort-of add it to the collection set. During the reference
  // iteration closures, when we see a humongous region, we simply mark it as
  // referenced, i.e. live.
  class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
   protected:
    char default_value() const { return G1CollectedHeap::InNeither; }
   public:
    void set_humongous(uintptr_t index) {
      assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
      set_by_index(index, G1CollectedHeap::IsHumongous);
    }

    void clear_humongous(uintptr_t index) {
      set_by_index(index, G1CollectedHeap::InNeither);
    }

    void set_in_cset(uintptr_t index) {
      assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
      set_by_index(index, G1CollectedHeap::InCSet);
    }

    bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
    bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
    G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
    void clear() { G1BiasedMappedArray<char>::clear(); }
  };

  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1FastCSetBiasedMappedArray _in_cset_fast_test;

 public:

  inline in_cset_state_t in_cset_state(const oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1.
  bool is_in_g1_reserved(const void* p) const {
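The per-region state array above is indexed by address through a simple shift. A standalone sketch of that addressing scheme, assuming a power-of-two region size (the real G1BiasedMappedArray additionally biases the base pointer so heap addresses can index it directly; the class below keeps an explicit heap base instead):

#include <cassert>
#include <cstdint>
#include <vector>

// Sketch: one byte of state per region, located by shifting the offset of
// an address from the heap base by log2(region size).
enum State : char { InNeither, InCSet, IsHumongous };

class RegionStateArray {
  uintptr_t _heap_base;
  int _log_region_size;
  std::vector<char> _states; // one entry per region, default InNeither
 public:
  RegionStateArray(uintptr_t heap_base, int log_region_size, size_t num_regions)
    : _heap_base(heap_base), _log_region_size(log_region_size),
      _states(num_regions, InNeither) {}

  size_t region_index(uintptr_t addr) const {
    assert(addr >= _heap_base);
    return (addr - _heap_base) >> _log_region_size;
  }

  void set_in_cset(size_t i)   { _states[i] = InCSet; }
  void set_humongous(size_t i) { _states[i] = IsHumongous; }
  State at(uintptr_t addr) const { return (State)_states[region_index(addr)]; }
};

int main() {
  // 1 MB regions: an address 5 MB past the base falls into region 5.
  RegionStateArray arr(0x100000000ULL, 20, 16);
  arr.set_humongous(5);
  assert(arr.at(0x100000000ULL + 5 * 1024 * 1024 + 42) == IsHumongous);
  return 0;
}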
@ -1320,9 +1395,6 @@ public:
  // "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Same as above, restricted to a memory region.
  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl);

@ -1340,6 +1412,10 @@ public:
  // Return the region with the given index. It assumes the index is valid.
  inline HeapRegion* region_at(uint index) const;

  // Calculate the region index of the given address. Given address must be
  // within the heap.
  inline uint addr_to_region(HeapWord* addr) const;

  // Divide the heap region sequence into "chunks" of some size (the number
  // of regions divided by the number of parallel threads times some
  // overpartition factor, currently 4). Assumes that this will be called

@ -40,6 +40,13 @@
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
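A quick numeric check of the shift in addr_to_region above (the region size and heap base here are illustrative; G1 chooses the region size ergonomically):

#include <cassert>
#include <cstdint>

int main() {
  // With a 1 MB region grain, LogOfHRGrainBytes is 20: the byte offset
  // from the start of the reserved heap, shifted right by 20, yields the
  // region index, exactly as addr_to_region computes it.
  const int log_of_hr_grain_bytes = 20;                 // illustrative value
  uintptr_t heap_start = 0x80000000UL;                  // hypothetical base
  uintptr_t addr = heap_start + 5 * 1024 * 1024 + 100;  // 5 MB + 100 B in
  unsigned region = (unsigned)((addr - heap_start) >> log_of_hr_grain_bytes);
  assert(region == 5);
  return 0;
}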
@ -172,12 +179,11 @@ inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  bool ret = _in_cset_fast_test.get_by_address((HeapWord*)obj);
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");

@ -185,6 +191,18 @@ inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  return ret;
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

@ -288,4 +306,22 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // We not only set the "live" flag in the humongous_is_live table, but also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever set the "live" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (!_humongous_is_live.is_live(region)) {
    _humongous_is_live.set_live(region);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
@ -237,8 +237,10 @@ void G1GCPhaseTimes::note_gc_end() {
    _last_gc_worker_times_ms.verify();
    _last_gc_worker_other_times_ms.verify();

  _last_redirty_logged_cards_time_ms.verify();
  _last_redirty_logged_cards_processed_cards.verify();
  if (G1DeferredRSUpdate) {
    _last_redirty_logged_cards_time_ms.verify();
    _last_redirty_logged_cards_processed_cards.verify();
  }
}

void G1GCPhaseTimes::note_string_dedup_fixup_start() {

@ -255,6 +257,10 @@ void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
}

void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
  LineBuffer(level).append_and_print_cr("[%s: "SIZE_FORMAT"]", str, value);
}

void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
}

@ -357,6 +363,14 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
      _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
    }
  }
  if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
    print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
    if (G1Log::finest()) {
      print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total);
      print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
      print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
    }
  }
  print_stats(2, "Free CSet",
              (_recorded_young_free_cset_time_ms +
               _recorded_non_young_free_cset_time_ms));

@ -157,11 +157,17 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _cur_fast_reclaim_humongous_time_ms;
  size_t _cur_fast_reclaim_humongous_total;
  size_t _cur_fast_reclaim_humongous_candidates;
  size_t _cur_fast_reclaim_humongous_reclaimed;

  double _cur_verify_before_time_ms;
  double _cur_verify_after_time_ms;

  // Helper methods for detailed logging
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, size_t value);
  void print_stats(int level, const char* str, double value, uint workers);

 public:

@ -282,6 +288,16 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  void record_fast_reclaim_humongous_stats(size_t total, size_t candidates) {
    _cur_fast_reclaim_humongous_total = total;
    _cur_fast_reclaim_humongous_candidates = candidates;
  }

  void record_fast_reclaim_humongous_time_ms(double value, size_t reclaimed) {
    _cur_fast_reclaim_humongous_time_ms = value;
    _cur_fast_reclaim_humongous_reclaimed = reclaimed;
  }

  void record_young_cset_choice_time_ms(double time_ms) {
    _recorded_young_cset_choice_time_ms = time_ms;
  }

@ -348,6 +364,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
    return _recorded_non_young_free_cset_time_ms;
  }

  double fast_reclaim_humongous_time_ms() {
    return _cur_fast_reclaim_humongous_time_ms;
  }

  double average_last_update_rs_time() {
    return _last_update_rs_times_ms.average();
  }
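Given the print_stats format strings above ("[%s: %.1lf ms]" for times, "[%s: "SIZE_FORMAT"]" for counts), the new phase lines nest under the pause log roughly as follows; the indentation comes from the level argument and the numbers are illustrative, not from a real run:

   [Humongous Reclaim: 0.2 ms]
      [Humongous Total: 3]
      [Humongous Candidate: 2]
      [Humongous Reclaimed: 1]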
@ -44,7 +44,7 @@ template <class T>
inline void FilterIntoCSClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop) &&
      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
      _g1->is_in_cset_or_humongous(oopDesc::decode_heap_oop_not_null(heap_oop))) {
    _oc->do_oop(p);
  }
}

@ -67,7 +67,8 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {

  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (_g1->in_cset_fast_test(obj)) {
    G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
    if (state == G1CollectedHeap::InCSet) {
      // We're not going to even bother checking whether the object is
      // already forwarded or not, as this usually causes an immediate
      // stall. We'll try to prefetch the object (for write, given that

@ -86,6 +87,9 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {

      _par_scan_state->push_on_queue(p);
    } else {
      if (state == G1CollectedHeap::IsHumongous) {
        _g1->set_humongous_is_live(obj);
      }
      _par_scan_state->update_rs(_from, p, _worker_id);
    }
  }

@ -97,12 +101,14 @@ inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {

  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (_g1->in_cset_fast_test(obj)) {
    if (_g1->is_in_cset_or_humongous(obj)) {
      Prefetch::write(obj->mark_addr(), 0);
      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));

      // Place on the references queue
      _par_scan_state->push_on_queue(p);
    } else {
      assert(!_g1->obj_in_cs(obj), "checking");
    }
  }
}

@ -288,7 +288,12 @@ void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj
}

HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
  HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
  HeapWord* obj = NULL;
  if (purpose == GCAllocForSurvived) {
    obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
  } else {
    obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
  }
  if (obj != NULL) {
    return obj;
  }
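allocate_aligned above pads the allocation buffer's current top to the requested alignment before bumping it. A minimal sketch of that idea over a plain memory buffer (the real PLAB code also accounts for the padding as wasted space; align_up here is a local helper, not the HotSpot macro):

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Round addr up to the next multiple of alignment (a power of two).
static uintptr_t align_up(uintptr_t addr, uintptr_t alignment) {
  return (addr + alignment - 1) & ~(alignment - 1);
}

// Sketch of aligned bump-pointer allocation inside a fixed buffer:
// skip the padding bytes, then advance the top past the new block.
static void* allocate_aligned(uintptr_t* top, uintptr_t end,
                              size_t bytes, size_t alignment) {
  uintptr_t obj = align_up(*top, alignment);
  if (obj + bytes > end) {
    return NULL; // does not fit; the caller refills or allocates directly
  }
  *top = obj + bytes;
  return (void*)obj;
}

int main() {
  unsigned char buffer[1024];
  uintptr_t top = (uintptr_t)buffer;
  uintptr_t end = top + sizeof(buffer);
  void* p = allocate_aligned(&top, end, 100, 64);
  assert(p != NULL && ((uintptr_t)p % 64) == 0);
  return 0;
}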
@ -52,15 +52,20 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. So the same card may be
  // processed multiple times. So redo this check.
  if (_g1h->in_cset_fast_test(obj)) {
  G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
  if (in_cset_state == G1CollectedHeap::InCSet) {
    oop forwardee;
    if (obj->is_forwarded()) {
      forwardee = obj->forwardee();
    } else {
      forwardee = copy_to_survivor_space(obj);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    oopDesc::encode_store_heap_oop(p, forwardee);
  } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
    _g1h->set_humongous_is_live(obj);
  } else {
    assert(in_cset_state == G1CollectedHeap::InNeither,
           err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
  }

  assert(obj != NULL, "Must be");

@ -349,23 +349,8 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,

  assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");

  // The two flags below were introduced temporarily to serialize
  // the updating and scanning of remembered sets. There are some
  // race conditions when these two operations are done in parallel
  // and they are causing failures. When we resolve said race
  // conditions, we'll revert back to parallel remembered set
  // updating and scanning. See CRs 6677707 and 6677708.
  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
    updateRS(&into_cset_dcq, worker_i);
  } else {
    _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0);
    _g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
  }
  if (G1UseParallelRSetScanning || (worker_i == 0)) {
    scanRS(oc, code_root_cl, worker_i);
  } else {
    _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
  }
  updateRS(&into_cset_dcq, worker_i);
  scanRS(oc, code_root_cl, worker_i);

  // We now clear the cached values of _cset_rs_update_cl for this worker
  _cset_rs_update_cl[worker_i] = NULL;

@ -220,14 +220,6 @@
  product(uintx, G1HeapRegionSize, 0,                                       \
          "Size of the G1 regions.")                                        \
                                                                            \
  experimental(bool, G1UseParallelRSetUpdating, true,                       \
          "Enables the parallelization of remembered set updating "        \
          "during evacuation pauses")                                       \
                                                                            \
  experimental(bool, G1UseParallelRSetScanning, true,                       \
          "Enables the parallelization of remembered set scanning "        \
          "during evacuation pauses")                                       \
                                                                            \
  product(uintx, G1ConcRefinementThreads, 0,                                \
          "If non-0 is the number of parallel rem set update threads, "    \
          "otherwise the value is determined ergonomically.")               \

@ -289,6 +281,13 @@
          "The amount of code root chunks that should be kept at most "    \
          "as percentage of already allocated.")                            \
                                                                            \
  experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true,          \
          "Try to reclaim dead large objects at every young GC.")           \
                                                                            \
  experimental(bool, G1TraceReclaimDeadHumongousObjectsAtYoungGC, false,    \
          "Print some information about large object liveness "            \
          "at every young GC.")                                             \
                                                                            \
  experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                  \
          "An upper bound for the number of old CSet regions expressed "   \
          "as a percentage of the heap size.")                              \
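Both new flags are declared experimental, so exercising them on a command line requires unlocking experimental options first. An illustrative invocation (MyApp is a placeholder main class):

java -XX:+UnlockExperimentalVMOptions \
     -XX:+G1ReclaimDeadHumongousObjectsAtYoungGC \
     -XX:+G1TraceReclaimDeadHumongousObjectsAtYoungGC \
     -XX:+UseG1GC MyApp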
@ -94,26 +94,37 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  return !g1h->is_obj_dead(oop(p), this);
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
         err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
                 "HR: ["PTR_FORMAT", "PTR_FORMAT", "PTR_FORMAT") "
                 "addr: " PTR_FORMAT,
                 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // Old regions' dead objects may have dead classes
  // We need to find the next live object in some other
  // manner than getting the oop size
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (g1h->is_obj_dead(oop(addr), this)) {
    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
        getNextMarkedWordAddress(addr, prev_top_at_mark_start());
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

    assert(next > addr, "must get the next live object");

    return pointer_delta(next, addr);
  } else if (addr == top()) {
    return pointer_delta(end(), addr);
  }
  return oop(addr)->size();
  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
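block_size exists so heap walkers can step through a region block by block, even across spans of dead objects with unloaded classes. A standalone sketch of that iteration pattern over a toy region, where the precomputed sizes stand in for the block_is_obj/block_size queries above (sizes in words, values hypothetical):

#include <cstddef>
#include <cstdio>

// Sketch: walk a region from bottom to top in block-sized steps, the way
// HeapRegion clients use block_size(). blocks[] holds each block's size.
int main() {
  const size_t blocks[] = {3, 7, 2, 5}; // hypothetical block sizes in words
  const size_t top = 17;                // region's used size in words

  size_t cur = 0; // "bottom"
  size_t i = 0;
  while (cur < top) {
    size_t sz = blocks[i++]; // plays the role of block_size(cur)
    printf("block at offset %zu, %zu words\n", cur, sz);
    cur += sz; // advance to the next block, live or dead
  }
  return 0;
}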
@ -289,7 +289,7 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                        mtGC, 0, AllocFailStrategy::RETURN_NULL);
                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,

@ -695,6 +695,9 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
  clear_fcc();
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();

@ -185,6 +185,9 @@ public:
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // Returns whether this remembered set (and all sub-sets) contain no entries.
  bool is_empty() const;

  size_t occupied() const;
  size_t occ_fine() const;
  size_t occ_coarse() const;

@ -269,6 +272,10 @@ public:
    return _other_regions.hr();
  }

  bool is_empty() const {
    return (strong_code_roots_list_length() == 0) && _other_regions.is_empty();
  }

  size_t occupied() {
    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
    return occupied_locked();

@ -371,7 +378,7 @@ public:
  void strong_code_roots_do(CodeBlobClosure* blk) const;

  // Returns the number of elements in the strong code roots list
  size_t strong_code_roots_list_length() {
  size_t strong_code_roots_list_length() const {
    return _code_roots.length();
  }
Some files were not shown because too many files have changed in this diff.