J. Duke 2017-07-05 16:40:54 +02:00
commit dcf598a1cd
217 changed files with 20498 additions and 18736 deletions

View File

@@ -8,3 +8,4 @@ cbc8ad9dd0e085a607427ea35411990982f19a36 jdk7-b25
 3300a35a0bd56d695b92fe0b34f03ebbfc939064 jdk7-b31
 64da805be725721bf2004e7409a0d7a16fc8ddbc jdk7-b32
 bb1ef4ee3d2c8cbf43a37d372325a7952be590b9 jdk7-b33
+46a989ab932992b2084b946eeb322fa99b9fee6c jdk7-b34

View File

@@ -8,3 +8,4 @@ c0252adbb2abbfdd6c35595429ac6fbdd98e20ac jdk7-b30
 ef6af34d75a7b44e77083f1d4ee47631fa09d3b4 jdk7-b31
 80a0f46a6203e727012bd579fe38a609b83decce jdk7-b32
 6a5b9d2f8b20de54e3bfe33cd12bd0793caedc4e jdk7-b33
+0a812b9824e5d17b073765d1505594b49ff88a10 jdk7-b34

View File

@@ -8,3 +8,4 @@ d1605aabd0a15ecf93787c47de63073c33fba52d jdk7-b30
 9c2ecc2ffb125f14fab3857fe7689598956348a0 jdk7-b31
 b727c32788a906c04839516ae7443a085185a300 jdk7-b32
 585535ec8a14adafa6bfea65d6975e29094c8cec jdk7-b33
+5251a9cd8eb8743eee647365bee1c8afdc131556 jdk7-b34

View File

@@ -8,4 +8,4 @@ FIND=$MKS_HOME/find
 SED=$MKS_HOME/sed
 SORT=$MKS_HOME/sort
-$CD ../src/share/classes; $FIND sun/jvm/hotspot \( -name SCCS -prune \) -o -type d -print | $SED -e 's/\//./g' | $SORT > ../../../make/pkglist.txt
+$CD ../src/share/classes; $FIND sun/jvm/hotspot com/sun/java/swing -type d -print | $SED -e 's/\//./g' | $SORT > ../../../make/pkglist.txt
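Aside: the rewritten command builds pkglist.txt by listing every package directory and turning each path into a dot-separated package name; the change drops the SCCS pruning (no longer needed outside TeamWare/SCCS workspaces) and adds the com/sun/java/swing tree. A minimal standalone sketch of the same pipeline, assuming it is run from src/share/classes of a checkout (the output path here is illustrative):

    # Turn each package directory a/b/c into a.b.c and emit a sorted list.
    find sun/jvm/hotspot com/sun/java/swing -type d -print \
      | sed -e 's/\//./g' \
      | sort > pkglist.txt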

View File

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2008
 HS_MAJOR_VER=14
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=03
+HS_BUILD_NUMBER=04
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7

View File

@@ -41,8 +41,9 @@ GENERATED = $(TOPDIR)/../generated
 SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
 # gnumake 3.78.1 does not accept the *s that
-# are in AGENT_ALLFILES, so use the shell to expand them
-AGENT_ALLFILES := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_ALLFILES))
+# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
+AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
+AGENT_FILES2 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES2))
 SA_CLASSDIR = $(GENERATED)/saclasses
@@ -58,7 +59,7 @@ all:
 $(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
 fi
-$(GENERATED)/sa-jdi.jar: $(AGENT_ALLFILES)
+$(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
 $(QUIETLY) echo "Making $@"
 $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
 echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@@ -72,9 +73,18 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_ALLFILES)
 $(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
 mkdir -p $(SA_CLASSDIR); \
 fi
-$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) $(AGENT_ALLFILES)
+$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1)
+$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2)
 $(QUIETLY) $(REMOTE) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
 $(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
+$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
+$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
+$(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
+$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
+$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
 $(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ .
 $(QUIETLY) $(REMOTE) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
 $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
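Aside: the jar recipe now compiles in two passes, and the added -sourcepath $(AGENT_SRC_DIR) is what makes the split safe: when a class in $(AGENT_FILES1) references one listed only in $(AGENT_FILES2) (or vice versa), javac finds the missing type's source on the sourcepath and compiles it implicitly, so neither command line has to name every file. A reduced sketch of the pattern, using the variable names from this diff (a sketch only, not the full recipe):

    # Two javac passes over one logical source set; -sourcepath resolves
    # cross-references between the two halves on demand.
    $(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
            mkdir -p $(SA_CLASSDIR)
            $(COMPILE.JAVAC) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES1)
            $(COMPILE.JAVAC) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) $(AGENT_FILES2)
            $(RUN.JAR) cf $@ -C $(SA_CLASSDIR) .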

View File

@@ -33,40 +33,23 @@
 AGENT_SRC_DIR = $(AGENT_DIR)/src/share/classes
-AGENT_ALLFILES = \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/DebugServer.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/HelloWorld.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotAgent.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotSolarisVtblAccess.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotTypeDataBase.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/LinuxVtblAccess.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/ObjectHistogram.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/RMIHelper.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/StackTrace.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/TestDebugger.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/Win32VtblAccess.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Immediate.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ImmediateOrRegister.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Operand.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Register.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/AMD64Register.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/AMD64Registers.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/IA64Register.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/IA64Registers.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCArgument.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegister.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegisterType.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegisters.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86Register.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86RegisterPart.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86Registers.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86SegmentRegister.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86SegmentRegisters.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/BugSpotAgent.java \
+# Split the set of files into two sets because on the Linux platform
+# listing or compiling all the files results in an 'Argument list too long' error.
+
+AGENT_FILES1 = \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/tree/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/c1/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/code/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/amd64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/x86/*.java \
@@ -75,7 +58,6 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/sparc/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dummy/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/amd64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/ia64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \
@@ -107,7 +89,10 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/interpreter/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/jdi/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/livejvm/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/memory/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/oops/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/oops/*.java
+
+AGENT_FILES2 = \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/amd64/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/ia64/*.java \
@@ -127,7 +112,17 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/x86/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/jcore/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/soql/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/types/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/types/basic/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/*.java \
 $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/memo/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/action/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/classbrowser/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/table/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/tree/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/treetable/*.java \
+$(AGENT_SRC_DIR)/com/sun/java/swing/action/*.java \
+$(AGENT_SRC_DIR)/com/sun/java/swing/ui/*.java
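Aside: the 'Argument list too long' error the new comment refers to is the kernel's ARG_MAX limit on the combined size of argv and the environment at exec() time; once these wildcards expand to every agent source file, a single javac (or /bin/ls) command line can blow past it, and splitting the list in two keeps each invocation under the limit. A hypothetical alternative, not what this commit does, is to let xargs batch the arguments automatically:

    # Let find emit the file list and xargs pack it into as many javac
    # invocations as needed to stay under ARG_MAX.
    find $(AGENT_SRC_DIR) -name '*.java' -print \
      | xargs javac -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR)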

File diff suppressed because it is too large

View File

@@ -8,20 +8,20 @@ text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o;
 text: .text%__1cQAgentLibraryList2t6M_v_: arguments.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable.o;
 text: .text%__1cFRInfo2t6M_v_: c1_AllocTable.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_AllocTable_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_AllocTable_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals.o;
 text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Canonicalizer.o;
 text: .text%__1cFRInfo2t6M_v_: c1_Canonicalizer.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator.o;
 text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator_i486.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeStubs_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_CodeStubs_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeStubs_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CodeStubs_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compilation.o;
 text: .text%__1cFRInfo2t6M_v_: c1_Compilation.o;
 text: .text%__1cMelapsedTimer2t6M_v_: c1_Compilation.o;
@@ -29,9 +29,9 @@ text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compiler.o;
 text: .text%__1cFRInfo2t6M_v_: c1_Compiler.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap.o;
 text: .text%__1cFRInfo2t6M_v_: c1_FrameMap.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_FrameMap_i486.o;
-text: .text%__1cKc1_RegMask2t6M_v_: c1_FrameMap_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_FrameMap_x86.o;
+text: .text%__1cKc1_RegMask2t6M_v_: c1_FrameMap_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_GraphBuilder.o;
 text: .text%__1cFRInfo2t6M_v_: c1_GraphBuilder.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_IR.o;
@@ -43,41 +43,41 @@ text: .text%__1cFRInfo2t6M_v_: c1_InstructionPrinter.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items.o;
 text: .text%__1cFRInfo2t6M_v_: c1_Items.o;
 text: .text%__1cIHintItem2t6MpnJValueType_i_v_: c1_Items.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_Items_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Items_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIR.o;
 text: .text%__1cFRInfo2t6M_v_: c1_LIR.o;
 text: .text%__1cLLIR_OprFactHillegal6F_pnLLIR_OprDesc__: c1_LIR.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler.o;
 text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter.o;
 text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer.o;
 text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Loops.o;
 text: .text%__1cFRInfo2t6M_v_: c1_Loops.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_MacroAssembler_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_MacroAssembler_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_MacroAssembler_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_MacroAssembler_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Optimizer.o;
 text: .text%__1cFRInfo2t6M_v_: c1_Optimizer.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo.o;
 text: .text%__1cFRInfo2t6M_v_: c1_RInfo.o;
 text: .text%__1cKc1_RegMask2t6M_v_: c1_RInfo.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_RInfo_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_RInfo_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc.o;
 text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1.o;
 text: .text%__1cFRInfo2t6M_v_: c1_Runtime1.o;
 text: .text%__1cIiEntries2t6M_v_;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_Runtime1_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Runtime1_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ScanBlocks.o;
 text: .text%__1cFRInfo2t6M_v_: c1_ScanBlocks.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueMap.o;
@@ -105,8 +105,8 @@ text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o;
 text: .text%__1cMelapsedTimer2t6M_v_: fprofiler.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame.o;
 text: .text%__1cFRInfo2t6M_v_: frame.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame_i486.o;
-text: .text%__1cFRInfo2t6M_v_: frame_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame_x86.o;
+text: .text%__1cFRInfo2t6M_v_: frame_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o;
 text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o;
 text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o;
@@ -117,8 +117,8 @@ text: .text%__1cNCellTypeStateImake_top6F_0_: generateOopMap.o;
 text: .text%__1cMelapsedTimer2t6M_v_: generateOopMap.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o;
 text: .text%__1cKEntryPoint2t6M_v_;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter_i486.o;
-text: .text%__1cFRInfo2t6M_v_: interpreter_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter_x86.o;
+text: .text%__1cFRInfo2t6M_v_: interpreter_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: java.o;
 text: .text%__1cFRInfo2t6M_v_: java.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
@@ -151,16 +151,16 @@ text: .text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_: memoryService.o;
 text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_: memoryService.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodOop.o;
 text: .text%__1cFRInfo2t6M_v_: methodOop.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nativeInst_i486.o;
-text: .text%__1cFRInfo2t6M_v_: nativeInst_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nativeInst_x86.o;
+text: .text%__1cFRInfo2t6M_v_: nativeInst_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nmethod.o;
 text: .text%__1cFRInfo2t6M_v_: nmethod.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o;
 text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris.o;
 text: .text%__1cFRInfo2t6M_v_: os_solaris.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris_i486.o;
-text: .text%__1cFRInfo2t6M_v_: os_solaris_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris_x86.o;
+text: .text%__1cFRInfo2t6M_v_: os_solaris_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o;
 text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: parGCAllocBuffer.o;
 text: .text%__1cRalign_object_size6Fi_i_: parGCAllocBuffer.o;
@@ -181,8 +181,8 @@ text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o;
 text: .text%__1cJTimeStamp2t6M_v_: runtimeService.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint.o;
 text: .text%__1cFRInfo2t6M_v_: safepoint.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint_solaris_i486.o;
-text: .text%__1cFRInfo2t6M_v_: safepoint_solaris_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint_solaris_x86.o;
+text: .text%__1cFRInfo2t6M_v_: safepoint_solaris_x86.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o;
 text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedRuntime.o;
@@ -197,10 +197,10 @@ text: .text%__1cFRInfo2t6M_v_: vmStructs.o;
 text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o;
 text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_;
 text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vtableStubs_i486.o;
-text: .text%__1cFRInfo2t6M_v_: vtableStubs_i486.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vtableStubs_x86.o;
+text: .text%__1cFRInfo2t6M_v_: vtableStubs_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer_x86.o;
 text: .text%JNI_CreateJavaVM;
 text: .text%__1cCosVatomic_xchg_bootstrap6Fipoi_i_;
 text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_;
@@ -279,7 +279,7 @@ text: .text%__1cSThreadLocalStorageEinit6F_v_;
 text: .text%__1cSThreadLocalStorageHpd_init6F_v_;
 text: .text%__1cCosbDallocate_thread_local_storage6F_i_;
 text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_;
-text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_i486.o;
+text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_x86.o;
 text: .text%__1cPvm_init_globals6F_v_;
 text: .text%__1cScheck_ThreadShadow6F_v_;
 text: .text%__1cRcheck_basic_types6F_v_;
@@ -463,7 +463,7 @@ text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o;
 text: .text%__1cXresource_allocate_bytes6FI_pc_;
 text: .text%__1cKCodeBuffer2t6MpCi_v_;
 text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_;
-text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_i486.o;
+text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_x86.o;
 text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_;
 text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_;
 text: .text%__1cJAssemblerFpushl6MpnMRegisterImpl__v_;
@@ -497,14 +497,14 @@ text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_;
 text: .text%__1cFForteNregister_stub6FpkcpC3_v_;
 text: .text%__1cKVM_VersionWget_processor_features6F_v_;
 text: .text%__1cCosMsupports_sse6F_i_;
-text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_i486.o;
+text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_x86.o;
 text: .text%jio_snprintf;
 text: .text%jio_vsnprintf;
 text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_;
 text: .text%__1cSstubRoutines_init16F_v_;
 text: .text%__1cMStubRoutinesLinitialize16F_v_;
 text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_;
-text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_x86.o;
 text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_;
 text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_;
 text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_;
@@ -525,7 +525,7 @@ text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_;
 text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_;
 text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_;
 text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_;
-text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_x86.o;
 text: .text%__1cOMacroAssemblerFenter6M_v_;
 text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_;
 text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_;
@@ -534,14 +534,14 @@ text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_;
 text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_;
 text: .text%__1cJAssemblerGfstp_s6MnHAddress__v_;
 text: .text%__1cJAssemblerGfstp_d6MnHAddress__v_;
-text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_x86.o;
 text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_;
-text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_x86.o;
 text: .text%__1cJAssemblerExchg6MpnMRegisterImpl_nHAddress__v_;
 text: .text%__1cJAssemblerGpushad6M_v_;
 text: .text%__1cJAssemblerFpopad6M_v_;
-text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_i486.o;
-text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_x86.o;
+text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_x86.o;
 text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_;
 text: .text%__1cJAssemblerGfnsave6MnHAddress__v_;
 text: .text%__1cJAssemblerFfwait6M_v_;
@@ -552,7 +552,7 @@ text: .text%__1cJAssemblerFffree6Mi_v_;
 text: .text%__1cJAssemblerLemit_farith6Miii_v_;
 text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_;
 text: .text%__1cJAssemblerGfrstor6MnHAddress__v_;
-text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_x86.o;
 text: .text%__1cJTraceTime2T6M_v_;
 text: .text%__1cNcarSpace_init6F_v_;
 text: .text%__1cICarSpaceEinit6F_v_;
@@ -773,7 +773,7 @@ text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_;
 text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_;
 text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_;
 text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_;
-text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_i486.o;
+text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_x86.o;
 text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_;
 text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_;
 text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_;
@@ -785,7 +785,7 @@ text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_nHAddress__v_;
 text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_;
 text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_;
 text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_;
-text: .text%__1cLlog2_intptr6Fi_i_: interpreter_i486.o;
+text: .text%__1cLlog2_intptr6Fi_i_: interpreter_x86.o;
 text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_;
 text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_nHAddress__v_;
 text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_;
@@ -982,7 +982,7 @@ text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_;
 text: .text%__1cOMacroAssemblerIfcmp2int6MpnMRegisterImpl_i_v_;
 text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_;
 text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_;
-text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_i486.o;
+text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_x86.o;
 text: .text%__1cNTemplateTableGbranch6Fii_v_;
 text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_;
 text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_;
@@ -1488,7 +1488,7 @@ text: .text%__1cKSharedInfoLset_regName6F_v_;
 text: .text%__1cIRegAllocYinit_register_allocation6F_v_;
 text: .text%__1cIFrameMapEinit6F_v_;
 text: .text%__1cKc1_RegMaskKinit_masks6Fi_v_;
-text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_FrameMap_i486.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_FrameMap_x86.o;
 text: .text%__1cNc1_AllocTableLinit_tables6F_v_;
 text: .text%__1cIFrameMapOfirst_register6F_pnMRegisterImpl__;
 text: .text%__1cIFrameMapLcpu_reg2rnr6FpnMRegisterImpl__i_;
@@ -1502,7 +1502,7 @@ text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffe
 text: .text%__1cKCodeBufferQalloc_relocation6MI_v_;
 text: .text%__1cJOopMapSet2t6M_v_;
 text: .text%__1cJAssemblerEsubl6MnHAddress_i_v_;
-text: .text%__1cTsave_live_registers6FpnOMacroAssembler_i_pnGOopMap__: c1_Runtime1_i486.o;
+text: .text%__1cTsave_live_registers6FpnOMacroAssembler_i_pnGOopMap__: c1_Runtime1_x86.o;
 text: .text%__1cJAssemblerGfldenv6MnHAddress__v_;
 text: .text%__1cGOopMap2t6Mii_v_;
 text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_;
@@ -1564,10 +1564,10 @@ text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pCi_i_;
 text: .text%__1cJStubFrame2T6M_v_;
 text: .text%__1cIRuntime1Ygenerate_exception_throw6FpnNStubAssembler_pCpnMRegisterImpl__pnJOopMapSet__;
 text: .text%__1cOMacroAssemblerLtlab_refill6MrnFLabel_22_v_;
-text: .text%__1cLlog2_intptr6Fi_i_: assembler_i486.o;
+text: .text%__1cLlog2_intptr6Fi_i_: assembler_x86.o;
 text: .text%__1cOMacroAssemblerNeden_allocate6MpnMRegisterImpl_2i2rnFLabel__v_;
 text: .text%__1cOMacroAssemblerLverify_tlab6M_v_;
-text: .text%__1cLlog2_intptr6Fi_i_: c1_Runtime1_i486.o;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_Runtime1_x86.o;
 text: .text%__1cOMacroAssemblerNtlab_allocate6MpnMRegisterImpl_2i22rnFLabel__v_;
 text: .text%__1cRC1_MacroAssemblerRinitialize_object6MpnMRegisterImpl_22i22_v_;
 text: .text%__1cRC1_MacroAssemblerRinitialize_header6MpnMRegisterImpl_22_v_;
@@ -1581,7 +1581,7 @@ text: .text%__1cIiEntries2t6Miiii_v_;
 text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_;
 text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_;
 text: .text%__1cIRuntime1Rgenerate_patching6FpnNStubAssembler_pC_pnJOopMapSet__;
-text: .text%__1cWrestore_live_registers6FpnOMacroAssembler__v_: c1_Runtime1_i486.o;
+text: .text%__1cWrestore_live_registers6FpnOMacroAssembler__v_: c1_Runtime1_x86.o;
 text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
 text: .text%__1cNSafepointBlob2n6FII_pv_;
 text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_;
@@ -1778,8 +1778,8 @@ text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_;
 text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_;
 text: .text%__1cSstubRoutines_init26F_v_;
 text: .text%__1cMStubRoutinesLinitialize26F_v_;
-text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_i486.o;
-text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_x86.o;
+text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_x86.o;
 text: .text%__1cJAssemblerEincl6MnHAddress__v_;
 text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_;
 text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_;
@@ -3074,11 +3074,11 @@ text: .text%__1cEItemRget_jint_constant6kM_i_;
 text: .text%__1cLLIR_EmitterRarithmetic_op_int6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo__v_;
 text: .text%__1cLLIR_EmitterNarithmetic_op6MnJBytecodesECode_pnLLIR_OprDesc_44inFRInfo_pnMCodeEmitInfo__v_;
 text: .text%__1cLLIR_EmitterYstrength_reduce_multiply6MpnLLIR_OprDesc_i22_i_;
-text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter_i486.o;
-text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter_i486.o;
-text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter_i486.o;
+text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter_x86.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter_x86.o;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter_x86.o;
 text: .text%__1cILIR_ListKshift_left6MpnLLIR_OprDesc_222_v_;
-text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter_i486.o;
+text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o;
 text: .text%__1cIValueGenWcan_inline_as_constant6MpnEItem__i_;
 text: .text%__1cIRegAllocPget_register_rc6kMnFRInfo__i_;
 text: .text%__1cLGetRefCountGdo_cpu6Mi_v_: c1_RegAlloc.o;
@@ -3098,7 +3098,7 @@ text: .text%__1cMLIR_OpBranch2t6Mn0ANLIR_Condition_pnICodeStub_pnMCodeEmitInfo__
 text: .text%__1cLLIR_EmitterMindexed_load6MnFRInfo_nJBasicType_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_;
 text: .text%__1cLLIR_EmitterNarray_address6MpnLLIR_OprDesc_2inJBasicType__pnLLIR_Address__;
 text: .text%__1cLLIR_AddressFscale6FnJBasicType__n0AFScale__;
-text: .text%__1cILIR_ListEmove6MpnLLIR_Address_pnLLIR_OprDesc_pnMCodeEmitInfo__v_: c1_LIREmitter_i486.o;
+text: .text%__1cILIR_ListEmove6MpnLLIR_Address_pnLLIR_OprDesc_pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o;
 text: .text%__1cIRegAllocNoops_in_spill6kM_pnIintStack__;
 text: .text%__1cIRegAllocRoops_in_registers6kM_pnPRInfoCollection__;
 text: .text%__1cIValueGenbDsafepoint_poll_needs_register6F_i_;
@@ -3137,9 +3137,9 @@ text: .text%__1cHLIR_Op1Fvisit6MpnQLIR_OpVisitState__v_;
 text: .text%__1cPRegisterManagerElock6MnFRInfo__v_;
 text: .text%__1cHLIR_Op2Fvisit6MpnQLIR_OpVisitState__v_;
 text: .text%__1cMLIR_OpBranchFvisit6MpnQLIR_OpVisitState__v_;
-text: .text%__1cORangeCheckStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
-text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_CodeStubs_i486.o;
-text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeStubs_i486.o;
+text: .text%__1cORangeCheckStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_CodeStubs_x86.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeStubs_x86.o;
 text: .text%__1cNc1_AllocTableFmerge6Mp0_v_;
 text: .text%__1cGLIR_OpFvisit6MpnQLIR_OpVisitState__v_;
 text: .text%__1cQLIR_LocalCachingXcache_locals_for_blocks6MpnJBlockList_pnPRegisterManager_i_pnMLocalMapping__;
@@ -3201,7 +3201,7 @@ text: .text%__1cJLabelListIindex_of6kMkpnFLabel__i_: c1_LIROptimizer.o;
 text: .text%__1cRLIR_PeepholeStateYset_disable_optimization6Mi_v_;
 text: .text%__1cLLIR_OpLabelJemit_code6MpnVLIR_AbstractAssembler__v_;
 text: .text%__1cNLIR_OptimizerMemit_opLabel6MpnLLIR_OpLabel__v_;
-text: .text%__1cNLIR_OptimizerFvisit6M_v_: c1_LIROptimizer_i486.o;
+text: .text%__1cNLIR_OptimizerFvisit6M_v_: c1_LIROptimizer_x86.o;
 text: .text%__1cHLIR_Op0Jemit_code6MpnVLIR_AbstractAssembler__v_;
 text: .text%__1cNLIR_OptimizerIemit_op06MpnHLIR_Op0__v_;
 text: .text%__1cHLIR_Op2Jemit_code6MpnVLIR_AbstractAssembler__v_;
@@ -3225,7 +3225,7 @@ text: .text%__1cNLIR_OptimizerRreplace_stack_opr6MpnLLIR_OprDesc__2_;
 text: .text%__1cNLIR_OptimizerNoptimize_move6MpnHLIR_Op1_rpnLLIR_OprDesc_5_i_;
 text: .text%__1cRLIR_PeepholeStatebFequivalent_register_or_constant6MpnLLIR_OprDesc__2_;
 text: .text%__1cRLIR_PeepholeStateOequivalent_opr6MpnLLIR_OprDesc__2_;
-text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer_i486.o;
+text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer_x86.o;
 text: .text%__1cNLIR_OptimizerMis_cache_reg6MpnLLIR_OprDesc__i_;
 text: .text%__1cMLocalMappingMis_cache_reg6kMpnLLIR_OprDesc__i_;
 text: .text%__1cMLocalMappingMis_cache_reg6kMnFRInfo__i_;
@@ -3294,13 +3294,13 @@ text: .text%__1cNLIR_AssemblerVsetup_locals_at_entry6M_v_;
 text: .text%__1cIFrameMapYsignature_type_array_for6FpknIciMethod__pnNBasicTypeList__;
 text: .text%__1cIFrameMapScalling_convention6FpknIciMethod_pnIintArray__pnRCallingConvention__;
 text: .text%__1cIFrameMapScalling_convention6FirknOBasicTypeArray_pnIintArray__pnRCallingConvention__;
-text: .text%__1cIintArray2t6Mki1_v_: c1_FrameMap_i486.o;
+text: .text%__1cIintArray2t6Mki1_v_: c1_FrameMap_x86.o;
 text: .text%__1cIFrameMapRname_for_argument6Fi_i_;
 text: .text%__1cIFrameMapSfp_offset_for_name6kMiii_i_;
 text: .text%__1cIFrameMapPnum_local_names6kM_i_;
 text: .text%__1cIFrameMapNlocal_to_slot6kMii_i_;
 text: .text%__1cIFrameMapSfp_offset_for_slot6kMi_i_;
-text: .text%__1cQArgumentLocation2t6Mci_v_: c1_FrameMap_i486.o;
+text: .text%__1cQArgumentLocation2t6Mci_v_: c1_FrameMap_x86.o;
 text: .text%__1cQArgumentLocationSset_stack_location6Mi_v_;
 text: .text%__1cIFrameMapQaddress_for_name6kMiii_nHAddress__;
 text: .text%__1cIFrameMapQmake_new_address6kMi_nHAddress__;
@@ -3321,12 +3321,12 @@ text: .text%__1cNLIR_AssemblerbIadd_debug_info_for_null_check_here6MpnMCodeEmitI
 text: .text%__1cNLIR_AssemblerLcode_offset6kM_i_;
 text: .text%__1cNLIR_AssemblerbDadd_debug_info_for_null_check6MipnMCodeEmitInfo__v_;
 text: .text%__1cNLIR_AssemblerOemit_code_stub6MpnICodeStub__v_;
-text: .text%__1cVImplicitNullCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cVImplicitNullCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
 text: .text%__1cNLIR_AssemblerCpc6kM_pC_;
-text: .text%__1cICodeStubLset_code_pc6MpC_v_: c1_CodeStubs_i486.o;
-text: .text%__1cICodeStubMis_call_stub6kM_i_: c1_CodeStubs_i486.o;
+text: .text%__1cICodeStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o;
+text: .text%__1cICodeStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
 text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler.o;
-text: .text%__1cORangeCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cORangeCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
 text: .text%__1cNLIR_AssemblerOsafepoint_poll6MnFRInfo_pnMCodeEmitInfo__v_;
 text: .text%__1cNLIR_AssemblerZadd_debug_info_for_branch6MpnMCodeEmitInfo__v_;
 text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o;
@@ -3396,7 +3396,7 @@ text: .text%__1cNLIR_AssemblerWemit_exception_handler6M_i_;
 text: .text%__1cRC1_MacroAssemblerRexception_handler6Mii_v_;
 text: .text%__1cNLIR_AssemblerPemit_call_stubs6M_v_;
 text: .text%__1cNLIR_AssemblerbCmaybe_adjust_stack_alignment6MpnIciMethod__v_;
-text: .text%__1cKreal_index6FpnIFrameMap_i_i_: c1_LIRAssembler_i486.o;
+text: .text%__1cKreal_index6FpnIFrameMap_i_i_: c1_LIRAssembler_x86.o;
 text: .text%__1cLCompilationbEgenerate_exception_range_table6M_v_;
 text: .text%__1cOExceptionScopeGequals6kMp0_i_;
 text: .text%__1cLCompilationbBadd_exception_range_entries6MiipnOExceptionScope_ip2pi_v_;
@@ -3582,10 +3582,10 @@ text: .text%__1cLNewInstanceOas_NewInstance6M_p0_: c1_Instruction.o;
 text: .text%__1cIValueGenQexceptionPcRInfo6F_nFRInfo__;
 text: .text%__1cILIR_ListPthrow_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o;
 text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator.o;
-text: .text%__1cPNewInstanceStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cPNewInstanceStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
 text: .text%__1cOLIR_OpJavaCallFvisit6MpnQLIR_OpVisitState__v_;
 text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_LIR.o;
-text: .text%__1cOStaticCallStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cOStaticCallStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
 text: .text%__1cIFrameMapWcaller_save_cpu_reg_at6Fi_pnLLIR_OprDesc__;
 text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Instruction.o;
 text: .text%__1cIVoidTypeDtag6kM_nIValueTag__: c1_ValueType.o;
@@ -3604,12 +3604,12 @@ text: .text%__1cOoop_RelocationJpack_data6M_i_;
 text: .text%__1cNLIR_AssemblerPpatching_epilog6MpnMPatchingStub_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_;
 text: .text%__1cMPatchingStubHinstall6MpnOMacroAssembler_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_: c1_LIRAssembler.o;
 text: .text%__1cNLIR_AssemblerUappend_patching_stub6MpnMPatchingStub__v_;
-text: .text%__1cPNewInstanceStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cPNewInstanceStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
 text: .text%__1cNLIR_AssemblerJemit_call6MpnOLIR_OpJavaCall__v_;
 text: .text%__1cNLIR_AssemblerKalign_call6MnILIR_Code__v_;
-text: .text%__1cICodeStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
-text: .text%__1cOStaticCallStubLset_code_pc6MpC_v_: c1_CodeStubs_i486.o;
-text: .text%__1cOStaticCallStubMis_call_stub6kM_i_: c1_CodeStubs_i486.o;
+text: .text%__1cICodeStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cOStaticCallStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o;
+text: .text%__1cOStaticCallStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
 text: .text%__1cNLIR_AssemblerEcall6MpCnJrelocInfoJrelocType_pnMCodeEmitInfo__v_;
 text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
 text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o;
@@ -4010,15 +4010,15 @@ text: .text%__1cJTypeCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBui
 text: .text%__1cQNullCheckVisitorNdo_InstanceOf6MpnKInstanceOf__v_;
 text: .text%__1cQNullCheckVisitorMdo_CheckCast6MpnJCheckCast__v_;
 text: .text%__1cIValueGenNdo_InstanceOf6MpnKInstanceOf__v_;
-text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator_i486.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator_x86.o;
 text: .text%__1cLLIR_EmitterNinstanceof_op6MpnLLIR_OprDesc_2pnHciKlass_nFRInfo_5ipnMCodeEmitInfo__v_;
 text: .text%__1cILIR_ListKinstanceof6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo__v_;
 text: .text%__1cPLIR_OpTypeCheck2t6MnILIR_Code_pnLLIR_OprDesc_3pnHciKlass_33ipnMCodeEmitInfo_7pnICodeStub__v_;
 text: .text%__1cIValueGenMdo_CheckCast6MpnJCheckCast__v_;
 text: .text%__1cILIR_ListJcheckcast6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo_6pnICodeStub__v_;
-text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator_i486.o;
+text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o;
 text: .text%__1cPLIR_OpTypeCheckFvisit6MpnQLIR_OpVisitState__v_;
-text: .text%__1cTSimpleExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cTSimpleExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
 text: .text%__1cPLIR_OpTypeCheckJemit_code6MpnVLIR_AbstractAssembler__v_;
 text: .text%__1cNLIR_OptimizerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_;
 text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIROptimizer.o;
@ -4026,7 +4026,7 @@ text: .text%__1cIintArrayIindex_of6kMki_i_: c1_LIROptimizer.o;
text: .text%__1cNLIR_AssemblerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_; text: .text%__1cNLIR_AssemblerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_;
text: .text%__1cIciObjectIencoding6M_pnI_jobject__; text: .text%__1cIciObjectIencoding6M_pnI_jobject__;
text: .text%__1cJAssemblerEcmpl6MnHAddress_pnI_jobject__v_; text: .text%__1cJAssemblerEcmpl6MnHAddress_pnI_jobject__v_;
text: .text%__1cTSimpleExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o; text: .text%__1cTSimpleExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cTSimpleExceptionStubJemit_code6MpnNLIR_Assembler__v_; text: .text%__1cTSimpleExceptionStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cJLoadFieldIis_equal6kMpnLInstruction__i_: c1_Instruction.o; text: .text%__1cJLoadFieldIis_equal6kMpnLInstruction__i_: c1_Instruction.o;
text: .text%__1cJLoadFieldMas_LoadField6M_p0_: c1_Instruction.o; text: .text%__1cJLoadFieldMas_LoadField6M_p0_: c1_Instruction.o;
@ -4194,7 +4194,7 @@ text: .text%__1cLLIR_EmitterOnew_type_array6MnFRInfo_nJBasicType_pnLLIR_OprDesc_
text: .text%__1cQNewTypeArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_; text: .text%__1cQNewTypeArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_;
text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_; text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_;
text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_; text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_;
text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter_i486.o; text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter_x86.o;
text: .text%__1cILIR_ListOallocate_array6MnFRInfo_11111nJBasicType_1pnICodeStub__v_; text: .text%__1cILIR_ListOallocate_array6MnFRInfo_11111nJBasicType_1pnICodeStub__v_;
text: .text%__1cIValueGenMdo_Intrinsic6MpnJIntrinsic__v_; text: .text%__1cIValueGenMdo_Intrinsic6MpnJIntrinsic__v_;
text: .text%__1cIValueGenMdo_ArrayCopy6MpnJIntrinsic__v_; text: .text%__1cIValueGenMdo_ArrayCopy6MpnJIntrinsic__v_;
@ -4209,12 +4209,12 @@ text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_Instruction.o;
text: .text%__1cRpositive_constant6FpnLInstruction__i_: c1_CodeGenerator.o; text: .text%__1cRpositive_constant6FpnLInstruction__i_: c1_CodeGenerator.o;
text: .text%__1cLArrayLengthOas_ArrayLength6M_p0_: c1_GraphBuilder.o; text: .text%__1cLArrayLengthOas_ArrayLength6M_p0_: c1_GraphBuilder.o;
text: .text%__1cQis_constant_zero6FpnLInstruction__i_: c1_CodeGenerator.o; text: .text%__1cQis_constant_zero6FpnLInstruction__i_: c1_CodeGenerator.o;
text: .text%__1cILIR_ListJarraycopy6MpnLLIR_OprDesc_22222pnMciArrayKlass_ipnMCodeEmitInfo__v_: c1_CodeGenerator_i486.o; text: .text%__1cILIR_ListJarraycopy6MpnLLIR_OprDesc_22222pnMciArrayKlass_ipnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o;
text: .text%__1cLLIR_EmitterNwrite_barrier6MpnLLIR_OprDesc_2_v_; text: .text%__1cLLIR_EmitterNwrite_barrier6MpnLLIR_OprDesc_2_v_;
text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter_i486.o; text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter_x86.o;
text: .text%__1cILIR_ListUunsigned_shift_right6MpnLLIR_OprDesc_222_v_; text: .text%__1cILIR_ListUunsigned_shift_right6MpnLLIR_OprDesc_222_v_;
text: .text%__1cQLIR_OpAllocArrayFvisit6MpnQLIR_OpVisitState__v_; text: .text%__1cQLIR_OpAllocArrayFvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cQNewTypeArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o; text: .text%__1cQNewTypeArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cPLIR_OpArrayCopyFvisit6MpnQLIR_OpVisitState__v_; text: .text%__1cPLIR_OpArrayCopyFvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cQLIR_OpAllocArrayJemit_code6MpnVLIR_AbstractAssembler__v_; text: .text%__1cQLIR_OpAllocArrayJemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerQemit_alloc_array6MpnQLIR_OpAllocArray__v_; text: .text%__1cNLIR_OptimizerQemit_alloc_array6MpnQLIR_OpAllocArray__v_;
@ -4229,12 +4229,12 @@ text: .text%__1cNLIR_AssemblerQemit_alloc_array6MpnQLIR_OpAllocArray__v_;
text: .text%__1cNLIR_AssemblerSarray_element_size6kMnJBasicType__nHAddressLScaleFactor__; text: .text%__1cNLIR_AssemblerSarray_element_size6kMnJBasicType__nHAddressLScaleFactor__;
text: .text%__1cRC1_MacroAssemblerOallocate_array6MpnMRegisterImpl_222inHAddressLScaleFactor_2rnFLabel__v_; text: .text%__1cRC1_MacroAssemblerOallocate_array6MpnMRegisterImpl_222inHAddressLScaleFactor_2rnFLabel__v_;
text: .text%__1cRC1_MacroAssemblerMtry_allocate6MpnMRegisterImpl_2i22rnFLabel__v_; text: .text%__1cRC1_MacroAssemblerMtry_allocate6MpnMRegisterImpl_2i22rnFLabel__v_;
text: .text%__1cQNewTypeArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o; text: .text%__1cQNewTypeArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_; text: .text%__1cNLIR_AssemblerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_;
text: .text%__1cMciArrayKlassMelement_type6M_pnGciType__; text: .text%__1cMciArrayKlassMelement_type6M_pnGciType__;
text: .text%__1cNArrayCopyStub2t6MpnMCodeEmitInfo_pnOStaticCallStub__v_; text: .text%__1cNArrayCopyStub2t6MpnMCodeEmitInfo_pnOStaticCallStub__v_;
text: .text%__1cFRInfoMset_word_reg6MkpnMRegisterImpl__v_; text: .text%__1cFRInfoMset_word_reg6MkpnMRegisterImpl__v_;
text: .text%__1cNArrayCopyStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o; text: .text%__1cNArrayCopyStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerOpush_parameter6MpnMRegisterImpl_i_v_; text: .text%__1cNLIR_AssemblerOpush_parameter6MpnMRegisterImpl_i_v_;
text: .text%__1cQNewTypeArrayStubJemit_code6MpnNLIR_Assembler__v_; text: .text%__1cQNewTypeArrayStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cNArrayCopyStubJemit_code6MpnNLIR_Assembler__v_; text: .text%__1cNArrayCopyStubJemit_code6MpnNLIR_Assembler__v_;
@ -4295,14 +4295,14 @@ text: .text%__1cLLIR_EmitterIshift_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_5
text: .text%__1cILIR_ListKshift_left6MnFRInfo_i1_v_: c1_LIREmitter.o; text: .text%__1cILIR_ListKshift_left6MnFRInfo_i1_v_: c1_LIREmitter.o;
text: .text%__1cILIR_ListKlogical_or6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o; text: .text%__1cILIR_ListKlogical_or6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o;
text: .text%__1cOLIR_OpAllocObjFvisit6MpnQLIR_OpVisitState__v_; text: .text%__1cOLIR_OpAllocObjFvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cSNewObjectArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o; text: .text%__1cSNewObjectArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cOLIR_OpAllocObjJemit_code6MpnVLIR_AbstractAssembler__v_; text: .text%__1cOLIR_OpAllocObjJemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_; text: .text%__1cNLIR_OptimizerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_;
text: .text%__1cNLIR_AssemblerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_; text: .text%__1cNLIR_AssemblerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_;
text: .text%__1cRC1_MacroAssemblerPallocate_object6MpnMRegisterImpl_22ii2rnFLabel__v_; text: .text%__1cRC1_MacroAssemblerPallocate_object6MpnMRegisterImpl_22ii2rnFLabel__v_;
text: .text%__1cNLIR_AssemblerOmembar_release6M_v_; text: .text%__1cNLIR_AssemblerOmembar_release6M_v_;
text: .text%__1cNLIR_AssemblerGmembar6M_v_; text: .text%__1cNLIR_AssemblerGmembar6M_v_;
text: .text%__1cSNewObjectArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o; text: .text%__1cSNewObjectArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerOmembar_acquire6M_v_; text: .text%__1cNLIR_AssemblerOmembar_acquire6M_v_;
text: .text%__1cEBaseHas_Base6M_p0_: c1_IR.o; text: .text%__1cEBaseHas_Base6M_p0_: c1_IR.o;
text: .text%__1cNLIR_AssemblerOemit_osr_entry6MpnHIRScope_ipnFLabel_i_v_; text: .text%__1cNLIR_AssemblerOemit_osr_entry6MpnHIRScope_ipnFLabel_i_v_;
@ -4708,11 +4708,11 @@ text: .text%__1cILIR_ListLshift_right6MpnLLIR_OprDesc_222_v_;
text: .text%__1cIValueGenLdo_NegateOp6MpnINegateOp__v_; text: .text%__1cIValueGenLdo_NegateOp6MpnINegateOp__v_;
text: .text%__1cLLIR_EmitterGnegate6MnFRInfo_pnLLIR_OprDesc__v_; text: .text%__1cLLIR_EmitterGnegate6MnFRInfo_pnLLIR_OprDesc__v_;
text: .text%__1cILIR_ListGnegate6MnFRInfo_1_v_: c1_LIREmitter.o; text: .text%__1cILIR_ListGnegate6MnFRInfo_1_v_: c1_LIREmitter.o;
text: .text%__1cXArrayStoreExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o; text: .text%__1cXArrayStoreExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cXArrayStoreExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o; text: .text%__1cXArrayStoreExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerEleal6MpnLLIR_OprDesc_2_v_; text: .text%__1cNLIR_AssemblerEleal6MpnLLIR_OprDesc_2_v_;
text: .text%__1cNLIR_AssemblerGnegate6MpnLLIR_OprDesc_2_v_; text: .text%__1cNLIR_AssemblerGnegate6MpnLLIR_OprDesc_2_v_;
text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler_i486.o; text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler_x86.o;
text: .text%__1cXArrayStoreExceptionStubJemit_code6MpnNLIR_Assembler__v_; text: .text%__1cXArrayStoreExceptionStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cIRuntime1Tresolve_static_call6FpnKJavaThread_pnHoopDesc__pC_; text: .text%__1cIRuntime1Tresolve_static_call6FpnKJavaThread_pnHoopDesc__pC_;
text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_; text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_;
@ -4788,7 +4788,7 @@ text: .text%__1cNLIR_AssemblerIfpu_push6MnFRInfo__v_;
text: .text%__1cIFrameMapLFpuStackSimEpush6Mi_v_; text: .text%__1cIFrameMapLFpuStackSimEpush6Mi_v_;
text: .text%__1cNLIR_AssemblerKfpu_on_tos6MnFRInfo__v_; text: .text%__1cNLIR_AssemblerKfpu_on_tos6MnFRInfo__v_;
text: .text%__1cIFrameMapLFpuStackSimPoffset_from_tos6kMi_i_; text: .text%__1cIFrameMapLFpuStackSimPoffset_from_tos6kMi_i_;
text: .text%__1cIintArrayIindex_of6kMki_i_: c1_FrameMap_i486.o; text: .text%__1cIintArrayIindex_of6kMki_i_: c1_FrameMap_x86.o;
text: .text%__1cNLIR_AssemblerHfpu_pop6MnFRInfo__v_; text: .text%__1cNLIR_AssemblerHfpu_pop6MnFRInfo__v_;
text: .text%__1cIFrameMapLFpuStackSimDpop6Mi_i_; text: .text%__1cIFrameMapLFpuStackSimDpop6Mi_i_;
text: .text%__1cNLIR_AssemblerKround32_op6MpnLLIR_OprDesc_2_v_; text: .text%__1cNLIR_AssemblerKround32_op6MpnLLIR_OprDesc_2_v_;
@ -4797,7 +4797,7 @@ text: .text%__1cNLIR_AssemblerJreset_FPU6M_v_;
text: .text%__1cNLIR_AssemblerIemit_op36MpnHLIR_Op3__v_; text: .text%__1cNLIR_AssemblerIemit_op36MpnHLIR_Op3__v_;
text: .text%__1cNLIR_AssemblerParithmetic_idiv6MnILIR_Code_pnLLIR_OprDesc_333pnMCodeEmitInfo__v_; text: .text%__1cNLIR_AssemblerParithmetic_idiv6MnILIR_Code_pnLLIR_OprDesc_333pnMCodeEmitInfo__v_;
text: .text%__1cNLIR_AssemblerXadd_debug_info_for_div06MipnMCodeEmitInfo__v_; text: .text%__1cNLIR_AssemblerXadd_debug_info_for_div06MipnMCodeEmitInfo__v_;
text: .text%__1cNDivByZeroStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o; text: .text%__1cNDivByZeroStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNDivByZeroStubJemit_code6MpnNLIR_Assembler__v_; text: .text%__1cNDivByZeroStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o; text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o;
text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_GraphBuilder.o; text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_GraphBuilder.o;
@ -4874,12 +4874,12 @@ text: .text%__1cFKlassQup_cast_abstract6M_p0_;
text: .text%__1cRComputeEntryStackHdo_byte6M_v_: generateOopMap.o; text: .text%__1cRComputeEntryStackHdo_byte6M_v_: generateOopMap.o;
text: .text%__1cNSharedRuntimeDd2i6Fd_i_; text: .text%__1cNSharedRuntimeDd2i6Fd_i_;
text: .text%__1cSInterpreterRuntimeWslow_signature_handler6FpnKJavaThread_pnNmethodOopDesc_pi5_pC_; text: .text%__1cSInterpreterRuntimeWslow_signature_handler6FpnKJavaThread_pnNmethodOopDesc_pi5_pC_;
text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_i486.o; text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_x86.o;
text: .text%__1cUSlowSignatureHandlerLpass_object6M_v_: interpreterRT_i486.o; text: .text%__1cUSlowSignatureHandlerLpass_object6M_v_: interpreterRT_x86.o;
text: .text%__1cXNativeSignatureIteratorIdo_array6Mii_v_: interpreterRT_i486.o; text: .text%__1cXNativeSignatureIteratorIdo_array6Mii_v_: interpreterRT_x86.o;
text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_i486.o; text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_x86.o;
text: .text%__1cUSlowSignatureHandlerIpass_int6M_v_: interpreterRT_i486.o; text: .text%__1cUSlowSignatureHandlerIpass_int6M_v_: interpreterRT_x86.o;
text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_i486.o; text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_x86.o;
text: .text%jni_GetFloatArrayRegion: jni.o; text: .text%jni_GetFloatArrayRegion: jni.o;
text: .text%jni_GetCharArrayRegion: jni.o; text: .text%jni_GetCharArrayRegion: jni.o;
text: .text%jni_SetFloatField: jni.o; text: .text%jni_SetFloatField: jni.o;
@ -4906,8 +4906,8 @@ text: .text%__1cLLIR_EmitterQreturn_op_prolog6Mi_v_;
text: .text%__1cLLIR_EmitterMmonitor_exit6MnFRInfo_11i_v_; text: .text%__1cLLIR_EmitterMmonitor_exit6MnFRInfo_11i_v_;
text: .text%__1cILIR_ListNunlock_object6MnFRInfo_11pnICodeStub__v_; text: .text%__1cILIR_ListNunlock_object6MnFRInfo_11pnICodeStub__v_;
text: .text%__1cKLIR_OpLockFvisit6MpnQLIR_OpVisitState__v_; text: .text%__1cKLIR_OpLockFvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cQMonitorEnterStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o; text: .text%__1cQMonitorEnterStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cRMonitorAccessStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o; text: .text%__1cRMonitorAccessStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cKLIR_OpLockJemit_code6MpnVLIR_AbstractAssembler__v_; text: .text%__1cKLIR_OpLockJemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerJemit_lock6MpnKLIR_OpLock__v_; text: .text%__1cNLIR_OptimizerJemit_lock6MpnKLIR_OpLock__v_;
text: .text%__1cNLIR_AssemblerPmonitor_address6MinFRInfo__v_; text: .text%__1cNLIR_AssemblerPmonitor_address6MinFRInfo__v_;
@ -4915,7 +4915,7 @@ text: .text%__1cIFrameMapbEaddress_for_monitor_lock_index6kMi_nHAddress__;
text: .text%__1cIFrameMapbAfp_offset_for_monitor_lock6kMi_i_; text: .text%__1cIFrameMapbAfp_offset_for_monitor_lock6kMi_i_;
text: .text%__1cNLIR_AssemblerJemit_lock6MpnKLIR_OpLock__v_; text: .text%__1cNLIR_AssemblerJemit_lock6MpnKLIR_OpLock__v_;
text: .text%__1cRC1_MacroAssemblerLlock_object6MpnMRegisterImpl_22rnFLabel__v_; text: .text%__1cRC1_MacroAssemblerLlock_object6MpnMRegisterImpl_22rnFLabel__v_;
text: .text%__1cQMonitorEnterStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o; text: .text%__1cQMonitorEnterStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cIFrameMapWmonitor_object_regname6kMi_nHOptoRegEName__; text: .text%__1cIFrameMapWmonitor_object_regname6kMi_nHOptoRegEName__;
text: .text%__1cIFrameMapbCfp_offset_for_monitor_object6kMi_i_; text: .text%__1cIFrameMapbCfp_offset_for_monitor_object6kMi_i_;
text: .text%__1cMCodeEmitInfobHlocation_for_monitor_object_index6Mi_nILocation__; text: .text%__1cMCodeEmitInfobHlocation_for_monitor_object_index6Mi_nILocation__;
@ -4925,7 +4925,7 @@ text: .text%__1cIFrameMapbFlocation_for_monitor_lock_index6kMipnILocation__i_;
text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_; text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_;
text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_; text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_;
text: .text%__1cRC1_MacroAssemblerNunlock_object6MpnMRegisterImpl_22rnFLabel__v_; text: .text%__1cRC1_MacroAssemblerNunlock_object6MpnMRegisterImpl_22rnFLabel__v_;
text: .text%__1cPMonitorExitStubMis_call_stub6kM_i_: c1_CodeStubs_i486.o; text: .text%__1cPMonitorExitStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
text: .text%__1cQMonitorEnterStubJemit_code6MpnNLIR_Assembler__v_; text: .text%__1cQMonitorEnterStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cNLIR_AssemblerRload_receiver_reg6MpnMRegisterImpl__v_; text: .text%__1cNLIR_AssemblerRload_receiver_reg6MpnMRegisterImpl__v_;
text: .text%__1cNLIR_AssemblerLmonitorexit6MnFRInfo_1pnMRegisterImpl_i3_v_; text: .text%__1cNLIR_AssemblerLmonitorexit6MnFRInfo_1pnMRegisterImpl_i3_v_;
@ -5168,7 +5168,7 @@ text: .text%__1cFRInfoOas_register_lo6kM_pnMRegisterImpl__;
text: .text%__1cCosHrealloc6FpvI_1_; text: .text%__1cCosHrealloc6FpvI_1_;
text: .text%Unsafe_GetNativeFloat; text: .text%Unsafe_GetNativeFloat;
text: .text%__1cIValueGenQdo_currentThread6MpnJIntrinsic__v_; text: .text%__1cIValueGenQdo_currentThread6MpnJIntrinsic__v_;
text: .text%__1cILIR_ListKget_thread6MnFRInfo__v_: c1_CodeGenerator_i486.o; text: .text%__1cILIR_ListKget_thread6MnFRInfo__v_: c1_CodeGenerator_x86.o;
text: .text%__1cNLIR_AssemblerKget_thread6MpnLLIR_OprDesc__v_; text: .text%__1cNLIR_AssemblerKget_thread6MpnLLIR_OprDesc__v_;
text: .text%__1cIValueGenSload_item_patching6MpnHIRScope_ipnEItem_pnKValueStack_pnOExceptionScope__v_; text: .text%__1cIValueGenSload_item_patching6MpnHIRScope_ipnEItem_pnKValueStack_pnOExceptionScope__v_;
text: .text%__1cEItemUget_jobject_constant6kM_pnIciObject__; text: .text%__1cEItemUget_jobject_constant6kM_pnIciObject__;
@ -5246,7 +5246,7 @@ text: .text%__1cGThreadLnmethods_do6M_v_;
text: .text%__1cFframeLnmethods_do6M_v_; text: .text%__1cFframeLnmethods_do6M_v_;
text: .text%__1cFframeVnmethods_code_blob_do6M_v_; text: .text%__1cFframeVnmethods_code_blob_do6M_v_;
text: .text%__1cILIR_ListEidiv6MnFRInfo_i11pnMCodeEmitInfo__v_; text: .text%__1cILIR_ListEidiv6MnFRInfo_i11pnMCodeEmitInfo__v_;
text: .text%__1cLlog2_intptr6Fi_i_: c1_LIRAssembler_i486.o; text: .text%__1cLlog2_intptr6Fi_i_: c1_LIRAssembler_x86.o;
text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_; text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_;
text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o; text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o;
text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o; text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o;
@ -5423,13 +5423,13 @@ text: .text%__1cIValueGenPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_;
text: .text%__1cLLIR_EmitterOget_raw_unsafe6MnFRInfo_pnLLIR_OprDesc_3inJBasicType__v_; text: .text%__1cLLIR_EmitterOget_raw_unsafe6MnFRInfo_pnLLIR_OprDesc_3inJBasicType__v_;
text: .text%__1cILIR_ListMload_mem_reg6MpnLLIR_Address_nFRInfo_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_; text: .text%__1cILIR_ListMload_mem_reg6MpnLLIR_Address_nFRInfo_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
text: .text%__1cIValueGenPdo_LookupSwitch6MpnMLookupSwitch__v_; text: .text%__1cIValueGenPdo_LookupSwitch6MpnMLookupSwitch__v_;
text: .text%__1cUcreate_lookup_ranges6FpnMLookupSwitch__pnQLookupRangeArray__: c1_CodeGenerator_i486.o; text: .text%__1cUcreate_lookup_ranges6FpnMLookupSwitch__pnQLookupRangeArray__: c1_CodeGenerator_x86.o;
text: .text%__1cLLIR_EmitterVlookupswitch_range_op6MpnLLIR_OprDesc_iipnKBlockBegin__v_; text: .text%__1cLLIR_EmitterVlookupswitch_range_op6MpnLLIR_OprDesc_iipnKBlockBegin__v_;
text: .text%__1cNSharedRuntimeEldiv6Fxx_x_; text: .text%__1cNSharedRuntimeEldiv6Fxx_x_;
text: .text%Unsafe_GetObjectVolatile; text: .text%Unsafe_GetObjectVolatile;
text: .text%signalHandler; text: .text%signalHandler;
text: .text%JVM_handle_solaris_signal; text: .text%JVM_handle_solaris_signal;
text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_i486.o; text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_x86.o;
text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_; text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_;
text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_; text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_;
text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_; text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_;
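The entries above are Sun Studio (CC) mangled C++ symbols used as link-order directives; the only change across these hunks is the rename of the i486-suffixed object files to x86. For orientation, one entry decoded by hand (a manual reading of the mangling scheme, not a machine-verified demangling):

// Hand-decoded sketch of one reorder entry (hedged: decoded manually).
//   __1cNLIR_AssemblerJemit_call6MpnOLIR_OpJavaCall__v_
// Each capital letter appears to encode a name length plus one (N = 13 chars,
// J = 9, O = 14); '6M' marks a member function and the trailing 'v' a void
// return, so the entry corresponds to a declaration shaped like:
class LIR_OpJavaCall;
class LIR_Assembler {
 public:
  void emit_call(LIR_OpJavaCall*);  // the profile hit recorded by this entry
};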
@ -37,8 +37,9 @@ GENERATED = ../generated
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
# gnumake 3.78.1 does not accept the *s that
-# are in AGENT_ALLFILES, so use the shell to expand them
-AGENT_ALLFILES := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_ALLFILES))
+# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
+AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
+AGENT_FILES2 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES2))
SA_CLASSDIR = $(GENERATED)/saclasses
@ -52,7 +53,7 @@ all:
$(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
fi
-$(GENERATED)/sa-jdi.jar: $(AGENT_ALLFILES)
+$(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
$(QUIETLY) echo "Making $@";
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@ -66,9 +67,17 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_ALLFILES)
$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
mkdir -p $(SA_CLASSDIR); \
fi
-$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) $(AGENT_ALLFILES)
+$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1)
+$(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2)
$(QUIETLY) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
+$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
+$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
+$(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
+$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
+$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
$(QUIETLY) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ .
$(QUIETLY) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
$(QUIETLY) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
@ -49,15 +49,22 @@ SA_PROPERTIES = $(SA_CLASSDIR)\sa.properties
default:: $(GENERATED)\sa-jdi.jar
-$(GENERATED)\sa-jdi.jar: $(AGENT_ALLFILES:/=\)
+$(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\)
@if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR)
@echo ...Building sa-jdi.jar
@echo ...$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) ....
-@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) $(AGENT_ALLFILES:/=\)
+@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\)
+@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\)
$(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo $(SA_BUILD_VERSION_PROP) > $(SA_PROPERTIES)
$(RUN_JAR) cf $@ -C saclasses .
$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
+$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
+$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
+$(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
+$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
+$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
@ -956,7 +956,8 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
    size->load_item();
    store_stack_parameter (size->result(),
                           in_ByteSize(STACK_BIAS +
-                                      (i + frame::memory_parameter_word_sp_offset) * wordSize));
+                                      frame::memory_parameter_word_sp_offset * wordSize +
+                                      i * sizeof(jint)));
  }
  // This instruction can be deoptimized in the slow path : use
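The rewritten offset in the hunk above stops allotting one full machine word per array dimension and instead packs each dimension size as a 32-bit jint above the memory-parameter area. A small check of the two formulas, with illustrative constants standing in for the frame values (wordSize, the sp offset, and STACK_BIAS are assumptions, not taken from this diff):

// Hedged arithmetic check; sp_off and wordSize are illustrative stand-ins.
#include <cstdint>
constexpr int64_t wordSize = 8;   // 64-bit SPARC word, assumed
constexpr int64_t sp_off   = 23;  // stands in for memory_parameter_word_sp_offset
constexpr int64_t off_old(int64_t i) { return (i + sp_off) * wordSize; }   // one word per dimension
constexpr int64_t off_new(int64_t i) { return sp_off * wordSize + i * 4; } // jint-packed dimensions
static_assert(off_old(0) == off_new(0), "first dimension lands at the same offset");
static_assert(off_old(1) - off_new(1) == 4, "later dimensions pack at jint stride");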
@ -204,3 +204,9 @@ void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen)
  NativeInstruction* ni = nativeInstruction_at(x);
  ni->set_long_at(0, u.l);
}
+
+void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+}
+
+void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+}
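The two new overrides are deliberately empty: a relocation only needs fixing after a move if its instruction encodes something position-dependent, and these SPARC polling relocations apparently carry no such payload. As an illustrative counter-example (toy types, not the HotSpot API), a non-trivial override would have to do something like this:

// Toy sketch with assumed types: a pc-relative displacement to a fixed
// external target must be rebound when the instruction moves buffers.
#include <cstdint>
struct BufSketch { unsigned char* insn_addr; };
struct PcRelativeRelocSketch {
  std::int64_t disp;  // target address minus instruction address
  void fix_after_move(const BufSketch* src, BufSketch* dest) {
    disp += src->insn_addr - dest->insn_addr;  // same target, new pc
  }
};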
@ -465,9 +465,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
-#ifdef COMPILER2
#ifdef _LP64
-      // Can't be tiered (yet)
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
@ -476,11 +474,12 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
        stk_reg_pairs += 2;
      }
#else
+#ifdef COMPILER2
      // For 32-bit build, can't pass longs in O-regs because they become
      // I-regs and get trashed. Use G-regs instead. G1 and G4 are almost
      // spare and available. This convention isn't used by the Sparc ABI or
      // anywhere else. If we're tiered then we don't use G-regs because c1
-      // can't deal with them as a "pair".
+      // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
      // G0: zero
      // G1: 1st Long arg
      // G2: global allocated to TLS
@ -500,7 +499,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
-#endif // _LP64
#else // COMPILER2
      if (int_reg_pairs + 1 < int_reg_max) {
        if (is_outgoing) {
@ -514,6 +512,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
        stk_reg_pairs += 2;
      }
#endif // COMPILER2
+#endif // _LP64
      break;
    case T_FLOAT:
@ -699,17 +698,16 @@ Register AdapterGenerator::next_arg_slot(const int st_off){
// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
-#ifdef COMPILER2
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
+#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off)); // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off)); // hi bits
-#endif // _LP64
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
@ -721,6 +719,7 @@ void AdapterGenerator::store_c2i_long(Register r, Register base,
    __ stw(r , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
+#endif // _LP64
  tag_c2i_arg(frame::TagCategory2, base, st_off, r);
}
@ -1637,7 +1636,7 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
    }
  } else if (dst.is_single_phys_reg()) {
    if (src.is_adjacent_aligned_on_stack(2)) {
-      __ ld_long(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
+      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    } else {
      // dst is a single reg.
      // Remember lo is low address not msb for stack slots
@ -1811,7 +1810,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {
-
  // Native nmethod wrappers never take possesion of the oop arguments.
  // So the caller will gc the arguments. The only thing we need an
  // oopMap for is if the call is static
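The thread through the hunks above is the same edit: the _LP64 test is hoisted outside the COMPILER2 test, so the 64-bit V9 path is taken by every build, tiered and client included, instead of only by server-compiler builds. Reduced to a skeleton, the guard shape goes from COMPILER2-outermost to this:

// Skeleton of the reordered guards above; bodies elided on purpose.
#ifdef _LP64
  // 64-bit: V9 register/slot conventions for longs, for every compiler
#else
# ifdef COMPILER2
  // 32-bit server compiler: G-register or split-word handling
# else
  // 32-bit client/interpreter handling
# endif // COMPILER2
#endif // _LP64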
@ -1,5 +1,5 @@
/*
- * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,12 +22,6 @@
 *
 */
-inline void Assembler::emit_long64(jlong x) {
-  *(jlong*) _code_pos = x;
-  _code_pos += sizeof(jlong);
-  code_section()->set_end(_code_pos);
-}
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  unsigned char op = branch[0];
  assert(op == 0xE8 /* call */ ||
@ -69,18 +63,25 @@ inline void MacroAssembler::pd_print_patched_instruction(address branch) {
}
#endif // ndef PRODUCT
-inline void MacroAssembler::movptr(Address dst, intptr_t src) {
-#ifdef _LP64
-  Assembler::mov64(dst, src);
-#else
-  Assembler::movl(dst, src);
-#endif // _LP64
-}
-inline void MacroAssembler::movptr(Register dst, intptr_t src) {
-#ifdef _LP64
-  Assembler::mov64(dst, src);
+#ifndef _LP64
+inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; }
+inline int Assembler::prefixq_and_encode(int reg_enc) { return reg_enc; }
+inline int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { return dst_enc << 3 | src_enc; }
+inline int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { return dst_enc << 3 | src_enc; }
+inline void Assembler::prefix(Register reg) {}
+inline void Assembler::prefix(Address adr) {}
+inline void Assembler::prefixq(Address adr) {}
+inline void Assembler::prefix(Address adr, Register reg, bool byteinst) {}
+inline void Assembler::prefixq(Address adr, Register reg) {}
+inline void Assembler::prefix(Address adr, XMMRegister reg) {}
#else
-  Assembler::movl(dst, src);
-#endif // _LP64
+inline void Assembler::emit_long64(jlong x) {
+  *(jlong*) _code_pos = x;
+  _code_pos += sizeof(jlong);
+  code_section()->set_end(_code_pos);
}
+#endif // _LP64
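Several hunks below select constants with LP64_ONLY and NOT_LP64 (for example call_stub_size = NOT_LP64(15) LP64_ONLY(28) further down). The defining header is not part of this diff, so as a stated assumption for reading them, macros of this family conventionally expand as:

// Assumed definitions of the word-size selection macros (not from this diff):
#ifdef _LP64
  #define LP64_ONLY(code) code
  #define NOT_LP64(code)
#else
  #define LP64_ONLY(code)
  #define NOT_LP64(code) code
#endif
// Reading: NOT_LP64(15) LP64_ONLY(28) collapses to 15 on 32-bit and 28 on 64-bit.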
File diff suppressed because it is too large.
@ -1,64 +0,0 @@
/*
* Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
unsigned char op = branch[0];
assert(op == 0xE8 /* call */ ||
op == 0xE9 /* jmp */ ||
op == 0xEB /* short jmp */ ||
(op & 0xF0) == 0x70 /* short jcc */ ||
op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
"Invalid opcode at patch point");
if (op == 0xEB || (op & 0xF0) == 0x70) {
// short offset operators (jmp and jcc)
char* disp = (char*) &branch[1];
int imm8 = target - (address) &disp[1];
guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
*disp = imm8;
} else {
int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
int imm32 = target - (address) &disp[1];
*disp = imm32;
}
}
#ifndef PRODUCT
inline void MacroAssembler::pd_print_patched_instruction(address branch) {
const char* s;
unsigned char op = branch[0];
if (op == 0xE8) {
s = "call";
} else if (op == 0xE9 || op == 0xEB) {
s = "jmp";
} else if ((op & 0xF0) == 0x70) {
s = "jcc";
} else if (op == 0x0F) {
s = "jcc";
} else {
s = "????";
}
tty->print("%s (unresolved)", s);
}
#endif // ndef PRODUCT
File diff suppressed because it is too large.
@ -43,11 +43,12 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
-   __ pushl(rax);
+   LP64_ONLY(ShouldNotReachHere());
+   __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
-   __ popl(rax);
+   __ pop(rax);
  }
  Label NaN, do_return;
@ -61,7 +62,7 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
  // input is NaN -> return 0
  __ bind(NaN);
-  __ xorl(result()->as_register(), result()->as_register());
+  __ xorptr(result()->as_register(), result()->as_register());
  __ bind(do_return);
  __ jmp(_continuation);
@ -139,7 +140,7 @@ NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKl
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
-  __ movl(rdx, _klass_reg->as_register());
+  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
@ -306,10 +307,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(_obj != noreg, "must be a valid register");
  Register tmp = rax;
  if (_obj == tmp) tmp = rbx;
-  __ pushl(tmp);
+  __ push(tmp);
  __ get_thread(tmp);
-  __ cmpl(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
+  __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
-  __ popl(tmp);
+  __ pop(tmp);
  __ jcc(Assembler::notEqual, call_patch);
  // access_field patches may execute the patched code before it's
@ -434,7 +435,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
-     __ movl (Address(rsp, st_off), r[i]);
+     __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
@ -449,7 +450,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  ce->add_call_info_here(info());
#ifndef PRODUCT
-  __ increment(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
+  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif
  __ jmp(_continuation);
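Every mechanical rewrite in this stub file trades a 32-bit-only instruction (pushl, popl, movl, cmpl) for a width-agnostic macro-assembler form (push, pop, movptr, cmpptr). A minimal sketch of the dispatch such a helper performs; the signatures here are illustrative, not the HotSpot declarations:

// Illustrative pointer-width dispatch behind a movptr-style helper.
class MacroAsmSketch {
 public:
  void movl(int dst_reg, long src);  // assumed 32-bit move primitive
  void movq(int dst_reg, long src);  // assumed 64-bit move primitive
  void movptr(int dst_reg, long src) {
#ifdef _LP64
    movq(dst_reg, src);   // pointers are 8 bytes
#else
    movl(dst_reg, src);   // pointers are 4 bytes
#endif
  }
};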
@ -36,27 +36,34 @@ enum {
// registers
enum {
-  pd_nof_cpu_regs_frame_map = 8,  // number of registers used during code emission
-  pd_nof_fpu_regs_frame_map = 8,  // number of registers used during code emission
-  pd_nof_xmm_regs_frame_map = 8,  // number of registers used during code emission
-  pd_nof_caller_save_cpu_regs_frame_map = 6,  // number of registers killed by calls
-  pd_nof_caller_save_fpu_regs_frame_map = 8,  // number of registers killed by calls
-  pd_nof_caller_save_xmm_regs_frame_map = 8,  // number of registers killed by calls
-  pd_nof_cpu_regs_reg_alloc = 6,  // number of registers that are visible to register allocator
+  pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers,       // number of registers used during code emission
+  pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers,  // number of registers used during code emission
+  pd_nof_xmm_regs_frame_map = XMMRegisterImpl::number_of_registers,    // number of registers used during code emission
+#ifdef _LP64
+  #define UNALLOCATED 4  // rsp, rbp, r15, r10
+#else
+  #define UNALLOCATED 2  // rsp, rbp
+#endif // LP64
+  pd_nof_caller_save_cpu_regs_frame_map = pd_nof_cpu_regs_frame_map - UNALLOCATED,  // number of registers killed by calls
+  pd_nof_caller_save_fpu_regs_frame_map = pd_nof_fpu_regs_frame_map,  // number of registers killed by calls
+  pd_nof_caller_save_xmm_regs_frame_map = pd_nof_xmm_regs_frame_map,  // number of registers killed by calls
+  pd_nof_cpu_regs_reg_alloc = pd_nof_caller_save_cpu_regs_frame_map,  // number of registers that are visible to register allocator
  pd_nof_fpu_regs_reg_alloc = 6,  // number of registers that are visible to register allocator
-  pd_nof_cpu_regs_linearscan = 8,  // number of registers visible to linear scan
-  pd_nof_fpu_regs_linearscan = 8,  // number of registers visible to linear scan
-  pd_nof_xmm_regs_linearscan = 8,  // number of registers visible to linear scan
+  pd_nof_cpu_regs_linearscan = pd_nof_cpu_regs_frame_map,  // number of registers visible to linear scan
+  pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map,  // number of registers visible to linear scan
+  pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map,  // number of registers visible to linear scan
  pd_first_cpu_reg = 0,
-  pd_last_cpu_reg = 5,
+  pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
  pd_first_byte_reg = 2,
  pd_last_byte_reg = 5,
  pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
  pd_last_fpu_reg = pd_first_fpu_reg + 7,
  pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
-  pd_last_xmm_reg = pd_first_xmm_reg + 7
+  pd_last_xmm_reg = pd_first_xmm_reg + pd_nof_xmm_regs_frame_map - 1
};
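The enum now derives its counts rather than hard-coding eights and sixes. Recomputing what it encodes, using the register totals and the UNALLOCATED comments from the hunk (a check of the arithmetic only, with local names):

// Local recomputation of the derived constants above.
constexpr int nof_cpu_regs(bool lp64) { return lp64 ? 16 : 8; }  // x86_64 vs i486
constexpr int unallocated(bool lp64)  { return lp64 ? 4 : 2; }   // rsp, rbp (+ r10, r15)
constexpr int caller_save(bool lp64)  { return nof_cpu_regs(lp64) - unallocated(lp64); }
static_assert(caller_save(false) == 6,  "matches the old hard-coded 32-bit value");
static_assert(caller_save(true)  == 12, "consistent with pd_last_cpu_reg = 11, zero-based");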
@ -39,10 +39,15 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
    opr = LIR_OprFact::address(new LIR_Address(rsp_opr, st_off, type));
  } else if (r_1->is_Register()) {
    Register reg = r_1->as_Register();
-   if (r_2->is_Register()) {
+   if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
      Register reg2 = r_2->as_Register();
+#ifdef _LP64
+     assert(reg2 == reg, "must be same register");
+     opr = as_long_opr(reg);
+#else
      opr = as_long_opr(reg2, reg);
+#endif // _LP64
-   } else if (type == T_OBJECT) {
+   } else if (type == T_OBJECT || type == T_ARRAY) {
      opr = as_oop_opr(reg);
    } else {
      opr = as_opr(reg);
@ -88,18 +93,39 @@ LIR_Opr FrameMap::rax_oop_opr;
LIR_Opr FrameMap::rdx_oop_opr;
LIR_Opr FrameMap::rcx_oop_opr;
-LIR_Opr FrameMap::rax_rdx_long_opr;
-LIR_Opr FrameMap::rbx_rcx_long_opr;
+LIR_Opr FrameMap::long0_opr;
+LIR_Opr FrameMap::long1_opr;
LIR_Opr FrameMap::fpu0_float_opr;
LIR_Opr FrameMap::fpu0_double_opr;
LIR_Opr FrameMap::xmm0_float_opr;
LIR_Opr FrameMap::xmm0_double_opr;
+#ifdef _LP64
+LIR_Opr FrameMap::r8_opr;
+LIR_Opr FrameMap::r9_opr;
+LIR_Opr FrameMap::r10_opr;
+LIR_Opr FrameMap::r11_opr;
+LIR_Opr FrameMap::r12_opr;
+LIR_Opr FrameMap::r13_opr;
+LIR_Opr FrameMap::r14_opr;
+LIR_Opr FrameMap::r15_opr;
+// r10 and r15 can never contain oops since they aren't available to
+// the allocator
+LIR_Opr FrameMap::r8_oop_opr;
+LIR_Opr FrameMap::r9_oop_opr;
+LIR_Opr FrameMap::r11_oop_opr;
+LIR_Opr FrameMap::r12_oop_opr;
+LIR_Opr FrameMap::r13_oop_opr;
+LIR_Opr FrameMap::r14_oop_opr;
+#endif // _LP64
LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_xmm_regs[] = { 0, };
-XMMRegister FrameMap::_xmm_regs [8] = { 0, };
+XMMRegister FrameMap::_xmm_regs [] = { 0, };
XMMRegister FrameMap::nr2xmmreg(int rnr) {
  assert(_init_done, "tables not initialized");
@ -113,18 +139,39 @@ XMMRegister FrameMap::nr2xmmreg(int rnr) {
void FrameMap::init() {
  if (_init_done) return;
-  assert(nof_cpu_regs == 8, "wrong number of CPU registers");
+  assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
-  map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0); rsi_oop_opr = LIR_OprFact::single_cpu_oop(0);
-  map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1); rdi_oop_opr = LIR_OprFact::single_cpu_oop(1);
-  map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2); rbx_oop_opr = LIR_OprFact::single_cpu_oop(2);
-  map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3); rax_oop_opr = LIR_OprFact::single_cpu_oop(3);
-  map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4); rdx_oop_opr = LIR_OprFact::single_cpu_oop(4);
-  map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5); rcx_oop_opr = LIR_OprFact::single_cpu_oop(5);
-  map_register(6, rsp); rsp_opr = LIR_OprFact::single_cpu(6);
-  map_register(7, rbp); rbp_opr = LIR_OprFact::single_cpu(7);
-  rax_rdx_long_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
-  rbx_rcx_long_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
+  map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0);
+  map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1);
+  map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2);
+  map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3);
+  map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4);
+  map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5);
+#ifndef _LP64
+  // The unallocatable registers are at the end
+  map_register(6, rsp);
+  map_register(7, rbp);
+#else
+  map_register( 6, r8);  r8_opr  = LIR_OprFact::single_cpu(6);
+  map_register( 7, r9);  r9_opr  = LIR_OprFact::single_cpu(7);
+  map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8);
+  map_register( 9, r12); r12_opr = LIR_OprFact::single_cpu(9);
+  map_register(10, r13); r13_opr = LIR_OprFact::single_cpu(10);
+  map_register(11, r14); r14_opr = LIR_OprFact::single_cpu(11);
+  // The unallocatable registers are at the end
+  map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12);
+  map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13);
+  map_register(14, rsp);
+  map_register(15, rbp);
+#endif // _LP64
+#ifdef _LP64
+  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/);
+  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/);
+#else
+  long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
+  long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
+#endif // _LP64
  fpu0_float_opr = LIR_OprFact::single_fpu(0);
  fpu0_double_opr = LIR_OprFact::double_fpu(0);
  xmm0_float_opr = LIR_OprFact::single_xmm(0);
@ -137,6 +184,15 @@ void FrameMap::init() {
  _caller_save_cpu_regs[4] = rdx_opr;
  _caller_save_cpu_regs[5] = rcx_opr;
+#ifdef _LP64
+  _caller_save_cpu_regs[6] = r8_opr;
+  _caller_save_cpu_regs[7] = r9_opr;
+  _caller_save_cpu_regs[8] = r11_opr;
+  _caller_save_cpu_regs[9] = r12_opr;
+  _caller_save_cpu_regs[10] = r13_opr;
+  _caller_save_cpu_regs[11] = r14_opr;
+#endif // _LP64
  _xmm_regs[0] = xmm0;
  _xmm_regs[1] = xmm1;
@ -147,18 +203,51 @@
  _xmm_regs[6] = xmm6;
  _xmm_regs[7] = xmm7;
+#ifdef _LP64
+  _xmm_regs[8] = xmm8;
+  _xmm_regs[9] = xmm9;
+  _xmm_regs[10] = xmm10;
+  _xmm_regs[11] = xmm11;
+  _xmm_regs[12] = xmm12;
+  _xmm_regs[13] = xmm13;
+  _xmm_regs[14] = xmm14;
+  _xmm_regs[15] = xmm15;
+#endif // _LP64
  for (int i = 0; i < 8; i++) {
    _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
+  }
+  for (int i = 0; i < nof_caller_save_xmm_regs ; i++) {
    _caller_save_xmm_regs[i] = LIR_OprFact::single_xmm(i);
  }
  _init_done = true;
+  rsi_oop_opr = as_oop_opr(rsi);
+  rdi_oop_opr = as_oop_opr(rdi);
+  rbx_oop_opr = as_oop_opr(rbx);
+  rax_oop_opr = as_oop_opr(rax);
+  rdx_oop_opr = as_oop_opr(rdx);
+  rcx_oop_opr = as_oop_opr(rcx);
+  rsp_opr = as_pointer_opr(rsp);
+  rbp_opr = as_pointer_opr(rbp);
+#ifdef _LP64
+  r8_oop_opr = as_oop_opr(r8);
+  r9_oop_opr = as_oop_opr(r9);
+  r11_oop_opr = as_oop_opr(r11);
+  r12_oop_opr = as_oop_opr(r12);
+  r13_oop_opr = as_oop_opr(r13);
+  r14_oop_opr = as_oop_opr(r14);
+#endif // _LP64
  VMRegPair regs;
  BasicType sig_bt = T_OBJECT;
  SharedRuntime::java_calling_convention(&sig_bt, &regs, 1, true);
  receiver_opr = as_oop_opr(regs.first()->as_Register());
+  assert(receiver_opr == rcx_oop_opr, "rcvr ought to be rcx");
}
@ -38,8 +38,13 @@
  nof_xmm_regs = pd_nof_xmm_regs_frame_map,
  nof_caller_save_xmm_regs = pd_nof_caller_save_xmm_regs_frame_map,
  first_available_sp_in_frame = 0,
+#ifndef _LP64
  frame_pad_in_bytes = 8,
  nof_reg_args = 2
+#else
+  frame_pad_in_bytes = 16,
+  nof_reg_args = 6
+#endif // _LP64
};
private:
@ -65,17 +70,49 @@
  static LIR_Opr rax_oop_opr;
  static LIR_Opr rdx_oop_opr;
  static LIR_Opr rcx_oop_opr;
-  static LIR_Opr rax_rdx_long_opr;
-  static LIR_Opr rbx_rcx_long_opr;
+#ifdef _LP64
+  static LIR_Opr r8_opr;
+  static LIR_Opr r9_opr;
+  static LIR_Opr r10_opr;
+  static LIR_Opr r11_opr;
+  static LIR_Opr r12_opr;
+  static LIR_Opr r13_opr;
+  static LIR_Opr r14_opr;
+  static LIR_Opr r15_opr;
+  static LIR_Opr r8_oop_opr;
+  static LIR_Opr r9_oop_opr;
+  static LIR_Opr r11_oop_opr;
+  static LIR_Opr r12_oop_opr;
+  static LIR_Opr r13_oop_opr;
+  static LIR_Opr r14_oop_opr;
+#endif // _LP64
+  static LIR_Opr long0_opr;
+  static LIR_Opr long1_opr;
  static LIR_Opr fpu0_float_opr;
  static LIR_Opr fpu0_double_opr;
  static LIR_Opr xmm0_float_opr;
  static LIR_Opr xmm0_double_opr;
+#ifdef _LP64
+  static LIR_Opr as_long_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+  }
+  static LIR_Opr as_pointer_opr(Register r) {
+    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+  }
+#else
  static LIR_Opr as_long_opr(Register r, Register r2) {
    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r2));
  }
+  static LIR_Opr as_pointer_opr(Register r) {
+    return LIR_OprFact::single_cpu(cpu_reg2rnr(r));
+  }
+#endif // _LP64
  // VMReg name for spilled physical FPU stack slot n
  static VMReg fpu_regname (int n);
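On LP64, both as_long_opr and as_pointer_opr above build a double_cpu operand from the same register twice: a long or a pointer fits in one 64-bit register, but the LIR operand model still tracks a lo and a hi half. A toy model of that pairing invariant (local types, not the LIR_OprFact encoding):

// Toy model: lo and hi halves name one register on LP64, two on 32-bit.
struct LongOprSketch { int lo_rnr, hi_rnr; };
constexpr LongOprSketch make_long(int lo, int hi) { return {lo, hi}; }
#ifdef _LP64
constexpr LongOprSketch long0 = make_long(3, 3);  // rax carries both halves
#else
constexpr LongOprSketch long0 = make_long(3, 4);  // rax/rdx pair
#endif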
File diff suppressed because it is too large.
@ -36,13 +36,20 @@
  address float_constant(float f);
  address double_constant(double d);
+  bool is_literal_address(LIR_Address* addr);
+  // When we need to use something other than rscratch1 use this
+  // method.
+  Address as_Address(LIR_Address* addr, Register tmp);
public:
  void store_parameter(Register r, int offset_from_esp_in_words);
  void store_parameter(jint c, int offset_from_esp_in_words);
  void store_parameter(jobject c, int offset_from_esp_in_words);
-  enum { call_stub_size = 15,
+  enum { call_stub_size = NOT_LP64(15) LP64_ONLY(28),
         exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
-         deopt_handler_size = 10
+         deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
  };
@ -77,7 +77,7 @@ LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  switch (type->tag()) {
    case intTag:    opr = FrameMap::rax_opr;     break;
    case objectTag: opr = FrameMap::rax_oop_opr; break;
-   case longTag:   opr = FrameMap::rax_rdx_long_opr; break;
+   case longTag:   opr = FrameMap::long0_opr;   break;
    case floatTag:  opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break;
    case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
@ -117,12 +117,14 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
bool LIRGenerator::can_inline_as_constant(Value v) const {
+  if (v->type()->tag() == longTag) return false;
  return v->type()->tag() != objectTag ||
    (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}
bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
+  if (c->type() == T_LONG) return false;
  return c->type() != T_OBJECT || c->as_jobject() == NULL;
}
@ -155,6 +157,13 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + index_opr->as_jint() * elem_size, type);
  } else {
+#ifdef _LP64
+   if (index_opr->type() == T_INT) {
+     LIR_Opr tmp = new_register(T_LONG);
+     __ convert(Bytecodes::_i2l, index_opr, tmp);
+     index_opr = tmp;
+   }
+#endif // _LP64
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
@ -164,7 +173,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
    // This store will need a precise card mark, so go ahead and
    // compute the full adddres instead of computing once for the
    // store and again for the card mark.
-   LIR_Opr tmp = new_register(T_INT);
+   LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, 0, type);
  } else {
@ -174,9 +183,8 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
void LIRGenerator::increment_counter(address counter, int step) { void LIRGenerator::increment_counter(address counter, int step) {
LIR_Opr temp = new_register(T_INT); LIR_Opr pointer = new_pointer_register();
LIR_Opr pointer = new_register(T_INT); __ move(LIR_OprFact::intptrConst(counter), pointer);
__ move(LIR_OprFact::intConst((int)counter), pointer);
LIR_Address* addr = new LIR_Address(pointer, 0, T_INT); LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
increment_counter(addr, step); increment_counter(addr, step);
} }
@ -481,7 +489,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
left.load_item(); left.load_item();
right.load_item(); right.load_item();
LIR_Opr reg = FrameMap::rax_rdx_long_opr; LIR_Opr reg = FrameMap::long0_opr;
arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL); arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
LIR_Opr result = rlock_result(x); LIR_Opr result = rlock_result(x);
__ move(reg, result); __ move(reg, result);
@ -690,10 +698,10 @@ void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value
// compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
cmp_value.load_item_force(FrameMap::rax_rdx_long_opr); cmp_value.load_item_force(FrameMap::long0_opr);
// new value must be in rcx,ebx (hi,lo) // new value must be in rcx,ebx (hi,lo)
new_value.load_item_force(FrameMap::rbx_rcx_long_opr); new_value.load_item_force(FrameMap::long1_opr);
// object pointer register is overwritten with field address // object pointer register is overwritten with field address
obj.load_item(); obj.load_item();
@ -720,7 +728,10 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
assert(obj.type()->tag() == objectTag, "invalid type"); assert(obj.type()->tag() == objectTag, "invalid type");
assert(offset.type()->tag() == intTag, "invalid type");
// In 64bit the type can be long, sparc doesn't have this assert
// assert(offset.type()->tag() == intTag, "invalid type");
assert(cmp.type()->tag() == type->tag(), "invalid type"); assert(cmp.type()->tag() == type->tag(), "invalid type");
assert(val.type()->tag() == type->tag(), "invalid type"); assert(val.type()->tag() == type->tag(), "invalid type");
@ -735,8 +746,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
cmp.load_item_force(FrameMap::rax_opr); cmp.load_item_force(FrameMap::rax_opr);
val.load_item(); val.load_item();
} else if (type == longType) { } else if (type == longType) {
cmp.load_item_force(FrameMap::rax_rdx_long_opr); cmp.load_item_force(FrameMap::long0_opr);
val.load_item_force(FrameMap::rbx_rcx_long_opr); val.load_item_force(FrameMap::long1_opr);
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
} }
@ -833,12 +844,33 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
// operands for arraycopy must use fixed registers, otherwise // operands for arraycopy must use fixed registers, otherwise
// LinearScan will fail allocation (because arraycopy always needs a // LinearScan will fail allocation (because arraycopy always needs a
// call) // call)
#ifndef _LP64
src.load_item_force (FrameMap::rcx_oop_opr); src.load_item_force (FrameMap::rcx_oop_opr);
src_pos.load_item_force (FrameMap::rdx_opr); src_pos.load_item_force (FrameMap::rdx_opr);
dst.load_item_force (FrameMap::rax_oop_opr); dst.load_item_force (FrameMap::rax_oop_opr);
dst_pos.load_item_force (FrameMap::rbx_opr); dst_pos.load_item_force (FrameMap::rbx_opr);
length.load_item_force (FrameMap::rdi_opr); length.load_item_force (FrameMap::rdi_opr);
LIR_Opr tmp = (FrameMap::rsi_opr); LIR_Opr tmp = (FrameMap::rsi_opr);
#else
// The java calling convention will give us enough registers
// so that on the stub side the args will be perfect already.
// On the other slow/special case side we call C and the arg
// positions are not similar enough to pick one as the best.
// Also because the java calling convention is a "shifted" version
// of the C convention we can process the java args trivially into C
// args without worry of overwriting during the xfer
src.load_item_force (FrameMap::as_oop_opr(j_rarg0));
src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
dst.load_item_force (FrameMap::as_oop_opr(j_rarg2));
dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
length.load_item_force (FrameMap::as_opr(j_rarg4));
LIR_Opr tmp = FrameMap::as_opr(j_rarg5);
#endif // LP64
set_no_result(x); set_no_result(x);
int flags; int flags;
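The "shifted" convention the new comment describes can be checked mechanically. Assuming the Linux x86_64 ABI mapping (see the j_rarg declarations in the x86_64 assembler for the authoritative definitions -- the exact registers below are that assumption restated), each Java argument register is the next C argument register, so transferring the five arraycopy args in order never overwrites an unread value:

#include <cassert>

// Register ids for illustration only.
enum Reg { rdi, rsi, rdx, rcx, r8, r9 };

int main() {
  const Reg c_rarg[6] = { rdi, rsi, rdx, rcx, r8, r9 };  // C convention
  const Reg j_rarg[6] = { rsi, rdx, rcx, r8, r9, rdi };  // Java: shifted by one
  // Writing c_rarg[i] at step i only clobbers j_rarg[i-1], which step i-1
  // already consumed (and c_rarg[0] is not a Java arg register here at all).
  for (int i = 1; i < 6; i++)
    assert(c_rarg[i] == j_rarg[i - 1]);
  return 0;
}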
@@ -857,7 +889,7 @@ LIR_Opr fixed_register_for(BasicType type) {
     case T_FLOAT:  return FrameMap::fpu0_float_opr;
     case T_DOUBLE: return FrameMap::fpu0_double_opr;
     case T_INT:    return FrameMap::rax_opr;
-    case T_LONG:   return FrameMap::rax_rdx_long_opr;
+    case T_LONG:   return FrameMap::long0_opr;
     default:       ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
   }
 }
@@ -1161,9 +1193,13 @@ void LIRGenerator::do_If(If* x) {
 LIR_Opr LIRGenerator::getThreadPointer() {
+#ifdef _LP64
+  return FrameMap::as_pointer_opr(r15_thread);
+#else
   LIR_Opr result = new_register(T_INT);
   __ get_thread(result);
   return result;
+#endif //
 }
 void LIRGenerator::trace_block_entry(BlockBegin* block) {
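Context for the getThreadPointer() change above: on x86_64 HotSpot dedicates r15 to the current JavaThread, so producing the thread pointer is just naming a register; 32-bit x86 has no register to spare and materializes it with a get_thread() call. A standalone sketch (GCC-style inline asm, illustrative only; get_thread_helper is a hypothetical stand-in):

struct Thread;   // opaque for the sketch

Thread* current_thread_sketch() {
#if defined(_LP64) && defined(__GNUC__)
  Thread* t;
  asm("movq %%r15, %0" : "=r"(t));     // what as_pointer_opr(r15_thread) models
  return t;
#else
  extern Thread* get_thread_helper();  // hypothetical: e.g. a TLS lookup
  return get_thread_helper();
#endif
}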


@@ -23,18 +23,29 @@
  */
 inline bool LinearScan::is_processed_reg_num(int reg_num) {
+#ifndef _LP64
   // rsp and rbp (numbers 6 ancd 7) are ignored
   assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");
   assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
   assert(reg_num >= 0, "invalid reg_num");
   return reg_num < 6 || reg_num > 7;
+#else
+  // rsp and rbp, r10, r15 (numbers 6 ancd 7) are ignored
+  assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
+  assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
+  assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");
+  assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
+  assert(reg_num >= 0, "invalid reg_num");
+  return reg_num < 12 || reg_num > 15;
+#endif // _LP64
 }
 inline int LinearScan::num_physical_regs(BasicType type) {
   // Intel requires two cpu registers for long,
   // but requires only one fpu register for double
-  if (type == T_LONG) {
+  if (LP64_ONLY(false &&) type == T_LONG) {
     return 2;
   }
   return 1;
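The LP64_ONLY(false &&) trick above deserves a note: on 64-bit builds the macro injects a constant false, the && short-circuits, and the compiler folds the whole branch away, so a long occupies a single register. A sketch of the expanded result (illustrative, not the HotSpot function):

inline int num_physical_regs_sketch(bool is_long) {
#ifdef _LP64
  if (false && is_long) {  // LP64_ONLY(false &&) expanded: dead code, folded away
    return 2;
  }
#else
  if (is_long) {           // 32-bit x86 pairs two registers for a jlong
    return 2;
  }
#endif
  return 1;
}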


@@ -26,18 +26,17 @@
 #include "incls/_c1_MacroAssembler_x86.cpp.incl"
 int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
-  const int aligned_mask = 3;
+  const int aligned_mask = BytesPerWord -1;
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
   assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
-  assert(BytesPerWord == 4, "adjust aligned_mask and code");
   Label done;
   int null_check_offset = -1;
   verify_oop(obj);
   // save object being locked into the BasicObjectLock
-  movl(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
+  movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
   if (UseBiasedLocking) {
     assert(scratch != noreg, "should have scratch register at this point");
@@ -47,16 +46,16 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   }
   // Load object header
-  movl(hdr, Address(obj, hdr_offset));
+  movptr(hdr, Address(obj, hdr_offset));
   // and mark it as unlocked
-  orl(hdr, markOopDesc::unlocked_value);
+  orptr(hdr, markOopDesc::unlocked_value);
   // save unlocked object header into the displaced header location on the stack
-  movl(Address(disp_hdr, 0), hdr);
+  movptr(Address(disp_hdr, 0), hdr);
   // test if object header is still the same (i.e. unlocked), and if so, store the
   // displaced header address in the object header - if it is not the same, get the
   // object header instead
   if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
-  cmpxchg(disp_hdr, Address(obj, hdr_offset));
+  cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
   // if the object header was the same, we're done
   if (PrintBiasedLockingStatistics) {
     cond_inc32(Assembler::equal,
@@ -76,11 +75,11 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   //
   // assuming both the stack pointer and page_size have their least
   // significant 2 bits cleared and page_size is a power of 2
-  subl(hdr, rsp);
-  andl(hdr, aligned_mask - os::vm_page_size());
+  subptr(hdr, rsp);
+  andptr(hdr, aligned_mask - os::vm_page_size());
   // for recursive locking, the result is zero => save it in the displaced header
   // location (NULL in the displaced hdr location indicates recursive locking)
-  movl(Address(disp_hdr, 0), hdr);
+  movptr(Address(disp_hdr, 0), hdr);
   // otherwise we don't care about the result and handle locking via runtime call
   jcc(Assembler::notZero, slow_case);
   // done
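The subptr/andptr pair above implements the recursive stack-lock test. A standalone arithmetic sketch of the same computation (illustrative addresses; 4K pages and a 64-bit word assumed):

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t page_size    = 4096;
  const intptr_t aligned_mask = sizeof(void*) - 1;   // BytesPerWord - 1
  // Keeps the low alignment bits plus every bit >= the page size:
  const intptr_t mask = aligned_mask - page_size;
  intptr_t rsp = 0x7ffff000;
  intptr_t displaced_on_stack = rsp + 64;            // header was a nearby stack address
  intptr_t real_mark_word     = 0x12345679;          // an unlocked heap mark word
  // Zero means "aligned stack address within a page of rsp" => recursive lock.
  std::printf("stack addr -> %ld (0 means recursive)\n",
              (long)((displaced_on_stack - rsp) & mask));
  std::printf("mark word  -> %ld (non-zero -> slow path)\n",
              (long)((real_mark_word - rsp) & mask));
  return 0;
}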
@@ -90,35 +89,34 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
 void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
-  const int aligned_mask = 3;
+  const int aligned_mask = BytesPerWord -1;
   const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
   assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
-  assert(BytesPerWord == 4, "adjust aligned_mask and code");
   Label done;
   if (UseBiasedLocking) {
     // load object
-    movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
     biased_locking_exit(obj, hdr, done);
   }
   // load displaced header
-  movl(hdr, Address(disp_hdr, 0));
+  movptr(hdr, Address(disp_hdr, 0));
   // if the loaded hdr is NULL we had recursive locking
-  testl(hdr, hdr);
+  testptr(hdr, hdr);
   // if we had recursive locking, we are done
   jcc(Assembler::zero, done);
   if (!UseBiasedLocking) {
     // load object
-    movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+    movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
   }
   verify_oop(obj);
   // test if object header is pointing to the displaced header, and if so, restore
   // the displaced header in the object - if the object header is not pointing to
   // the displaced header, get the object header instead
   if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
-  cmpxchg(hdr, Address(obj, hdr_offset));
+  cmpxchgptr(hdr, Address(obj, hdr_offset));
   // if the object header was not pointing to the displaced header,
   // we do unlocking via runtime call
   jcc(Assembler::notEqual, slow_case);
@@ -141,13 +139,14 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
   assert_different_registers(obj, klass, len);
   if (UseBiasedLocking && !len->is_valid()) {
     assert_different_registers(obj, klass, len, t1, t2);
-    movl(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
-    movl(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
+    movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
   } else {
-    movl(Address(obj, oopDesc::mark_offset_in_bytes ()), (int)markOopDesc::prototype());
+    // This assumes that all prototype bits fit in an int32_t
+    movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
   }
-  movl(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
+  movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
   if (len->is_valid()) {
     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   }
@@ -160,25 +159,27 @@ void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int
   assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
   assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
   Register index = len_in_bytes;
-  subl(index, hdr_size_in_bytes);
+  // index is positive and ptr sized
+  subptr(index, hdr_size_in_bytes);
   jcc(Assembler::zero, done);
   // initialize topmost word, divide index by 2, check if odd and test if zero
   // note: for the remaining code to work, index must be a multiple of BytesPerWord
 #ifdef ASSERT
   { Label L;
-    testl(index, BytesPerWord - 1);
+    testptr(index, BytesPerWord - 1);
     jcc(Assembler::zero, L);
     stop("index is not a multiple of BytesPerWord");
     bind(L);
   }
 #endif
-  xorl(t1, t1);    // use _zero reg to clear memory (shorter code)
+  xorptr(t1, t1);  // use _zero reg to clear memory (shorter code)
   if (UseIncDec) {
-    shrl(index, 3);  // divide by 8 and set carry flag if bit 2 was set
+    shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
   } else {
-    shrl(index, 2);  // use 2 instructions to avoid partial flag stall
-    shrl(index, 1);
+    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
+    shrptr(index, 1);
   }
+#ifndef _LP64
   // index could have been not a multiple of 8 (i.e., bit 2 was set)
   { Label even;
     // note: if index was a multiple of 8, than it cannot
@@ -186,16 +187,17 @@ void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int
     // => if it is even, we don't need to check for 0 again
     jcc(Assembler::carryClear, even);
     // clear topmost word (no jump needed if conditional assignment would work here)
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
+    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
     // index could be 0 now, need to check again
     jcc(Assembler::zero, done);
     bind(even);
   }
+#endif // !_LP64
   // initialize remaining object fields: rdx is a multiple of 2 now
   { Label loop;
     bind(loop);
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
-    movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);
+    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
+    NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);)
     decrement(index);
     jcc(Assembler::notZero, loop);
   }
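A plain-C++ rendering of the zeroing loop above (illustrative, not the emitted code): the byte count past the header becomes a downward-running index, with one pointer-sized store per step on 64-bit and two four-byte stores per step on 32-bit:

#include <cstddef>
#include <cstdint>
#include <cstring>

void zero_body_sketch(void* obj, size_t hdr_size_in_bytes, size_t total_size_in_bytes) {
  char* base = static_cast<char*>(obj) + hdr_size_in_bytes;
  size_t index = total_size_in_bytes - hdr_size_in_bytes;  // multiple of BytesPerWord
  while (index != 0) {                                     // bind(loop) ... jcc(notZero, loop)
    index -= sizeof(intptr_t);
    std::memset(base + index, 0, sizeof(intptr_t));        // movptr(Address(...), t1)
  }
}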
@@ -227,30 +229,30 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
   const Register index = t2;
   const int threshold = 6 * BytesPerWord;   // approximate break even point for code size (see comments below)
   if (var_size_in_bytes != noreg) {
-    movl(index, var_size_in_bytes);
+    mov(index, var_size_in_bytes);
     initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
   } else if (con_size_in_bytes <= threshold) {
     // use explicit null stores
     // code size = 2 + 3*n bytes (n = number of fields to clear)
-    xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
+    xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
     for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
-      movl(Address(obj, i), t1_zero);
+      movptr(Address(obj, i), t1_zero);
   } else if (con_size_in_bytes > hdr_size_in_bytes) {
     // use loop to null out the fields
     // code size = 16 bytes for even n (n = number of fields to clear)
     // initialize last object field first if odd number of fields
-    xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
-    movl(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
+    xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
+    movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
     // initialize last object field if constant size is odd
     if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
-      movl(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
+      movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
     // initialize remaining object fields: rdx is a multiple of 2
     { Label loop;
       bind(loop);
-      movl(Address(obj, index, Address::times_8,
-                   hdr_size_in_bytes - (1*BytesPerWord)), t1_zero);
-      movl(Address(obj, index, Address::times_8,
-                   hdr_size_in_bytes - (2*BytesPerWord)), t1_zero);
+      movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
+             t1_zero);
+      NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
+             t1_zero);)
       decrement(index);
       jcc(Assembler::notZero, loop);
     }
@@ -269,17 +271,17 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
   assert_different_registers(obj, len, t1, t2, klass);
   // determine alignment mask
-  assert(BytesPerWord == 4, "must be a multiple of 2 for masking code to work");
+  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
   // check for negative or excessive length
-  cmpl(len, max_array_allocation_length);
+  cmpptr(len, (int32_t)max_array_allocation_length);
   jcc(Assembler::above, slow_case);
   const Register arr_size = t2; // okay to be the same
   // align object end
-  movl(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask);
-  leal(arr_size, Address(arr_size, len, f));
-  andl(arr_size, ~MinObjAlignmentInBytesMask);
+  movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+  lea(arr_size, Address(arr_size, len, f));
+  andptr(arr_size, ~MinObjAlignmentInBytesMask);
   try_allocate(obj, arr_size, 0, t1, t2, slow_case);
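The three instructions that compute arr_size are an align-up in disguise. A sketch of the same arithmetic (8-byte object alignment assumed for the example):

#include <cstddef>

size_t array_size_sketch(size_t header_words, size_t len, size_t elem_size) {
  const size_t align_mask = 7;                                  // MinObjAlignmentInBytesMask (assumed)
  size_t arr_size = header_words * sizeof(void*) + align_mask;  // movptr(arr_size, header + mask)
  arr_size += len * elem_size;                                  // lea(arr_size, Address(arr_size, len, f))
  arr_size &= ~align_mask;                                      // andptr(arr_size, ~mask)
  return arr_size;                                              // == align_up(header + payload, 8)
}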
@@ -305,12 +307,13 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
   // check against inline cache
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
-  cmpl(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
+  cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
   // if icache check fails, then jump to runtime routine
   // Note: RECEIVER must still contain the receiver!
   jump_cc(Assembler::notEqual,
           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-  assert(offset() - start_offset == 9, "check alignment in emit_method_entry");
+  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
+  assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 }
@@ -364,7 +367,7 @@ void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
 void C1_MacroAssembler::verify_not_null_oop(Register r) {
   if (!VerifyOops) return;
   Label not_null;
-  testl(r, r);
+  testptr(r, r);
   jcc(Assembler::notZero, not_null);
   stop("non-null oop required");
   bind(not_null);
@@ -373,12 +376,12 @@ void C1_MacroAssembler::verify_not_null_oop(Register r) {
 void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
 #ifdef ASSERT
-  if (inv_rax) movl(rax, 0xDEAD);
-  if (inv_rbx) movl(rbx, 0xDEAD);
-  if (inv_rcx) movl(rcx, 0xDEAD);
-  if (inv_rdx) movl(rdx, 0xDEAD);
-  if (inv_rsi) movl(rsi, 0xDEAD);
-  if (inv_rdi) movl(rdi, 0xDEAD);
+  if (inv_rax) movptr(rax, 0xDEAD);
+  if (inv_rbx) movptr(rbx, 0xDEAD);
+  if (inv_rcx) movptr(rcx, 0xDEAD);
+  if (inv_rdx) movptr(rdx, 0xDEAD);
+  if (inv_rsi) movptr(rsi, 0xDEAD);
+  if (inv_rdi) movptr(rdi, 0xDEAD);
 #endif
 }


@@ -94,16 +94,17 @@
   // Note: NEVER push values directly, but only through following push_xxx functions;
   //       This helps us to track the rsp changes compared to the entry rsp (->_rsp_offset)
-  void push_jint (jint i)     { _rsp_offset++; pushl(i); }
+  void push_jint (jint i)     { _rsp_offset++; push(i); }
   void push_oop  (jobject o)  { _rsp_offset++; pushoop(o); }
-  void push_addr (Address a)  { _rsp_offset++; pushl(a); }
-  void push_reg  (Register r) { _rsp_offset++; pushl(r); }
-  void pop       (Register r) { _rsp_offset--; popl (r); assert(_rsp_offset >= 0, "stack offset underflow"); }
+  // Seems to always be in wordSize
+  void push_addr (Address a)  { _rsp_offset++; pushptr(a); }
+  void push_reg  (Register r) { _rsp_offset++; push(r); }
+  void pop_reg   (Register r) { _rsp_offset--; pop(r); assert(_rsp_offset >= 0, "stack offset underflow"); }
   void dec_stack (int nof_words) {
     _rsp_offset -= nof_words;
     assert(_rsp_offset >= 0, "stack offset underflow");
-    addl(rsp, wordSize * nof_words);
+    addptr(rsp, wordSize * nof_words);
   }
   void dec_stack_after_call (int nof_words) {
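A standalone sketch of the _rsp_offset bookkeeping these helpers maintain (an illustrative class, not the HotSpot one): every helper pairs a stack adjustment with a counter update so the assembler can assert balance against the entry rsp:

#include <cassert>

class RspOffsetTracker {
  int _rsp_offset;                       // words currently pushed beyond entry rsp
 public:
  RspOffsetTracker() : _rsp_offset(0) {}
  void push_word() { _rsp_offset++; }    // mirrors push(r) / pushptr(a) / push(i)
  void pop_word()  {                     // mirrors pop_reg(r)
    _rsp_offset--;
    assert(_rsp_offset >= 0 && "stack offset underflow");
  }
  void dec_stack(int nof_words) {        // mirrors addptr(rsp, wordSize * nof_words)
    _rsp_offset -= nof_words;
    assert(_rsp_offset >= 0 && "stack offset underflow");
  }
};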

(File diff suppressed because it is too large)

(File diff suppressed because it is too large)


@@ -98,24 +98,24 @@ void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
   // table.
 #ifdef WIN32
-  __ pushl(rcx);  // save "this"
+  __ push(rcx);   // save "this"
 #endif
-  __ movl(rcx, rax);
-  __ shrl(rcx, 8);  // isolate vtable identifier.
-  __ shll(rcx, LogBytesPerWord);
+  __ mov(rcx, rax);
+  __ shrptr(rcx, 8);  // isolate vtable identifier.
+  __ shlptr(rcx, LogBytesPerWord);
   Address index(noreg, rcx, Address::times_1);
   ExternalAddress vtbl((address)vtbl_list);
   __ movptr(rdx, ArrayAddress(vtbl, index));  // get correct vtable address.
 #ifdef WIN32
-  __ popl(rcx);  // restore "this"
+  __ pop(rcx);   // restore "this"
 #else
-  __ movl(rcx, Address(rsp, 4));  // fetch "this"
+  __ movptr(rcx, Address(rsp, BytesPerWord));  // fetch "this"
 #endif
-  __ movl(Address(rcx, 0), rdx);  // update vtable pointer.
-  __ andl(rax, 0x00ff);  // isolate vtable method index
-  __ shll(rax, LogBytesPerWord);
-  __ addl(rax, rdx);  // address of real method pointer.
+  __ movptr(Address(rcx, 0), rdx);  // update vtable pointer.
+  __ andptr(rax, 0x00ff);  // isolate vtable method index
+  __ shlptr(rax, LogBytesPerWord);
+  __ addptr(rax, rdx);  // address of real method pointer.
   __ jmp(Address(rax, 0));  // get real method pointer.
   __ flush();
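A sketch of the encoding the stub above decodes (illustrative): rax arrives holding a packed token, (vtable_list_index << 8) | method_slot. The stub splits it, looks up the real vtable, patches the receiver's vtable pointer, and tail-jumps to the real method:

#include <cstdint>

struct Decoded { uint32_t vtable_id; uint32_t method_index; };

Decoded decode_vtable_token(uint32_t rax) {
  Decoded d;
  d.vtable_id    = rax >> 8;      // shrptr(rcx, 8)      -- isolate vtable identifier
  d.method_index = rax & 0x00ff;  // andptr(rax, 0x00ff) -- isolate vtable method index
  return d;
}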


@@ -90,22 +90,22 @@ void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
   // are on the stack and the "this" pointer is in c_rarg0. In addition, rax
   // was set (above) to the offset of the method in the table.
-  __ pushq(c_rarg1);  // save & free register
-  __ pushq(c_rarg0);  // save "this"
-  __ movq(c_rarg0, rax);
-  __ shrq(c_rarg0, 8);  // isolate vtable identifier.
-  __ shlq(c_rarg0, LogBytesPerWord);
+  __ push(c_rarg1);   // save & free register
+  __ push(c_rarg0);   // save "this"
+  __ mov(c_rarg0, rax);
+  __ shrptr(c_rarg0, 8);  // isolate vtable identifier.
+  __ shlptr(c_rarg0, LogBytesPerWord);
   __ lea(c_rarg1, ExternalAddress((address)vtbl_list));  // ptr to correct vtable list.
-  __ addq(c_rarg1, c_rarg0);  // ptr to list entry.
-  __ movq(c_rarg1, Address(c_rarg1, 0));  // get correct vtable address.
-  __ popq(c_rarg0);  // restore "this"
-  __ movq(Address(c_rarg0, 0), c_rarg1);  // update vtable pointer.
-  __ andq(rax, 0x00ff);  // isolate vtable method index
-  __ shlq(rax, LogBytesPerWord);
-  __ addq(rax, c_rarg1);  // address of real method pointer.
-  __ popq(c_rarg1);  // restore register.
-  __ movq(rax, Address(rax, 0));  // get real method pointer.
+  __ addptr(c_rarg1, c_rarg0);  // ptr to list entry.
+  __ movptr(c_rarg1, Address(c_rarg1, 0));  // get correct vtable address.
+  __ pop(c_rarg0);  // restore "this"
+  __ movptr(Address(c_rarg0, 0), c_rarg1);  // update vtable pointer.
+  __ andptr(rax, 0x00ff);  // isolate vtable method index
+  __ shlptr(rax, LogBytesPerWord);
+  __ addptr(rax, c_rarg1);  // address of real method pointer.
+  __ pop(c_rarg1);  // restore register.
+  __ movptr(rax, Address(rax, 0));  // get real method pointer.
   __ jmp(rax);  // jump to the real method.
   __ flush();


@@ -217,7 +217,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
 void frame::patch_pc(Thread* thread, address pc) {
   if (TracePcPatching) {
-    tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", &((address *)sp())[-1], ((address *)sp())[-1], pc);
+    tty->print_cr("patch_pc at address" INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "] ",
+                  &((address *)sp())[-1], ((address *)sp())[-1], pc);
   }
   ((address *)sp())[-1] = pc;
   _cb = CodeCache::find_blob(pc);
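Why the trace line above switched formats, in one sketch: "0x%x" truncates pointers on LP64, while a pointer-width format specifier stays correct on both word sizes. HotSpot's INTPTR_FORMAT plays the role PRIxPTR plays in standard C++ (the standard macro is used below as an illustration, not as HotSpot's definition):

#include <cinttypes>
#include <cstdio>

void show(void* p) {
  // Prints the full pointer on both 32- and 64-bit builds.
  std::printf("ptr = 0x%" PRIxPTR "\n", (uintptr_t)p);
}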


@@ -159,7 +159,7 @@ inline intptr_t** frame::interpreter_frame_locals_addr() const {
 inline intptr_t* frame::interpreter_frame_bcx_addr() const {
   assert(is_interpreted_frame(), "must be interpreted");
-  return (jint*) &(get_interpreterState()->_bcp);
+  return (intptr_t*) &(get_interpreterState()->_bcp);
 }
@@ -179,7 +179,7 @@ inline methodOop* frame::interpreter_frame_method_addr() const {
 inline intptr_t* frame::interpreter_frame_mdx_addr() const {
   assert(is_interpreted_frame(), "must be interpreted");
-  return (jint*) &(get_interpreterState()->_mdx);
+  return (intptr_t*) &(get_interpreterState()->_mdx);
 }
 // top of expression stack


@@ -48,7 +48,7 @@ void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flu
   __ bind(flush_line);
   __ clflush(Address(addr, 0));
-  __ addq(addr, ICache::line_size);
+  __ addptr(addr, ICache::line_size);
   __ decrementl(lines);
   __ jcc(Assembler::notZero, flush_line);
@@ -60,7 +60,7 @@ void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flu
   const Address magic(rsp, 3*wordSize);
   __ lock(); __ addl(Address(rsp, 0), 0);
 #endif // AMD64
-  __ movl(rax, magic);  // Handshake with caller to make sure it happened!
+  __ movptr(rax, magic);  // Handshake with caller to make sure it happened!
   __ ret(0);
   // Must be set here so StubCodeMark destructor can call the flush stub.


@@ -29,8 +29,8 @@
 // Implementation of InterpreterMacroAssembler
 #ifdef CC_INTERP
 void InterpreterMacroAssembler::get_method(Register reg) {
-  movl(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
-  movl(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
+  movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
+  movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
 }
 #endif // CC_INTERP
@@ -53,7 +53,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(
   // when jvm built with ASSERTs.
 #ifdef ASSERT
   { Label L;
-    cmpl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
     jcc(Assembler::equal, L);
     stop("InterpreterMacroAssembler::call_VM_leaf_base: last_sp != NULL");
     bind(L);
@@ -79,7 +79,7 @@ void InterpreterMacroAssembler::call_VM_base(
 ) {
 #ifdef ASSERT
   { Label L;
-    cmpl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+    cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
     jcc(Assembler::equal, L);
     stop("InterpreterMacroAssembler::call_VM_base: last_sp != NULL");
     bind(L);
@@ -132,10 +132,11 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
   const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
                                + in_ByteSize(wordSize));
   switch (state) {
-    case atos: movl(rax, oop_addr);
-               movl(oop_addr, NULL_WORD);
+    case atos: movptr(rax, oop_addr);
+               movptr(oop_addr, (int32_t)NULL_WORD);
                verify_oop(rax, state);       break;
-    case ltos: movl(rdx, val_addr1);         // fall through
+    case ltos:
+               movl(rdx, val_addr1);         // fall through
     case btos:                               // fall through
     case ctos:                               // fall through
     case stos:                               // fall through
@@ -146,9 +147,9 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
     default : ShouldNotReachHere();
   }
   // Clean up tos value in the thread object
-  movl(tos_addr, (int) ilgl);
-  movl(val_addr, NULL_WORD);
-  movl(val_addr1, NULL_WORD);
+  movl(tos_addr, (int32_t) ilgl);
+  movptr(val_addr, (int32_t)NULL_WORD);
+  NOT_LP64(movl(val_addr1, (int32_t)NULL_WORD));
 }
@@ -156,8 +157,8 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
   if (JvmtiExport::can_force_early_return()) {
     Label L;
     Register tmp = java_thread;
-    movl(tmp, Address(tmp, JavaThread::jvmti_thread_state_offset()));
-    testl(tmp, tmp);
+    movptr(tmp, Address(tmp, JavaThread::jvmti_thread_state_offset()));
+    testptr(tmp, tmp);
     jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;
     // Initiate earlyret handling only if it is not already being processed.
@@ -170,7 +171,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
     // Call Interpreter::remove_activation_early_entry() to get the address of the
     // same-named entrypoint in the generated interpreter code.
     get_thread(java_thread);
-    movl(tmp, Address(java_thread, JavaThread::jvmti_thread_state_offset()));
+    movptr(tmp, Address(java_thread, JavaThread::jvmti_thread_state_offset()));
     pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
     jmp(rax);
@@ -183,7 +184,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
   movl(reg, Address(rsi, bcp_offset));
-  bswap(reg);
+  bswapl(reg);
   shrl(reg, 16);
 }
@@ -192,9 +193,9 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
   assert(cache != index, "must use different registers");
   load_unsigned_word(index, Address(rsi, bcp_offset));
-  movl(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
+  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
-  shll(index, 2); // convert from field index to ConstantPoolCacheEntry index
+  shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
 }
@@ -206,10 +207,10 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R
   // convert from field index to ConstantPoolCacheEntry index
   // and from word offset to byte offset
   shll(tmp, 2 + LogBytesPerWord);
-  movl(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
+  movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   // skip past the header
-  addl(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
-  addl(cache, tmp);  // construct pointer to cache entry
+  addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
+  addptr(cache, tmp);  // construct pointer to cache entry
 }
@@ -232,22 +233,22 @@ void InterpreterMacroAssembler::gen_subtype_check( Register Rsub_klass, Label &o
   // if the super-klass is an interface or exceptionally deep in the Java
   // hierarchy and we have to scan the secondary superclass list the hard way.
   // See if we get an immediate positive hit
-  cmpl( rax, Address(Rsub_klass,rcx,Address::times_1) );
+  cmpptr( rax, Address(Rsub_klass,rcx,Address::times_1) );
   jcc( Assembler::equal,ok_is_subtype );
   // Check for immediate negative hit
   cmpl( rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
   jcc( Assembler::notEqual, not_subtype );
   // Check for self
-  cmpl( Rsub_klass, rax );
+  cmpptr( Rsub_klass, rax );
   jcc( Assembler::equal, ok_is_subtype );
   // Now do a linear scan of the secondary super-klass chain.
-  movl( rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()) );
+  movptr( rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()) );
   // EDI holds the objArrayOop of secondary supers.
   movl( rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));// Load the array length
   // Skip to start of data; also clear Z flag incase ECX is zero
-  addl( rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT) );
+  addptr( rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT) );
   // Scan ECX words at [EDI] for occurance of EAX
   // Set NZ/Z based on last compare
   repne_scan();
@@ -255,7 +256,7 @@ void InterpreterMacroAssembler::gen_subtype_check( Register Rsub_klass, Label &o
   // Not equal?
   jcc( Assembler::notEqual, not_subtype );
   // Must be equal but missed in cache. Update cache.
-  movl( Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax );
+  movptr( Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax );
   jmp( ok_is_subtype );
   bind(not_subtype);
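A C++ rendering of the check this assembly implements (field and type names below are illustrative; the real klass layout and the super_check_offset fast path are more involved): try the one-element secondary_super_cache, then linearly scan the secondary supers array -- the repne_scan() above is exactly that loop in microcoded form -- and update the cache on a hit:

struct KlassSketch {
  KlassSketch*  secondary_super_cache;   // one-element cache
  KlassSketch** secondary_supers;        // array of secondary supertypes
  int           secondary_supers_len;
};

bool is_subtype_sketch(KlassSketch* sub, KlassSketch* super) {
  if (sub->secondary_super_cache == super) return true;  // immediate positive hit
  if (sub == super) return true;                         // "check for self"
  for (int i = 0; i < sub->secondary_supers_len; i++) {  // the repne_scan() loop
    if (sub->secondary_supers[i] == super) {
      sub->secondary_super_cache = super;                // missed in cache: update it
      return true;
    }
  }
  return false;
}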
@@ -276,7 +277,6 @@ void InterpreterMacroAssembler::d2ieee() {
     fld_d(Address(rsp, 0));
   }
 }
-#endif // CC_INTERP
 // Java Expression Stack
@@ -284,11 +284,11 @@
 void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
   if (TaggedStackInterpreter) {
     Label okay;
-    cmpl(Address(rsp, wordSize), (int)t);
+    cmpptr(Address(rsp, wordSize), (int32_t)t);
     jcc(Assembler::equal, okay);
     // Also compare if the stack value is zero, then the tag might
     // not have been set coming from deopt.
-    cmpl(Address(rsp, 0), 0);
+    cmpptr(Address(rsp, 0), 0);
     jcc(Assembler::equal, okay);
     stop("Java Expression stack tag value is bad");
     bind(okay);
@@ -298,43 +298,43 @@
 void InterpreterMacroAssembler::pop_ptr(Register r) {
   debug_only(verify_stack_tag(frame::TagReference));
-  popl(r);
-  if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+  pop(r);
+  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
-  popl(r);
+  pop(r);
   // Tag may not be reference for jsr, can be returnAddress
-  if (TaggedStackInterpreter) popl(tag);
+  if (TaggedStackInterpreter) pop(tag);
 }
 void InterpreterMacroAssembler::pop_i(Register r) {
   debug_only(verify_stack_tag(frame::TagValue));
-  popl(r);
-  if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+  pop(r);
+  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
   debug_only(verify_stack_tag(frame::TagValue));
-  popl(lo);
-  if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+  pop(lo);
+  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
   debug_only(verify_stack_tag(frame::TagValue));
-  popl(hi);
-  if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+  pop(hi);
+  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 void InterpreterMacroAssembler::pop_f() {
   debug_only(verify_stack_tag(frame::TagValue));
   fld_s(Address(rsp, 0));
-  addl(rsp, 1 * wordSize);
-  if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+  addptr(rsp, 1 * wordSize);
+  if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
 }
 void InterpreterMacroAssembler::pop_d() {
   // Write double to stack contiguously and load into ST0
   pop_dtos_to_rsp();
   fld_d(Address(rsp, 0));
-  addl(rsp, 2 * wordSize);
+  addptr(rsp, 2 * wordSize);
 }
@@ -344,22 +344,22 @@ void InterpreterMacroAssembler::pop_dtos_to_rsp() {
   if (TaggedStackInterpreter) {
     // Pop double value into scratch registers
     debug_only(verify_stack_tag(frame::TagValue));
-    popl(rax);
-    addl(rsp, 1* wordSize);
+    pop(rax);
+    addptr(rsp, 1* wordSize);
     debug_only(verify_stack_tag(frame::TagValue));
-    popl(rdx);
-    addl(rsp, 1* wordSize);
-    pushl(rdx);
-    pushl(rax);
+    pop(rdx);
+    addptr(rsp, 1* wordSize);
+    push(rdx);
+    push(rax);
   }
 }
 void InterpreterMacroAssembler::pop_ftos_to_rsp() {
   if (TaggedStackInterpreter) {
     debug_only(verify_stack_tag(frame::TagValue));
-    popl(rax);
-    addl(rsp, 1 * wordSize);
-    pushl(rax);  // ftos is at rsp
+    pop(rax);
+    addptr(rsp, 1 * wordSize);
+    push(rax);  // ftos is at rsp
   }
 }
@@ -380,31 +380,31 @@ void InterpreterMacroAssembler::pop(TosState state) {
 }
 void InterpreterMacroAssembler::push_ptr(Register r) {
-  if (TaggedStackInterpreter) pushl(frame::TagReference);
-  pushl(r);
+  if (TaggedStackInterpreter) push(frame::TagReference);
+  push(r);
 }
 void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
-  if (TaggedStackInterpreter) pushl(tag);  // tag first
-  pushl(r);
+  if (TaggedStackInterpreter) push(tag);  // tag first
+  push(r);
 }
 void InterpreterMacroAssembler::push_i(Register r) {
-  if (TaggedStackInterpreter) pushl(frame::TagValue);
-  pushl(r);
+  if (TaggedStackInterpreter) push(frame::TagValue);
+  push(r);
 }
 void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
-  if (TaggedStackInterpreter) pushl(frame::TagValue);
-  pushl(hi);
-  if (TaggedStackInterpreter) pushl(frame::TagValue);
-  pushl(lo);
+  if (TaggedStackInterpreter) push(frame::TagValue);
+  push(hi);
+  if (TaggedStackInterpreter) push(frame::TagValue);
+  push(lo);
 }
 void InterpreterMacroAssembler::push_f() {
-  if (TaggedStackInterpreter) pushl(frame::TagValue);
+  if (TaggedStackInterpreter) push(frame::TagValue);
   // Do not schedule for no AGI! Never write beyond rsp!
-  subl(rsp, 1 * wordSize);
+  subptr(rsp, 1 * wordSize);
   fstp_s(Address(rsp, 0));
 }
@@ -415,8 +415,8 @@ void InterpreterMacroAssembler::push_d(Register r) {
     //   high
     //   tag
     //   low
-    pushl(frame::TagValue);
-    subl(rsp, 3 * wordSize);
+    push(frame::TagValue);
+    subptr(rsp, 3 * wordSize);
     fstp_d(Address(rsp, 0));
     // move high word up to slot n-1
     movl(r, Address(rsp, 1*wordSize));
@@ -425,7 +425,7 @@ void InterpreterMacroAssembler::push_d(Register r) {
     movl(Address(rsp, 1*wordSize), frame::TagValue);
   } else {
     // Do not schedule for no AGI! Never write beyond rsp!
-    subl(rsp, 2 * wordSize);
+    subptr(rsp, 2 * wordSize);
     fstp_d(Address(rsp, 0));
   }
 }
@@ -447,22 +447,21 @@ void InterpreterMacroAssembler::push(TosState state) {
   }
 }
-#ifndef CC_INTERP
 // Tagged stack helpers for swap and dup
 void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
                                                  Register tag) {
-  movl(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
+  movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
   if (TaggedStackInterpreter) {
-    movl(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
+    movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
   }
 }
 void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
                                                   Register tag) {
-  movl(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
+  movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
   if (TaggedStackInterpreter) {
-    movl(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
+    movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
   }
 }
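The offsets these helpers pass around follow a simple pattern. A sketch of the layout they assume (an illustration of what Interpreter::expr_offset_in_bytes computes, not its source): with TaggedStackInterpreter each expression-stack element is a (value, tag) word pair, value below its tag:

// Illustrative only; the real functions live in the interpreter headers.
inline int expr_offset_in_bytes_sketch(int n, bool tagged, int word_size) {
  return (tagged ? 2 * n : n) * word_size;  // where element n's value lives
}
inline int expr_tag_offset_in_bytes_sketch(int n, int word_size) {
  return (2 * n + 1) * word_size;           // its tag, one word above the value
}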
@ -471,10 +470,10 @@ void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) { void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) { if (tag == frame::TagCategory2) {
movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int)frame::TagValue); movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)frame::TagValue);
movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)frame::TagValue); movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)frame::TagValue);
} else { } else {
movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)tag); movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
} }
} }
} }
@ -482,13 +481,13 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) { void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) { if (tag == frame::TagCategory2) {
movl(Address(rdi, idx, Interpreter::stackElementScale(), movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(1)), (int)frame::TagValue); Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
movl(Address(rdi, idx, Interpreter::stackElementScale(), movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), (int)frame::TagValue); Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
} else { } else {
movl(Address(rdi, idx, Interpreter::stackElementScale(), movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), (int)tag); Interpreter::local_tag_offset_in_bytes(0)), (int32_t)tag);
} }
} }
} }
@ -496,7 +495,7 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
void InterpreterMacroAssembler::tag_local(Register tag, Register idx) { void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
// can only be TagValue or TagReference // can only be TagValue or TagReference
movl(Address(rdi, idx, Interpreter::stackElementScale(), movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), tag); Interpreter::local_tag_offset_in_bytes(0)), tag);
} }
} }
@ -505,7 +504,7 @@ void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
void InterpreterMacroAssembler::tag_local(Register tag, int n) { void InterpreterMacroAssembler::tag_local(Register tag, int n) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
// can only be TagValue or TagReference // can only be TagValue or TagReference
movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag); movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag);
} }
} }
@ -516,17 +515,17 @@ void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
if (tag == frame::TagCategory2) { if (tag == frame::TagCategory2) {
Label nbl; Label nbl;
t = frame::TagValue; // change to what is stored in locals t = frame::TagValue; // change to what is stored in locals
cmpl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int)t); cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
jcc(Assembler::equal, nbl); jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double"); stop("Local tag is bad for long/double");
bind(nbl); bind(nbl);
} }
Label notBad; Label notBad;
cmpl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)t); cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
jcc(Assembler::equal, notBad); jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might // Also compare if the local value is zero, then the tag might
// not have been set coming from deopt. // not have been set coming from deopt.
cmpl(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0); cmpptr(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0);
jcc(Assembler::equal, notBad); jcc(Assembler::equal, notBad);
stop("Local tag is bad"); stop("Local tag is bad");
bind(notBad); bind(notBad);
@ -539,19 +538,19 @@ void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
if (tag == frame::TagCategory2) { if (tag == frame::TagCategory2) {
Label nbl; Label nbl;
t = frame::TagValue; // change to what is stored in locals t = frame::TagValue; // change to what is stored in locals
cmpl(Address(rdi, idx, Interpreter::stackElementScale(), cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(1)), (int)t); Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
jcc(Assembler::equal, nbl); jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double"); stop("Local tag is bad for long/double");
bind(nbl); bind(nbl);
} }
Label notBad; Label notBad;
cmpl(Address(rdi, idx, Interpreter::stackElementScale(), cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), (int)t); Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
jcc(Assembler::equal, notBad); jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might // Also compare if the local value is zero, then the tag might
// not have been set coming from deopt. // not have been set coming from deopt.
cmpl(Address(rdi, idx, Interpreter::stackElementScale(), cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_offset_in_bytes(0)), 0); Interpreter::local_offset_in_bytes(0)), 0);
jcc(Assembler::equal, notBad); jcc(Assembler::equal, notBad);
stop("Local tag is bad"); stop("Local tag is bad");
@ -567,22 +566,22 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) { void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) {
pushl(arg_1); push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 1); MacroAssembler::call_VM_leaf_base(entry_point, 1);
} }
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
pushl(arg_2); push(arg_2);
pushl(arg_1); push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 2); MacroAssembler::call_VM_leaf_base(entry_point, 2);
} }
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
pushl(arg_3); push(arg_3);
pushl(arg_2); push(arg_2);
pushl(arg_1); push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 3); MacroAssembler::call_VM_leaf_base(entry_point, 3);
} }
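
These 32-bit leaf-call helpers pass arguments on the stack, pushed right to left; the 64-bit versions later in this commit instead move arguments into c_rarg0..c_rarg2 and skip redundant self-moves. A sketch of the two schemes, with Register as a stand-in for the real register type:

    #include <cstdio>
    #include <cstring>

    typedef const char* Register;

    static void push_arg(Register r) { std::printf("push %s\n", r); }
    static void move_arg(Register dst, Register src) {
      if (std::strcmp(dst, src) != 0)    // mirrors the "if (c_rarg0 != arg_1)" guard
        std::printf("mov %s, %s\n", dst, src);
    }

    // 32-bit convention: arguments live on the stack, last argument pushed first.
    static void leaf_args_ia32(Register a1, Register a2) {
      push_arg(a2);
      push_arg(a1);
    }

    // 64-bit convention: arguments live in the first argument registers.
    static void leaf_args_x64(Register a1, Register a2) {
      move_arg("rdi" /* c_rarg0 */, a1);
      move_arg("rsi" /* c_rarg1 */, a2);
    }
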
@ -591,9 +590,9 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register
// in this thread in which case we must call the i2i entry // in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
// set sender sp // set sender sp
leal(rsi, Address(rsp, wordSize)); lea(rsi, Address(rsp, wordSize));
// record last_sp // record last_sp
movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi); movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi);
if (JvmtiExport::can_post_interpreter_events()) { if (JvmtiExport::can_post_interpreter_events()) {
Label run_compiled_code; Label run_compiled_code;
@ -629,16 +628,16 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table,
verify_FPU(1, state); verify_FPU(1, state);
if (VerifyActivationFrameSize) { if (VerifyActivationFrameSize) {
Label L; Label L;
movl(rcx, rbp); mov(rcx, rbp);
subl(rcx, rsp); subptr(rcx, rsp);
int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize; int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
cmpl(rcx, min_frame_size); cmpptr(rcx, min_frame_size);
jcc(Assembler::greaterEqual, L); jcc(Assembler::greaterEqual, L);
stop("broken stack frame"); stop("broken stack frame");
bind(L); bind(L);
} }
if (verifyoop) verify_oop(rax, state); if (verifyoop) verify_oop(rax, state);
Address index(noreg, rbx, Address::times_4); Address index(noreg, rbx, Address::times_ptr);
ExternalAddress tbl((address)table); ExternalAddress tbl((address)table);
ArrayAddress dispatch(tbl, index); ArrayAddress dispatch(tbl, index);
jump(dispatch); jump(dispatch);
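
dispatch_base implements classic token-threaded dispatch: the next bytecode, already in rbx, indexes a table of handler entry points scaled by pointer size, hence the times_4 to times_ptr fix above. A self-contained model of the same shape:

    #include <cstdint>
    #include <cstdio>

    typedef void (*Handler)();
    static void do_nop()  { std::puts("nop");  }
    static void do_iadd() { std::puts("iadd"); }

    static Handler dispatch_table[256];  // one entry per bytecode value

    int main() {
      dispatch_table[0x00] = do_nop;
      dispatch_table[0x60] = do_iadd;
      uint8_t bytecode = 0x60;           // what load_unsigned_byte put in rbx
      dispatch_table[bytecode]();        // the jump(dispatch) equivalent
      return 0;
    }
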
@ -700,10 +699,10 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
movbool(rbx, do_not_unlock_if_synchronized); movbool(rbx, do_not_unlock_if_synchronized);
movl(rdi,rbx); mov(rdi,rbx);
movbool(do_not_unlock_if_synchronized, false); // reset the flag movbool(do_not_unlock_if_synchronized, false); // reset the flag
movl(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags
movl(rcx, Address(rbx, methodOopDesc::access_flags_offset())); movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
testl(rcx, JVM_ACC_SYNCHRONIZED); testl(rcx, JVM_ACC_SYNCHRONIZED);
@ -711,7 +710,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
// Don't unlock anything if the _do_not_unlock_if_synchronized flag // Don't unlock anything if the _do_not_unlock_if_synchronized flag
// is set. // is set.
movl(rcx,rdi); mov(rcx,rdi);
testbool(rcx); testbool(rcx);
jcc(Assembler::notZero, no_unlock); jcc(Assembler::notZero, no_unlock);
@ -721,11 +720,11 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
// BasicObjectLock will be first in list, since this is a synchronized method. However, need // BasicObjectLock will be first in list, since this is a synchronized method. However, need
// to check that the object has not been unlocked by an explicit monitorexit bytecode. // to check that the object has not been unlocked by an explicit monitorexit bytecode.
const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock)); const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
leal (rdx, monitor); // address of first monitor lea (rdx, monitor); // address of first monitor
movl (rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); movptr (rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
testl (rax, rax); testptr(rax, rax);
jcc (Assembler::notZero, unlock); jcc (Assembler::notZero, unlock);
pop(state); pop(state);
if (throw_monitor_exception) { if (throw_monitor_exception) {
@ -762,8 +761,8 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize); const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
bind(restart); bind(restart);
movl(rcx, monitor_block_top); // points to current entry, starting with top-most entry movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
leal(rbx, monitor_block_bot); // points to word before bottom of monitor block lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
jmp(entry); jmp(entry);
// Entry already locked, need to throw exception // Entry already locked, need to throw exception
@ -780,7 +779,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
// Unlock does not block, so don't have to worry about the frame // Unlock does not block, so don't have to worry about the frame
push(state); push(state);
movl(rdx, rcx); mov(rdx, rcx);
unlock_object(rdx); unlock_object(rdx);
pop(state); pop(state);
@ -793,12 +792,12 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
} }
bind(loop); bind(loop);
cmpl(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); // check if current entry is used cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
jcc(Assembler::notEqual, exception); jcc(Assembler::notEqual, exception);
addl(rcx, entry_size); // otherwise advance to next entry addptr(rcx, entry_size); // otherwise advance to next entry
bind(entry); bind(entry);
cmpl(rcx, rbx); // check if bottom reached cmpptr(rcx, rbx); // check if bottom reached
jcc(Assembler::notEqual, loop); // if not at bottom then check this entry jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
} }
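
The loop above walks the frame's monitor block from the top-most entry toward the word before the block bottom, raising an exception for any entry whose object slot is still non-null. A C-level model, with illustrative field names:

    #include <cstddef>

    struct BasicObjectLock { void* displaced_header; void* obj; };

    // Returns true when every monitor slot has been released; a non-null
    // obj means a still-locked entry and takes the exception path.
    static bool all_monitors_released(BasicObjectLock* top, BasicObjectLock* bottom) {
      for (BasicObjectLock* cur = top; cur != bottom; ++cur) {
        if (cur->obj != nullptr) return false;
      }
      return true;
    }
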
@ -812,22 +811,22 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
} }
// remove activation // remove activation
movl(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
leave(); // remove frame anchor leave(); // remove frame anchor
popl(ret_addr); // get return address pop(ret_addr); // get return address
movl(rsp, rbx); // set sp to sender sp mov(rsp, rbx); // set sp to sender sp
if (UseSSE) { if (UseSSE) {
// float and double are returned in xmm register in SSE-mode // float and double are returned in xmm register in SSE-mode
if (state == ftos && UseSSE >= 1) { if (state == ftos && UseSSE >= 1) {
subl(rsp, wordSize); subptr(rsp, wordSize);
fstp_s(Address(rsp, 0)); fstp_s(Address(rsp, 0));
movflt(xmm0, Address(rsp, 0)); movflt(xmm0, Address(rsp, 0));
addl(rsp, wordSize); addptr(rsp, wordSize);
} else if (state == dtos && UseSSE >= 2) { } else if (state == dtos && UseSSE >= 2) {
subl(rsp, 2*wordSize); subptr(rsp, 2*wordSize);
fstp_d(Address(rsp, 0)); fstp_d(Address(rsp, 0));
movdbl(xmm0, Address(rsp, 0)); movdbl(xmm0, Address(rsp, 0));
addl(rsp, 2*wordSize); addptr(rsp, 2*wordSize);
} }
} }
} }
@ -858,7 +857,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
Label slow_case; Label slow_case;
// Load object pointer into obj_reg %rcx // Load object pointer into obj_reg %rcx
movl(obj_reg, Address(lock_reg, obj_offset)); movptr(obj_reg, Address(lock_reg, obj_offset));
if (UseBiasedLocking) { if (UseBiasedLocking) {
// Note: we use noreg for the temporary register since it's hard // Note: we use noreg for the temporary register since it's hard
@ -867,19 +866,19 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
} }
// Load immediate 1 into swap_reg %rax, // Load immediate 1 into swap_reg %rax,
movl(swap_reg, 1); movptr(swap_reg, (int32_t)1);
// Load (object->mark() | 1) into swap_reg %rax, // Load (object->mark() | 1) into swap_reg %rax,
orl(swap_reg, Address(obj_reg, 0)); orptr(swap_reg, Address(obj_reg, 0));
// Save (object->mark() | 1) into BasicLock's displaced header // Save (object->mark() | 1) into BasicLock's displaced header
movl(Address(lock_reg, mark_offset), swap_reg); movptr(Address(lock_reg, mark_offset), swap_reg);
assert(lock_offset == 0, "displaced header must be first word in BasicObjectLock"); assert(lock_offset == 0, "displaced header must be first word in BasicObjectLock");
if (os::is_MP()) { if (os::is_MP()) {
lock(); lock();
} }
cmpxchg(lock_reg, Address(obj_reg, 0)); cmpxchgptr(lock_reg, Address(obj_reg, 0));
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero, cond_inc32(Assembler::zero,
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
@ -895,11 +894,11 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// assuming both the stack pointer and the page size have their // assuming both the stack pointer and the page size have their
// least significant 2 bits clear. // least significant 2 bits clear.
// NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg // NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
subl(swap_reg, rsp); subptr(swap_reg, rsp);
andl(swap_reg, 3 - os::vm_page_size()); andptr(swap_reg, 3 - os::vm_page_size());
// Save the test result, for recursive case, the result is zero // Save the test result, for recursive case, the result is zero
movl(Address(lock_reg, mark_offset), swap_reg); movptr(Address(lock_reg, mark_offset), swap_reg);
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero, cond_inc32(Assembler::zero,
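
The recursive-lock test above relies on a small arithmetic trick: in two's complement, 3 - os::vm_page_size() is a mask that clears the in-page offset bits while keeping the low two tag bits. If the mark returned by cmpxchg points into our own stack page, the masked difference is zero and the lock is recursive. The same computation, spelled out:

    #include <cstdint>

    // page_size must be a power of two; 3 - page_size == (~(page_size - 1)) | 3.
    static bool is_recursive_stack_lock(uintptr_t displaced_header,
                                        uintptr_t rsp,
                                        uintptr_t page_size) {
      uintptr_t diff = displaced_header - rsp;   // subptr(swap_reg, rsp)
      return (diff & (3 - page_size)) == 0;      // andptr(swap_reg, 3 - os::vm_page_size())
    }
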
@ -939,36 +938,36 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Convert from BasicObjectLock structure to object and BasicLock structure // Convert from BasicObjectLock structure to object and BasicLock structure
// Store the BasicLock address into %rax, // Store the BasicLock address into %rax,
leal(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
// Load oop into obj_reg(%rcx) // Load oop into obj_reg(%rcx)
movl(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ())); movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ()));
// Free entry // Free entry
movl(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
if (UseBiasedLocking) { if (UseBiasedLocking) {
biased_locking_exit(obj_reg, header_reg, done); biased_locking_exit(obj_reg, header_reg, done);
} }
// Load the old header from BasicLock structure // Load the old header from BasicLock structure
movl(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes())); movptr(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes()));
// Test for recursion // Test for recursion
testl(header_reg, header_reg); testptr(header_reg, header_reg);
// zero for recursive case // zero for recursive case
jcc(Assembler::zero, done); jcc(Assembler::zero, done);
// Atomic swap back the old header // Atomic swap back the old header
if (os::is_MP()) lock(); if (os::is_MP()) lock();
cmpxchg(header_reg, Address(obj_reg, 0)); cmpxchgptr(header_reg, Address(obj_reg, 0));
// zero for recursive case // zero for recursive case
jcc(Assembler::zero, done); jcc(Assembler::zero, done);
// Call the runtime routine for slow case. // Call the runtime routine for slow case.
movl(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
bind(done); bind(done);
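
The fast unlock path above works as follows: a zero displaced header marks a recursive lock, so there is nothing to restore; otherwise the saved header is compare-and-swapped back over the mark word, which should still point at our BasicLock. A C++ model using std::atomic as a stand-in for the lock-prefixed cmpxchg:

    #include <atomic>
    #include <cstdint>

    struct ObjectHeader { std::atomic<intptr_t> mark; };

    // basic_lock_addr is what swap_reg (rax) holds after the lea above;
    // displaced is the header loaded from the BasicLock structure.
    static bool fast_unlock(ObjectHeader* obj, intptr_t basic_lock_addr,
                            intptr_t displaced) {
      if (displaced == 0) return true;         // recursive case, nothing to do
      intptr_t expected = basic_lock_addr;     // mark should point at our lock
      return obj->mark.compare_exchange_strong(expected, displaced);
      // on failure the slow path calls InterpreterRuntime::monitorexit
    }
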
@ -983,8 +982,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Test the method data pointer. If it is null, continue at the specified label void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) { void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) { void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
movl(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize)); movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
testl(mdp, mdp); testptr(mdp, mdp);
jcc(Assembler::zero, zero_continue); jcc(Assembler::zero, zero_continue);
} }
@ -993,13 +992,13 @@ void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& ze
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
Label zero_continue; Label zero_continue;
pushl(rax); push(rax);
pushl(rbx); push(rbx);
get_method(rbx); get_method(rbx);
// Test MDO to avoid the call if it is NULL. // Test MDO to avoid the call if it is NULL.
movl(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
testl(rax, rax); testptr(rax, rax);
jcc(Assembler::zero, zero_continue); jcc(Assembler::zero, zero_continue);
// rbx,: method // rbx,: method
@ -1007,53 +1006,55 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi); call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi);
// rax,: mdi // rax,: mdi
movl(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
testl(rbx, rbx); testptr(rbx, rbx);
jcc(Assembler::zero, zero_continue); jcc(Assembler::zero, zero_continue);
addl(rbx, in_bytes(methodDataOopDesc::data_offset())); addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
addl(rbx, rax); addptr(rbx, rax);
movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
bind(zero_continue); bind(zero_continue);
popl(rbx); pop(rbx);
popl(rax); pop(rax);
} }
void InterpreterMacroAssembler::verify_method_data_pointer() { void InterpreterMacroAssembler::verify_method_data_pointer() {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT #ifdef ASSERT
Label verify_continue; Label verify_continue;
pushl(rax); push(rax);
pushl(rbx); push(rbx);
pushl(rcx); push(rcx);
pushl(rdx); push(rdx);
test_method_data_pointer(rcx, verify_continue); // If mdp is zero, continue test_method_data_pointer(rcx, verify_continue); // If mdp is zero, continue
get_method(rbx); get_method(rbx);
// If the mdp is valid, it will point to a DataLayout header which is // If the mdp is valid, it will point to a DataLayout header which is
// consistent with the bcp. The converse is highly probable also. // consistent with the bcp. The converse is highly probable also.
load_unsigned_word(rdx, Address(rcx, in_bytes(DataLayout::bci_offset()))); load_unsigned_word(rdx, Address(rcx, in_bytes(DataLayout::bci_offset())));
addl(rdx, Address(rbx, methodOopDesc::const_offset())); addptr(rdx, Address(rbx, methodOopDesc::const_offset()));
leal(rdx, Address(rdx, constMethodOopDesc::codes_offset())); lea(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
cmpl(rdx, rsi); cmpptr(rdx, rsi);
jcc(Assembler::equal, verify_continue); jcc(Assembler::equal, verify_continue);
// rbx,: method // rbx,: method
// rsi: bcp // rsi: bcp
// rcx: mdp // rcx: mdp
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, rsi, rcx); call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, rsi, rcx);
bind(verify_continue); bind(verify_continue);
popl(rdx); pop(rdx);
popl(rcx); pop(rcx);
popl(rbx); pop(rbx);
popl(rax); pop(rax);
#endif // ASSERT #endif // ASSERT
} }
void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) { void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) {
// %%% This seems to be used to store counter data, which is surely 32 bits;
// however, the 64-bit side stores 64 bits, which seems wrong.
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
Address data(mdp_in, constant); Address data(mdp_in, constant);
movl(data, value); movptr(data, value);
} }
@ -1073,6 +1074,7 @@ void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" ); assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" );
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
// %%% The 64-bit side treats this as 64 bits, which seems unlikely to be intended.
if (decrement) { if (decrement) {
// Decrement the register. Set condition codes. // Decrement the register. Set condition codes.
addl(data, -DataLayout::counter_increment); addl(data, -DataLayout::counter_increment);
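
The "flow-free idiom" asserted above bumps profile counters without a branch. On the increment side (cut off by this hunk) HotSpot of this vintage pairs the add with a subtract-with-borrow, so a counter that wraps to zero is pulled back to its maximum. A C model of that saturating bump, assuming the add/sbb pairing:

    #include <cstdint>

    // addl data, 1  -> sets the carry flag exactly when the add wrapped
    // sbbl data, 0  -> subtracts the carry, restoring the max value
    static uint32_t saturating_bump(uint32_t counter) {
      uint32_t sum   = counter + 1;
      uint32_t carry = (sum == 0) ? 1u : 0u;
      return sum - carry;   // 0xFFFFFFFF stays 0xFFFFFFFF
    }
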
@ -1119,11 +1121,11 @@ void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
Label& not_equal_continue) { Label& not_equal_continue) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
if (test_value_out == noreg) { if (test_value_out == noreg) {
cmpl(value, Address(mdp_in, offset)); cmpptr(value, Address(mdp_in, offset));
} else { } else {
// Put the test value into a register, so caller can use it: // Put the test value into a register, so caller can use it:
movl(test_value_out, Address(mdp_in, offset)); movptr(test_value_out, Address(mdp_in, offset));
cmpl(test_value_out, value); cmpptr(test_value_out, value);
} }
jcc(Assembler::notEqual, not_equal_continue); jcc(Assembler::notEqual, not_equal_continue);
} }
@ -1132,31 +1134,31 @@ void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) { void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
Address disp_address(mdp_in, offset_of_disp); Address disp_address(mdp_in, offset_of_disp);
addl(mdp_in,disp_address); addptr(mdp_in,disp_address);
movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
} }
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) { void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp); Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
addl(mdp_in, disp_address); addptr(mdp_in, disp_address);
movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
} }
void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) { void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
addl(mdp_in, constant); addptr(mdp_in, constant);
movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
} }
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
pushl(return_bci); // save/restore across call_VM push(return_bci); // save/restore across call_VM
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
popl(return_bci); pop(return_bci);
} }
@ -1172,6 +1174,8 @@ void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bump
// We inline increment_mdp_data_at to return bumped_count in a register // We inline increment_mdp_data_at to return bumped_count in a register
//increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
Address data(mdp, in_bytes(JumpData::taken_offset())); Address data(mdp, in_bytes(JumpData::taken_offset()));
// %%% The 64-bit side treats these cells as 64 bits, but they seem to be 32 bits.
movl(bumped_count,data); movl(bumped_count,data);
assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" ); assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" );
addl(bumped_count, DataLayout::counter_increment); addl(bumped_count, DataLayout::counter_increment);
@ -1289,7 +1293,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
if (row == start_row) { if (row == start_row) {
// Failed the equality check on receiver[n]... Test for null. // Failed the equality check on receiver[n]... Test for null.
testl(reg2, reg2); testptr(reg2, reg2);
if (start_row == last_row) { if (start_row == last_row) {
// The only thing left to do is handle the null case. // The only thing left to do is handle the null case.
jcc(Assembler::notZero, done); jcc(Assembler::notZero, done);
@ -1315,7 +1319,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
set_mdp_data_at(mdp, recvr_offset, receiver); set_mdp_data_at(mdp, recvr_offset, receiver);
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row)); int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
movl(reg2, DataLayout::counter_increment); movptr(reg2, (int32_t)DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2); set_mdp_data_at(mdp, count_offset, reg2);
jmp(done); jmp(done);
} }
@ -1454,9 +1458,11 @@ void InterpreterMacroAssembler::profile_switch_case(Register index, Register mdp
test_method_data_pointer(mdp, profile_continue); test_method_data_pointer(mdp, profile_continue);
// Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes() // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
movl(reg2, in_bytes(MultiBranchData::per_case_size())); movptr(reg2, (int32_t)in_bytes(MultiBranchData::per_case_size()));
imull(index, reg2); // index is positive and so should have the correct value if this code were
addl(index, in_bytes(MultiBranchData::case_array_offset())); // used on 64 bits
imulptr(index, reg2);
addptr(index, in_bytes(MultiBranchData::case_array_offset()));
// Update the case count // Update the case count
increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset())); increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset()));
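
The profile cell for a switch case sits at index * per_case_size() + case_array_offset() into the method data, which is exactly what the imulptr/addptr pair computes; the comment above notes that index is non-negative, so the arithmetic would also be sound on 64 bits. The byte-offset computation, spelled out with illustrative parameter names:

    #include <cstddef>

    // per_case_bytes and header_bytes stand in for
    // in_bytes(MultiBranchData::per_case_size()) and case_array_offset().
    static size_t case_cell_offset(size_t index,
                                   size_t per_case_bytes,
                                   size_t header_bytes) {
      return index * per_case_bytes + header_bytes;   // imulptr + addptr
    }
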
@ -1535,12 +1541,12 @@ void InterpreterMacroAssembler::notify_method_exit(
{ {
SkipIfEqual skip_if(this, &DTraceMethodProbes, 0); SkipIfEqual skip_if(this, &DTraceMethodProbes, 0);
push(state); NOT_CC_INTERP(push(state));
get_thread(rbx); get_thread(rbx);
get_method(rcx); get_method(rcx);
call_VM_leaf( call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
rbx, rcx); rbx, rcx);
pop(state); NOT_CC_INTERP(pop(state));
} }
} }
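
The push/pop calls here are wrapped in NOT_CC_INTERP so the DTrace probe code also serves the C++ interpreter, which keeps its state off the expression stack. A sketch of how such a macro is typically defined (the exact definition lives in HotSpot's shared headers):

    #ifdef CC_INTERP
    #define NOT_CC_INTERP(code)        // C++ interpreter build: argument vanishes
    #else
    #define NOT_CC_INTERP(code) code   // template interpreter build: compiled as written
    #endif
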

View File

@ -65,15 +65,15 @@ class InterpreterMacroAssembler: public MacroAssembler {
#else #else
void save_bcp() { movl(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), rsi); } void save_bcp() { movptr(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), rsi); }
void restore_bcp() { movl(rsi, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); } void restore_bcp() { movptr(rsi, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); }
void restore_locals() { movl(rdi, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); } void restore_locals() { movptr(rdi, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); }
// Helpers for runtime call arguments/results // Helpers for runtime call arguments/results
void get_method(Register reg) { movl(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); } void get_method(Register reg) { movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); }
void get_constant_pool(Register reg) { get_method(reg); movl(reg, Address(reg, methodOopDesc::constants_offset())); } void get_constant_pool(Register reg) { get_method(reg); movptr(reg, Address(reg, methodOopDesc::constants_offset())); }
void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movl(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); } void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); }
void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movl(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
} }
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset); void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset);
@ -82,8 +82,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Expression stack // Expression stack
void f2ieee(); // truncate ftos to 32bits void f2ieee(); // truncate ftos to 32bits
void d2ieee(); // truncate dtos to 64bits void d2ieee(); // truncate dtos to 64bits
#endif // CC_INTERP
void pop_ptr(Register r = rax); void pop_ptr(Register r = rax);
void pop_ptr(Register r, Register tag); void pop_ptr(Register r, Register tag);
@ -104,14 +102,25 @@ class InterpreterMacroAssembler: public MacroAssembler {
void pop(TosState state); // transition vtos -> state void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos void push(TosState state); // transition state -> vtos
void pop(Register r ) { ((MacroAssembler*)this)->pop(r); }
void push(Register r ) { ((MacroAssembler*)this)->push(r); }
void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); }
// These are dummies to prevent surprise implicit conversions to Register
void pop(void* v ); // Add unimplemented ambiguous method
void push(void* v ); // Add unimplemented ambiguous method
DEBUG_ONLY(void verify_stack_tag(frame::Tag t);) DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
#endif // CC_INTERP
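
The unimplemented push(void*)/pop(void*) declarations above are a poison-overload guard: without them, a pointer-ish argument could silently pick an unintended implicit conversion to Register and push the wrong thing; with them, such a call resolves to the undefined overload and fails at link time. A minimal illustration of the idiom:

    struct Asm {
      void push(int reg) {}      // legitimate overload (stand-in for Register)
      void push(void* v);        // declared, never defined: any pointer
                                 // argument now fails at link time
    };

    int main() {
      Asm a;
      a.push(5);                 // fine
      // a.push(&a);             // resolves to the poison overload; link error
      return 0;
    }
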
#ifndef CC_INTERP #ifndef CC_INTERP
void empty_expression_stack() { void empty_expression_stack() {
movl(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize)); movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call // NULL last_sp until next java call
movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
} }
// Tagged stack helpers for swap and dup // Tagged stack helpers for swap and dup

View File

@ -28,6 +28,15 @@
// Implementation of InterpreterMacroAssembler // Implementation of InterpreterMacroAssembler
#ifdef CC_INTERP
void InterpreterMacroAssembler::get_method(Register reg) {
movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
}
#endif // CC_INTERP
#ifndef CC_INTERP
void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments) { int number_of_arguments) {
// interpreter specific // interpreter specific
@ -39,7 +48,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
save_bcp(); save_bcp();
{ {
Label L; Label L;
cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD); cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
jcc(Assembler::equal, L); jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:" stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL"); " last_sp != NULL");
@ -52,7 +61,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
#ifdef ASSERT #ifdef ASSERT
{ {
Label L; Label L;
cmpq(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); cmpptr(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
jcc(Assembler::equal, L); jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:" stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" r13 not callee saved?"); " r13 not callee saved?");
@ -60,7 +69,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
} }
{ {
Label L; Label L;
cmpq(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); cmpptr(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
jcc(Assembler::equal, L); jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:" stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" r14 not callee saved?"); " r14 not callee saved?");
@ -86,7 +95,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
#ifdef ASSERT #ifdef ASSERT
{ {
Label L; Label L;
cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD); cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
jcc(Assembler::equal, L); jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:" stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL"); " last_sp != NULL");
@ -127,15 +136,15 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread)
void InterpreterMacroAssembler::load_earlyret_value(TosState state) { void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
movq(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset()); const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset()); const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset()); const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
switch (state) { switch (state) {
case atos: movq(rax, oop_addr); case atos: movptr(rax, oop_addr);
movptr(oop_addr, NULL_WORD); movptr(oop_addr, (int32_t)NULL_WORD);
verify_oop(rax, state); break; verify_oop(rax, state); break;
case ltos: movq(rax, val_addr); break; case ltos: movptr(rax, val_addr); break;
case btos: // fall through case btos: // fall through
case ctos: // fall through case ctos: // fall through
case stos: // fall through case stos: // fall through
@ -147,15 +156,15 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
} }
// Clean up tos value in the thread object // Clean up tos value in the thread object
movl(tos_addr, (int) ilgl); movl(tos_addr, (int) ilgl);
movl(val_addr, (int) NULL_WORD); movl(val_addr, (int32_t) NULL_WORD);
} }
void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
if (JvmtiExport::can_force_early_return()) { if (JvmtiExport::can_force_early_return()) {
Label L; Label L;
movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
testq(c_rarg0, c_rarg0); testptr(c_rarg0, c_rarg0);
jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit; jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;
// Initiate earlyret handling only if it is not already being processed. // Initiate earlyret handling only if it is not already being processed.
@ -167,7 +176,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
// Call Interpreter::remove_activation_early_entry() to get the address of the // Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code. // same-named entrypoint in the generated interpreter code.
movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset())); movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset()));
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0); call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0);
jmp(rax); jmp(rax);
@ -192,7 +201,7 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert(cache != index, "must use different registers"); assert(cache != index, "must use different registers");
load_unsigned_word(index, Address(r13, bcp_offset)); load_unsigned_word(index, Address(r13, bcp_offset));
movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index // convert from field index to ConstantPoolCacheEntry index
shll(index, 2); shll(index, 2);
@ -209,10 +218,10 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
// convert from field index to ConstantPoolCacheEntry index // convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset // and from word offset to byte offset
shll(tmp, 2 + LogBytesPerWord); shll(tmp, 2 + LogBytesPerWord);
movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header // skip past the header
addq(cache, in_bytes(constantPoolCacheOopDesc::base_offset())); addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
addq(cache, tmp); // construct pointer to cache entry addptr(cache, tmp); // construct pointer to cache entry
} }
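
get_cache_entry_pointer_at_bcp turns a field index into a byte offset: shifting by 2 + LogBytesPerWord multiplies by the 4-word size of a ConstantPoolCacheEntry (asserted in the previous hunk), after which the cache header is skipped. As plain arithmetic, with the header size left as a parameter:

    #include <cstddef>

    static size_t cache_entry_byte_offset(size_t field_index,
                                          size_t word_size,      // 4 or 8
                                          size_t header_bytes) { // base_offset()
      size_t entry_bytes = 4 * word_size;               // sizeof(ConstantPoolCacheEntry)
      return header_bytes + field_index * entry_bytes;  // shll(tmp, 2 + LogBytesPerWord)
    }
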
@ -247,24 +256,24 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
// exceptionally deep in the Java hierarchy and we have to scan the // exceptionally deep in the Java hierarchy and we have to scan the
// secondary superclass list the hard way. See if we get an // secondary superclass list the hard way. See if we get an
// immediate positive hit // immediate positive hit
cmpq(rax, Address(Rsub_klass, rcx, Address::times_1)); cmpptr(rax, Address(Rsub_klass, rcx, Address::times_1));
jcc(Assembler::equal,ok_is_subtype); jcc(Assembler::equal,ok_is_subtype);
// Check for immediate negative hit // Check for immediate negative hit
cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()); cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
jcc( Assembler::notEqual, not_subtype ); jcc( Assembler::notEqual, not_subtype );
// Check for self // Check for self
cmpq(Rsub_klass, rax); cmpptr(Rsub_klass, rax);
jcc(Assembler::equal, ok_is_subtype); jcc(Assembler::equal, ok_is_subtype);
// Now do a linear scan of the secondary super-klass chain. // Now do a linear scan of the secondary super-klass chain.
movq(rdi, Address(Rsub_klass, sizeof(oopDesc) + movptr(rdi, Address(Rsub_klass, sizeof(oopDesc) +
Klass::secondary_supers_offset_in_bytes())); Klass::secondary_supers_offset_in_bytes()));
// rdi holds the objArrayOop of secondary supers. // rdi holds the objArrayOop of secondary supers.
// Load the array length // Load the array length
movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
// Skip to start of data; also clear Z flag in case rcx is zero // Skip to start of data; also clear Z flag in case rcx is zero
addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Scan rcx words at [rdi] for an occurrence of rax // Scan rcx words at [rdi] for an occurrence of rax
// Set NZ/Z based on last compare // Set NZ/Z based on last compare
@ -272,30 +281,31 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
// and we store values in objArrays always encoded, thus we need to encode value // and we store values in objArrays always encoded, thus we need to encode value
// before repne // before repne
if (UseCompressedOops) { if (UseCompressedOops) {
pushq(rax); push(rax);
encode_heap_oop(rax); encode_heap_oop(rax);
repne_scanl(); repne_scanl();
// Not equal? // Not equal?
jcc(Assembler::notEqual, not_subtype_pop); jcc(Assembler::notEqual, not_subtype_pop);
// restore heap oop here for movq // restore heap oop here for movq
popq(rax); pop(rax);
} else { } else {
repne_scanq(); repne_scan();
jcc(Assembler::notEqual, not_subtype); jcc(Assembler::notEqual, not_subtype);
} }
// Must be equal but missed in cache. Update cache. // Must be equal but missed in cache. Update cache.
movq(Address(Rsub_klass, sizeof(oopDesc) + movptr(Address(Rsub_klass, sizeof(oopDesc) +
Klass::secondary_super_cache_offset_in_bytes()), rax); Klass::secondary_super_cache_offset_in_bytes()), rax);
jmp(ok_is_subtype); jmp(ok_is_subtype);
bind(not_subtype_pop); bind(not_subtype_pop);
// restore heap oop here for miss // restore heap oop here for miss
if (UseCompressedOops) popq(rax); if (UseCompressedOops) pop(rax);
bind(not_subtype); bind(not_subtype);
profile_typecheck_failed(rcx); // blows rcx profile_typecheck_failed(rcx); // blows rcx
} }
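
With compressed oops the secondary-supers array holds 32-bit narrow oops, so rax must be encoded before the repne scan can compare it element by element; the push/pop pair preserves the uncompressed value for the cache update afterwards. The encoding itself is base-subtract-then-shift, roughly:

    #include <cstdint>

    // Sketch of narrow-oop encoding; heap_base and shift (commonly 3, for
    // 8-byte alignment) come from the VM's compressed-oops configuration.
    static uint32_t encode_heap_oop(uint64_t oop, uint64_t heap_base,
                                    unsigned shift) {
      return (uint32_t)((oop - heap_base) >> shift);
    }
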
// Java Expression Stack // Java Expression Stack
#ifdef ASSERT #ifdef ASSERT
@ -307,17 +317,17 @@ void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
if (t == frame::TagCategory2) { if (t == frame::TagCategory2) {
tag = frame::TagValue; tag = frame::TagValue;
Label hokay; Label hokay;
cmpq(Address(rsp, 3*wordSize), (int)tag); cmpptr(Address(rsp, 3*wordSize), (int32_t)tag);
jcc(Assembler::equal, hokay); jcc(Assembler::equal, hokay);
stop("Java Expression stack tag high value is bad"); stop("Java Expression stack tag high value is bad");
bind(hokay); bind(hokay);
} }
Label okay; Label okay;
cmpq(Address(rsp, wordSize), (int)tag); cmpptr(Address(rsp, wordSize), (int32_t)tag);
jcc(Assembler::equal, okay); jcc(Assembler::equal, okay);
// Also check whether the stack value is zero; the tag might // Also check whether the stack value is zero; the tag might
// not have been set coming from deopt. // not have been set coming from deopt.
cmpq(Address(rsp, 0), 0); cmpptr(Address(rsp, 0), 0);
jcc(Assembler::equal, okay); jcc(Assembler::equal, okay);
stop("Java Expression stack tag value is bad"); stop("Java Expression stack tag value is bad");
bind(okay); bind(okay);
@ -327,83 +337,83 @@ void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
void InterpreterMacroAssembler::pop_ptr(Register r) { void InterpreterMacroAssembler::pop_ptr(Register r) {
debug_only(verify_stack_tag(frame::TagReference)); debug_only(verify_stack_tag(frame::TagReference));
popq(r); pop(r);
if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
} }
void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) { void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
popq(r); pop(r);
if (TaggedStackInterpreter) popq(tag); if (TaggedStackInterpreter) pop(tag);
} }
void InterpreterMacroAssembler::pop_i(Register r) { void InterpreterMacroAssembler::pop_i(Register r) {
// XXX can't use popq currently, the upper half is not clean // XXX can't use pop currently, the upper half is not clean
debug_only(verify_stack_tag(frame::TagValue)); debug_only(verify_stack_tag(frame::TagValue));
movl(r, Address(rsp, 0)); movl(r, Address(rsp, 0));
addq(rsp, wordSize); addptr(rsp, wordSize);
if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
} }
void InterpreterMacroAssembler::pop_l(Register r) { void InterpreterMacroAssembler::pop_l(Register r) {
debug_only(verify_stack_tag(frame::TagCategory2)); debug_only(verify_stack_tag(frame::TagCategory2));
movq(r, Address(rsp, 0)); movq(r, Address(rsp, 0));
addq(rsp, 2 * Interpreter::stackElementSize()); addptr(rsp, 2 * Interpreter::stackElementSize());
} }
void InterpreterMacroAssembler::pop_f(XMMRegister r) { void InterpreterMacroAssembler::pop_f(XMMRegister r) {
debug_only(verify_stack_tag(frame::TagValue)); debug_only(verify_stack_tag(frame::TagValue));
movflt(r, Address(rsp, 0)); movflt(r, Address(rsp, 0));
addq(rsp, wordSize); addptr(rsp, wordSize);
if (TaggedStackInterpreter) addq(rsp, 1 * wordSize); if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
} }
void InterpreterMacroAssembler::pop_d(XMMRegister r) { void InterpreterMacroAssembler::pop_d(XMMRegister r) {
debug_only(verify_stack_tag(frame::TagCategory2)); debug_only(verify_stack_tag(frame::TagCategory2));
movdbl(r, Address(rsp, 0)); movdbl(r, Address(rsp, 0));
addq(rsp, 2 * Interpreter::stackElementSize()); addptr(rsp, 2 * Interpreter::stackElementSize());
} }
void InterpreterMacroAssembler::push_ptr(Register r) { void InterpreterMacroAssembler::push_ptr(Register r) {
if (TaggedStackInterpreter) pushq(frame::TagReference); if (TaggedStackInterpreter) push(frame::TagReference);
pushq(r); push(r);
} }
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) { void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
if (TaggedStackInterpreter) pushq(tag); if (TaggedStackInterpreter) push(tag);
pushq(r); push(r);
} }
void InterpreterMacroAssembler::push_i(Register r) { void InterpreterMacroAssembler::push_i(Register r) {
if (TaggedStackInterpreter) pushq(frame::TagValue); if (TaggedStackInterpreter) push(frame::TagValue);
pushq(r); push(r);
} }
void InterpreterMacroAssembler::push_l(Register r) { void InterpreterMacroAssembler::push_l(Register r) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
pushq(frame::TagValue); push(frame::TagValue);
subq(rsp, 1 * wordSize); subptr(rsp, 1 * wordSize);
pushq(frame::TagValue); push(frame::TagValue);
subq(rsp, 1 * wordSize); subptr(rsp, 1 * wordSize);
} else { } else {
subq(rsp, 2 * wordSize); subptr(rsp, 2 * wordSize);
} }
movq(Address(rsp, 0), r); movq(Address(rsp, 0), r);
} }
void InterpreterMacroAssembler::push_f(XMMRegister r) { void InterpreterMacroAssembler::push_f(XMMRegister r) {
if (TaggedStackInterpreter) pushq(frame::TagValue); if (TaggedStackInterpreter) push(frame::TagValue);
subq(rsp, wordSize); subptr(rsp, wordSize);
movflt(Address(rsp, 0), r); movflt(Address(rsp, 0), r);
} }
void InterpreterMacroAssembler::push_d(XMMRegister r) { void InterpreterMacroAssembler::push_d(XMMRegister r) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
pushq(frame::TagValue); push(frame::TagValue);
subq(rsp, 1 * wordSize); subptr(rsp, 1 * wordSize);
pushq(frame::TagValue); push(frame::TagValue);
subq(rsp, 1 * wordSize); subptr(rsp, 1 * wordSize);
} else { } else {
subq(rsp, 2 * wordSize); subptr(rsp, 2 * wordSize);
} }
movdbl(Address(rsp, 0), r); movdbl(Address(rsp, 0), r);
} }
@ -441,20 +451,22 @@ void InterpreterMacroAssembler::push(TosState state) {
} }
// Tagged stack helpers for swap and dup // Tagged stack helpers for swap and dup
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val, void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
Register tag) { Register tag) {
movq(val, Address(rsp, Interpreter::expr_offset_in_bytes(n))); movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
movq(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n))); movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
} }
} }
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val, void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
Register tag) { Register tag) {
movq(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val); movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
movq(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag); movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
} }
} }
@ -463,12 +475,12 @@ void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) { void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) { if (tag == frame::TagCategory2) {
mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)),
(intptr_t)frame::TagValue); (int32_t)frame::TagValue);
mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)),
(intptr_t)frame::TagValue); (int32_t)frame::TagValue);
} else { } else {
mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (intptr_t)tag); movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
} }
} }
} }
@ -476,13 +488,13 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) { void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) { if (tag == frame::TagCategory2) {
mov64(Address(r14, idx, Address::times_8, movptr(Address(r14, idx, Address::times_8,
Interpreter::local_tag_offset_in_bytes(1)), (intptr_t)frame::TagValue); Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
mov64(Address(r14, idx, Address::times_8, movptr(Address(r14, idx, Address::times_8,
Interpreter::local_tag_offset_in_bytes(0)), (intptr_t)frame::TagValue); Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
} else { } else {
mov64(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
(intptr_t)tag); (int32_t)tag);
} }
} }
} }
@ -490,7 +502,7 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
void InterpreterMacroAssembler::tag_local(Register tag, Register idx) { void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
// can only be TagValue or TagReference // can only be TagValue or TagReference
movq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag); movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
} }
} }
@ -498,7 +510,7 @@ void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
void InterpreterMacroAssembler::tag_local(Register tag, int n) { void InterpreterMacroAssembler::tag_local(Register tag, int n) {
if (TaggedStackInterpreter) { if (TaggedStackInterpreter) {
// can only be TagValue or TagReference // can only be TagValue or TagReference
movq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag); movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
} }
} }
@ -509,17 +521,17 @@ void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
if (tag == frame::TagCategory2) { if (tag == frame::TagCategory2) {
Label nbl; Label nbl;
t = frame::TagValue; // change to what is stored in locals t = frame::TagValue; // change to what is stored in locals
cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int)t); cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
jcc(Assembler::equal, nbl); jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double"); stop("Local tag is bad for long/double");
bind(nbl); bind(nbl);
} }
Label notBad; Label notBad;
cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int)t); cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
jcc(Assembler::equal, notBad); jcc(Assembler::equal, notBad);
// Also check whether the local value is zero; the tag might // Also check whether the local value is zero; the tag might
// not have been set coming from deopt. // not have been set coming from deopt.
cmpq(Address(r14, Interpreter::local_offset_in_bytes(n)), 0); cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
jcc(Assembler::equal, notBad); jcc(Assembler::equal, notBad);
stop("Local tag is bad"); stop("Local tag is bad");
bind(notBad); bind(notBad);
@ -532,17 +544,17 @@ void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
if (tag == frame::TagCategory2) { if (tag == frame::TagCategory2) {
Label nbl; Label nbl;
t = frame::TagValue; // change to what is stored in locals t = frame::TagValue; // change to what is stored in locals
cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int)t); cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
jcc(Assembler::equal, nbl); jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double"); stop("Local tag is bad for long/double");
bind(nbl); bind(nbl);
} }
Label notBad; Label notBad;
cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int)t); cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
jcc(Assembler::equal, notBad); jcc(Assembler::equal, notBad);
// Also check whether the local value is zero; the tag might // Also check whether the local value is zero; the tag might
// not have been set coming from deopt. // not have been set coming from deopt.
cmpq(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0); cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
jcc(Assembler::equal, notBad); jcc(Assembler::equal, notBad);
stop("Local tag is bad"); stop("Local tag is bad");
bind(notBad); bind(notBad);
@ -559,7 +571,7 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
Register arg_1) { Register arg_1) {
if (c_rarg0 != arg_1) { if (c_rarg0 != arg_1) {
movq(c_rarg0, arg_1); mov(c_rarg0, arg_1);
} }
MacroAssembler::call_VM_leaf_base(entry_point, 1); MacroAssembler::call_VM_leaf_base(entry_point, 1);
} }
@ -571,10 +583,10 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
assert(c_rarg0 != arg_2, "smashed argument"); assert(c_rarg0 != arg_2, "smashed argument");
assert(c_rarg1 != arg_1, "smashed argument"); assert(c_rarg1 != arg_1, "smashed argument");
if (c_rarg0 != arg_1) { if (c_rarg0 != arg_1) {
movq(c_rarg0, arg_1); mov(c_rarg0, arg_1);
} }
if (c_rarg1 != arg_2) { if (c_rarg1 != arg_2) {
movq(c_rarg1, arg_2); mov(c_rarg1, arg_2);
} }
MacroAssembler::call_VM_leaf_base(entry_point, 2); MacroAssembler::call_VM_leaf_base(entry_point, 2);
} }
@ -590,13 +602,13 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
assert(c_rarg2 != arg_1, "smashed argument"); assert(c_rarg2 != arg_1, "smashed argument");
assert(c_rarg2 != arg_2, "smashed argument"); assert(c_rarg2 != arg_2, "smashed argument");
if (c_rarg0 != arg_1) { if (c_rarg0 != arg_1) {
movq(c_rarg0, arg_1); mov(c_rarg0, arg_1);
} }
if (c_rarg1 != arg_2) { if (c_rarg1 != arg_2) {
movq(c_rarg1, arg_2); mov(c_rarg1, arg_2);
} }
if (c_rarg2 != arg_3) { if (c_rarg2 != arg_3) {
movq(c_rarg2, arg_3); mov(c_rarg2, arg_3);
} }
MacroAssembler::call_VM_leaf_base(entry_point, 3); MacroAssembler::call_VM_leaf_base(entry_point, 3);
} }
@ -605,9 +617,9 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
// in this thread in which case we must call the i2i entry // in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
// set sender sp // set sender sp
leaq(r13, Address(rsp, wordSize)); lea(r13, Address(rsp, wordSize));
// record last_sp // record last_sp
movq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13); movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13);
if (JvmtiExport::can_post_interpreter_events()) { if (JvmtiExport::can_post_interpreter_events()) {
Label run_compiled_code; Label run_compiled_code;
@ -644,12 +656,12 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
verify_FPU(1, state); verify_FPU(1, state);
if (VerifyActivationFrameSize) { if (VerifyActivationFrameSize) {
Label L; Label L;
movq(rcx, rbp); mov(rcx, rbp);
subq(rcx, rsp); subptr(rcx, rsp);
int min_frame_size = int32_t min_frame_size =
(frame::link_offset - frame::interpreter_frame_initial_sp_offset) * (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
wordSize; wordSize;
cmpq(rcx, min_frame_size); cmpptr(rcx, (int32_t)min_frame_size);
jcc(Assembler::greaterEqual, L); jcc(Assembler::greaterEqual, L);
stop("broken stack frame"); stop("broken stack frame");
bind(L); bind(L);
@ -678,7 +690,7 @@ void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
// load next bytecode (load before advancing r13 to prevent AGI) // load next bytecode (load before advancing r13 to prevent AGI)
load_unsigned_byte(rbx, Address(r13, step)); load_unsigned_byte(rbx, Address(r13, step));
// advance r13 // advance r13
incrementq(r13, step); increment(r13, step);
dispatch_base(state, Interpreter::dispatch_table(state)); dispatch_base(state, Interpreter::dispatch_table(state));
} }
@ -718,7 +730,7 @@ void InterpreterMacroAssembler::remove_activation(
movbool(do_not_unlock_if_synchronized, false); // reset the flag movbool(do_not_unlock_if_synchronized, false); // reset the flag
// get method access flags // get method access flags
movq(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
movl(rcx, Address(rbx, methodOopDesc::access_flags_offset())); movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
testl(rcx, JVM_ACC_SYNCHRONIZED); testl(rcx, JVM_ACC_SYNCHRONIZED);
jcc(Assembler::zero, unlocked); jcc(Assembler::zero, unlocked);
@ -738,10 +750,10 @@ void InterpreterMacroAssembler::remove_activation(
wordSize - (int) sizeof(BasicObjectLock)); wordSize - (int) sizeof(BasicObjectLock));
// We use c_rarg1 so that if we go slow path it will be the correct // We use c_rarg1 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly // register for unlock_object to pass to VM directly
leaq(c_rarg1, monitor); // address of first monitor lea(c_rarg1, monitor); // address of first monitor
movq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); movptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
testq(rax, rax); testptr(rax, rax);
jcc(Assembler::notZero, unlock); jcc(Assembler::notZero, unlock);
pop(state); pop(state);
@ -783,9 +795,9 @@ void InterpreterMacroAssembler::remove_activation(
bind(restart); bind(restart);
// We use c_rarg1 so that if we go slow path it will be the correct // We use c_rarg1 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly // register for unlock_object to pass to VM directly
movq(c_rarg1, monitor_block_top); // points to current entry, starting movptr(c_rarg1, monitor_block_top); // points to current entry, starting
// with top-most entry // with top-most entry
leaq(rbx, monitor_block_bot); // points to word before bottom of lea(rbx, monitor_block_bot); // points to word before bottom of
// monitor block // monitor block
jmp(entry); jmp(entry);
@ -818,12 +830,12 @@ void InterpreterMacroAssembler::remove_activation(
bind(loop); bind(loop);
// check if current entry is used // check if current entry is used
cmpq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int) NULL); cmpptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
jcc(Assembler::notEqual, exception); jcc(Assembler::notEqual, exception);
addq(c_rarg1, entry_size); // otherwise advance to next entry addptr(c_rarg1, entry_size); // otherwise advance to next entry
bind(entry); bind(entry);
cmpq(c_rarg1, rbx); // check if bottom reached cmpptr(c_rarg1, rbx); // check if bottom reached
jcc(Assembler::notEqual, loop); // if not at bottom then check this entry jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
} }
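
The loop above walks the monitor block between the two computed boundaries. A hedged C++ sketch of the same scan (entry layout simplified, all names hypothetical):

#include <cstddef>

// Simplified monitor entry: displaced header word followed by the obj slot.
struct BasicObjectLockSketch { void* displaced_header; void* obj; };

// Scan from the current top entry toward the block bottom; any entry whose
// obj slot is still non-NULL is a monitor the method failed to unlock.
static bool has_locked_monitor(BasicObjectLockSketch* top,
                               BasicObjectLockSketch* bottom) {
  for (BasicObjectLockSketch* e = top; e != bottom; ++e) {
    if (e->obj != NULL) return true;
  }
  return false;
}
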
@ -838,13 +850,15 @@ void InterpreterMacroAssembler::remove_activation(
// remove activation // remove activation
// get sender sp // get sender sp
movq(rbx, movptr(rbx,
Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
leave(); // remove frame anchor leave(); // remove frame anchor
popq(ret_addr); // get return address pop(ret_addr); // get return address
movq(rsp, rbx); // set sp to sender sp mov(rsp, rbx); // set sp to sender sp
} }
#endif // CC_INTERP
// Lock object // Lock object
// //
// Args: // Args:
@ -875,7 +889,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
Label slow_case; Label slow_case;
// Load object pointer into obj_reg %c_rarg3 // Load object pointer into obj_reg %c_rarg3
movq(obj_reg, Address(lock_reg, obj_offset)); movptr(obj_reg, Address(lock_reg, obj_offset));
if (UseBiasedLocking) { if (UseBiasedLocking) {
biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case); biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case);
@ -885,16 +899,16 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
movl(swap_reg, 1); movl(swap_reg, 1);
// Load (object->mark() | 1) into swap_reg %rax // Load (object->mark() | 1) into swap_reg %rax
orq(swap_reg, Address(obj_reg, 0)); orptr(swap_reg, Address(obj_reg, 0));
// Save (object->mark() | 1) into BasicLock's displaced header // Save (object->mark() | 1) into BasicLock's displaced header
movq(Address(lock_reg, mark_offset), swap_reg); movptr(Address(lock_reg, mark_offset), swap_reg);
assert(lock_offset == 0, assert(lock_offset == 0,
"displached header must be first word in BasicObjectLock"); "displached header must be first word in BasicObjectLock");
if (os::is_MP()) lock(); if (os::is_MP()) lock();
cmpxchgq(lock_reg, Address(obj_reg, 0)); cmpxchgptr(lock_reg, Address(obj_reg, 0));
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero, cond_inc32(Assembler::zero,
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr())); ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
@ -910,11 +924,11 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// assuming both stack pointer and pagesize have their // assuming both stack pointer and pagesize have their
// least significant 3 bits clear. // least significant 3 bits clear.
// NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
subq(swap_reg, rsp); subptr(swap_reg, rsp);
andq(swap_reg, 7 - os::vm_page_size()); andptr(swap_reg, 7 - os::vm_page_size());
// Save the test result, for recursive case, the result is zero // Save the test result, for recursive case, the result is zero
movq(Address(lock_reg, mark_offset), swap_reg); movptr(Address(lock_reg, mark_offset), swap_reg);
if (PrintBiasedLockingStatistics) { if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero, cond_inc32(Assembler::zero,
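
The subptr/andptr pair above implements the recursive stack-lock test without a branch. A sketch of the predicate, assuming a 4096-byte page and the 8-byte alignment the comment requires (names hypothetical):

#include <cstdint>

// Zero iff the displaced mark is an 8-byte-aligned address within one page
// above the stack pointer, i.e. a recursive lock by this thread. The mask
// 7 - page_size, taken in wrapping unsigned arithmetic, keeps the low three
// bits and every bit at or above the page size.
static bool is_recursive_stack_lock(std::uintptr_t mark, std::uintptr_t sp,
                                    std::uintptr_t page_size) {
  return ((mark - sp) & (7 - page_size)) == 0;
}
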
@ -963,37 +977,37 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Convert from BasicObjectLock structure to object and BasicLock // Convert from BasicObjectLock structure to object and BasicLock
// structure. Store the BasicLock address into %rax // structure. Store the BasicLock address into %rax
leaq(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes())); lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
// Load oop into obj_reg(%c_rarg3) // Load oop into obj_reg(%c_rarg3)
movq(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
// Free entry // Free entry
movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
if (UseBiasedLocking) { if (UseBiasedLocking) {
biased_locking_exit(obj_reg, header_reg, done); biased_locking_exit(obj_reg, header_reg, done);
} }
// Load the old header from BasicLock structure // Load the old header from BasicLock structure
movq(header_reg, Address(swap_reg, movptr(header_reg, Address(swap_reg,
BasicLock::displaced_header_offset_in_bytes())); BasicLock::displaced_header_offset_in_bytes()));
// Test for recursion // Test for recursion
testq(header_reg, header_reg); testptr(header_reg, header_reg);
// zero for recursive case // zero for recursive case
jcc(Assembler::zero, done); jcc(Assembler::zero, done);
// Atomic swap back the old header // Atomic swap back the old header
if (os::is_MP()) lock(); if (os::is_MP()) lock();
cmpxchgq(header_reg, Address(obj_reg, 0)); cmpxchgptr(header_reg, Address(obj_reg, 0));
// zero for recursive case // zero for recursive case
jcc(Assembler::zero, done); jcc(Assembler::zero, done);
// Call the runtime routine for slow case. // Call the runtime routine for slow case.
movq(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()),
obj_reg); // restore obj obj_reg); // restore obj
call_VM(noreg, call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
@ -1005,12 +1019,13 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
} }
} }
#ifndef CC_INTERP
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
Label& zero_continue) { Label& zero_continue) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
movq(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize)); movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
testq(mdp, mdp); testptr(mdp, mdp);
jcc(Assembler::zero, zero_continue); jcc(Assembler::zero, zero_continue);
} }
@ -1019,13 +1034,13 @@ void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
Label zero_continue; Label zero_continue;
pushq(rax); push(rax);
pushq(rbx); push(rbx);
get_method(rbx); get_method(rbx);
// Test MDO to avoid the call if it is NULL. // Test MDO to avoid the call if it is NULL.
movq(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
testq(rax, rax); testptr(rax, rax);
jcc(Assembler::zero, zero_continue); jcc(Assembler::zero, zero_continue);
// rbx: method // rbx: method
@ -1033,26 +1048,26 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13); call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
// rax: mdi // rax: mdi
movq(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
testq(rbx, rbx); testptr(rbx, rbx);
jcc(Assembler::zero, zero_continue); jcc(Assembler::zero, zero_continue);
addq(rbx, in_bytes(methodDataOopDesc::data_offset())); addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
addq(rbx, rax); addptr(rbx, rax);
movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx); movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
bind(zero_continue); bind(zero_continue);
popq(rbx); pop(rbx);
popq(rax); pop(rax);
} }
void InterpreterMacroAssembler::verify_method_data_pointer() { void InterpreterMacroAssembler::verify_method_data_pointer() {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT #ifdef ASSERT
Label verify_continue; Label verify_continue;
pushq(rax); push(rax);
pushq(rbx); push(rbx);
pushq(c_rarg3); push(c_rarg3);
pushq(c_rarg2); push(c_rarg2);
test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue
get_method(rbx); get_method(rbx);
@ -1060,9 +1075,9 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
// consistent with the bcp. The converse is highly probable also. // consistent with the bcp. The converse is highly probable also.
load_unsigned_word(c_rarg2, load_unsigned_word(c_rarg2,
Address(c_rarg3, in_bytes(DataLayout::bci_offset()))); Address(c_rarg3, in_bytes(DataLayout::bci_offset())));
addq(c_rarg2, Address(rbx, methodOopDesc::const_offset())); addptr(c_rarg2, Address(rbx, methodOopDesc::const_offset()));
leaq(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset())); lea(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset()));
cmpq(c_rarg2, r13); cmpptr(c_rarg2, r13);
jcc(Assembler::equal, verify_continue); jcc(Assembler::equal, verify_continue);
// rbx: method // rbx: method
// r13: bcp // r13: bcp
@ -1070,10 +1085,10 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
rbx, r13, c_rarg3); rbx, r13, c_rarg3);
bind(verify_continue); bind(verify_continue);
popq(c_rarg2); pop(c_rarg2);
popq(c_rarg3); pop(c_rarg3);
popq(rbx); pop(rbx);
popq(rax); pop(rax);
#endif // ASSERT #endif // ASSERT
} }
@ -1083,7 +1098,7 @@ void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
Register value) { Register value) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
Address data(mdp_in, constant); Address data(mdp_in, constant);
movq(data, value); movptr(data, value);
} }
@ -1099,22 +1114,24 @@ void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
void InterpreterMacroAssembler::increment_mdp_data_at(Address data, void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
bool decrement) { bool decrement) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
// %%% this uses 64-bit counters; at best it wastes space,
// at worst it is a rare bug when counters overflow
if (decrement) { if (decrement) {
// Decrement the register. Set condition codes. // Decrement the register. Set condition codes.
addq(data, -DataLayout::counter_increment); addptr(data, (int32_t) -DataLayout::counter_increment);
// If the decrement causes the counter to overflow, stay negative // If the decrement causes the counter to overflow, stay negative
Label L; Label L;
jcc(Assembler::negative, L); jcc(Assembler::negative, L);
addq(data, DataLayout::counter_increment); addptr(data, (int32_t) DataLayout::counter_increment);
bind(L); bind(L);
} else { } else {
assert(DataLayout::counter_increment == 1, assert(DataLayout::counter_increment == 1,
"flow-free idiom only works with 1"); "flow-free idiom only works with 1");
// Increment the register. Set carry flag. // Increment the register. Set carry flag.
addq(data, DataLayout::counter_increment); addptr(data, DataLayout::counter_increment);
// If the increment causes the counter to overflow, pull back by 1. // If the increment causes the counter to overflow, pull back by 1.
sbbq(data, 0); sbbptr(data, (int32_t)0);
} }
} }
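
For reference, the carry-based idiom in the increment path above: the add sets the carry flag only when the counter wraps to zero, and the following sbb of 0 subtracts that carry, so a wrapped counter saturates at its maximum. A hedged sketch, valid only for an increment of 1 as the assert states (names hypothetical):

#include <cstdint>

static std::uint64_t saturating_increment(std::uint64_t counter) {
  std::uint64_t bumped = counter + 1;   // addptr: carry set iff wrapped to 0
  std::uint64_t carry  = (bumped == 0); // model of the CPU carry flag
  return bumped - carry;                // sbbptr(data, 0): pull back to max
}
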
@ -1146,11 +1163,11 @@ void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
Label& not_equal_continue) { Label& not_equal_continue) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
if (test_value_out == noreg) { if (test_value_out == noreg) {
cmpq(value, Address(mdp_in, offset)); cmpptr(value, Address(mdp_in, offset));
} else { } else {
// Put the test value into a register, so caller can use it: // Put the test value into a register, so caller can use it:
movq(test_value_out, Address(mdp_in, offset)); movptr(test_value_out, Address(mdp_in, offset));
cmpq(test_value_out, value); cmpptr(test_value_out, value);
} }
jcc(Assembler::notEqual, not_equal_continue); jcc(Assembler::notEqual, not_equal_continue);
} }
@ -1160,8 +1177,8 @@ void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
int offset_of_disp) { int offset_of_disp) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
Address disp_address(mdp_in, offset_of_disp); Address disp_address(mdp_in, offset_of_disp);
addq(mdp_in, disp_address); addptr(mdp_in, disp_address);
movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
} }
@ -1170,26 +1187,26 @@ void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
int offset_of_disp) { int offset_of_disp) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp); Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
addq(mdp_in, disp_address); addptr(mdp_in, disp_address);
movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
} }
void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
int constant) { int constant) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
addq(mdp_in, constant); addptr(mdp_in, constant);
movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in); movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
} }
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
pushq(return_bci); // save/restore across call_VM push(return_bci); // save/restore across call_VM
call_VM(noreg, call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
return_bci); return_bci);
popq(return_bci); pop(return_bci);
} }
@ -1206,12 +1223,12 @@ void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
// We inline increment_mdp_data_at to return bumped_count in a register // We inline increment_mdp_data_at to return bumped_count in a register
//increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
Address data(mdp, in_bytes(JumpData::taken_offset())); Address data(mdp, in_bytes(JumpData::taken_offset()));
movq(bumped_count, data); movptr(bumped_count, data);
assert(DataLayout::counter_increment == 1, assert(DataLayout::counter_increment == 1,
"flow-free idiom only works with 1"); "flow-free idiom only works with 1");
addq(bumped_count, DataLayout::counter_increment); addptr(bumped_count, DataLayout::counter_increment);
sbbq(bumped_count, 0); sbbptr(bumped_count, 0);
movq(data, bumped_count); // Store back out movptr(data, bumped_count); // Store back out
// The method data pointer needs to be updated to reflect the new target. // The method data pointer needs to be updated to reflect the new target.
update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset())); update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
@ -1339,7 +1356,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
if (test_for_null_also) { if (test_for_null_also) {
// Failed the equality check on receiver[n]... Test for null. // Failed the equality check on receiver[n]... Test for null.
testq(reg2, reg2); testptr(reg2, reg2);
if (start_row == last_row) { if (start_row == last_row) {
// The only thing left to do is handle the null case. // The only thing left to do is handle the null case.
jcc(Assembler::notZero, done); jcc(Assembler::notZero, done);
@ -1535,8 +1552,8 @@ void InterpreterMacroAssembler::profile_switch_case(Register index,
// Build the base (index * per_case_size_in_bytes()) + // Build the base (index * per_case_size_in_bytes()) +
// case_array_offset_in_bytes() // case_array_offset_in_bytes()
movl(reg2, in_bytes(MultiBranchData::per_case_size())); movl(reg2, in_bytes(MultiBranchData::per_case_size()));
imulq(index, reg2); // XXX l ? imulptr(index, reg2); // XXX l ?
addq(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ? addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
// Update the case count // Update the case count
increment_mdp_data_at(mdp, increment_mdp_data_at(mdp,
@ -1554,6 +1571,7 @@ void InterpreterMacroAssembler::profile_switch_case(Register index,
} }
void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) { void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
if (state == atos) { if (state == atos) {
MacroAssembler::verify_oop(reg); MacroAssembler::verify_oop(reg);
@ -1562,6 +1580,7 @@ void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
} }
#endif // !CC_INTERP
void InterpreterMacroAssembler::notify_method_entry() { void InterpreterMacroAssembler::notify_method_entry() {
@ -1598,22 +1617,25 @@ void InterpreterMacroAssembler::notify_method_exit(
// method result is saved across the call to post_method_exit. If this // method result is saved across the call to post_method_exit. If this
// is changed then the interpreter_frame_result implementation will // is changed then the interpreter_frame_result implementation will
// need to be updated too. // need to be updated too.
push(state);
// For the c++ interpreter the result is always stored at a known location in the frame;
// the template interpreter will leave it on top of the stack.
NOT_CC_INTERP(push(state);)
movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset())); movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));
testl(rdx, rdx); testl(rdx, rdx);
jcc(Assembler::zero, L); jcc(Assembler::zero, L);
call_VM(noreg, call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
bind(L); bind(L);
pop(state); NOT_CC_INTERP(pop(state));
} }
{ {
SkipIfEqual skip(this, &DTraceMethodProbes, false); SkipIfEqual skip(this, &DTraceMethodProbes, false);
push(state); NOT_CC_INTERP(push(state));
get_method(c_rarg1); get_method(c_rarg1);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
r15_thread, c_rarg1); r15_thread, c_rarg1);
pop(state); NOT_CC_INTERP(pop(state));
} }
} }
View File
@ -25,8 +25,8 @@
// This file specializes the assembler with interpreter-specific macros // This file specializes the assembler with interpreter-specific macros
class InterpreterMacroAssembler class InterpreterMacroAssembler: public MacroAssembler {
: public MacroAssembler { #ifndef CC_INTERP
protected: protected:
// Interpreter specific version of call_VM_base // Interpreter specific version of call_VM_base
virtual void call_VM_leaf_base(address entry_point, virtual void call_VM_leaf_base(address entry_point,
@ -44,52 +44,53 @@ class InterpreterMacroAssembler
// base routine for all dispatches // base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true); void dispatch_base(TosState state, address* table, bool verifyoop = true);
#endif // CC_INTERP
public: public:
InterpreterMacroAssembler(CodeBuffer* code) InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
: MacroAssembler(code)
{}
void load_earlyret_value(TosState state); void load_earlyret_value(TosState state);
#ifdef CC_INTERP
void save_bcp() { /* not needed in c++ interpreter and harmless */ }
void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
// Helpers for runtime call arguments/results
void get_method(Register reg);
#else
// Interpreter-specific registers // Interpreter-specific registers
void save_bcp() void save_bcp() {
{ movptr(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), r13);
movq(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), r13);
} }
void restore_bcp() void restore_bcp() {
{ movptr(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
movq(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
} }
void restore_locals() void restore_locals() {
{ movptr(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
movq(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
} }
// Helpers for runtime call arguments/results // Helpers for runtime call arguments/results
void get_method(Register reg) void get_method(Register reg) {
{ movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
movq(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
} }
void get_constant_pool(Register reg) void get_constant_pool(Register reg) {
{
get_method(reg); get_method(reg);
movq(reg, Address(reg, methodOopDesc::constants_offset())); movptr(reg, Address(reg, methodOopDesc::constants_offset()));
} }
void get_constant_pool_cache(Register reg) void get_constant_pool_cache(Register reg) {
{
get_constant_pool(reg); get_constant_pool(reg);
movq(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes()));
} }
void get_cpool_and_tags(Register cpool, Register tags) void get_cpool_and_tags(Register cpool, Register tags) {
{
get_constant_pool(cpool); get_constant_pool(cpool);
movq(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes())); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
} }
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
@ -98,6 +99,7 @@ class InterpreterMacroAssembler
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset); int bcp_offset);
void pop_ptr(Register r = rax); void pop_ptr(Register r = rax);
void pop_i(Register r = rax); void pop_i(Register r = rax);
void pop_l(Register r = rax); void pop_l(Register r = rax);
@ -109,15 +111,23 @@ class InterpreterMacroAssembler
void push_f(XMMRegister r = xmm0); void push_f(XMMRegister r = xmm0);
void push_d(XMMRegister r = xmm0); void push_d(XMMRegister r = xmm0);
void pop(Register r ) { ((MacroAssembler*)this)->pop(r); }
void push(Register r ) { ((MacroAssembler*)this)->push(r); }
void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); }
void pop(TosState state); // transition vtos -> state void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos void push(TosState state); // transition state -> vtos
// Tagged stack support, pop and push both tag and value. // Tagged stack support, pop and push both tag and value.
void pop_ptr(Register r, Register tag); void pop_ptr(Register r, Register tag);
void push_ptr(Register r, Register tag); void push_ptr(Register r, Register tag);
#endif // CC_INTERP
DEBUG_ONLY(void verify_stack_tag(frame::Tag t);) DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
#ifndef CC_INTERP
// Tagged stack helpers for swap and dup // Tagged stack helpers for swap and dup
void load_ptr_and_tag(int n, Register val, Register tag); void load_ptr_and_tag(int n, Register val, Register tag);
void store_ptr_and_tag(int n, Register val, Register tag); void store_ptr_and_tag(int n, Register val, Register tag);
@ -133,12 +143,12 @@ class InterpreterMacroAssembler
void verify_local_tag(frame::Tag tag, Register idx); void verify_local_tag(frame::Tag tag, Register idx);
#endif // ASSERT #endif // ASSERT
void empty_expression_stack() void empty_expression_stack()
{ {
movq(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
wordSize));
// NULL last_sp until next java call // NULL last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
} }
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
@ -185,11 +195,14 @@ class InterpreterMacroAssembler
bool throw_monitor_exception = true, bool throw_monitor_exception = true,
bool install_monitor_exception = true, bool install_monitor_exception = true,
bool notify_jvmdi = true); bool notify_jvmdi = true);
#endif // CC_INTERP
// Object locking // Object locking
void lock_object (Register lock_reg); void lock_object (Register lock_reg);
void unlock_object(Register lock_reg); void unlock_object(Register lock_reg);
#ifndef CC_INTERP
// Interpreter profiling operations // Interpreter profiling operations
void set_method_data_pointer_for_bcp(); void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Register mdp, Label& zero_continue); void test_method_data_pointer(Register mdp, Label& zero_continue);
@ -237,6 +250,8 @@ class InterpreterMacroAssembler
// only if +VerifyFPU && (state == ftos || state == dtos) // only if +VerifyFPU && (state == ftos || state == dtos)
void verify_FPU(int stack_depth, TosState state = ftos); void verify_FPU(int stack_depth, TosState state = ftos);
#endif // !CC_INTERP
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode; typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
// support for jvmti/dtrace // support for jvmti/dtrace
View File
@ -50,13 +50,13 @@ void InterpreterRuntime::SignatureHandlerGenerator::move(int from_offset, int to
void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) { void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) {
__ leal(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset))); __ lea(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset)));
__ cmpl(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), 0); // do not use temp() to avoid AGI __ cmpptr(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), (int32_t)NULL_WORD); // do not use temp() to avoid AGI
Label L; Label L;
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ movl(temp(), 0); __ movptr(temp(), ((int32_t)NULL_WORD));
__ bind(L); __ bind(L);
__ movl(Address(to(), to_offset * wordSize), temp()); __ movptr(Address(to(), to_offset * wordSize), temp());
} }
View File
@ -93,49 +93,49 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
#ifdef _WIN64 #ifdef _WIN64
switch (_num_args) { switch (_num_args) {
case 0: case 0:
__ movq(c_rarg1, src); __ movptr(c_rarg1, src);
_num_args++; _num_args++;
break; break;
case 1: case 1:
__ movq(c_rarg2, src); __ movptr(c_rarg2, src);
_num_args++; _num_args++;
break; break;
case 2: case 2:
__ movq(c_rarg3, src); __ movptr(c_rarg3, src);
_num_args++; _num_args++;
break; break;
case 3: case 3:
default: default:
__ movq(rax, src); __ movptr(rax, src);
__ movq(Address(to(), _stack_offset), rax); __ movptr(Address(to(), _stack_offset), rax);
_stack_offset += wordSize; _stack_offset += wordSize;
break; break;
} }
#else #else
switch (_num_int_args) { switch (_num_int_args) {
case 0: case 0:
__ movq(c_rarg1, src); __ movptr(c_rarg1, src);
_num_int_args++; _num_int_args++;
break; break;
case 1: case 1:
__ movq(c_rarg2, src); __ movptr(c_rarg2, src);
_num_int_args++; _num_int_args++;
break; break;
case 2: case 2:
__ movq(c_rarg3, src); __ movptr(c_rarg3, src);
_num_int_args++; _num_int_args++;
break; break;
case 3: case 3:
__ movq(c_rarg4, src); __ movptr(c_rarg4, src);
_num_int_args++; _num_int_args++;
break; break;
case 4: case 4:
__ movq(c_rarg5, src); __ movptr(c_rarg5, src);
_num_int_args++; _num_int_args++;
break; break;
default: default:
__ movq(rax, src); __ movptr(rax, src);
__ movq(Address(to(), _stack_offset), rax); __ movptr(Address(to(), _stack_offset), rax);
_stack_offset += wordSize; _stack_offset += wordSize;
break; break;
} }
@ -171,16 +171,16 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
if (_num_args < Argument::n_float_register_parameters_c-1) { if (_num_args < Argument::n_float_register_parameters_c-1) {
__ movdbl(as_XMMRegister(++_num_args), src); __ movdbl(as_XMMRegister(++_num_args), src);
} else { } else {
__ movq(rax, src); __ movptr(rax, src);
__ movq(Address(to(), _stack_offset), rax); __ movptr(Address(to(), _stack_offset), rax);
_stack_offset += wordSize; _stack_offset += wordSize;
} }
#else #else
if (_num_fp_args < Argument::n_float_register_parameters_c) { if (_num_fp_args < Argument::n_float_register_parameters_c) {
__ movdbl(as_XMMRegister(_num_fp_args++), src); __ movdbl(as_XMMRegister(_num_fp_args++), src);
} else { } else {
__ movq(rax, src); __ movptr(rax, src);
__ movq(Address(to(), _stack_offset), rax); __ movptr(Address(to(), _stack_offset), rax);
_stack_offset += wordSize; _stack_offset += wordSize;
} }
#endif #endif
@ -193,29 +193,29 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
switch (_num_args) { switch (_num_args) {
case 0: case 0:
assert(offset() == 0, "argument register 1 can only be (non-null) receiver"); assert(offset() == 0, "argument register 1 can only be (non-null) receiver");
__ leaq(c_rarg1, src); __ lea(c_rarg1, src);
_num_args++; _num_args++;
break; break;
case 1: case 1:
__ leaq(rax, src); __ lea(rax, src);
__ xorl(c_rarg2, c_rarg2); __ xorl(c_rarg2, c_rarg2);
__ cmpq(src, 0); __ cmpptr(src, 0);
__ cmovq(Assembler::notEqual, c_rarg2, rax); __ cmov(Assembler::notEqual, c_rarg2, rax);
_num_args++; _num_args++;
break; break;
case 2: case 2:
__ leaq(rax, src); __ lea(rax, src);
__ xorl(c_rarg3, c_rarg3); __ xorl(c_rarg3, c_rarg3);
__ cmpq(src, 0); __ cmpptr(src, 0);
__ cmovq(Assembler::notEqual, c_rarg3, rax); __ cmov(Assembler::notEqual, c_rarg3, rax);
_num_args++; _num_args++;
break; break;
default: default:
__ leaq(rax, src); __ lea(rax, src);
__ xorl(temp(), temp()); __ xorl(temp(), temp());
__ cmpq(src, 0); __ cmpptr(src, 0);
__ cmovq(Assembler::notEqual, temp(), rax); __ cmov(Assembler::notEqual, temp(), rax);
__ movq(Address(to(), _stack_offset), temp()); __ movptr(Address(to(), _stack_offset), temp());
_stack_offset += wordSize; _stack_offset += wordSize;
break; break;
} }
@ -223,43 +223,43 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
switch (_num_int_args) { switch (_num_int_args) {
case 0: case 0:
assert(offset() == 0, "argument register 1 can only be (non-null) receiver"); assert(offset() == 0, "argument register 1 can only be (non-null) receiver");
__ leaq(c_rarg1, src); __ lea(c_rarg1, src);
_num_int_args++; _num_int_args++;
break; break;
case 1: case 1:
__ leaq(rax, src); __ lea(rax, src);
__ xorl(c_rarg2, c_rarg2); __ xorl(c_rarg2, c_rarg2);
__ cmpq(src, 0); __ cmpptr(src, 0);
__ cmovq(Assembler::notEqual, c_rarg2, rax); __ cmov(Assembler::notEqual, c_rarg2, rax);
_num_int_args++; _num_int_args++;
break; break;
case 2: case 2:
__ leaq(rax, src); __ lea(rax, src);
__ xorl(c_rarg3, c_rarg3); __ xorl(c_rarg3, c_rarg3);
__ cmpq(src, 0); __ cmpptr(src, 0);
__ cmovq(Assembler::notEqual, c_rarg3, rax); __ cmov(Assembler::notEqual, c_rarg3, rax);
_num_int_args++; _num_int_args++;
break; break;
case 3: case 3:
__ leaq(rax, src); __ lea(rax, src);
__ xorl(c_rarg4, c_rarg4); __ xorl(c_rarg4, c_rarg4);
__ cmpq(src, 0); __ cmpptr(src, 0);
__ cmovq(Assembler::notEqual, c_rarg4, rax); __ cmov(Assembler::notEqual, c_rarg4, rax);
_num_int_args++; _num_int_args++;
break; break;
case 4: case 4:
__ leaq(rax, src); __ lea(rax, src);
__ xorl(c_rarg5, c_rarg5); __ xorl(c_rarg5, c_rarg5);
__ cmpq(src, 0); __ cmpptr(src, 0);
__ cmovq(Assembler::notEqual, c_rarg5, rax); __ cmov(Assembler::notEqual, c_rarg5, rax);
_num_int_args++; _num_int_args++;
break; break;
default: default:
__ leaq(rax, src); __ lea(rax, src);
__ xorl(temp(), temp()); __ xorl(temp(), temp());
__ cmpq(src, 0); __ cmpptr(src, 0);
__ cmovq(Assembler::notEqual, temp(), rax); __ cmov(Assembler::notEqual, temp(), rax);
__ movq(Address(to(), _stack_offset), temp()); __ movptr(Address(to(), _stack_offset), temp());
_stack_offset += wordSize; _stack_offset += wordSize;
break; break;
} }
View File
@ -38,7 +38,7 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// rcx: temporary // rcx: temporary
// rdi: pointer to locals // rdi: pointer to locals
// rsp: end of copied parameters area // rsp: end of copied parameters area
__ movl(rcx, rsp); __ mov(rcx, rsp);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
__ ret(0); __ ret(0);
return entry; return entry;
@ -75,8 +75,8 @@ address InterpreterGenerator::generate_empty_entry(void) {
// Code: _return // Code: _return
// _return // _return
// return w/o popping parameters // return w/o popping parameters
__ popl(rax); __ pop(rax);
__ movl(rsp, rsi); __ mov(rsp, rsi);
__ jmp(rax); __ jmp(rax);
__ bind(slow_path); __ bind(slow_path);
@ -135,7 +135,7 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
__ pushl(Address(rsp, 3*wordSize)); // push hi (and note rsp -= wordSize) __ pushl(Address(rsp, 3*wordSize)); // push hi (and note rsp -= wordSize)
__ pushl(Address(rsp, 2*wordSize)); // push lo __ pushl(Address(rsp, 2*wordSize)); // push lo
__ fld_d(Address(rsp, 0)); // get double in ST0 __ fld_d(Address(rsp, 0)); // get double in ST0
__ addl(rsp, 2*wordSize); __ addptr(rsp, 2*wordSize);
} else { } else {
__ fld_d(Address(rsp, 1*wordSize)); __ fld_d(Address(rsp, 1*wordSize));
} }
@ -173,15 +173,15 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
// return double result in xmm0 for interpreter and compilers. // return double result in xmm0 for interpreter and compilers.
if (UseSSE >= 2) { if (UseSSE >= 2) {
__ subl(rsp, 2*wordSize); __ subptr(rsp, 2*wordSize);
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0)); __ movdbl(xmm0, Address(rsp, 0));
__ addl(rsp, 2*wordSize); __ addptr(rsp, 2*wordSize);
} }
// done, result in FPU ST(0) or XMM0 // done, result in FPU ST(0) or XMM0
__ popl(rdi); // get return address __ pop(rdi); // get return address
__ movl(rsp, rsi); // set sp to sender sp __ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi); __ jmp(rdi);
return entry_point; return entry_point;
@ -202,10 +202,10 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry // abstract method entry
// remove return address. Not really needed, since exception handling throws away expression stack // remove return address. Not really needed, since exception handling throws away expression stack
__ popl(rbx); __ pop(rbx);
// adjust stack to what a normal return would do // adjust stack to what a normal return would do
__ movl(rsp, rsi); __ mov(rsp, rsi);
// throw exception // throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here. // the call_VM checks for exception, so we should never return here.
View File
@ -35,9 +35,9 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// rbx: method // rbx: method
// r14: pointer to locals // r14: pointer to locals
// c_rarg3: first stack arg - wordSize // c_rarg3: first stack arg - wordSize
__ movq(c_rarg3, rsp); __ mov(c_rarg3, rsp);
// adjust rsp // adjust rsp
__ subq(rsp, 4 * wordSize); __ subptr(rsp, 4 * wordSize);
__ call_VM(noreg, __ call_VM(noreg,
CAST_FROM_FN_PTR(address, CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler), InterpreterRuntime::slow_signature_handler),
@ -70,13 +70,13 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
case 0: case 0:
__ movl(rscratch1, Address(rbx, methodOopDesc::access_flags_offset())); __ movl(rscratch1, Address(rbx, methodOopDesc::access_flags_offset()));
__ testl(rscratch1, JVM_ACC_STATIC); __ testl(rscratch1, JVM_ACC_STATIC);
__ cmovq(Assembler::zero, c_rarg1, Address(rsp, 0)); __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
break; break;
case 1: case 1:
__ movq(c_rarg2, Address(rsp, wordSize)); __ movptr(c_rarg2, Address(rsp, wordSize));
break; break;
case 2: case 2:
__ movq(c_rarg3, Address(rsp, 2 * wordSize)); __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
break; break;
default: default:
break; break;
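
The testl/cmovptr pair above selects the first integer argument without a branch: the zero flag is set exactly when JVM_ACC_STATIC is clear, i.e. for instance methods, and only then is the stacked receiver moved into c_rarg1. A branch-free sketch (ACC_STATIC value per the class-file spec; other names hypothetical):

#include <cstdint>

static const std::uint32_t ACC_STATIC = 0x0008; // JVM_ACC_STATIC

// Instance methods take the receiver from the first stack slot; static
// methods keep whatever the argument register already holds.
static std::uintptr_t first_int_arg(std::uint32_t access_flags,
                                    std::uintptr_t current_reg_value,
                                    std::uintptr_t first_stack_slot) {
  return (access_flags & ACC_STATIC) ? current_reg_value : first_stack_slot;
}
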
@ -101,7 +101,7 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// restore rsp // restore rsp
__ addq(rsp, 4 * wordSize); __ addptr(rsp, 4 * wordSize);
__ ret(0); __ ret(0);
@ -114,9 +114,9 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// rbx: method // rbx: method
// r14: pointer to locals // r14: pointer to locals
// c_rarg3: first stack arg - wordSize // c_rarg3: first stack arg - wordSize
__ movq(c_rarg3, rsp); __ mov(c_rarg3, rsp);
// adjust rsp // adjust rsp
__ subq(rsp, 14 * wordSize); __ subptr(rsp, 14 * wordSize);
__ call_VM(noreg, __ call_VM(noreg,
CAST_FROM_FN_PTR(address, CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler), InterpreterRuntime::slow_signature_handler),
@ -155,15 +155,15 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// Now handle integrals. Only do c_rarg1 if not static. // Now handle integrals. Only do c_rarg1 if not static.
__ movl(c_rarg3, Address(rbx, methodOopDesc::access_flags_offset())); __ movl(c_rarg3, Address(rbx, methodOopDesc::access_flags_offset()));
__ testl(c_rarg3, JVM_ACC_STATIC); __ testl(c_rarg3, JVM_ACC_STATIC);
__ cmovq(Assembler::zero, c_rarg1, Address(rsp, 0)); __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
__ movq(c_rarg2, Address(rsp, wordSize)); __ movptr(c_rarg2, Address(rsp, wordSize));
__ movq(c_rarg3, Address(rsp, 2 * wordSize)); __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
__ movq(c_rarg4, Address(rsp, 3 * wordSize)); __ movptr(c_rarg4, Address(rsp, 3 * wordSize));
__ movq(c_rarg5, Address(rsp, 4 * wordSize)); __ movptr(c_rarg5, Address(rsp, 4 * wordSize));
// restore rsp // restore rsp
__ addq(rsp, 14 * wordSize); __ addptr(rsp, 14 * wordSize);
__ ret(0); __ ret(0);
@ -176,15 +176,14 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// Various method entries // Various method entries
// //
address InterpreterGenerator::generate_math_entry( address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
AbstractInterpreter::MethodKind kind) {
// rbx: methodOop // rbx: methodOop
// rcx: scratch
// r13: sender sp
if (!InlineIntrinsics) return NULL; // Generate a vanilla entry if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
assert(kind == Interpreter::java_lang_math_sqrt,
"Other intrinsics are not special");
address entry_point = __ pc(); address entry_point = __ pc();
// These don't need a safepoint check because they aren't virtually // These don't need a safepoint check because they aren't virtually
@ -197,6 +196,11 @@ address InterpreterGenerator::generate_math_entry(
// in order to avoid monotonicity bugs when switching // in order to avoid monotonicity bugs when switching
// from interpreter to compiler in the middle of some // from interpreter to compiler in the middle of some
// computation) // computation)
//
// stack: [ ret adr ] <-- rsp
// [ lo(arg) ]
// [ hi(arg) ]
//
// Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are // Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are
// native methods. Interpreter::method_kind(...) does a check for // native methods. Interpreter::method_kind(...) does a check for
@ -218,10 +222,46 @@ address InterpreterGenerator::generate_math_entry(
// Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are // Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are
// java methods. Interpreter::method_kind(...) will select // java methods. Interpreter::method_kind(...) will select
// this entry point for the corresponding methods in JDK 1.3. // this entry point for the corresponding methods in JDK 1.3.
__ sqrtsd(xmm0, Address(rsp, wordSize)); // get argument
__ popq(rax); if (kind == Interpreter::java_lang_math_sqrt) {
__ movq(rsp, r13); __ sqrtsd(xmm0, Address(rsp, wordSize));
} else {
__ fld_d(Address(rsp, wordSize));
switch (kind) {
case Interpreter::java_lang_math_sin :
__ trigfunc('s');
break;
case Interpreter::java_lang_math_cos :
__ trigfunc('c');
break;
case Interpreter::java_lang_math_tan :
__ trigfunc('t');
break;
case Interpreter::java_lang_math_abs:
__ fabs();
break;
case Interpreter::java_lang_math_log:
__ flog();
break;
case Interpreter::java_lang_math_log10:
__ flog10();
break;
default :
ShouldNotReachHere();
}
// return double result in xmm0 for interpreter and compilers.
__ subptr(rsp, 2*wordSize);
// Round to 64bit precision
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
__ addptr(rsp, 2*wordSize);
}
__ pop(rax);
__ mov(rsp, r13);
__ jmp(rax); __ jmp(rax);
return entry_point; return entry_point;
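
The fstp_d/movdbl pair above is what narrows the x87 result: storing ST(0) to memory rounds the 80-bit extended value to 64-bit double precision before it is reloaded into xmm0. A hedged C++ analogue (illustrative only, not part of this change):

// Spilling through a 64-bit memory slot forces the rounding that the
// fstp_d(Address(rsp, 0)) / movdbl(xmm0, Address(rsp, 0)) sequence performs.
static double round_to_double_precision(long double st0) {
  volatile double spilled = static_cast<double>(st0); // fstp_d
  return spilled;                                     // movdbl xmm0, [rsp]
}
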
@ -239,10 +279,10 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry // abstract method entry
// remove return address. Not really needed, since exception // remove return address. Not really needed, since exception
// handling throws away expression stack // handling throws away expression stack
__ popq(rbx); __ pop(rbx);
// adjust stack to what a normal return would do // adjust stack to what a normal return would do
__ movq(rsp, r13); __ mov(rsp, r13);
// throw exception // throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, __ call_VM(noreg, CAST_FROM_FN_PTR(address,
@ -276,8 +316,8 @@ address InterpreterGenerator::generate_empty_entry(void) {
// Code: _return // Code: _return
// _return // _return
// return w/o popping parameters // return w/o popping parameters
__ popq(rax); __ pop(rax);
__ movq(rsp, r13); __ mov(rsp, r13);
__ jmp(rax); __ jmp(rax);
__ bind(slow_path); __ bind(slow_path);
@ -286,148 +326,6 @@ address InterpreterGenerator::generate_empty_entry(void) {
} }
// Call an accessor method (assuming it is resolved, otherwise drop
// into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx: methodOop
// r13: senderSP; must be preserved for slow path, set SP to it on fast path
address entry_point = __ pc();
Label xreturn_path;
// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
// thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// rbx: method
__ movq(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
__ testq(rax, rax);
__ jcc(Assembler::zero, slow_path);
__ movq(rdi, Address(rbx, methodOopDesc::constants_offset()));
// read first instruction word and extract bytecode @ 1 and index @ 2
__ movq(rdx, Address(rbx, methodOopDesc::const_offset()));
__ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2 * BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movq(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
// rax: local 0
// rbx: method
// rdx: constant pool cache index
// rdi: constant pool cache
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4,
"adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_8,
constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2 * BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
__ movq(rcx,
Address(rdi,
rdx,
Address::times_8,
constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset()));
// edx: flags
__ movl(rdx,
Address(rdi,
rdx,
Address::times_8,
constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
Label notObj, notInt, notByte, notShort;
const Address field_address(rax, rcx, Address::times_1);
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask edx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ load_heap_oop(rax, field_address);
__ jmp(xreturn_path);
__ bind(notObj);
__ cmpl(rdx, itos);
__ jcc(Assembler::notEqual, notInt);
// itos
__ movl(rax, field_address);
__ jmp(xreturn_path);
__ bind(notInt);
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
// btos
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);
__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
// stos
__ load_signed_word(rax, field_address);
__ jmp(xreturn_path);
__ bind(notShort);
#ifdef ASSERT
Label okay;
__ cmpl(rdx, ctos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif
// ctos
__ load_unsigned_word(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
__ popq(rdi);
__ movq(rsp, r13);
__ jmp(rdi);
__ ret(0);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
} else {
(void) generate_normal_entry(false);
}
return entry_point;
}
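
For the fast-accessor path removed above, the index extraction works on the first instruction word of "aload_0; getfield #i", which reads, little-endian, as <index:16><0xb4><0x2a>. A sketch of the shrl/shll pair (names hypothetical):

#include <cstdint>

// Shift the two opcode bytes away, then scale the constant pool index to
// a cache-entry offset, mirroring the removed code's shift sequence.
static std::uint32_t cp_cache_entry_offset(std::uint32_t first_insn_word,
                                           unsigned log2_entry_size) {
  std::uint32_t index = first_insn_word >> 16; // drop 0x2a (aload_0) and 0xb4 (getfield)
  return index << log2_entry_size;             // scale to cache entry size
}
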
// This method tells the deoptimizer how big an interpreted frame must be: // This method tells the deoptimizer how big an interpreted frame must be:
int AbstractInterpreter::size_activation(methodOop method, int AbstractInterpreter::size_activation(methodOop method,
int tempcount, int tempcount,
View File
@ -72,25 +72,25 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
__ testb (rcx, 1); __ testb (rcx, 1);
__ jcc (Assembler::notZero, slow); __ jcc (Assembler::notZero, slow);
if (os::is_MP()) { if (os::is_MP()) {
__ movl (rax, rcx); __ mov(rax, rcx);
__ andl (rax, 1); // rax must end up 0 __ andptr(rax, 1); // rax must end up 0
__ movl (rdx, Address(rsp, rax, Address::times_1, 2*wordSize)); __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
// obj; note rax is 0. // obj; note rax is 0.
// rdx is data dependent on rcx. // rdx is data dependent on rcx.
} else { } else {
__ movl (rdx, Address(rsp, 2*wordSize)); // obj __ movptr (rdx, Address(rsp, 2*wordSize)); // obj
} }
__ movl (rax, Address(rsp, 3*wordSize)); // jfieldID __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID
__ movl (rdx, Address(rdx, 0)); // *obj __ movptr(rdx, Address(rdx, 0)); // *obj
__ shrl (rax, 2); // offset __ shrptr (rax, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc(); speculative_load_pclist[count] = __ pc();
switch (type) { switch (type) {
case T_BOOLEAN: __ movzxb (rax, Address(rdx, rax, Address::times_1)); break; case T_BOOLEAN: __ movzbl (rax, Address(rdx, rax, Address::times_1)); break;
case T_BYTE: __ movsxb (rax, Address(rdx, rax, Address::times_1)); break; case T_BYTE: __ movsbl (rax, Address(rdx, rax, Address::times_1)); break;
case T_CHAR: __ movzxw (rax, Address(rdx, rax, Address::times_1)); break; case T_CHAR: __ movzwl (rax, Address(rdx, rax, Address::times_1)); break;
case T_SHORT: __ movsxw (rax, Address(rdx, rax, Address::times_1)); break; case T_SHORT: __ movswl (rax, Address(rdx, rax, Address::times_1)); break;
case T_INT: __ movl (rax, Address(rdx, rax, Address::times_1)); break; case T_INT: __ movl (rax, Address(rdx, rax, Address::times_1)); break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
@ -98,8 +98,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
Address ca1; Address ca1;
if (os::is_MP()) { if (os::is_MP()) {
__ lea(rdx, counter); __ lea(rdx, counter);
__ xorl(rdx, rax); __ xorptr(rdx, rax);
__ xorl(rdx, rax); __ xorptr(rdx, rax);
__ cmp32(rcx, Address(rdx, 0)); __ cmp32(rcx, Address(rdx, 0));
// ca1 is the same as ca because // ca1 is the same as ca because
// rax ^ counter_addr ^ rax = address // rax ^ counter_addr ^ rax = address
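
The double XOR above is not a no-op in effect: it leaves the counter address unchanged but makes the re-read of the safepoint counter data-dependent on the speculative field load, so the two reads cannot be reordered on an MP system. A hedged sketch (names hypothetical):

#include <cstdint>

static volatile std::uint32_t safepoint_counter; // stand-in for the real counter

// addr ^ v ^ v == addr, but the reload now depends on loaded_value.
static std::uint32_t dependent_counter_reload(std::uintptr_t loaded_value) {
  std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(&safepoint_counter);
  addr ^= loaded_value;
  addr ^= loaded_value;
  return *reinterpret_cast<volatile std::uint32_t*>(addr);
}

(In C++ a compiler would fold the XORs away; the ordering property only holds in the hand-written assembly.)
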
@ -184,35 +184,37 @@ address JNI_FastGetField::generate_fast_get_long_field() {
ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr()); ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
__ pushl (rsi); __ push (rsi);
__ mov32 (rcx, counter); __ mov32 (rcx, counter);
__ testb (rcx, 1); __ testb (rcx, 1);
__ jcc (Assembler::notZero, slow); __ jcc (Assembler::notZero, slow);
if (os::is_MP()) { if (os::is_MP()) {
__ movl (rax, rcx); __ mov(rax, rcx);
__ andl (rax, 1); // rax must end up 0 __ andptr(rax, 1); // rax must end up 0
__ movl (rdx, Address(rsp, rax, Address::times_1, 3*wordSize)); __ movptr(rdx, Address(rsp, rax, Address::times_1, 3*wordSize));
// obj; note rax is 0. // obj; note rax is 0.
// rdx is data dependent on rcx. // rdx is data dependent on rcx.
} else { } else {
__ movl (rdx, Address(rsp, 3*wordSize)); // obj __ movptr(rdx, Address(rsp, 3*wordSize)); // obj
} }
__ movl (rsi, Address(rsp, 4*wordSize)); // jfieldID __ movptr(rsi, Address(rsp, 4*wordSize)); // jfieldID
__ movl (rdx, Address(rdx, 0)); // *obj __ movptr(rdx, Address(rdx, 0)); // *obj
__ shrl (rsi, 2); // offset __ shrptr(rsi, 2); // offset
assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small"); assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
speculative_load_pclist[count++] = __ pc(); speculative_load_pclist[count++] = __ pc();
__ movl (rax, Address(rdx, rsi, Address::times_1)); __ movptr(rax, Address(rdx, rsi, Address::times_1));
#ifndef _LP64
speculative_load_pclist[count] = __ pc(); speculative_load_pclist[count] = __ pc();
__ movl (rdx, Address(rdx, rsi, Address::times_1, 4)); __ movl(rdx, Address(rdx, rsi, Address::times_1, 4));
#endif // _LP64
if (os::is_MP()) { if (os::is_MP()) {
__ lea (rsi, counter); __ lea(rsi, counter);
__ xorl (rsi, rdx); __ xorptr(rsi, rdx);
__ xorl (rsi, rax); __ xorptr(rsi, rax);
__ xorl (rsi, rdx); __ xorptr(rsi, rdx);
__ xorl (rsi, rax); __ xorptr(rsi, rax);
__ cmp32(rcx, Address(rsi, 0)); __ cmp32(rcx, Address(rsi, 0));
// ca1 is the same as ca because // ca1 is the same as ca because
// rax ^ rdx ^ counter_addr ^ rax ^ rdx = address // rax ^ rdx ^ counter_addr ^ rax ^ rdx = address
@ -222,7 +224,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
} }
__ jcc (Assembler::notEqual, slow); __ jcc (Assembler::notEqual, slow);
__ popl (rsi); __ pop (rsi);
#ifndef _WINDOWS #ifndef _WINDOWS
__ ret (0); __ ret (0);
@ -234,7 +236,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
slowcase_entry_pclist[count-1] = __ pc(); slowcase_entry_pclist[count-1] = __ pc();
slowcase_entry_pclist[count++] = __ pc(); slowcase_entry_pclist[count++] = __ pc();
__ bind (slow); __ bind (slow);
__ popl (rsi); __ pop (rsi);
address slow_case_addr = jni_GetLongField_addr(); address slow_case_addr = jni_GetLongField_addr();
// tail call // tail call
__ jump (ExternalAddress(slow_case_addr)); __ jump (ExternalAddress(slow_case_addr));
@ -276,23 +278,28 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
__ testb (rcx, 1); __ testb (rcx, 1);
__ jcc (Assembler::notZero, slow); __ jcc (Assembler::notZero, slow);
if (os::is_MP()) { if (os::is_MP()) {
__ movl (rax, rcx); __ mov(rax, rcx);
__ andl (rax, 1); // rax, must end up 0 __ andptr(rax, 1); // rax, must end up 0
__ movl (rdx, Address(rsp, rax, Address::times_1, 2*wordSize)); __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
// obj, notice rax, is 0. // obj, notice rax, is 0.
// rdx is data dependent on rcx. // rdx is data dependent on rcx.
} else { } else {
__ movl (rdx, Address(rsp, 2*wordSize)); // obj __ movptr(rdx, Address(rsp, 2*wordSize)); // obj
} }
__ movl (rax, Address(rsp, 3*wordSize)); // jfieldID __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID
__ movl (rdx, Address(rdx, 0)); // *obj __ movptr(rdx, Address(rdx, 0)); // *obj
__ shrl (rax, 2); // offset __ shrptr(rax, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc(); speculative_load_pclist[count] = __ pc();
switch (type) { switch (type) {
#ifndef _LP64
case T_FLOAT: __ fld_s (Address(rdx, rax, Address::times_1)); break; case T_FLOAT: __ fld_s (Address(rdx, rax, Address::times_1)); break;
case T_DOUBLE: __ fld_d (Address(rdx, rax, Address::times_1)); break; case T_DOUBLE: __ fld_d (Address(rdx, rax, Address::times_1)); break;
#else
case T_FLOAT: __ movflt (xmm0, Address(robj, roffset, Address::times_1)); break;
case T_DOUBLE: __ movdbl (xmm0, Address(robj, roffset, Address::times_1)); break;
#endif // _LP64
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
@ -301,8 +308,9 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
__ fst_s (Address(rsp, -4)); __ fst_s (Address(rsp, -4));
__ lea(rdx, counter); __ lea(rdx, counter);
__ movl (rax, Address(rsp, -4)); __ movl (rax, Address(rsp, -4));
__ xorl(rdx, rax); // garbage hi-order bits on 64bit are harmless.
__ xorl(rdx, rax); __ xorptr(rdx, rax);
__ xorptr(rdx, rax);
__ cmp32(rcx, Address(rdx, 0)); __ cmp32(rcx, Address(rdx, 0));
// rax, ^ counter_addr ^ rax, = address // rax, ^ counter_addr ^ rax, = address
// ca1 is data dependent on the field // ca1 is data dependent on the field
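Stepping back, every fast accessor in this file follows the same seqlock-style protocol: load the safepoint counter, take the slow path if it is odd (a safepoint is in progress), speculatively load the field, then re-read the counter and fall back to the slow JNI path if it moved. A hedged C++ sketch of that control flow (helper names are illustrative, not HotSpot API; the acquire loads stand in for the ordering the stubs get from the xor data dependency):

#include <atomic>
#include <cstdint>

static std::atomic<int> safepoint_counter{0};  // even = no safepoint running

// 'field_addr' stands in for *obj + (jfieldID >> 2); 'slow_get' for the
// ordinary jni_GetIntField slow path.
int fast_get_int(volatile int* field_addr, int (*slow_get)()) {
  int c = safepoint_counter.load(std::memory_order_acquire);
  if (c & 1) return slow_get();              // safepoint in progress: bail out
  int v = *field_addr;                       // speculative load (may be stale)
  if (safepoint_counter.load(std::memory_order_acquire) != c)
    return slow_get();                       // world moved underneath us
  return v;
}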


@ -67,18 +67,18 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr()); ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
__ mov32 (rcounter, counter); __ mov32 (rcounter, counter);
__ movq (robj, c_rarg1); __ mov (robj, c_rarg1);
__ testb (rcounter, 1); __ testb (rcounter, 1);
__ jcc (Assembler::notZero, slow); __ jcc (Assembler::notZero, slow);
if (os::is_MP()) { if (os::is_MP()) {
__ xorq (robj, rcounter); __ xorptr(robj, rcounter);
__ xorq (robj, rcounter); // obj, since __ xorptr(robj, rcounter); // obj, since
// robj ^ rcounter ^ rcounter == robj // robj ^ rcounter ^ rcounter == robj
// robj is data dependent on rcounter. // robj is data dependent on rcounter.
} }
__ movq (robj, Address(robj, 0)); // *obj __ movptr(robj, Address(robj, 0)); // *obj
__ movq (roffset, c_rarg2); __ mov (roffset, c_rarg2);
__ shrq (roffset, 2); // offset __ shrptr(roffset, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc(); speculative_load_pclist[count] = __ pc();
@ -95,8 +95,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
if (os::is_MP()) { if (os::is_MP()) {
__ lea(rcounter_addr, counter); __ lea(rcounter_addr, counter);
// ca is data dependent on rax. // ca is data dependent on rax.
__ xorq (rcounter_addr, rax); __ xorptr(rcounter_addr, rax);
__ xorq (rcounter_addr, rax); __ xorptr(rcounter_addr, rax);
__ cmpl (rcounter, Address(rcounter_addr, 0)); __ cmpl (rcounter, Address(rcounter_addr, 0));
} else { } else {
__ cmp32 (rcounter, counter); __ cmp32 (rcounter, counter);
@ -165,18 +165,18 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr()); ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
__ mov32 (rcounter, counter); __ mov32 (rcounter, counter);
__ movq (robj, c_rarg1); __ mov (robj, c_rarg1);
__ testb (rcounter, 1); __ testb (rcounter, 1);
__ jcc (Assembler::notZero, slow); __ jcc (Assembler::notZero, slow);
if (os::is_MP()) { if (os::is_MP()) {
__ xorq (robj, rcounter); __ xorptr(robj, rcounter);
__ xorq (robj, rcounter); // obj, since __ xorptr(robj, rcounter); // obj, since
// robj ^ rcounter ^ rcounter == robj // robj ^ rcounter ^ rcounter == robj
// robj is data dependent on rcounter. // robj is data dependent on rcounter.
} }
__ movq (robj, Address(robj, 0)); // *obj __ movptr(robj, Address(robj, 0)); // *obj
__ movq (roffset, c_rarg2); __ mov (roffset, c_rarg2);
__ shrq (roffset, 2); // offset __ shrptr(roffset, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc(); speculative_load_pclist[count] = __ pc();
@ -190,8 +190,8 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
__ lea(rcounter_addr, counter); __ lea(rcounter_addr, counter);
__ movdq (rax, xmm0); __ movdq (rax, xmm0);
// counter address is data dependent on xmm0. // counter address is data dependent on xmm0.
__ xorq (rcounter_addr, rax); __ xorptr(rcounter_addr, rax);
__ xorq (rcounter_addr, rax); __ xorptr(rcounter_addr, rax);
__ cmpl (rcounter, Address(rcounter_addr, 0)); __ cmpl (rcounter, Address(rcounter_addr, 0));
} else { } else {
__ cmp32 (rcounter, counter); __ cmp32 (rcounter, counter);


@ -223,49 +223,150 @@ void NativeMovConstReg::print() {
//------------------------------------------------------------------- //-------------------------------------------------------------------
#ifndef AMD64 int NativeMovRegMem::instruction_start() const {
int off = 0;
u_char instr_0 = ubyte_at(off);
void NativeMovRegMem::copy_instruction_to(address new_instruction_address) { // First check to see if we have a (prefixed or not) xor
int inst_size = instruction_size; if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
// See if there's an instruction size prefix override. off++;
if ( *(address(this)) == instruction_operandsize_prefix && instr_0 = ubyte_at(off);
*(address(this)+1) != instruction_code_xmm_code ) { // Not SSE instr
inst_size += 1;
} }
if ( *(address(this)) == instruction_extended_prefix ) inst_size += 1;
for (int i = 0; i < instruction_size; i++) { if (instr_0 == instruction_code_xor) {
*(new_instruction_address + i) = *(address(this) + i); off += 2;
instr_0 = ubyte_at(off);
} }
// Now look for the real instruction and the many prefix/size specifiers.
if (instr_0 == instruction_operandsize_prefix ) { // 0x66
off++; // Not SSE instructions
instr_0 = ubyte_at(off);
}
if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
off++;
instr_0 = ubyte_at(off);
}
if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
instr_0 <= instruction_prefix_wide_hi) { // 0x4f
off++;
instr_0 = ubyte_at(off);
}
if (instr_0 == instruction_extended_prefix ) { // 0x0f
off++;
}
return off;
}
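instruction_start() is essentially a prefix scanner: it steps over an optional REX prefix (0x40-0x4F), an optional xor lead-in (opcode 0x33 plus its ModRM byte), the 0x66 operand-size override, the 0xF2/0xF3 SSE prefixes, a second possible REX, and finally the 0x0F two-byte-opcode escape. A compact restatement of the same walk (sketch only, hypothetical helper):

#include <cstdint>

// Sketch: offset of the opcode proper, mirroring instruction_start().
static int opcode_offset(const uint8_t* p) {
  int off = 0;
  if (p[off] >= 0x40 && p[off] <= 0x4F) off++;   // REX .. REX_WRXB
  if (p[off] == 0x33) off += 2;                  // xor lead-in: opcode + ModRM
  if (p[off] == 0x66) off++;                     // operand-size override
  if (p[off] == 0xF2 || p[off] == 0xF3) off++;   // SSE sd/ss prefixes
  if (p[off] >= 0x40 && p[off] <= 0x4F) off++;   // REX again after prefixes
  if (p[off] == 0x0F) off++;                     // two-byte opcode escape
  return off;
}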
address NativeMovRegMem::instruction_address() const {
return addr_at(instruction_start());
}
address NativeMovRegMem::next_instruction_address() const {
address ret = instruction_address() + instruction_size;
u_char instr_0 = *(u_char*) instruction_address();
switch (instr_0) {
case instruction_operandsize_prefix:
fatal("should have skipped instruction_operandsize_prefix");
break;
case instruction_extended_prefix:
fatal("should have skipped instruction_extended_prefix");
break;
case instruction_code_mem2reg_movslq: // 0x63
case instruction_code_mem2reg_movzxb: // 0xB6
case instruction_code_mem2reg_movsxb: // 0xBE
case instruction_code_mem2reg_movzxw: // 0xB7
case instruction_code_mem2reg_movsxw: // 0xBF
case instruction_code_reg2mem: // 0x89 (q/l)
case instruction_code_mem2reg: // 0x8B (q/l)
case instruction_code_reg2memb: // 0x88
case instruction_code_mem2regb: // 0x8a
case instruction_code_float_s: // 0xd9 fld_s a
case instruction_code_float_d: // 0xdd fld_d a
case instruction_code_xmm_load: // 0x10
case instruction_code_xmm_store: // 0x11
case instruction_code_xmm_lpd: // 0x12
{
// If there is an SIB byte then the instruction is longer than expected
u_char mod_rm = *(u_char*)(instruction_address() + 1);
if ((mod_rm & 7) == 0x4) {
ret++;
}
}
break;
case instruction_code_xor:
fatal("should have skipped xor lead in");
break;
default:
fatal("not a NativeMovRegMem");
}
return ret;
}
int NativeMovRegMem::offset() const{
int off = data_offset + instruction_start();
u_char mod_rm = *(u_char*)(instruction_address() + 1);
// nnnn(r12|rsp) isn't coded as a simple mod/rm since that is
// the encoding used for an SIB byte, which puts the nnnn
// field off by one byte
if ((mod_rm & 7) == 0x4) {
off++;
}
return int_at(off);
}
void NativeMovRegMem::set_offset(int x) {
int off = data_offset + instruction_start();
u_char mod_rm = *(u_char*)(instruction_address() + 1);
// nnnn(r12|rsp) isn't coded as a simple mod/rm since that is
// the encoding used for an SIB byte, which puts the nnnn
// field off by one byte
if ((mod_rm & 7) == 0x4) {
off++;
}
set_int_at(off, x);
} }
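The (mod_rm & 7) == 0x4 test in offset() and set_offset() is pure x86 encoding: an r/m field of 100b does not name a register but announces an SIB byte, so the 32-bit displacement starts one byte later. rsp and r12 can only serve as a base register via SIB, which is exactly the nnnn(r12|rsp) case the comments mention. A hedged sketch of the same displacement lookup:

#include <cstdint>
#include <cstring>

// Sketch: read the disp32 of a mov whose ModRM byte sits at p[1].
// 'data_offset' is where the displacement would start without an SIB byte.
static int32_t mov_disp32(const uint8_t* p, int data_offset) {
  int off = data_offset;
  if ((p[1] & 7) == 0x4) off++;       // r/m == 100b: an SIB byte intervenes
  int32_t d;
  std::memcpy(&d, p + off, sizeof d); // displacement may be unaligned
  return d;
}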
void NativeMovRegMem::verify() { void NativeMovRegMem::verify() {
// make sure code pattern is actually a mov [reg+offset], reg instruction // make sure code pattern is actually a mov [reg+offset], reg instruction
u_char test_byte = *(u_char*)instruction_address(); u_char test_byte = *(u_char*)instruction_address();
if ( ! ( (test_byte == instruction_code_reg2memb) switch (test_byte) {
|| (test_byte == instruction_code_mem2regb) case instruction_code_reg2memb: // 0x88 movb a, r
|| (test_byte == instruction_code_mem2regl) case instruction_code_reg2mem: // 0x89 movl a, r (can be movq in 64bit)
|| (test_byte == instruction_code_reg2meml) case instruction_code_mem2regb: // 0x8a movb r, a
|| (test_byte == instruction_code_mem2reg_movzxb ) case instruction_code_mem2reg: // 0x8b movl r, a (can be movq in 64bit)
|| (test_byte == instruction_code_mem2reg_movzxw ) break;
|| (test_byte == instruction_code_mem2reg_movsxb )
|| (test_byte == instruction_code_mem2reg_movsxw ) case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
|| (test_byte == instruction_code_float_s) case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
|| (test_byte == instruction_code_float_d) case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
|| (test_byte == instruction_code_long_volatile) ) ) case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
{ case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
u_char byte1 = ((u_char*)instruction_address())[1]; break;
u_char byte2 = ((u_char*)instruction_address())[2];
if ((test_byte != instruction_code_xmm_ss_prefix && case instruction_code_float_s: // 0xd9 fld_s a
test_byte != instruction_code_xmm_sd_prefix && case instruction_code_float_d: // 0xdd fld_d a
test_byte != instruction_operandsize_prefix) || case instruction_code_xmm_load: // 0x10 movsd xmm, a
byte1 != instruction_code_xmm_code || case instruction_code_xmm_store: // 0x11 movsd a, xmm
(byte2 != instruction_code_xmm_load && case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a
byte2 != instruction_code_xmm_lpd && break;
byte2 != instruction_code_xmm_store)) {
default:
fatal ("not a mov [reg+offs], reg instruction"); fatal ("not a mov [reg+offs], reg instruction");
}
} }
} }
@ -279,7 +380,14 @@ void NativeMovRegMem::print() {
void NativeLoadAddress::verify() { void NativeLoadAddress::verify() {
// make sure code pattern is actually a lea [reg+offset], reg instruction // make sure code pattern is actually a lea [reg+offset], reg instruction
u_char test_byte = *(u_char*)instruction_address(); u_char test_byte = *(u_char*)instruction_address();
if ( ! (test_byte == instruction_code) ) { #ifdef _LP64
if ( (test_byte == instruction_prefix_wide ||
test_byte == instruction_prefix_wide_extended) ) {
test_byte = *(u_char*)(instruction_address() + 1);
}
#endif // _LP64
if ( ! ((test_byte == lea_instruction_code)
LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
fatal ("not a lea reg, [reg+offs] instruction"); fatal ("not a lea reg, [reg+offs] instruction");
} }
} }
@ -289,8 +397,6 @@ void NativeLoadAddress::print() {
tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset()); tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset());
} }
#endif // !AMD64
//-------------------------------------------------------------------------------- //--------------------------------------------------------------------------------
void NativeJump::verify() { void NativeJump::verify() {


@ -235,16 +235,15 @@ class NativeMovConstRegPatching: public NativeMovConstReg {
} }
}; };
#ifndef AMD64
// An interface for accessing/manipulating native moves of the form: // An interface for accessing/manipulating native moves of the form:
// mov[b/w/l] [reg + offset], reg (instruction_code_reg2mem) // mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem)
// mov[b/w/l] reg, [reg+offset] (instruction_code_mem2reg) // mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg)
// mov[s/z]x[w/b] [reg + offset], reg // mov[s/z]x[w/b/q] [reg + offset], reg
// fld_s [reg+offset] // fld_s [reg+offset]
// fld_d [reg+offset] // fld_d [reg+offset]
// fstp_s [reg + offset] // fstp_s [reg + offset]
// fstp_d [reg + offset] // fstp_d [reg + offset]
// mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
// //
// Warning: These routines must be able to handle any instruction sequences // Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long // that are generated as a result of the load/store byte,word,long
@ -255,15 +254,18 @@ class NativeMovConstRegPatching: public NativeMovConstReg {
class NativeMovRegMem: public NativeInstruction { class NativeMovRegMem: public NativeInstruction {
public: public:
enum Intel_specific_constants { enum Intel_specific_constants {
instruction_prefix_wide_lo = Assembler::REX,
instruction_prefix_wide_hi = Assembler::REX_WRXB,
instruction_code_xor = 0x33, instruction_code_xor = 0x33,
instruction_extended_prefix = 0x0F, instruction_extended_prefix = 0x0F,
instruction_code_mem2reg_movslq = 0x63,
instruction_code_mem2reg_movzxb = 0xB6, instruction_code_mem2reg_movzxb = 0xB6,
instruction_code_mem2reg_movsxb = 0xBE, instruction_code_mem2reg_movsxb = 0xBE,
instruction_code_mem2reg_movzxw = 0xB7, instruction_code_mem2reg_movzxw = 0xB7,
instruction_code_mem2reg_movsxw = 0xBF, instruction_code_mem2reg_movsxw = 0xBF,
instruction_operandsize_prefix = 0x66, instruction_operandsize_prefix = 0x66,
instruction_code_reg2meml = 0x89, instruction_code_reg2mem = 0x89,
instruction_code_mem2regl = 0x8b, instruction_code_mem2reg = 0x8b,
instruction_code_reg2memb = 0x88, instruction_code_reg2memb = 0x88,
instruction_code_mem2regb = 0x8a, instruction_code_mem2regb = 0x8a,
instruction_code_float_s = 0xd9, instruction_code_float_s = 0xd9,
@ -282,73 +284,18 @@ class NativeMovRegMem: public NativeInstruction {
next_instruction_offset = 4 next_instruction_offset = 4
}; };
address instruction_address() const { // helper
if (*addr_at(instruction_offset) == instruction_operandsize_prefix && int instruction_start() const;
*addr_at(instruction_offset+1) != instruction_code_xmm_code) {
return addr_at(instruction_offset+1); // Not SSE instructions
}
else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
return addr_at(instruction_offset+1);
}
else if (*addr_at(instruction_offset) == instruction_code_xor) {
return addr_at(instruction_offset+2);
}
else return addr_at(instruction_offset);
}
address next_instruction_address() const { address instruction_address() const;
switch (*addr_at(instruction_offset)) {
case instruction_operandsize_prefix:
if (*addr_at(instruction_offset+1) == instruction_code_xmm_code)
return instruction_address() + instruction_size; // SSE instructions
case instruction_extended_prefix:
return instruction_address() + instruction_size + 1;
case instruction_code_reg2meml:
case instruction_code_mem2regl:
case instruction_code_reg2memb:
case instruction_code_mem2regb:
case instruction_code_xor:
return instruction_address() + instruction_size + 2;
default:
return instruction_address() + instruction_size;
}
}
int offset() const{
if (*addr_at(instruction_offset) == instruction_operandsize_prefix &&
*addr_at(instruction_offset+1) != instruction_code_xmm_code) {
return int_at(data_offset+1); // Not SSE instructions
}
else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
return int_at(data_offset+1);
}
else if (*addr_at(instruction_offset) == instruction_code_xor ||
*addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
*addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
*addr_at(instruction_offset) == instruction_operandsize_prefix) {
return int_at(data_offset+2);
}
else return int_at(data_offset);
}
void set_offset(int x) { address next_instruction_address() const;
if (*addr_at(instruction_offset) == instruction_operandsize_prefix &&
*addr_at(instruction_offset+1) != instruction_code_xmm_code) { int offset() const;
set_int_at(data_offset+1, x); // Not SSE instructions
} void set_offset(int x);
else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
set_int_at(data_offset+1, x);
}
else if (*addr_at(instruction_offset) == instruction_code_xor ||
*addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
*addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
*addr_at(instruction_offset) == instruction_operandsize_prefix) {
set_int_at(data_offset+2, x);
}
else set_int_at(data_offset, x);
}
void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); } void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); }
void copy_instruction_to(address new_instruction_address);
void verify(); void verify();
void print (); void print ();
@ -385,9 +332,19 @@ class NativeMovRegMemPatching: public NativeMovRegMem {
// leal reg, [reg + offset] // leal reg, [reg + offset]
class NativeLoadAddress: public NativeMovRegMem { class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
static const bool has_rex = true;
static const int rex_size = 1;
#else
static const bool has_rex = false;
static const int rex_size = 0;
#endif // AMD64
public: public:
enum Intel_specific_constants { enum Intel_specific_constants {
instruction_code = 0x8D instruction_prefix_wide = Assembler::REX_W,
instruction_prefix_wide_extended = Assembler::REX_WB,
lea_instruction_code = 0x8D,
mov64_instruction_code = 0xB8
}; };
void verify(); void verify();
@ -406,8 +363,6 @@ class NativeLoadAddress: public NativeMovRegMem {
} }
}; };
#endif // AMD64
// jump rel32off // jump rel32off
class NativeJump: public NativeInstruction { class NativeJump: public NativeInstruction {
@ -424,22 +379,23 @@ class NativeJump: public NativeInstruction {
address next_instruction_address() const { return addr_at(next_instruction_offset); } address next_instruction_address() const { return addr_at(next_instruction_offset); }
address jump_destination() const { address jump_destination() const {
address dest = (int_at(data_offset)+next_instruction_address()); address dest = (int_at(data_offset)+next_instruction_address());
#ifdef AMD64 // What is this about? // 32bit used to encode unresolved jmp as jmp -1
// 64bit can't produce this so it used jump to self.
// Now 32bit and 64bit use jump to self as the unresolved address
// which the inline cache code (and relocs) know about
// return -1 if jump to self // return -1 if jump to self
dest = (dest == (address) this) ? (address) -1 : dest; dest = (dest == (address) this) ? (address) -1 : dest;
#endif // AMD64
return dest; return dest;
} }
void set_jump_destination(address dest) { void set_jump_destination(address dest) {
intptr_t val = dest - next_instruction_address(); intptr_t val = dest - next_instruction_address();
#ifdef AMD64 if (dest == (address) -1) {
if (dest == (address) -1) { // can't encode jump to -1
val = -5; // jump to self val = -5; // jump to self
} else {
assert((labs(val) & 0xFFFFFFFF00000000) == 0,
"must be 32bit offset");
} }
#ifdef AMD64
assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64 #endif // AMD64
set_int_at(data_offset, (jint)val); set_int_at(data_offset, (jint)val);
} }
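The magic value -5 works because jmp rel32 is five bytes (0xE9 plus the displacement) and the displacement is relative to the next instruction: target = jmp_addr + 5 + rel32, so rel32 = -5 lands back on the jump itself. Both 32- and 64-bit now use that jump-to-self as the "unresolved" marker. A one-line sketch of the arithmetic:

#include <cstdint>

// Sketch: rel32 for a 5-byte 'jmp rel32'. rel32_for(p, p) == -5, the
// jump-to-self encoding used to mark an unresolved call site.
static int32_t rel32_for(const uint8_t* jmp_addr, const uint8_t* dest) {
  return (int32_t)(dest - (jmp_addr + 5));  // caller must assert it fits
}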
@ -568,11 +524,15 @@ inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) =
(ubyte_at(0) & 0xF0) == 0x70; /* short jump */ } (ubyte_at(0) & 0xF0) == 0x70; /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() { inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64 #ifdef AMD64
return ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl && if ( ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
ubyte_at(1) == 0x05 && // 00 rax 101 ubyte_at(1) == 0x05 ) { // 00 rax 101
((intptr_t) addr_at(6)) + int_at(2) == (intptr_t) os::get_polling_page(); address fault = addr_at(6) + int_at(2);
return os::is_poll_address(fault);
} else {
return false;
}
#else #else
return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2regl || return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) && ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
(ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */ (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
(os::is_poll_address((address)int_at(2))); (os::is_poll_address((address)int_at(2)));
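On AMD64 the poll instruction is test eax, [rip+disp32]: opcode 0x85 with ModRM 0x05 (mod 00, reg eax, r/m 101 selects RIP-relative addressing), six bytes in all. RIP-relative displacements count from the end of the instruction, hence addr_at(6) + int_at(2). A hedged sketch of the same recognition:

#include <cstdint>
#include <cstring>

// Sketch: does the instruction at 'i' read the polling page?
// 'is_poll_address' is a stand-in for the os:: query.
static bool looks_like_poll(const uint8_t* i,
                            bool (*is_poll_address)(uintptr_t)) {
  if (i[0] != 0x85 || i[1] != 0x05) return false;  // test eax, [rip+disp32]
  int32_t disp;
  std::memcpy(&disp, i + 2, sizeof disp);
  return is_poll_address((uintptr_t)(i + 6) + disp); // RIP = end of instruction
}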


@ -30,11 +30,11 @@ void Relocation::pd_set_data_value(address x, intptr_t o) {
#ifdef AMD64 #ifdef AMD64
x += o; x += o;
typedef Assembler::WhichOperand WhichOperand; typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64, call32, narrow oop WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm, call32, narrow oop
assert(which == Assembler::disp32_operand || assert(which == Assembler::disp32_operand ||
which == Assembler::narrow_oop_operand || which == Assembler::narrow_oop_operand ||
which == Assembler::imm64_operand, "format unpacks ok"); which == Assembler::imm_operand, "format unpacks ok");
if (which == Assembler::imm64_operand) { if (which == Assembler::imm_operand) {
*pd_address_in_code() = x; *pd_address_in_code() = x;
} else if (which == Assembler::narrow_oop_operand) { } else if (which == Assembler::narrow_oop_operand) {
address disp = Assembler::locate_operand(addr(), which); address disp = Assembler::locate_operand(addr(), which);
@ -81,11 +81,16 @@ void Relocation::pd_set_call_destination(address x) {
nativeCall_at(addr())->set_destination(x); nativeCall_at(addr())->set_destination(x);
} else if (ni->is_jump()) { } else if (ni->is_jump()) {
NativeJump* nj = nativeJump_at(addr()); NativeJump* nj = nativeJump_at(addr());
#ifdef AMD64
// Unresolved jumps are recognized by a destination of -1
// However 64bit can't actually produce such an address
// and encodes a jump to self instead; jump_destination will
// return -1 as the signal. We must not relocate this
// jmp or the ic code will not see it as unresolved.
if (nj->jump_destination() == (address) -1) { if (nj->jump_destination() == (address) -1) {
x = (address) -1; // retain jump to self x = addr(); // jump to self
} }
#endif // AMD64
nj->set_jump_destination(x); nj->set_jump_destination(x);
} else if (ni->is_cond_jump()) { } else if (ni->is_cond_jump()) {
// %%%% kludge this, for now, until we get a jump_destination method // %%%% kludge this, for now, until we get a jump_destination method
@ -106,19 +111,19 @@ address* Relocation::pd_address_in_code() {
// we must parse the instruction a bit to find the embedded word. // we must parse the instruction a bit to find the embedded word.
assert(is_data(), "must be a DataRelocation"); assert(is_data(), "must be a DataRelocation");
typedef Assembler::WhichOperand WhichOperand; typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64/imm32 WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32
#ifdef AMD64 #ifdef AMD64
assert(which == Assembler::disp32_operand || assert(which == Assembler::disp32_operand ||
which == Assembler::call32_operand || which == Assembler::call32_operand ||
which == Assembler::imm64_operand, "format unpacks ok"); which == Assembler::imm_operand, "format unpacks ok");
if (which != Assembler::imm64_operand) { if (which != Assembler::imm_operand) {
// The "address" in the code is a displacement; we can't return it as // The "address" in the code is a displacement; we can't return it as
// an address* since it is really a jint* // an address* since it is really a jint*
ShouldNotReachHere(); ShouldNotReachHere();
return NULL; return NULL;
} }
#else #else
assert(which == Assembler::disp32_operand || which == Assembler::imm32_operand, "format unpacks ok"); assert(which == Assembler::disp32_operand || which == Assembler::imm_operand, "format unpacks ok");
#endif // AMD64 #endif // AMD64
return (address*) Assembler::locate_operand(addr(), which); return (address*) Assembler::locate_operand(addr(), which);
} }
@ -131,11 +136,11 @@ address Relocation::pd_get_address_from_code() {
// we must parse the instruction a bit to find the embedded word. // we must parse the instruction a bit to find the embedded word.
assert(is_data(), "must be a DataRelocation"); assert(is_data(), "must be a DataRelocation");
typedef Assembler::WhichOperand WhichOperand; typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64/imm32 WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32
assert(which == Assembler::disp32_operand || assert(which == Assembler::disp32_operand ||
which == Assembler::call32_operand || which == Assembler::call32_operand ||
which == Assembler::imm64_operand, "format unpacks ok"); which == Assembler::imm_operand, "format unpacks ok");
if (which != Assembler::imm64_operand) { if (which != Assembler::imm_operand) {
address ip = addr(); address ip = addr();
address disp = Assembler::locate_operand(ip, which); address disp = Assembler::locate_operand(ip, which);
address next_ip = Assembler::locate_next_instruction(ip); address next_ip = Assembler::locate_next_instruction(ip);
@ -169,3 +174,44 @@ void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen)
NativeInstruction* ni = nativeInstruction_at(x); NativeInstruction* ni = nativeInstruction_at(x);
*(short*)ni->addr_at(0) = instrs[0]; *(short*)ni->addr_at(0) = instrs[0];
} }
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
#ifdef _LP64
typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format();
// This format is imm but it is really disp32
which = Assembler::disp32_operand;
address orig_addr = old_addr_for(addr(), src, dest);
NativeInstruction* oni = nativeInstruction_at(orig_addr);
int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
// This poll_addr is off by the size of the instruction, but that is irrelevant
intptr_t poll_addr = (intptr_t)oni + *orig_disp;
NativeInstruction* ni = nativeInstruction_at(addr());
intptr_t new_disp = poll_addr - (intptr_t) ni;
int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
* disp = (int32_t)new_disp;
#endif // _LP64
}
void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
#ifdef _LP64
typedef Assembler::WhichOperand WhichOperand;
WhichOperand which = (WhichOperand) format();
// This format is imm but it is really disp32
which = Assembler::disp32_operand;
address orig_addr = old_addr_for(addr(), src, dest);
NativeInstruction* oni = nativeInstruction_at(orig_addr);
int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
// This poll_addr is off by the size of the instruction, but that is irrelevant
intptr_t poll_addr = (intptr_t)oni + *orig_disp;
NativeInstruction* ni = nativeInstruction_at(addr());
intptr_t new_disp = poll_addr - (intptr_t) ni;
int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
* disp = (int32_t)new_disp;
#endif // _LP64
}
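Both fixups recompute a RIP-relative disp32 after code has been copied: the polling page stays put, so the new displacement is simply poll_addr minus the new instruction address. As the comments note, measuring from the instruction start instead of its end makes poll_addr wrong by the instruction length, but the same error appears on both sides of the subtraction and cancels. A sketch of just the arithmetic:

#include <cstdint>

// Sketch: rebase a RIP-relative disp32 from old_insn to new_insn.
// The instruction-length error in 'poll_addr' cancels because the old
// and new displacement are both taken from the instruction start.
static int32_t rebase_disp32(const uint8_t* old_insn, int32_t old_disp,
                             const uint8_t* new_insn) {
  intptr_t poll_addr = (intptr_t)old_insn + old_disp;
  return (int32_t)(poll_addr - (intptr_t)new_insn);
}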


@ -78,18 +78,18 @@ void OptoRuntime::generate_exception_blob() {
address start = __ pc(); address start = __ pc();
__ pushl(rdx); __ push(rdx);
__ subl(rsp, return_off * wordSize); // Prolog! __ subptr(rsp, return_off * wordSize); // Prolog!
// rbp, location is implicitly known // rbp, location is implicitly known
__ movl(Address(rsp,rbp_off *wordSize),rbp); __ movptr(Address(rsp,rbp_off *wordSize), rbp);
// Store exception in Thread object. We cannot pass any arguments to the // Store exception in Thread object. We cannot pass any arguments to the
// handle_exception call, since we do not want to make any assumption // handle_exception call, since we do not want to make any assumption
// about the size of the frame where the exception happened in. // about the size of the frame where the exception happened in.
__ get_thread(rcx); __ get_thread(rcx);
__ movl(Address(rcx, JavaThread::exception_oop_offset()), rax); __ movptr(Address(rcx, JavaThread::exception_oop_offset()), rax);
__ movl(Address(rcx, JavaThread::exception_pc_offset()), rdx); __ movptr(Address(rcx, JavaThread::exception_pc_offset()), rdx);
// This call does all the hard work. It checks if an exception handler // This call does all the hard work. It checks if an exception handler
// exists in the method. // exists in the method.
@ -97,7 +97,7 @@ void OptoRuntime::generate_exception_blob() {
// If not, it prepares for stack-unwinding, restoring the callee-save // If not, it prepares for stack-unwinding, restoring the callee-save
// registers of the frame being removed. // registers of the frame being removed.
// //
__ movl(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument __ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
__ set_last_Java_frame(rcx, noreg, noreg, NULL); __ set_last_Java_frame(rcx, noreg, noreg, NULL);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
@ -108,10 +108,10 @@ void OptoRuntime::generate_exception_blob() {
__ reset_last_Java_frame(rcx, false, false); __ reset_last_Java_frame(rcx, false, false);
// Restore callee-saved registers // Restore callee-saved registers
__ movl(rbp, Address(rsp, rbp_off * wordSize)); __ movptr(rbp, Address(rsp, rbp_off * wordSize));
__ addl(rsp, return_off * wordSize); // Epilog! __ addptr(rsp, return_off * wordSize); // Epilog!
__ popl(rdx); // Exception pc __ pop(rdx); // Exception pc
// rax,: exception handler for given <exception oop/exception pc> // rax,: exception handler for given <exception oop/exception pc>
@ -119,23 +119,23 @@ void OptoRuntime::generate_exception_blob() {
// We have a handler in rax, (could be deopt blob) // We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it. // rdx - throwing pc, deopt blob will need it.
__ pushl(rax); __ push(rax);
// rcx contains handler address // rcx contains handler address
__ get_thread(rcx); // TLS __ get_thread(rcx); // TLS
// Get the exception // Get the exception
__ movl(rax, Address(rcx, JavaThread::exception_oop_offset())); __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
// Get the exception pc in case we are deoptimized // Get the exception pc in case we are deoptimized
__ movl(rdx, Address(rcx, JavaThread::exception_pc_offset())); __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
#ifdef ASSERT #ifdef ASSERT
__ movl(Address(rcx, JavaThread::exception_handler_pc_offset()), 0); __ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), (int32_t)NULL_WORD);
__ movl(Address(rcx, JavaThread::exception_pc_offset()), 0); __ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
#endif #endif
// Clear the exception oop so GC no longer processes it as a root. // Clear the exception oop so GC no longer processes it as a root.
__ movl(Address(rcx, JavaThread::exception_oop_offset()), 0); __ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
__ popl(rcx); __ pop(rcx);
// rax,: exception oop // rax,: exception oop
// rcx: exception handler // rcx: exception handler

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -28,6 +28,6 @@
// Implementation of the platform-specific part of StubRoutines - for // Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file. // a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::i486::_verify_mxcsr_entry = NULL; address StubRoutines::x86::_verify_mxcsr_entry = NULL;
address StubRoutines::i486::_verify_fpu_cntrl_wrd_entry= NULL; address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry= NULL;
address StubRoutines::i486::_call_stub_compiled_return = NULL; address StubRoutines::x86::_call_stub_compiled_return = NULL;


@ -31,7 +31,7 @@ enum platform_dependent_constants {
code_size2 = 22000 // simply increase if too small (assembler will crash if too small) code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
}; };
class i486 { class x86 {
friend class StubGenerator; friend class StubGenerator;
friend class VMStructs; friend class VMStructs;
@ -54,4 +54,4 @@ class i486 {
}; };
static bool returns_to_call_stub(address return_pc) { return (return_pc == _call_stub_return_address) || static bool returns_to_call_stub(address return_pc) { return (return_pc == _call_stub_return_address) ||
return_pc == i486::get_call_stub_compiled_return(); } return_pc == x86::get_call_stub_compiled_return(); }


@ -28,16 +28,16 @@
// Implementation of the platform-specific part of StubRoutines - for // Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file. // a description of how to extend it, see the stubRoutines.hpp file.
address StubRoutines::amd64::_get_previous_fp_entry = NULL; address StubRoutines::x86::_get_previous_fp_entry = NULL;
address StubRoutines::amd64::_verify_mxcsr_entry = NULL; address StubRoutines::x86::_verify_mxcsr_entry = NULL;
address StubRoutines::amd64::_f2i_fixup = NULL; address StubRoutines::x86::_f2i_fixup = NULL;
address StubRoutines::amd64::_f2l_fixup = NULL; address StubRoutines::x86::_f2l_fixup = NULL;
address StubRoutines::amd64::_d2i_fixup = NULL; address StubRoutines::x86::_d2i_fixup = NULL;
address StubRoutines::amd64::_d2l_fixup = NULL; address StubRoutines::x86::_d2l_fixup = NULL;
address StubRoutines::amd64::_float_sign_mask = NULL; address StubRoutines::x86::_float_sign_mask = NULL;
address StubRoutines::amd64::_float_sign_flip = NULL; address StubRoutines::x86::_float_sign_flip = NULL;
address StubRoutines::amd64::_double_sign_mask = NULL; address StubRoutines::x86::_double_sign_mask = NULL;
address StubRoutines::amd64::_double_sign_flip = NULL; address StubRoutines::x86::_double_sign_flip = NULL;
address StubRoutines::amd64::_mxcsr_std = NULL; address StubRoutines::x86::_mxcsr_std = NULL;


@ -30,13 +30,13 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _
enum platform_dependent_constants enum platform_dependent_constants
{ {
code_size1 = 9000, // simply increase if too small (assembler will code_size1 = 19000, // simply increase if too small (assembler will
// crash if too small) // crash if too small)
code_size2 = 22000 // simply increase if too small (assembler will code_size2 = 22000 // simply increase if too small (assembler will
// crash if too small) // crash if too small)
}; };
class amd64 { class x86 {
friend class StubGenerator; friend class StubGenerator;
private: private:


@ -43,9 +43,9 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
// //
#ifdef ASSERT #ifdef ASSERT
{ Label L; { Label L;
__ leal(rax, Address(rbp, __ lea(rax, Address(rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize)); frame::interpreter_frame_monitor_block_top_offset * wordSize));
__ cmpl(rax, rsp); // rax, = maximal rsp for current rbp, __ cmpptr(rax, rsp); // rax, = maximal rsp for current rbp,
// (stack grows negative) // (stack grows negative)
__ jcc(Assembler::aboveEqual, L); // check if frame is complete __ jcc(Assembler::aboveEqual, L); // check if frame is complete
__ stop ("interpreter frame not set up"); __ stop ("interpreter frame not set up");
@ -80,7 +80,7 @@ address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(con
address TemplateInterpreterGenerator::generate_ClassCastException_handler() { address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address entry = __ pc(); address entry = __ pc();
// object is at TOS // object is at TOS
__ popl(rax); __ pop(rax);
// expression stack must be empty before entering the VM if an exception // expression stack must be empty before entering the VM if an exception
// happened // happened
__ empty_expression_stack(); __ empty_expression_stack();
@ -97,7 +97,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
address entry = __ pc(); address entry = __ pc();
if (pass_oop) { if (pass_oop) {
// object is at TOS // object is at TOS
__ popl(rbx); __ pop(rbx);
} }
// expression stack must be empty before entering the VM if an exception happened // expression stack must be empty before entering the VM if an exception happened
__ empty_expression_stack(); __ empty_expression_stack();
@ -110,7 +110,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
if (message != NULL) { if (message != NULL) {
__ lea(rbx, ExternalAddress((address)message)); __ lea(rbx, ExternalAddress((address)message));
} else { } else {
__ movl(rbx, NULL_WORD); __ movptr(rbx, (int32_t)NULL_WORD);
} }
__ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx); __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
} }
@ -123,7 +123,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc(); address entry = __ pc();
// NULL last_sp until next java call // NULL last_sp until next java call
__ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ dispatch_next(state); __ dispatch_next(state);
return entry; return entry;
} }
@ -160,32 +160,32 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// In SSE mode, interpreter returns FP results in xmm0 but they need // In SSE mode, interpreter returns FP results in xmm0 but they need
// to end up back on the FPU so it can operate on them. // to end up back on the FPU so it can operate on them.
if (state == ftos && UseSSE >= 1) { if (state == ftos && UseSSE >= 1) {
__ subl(rsp, wordSize); __ subptr(rsp, wordSize);
__ movflt(Address(rsp, 0), xmm0); __ movflt(Address(rsp, 0), xmm0);
__ fld_s(Address(rsp, 0)); __ fld_s(Address(rsp, 0));
__ addl(rsp, wordSize); __ addptr(rsp, wordSize);
} else if (state == dtos && UseSSE >= 2) { } else if (state == dtos && UseSSE >= 2) {
__ subl(rsp, 2*wordSize); __ subptr(rsp, 2*wordSize);
__ movdbl(Address(rsp, 0), xmm0); __ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0)); __ fld_d(Address(rsp, 0));
__ addl(rsp, 2*wordSize); __ addptr(rsp, 2*wordSize);
} }
__ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter"); __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");
// Restore stack bottom in case i2c adjusted stack // Restore stack bottom in case i2c adjusted stack
__ movl(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that rsp is now tos until next java call // and NULL it as marker that rsp is now tos until next java call
__ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
__ get_cache_and_index_at_bcp(rbx, rcx, 1); __ get_cache_and_index_at_bcp(rbx, rcx, 1);
__ movl(rbx, Address(rbx, rcx, __ movl(rbx, Address(rbx, rcx,
Address::times_4, constantPoolCacheOopDesc::base_offset() + Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset())); ConstantPoolCacheEntry::flags_offset()));
__ andl(rbx, 0xFF); __ andptr(rbx, 0xFF);
__ leal(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
__ dispatch_next(state, step); __ dispatch_next(state, step);
return entry; return entry;
} }
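Besides the xmm-to-FPU shuffle, this return entry restores and re-nulls last_sp and then pops the callee's arguments by reading the parameter size out of the low byte of the constant-pool-cache flags word. A hedged sketch of that final pop (names illustrative):

#include <cstdint>

// Sketch: pop the callee's arguments off the expression stack. The low
// byte of the cp-cache entry's flags holds the parameter size in slots;
// the stub does the same with andptr(rbx, 0xFF) and a scaled lea on rsp.
static intptr_t* pop_callee_args(intptr_t* tos, uint32_t cache_flags) {
  uint32_t slots = cache_flags & 0xFF;
  return tos + slots;                 // stack of word-sized slots
}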
@ -196,29 +196,29 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
// In SSE mode, FP results are in xmm0 // In SSE mode, FP results are in xmm0
if (state == ftos && UseSSE > 0) { if (state == ftos && UseSSE > 0) {
__ subl(rsp, wordSize); __ subptr(rsp, wordSize);
__ movflt(Address(rsp, 0), xmm0); __ movflt(Address(rsp, 0), xmm0);
__ fld_s(Address(rsp, 0)); __ fld_s(Address(rsp, 0));
__ addl(rsp, wordSize); __ addptr(rsp, wordSize);
} else if (state == dtos && UseSSE >= 2) { } else if (state == dtos && UseSSE >= 2) {
__ subl(rsp, 2*wordSize); __ subptr(rsp, 2*wordSize);
__ movdbl(Address(rsp, 0), xmm0); __ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0)); __ fld_d(Address(rsp, 0));
__ addl(rsp, 2*wordSize); __ addptr(rsp, 2*wordSize);
} }
__ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter"); __ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");
// The stack is not extended by deopt but we must NULL last_sp as this // The stack is not extended by deopt but we must NULL last_sp as this
// entry is like a "return". // entry is like a "return".
__ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
// handle exceptions // handle exceptions
{ Label L; { Label L;
const Register thread = rcx; const Register thread = rcx;
__ get_thread(thread); __ get_thread(thread);
__ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
__ should_not_reach_here(); __ should_not_reach_here();
@ -254,14 +254,14 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
address entry = __ pc(); address entry = __ pc();
switch (type) { switch (type) {
case T_BOOLEAN: __ c2bool(rax); break; case T_BOOLEAN: __ c2bool(rax); break;
case T_CHAR : __ andl(rax, 0xFFFF); break; case T_CHAR : __ andptr(rax, 0xFFFF); break;
case T_BYTE : __ sign_extend_byte (rax); break; case T_BYTE : __ sign_extend_byte (rax); break;
case T_SHORT : __ sign_extend_short(rax); break; case T_SHORT : __ sign_extend_short(rax); break;
case T_INT : /* nothing to do */ break; case T_INT : /* nothing to do */ break;
case T_DOUBLE : case T_DOUBLE :
case T_FLOAT : case T_FLOAT :
{ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
__ popl(t); // remove return address first __ pop(t); // remove return address first
__ pop_dtos_to_rsp(); __ pop_dtos_to_rsp();
// Must return a result for interpreter or compiler. In SSE // Must return a result for interpreter or compiler. In SSE
// mode, results are returned in xmm0 and the FPU stack must // mode, results are returned in xmm0 and the FPU stack must
@ -280,13 +280,13 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
__ fld_d(Address(rsp, 0)); __ fld_d(Address(rsp, 0));
} }
// and pop the temp // and pop the temp
__ addl(rsp, 2 * wordSize); __ addptr(rsp, 2 * wordSize);
__ pushl(t); // restore return address __ push(t); // restore return address
} }
break; break;
case T_OBJECT : case T_OBJECT :
// retrieve result from frame // retrieve result from frame
__ movl(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
// and verify it // and verify it
__ verify_oop(rax); __ verify_oop(rax);
break; break;
@ -322,12 +322,12 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset()); const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
if (ProfileInterpreter) { // %%% Merge this into methodDataOop if (ProfileInterpreter) { // %%% Merge this into methodDataOop
__ increment(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset())); __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
} }
// Update standard invocation counters // Update standard invocation counters
__ movl(rax, backedge_counter); // load backedge counter __ movl(rax, backedge_counter); // load backedge counter
__ increment(rcx, InvocationCounter::count_increment); __ incrementl(rcx, InvocationCounter::count_increment);
__ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
__ movl(invocation_counter, rcx); // save invocation count __ movl(invocation_counter, rcx); // save invocation count
@ -382,10 +382,10 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp). // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL // The call returns the address of the verified entry point for the method or NULL
// if the compilation did not complete (either went background or bailed out). // if the compilation did not complete (either went background or bailed out).
__ movl(rax, (int)false); __ movptr(rax, (int32_t)false);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
__ movl(rbx, Address(rbp, method_offset)); // restore methodOop __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
// Preserve invariant that rsi/rdi contain bcp/locals of sender frame // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
// and jump to the interpreted entry. // and jump to the interpreted entry.
@ -433,7 +433,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
Label after_frame_check_pop; Label after_frame_check_pop;
__ pushl(rsi); __ push(rsi);
const Register thread = rsi; const Register thread = rsi;
@ -443,43 +443,43 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
const Address stack_size(thread, Thread::stack_size_offset()); const Address stack_size(thread, Thread::stack_size_offset());
// locals + overhead, in bytes // locals + overhead, in bytes
__ leal(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size)); __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));
#ifdef ASSERT #ifdef ASSERT
Label stack_base_okay, stack_size_okay; Label stack_base_okay, stack_size_okay;
// verify that thread stack base is non-zero // verify that thread stack base is non-zero
__ cmpl(stack_base, 0); __ cmpptr(stack_base, (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, stack_base_okay); __ jcc(Assembler::notEqual, stack_base_okay);
__ stop("stack base is zero"); __ stop("stack base is zero");
__ bind(stack_base_okay); __ bind(stack_base_okay);
// verify that thread stack size is non-zero // verify that thread stack size is non-zero
__ cmpl(stack_size, 0); __ cmpptr(stack_size, 0);
__ jcc(Assembler::notEqual, stack_size_okay); __ jcc(Assembler::notEqual, stack_size_okay);
__ stop("stack size is zero"); __ stop("stack size is zero");
__ bind(stack_size_okay); __ bind(stack_size_okay);
#endif #endif
// Add stack base to locals and subtract stack size // Add stack base to locals and subtract stack size
__ addl(rax, stack_base); __ addptr(rax, stack_base);
__ subl(rax, stack_size); __ subptr(rax, stack_size);
// Use the maximum number of pages we might bang. // Use the maximum number of pages we might bang.
const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
(StackRedPages+StackYellowPages); (StackRedPages+StackYellowPages);
__ addl(rax, max_pages * page_size); __ addptr(rax, max_pages * page_size);
// check against the current stack bottom // check against the current stack bottom
__ cmpl(rsp, rax); __ cmpptr(rsp, rax);
__ jcc(Assembler::above, after_frame_check_pop); __ jcc(Assembler::above, after_frame_check_pop);
__ popl(rsi); // get saved bcp / (c++ prev state ). __ pop(rsi); // get saved bcp / (c++ prev state ).
__ popl(rax); // get return address __ pop(rax); // get return address
__ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry())); __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
// all done with frame size check // all done with frame size check
__ bind(after_frame_check_pop); __ bind(after_frame_check_pop);
__ popl(rsi); __ pop(rsi);
__ bind(after_frame_check); __ bind(after_frame_check);
} }
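The arithmetic above computes the lowest rsp this method may safely use: the stack bottom (stack_base - stack_size) plus the frame being built plus every page the banging code might touch (shadow pages, or red+yellow if that is larger). If the current rsp is not above that limit, the stub throws StackOverflowError before building the frame. A hedged sketch of the check:

#include <cstddef>
#include <cstdint>

// Sketch: would a frame of 'frame_bytes' fit without running into the
// guard pages? Mirrors generate_stack_overflow_check (stack grows down).
static bool frame_fits(uintptr_t rsp, uintptr_t stack_base, size_t stack_size,
                       size_t frame_bytes, int max_pages, size_t page_size) {
  uintptr_t bottom = stack_base - stack_size;
  uintptr_t limit  = bottom + frame_bytes + (size_t)max_pages * page_size;
  return rsp > limit;                 // the jcc(Assembler::above, ...) test
}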
@ -507,18 +507,18 @@ void InterpreterGenerator::lock_method(void) {
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes(); const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
__ movl(rax, access_flags); __ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC); __ testl(rax, JVM_ACC_STATIC);
__ movl(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case) __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
__ jcc(Assembler::zero, done); __ jcc(Assembler::zero, done);
__ movl(rax, Address(rbx, methodOopDesc::constants_offset())); __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
__ movl(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes())); __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
__ movl(rax, Address(rax, mirror_offset)); __ movptr(rax, Address(rax, mirror_offset));
__ bind(done); __ bind(done);
} }
// add space for monitor & lock // add space for monitor & lock
__ subl(rsp, entry_size); // add space for a monitor entry __ subptr(rsp, entry_size); // add space for a monitor entry
__ movl(monitor_block_top, rsp); // set new monitor block top __ movptr(monitor_block_top, rsp); // set new monitor block top
__ movl(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
__ movl(rdx, rsp); // object address __ mov(rdx, rsp); // object address
__ lock_object(rdx); __ lock_object(rdx);
} }
@ -528,38 +528,38 @@ void InterpreterGenerator::lock_method(void) {
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// initialize fixed part of activation frame // initialize fixed part of activation frame
__ pushl(rax); // save return address __ push(rax); // save return address
__ enter(); // save old & set new rbp, __ enter(); // save old & set new rbp,
__ pushl(rsi); // set sender sp __ push(rsi); // set sender sp
__ pushl(NULL_WORD); // leave last_sp as null __ push((int32_t)NULL_WORD); // leave last_sp as null
__ movl(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop __ movptr(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop
__ leal(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
__ pushl(rbx); // save methodOop __ push(rbx); // save methodOop
if (ProfileInterpreter) { if (ProfileInterpreter) {
Label method_data_continue; Label method_data_continue;
__ movl(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
__ testl(rdx, rdx); __ testptr(rdx, rdx);
__ jcc(Assembler::zero, method_data_continue); __ jcc(Assembler::zero, method_data_continue);
__ addl(rdx, in_bytes(methodDataOopDesc::data_offset())); __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
__ bind(method_data_continue); __ bind(method_data_continue);
__ pushl(rdx); // set the mdp (method data pointer) __ push(rdx); // set the mdp (method data pointer)
} else { } else {
__ pushl(0); __ push(0);
} }
__ movl(rdx, Address(rbx, methodOopDesc::constants_offset())); __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
__ movl(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
__ pushl(rdx); // set constant pool cache __ push(rdx); // set constant pool cache
__ pushl(rdi); // set locals pointer __ push(rdi); // set locals pointer
if (native_call) { if (native_call) {
__ pushl(0); // no bcp __ push(0); // no bcp
} else { } else {
__ pushl(rsi); // set bcp __ push(rsi); // set bcp
} }
__ pushl(0); // reserve word for pointer to expression stack bottom __ push(0); // reserve word for pointer to expression stack bottom
__ movl(Address(rsp, 0), rsp); // set expression stack bottom __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
} }
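The push sequence above pins down the interpreter's fixed frame layout; reading it back as word offsets from rbp gives the frame::interpreter_frame_*_offset constants. A sketch of that layout, with offsets inferred from the push order (stack grows down):

// Sketch: slot layout built by generate_fixed_frame, one word each.
enum interp_frame_slot {
  slot_return_addr  =  1,   // push(rax)
  slot_saved_rbp    =  0,   // enter()
  slot_sender_sp    = -1,   // push(rsi)
  slot_last_sp      = -2,   // push(NULL_WORD), reset around Java calls
  slot_method       = -3,   // push(rbx), the methodOop
  slot_mdp          = -4,   // method data pointer, or 0
  slot_cp_cache     = -5,   // constant pool cache
  slot_locals       = -6,   // push(rdi)
  slot_bcp          = -7,   // bytecode pointer, 0 for native calls
  slot_stack_bottom = -8    // expression stack bottom, points at itself
};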
// End of helpers // End of helpers
@ -598,21 +598,21 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// these conditions first and use slow path if necessary. // these conditions first and use slow path if necessary.
// rbx,: method // rbx,: method
// rcx: receiver // rcx: receiver
__ movl(rax, Address(rsp, wordSize)); __ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field // check if local 0 != NULL and read field
__ testl(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path); __ jcc(Assembler::zero, slow_path);
__ movl(rdi, Address(rbx, methodOopDesc::constants_offset())); __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
// read first instruction word and extract bytecode @ 1 and index @ 2 // read first instruction word and extract bytecode @ 1 and index @ 2
__ movl(rdx, Address(rbx, methodOopDesc::const_offset())); __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
__ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset())); __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
// Shift codes right to get the index on the right. // Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a> // The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte); __ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size()))); __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movl(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes())); __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
// rax,: local 0 // rax,: local 0
// rbx,: method // rbx,: method
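A sketch of the decode performed above, assuming the little-endian 32-bit fetch that an x86 movl gives: byte 0 is aload_0 (0x2a), byte 1 is getfield (0xb4), and bytes 2..3 hold the constant-pool-cache index (in native order once the bytecode has been rewritten). Function and assertion names are illustrative:

    #include <cassert>
    #include <cstdint>

    static uint16_t accessor_cp_cache_index(uint32_t first_code_word) {
      assert((first_code_word & 0xFF) == 0x2a);         // aload_0
      assert(((first_code_word >> 8) & 0xFF) == 0xb4);  // getfield
      return (uint16_t)(first_code_word >> 16);         // shrl(rdx, 2*BitsPerByte)
    }

The subsequent shll by exact_log2(ConstantPoolCacheEntry::size()) scales that index into a cache-entry offset for the Address(rdi, rdx, times_ptr, ...) operands that follow.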
@ -629,21 +629,21 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ movl(rcx, __ movl(rcx,
Address(rdi, Address(rdi,
rdx, rdx,
Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte); __ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF); __ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield); __ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path); __ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved // Note: constant pool entry is not valid before bytecode is resolved
__ movl(rcx, __ movptr(rcx,
Address(rdi, Address(rdi,
rdx, rdx,
Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())); Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx, __ movl(rdx,
Address(rdi, Address(rdi,
rdx, rdx,
Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())); Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar; Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1); const Address field_address (rax, rcx, Address::times_1);
@ -682,13 +682,14 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ bind(okay); __ bind(okay);
#endif // ASSERT #endif // ASSERT
// All the rest are a 32 bit wordsize // All the rest are a 32 bit wordsize
__ movl(rax, field_address); // This is ok for now. Since fast accessors should be going away
__ movptr(rax, field_address);
__ bind(xreturn_path); __ bind(xreturn_path);
// _ireturn/_areturn // _ireturn/_areturn
__ popl(rdi); // get return address __ pop(rdi); // get return address
__ movl(rsp, rsi); // set sp to sender sp __ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi); __ jmp(rdi);
// generate a vanilla interpreter entry as the slow path // generate a vanilla interpreter entry as the slow path
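The fast-path load above computes the field address as receiver plus the byte offset held in the cache entry's f2 word (Address(rax, rcx, Address::times_1)). A plain C++ sketch of that access, assuming f2 holds a byte offset as the addressing mode implies:

    #include <cstdint>
    #include <cstring>

    static int32_t load_int_field(const void* receiver, size_t f2_byte_offset) {
      int32_t value;
      std::memcpy(&value, (const char*)receiver + f2_byte_offset, sizeof value);
      return value;
    }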
@ -732,18 +733,18 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// rcx: size of parameters // rcx: size of parameters
// rsi: sender sp // rsi: sender sp
__ popl(rax); // get return address __ pop(rax); // get return address
// for natives the size of locals is zero // for natives the size of locals is zero
// compute beginning of parameters (rdi) // compute beginning of parameters (rdi)
__ leal(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
// add 2 zero-initialized slots for native calls // add 2 zero-initialized slots for native calls
// NULL result handler // NULL result handler
__ pushl(NULL_WORD); __ push((int32_t)NULL_WORD);
// NULL oop temp (mirror or jni oop result) // NULL oop temp (mirror or jni oop result)
__ pushl(NULL_WORD); __ push((int32_t)NULL_WORD);
if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
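The (int32_t)NULL_WORD casts introduced throughout exist because NULL_WORD is pointer-sized while x86-64 push/cmp immediates are limited to a sign-extended 32-bit encoding; narrowing a zero is lossless, so the cast merely selects the encodable immediate form. A sketch of the same narrowing, with an illustrative macro:

    #include <cstdint>

    #define NULL_WORD_SKETCH ((intptr_t)0)

    static int32_t encodable_immediate(intptr_t value) {
      // Safe for NULL_WORD (zero); a real pointer would not survive this cast.
      return (int32_t)value;
    }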
// initialize fixed part of activation frame // initialize fixed part of activation frame
@ -818,8 +819,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
{ Label L; { Label L;
const Address monitor_block_top (rbp, const Address monitor_block_top (rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize); frame::interpreter_frame_monitor_block_top_offset * wordSize);
__ movl(rax, monitor_block_top); __ movptr(rax, monitor_block_top);
__ cmpl(rax, rsp); __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter"); __ stop("broken stack frame setup in interpreter");
__ bind(L); __ bind(L);
@ -838,19 +839,19 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ get_method(method); __ get_method(method);
__ verify_oop(method); __ verify_oop(method);
__ load_unsigned_word(t, Address(method, methodOopDesc::size_of_parameters_offset())); __ load_unsigned_word(t, Address(method, methodOopDesc::size_of_parameters_offset()));
__ shll(t, Interpreter::logStackElementSize()); __ shlptr(t, Interpreter::logStackElementSize());
__ addl(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
__ subl(rsp, t); __ subptr(rsp, t);
__ andl(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
// get signature handler // get signature handler
{ Label L; { Label L;
__ movl(t, Address(method, methodOopDesc::signature_handler_offset())); __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
__ testl(t, t); __ testptr(t, t);
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
__ get_method(method); __ get_method(method);
__ movl(t, Address(method, methodOopDesc::signature_handler_offset())); __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
__ bind(L); __ bind(L);
} }
@ -867,7 +868,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// result handler is in rax, // result handler is in rax,
// set result handler // set result handler
__ movl(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax); __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);
// pass mirror handle if static call // pass mirror handle if static call
{ Label L; { Label L;
@ -876,34 +877,34 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ testl(t, JVM_ACC_STATIC); __ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);
// get mirror // get mirror
__ movl(t, Address(method, methodOopDesc:: constants_offset())); __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
__ movl(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
__ movl(t, Address(t, mirror_offset)); __ movptr(t, Address(t, mirror_offset));
// copy mirror into activation frame // copy mirror into activation frame
__ movl(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t); __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
// pass handle to mirror // pass handle to mirror
__ leal(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
__ movl(Address(rsp, wordSize), t); __ movptr(Address(rsp, wordSize), t);
__ bind(L); __ bind(L);
} }
// get native function entry point // get native function entry point
{ Label L; { Label L;
__ movl(rax, Address(method, methodOopDesc::native_function_offset())); __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ cmp32(rax, unsatisfied.addr()); __ cmpptr(rax, unsatisfied.addr());
__ jcc(Assembler::notEqual, L); __ jcc(Assembler::notEqual, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
__ get_method(method); __ get_method(method);
__ verify_oop(method); __ verify_oop(method);
__ movl(rax, Address(method, methodOopDesc::native_function_offset())); __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
__ bind(L); __ bind(L);
} }
// pass JNIEnv // pass JNIEnv
__ get_thread(thread); __ get_thread(thread);
__ leal(t, Address(thread, JavaThread::jni_environment_offset())); __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
__ movl(Address(rsp, 0), t); __ movptr(Address(rsp, 0), t);
// set_last_Java_frame_before_call // set_last_Java_frame_before_call
// It is enough that the pc() // It is enough that the pc()
@ -934,14 +935,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std())); __ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
} }
else if (CheckJNICalls ) { else if (CheckJNICalls ) {
__ call(RuntimeAddress(StubRoutines::i486::verify_mxcsr_entry())); __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
} }
} }
// Either restore the x87 floating-point control word after returning // Either restore the x87 floating-point control word after returning

// from the JNI call or verify that it wasn't changed. // from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) { if (CheckJNICalls) {
__ call(RuntimeAddress(StubRoutines::i486::verify_fpu_cntrl_wrd_entry())); __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
} }
// save potential result in ST(0) & rdx:rax // save potential result in ST(0) & rdx:rax
@ -975,7 +976,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
if(os::is_MP()) { if(os::is_MP()) {
if (UseMembar) { if (UseMembar) {
__ membar(); // Force this write out before the read below // Force this write out before the read below
__ membar(Assembler::Membar_mask_bits(
Assembler::LoadLoad | Assembler::LoadStore |
Assembler::StoreLoad | Assembler::StoreStore));
} else { } else {
// Write serialization page so VM thread can do a pseudo remote membar. // Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific // We use the current thread pointer to calculate a thread specific
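The UseMembar arm above emits a fence covering StoreLoad so the store of _thread_in_native_trans becomes visible before the safepoint state is read; the alternative arm obtains the same ordering by writing a VM-managed serialization page. A rough C++11 analogue of the fence variant only (a sketch, not the VM's actual mechanism):

    #include <atomic>

    static std::atomic<int> thread_state{0};
    static std::atomic<int> safepoint_state{0};

    static int transition_then_check() {
      thread_state.store(1, std::memory_order_release);     // _thread_in_native_trans
      std::atomic_thread_fence(std::memory_order_seq_cst);  // the membar
      return safepoint_state.load(std::memory_order_relaxed);
    }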
@ -1008,7 +1012,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// preserved and correspond to the bcp/locals pointers. So we do a runtime call // preserved and correspond to the bcp/locals pointers. So we do a runtime call
// by hand. // by hand.
// //
__ pushl(thread); __ push(thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
JavaThread::check_special_condition_for_native_trans))); JavaThread::check_special_condition_for_native_trans)));
__ increment(rsp, wordSize); __ increment(rsp, wordSize);
@ -1023,8 +1027,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ reset_last_Java_frame(thread, true, true); __ reset_last_Java_frame(thread, true, true);
// reset handle block // reset handle block
__ movl(t, Address(thread, JavaThread::active_handles_offset())); __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
__ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), 0); __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
// If result was an oop then unbox and save it in the frame // If result was an oop then unbox and save it in the frame
{ Label L; { Label L;
@ -1033,14 +1037,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), __ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
handler.addr()); handler.addr());
__ jcc(Assembler::notEqual, no_oop); __ jcc(Assembler::notEqual, no_oop);
__ cmpl(Address(rsp, 0), NULL_WORD); __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
__ pop(ltos); __ pop(ltos);
__ testl(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::zero, store_result); __ jcc(Assembler::zero, store_result);
// unbox // unbox
__ movl(rax, Address(rax, 0)); __ movptr(rax, Address(rax, 0));
__ bind(store_result); __ bind(store_result);
__ movl(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax); __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
// keep stack depth as expected by pushing oop which will eventually be discarded // keep stack depth as expected by pushing oop which will eventually be discarded
__ push(ltos); __ push(ltos);
__ bind(no_oop); __ bind(no_oop);
@ -1051,9 +1055,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled); __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
__ jcc(Assembler::notEqual, no_reguard); __ jcc(Assembler::notEqual, no_reguard);
__ pushad(); __ pusha();
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
__ popad(); __ popa();
__ bind(no_reguard); __ bind(no_reguard);
} }
@ -1063,12 +1067,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Can't call_VM until bcp is within a reasonable range. // Can't call_VM until bcp is within a reasonable range.
__ get_method(method); // method is junk from thread_in_native to now. __ get_method(method); // method is junk from thread_in_native to now.
__ verify_oop(method); __ verify_oop(method);
__ movl(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop __ movptr(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop
__ leal(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
// handle exceptions (exception handling will handle unlocking!) // handle exceptions (exception handling will handle unlocking!)
{ Label L; { Label L;
__ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD); __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);
// Note: At some point we may want to unify this with the code used in call_VM_base(); // Note: At some point we may want to unify this with the code used in call_VM_base();
// i.e., we should use the StubRoutines::forward_exception code. For now this // i.e., we should use the StubRoutines::forward_exception code. For now this
@ -1089,10 +1093,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// to check that the object has not been unlocked by an explicit monitorexit bytecode. // to check that the object has not been unlocked by an explicit monitorexit bytecode.
const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock)); const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
__ leal(rdx, monitor); // address of first monitor __ lea(rdx, monitor); // address of first monitor
__ movl(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
__ testl(t, t); __ testptr(t, t);
__ jcc(Assembler::notZero, unlock); __ jcc(Assembler::notZero, unlock);
// Entry already unlocked, need to throw exception // Entry already unlocked, need to throw exception
@ -1114,14 +1118,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
__ pop(ltos); __ pop(ltos);
__ movl(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
__ call(t); __ call(t);
// remove activation // remove activation
__ movl(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
__ leave(); // remove frame anchor __ leave(); // remove frame anchor
__ popl(rdi); // get return address __ pop(rdi); // get return address
__ movl(rsp, t); // set sp to sender sp __ mov(rsp, t); // set sp to sender sp
__ jmp(rdi); __ jmp(rdi);
if (inc_counter) { if (inc_counter) {
@ -1165,10 +1169,10 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
generate_stack_overflow_check(); generate_stack_overflow_check();
// get return address // get return address
__ popl(rax); __ pop(rax);
// compute beginning of parameters (rdi) // compute beginning of parameters (rdi)
__ leal(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize)); __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
// rdx - # of additional locals // rdx - # of additional locals
// allocate space for locals // allocate space for locals
@ -1178,8 +1182,10 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ testl(rdx, rdx); __ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop); __ bind(loop);
if (TaggedStackInterpreter) __ pushl(NULL_WORD); // push tag if (TaggedStackInterpreter) {
__ pushl(NULL_WORD); // initialize local variables __ push((int32_t)NULL_WORD); // push tag
}
__ push((int32_t)NULL_WORD); // initialize local variables
__ decrement(rdx); // until everything initialized __ decrement(rdx); // until everything initialized
__ jcc(Assembler::greater, loop); __ jcc(Assembler::greater, loop);
__ bind(exit); __ bind(exit);
@ -1262,8 +1268,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
{ Label L; { Label L;
const Address monitor_block_top (rbp, const Address monitor_block_top (rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize); frame::interpreter_frame_monitor_block_top_offset * wordSize);
__ movl(rax, monitor_block_top); __ movptr(rax, monitor_block_top);
__ cmpl(rax, rsp); __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter"); __ stop("broken stack frame setup in interpreter");
__ bind(L); __ bind(L);
@ -1283,12 +1289,12 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true);
__ movl(rbx, Address(rbp, method_offset)); // restore methodOop __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
__ movl(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
__ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
__ test_method_data_pointer(rax, profile_method_continue); __ test_method_data_pointer(rax, profile_method_continue);
__ addl(rax, in_bytes(methodDataOopDesc::data_offset())); __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
__ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax); __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
__ jmp(profile_method_continue); __ jmp(profile_method_continue);
} }
// Handle overflow of counter and compile method // Handle overflow of counter and compile method
@ -1482,7 +1488,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Restore sp to interpreter_frame_last_sp even though we are going // Restore sp to interpreter_frame_last_sp even though we are going
// to empty the expression stack for the exception processing. // to empty the expression stack for the exception processing.
__ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
// rax,: exception // rax,: exception
// rdx: return address/pc that threw exception // rdx: return address/pc that threw exception
__ restore_bcp(); // rsi points to call/send __ restore_bcp(); // rsi points to call/send
@ -1544,7 +1550,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// deoptimization blob's unpack entry because of the presence of // deoptimization blob's unpack entry because of the presence of
// adapter frames in C2. // adapter frames in C2.
Label caller_not_deoptimized; Label caller_not_deoptimized;
__ movl(rdx, Address(rbp, frame::return_addr_offset * wordSize)); __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
__ testl(rax, rax); __ testl(rax, rax);
__ jcc(Assembler::notZero, caller_not_deoptimized); __ jcc(Assembler::notZero, caller_not_deoptimized);
@ -1553,10 +1559,10 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_method(rax); __ get_method(rax);
__ verify_oop(rax); __ verify_oop(rax);
__ load_unsigned_word(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset()))); __ load_unsigned_word(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize()); __ shlptr(rax, Interpreter::logStackElementSize());
__ restore_locals(); __ restore_locals();
__ subl(rdi, rax); __ subptr(rdi, rax);
__ addl(rdi, wordSize); __ addptr(rdi, wordSize);
// Save these arguments // Save these arguments
__ get_thread(rcx); __ get_thread(rcx);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi);
@ -1592,8 +1598,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// maintain this kind of invariant all the time we call a small // maintain this kind of invariant all the time we call a small
// fixup routine to move the mutated arguments onto the top of our // fixup routine to move the mutated arguments onto the top of our
// expression stack if necessary. // expression stack if necessary.
__ movl(rax, rsp); __ mov(rax, rsp);
__ movl(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ get_thread(rcx); __ get_thread(rcx);
// PC must point into interpreter here // PC must point into interpreter here
__ set_last_Java_frame(rcx, noreg, rbp, __ pc()); __ set_last_Java_frame(rcx, noreg, rbp, __ pc());
@ -1601,8 +1607,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_thread(rcx); __ get_thread(rcx);
__ reset_last_Java_frame(rcx, true, true); __ reset_last_Java_frame(rcx, true, true);
// Restore the last_sp and null it out // Restore the last_sp and null it out
__ movl(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
@ -1624,13 +1630,13 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// preserve exception over this code sequence // preserve exception over this code sequence
__ pop_ptr(rax); __ pop_ptr(rax);
__ get_thread(rcx); __ get_thread(rcx);
__ movl(Address(rcx, JavaThread::vm_result_offset()), rax); __ movptr(Address(rcx, JavaThread::vm_result_offset()), rax);
// remove the activation (without doing throws on illegalMonitorExceptions) // remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false); __ remove_activation(vtos, rdx, false, true, false);
// restore exception // restore exception
__ get_thread(rcx); __ get_thread(rcx);
__ movl(rax, Address(rcx, JavaThread::vm_result_offset())); __ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
__ movl(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD); __ movptr(Address(rcx, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
__ verify_oop(rax); __ verify_oop(rax);
// Inbetween activations - previous activation type unknown yet // Inbetween activations - previous activation type unknown yet
@ -1641,12 +1647,12 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// rdx: return address/pc that threw exception // rdx: return address/pc that threw exception
// rsp: expression stack of caller // rsp: expression stack of caller
// rbp,: rbp, of caller // rbp,: rbp, of caller
__ pushl(rax); // save exception __ push(rax); // save exception
__ pushl(rdx); // save return address __ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx);
__ movl(rbx, rax); // save exception handler __ mov(rbx, rax); // save exception handler
__ popl(rdx); // restore return address __ pop(rdx); // restore return address
__ popl(rax); // restore exception __ pop(rax); // restore exception
// Note that an "issuing PC" is actually the next PC after the call // Note that an "issuing PC" is actually the next PC after the call
__ jmp(rbx); // jump to exception handler of caller __ jmp(rbx); // jump to exception handler of caller
} }
@ -1665,7 +1671,7 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ load_earlyret_value(state); __ load_earlyret_value(state);
__ get_thread(rcx); __ get_thread(rcx);
__ movl(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset())); __ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset()); const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state // Clear the earlyret state
@ -1716,12 +1722,12 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
address entry = __ pc(); address entry = __ pc();
// prepare expression stack // prepare expression stack
__ popl(rcx); // pop return address so expression stack is 'pure' __ pop(rcx); // pop return address so expression stack is 'pure'
__ push(state); // save tosca __ push(state); // save tosca
// pass tosca registers as arguments & call tracer // pass tosca registers as arguments & call tracer
__ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx); __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
__ movl(rcx, rax); // make sure return address is not destroyed by pop(state) __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
__ pop(state); // restore tosca __ pop(state); // restore tosca
// return // return
@ -1732,12 +1738,12 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
void TemplateInterpreterGenerator::count_bytecode() { void TemplateInterpreterGenerator::count_bytecode() {
__ increment(ExternalAddress((address) &BytecodeCounter::_counter_value)); __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
} }
void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
__ increment(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()])); __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
} }
@ -1747,7 +1753,7 @@ void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
__ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); __ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
ExternalAddress table((address) BytecodePairHistogram::_counters); ExternalAddress table((address) BytecodePairHistogram::_counters);
Address index(noreg, rbx, Address::times_4); Address index(noreg, rbx, Address::times_4);
__ increment(ArrayAddress(table, index)); __ incrementl(ArrayAddress(table, index));
} }
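The pair histogram indexes one counter per (previous, current) bytecode: the previous code sits in the low bits of rbx and the current code is OR'ed in after a shift by log2_number_of_codes. A sketch with an assumed, illustrative code width:

    #include <cstdint>

    enum { kLog2Codes = 8, kCodes = 1 << kLog2Codes };  // width is an assumption here
    static uint32_t pair_counters[kCodes * kCodes];

    static void count_pair(unsigned prev_code, unsigned cur_code) {
      pair_counters[(cur_code << kLog2Codes) | prev_code]++;
    }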
@ -27,6 +27,8 @@
#define __ _masm-> #define __ _masm->
#ifndef CC_INTERP
const int method_offset = frame::interpreter_frame_method_offset * wordSize; const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize; const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize; const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
@ -39,11 +41,11 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
#ifdef ASSERT #ifdef ASSERT
{ {
Label L; Label L;
__ leaq(rax, Address(rbp, __ lea(rax, Address(rbp,
frame::interpreter_frame_monitor_block_top_offset * frame::interpreter_frame_monitor_block_top_offset *
wordSize)); wordSize));
__ cmpq(rax, rsp); // rax = maximal rsp for current rbp (stack __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
// grows negative) // grows negative)
__ jcc(Assembler::aboveEqual, L); // check if frame is complete __ jcc(Assembler::aboveEqual, L); // check if frame is complete
__ stop ("interpreter frame not set up"); __ stop ("interpreter frame not set up");
__ bind(L); __ bind(L);
@ -84,7 +86,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address entry = __ pc(); address entry = __ pc();
// object is at TOS // object is at TOS
__ popq(c_rarg1); __ pop(c_rarg1);
// expression stack must be empty before entering the VM if an // expression stack must be empty before entering the VM if an
// exception happened // exception happened
@ -104,7 +106,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
address entry = __ pc(); address entry = __ pc();
if (pass_oop) { if (pass_oop) {
// object is at TOS // object is at TOS
__ popq(c_rarg2); __ pop(c_rarg2);
} }
// expression stack must be empty before entering the VM if an // expression stack must be empty before entering the VM if an
// exception happened // exception happened
@ -137,7 +139,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc(); address entry = __ pc();
// NULL last_sp until next java call // NULL last_sp until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ dispatch_next(state); __ dispatch_next(state);
return entry; return entry;
} }
@ -153,12 +155,13 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
address entry = __ pc(); address entry = __ pc();
// Restore stack bottom in case i2c adjusted stack // Restore stack bottom in case i2c adjusted stack
__ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that esp is now tos until next java call // and NULL it as marker that esp is now tos until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
__ get_cache_and_index_at_bcp(rbx, rcx, 1); __ get_cache_and_index_at_bcp(rbx, rcx, 1);
__ movl(rbx, Address(rbx, rcx, __ movl(rbx, Address(rbx, rcx,
Address::times_8, Address::times_8,
@ -166,7 +169,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
3 * wordSize)); 3 * wordSize));
__ andl(rbx, 0xFF); __ andl(rbx, 0xFF);
if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter. if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
__ leaq(rsp, Address(rsp, rbx, Address::times_8)); __ lea(rsp, Address(rsp, rbx, Address::times_8));
__ dispatch_next(state, step); __ dispatch_next(state, step);
return entry; return entry;
} }
@ -176,13 +179,13 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
int step) { int step) {
address entry = __ pc(); address entry = __ pc();
// NULL last_sp until next java call // NULL last_sp until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp(); __ restore_bcp();
__ restore_locals(); __ restore_locals();
// handle exceptions // handle exceptions
{ {
Label L; Label L;
__ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL); __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);
__ call_VM(noreg, __ call_VM(noreg,
CAST_FROM_FN_PTR(address, CAST_FROM_FN_PTR(address,
@ -231,7 +234,7 @@ address TemplateInterpreterGenerator::generate_result_handler_for(
case T_DOUBLE : /* nothing to do */ break; case T_DOUBLE : /* nothing to do */ break;
case T_OBJECT : case T_OBJECT :
// retrieve result from frame // retrieve result from frame
__ movq(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize)); __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
// and verify it // and verify it
__ verify_oop(rax); __ verify_oop(rax);
break; break;
@ -336,7 +339,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
InterpreterRuntime::frequency_counter_overflow), InterpreterRuntime::frequency_counter_overflow),
c_rarg1); c_rarg1);
__ movq(rbx, Address(rbp, method_offset)); // restore methodOop __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
// Preserve invariant that r13/r14 contain bcp/locals of sender frame // Preserve invariant that r13/r14 contain bcp/locals of sender frame
// and jump to the interpreted entry. // and jump to the interpreted entry.
__ jmp(*do_continue, relocInfo::none); __ jmp(*do_continue, relocInfo::none);
@ -385,36 +388,36 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
const Address stack_size(r15_thread, Thread::stack_size_offset()); const Address stack_size(r15_thread, Thread::stack_size_offset());
// locals + overhead, in bytes // locals + overhead, in bytes
__ movq(rax, rdx); __ mov(rax, rdx);
__ shll(rax, Interpreter::logStackElementSize()); // 2 slots per parameter. __ shlptr(rax, Interpreter::logStackElementSize()); // 2 slots per parameter.
__ addq(rax, overhead_size); __ addptr(rax, overhead_size);
#ifdef ASSERT #ifdef ASSERT
Label stack_base_okay, stack_size_okay; Label stack_base_okay, stack_size_okay;
// verify that thread stack base is non-zero // verify that thread stack base is non-zero
__ cmpq(stack_base, 0); __ cmpptr(stack_base, (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, stack_base_okay); __ jcc(Assembler::notEqual, stack_base_okay);
__ stop("stack base is zero"); __ stop("stack base is zero");
__ bind(stack_base_okay); __ bind(stack_base_okay);
// verify that thread stack size is non-zero // verify that thread stack size is non-zero
__ cmpq(stack_size, 0); __ cmpptr(stack_size, 0);
__ jcc(Assembler::notEqual, stack_size_okay); __ jcc(Assembler::notEqual, stack_size_okay);
__ stop("stack size is zero"); __ stop("stack size is zero");
__ bind(stack_size_okay); __ bind(stack_size_okay);
#endif #endif
// Add stack base to locals and subtract stack size // Add stack base to locals and subtract stack size
__ addq(rax, stack_base); __ addptr(rax, stack_base);
__ subq(rax, stack_size); __ subptr(rax, stack_size);
// add in the red and yellow zone sizes // add in the red and yellow zone sizes
__ addq(rax, (StackRedPages + StackYellowPages) * page_size); __ addptr(rax, (StackRedPages + StackYellowPages) * page_size);
// check against the current stack bottom // check against the current stack bottom
__ cmpq(rsp, rax); __ cmpptr(rsp, rax);
__ jcc(Assembler::above, after_frame_check); __ jcc(Assembler::above, after_frame_check);
__ popq(rax); // get return address __ pop(rax); // get return address
__ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry())); __ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
// all done with frame size check // all done with frame size check
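The check above reduces to simple address arithmetic: the frame is rejected unless rsp stays above stack_base - stack_size plus the red/yellow guard pages plus the prospective frame size. The same computation in plain C++ (names illustrative):

    #include <cstddef>
    #include <cstdint>

    static bool frame_fits(uintptr_t rsp, uintptr_t stack_base, size_t stack_size,
                           size_t frame_size, size_t guard_zone_bytes) {
      uintptr_t limit = stack_base - stack_size + guard_zone_bytes + frame_size;
      return rsp > limit;  // jcc(Assembler::above, after_frame_check)
    }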
@ -458,17 +461,17 @@ void InterpreterGenerator::lock_method(void) {
__ movl(rax, access_flags); __ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC); __ testl(rax, JVM_ACC_STATIC);
// get receiver (assume this is frequent case) // get receiver (assume this is frequent case)
__ movq(rax, Address(r14, Interpreter::local_offset_in_bytes(0))); __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
__ jcc(Assembler::zero, done); __ jcc(Assembler::zero, done);
__ movq(rax, Address(rbx, methodOopDesc::constants_offset())); __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
__ movq(rax, Address(rax, __ movptr(rax, Address(rax,
constantPoolOopDesc::pool_holder_offset_in_bytes())); constantPoolOopDesc::pool_holder_offset_in_bytes()));
__ movq(rax, Address(rax, mirror_offset)); __ movptr(rax, Address(rax, mirror_offset));
#ifdef ASSERT #ifdef ASSERT
{ {
Label L; Label L;
__ testq(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ stop("synchronization object is NULL"); __ stop("synchronization object is NULL");
__ bind(L); __ bind(L);
@ -479,11 +482,11 @@ void InterpreterGenerator::lock_method(void) {
} }
// add space for monitor & lock // add space for monitor & lock
__ subq(rsp, entry_size); // add space for a monitor entry __ subptr(rsp, entry_size); // add space for a monitor entry
__ movq(monitor_block_top, rsp); // set new monitor block top __ movptr(monitor_block_top, rsp); // set new monitor block top
// store object // store object
__ movq(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
__ movq(c_rarg1, rsp); // object address __ movptr(c_rarg1, rsp); // object address
__ lock_object(c_rarg1); __ lock_object(c_rarg1);
} }
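lock_method carves a BasicObjectLock-sized slot out of the stack below the expression stack, records it as the new monitor block top, and stores the object to lock (the receiver, or the class mirror for static methods). A sketch of that bookkeeping with an illustrative two-word layout:

    #include <cstdint>

    struct MonitorSlotSketch {
      intptr_t displaced_header;  // filled in later by the actual locking code
      void*    obj;               // the object being locked
    };

    static char* push_monitor(char* sp, void* obj, char** monitor_block_top) {
      sp -= sizeof(MonitorSlotSketch);             // subptr(rsp, entry_size)
      *monitor_block_top = sp;                     // movptr(monitor_block_top, rsp)
      ((MonitorSlotSketch*)sp)->obj = obj;         // store object
      return sp;                                   // becomes c_rarg1 for lock_object
    }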
@ -498,40 +501,187 @@ void InterpreterGenerator::lock_method(void) {
// rdx: cp cache // rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// initialize fixed part of activation frame // initialize fixed part of activation frame
__ pushq(rax); // save return address __ push(rax); // save return address
__ enter(); // save old & set new rbp __ enter(); // save old & set new rbp
__ pushq(r13); // set sender sp __ push(r13); // set sender sp
__ pushq((int)NULL_WORD); // leave last_sp as null __ push((int)NULL_WORD); // leave last_sp as null
__ movq(r13, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop __ movptr(r13, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop
__ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
__ pushq(rbx); // save methodOop __ push(rbx); // save methodOop
if (ProfileInterpreter) { if (ProfileInterpreter) {
Label method_data_continue; Label method_data_continue;
__ movq(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset()))); __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
__ testq(rdx, rdx); __ testptr(rdx, rdx);
__ jcc(Assembler::zero, method_data_continue); __ jcc(Assembler::zero, method_data_continue);
__ addq(rdx, in_bytes(methodDataOopDesc::data_offset())); __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
__ bind(method_data_continue); __ bind(method_data_continue);
__ pushq(rdx); // set the mdp (method data pointer) __ push(rdx); // set the mdp (method data pointer)
} else { } else {
__ pushq(0); __ push(0);
} }
__ movq(rdx, Address(rbx, methodOopDesc::constants_offset())); __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
__ movq(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes())); __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
__ pushq(rdx); // set constant pool cache __ push(rdx); // set constant pool cache
__ pushq(r14); // set locals pointer __ push(r14); // set locals pointer
if (native_call) { if (native_call) {
__ pushq(0); // no bcp __ push(0); // no bcp
} else { } else {
__ pushq(r13); // set bcp __ push(r13); // set bcp
} }
__ pushq(0); // reserve word for pointer to expression stack bottom __ push(0); // reserve word for pointer to expression stack bottom
__ movq(Address(rsp, 0), rsp); // set expression stack bottom __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
} }
// End of helpers // End of helpers
// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//
// Call an accessor method (assuming it is resolved; otherwise drop
// into the vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {
// rbx: methodOop
// r13: senderSP must be preserved for the slow path; set SP to it on the fast path
address entry_point = __ pc();
Label xreturn_path;
// do fastpath for resolved accessor methods
if (UseFastAccessorMethods) {
// Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
// thereof; parameter size = 1
// Note: We can only use this code if the getfield has been resolved
// and if we don't have a null-pointer exception => check for
// these conditions first and use slow path if necessary.
Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
__ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
SafepointSynchronize::_not_synchronized);
__ jcc(Assembler::notEqual, slow_path);
// rbx: method
__ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
__ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
__ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
// read first instruction word and extract bytecode @ 1 and index @ 2
__ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
__ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2 * BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
__ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
// rax: local 0
// rbx: method
// rdx: constant pool cache index
// rdi: constant pool cache
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
// contains Bytecode::_getfield in b1 byte.
assert(in_words(ConstantPoolCacheEntry::size()) == 4,
"adjust shift below");
__ movl(rcx,
Address(rdi,
rdx,
Address::times_8,
constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2 * BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
__ movptr(rcx,
Address(rdi,
rdx,
Address::times_8,
constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f2_offset()));
// edx: flags
__ movl(rdx,
Address(rdi,
rdx,
Address::times_8,
constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
Label notObj, notInt, notByte, notShort;
const Address field_address(rax, rcx, Address::times_1);
// Need to differentiate between igetfield, agetfield, bgetfield etc.
// because they are different sizes.
// Use the type from the constant pool cache
__ shrl(rdx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask edx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
__ cmpl(rdx, atos);
__ jcc(Assembler::notEqual, notObj);
// atos
__ load_heap_oop(rax, field_address);
__ jmp(xreturn_path);
__ bind(notObj);
__ cmpl(rdx, itos);
__ jcc(Assembler::notEqual, notInt);
// itos
__ movl(rax, field_address);
__ jmp(xreturn_path);
__ bind(notInt);
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
// btos
__ load_signed_byte(rax, field_address);
__ jmp(xreturn_path);
__ bind(notByte);
__ cmpl(rdx, stos);
__ jcc(Assembler::notEqual, notShort);
// stos
__ load_signed_word(rax, field_address);
__ jmp(xreturn_path);
__ bind(notShort);
#ifdef ASSERT
Label okay;
__ cmpl(rdx, ctos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
__ bind(okay);
#endif
// ctos
__ load_unsigned_word(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
__ pop(rdi);
__ mov(rsp, r13);
__ jmp(rdi);
__ ret(0);
// generate a vanilla interpreter entry as the slow path
__ bind(slow_path);
(void) generate_normal_entry(false);
} else {
(void) generate_normal_entry(false);
}
return entry_point;
}
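The dispatch above shifts the cache entry's flags word down by tosBits and compares against the tos states so each field type gets a correctly sized load. A C++ rendering of that switch, restricted to the cases the fast path handles; the enum values and widths are illustrative:

    #include <cstdint>
    #include <cstring>

    enum TosSketch { btos_s, ctos_s, stos_s, itos_s, atos_s };

    static intptr_t read_field(const char* base, size_t off, TosSketch t) {
      switch (t) {
        case btos_s: { int8_t   v; std::memcpy(&v, base + off, sizeof v); return v; }
        case ctos_s: { uint16_t v; std::memcpy(&v, base + off, sizeof v); return v; }
        case stos_s: { int16_t  v; std::memcpy(&v, base + off, sizeof v); return v; }
        case itos_s: { int32_t  v; std::memcpy(&v, base + off, sizeof v); return v; }
        case atos_s: { intptr_t v; std::memcpy(&v, base + off, sizeof v); return v; }
      }
      return 0;  // unreachable for the cases above
    }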
// Interpreter stub for calling a native method. (asm interpreter) // Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the // This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup. // native method than the typical interpreter frame setup.
@ -561,20 +711,20 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// rbx: methodOop // rbx: methodOop
// rcx: size of parameters // rcx: size of parameters
// r13: sender sp // r13: sender sp
__ popq(rax); // get return address __ pop(rax); // get return address
// for natives the size of locals is zero // for natives the size of locals is zero
// compute beginning of parameters (r14) // compute beginning of parameters (r14)
if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter. if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
__ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize)); __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
// add 2 zero-initialized slots for native calls // add 2 zero-initialized slots for native calls
// initialize result_handler slot // initialize result_handler slot
__ pushq((int) NULL); __ push((int) NULL_WORD);
// slot for oop temp // slot for oop temp
// (static native method holder mirror/jni oop result) // (static native method holder mirror/jni oop result)
__ pushq((int) NULL); __ push((int) NULL_WORD);
if (inc_counter) { if (inc_counter) {
__ movl(rcx, invocation_counter); // (pre-)fetch invocation count __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
@ -651,8 +801,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
Label L; Label L;
const Address monitor_block_top(rbp, const Address monitor_block_top(rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize); frame::interpreter_frame_monitor_block_top_offset * wordSize);
__ movq(rax, monitor_block_top); __ movptr(rax, monitor_block_top);
__ cmpq(rax, rsp); __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter"); __ stop("broken stack frame setup in interpreter");
__ bind(L); __ bind(L);
@ -674,22 +824,22 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
methodOopDesc::size_of_parameters_offset())); methodOopDesc::size_of_parameters_offset()));
__ shll(t, Interpreter::logStackElementSize()); __ shll(t, Interpreter::logStackElementSize());
__ subq(rsp, t); __ subptr(rsp, t);
__ subq(rsp, frame::arg_reg_save_area_bytes); // windows __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andq(rsp, -16); // must be 16 byte boundary (see amd64 ABI) __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
// get signature handler // get signature handler
{ {
Label L; Label L;
__ movq(t, Address(method, methodOopDesc::signature_handler_offset())); __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
__ testq(t, t); __ testptr(t, t);
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ call_VM(noreg, __ call_VM(noreg,
CAST_FROM_FN_PTR(address, CAST_FROM_FN_PTR(address,
InterpreterRuntime::prepare_native_call), InterpreterRuntime::prepare_native_call),
method); method);
__ get_method(method); __ get_method(method);
__ movq(t, Address(method, methodOopDesc::signature_handler_offset())); __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
__ bind(L); __ bind(L);
} }
@ -711,9 +861,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// result handler is in rax // result handler is in rax
// set result handler // set result handler
__ movq(Address(rbp, __ movptr(Address(rbp,
(frame::interpreter_frame_result_handler_offset) * wordSize), (frame::interpreter_frame_result_handler_offset) * wordSize),
rax); rax);
// pass mirror handle if static call // pass mirror handle if static call
{ {
@ -724,25 +874,25 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ testl(t, JVM_ACC_STATIC); __ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);
// get mirror // get mirror
__ movq(t, Address(method, methodOopDesc::constants_offset())); __ movptr(t, Address(method, methodOopDesc::constants_offset()));
__ movq(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes())); __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
__ movq(t, Address(t, mirror_offset)); __ movptr(t, Address(t, mirror_offset));
// copy mirror into activation frame // copy mirror into activation frame
__ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
t); t);
// pass handle to mirror // pass handle to mirror
__ leaq(c_rarg1, __ lea(c_rarg1,
Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize)); Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
__ bind(L); __ bind(L);
} }
// get native function entry point // get native function entry point
{ {
Label L; Label L;
__ movq(rax, Address(method, methodOopDesc::native_function_offset())); __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ movptr(rscratch2, unsatisfied.addr()); __ movptr(rscratch2, unsatisfied.addr());
__ cmpq(rax, rscratch2); __ cmpptr(rax, rscratch2);
__ jcc(Assembler::notEqual, L); __ jcc(Assembler::notEqual, L);
__ call_VM(noreg, __ call_VM(noreg,
CAST_FROM_FN_PTR(address, CAST_FROM_FN_PTR(address,
@ -750,12 +900,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
method); method);
__ get_method(method); __ get_method(method);
__ verify_oop(method); __ verify_oop(method);
__ movq(rax, Address(method, methodOopDesc::native_function_offset())); __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
__ bind(L); __ bind(L);
} }
// pass JNIEnv // pass JNIEnv
__ leaq(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset())); __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
// It is enough that the pc() points into the right code // It is enough that the pc() points into the right code
// segment. It does not have to be the correct return pc. // segment. It does not have to be the correct return pc.
@ -786,10 +936,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// register after returning from the JNI Call or verify that // register after returning from the JNI Call or verify that
// it wasn't changed during -Xcheck:jni. // it wasn't changed during -Xcheck:jni.
if (RestoreMXCSROnJNICalls) { if (RestoreMXCSROnJNICalls) {
__ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std())); __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
} }
else if (CheckJNICalls) { else if (CheckJNICalls) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
} }
// NOTE: The order of these pushes is known to frame::interpreter_frame_result // NOTE: The order of these pushes is known to frame::interpreter_frame_result
@ -838,12 +988,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// preserved and correspond to the bcp/locals pointers. So we do a // preserved and correspond to the bcp/locals pointers. So we do a
// runtime call by hand. // runtime call by hand.
// //
__ movq(c_rarg0, r15_thread); __ mov(c_rarg0, r15_thread);
__ movq(r12, rsp); // remember sp __ mov(r12, rsp); // remember sp
__ subq(rsp, frame::arg_reg_save_area_bytes); // windows __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andq(rsp, -16); // align stack as required by ABI __ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ movq(rsp, r12); // restore sp __ mov(rsp, r12); // restore sp
__ reinit_heapbase(); __ reinit_heapbase();
__ bind(Continue); __ bind(Continue);
} }
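The hand-made runtime call above must leave rsp where the native ABI expects it: 16-byte aligned at the call, with (on Windows) an argument register save area reserved; hence the code saves rsp in r12, subtracts, masks with -16, and restores afterwards. The alignment arithmetic alone, as a sketch:

    #include <cstddef>
    #include <cstdint>

    static uintptr_t aligned_call_sp(uintptr_t rsp, size_t arg_reg_save_area_bytes) {
      uintptr_t sp = rsp - arg_reg_save_area_bytes;  // subptr(rsp, frame::arg_reg_save_area_bytes)
      return sp & ~(uintptr_t)15;                    // andptr(rsp, -16)
    }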
@ -855,8 +1005,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ reset_last_Java_frame(true, true); __ reset_last_Java_frame(true, true);
// reset handle block // reset handle block
__ movq(t, Address(r15_thread, JavaThread::active_handles_offset())); __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
__ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD); __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
// If result is an oop unbox and store it in frame where gc will see it // If result is an oop unbox and store it in frame where gc will see it
// and result handler will pick it up // and result handler will pick it up
@ -864,15 +1014,15 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
{ {
Label no_oop, store_result; Label no_oop, store_result;
__ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT))); __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
__ cmpq(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize)); __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
__ jcc(Assembler::notEqual, no_oop); __ jcc(Assembler::notEqual, no_oop);
// retrieve result // retrieve result
__ pop(ltos); __ pop(ltos);
__ testq(rax, rax); __ testptr(rax, rax);
__ jcc(Assembler::zero, store_result); __ jcc(Assembler::zero, store_result);
__ movq(rax, Address(rax, 0)); __ movptr(rax, Address(rax, 0));
__ bind(store_result); __ bind(store_result);
__ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax); __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
// keep stack depth as expected by pushing oop which will eventually be discarded // keep stack depth as expected by pushing oop which will eventually be discarded
__ push(ltos); __ push(ltos);
__ bind(no_oop); __ bind(no_oop);
@ -885,13 +1035,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
JavaThread::stack_guard_yellow_disabled); JavaThread::stack_guard_yellow_disabled);
__ jcc(Assembler::notEqual, no_reguard); __ jcc(Assembler::notEqual, no_reguard);
__ pushaq(); // XXX only save smashed registers __ pusha(); // XXX only save smashed registers
__ movq(r12, rsp); // remember sp __ mov(r12, rsp); // remember sp
__ subq(rsp, frame::arg_reg_save_area_bytes); // windows __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andq(rsp, -16); // align stack as required by ABI __ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
__ movq(rsp, r12); // restore sp __ mov(rsp, r12); // restore sp
__ popaq(); // XXX only restore smashed registers __ popa(); // XXX only restore smashed registers
__ reinit_heapbase(); __ reinit_heapbase();
__ bind(no_reguard); __ bind(no_reguard);
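The reguard sequence runs when the thread returns from native code and finds its yellow guard zone disabled (a stack overflow was handled while the guard was down). A hedged sketch of what re-enabling the guard amounts to on a POSIX system; the guard region's base and size are illustrative parameters:

#include <cstddef>
#include <sys/mman.h>

// Re-protect the guard pages at the stack limit so the next overflow
// traps again; until then the thread runs without overflow detection.
static int reguard_yellow_pages_sketch(void* guard_base, size_t guard_size) {
  return mprotect(guard_base, guard_size, PROT_NONE);
}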
@ -906,12 +1056,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// restore r13 to have legal interpreter frame, i.e., bci == 0 <=> // restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
// r13 == code_base() // r13 == code_base()
__ movq(r13, Address(method, methodOopDesc::const_offset())); // get constMethodOop __ movptr(r13, Address(method, methodOopDesc::const_offset())); // get constMethodOop
__ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
// handle exceptions (exception handling will handle unlocking!) // handle exceptions (exception handling will handle unlocking!)
{ {
Label L; Label L;
__ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL); __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::zero, L); __ jcc(Assembler::zero, L);
// Note: At some point we may want to unify this with the code // Note: At some point we may want to unify this with the code
// used in call_VM_base(); i.e., we should use the // used in call_VM_base(); i.e., we should use the
@ -942,10 +1092,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
wordSize - sizeof(BasicObjectLock))); wordSize - sizeof(BasicObjectLock)));
// monitor expected in c_rarg1 for slow unlock path // monitor expected in c_rarg1 for slow unlock path
__ leaq(c_rarg1, monitor); // address of first monitor __ lea(c_rarg1, monitor); // address of first monitor
__ movq(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
__ testq(t, t); __ testptr(t, t);
__ jcc(Assembler::notZero, unlock); __ jcc(Assembler::notZero, unlock);
// Entry already unlocked, need to throw exception // Entry already unlocked, need to throw exception
@ -973,17 +1123,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ pop(ltos); __ pop(ltos);
__ pop(dtos); __ pop(dtos);
__ movq(t, Address(rbp, __ movptr(t, Address(rbp,
(frame::interpreter_frame_result_handler_offset) * wordSize)); (frame::interpreter_frame_result_handler_offset) * wordSize));
__ call(t); __ call(t);
// remove activation // remove activation
__ movq(t, Address(rbp, __ movptr(t, Address(rbp,
frame::interpreter_frame_sender_sp_offset * frame::interpreter_frame_sender_sp_offset *
wordSize)); // get sender sp wordSize)); // get sender sp
__ leave(); // remove frame anchor __ leave(); // remove frame anchor
__ popq(rdi); // get return address __ pop(rdi); // get return address
__ movq(rsp, t); // set sp to sender sp __ mov(rsp, t); // set sp to sender sp
__ jmp(rdi); __ jmp(rdi);
if (inc_counter) { if (inc_counter) {
@ -1032,11 +1182,11 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
generate_stack_overflow_check(); generate_stack_overflow_check();
// get return address // get return address
__ popq(rax); __ pop(rax);
// compute beginning of parameters (r14) // compute beginning of parameters (r14)
if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter. if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
__ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize)); __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
// rdx - # of additional locals // rdx - # of additional locals
// allocate space for locals // allocate space for locals
@ -1046,8 +1196,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ testl(rdx, rdx); __ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0 __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop); __ bind(loop);
if (TaggedStackInterpreter) __ pushq((int) NULL); // push tag if (TaggedStackInterpreter) __ push((int) NULL_WORD); // push tag
__ pushq((int) NULL); // initialize local variables __ push((int) NULL_WORD); // initialize local variables
__ decrementl(rdx); // until everything initialized __ decrementl(rdx); // until everything initialized
__ jcc(Assembler::greater, loop); __ jcc(Assembler::greater, loop);
__ bind(exit); __ bind(exit);
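In C++ terms the loop above is a simple downward fill: one NULL word per additional local, plus a tag word when the tagged-stack interpreter is enabled. A hedged rendering:

#include <cstdint>

static intptr_t* zero_locals_sketch(intptr_t* sp, int extra_locals, bool tagged) {
  for (int i = extra_locals; i > 0; --i) {  // decrementl(rdx) until done
    if (tagged) *--sp = 0;                  // push tag
    *--sp = 0;                              // initialize local variable
  }
  return sp;
}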
@ -1137,8 +1287,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
Label L; Label L;
const Address monitor_block_top (rbp, const Address monitor_block_top (rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize); frame::interpreter_frame_monitor_block_top_offset * wordSize);
__ movq(rax, monitor_block_top); __ movptr(rax, monitor_block_top);
__ cmpq(rax, rsp); __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter"); __ stop("broken stack frame setup in interpreter");
__ bind(L); __ bind(L);
@ -1160,14 +1310,14 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
r13, true); r13, true);
__ movq(rbx, Address(rbp, method_offset)); // restore methodOop __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
__ movq(rax, Address(rbx, __ movptr(rax, Address(rbx,
in_bytes(methodOopDesc::method_data_offset()))); in_bytes(methodOopDesc::method_data_offset())));
__ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
rax); rax);
__ test_method_data_pointer(rax, profile_method_continue); __ test_method_data_pointer(rax, profile_method_continue);
__ addq(rax, in_bytes(methodDataOopDesc::data_offset())); __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
__ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
rax); rax);
__ jmp(profile_method_continue); __ jmp(profile_method_continue);
} }
@ -1357,7 +1507,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
Interpreter::_rethrow_exception_entry = __ pc(); Interpreter::_rethrow_exception_entry = __ pc();
// Restore sp to interpreter_frame_last_sp even though we are going // Restore sp to interpreter_frame_last_sp even though we are going
// to empty the expression stack for the exception processing. // to empty the expression stack for the exception processing.
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
// rax: exception // rax: exception
// rdx: return address/pc that threw exception // rdx: return address/pc that threw exception
__ restore_bcp(); // r13 points to call/send __ restore_bcp(); // r13 points to call/send
@ -1369,7 +1519,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// rax: exception // rax: exception
// r13: exception bcp // r13: exception bcp
__ verify_oop(rax); __ verify_oop(rax);
__ movq(c_rarg1, rax); __ mov(c_rarg1, rax);
// expression stack must be empty before entering the VM in case of // expression stack must be empty before entering the VM in case of
// an exception // an exception
@ -1424,7 +1574,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// deoptimization blob's unpack entry because of the presence of // deoptimization blob's unpack entry because of the presence of
// adapter frames in C2. // adapter frames in C2.
Label caller_not_deoptimized; Label caller_not_deoptimized;
__ movq(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize)); __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
InterpreterRuntime::interpreter_contains), c_rarg1); InterpreterRuntime::interpreter_contains), c_rarg1);
__ testl(rax, rax); __ testl(rax, rax);
@ -1437,8 +1587,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
size_of_parameters_offset()))); size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize()); __ shll(rax, Interpreter::logStackElementSize());
__ restore_locals(); // XXX do we need this? __ restore_locals(); // XXX do we need this?
__ subq(r14, rax); __ subptr(r14, rax);
__ addq(r14, wordSize); __ addptr(r14, wordSize);
// Save these arguments // Save these arguments
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
Deoptimization:: Deoptimization::
@ -1477,15 +1627,15 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// maintain this kind of invariant all the time we call a small // maintain this kind of invariant all the time we call a small
// fixup routine to move the mutated arguments onto the top of our // fixup routine to move the mutated arguments onto the top of our
// expression stack if necessary. // expression stack if necessary.
__ movq(c_rarg1, rsp); __ mov(c_rarg1, rsp);
__ movq(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// PC must point into interpreter here // PC must point into interpreter here
__ set_last_Java_frame(noreg, rbp, __ pc()); __ set_last_Java_frame(noreg, rbp, __ pc());
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2); __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
__ reset_last_Java_frame(true, true); __ reset_last_Java_frame(true, true);
// Restore the last_sp and null it out // Restore the last_sp and null it out
__ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize)); __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp(); // XXX do we need this? __ restore_bcp(); // XXX do we need this?
__ restore_locals(); // XXX do we need this? __ restore_locals(); // XXX do we need this?
@ -1506,12 +1656,12 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// preserve exception over this code sequence // preserve exception over this code sequence
__ pop_ptr(rax); __ pop_ptr(rax);
__ movq(Address(r15_thread, JavaThread::vm_result_offset()), rax); __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
// remove the activation (without doing throws on illegalMonitorExceptions) // remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false); __ remove_activation(vtos, rdx, false, true, false);
// restore exception // restore exception
__ movq(rax, Address(r15_thread, JavaThread::vm_result_offset())); __ movptr(rax, Address(r15_thread, JavaThread::vm_result_offset()));
__ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD); __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
__ verify_oop(rax); __ verify_oop(rax);
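The recurring (int32_t)NULL_WORD cast in this changeset is an encoding hint, not a semantic change: an x86-64 store takes at most a 32-bit immediate, which the hardware sign-extends to 64 bits, so writing a null word never needs a 64-bit constant or a scratch register. A tiny sketch of the sign-extension rule:

#include <cassert>
#include <cstdint>

int main() {
  int32_t imm = 0;                           // (int32_t)NULL_WORD
  uint64_t stored = (uint64_t)(int64_t)imm;  // what "mov r/m64, imm32" writes
  assert(stored == 0);                       // a full 64-bit null word
  return 0;
}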
// In between activations - previous activation type unknown yet // In between activations - previous activation type unknown yet
@ -1522,14 +1672,14 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// rdx: return address/pc that threw exception // rdx: return address/pc that threw exception
// rsp: expression stack of caller // rsp: expression stack of caller
// rbp: ebp of caller // rbp: ebp of caller
__ pushq(rax); // save exception __ push(rax); // save exception
__ pushq(rdx); // save return address __ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address), SharedRuntime::exception_handler_for_return_address),
rdx); rdx);
__ movq(rbx, rax); // save exception handler __ mov(rbx, rax); // save exception handler
__ popq(rdx); // restore return address __ pop(rdx); // restore return address
__ popq(rax); // restore exception __ pop(rax); // restore exception
// Note that an "issuing PC" is actually the next PC after the call // Note that an "issuing PC" is actually the next PC after the call
__ jmp(rbx); // jump to exception __ jmp(rbx); // jump to exception
// handler of caller // handler of caller
@ -1547,7 +1697,7 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ empty_expression_stack(); __ empty_expression_stack();
__ load_earlyret_value(state); __ load_earlyret_value(state);
__ movq(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset())); __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset()); Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state // Clear the earlyret state
@ -1609,21 +1759,21 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
address entry = __ pc(); address entry = __ pc();
__ push(state); __ push(state);
__ pushq(c_rarg0); __ push(c_rarg0);
__ pushq(c_rarg1); __ push(c_rarg1);
__ pushq(c_rarg2); __ push(c_rarg2);
__ pushq(c_rarg3); __ push(c_rarg3);
__ movq(c_rarg2, rax); // Pass itos __ mov(c_rarg2, rax); // Pass itos
#ifdef _WIN64 #ifdef _WIN64
__ movflt(xmm3, xmm0); // Pass ftos __ movflt(xmm3, xmm0); // Pass ftos
#endif #endif
__ call_VM(noreg, __ call_VM(noreg,
CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
c_rarg1, c_rarg2, c_rarg3); c_rarg1, c_rarg2, c_rarg3);
__ popq(c_rarg3); __ pop(c_rarg3);
__ popq(c_rarg2); __ pop(c_rarg2);
__ popq(c_rarg1); __ pop(c_rarg1);
__ popq(c_rarg0); __ pop(c_rarg0);
__ pop(state); __ pop(state);
__ ret(0); // return from result handler __ ret(0); // return from result handler
@ -1657,10 +1807,10 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
assert(Interpreter::trace_code(t->tos_in()) != NULL, assert(Interpreter::trace_code(t->tos_in()) != NULL,
"entry must have been generated"); "entry must have been generated");
__ movq(r12, rsp); // remember sp __ mov(r12, rsp); // remember sp
__ andq(rsp, -16); // align stack as required by ABI __ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in()))); __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
__ movq(rsp, r12); // restore sp __ mov(rsp, r12); // restore sp
__ reinit_heapbase(); __ reinit_heapbase();
} }
@ -1674,3 +1824,4 @@ void TemplateInterpreterGenerator::stop_interpreter_at() {
__ bind(L); __ bind(L);
} }
#endif // !PRODUCT #endif // !PRODUCT
#endif // ! CC_INTERP
File diff suppressed because it is too large
@ -26,7 +26,7 @@
Bytecodes::Code code); Bytecodes::Code code);
static void invokevirtual_helper(Register index, Register recv, static void invokevirtual_helper(Register index, Register recv,
Register flags); Register flags);
static void volatile_barrier( ); static void volatile_barrier(Assembler::Membar_mask_bits order_constraint );
// Helpers // Helpers
static void index_check(Register array, Register index); static void index_check(Register array, Register index);
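The new signature makes the caller state which ordering edges the barrier must enforce rather than baking one choice into the helper. A standalone sketch of the mask idiom; the enumerator names mirror HotSpot's Assembler::Membar_mask_bits, but this enum and function are illustrative only:

#include <cstdio>

enum MembarMaskSketch {        // each bit is one ordering edge to enforce
  LoadLoad   = 1 << 0,
  StoreLoad  = 1 << 1,
  LoadStore  = 1 << 2,
  StoreStore = 1 << 3
};

static void volatile_barrier_sketch(int order_constraint) {
  // On x86 only StoreLoad needs a real fence; the other edges are already
  // guaranteed by the hardware memory model.
  if (order_constraint & StoreLoad) std::puts("emit locked op / mfence");
}

int main() {
  volatile_barrier_sketch(StoreLoad | StoreStore);  // e.g. after a volatile store
  return 0;
}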
File diff suppressed because it is too large
@ -67,23 +67,23 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// //
// void getPsrInfo(VM_Version::CpuidInfo* cpuid_info); // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
// //
__ pushl(rbp); __ push(rbp);
__ movl(rbp, Address(rsp, 8)); // cpuid_info address __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
__ pushl(rbx); __ push(rbx);
__ pushl(rsi); __ push(rsi);
__ pushfd(); // preserve rbx, and flags __ pushf(); // preserve rbx, and flags
__ popl(rax); __ pop(rax);
__ pushl(rax); __ push(rax);
__ movl(rcx, rax); __ mov(rcx, rax);
// //
// if we are unable to change the AC flag, we have a 386 // if we are unable to change the AC flag, we have a 386
// //
__ xorl(rax, EFL_AC); __ xorl(rax, EFL_AC);
__ pushl(rax); __ push(rax);
__ popfd(); __ popf();
__ pushfd(); __ pushf();
__ popl(rax); __ pop(rax);
__ cmpl(rax, rcx); __ cmpptr(rax, rcx);
__ jccb(Assembler::notEqual, detect_486); __ jccb(Assembler::notEqual, detect_486);
__ movl(rax, CPU_FAMILY_386); __ movl(rax, CPU_FAMILY_386);
@ -95,13 +95,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// not support the "cpuid" instruction. // not support the "cpuid" instruction.
// //
__ bind(detect_486); __ bind(detect_486);
__ movl(rax, rcx); __ mov(rax, rcx);
__ xorl(rax, EFL_ID); __ xorl(rax, EFL_ID);
__ pushl(rax); __ push(rax);
__ popfd(); __ popf();
__ pushfd(); __ pushf();
__ popl(rax); __ pop(rax);
__ cmpl(rcx, rax); __ cmpptr(rcx, rax);
__ jccb(Assembler::notEqual, detect_586); __ jccb(Assembler::notEqual, detect_586);
__ bind(cpu486); __ bind(cpu486);
@ -113,13 +113,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// at this point, we have a chip which supports the "cpuid" instruction // at this point, we have a chip which supports the "cpuid" instruction
// //
__ bind(detect_586); __ bind(detect_586);
__ xorl(rax, rax); __ xorptr(rax, rax);
__ cpuid(); __ cpuid();
__ orl(rax, rax); __ orptr(rax, rax);
__ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input
// value of at least 1, we give up and // value of at least 1, we give up and
// assume a 486 // assume a 486
__ leal(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -134,13 +134,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(rax, 4); // and rcx already set to 0x0 __ movl(rax, 4); // and rcx already set to 0x0
__ xorl(rcx, rcx); __ xorl(rcx, rcx);
__ cpuid(); __ cpuid();
__ pushl(rax); __ push(rax);
__ andl(rax, 0x1f); // Determine if valid cache parameters used __ andl(rax, 0x1f); // Determine if valid cache parameters used
__ orl(rax, rax); // rax,[4:0] == 0 indicates invalid cache __ orl(rax, rax); // rax,[4:0] == 0 indicates invalid cache
__ popl(rax); __ pop(rax);
__ jccb(Assembler::equal, std_cpuid1); __ jccb(Assembler::equal, std_cpuid1);
__ leal(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -152,7 +152,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(std_cpuid1); __ bind(std_cpuid1);
__ movl(rax, 1); __ movl(rax, 1);
__ cpuid(); __ cpuid();
__ leal(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -171,7 +171,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// //
__ movl(rax, 0x80000008); __ movl(rax, 0x80000008);
__ cpuid(); __ cpuid();
__ leal(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -183,7 +183,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(ext_cpuid5); __ bind(ext_cpuid5);
__ movl(rax, 0x80000005); __ movl(rax, 0x80000005);
__ cpuid(); __ cpuid();
__ leal(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -195,7 +195,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(ext_cpuid1); __ bind(ext_cpuid1);
__ movl(rax, 0x80000001); __ movl(rax, 0x80000001);
__ cpuid(); __ cpuid();
__ leal(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -205,10 +205,10 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// return // return
// //
__ bind(done); __ bind(done);
__ popfd(); __ popf();
__ popl(rsi); __ pop(rsi);
__ popl(rbx); __ pop(rbx);
__ popl(rbp); __ pop(rbp);
__ ret(0); __ ret(0);
# undef __ # undef __
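The 32-bit stub above first toggles EFLAGS.AC and EFLAGS.ID (the pushf/popf dance) to tell a 386 from a 486 from a cpuid-capable part, then fills the CpuidInfo block one leaf at a time. A hedged sketch of the same data gathering using the GCC/Clang <cpuid.h> helper; the feature-bit positions are architectural:

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 1;                        // no cpuid: the stub's 386/486 paths
  std::printf("std_cpuid1: eax=%#x ecx=%#x edx=%#x\n", eax, ecx, edx);
  std::printf("sse2=%u cmpxchg8b=%u\n", (edx >> 26) & 1, (edx >> 8) & 1);
  return 0;
}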
@ -60,17 +60,17 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// //
// rcx and rdx are first and second argument registers on windows // rcx and rdx are first and second argument registers on windows
__ pushq(rbp); __ push(rbp);
__ movq(rbp, c_rarg0); // cpuid_info address __ mov(rbp, c_rarg0); // cpuid_info address
__ pushq(rbx); __ push(rbx);
__ pushq(rsi); __ push(rsi);
// //
// we have a chip which supports the "cpuid" instruction // we have a chip which supports the "cpuid" instruction
// //
__ xorl(rax, rax); __ xorl(rax, rax);
__ cpuid(); __ cpuid();
__ leaq(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -85,13 +85,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(rax, 4); __ movl(rax, 4);
__ xorl(rcx, rcx); // L1 cache __ xorl(rcx, rcx); // L1 cache
__ cpuid(); __ cpuid();
__ pushq(rax); __ push(rax);
__ andl(rax, 0x1f); // Determine if valid cache parameters used __ andl(rax, 0x1f); // Determine if valid cache parameters used
__ orl(rax, rax); // eax[4:0] == 0 indicates invalid cache __ orl(rax, rax); // eax[4:0] == 0 indicates invalid cache
__ popq(rax); __ pop(rax);
__ jccb(Assembler::equal, std_cpuid1); __ jccb(Assembler::equal, std_cpuid1);
__ leaq(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -103,7 +103,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(std_cpuid1); __ bind(std_cpuid1);
__ movl(rax, 1); __ movl(rax, 1);
__ cpuid(); __ cpuid();
__ leaq(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -122,7 +122,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// //
__ movl(rax, 0x80000008); __ movl(rax, 0x80000008);
__ cpuid(); __ cpuid();
__ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -134,7 +134,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(ext_cpuid5); __ bind(ext_cpuid5);
__ movl(rax, 0x80000005); __ movl(rax, 0x80000005);
__ cpuid(); __ cpuid();
__ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -146,7 +146,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(ext_cpuid1); __ bind(ext_cpuid1);
__ movl(rax, 0x80000001); __ movl(rax, 0x80000001);
__ cpuid(); __ cpuid();
__ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset()))); __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
__ movl(Address(rsi, 0), rax); __ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx); __ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx); __ movl(Address(rsi, 8), rcx);
@ -156,9 +156,9 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// return // return
// //
__ bind(done); __ bind(done);
__ popq(rsi); __ pop(rsi);
__ popq(rbx); __ pop(rbx);
__ popq(rbp); __ pop(rbp);
__ ret(0); __ ret(0);
# undef __ # undef __
@ -49,7 +49,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#ifndef PRODUCT #ifndef PRODUCT
if (CountCompiledCalls) { if (CountCompiledCalls) {
__ increment(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
} }
#endif /* PRODUCT */ #endif /* PRODUCT */
@ -58,7 +58,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// get receiver klass // get receiver klass
address npe_addr = __ pc(); address npe_addr = __ pc();
__ movl(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
// compute entry offset (in words) // compute entry offset (in words)
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT #ifndef PRODUCT
@ -76,12 +76,12 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const Register method = rbx; const Register method = rbx;
// load methodOop and target address // load methodOop and target address
__ movl(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes())); __ movptr(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
if (DebugVtables) { if (DebugVtables) {
Label L; Label L;
__ cmpl(method, NULL_WORD); __ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ cmpl(Address(method, methodOopDesc::from_compiled_offset()), NULL_WORD); __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL"); __ stop("Vtable entry is NULL");
__ bind(L); __ bind(L);
@ -114,7 +114,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
#ifndef PRODUCT #ifndef PRODUCT
if (CountCompiledCalls) { if (CountCompiledCalls) {
__ increment(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
} }
#endif /* PRODUCT */ #endif /* PRODUCT */
// get receiver (need to skip return address on top of stack) // get receiver (need to skip return address on top of stack)
@ -123,16 +123,16 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// get receiver klass (also an implicit null-check) // get receiver klass (also an implicit null-check)
address npe_addr = __ pc(); address npe_addr = __ pc();
__ movl(rbx, Address(rcx, oopDesc::klass_offset_in_bytes())); __ movptr(rbx, Address(rcx, oopDesc::klass_offset_in_bytes()));
__ movl(rsi, rbx); // Save klass in free register __ mov(rsi, rbx); // Save klass in free register
// Most registers are in use, so save a few // Most registers are in use, so save a few
__ pushl(rdx); __ push(rdx);
// compute itable entry offset (in words) // compute itable entry offset (in words)
const int base = instanceKlass::vtable_start_offset() * wordSize; const int base = instanceKlass::vtable_start_offset() * wordSize;
assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below"); assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
__ movl(rdx, Address(rbx, instanceKlass::vtable_length_offset() * wordSize)); // Get length of vtable __ movl(rdx, Address(rbx, instanceKlass::vtable_length_offset() * wordSize)); // Get length of vtable
__ leal(rbx, Address(rbx, rdx, Address::times_4, base)); __ lea(rbx, Address(rbx, rdx, Address::times_ptr, base));
if (HeapWordsPerLong > 1) { if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary // Round up to align_object_offset boundary
__ round_to(rbx, BytesPerLong); __ round_to(rbx, BytesPerLong);
@ -143,16 +143,16 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ jmpb(entry); __ jmpb(entry);
__ bind(next); __ bind(next);
__ addl(rbx, itableOffsetEntry::size() * wordSize); __ addptr(rbx, itableOffsetEntry::size() * wordSize);
__ bind(entry); __ bind(entry);
// If the entry is NULL then we've reached the end of the table // If the entry is NULL then we've reached the end of the table
// without finding the expected interface, so throw an exception // without finding the expected interface, so throw an exception
__ movl(rdx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes())); __ movptr(rdx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
__ testl(rdx, rdx); __ testptr(rdx, rdx);
__ jcc(Assembler::zero, throw_icce); __ jcc(Assembler::zero, throw_icce);
__ cmpl(rax, rdx); __ cmpptr(rax, rdx);
__ jcc(Assembler::notEqual, next); __ jcc(Assembler::notEqual, next);
// We found a hit, move offset into rbx, // We found a hit, move offset into rbx,
@ -163,10 +163,10 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// Get methodOop and entrypoint for compiler // Get methodOop and entrypoint for compiler
const Register method = rbx; const Register method = rbx;
__ movl(method, Address(rsi, rdx, Address::times_1, method_offset)); __ movptr(method, Address(rsi, rdx, Address::times_1, method_offset));
// Restore saved register, before possible trap. // Restore saved register, before possible trap.
__ popl(rdx); __ pop(rdx);
// method (rbx): methodOop // method (rbx): methodOop
// rcx: receiver // rcx: receiver
@ -174,9 +174,9 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
#ifdef ASSERT #ifdef ASSERT
if (DebugVtables) { if (DebugVtables) {
Label L1; Label L1;
__ cmpl(method, NULL_WORD); __ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L1); __ jcc(Assembler::equal, L1);
__ cmpl(Address(method, methodOopDesc::from_compiled_offset()), NULL_WORD); __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L1); __ jcc(Assembler::notZero, L1);
__ stop("methodOop is null"); __ stop("methodOop is null");
__ bind(L1); __ bind(L1);
@ -188,7 +188,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ bind(throw_icce); __ bind(throw_icce);
// Restore saved register // Restore saved register
__ popl(rdx); __ pop(rdx);
__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
masm->flush(); masm->flush();
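The loop rewritten above is a linear scan of the receiver klass's itable: advance one itableOffsetEntry at a time until the interface matches, treating a NULL interface slot as the end-of-table sentinel that means IncompatibleClassChangeError. A hedged C++ rendering with an illustrative entry layout:

#include <stdexcept>

struct KlassSketch;                    // the interface being looked up
struct ItableOffsetEntrySketch {
  KlassSketch* interface_klass;        // NULL terminates the table
  int          offset;                 // where that interface's methods start
};

static int itable_offset_sketch(const ItableOffsetEntrySketch* e,
                                const KlassSketch* iface) {
  for (;; ++e) {                       // addptr(rbx, itableOffsetEntry::size)
    if (e->interface_klass == nullptr) // reached the end without a hit
      throw std::runtime_error("IncompatibleClassChangeError");
    if (e->interface_klass == iface)
      return e->offset;                // found: go to the method table
  }
}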
@ -79,14 +79,14 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// load methodOop and target address // load methodOop and target address
const Register method = rbx; const Register method = rbx;
__ movq(method, Address(rax, __ movptr(method, Address(rax,
entry_offset * wordSize + entry_offset * wordSize +
vtableEntry::method_offset_in_bytes())); vtableEntry::method_offset_in_bytes()));
if (DebugVtables) { if (DebugVtables) {
Label L; Label L;
__ cmpq(method, (int)NULL); __ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L); __ jcc(Assembler::equal, L);
__ cmpq(Address(method, methodOopDesc::from_compiled_offset()), (int)NULL_WORD); __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L); __ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL"); __ stop("Vtable entry is NULL");
__ bind(L); __ bind(L);
@ -138,7 +138,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// when there are mistakes in this assembly code that could generate // when there are mistakes in this assembly code that could generate
// a spurious fault. Ask me how I know... // a spurious fault. Ask me how I know...
__ pushq(j_rarg1); // Most registers are in use, so save one __ push(j_rarg1); // Most registers are in use, so save one
// compute itable entry offset (in words) // compute itable entry offset (in words)
const int base = instanceKlass::vtable_start_offset() * wordSize; const int base = instanceKlass::vtable_start_offset() * wordSize;
@ -147,27 +147,27 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// Get length of vtable // Get length of vtable
__ movl(j_rarg1, __ movl(j_rarg1,
Address(rbx, instanceKlass::vtable_length_offset() * wordSize)); Address(rbx, instanceKlass::vtable_length_offset() * wordSize));
__ leaq(rbx, Address(rbx, j_rarg1, Address::times_8, base)); __ lea(rbx, Address(rbx, j_rarg1, Address::times_8, base));
if (HeapWordsPerLong > 1) { if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary // Round up to align_object_offset boundary
__ round_to_q(rbx, BytesPerLong); __ round_to(rbx, BytesPerLong);
} }
Label hit, next, entry, throw_icce; Label hit, next, entry, throw_icce;
__ jmpb(entry); __ jmpb(entry);
__ bind(next); __ bind(next);
__ addq(rbx, itableOffsetEntry::size() * wordSize); __ addptr(rbx, itableOffsetEntry::size() * wordSize);
__ bind(entry); __ bind(entry);
// If the entry is NULL then we've reached the end of the table // If the entry is NULL then we've reached the end of the table
// without finding the expected interface, so throw an exception // without finding the expected interface, so throw an exception
__ movq(j_rarg1, Address(rbx, itableOffsetEntry::interface_offset_in_bytes())); __ movptr(j_rarg1, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
__ testq(j_rarg1, j_rarg1); __ testptr(j_rarg1, j_rarg1);
__ jcc(Assembler::zero, throw_icce); __ jcc(Assembler::zero, throw_icce);
__ cmpq(rax, j_rarg1); __ cmpptr(rax, j_rarg1);
__ jccb(Assembler::notEqual, next); __ jccb(Assembler::notEqual, next);
// We found a hit, move offset into j_rarg1 // We found a hit, move offset into j_rarg1
@ -184,10 +184,10 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ load_klass(rax, j_rarg0); __ load_klass(rax, j_rarg0);
const Register method = rbx; const Register method = rbx;
__ movq(method, Address(rax, j_rarg1, Address::times_1, method_offset)); __ movptr(method, Address(rax, j_rarg1, Address::times_1, method_offset));
// Restore saved register, before possible trap. // Restore saved register, before possible trap.
__ popq(j_rarg1); __ pop(j_rarg1);
// method (rbx): methodOop // method (rbx): methodOop
// j_rarg0: receiver // j_rarg0: receiver
@ -196,9 +196,9 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
#ifdef ASSERT #ifdef ASSERT
if (DebugVtables) { if (DebugVtables) {
Label L2; Label L2;
__ cmpq(method, (int)NULL); __ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L2); __ jcc(Assembler::equal, L2);
__ cmpq(Address(method, methodOopDesc::from_compiled_offset()), (int)NULL_WORD); __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L2); __ jcc(Assembler::notZero, L2);
__ stop("compiler entrypoint is null"); __ stop("compiler entrypoint is null");
__ bind(L2); __ bind(L2);
@ -212,7 +212,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ bind(throw_icce); __ bind(throw_icce);
// Restore saved register // Restore saved register
__ popq(j_rarg1); __ pop(j_rarg1);
__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
__ flush(); __ flush();
@ -236,7 +236,7 @@ reg_class xdb_reg7( XMM7a,XMM7b );
// This is a block of C++ code which provides values, functions, and // This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description // definitions necessary in the rest of the architecture description
source %{ source %{
#define RELOC_IMM32 Assembler::imm32_operand #define RELOC_IMM32 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand #define RELOC_DISP32 Assembler::disp32_operand
#define __ _masm. #define __ _masm.
@ -593,11 +593,11 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (VerifyStackAtCalls) { if (VerifyStackAtCalls) {
Label L; Label L;
MacroAssembler masm(&cbuf); MacroAssembler masm(&cbuf);
masm.pushl(rax); masm.push(rax);
masm.movl(rax, rsp); masm.mov(rax, rsp);
masm.andl(rax, StackAlignmentInBytes-1); masm.andptr(rax, StackAlignmentInBytes-1);
masm.cmpl(rax, StackAlignmentInBytes-wordSize); masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
masm.popl(rax); masm.pop(rax);
masm.jcc(Assembler::equal, L); masm.jcc(Assembler::equal, L);
masm.stop("Stack is not properly aligned!"); masm.stop("Stack is not properly aligned!");
masm.bind(L); masm.bind(L);
@ -1150,7 +1150,8 @@ void emit_java_to_interp(CodeBuffer &cbuf ) {
__ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32); __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32);
// static stub relocation also tags the methodOop in the code-stream. // static stub relocation also tags the methodOop in the code-stream.
__ movoop(rbx, (jobject)NULL); // method is zapped till fixup time __ movoop(rbx, (jobject)NULL); // method is zapped till fixup time
__ jump(RuntimeAddress((address)-1)); // This is recognized as unresolved by relocs/nativeInst/ic code
__ jump(RuntimeAddress(__ pc()));
__ end_a_stub(); __ end_a_stub();
// Update current stubs pointer and restore code_end. // Update current stubs pointer and restore code_end.
@ -1181,7 +1182,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
#ifdef ASSERT #ifdef ASSERT
uint code_size = cbuf.code_size(); uint code_size = cbuf.code_size();
#endif #endif
masm.cmpl(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
masm.jump_cc(Assembler::notEqual, masm.jump_cc(Assembler::notEqual,
RuntimeAddress(SharedRuntime::get_ic_miss_stub())); RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
/* WARNING these NOPs are critical so that verified entry point is properly /* WARNING these NOPs are critical so that verified entry point is properly
@ -1687,20 +1688,20 @@ encode %{
// Compare super with sub directly, since super is not in its own SSA. // Compare super with sub directly, since super is not in its own SSA.
// The compiler used to emit this test, but we fold it in here, // The compiler used to emit this test, but we fold it in here,
// to allow platform-specific tweaking on sparc. // to allow platform-specific tweaking on sparc.
__ cmpl(Reax, Resi); __ cmpptr(Reax, Resi);
__ jcc(Assembler::equal, hit); __ jcc(Assembler::equal, hit);
#ifndef PRODUCT #ifndef PRODUCT
__ increment(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr)); __ incrementl(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
#endif //PRODUCT #endif //PRODUCT
__ movl(Redi,Address(Resi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())); __ movptr(Redi,Address(Resi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
__ movl(Recx,Address(Redi,arrayOopDesc::length_offset_in_bytes())); __ movl(Recx,Address(Redi,arrayOopDesc::length_offset_in_bytes()));
__ addl(Redi,arrayOopDesc::base_offset_in_bytes(T_OBJECT)); __ addptr(Redi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ repne_scan(); __ repne_scan();
__ jcc(Assembler::notEqual, miss); __ jcc(Assembler::notEqual, miss);
__ movl(Address(Resi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()),Reax); __ movptr(Address(Resi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()),Reax);
__ bind(hit); __ bind(hit);
if( $primary ) if( $primary )
__ xorl(Redi,Redi); __ xorptr(Redi,Redi);
__ bind(miss); __ bind(miss);
%} %}
@ -1749,15 +1750,15 @@ encode %{
// optimizer if the C function is a pure function. // optimizer if the C function is a pure function.
__ ffree(0); __ ffree(0);
} else if (rt == T_FLOAT) { } else if (rt == T_FLOAT) {
__ leal(rsp, Address(rsp, -4)); __ lea(rsp, Address(rsp, -4));
__ fstp_s(Address(rsp, 0)); __ fstp_s(Address(rsp, 0));
__ movflt(xmm0, Address(rsp, 0)); __ movflt(xmm0, Address(rsp, 0));
__ leal(rsp, Address(rsp, 4)); __ lea(rsp, Address(rsp, 4));
} else if (rt == T_DOUBLE) { } else if (rt == T_DOUBLE) {
__ leal(rsp, Address(rsp, -8)); __ lea(rsp, Address(rsp, -8));
__ fstp_d(Address(rsp, 0)); __ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0)); __ movdbl(xmm0, Address(rsp, 0));
__ leal(rsp, Address(rsp, 8)); __ lea(rsp, Address(rsp, 8));
} }
} }
%} %}
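The lea/fstp/movflt sequence above bounces a native call's floating return value from the x87 stack, where the 32-bit C calling convention leaves it, into XMM0, where compiled code expects it, via a scratch stack slot. A hedged intrinsic-level sketch; the incoming parameter stands in for ST(0):

#include <xmmintrin.h>

static __m128 st0_to_xmm_sketch(float st0_value) {
  float slot = st0_value;     // lea rsp,[rsp-4] ; fstp_s [rsp]
  return _mm_load_ss(&slot);  // movflt xmm0,[rsp] ; lea rsp,[rsp+4]
}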
@ -2888,10 +2889,10 @@ encode %{
__ jccb(Assembler::equal, done); __ jccb(Assembler::equal, done);
__ jccb(Assembler::above, inc); __ jccb(Assembler::above, inc);
__ bind(nan); __ bind(nan);
__ decrement(as_Register($dst$$reg)); __ decrement(as_Register($dst$$reg)); // NO L qqq
__ jmpb(done); __ jmpb(done);
__ bind(inc); __ bind(inc);
__ increment(as_Register($dst$$reg)); __ increment(as_Register($dst$$reg)); // NO L qqq
__ bind(done); __ bind(done);
%} %}
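The inc/dec/nan branches above implement the three-way float compare Java's compare bytecodes need: +1, 0, or -1 in the destination register, with an unordered (NaN) comparison funneled to -1 here, matching the fcmpl flavor (the fcmpg flavor sends NaN to +1 instead). A hedged rendering:

static int fcmpl_sketch(float a, float b) {
  if (a > b)  return 1;   // "above"  -> increment
  if (a == b) return 0;   // "equal"  -> done
  return -1;              // below or unordered (NaN) -> decrement
}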
@ -3158,7 +3159,7 @@ encode %{
enc_class mov_i2x(regXD dst, eRegI src) %{ enc_class mov_i2x(regXD dst, eRegI src) %{
MacroAssembler _masm(&cbuf); MacroAssembler _masm(&cbuf);
__ movd(as_XMMRegister($dst$$reg), as_Register($src$$reg)); __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
%} %}
@ -3258,30 +3259,30 @@ encode %{
} }
if (EmitSync & 1) { if (EmitSync & 1) {
// set box->dhw = unused_mark (3) // set box->dhw = unused_mark (3)
// Force all sync thru slow-path: slow_enter() and slow_exit() // Force all sync thru slow-path: slow_enter() and slow_exit()
masm.movl (Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ; masm.movptr (Address(boxReg, 0), int32_t(markOopDesc::unused_mark())) ;
masm.cmpl (rsp, 0) ; masm.cmpptr (rsp, (int32_t)0) ;
} else } else
if (EmitSync & 2) { if (EmitSync & 2) {
Label DONE_LABEL ; Label DONE_LABEL ;
if (UseBiasedLocking) { if (UseBiasedLocking) {
// Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument. // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters); masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
} }
masm.movl (tmpReg, Address(objReg, 0)) ; // fetch markword masm.movptr(tmpReg, Address(objReg, 0)) ; // fetch markword
masm.orl (tmpReg, 0x1); masm.orptr (tmpReg, 0x1);
masm.movl (Address(boxReg, 0), tmpReg); // Anticipate successful CAS masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
if (os::is_MP()) { masm.lock(); } if (os::is_MP()) { masm.lock(); }
masm.cmpxchg(boxReg, Address(objReg, 0)); // Updates tmpReg masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
masm.jcc(Assembler::equal, DONE_LABEL); masm.jcc(Assembler::equal, DONE_LABEL);
// Recursive locking // Recursive locking
masm.subl(tmpReg, rsp); masm.subptr(tmpReg, rsp);
masm.andl(tmpReg, 0xFFFFF003 ); masm.andptr(tmpReg, (int32_t) 0xFFFFF003 );
masm.movl(Address(boxReg, 0), tmpReg); masm.movptr(Address(boxReg, 0), tmpReg);
masm.bind(DONE_LABEL) ; masm.bind(DONE_LABEL) ;
} else { } else {
// Possible cases that we'll encounter in fast_lock // Possible cases that we'll encounter in fast_lock
// ------------------------------------------------ // ------------------------------------------------
// * Inflated // * Inflated
// -- unlocked // -- unlocked
@ -3310,15 +3311,15 @@ encode %{
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters); masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
} }
masm.movl (tmpReg, Address(objReg, 0)) ; // [FETCH] masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
masm.testl (tmpReg, 0x02) ; // Inflated v (Stack-locked or neutral) masm.testptr(tmpReg, 0x02) ; // Inflated v (Stack-locked or neutral)
masm.jccb (Assembler::notZero, IsInflated) ; masm.jccb (Assembler::notZero, IsInflated) ;
// Attempt stack-locking ... // Attempt stack-locking ...
masm.orl (tmpReg, 0x1); masm.orptr (tmpReg, 0x1);
masm.movl (Address(boxReg, 0), tmpReg); // Anticipate successful CAS masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
if (os::is_MP()) { masm.lock(); } if (os::is_MP()) { masm.lock(); }
masm.cmpxchg(boxReg, Address(objReg, 0)); // Updates tmpReg masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
if (_counters != NULL) { if (_counters != NULL) {
masm.cond_inc32(Assembler::equal, masm.cond_inc32(Assembler::equal,
ExternalAddress((address)_counters->fast_path_entry_count_addr())); ExternalAddress((address)_counters->fast_path_entry_count_addr()));
@ -3326,9 +3327,9 @@ encode %{
masm.jccb (Assembler::equal, DONE_LABEL); masm.jccb (Assembler::equal, DONE_LABEL);
// Recursive locking // Recursive locking
masm.subl(tmpReg, rsp); masm.subptr(tmpReg, rsp);
masm.andl(tmpReg, 0xFFFFF003 ); masm.andptr(tmpReg, 0xFFFFF003 );
masm.movl(Address(boxReg, 0), tmpReg); masm.movptr(Address(boxReg, 0), tmpReg);
if (_counters != NULL) { if (_counters != NULL) {
masm.cond_inc32(Assembler::equal, masm.cond_inc32(Assembler::equal,
ExternalAddress((address)_counters->fast_path_entry_count_addr())); ExternalAddress((address)_counters->fast_path_entry_count_addr()));
@ -3360,36 +3361,33 @@ encode %{
// This is convenient but results in a ST-before-CAS penalty. The following CAS suffers // This is convenient but results in a ST-before-CAS penalty. The following CAS suffers
// additional latency as we have another ST in the store buffer that must drain. // additional latency as we have another ST in the store buffer that must drain.
if (EmitSync & 8192) { if (EmitSync & 8192) {
masm.movl (Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
masm.get_thread (scrReg) ; masm.get_thread (scrReg) ;
masm.movl (boxReg, tmpReg); // consider: LEA box, [tmp-2] masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
masm.movl (tmpReg, 0); // consider: xor vs mov masm.movptr(tmpReg, 0); // consider: xor vs mov
if (os::is_MP()) { masm.lock(); } if (os::is_MP()) { masm.lock(); }
masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
} else } else
if ((EmitSync & 128) == 0) { // avoid ST-before-CAS if ((EmitSync & 128) == 0) { // avoid ST-before-CAS
masm.movl (scrReg, boxReg) ; masm.movptr(scrReg, boxReg) ;
masm.movl (boxReg, tmpReg); // consider: LEA box, [tmp-2] masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
// Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) { if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) {
// prefetchw [eax + Offset(_owner)-2] // prefetchw [eax + Offset(_owner)-2]
masm.emit_raw (0x0F) ; masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
masm.emit_raw (0x0D) ;
masm.emit_raw (0x48) ;
masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ;
} }
if ((EmitSync & 64) == 0) { if ((EmitSync & 64) == 0) {
// Optimistic form: consider XORL tmpReg,tmpReg // Optimistic form: consider XORL tmpReg,tmpReg
masm.movl (tmpReg, 0 ) ; masm.movptr(tmpReg, 0 ) ;
} else { } else {
// Can suffer RTS->RTO upgrades on shared or cold $ lines // Can suffer RTS->RTO upgrades on shared or cold $ lines
// Test-And-CAS instead of CAS // Test-And-CAS instead of CAS
masm.movl (tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
masm.testl (tmpReg, tmpReg) ; // Locked ? masm.testptr(tmpReg, tmpReg) ; // Locked ?
masm.jccb (Assembler::notZero, DONE_LABEL) ; masm.jccb (Assembler::notZero, DONE_LABEL) ;
} }
// Appears unlocked - try to swing _owner from null to non-null. // Appears unlocked - try to swing _owner from null to non-null.
@ -3401,41 +3399,38 @@ encode %{
// (rsp or the address of the box) into m->owner is harmless. // (rsp or the address of the box) into m->owner is harmless.
// Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand. // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
if (os::is_MP()) { masm.lock(); } if (os::is_MP()) { masm.lock(); }
masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
masm.movl (Address(scrReg, 0), 3) ; // box->_displaced_header = 3 masm.movptr(Address(scrReg, 0), 3) ; // box->_displaced_header = 3
masm.jccb (Assembler::notZero, DONE_LABEL) ; masm.jccb (Assembler::notZero, DONE_LABEL) ;
masm.get_thread (scrReg) ; // beware: clobbers ICCs masm.get_thread (scrReg) ; // beware: clobbers ICCs
masm.movl (Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ; masm.movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ;
masm.xorl (boxReg, boxReg) ; // set icc.ZFlag = 1 to indicate success masm.xorptr(boxReg, boxReg) ; // set icc.ZFlag = 1 to indicate success
// If the CAS fails we can either retry or pass control to the slow-path. // If the CAS fails we can either retry or pass control to the slow-path.
// We use the latter tactic. // We use the latter tactic.
// Pass the CAS result in the icc.ZFlag into DONE_LABEL // Pass the CAS result in the icc.ZFlag into DONE_LABEL
// If the CAS was successful ... // If the CAS was successful ...
// Self has acquired the lock // Self has acquired the lock
// Invariant: m->_recursions should already be 0, so we don't need to explicitly set it. // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
// Intentional fall-through into DONE_LABEL ... // Intentional fall-through into DONE_LABEL ...
} else { } else {
masm.movl (Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
masm.movl (boxReg, tmpReg) ; masm.movptr(boxReg, tmpReg) ;
// Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) { if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) {
// prefetchw [eax + Offset(_owner)-2] // prefetchw [eax + Offset(_owner)-2]
masm.emit_raw (0x0F) ; masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
masm.emit_raw (0x0D) ;
masm.emit_raw (0x48) ;
masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ;
} }
if ((EmitSync & 64) == 0) { if ((EmitSync & 64) == 0) {
// Optimistic form // Optimistic form
masm.xorl (tmpReg, tmpReg) ; masm.xorptr (tmpReg, tmpReg) ;
} else { } else {
// Can suffer RTS->RTO upgrades on shared or cold $ lines // Can suffer RTS->RTO upgrades on shared or cold $ lines
masm.movl (tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
masm.testl (tmpReg, tmpReg) ; // Locked ? masm.testptr(tmpReg, tmpReg) ; // Locked ?
masm.jccb (Assembler::notZero, DONE_LABEL) ; masm.jccb (Assembler::notZero, DONE_LABEL) ;
} }
// Appears unlocked - try to swing _owner from null to non-null. // Appears unlocked - try to swing _owner from null to non-null.
@ -3443,7 +3438,7 @@ encode %{
// Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand. // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
masm.get_thread (scrReg) ; masm.get_thread (scrReg) ;
if (os::is_MP()) { masm.lock(); } if (os::is_MP()) { masm.lock(); }
masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
// If the CAS fails we can either retry or pass control to the slow-path. // If the CAS fails we can either retry or pass control to the slow-path.
// We use the latter tactic. // We use the latter tactic.
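Stripped of the EmitSync diagnostics, the stack-locking fast path above is: mark the fetched header as unlocked, store it into the on-stack lock box, then CAS the object's mark word from that header to the box address. A hedged standalone sketch; the mark-word layout is simplified:

#include <atomic>
#include <cstdint>

using markword = uintptr_t;

static bool try_stack_lock_sketch(std::atomic<markword>& obj_mark,
                                  markword* box) {
  markword expected = obj_mark.load(std::memory_order_relaxed) | 0x1; // orptr(tmp, 0x1)
  *box = expected;                    // anticipate a successful CAS
  return obj_mark.compare_exchange_strong(        // lock cmpxchgptr
      expected, reinterpret_cast<markword>(box)); // header now points at the box
}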
@@ -3514,19 +3509,19 @@ encode %{
     if (EmitSync & 4) {
       // Disable - inhibit all inlining.  Force control through the slow-path
-      masm.cmpl  (rsp, 0) ;
+      masm.cmpptr (rsp, 0) ;
     } else
     if (EmitSync & 8) {
       Label DONE_LABEL ;
       if (UseBiasedLocking) {
         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
       }
       // classic stack-locking code ...
-      masm.movl  (tmpReg, Address(boxReg, 0)) ;
-      masm.testl (tmpReg, tmpReg) ;
+      masm.movptr(tmpReg, Address(boxReg, 0)) ;
+      masm.testptr(tmpReg, tmpReg) ;
       masm.jcc   (Assembler::zero, DONE_LABEL) ;
       if (os::is_MP()) { masm.lock(); }
-      masm.cmpxchg(tmpReg, Address(objReg, 0));          // Uses EAX which is box
+      masm.cmpxchgptr(tmpReg, Address(objReg, 0));       // Uses EAX which is box
       masm.bind(DONE_LABEL);
     } else {
       Label DONE_LABEL, Stacked, CheckSucc, Inflated ;
@@ -3536,12 +3531,12 @@ encode %{
       if (UseBiasedLocking) {
         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
       }
-      masm.cmpl  (Address(boxReg, 0), 0) ;            // Examine the displaced header
-      masm.movl  (tmpReg, Address(objReg, 0)) ;       // Examine the object's markword
+      masm.cmpptr(Address(boxReg, 0), 0) ;            // Examine the displaced header
+      masm.movptr(tmpReg, Address(objReg, 0)) ;       // Examine the object's markword
       masm.jccb  (Assembler::zero, DONE_LABEL) ;      // 0 indicates recursive stack-lock
-      masm.testl (tmpReg, 0x02) ;                     // Inflated?
+      masm.testptr(tmpReg, 0x02) ;                    // Inflated?
       masm.jccb  (Assembler::zero, Stacked) ;
       masm.bind  (Inflated) ;
@@ -3571,11 +3566,8 @@ encode %{
       masm.get_thread (boxReg) ;
       if ((EmitSync & 4096) && VM_Version::supports_3dnow() && os::is_MP()) {
         // prefetchw [ebx + Offset(_owner)-2]
-        masm.emit_raw (0x0F) ;
-        masm.emit_raw (0x0D) ;
-        masm.emit_raw (0x4B) ;
-        masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ;
+        masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2));
       }
       // Note that we could employ various encoding schemes to reduce
@@ -3584,22 +3576,22 @@ encode %{
       // In practice the chain of fetches doesn't seem to impact performance, however.
       if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
         // Attempt to reduce branch density - AMD's branch predictor.
-        masm.xorl  (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-        masm.orl   (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
-        masm.orl   (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
-        masm.orl   (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
+        masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+        masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
+        masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
+        masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
         masm.jccb  (Assembler::notZero, DONE_LABEL) ;
-        masm.movl  (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
+        masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
         masm.jmpb  (DONE_LABEL) ;
       } else {
-        masm.xorl  (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-        masm.orl   (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
+        masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+        masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
         masm.jccb  (Assembler::notZero, DONE_LABEL) ;
-        masm.movl  (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
-        masm.orl   (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
+        masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
+        masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
         masm.jccb  (Assembler::notZero, CheckSucc) ;
-        masm.movl  (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
+        masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
         masm.jmpb  (DONE_LABEL) ;
       }
       // The Following code fragment (EmitSync & 65536) improves the performance of
@@ -3615,9 +3607,9 @@ encode %{
         masm.bind  (CheckSucc) ;
         // Optional pre-test ... it's safe to elide this
         if ((EmitSync & 16) == 0) {
-          masm.cmpl  (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
+          masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
           masm.jccb  (Assembler::zero, LGoSlowPath) ;
         }
         // We have a classic Dekker-style idiom:
@@ -3645,39 +3637,37 @@ encode %{
         //
         // We currently use (3), although it's likely that switching to (2)
         // is correct for the future.
-        masm.movl  (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
+        masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
         if (os::is_MP()) {
           if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
-            masm.emit_raw (0x0F) ;    // MFENCE ...
-            masm.emit_raw (0xAE) ;
-            masm.emit_raw (0xF0) ;
+            masm.mfence();
           } else {
-            masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
+            masm.lock () ; masm.addptr(Address(rsp, 0), 0) ;
           }
         }
         // Ratify _succ remains non-null
-        masm.cmpl  (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
+        masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
         masm.jccb  (Assembler::notZero, LSuccess) ;
-        masm.xorl  (boxReg, boxReg) ;                  // box is really EAX
+        masm.xorptr(boxReg, boxReg) ;                  // box is really EAX
         if (os::is_MP()) { masm.lock(); }
-        masm.cmpxchg(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+        masm.cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
         masm.jccb  (Assembler::notEqual, LSuccess) ;
        // Since we're low on registers we installed rsp as a placeholder in _owner.
         // Now install Self over rsp.  This is safe as we're transitioning from
         // non-null to non-null
         masm.get_thread (boxReg) ;
-        masm.movl  (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ;
+        masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ;
         // Intentional fall-through into LGoSlowPath ...
         masm.bind  (LGoSlowPath) ;
-        masm.orl   (boxReg, 1) ;                       // set ICC.ZF=0 to indicate failure
+        masm.orptr (boxReg, 1) ;                       // set ICC.ZF=0 to indicate failure
         masm.jmpb  (DONE_LABEL) ;
         masm.bind  (LSuccess) ;
-        masm.xorl  (boxReg, boxReg) ;                  // set ICC.ZF=1 to indicate success
+        masm.xorptr(boxReg, boxReg) ;                  // set ICC.ZF=1 to indicate success
         masm.jmpb  (DONE_LABEL) ;
       }
       masm.bind  (Stacked) ;
@@ -3686,9 +3676,9 @@ encode %{
       // Try to reset the header to displaced header.
       // The "box" value on the stack is stable, so we can reload
       // and be assured we observe the same value as above.
-      masm.movl  (tmpReg, Address(boxReg, 0)) ;
+      masm.movptr(tmpReg, Address(boxReg, 0)) ;
       if (os::is_MP()) { masm.lock(); }
-      masm.cmpxchg(tmpReg, Address(objReg, 0));        // Uses EAX which is box
+      masm.cmpxchgptr(tmpReg, Address(objReg, 0));     // Uses EAX which is box
       // Intentional fall-thru into DONE_LABEL
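The Dekker-style exit protocol the previous hunk refers to reduces to the shape below in portable C++ (a hedged sketch; Mon and the helper are invented names, and the real code must also unpark the successor through the runtime):

    #include <atomic>

    struct Mon { std::atomic<void*> owner; std::atomic<void*> succ; };

    // Option (3) above: ST _owner=null; MEMBAR; LD _succ; CAS to re-take if needed.
    bool fast_exit_needs_slow_path(Mon* m, void* self) {
        if (m->succ.load() == nullptr) return true;           // no successor: LGoSlowPath
        m->owner.store(nullptr, std::memory_order_release);   // ST _owner = null
        std::atomic_thread_fence(std::memory_order_seq_cst);  // the MFENCE / lock;add above
        if (m->succ.load() != nullptr) return false;          // LSuccess: successor will self-resume
        void* expected = nullptr;
        if (!m->owner.compare_exchange_strong(expected, self))
            return false;                                     // LSuccess: another thread owns it now
        return true;                                          // re-acquired: slow path must wake someone
    }
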
@@ -3720,12 +3710,12 @@ encode %{
     int count_offset = java_lang_String::count_offset_in_bytes();
     int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
-    masm.movl(rax, Address(rsi, value_offset));
+    masm.movptr(rax, Address(rsi, value_offset));
     masm.movl(rcx, Address(rsi, offset_offset));
-    masm.leal(rax, Address(rax, rcx, Address::times_2, base_offset));
-    masm.movl(rbx, Address(rdi, value_offset));
+    masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset));
+    masm.movptr(rbx, Address(rdi, value_offset));
     masm.movl(rcx, Address(rdi, offset_offset));
-    masm.leal(rbx, Address(rbx, rcx, Address::times_2, base_offset));
+    masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset));
     // Compute the minimum of the string lengths(rsi) and the
     // difference of the string lengths (stack)
@@ -3736,14 +3726,14 @@ encode %{
       masm.movl(rsi, Address(rsi, count_offset));
       masm.movl(rcx, rdi);
       masm.subl(rdi, rsi);
-      masm.pushl(rdi);
+      masm.push(rdi);
       masm.cmovl(Assembler::lessEqual, rsi, rcx);
     } else {
       masm.movl(rdi, Address(rdi, count_offset));
       masm.movl(rcx, Address(rsi, count_offset));
       masm.movl(rsi, rdi);
       masm.subl(rdi, rcx);
-      masm.pushl(rdi);
+      masm.push(rdi);
       masm.jcc(Assembler::lessEqual, ECX_GOOD_LABEL);
       masm.movl(rsi, rcx);
       // rsi holds min, rcx is unused
@@ -3761,14 +3751,14 @@ encode %{
     // Compare first characters
     masm.subl(rcx, rdi);
     masm.jcc(Assembler::notZero, POP_LABEL);
-    masm.decrement(rsi);
+    masm.decrementl(rsi);
     masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);
     {
       // Check after comparing first character to see if strings are equivalent
       Label LSkip2;
       // Check if the strings start at same location
-      masm.cmpl(rbx,rax);
+      masm.cmpptr(rbx,rax);
       masm.jcc(Assembler::notEqual, LSkip2);
       // Check if the length difference is zero (from stack)
@@ -3780,8 +3770,8 @@ encode %{
     }
     // Shift rax, and rbx, to the end of the arrays, negate min
-    masm.leal(rax, Address(rax, rsi, Address::times_2, 2));
-    masm.leal(rbx, Address(rbx, rsi, Address::times_2, 2));
+    masm.lea(rax, Address(rax, rsi, Address::times_2, 2));
+    masm.lea(rbx, Address(rbx, rsi, Address::times_2, 2));
     masm.negl(rsi);
     // Compare the rest of the characters
@@ -3790,18 +3780,18 @@ encode %{
       masm.load_unsigned_word(rdi, Address(rax, rsi, Address::times_2, 0));
       masm.subl(rcx, rdi);
       masm.jcc(Assembler::notZero, POP_LABEL);
-      masm.increment(rsi);
+      masm.incrementl(rsi);
       masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);
     // Strings are equal up to min length.  Return the length difference.
     masm.bind(LENGTH_DIFF_LABEL);
-    masm.popl(rcx);
+    masm.pop(rcx);
     masm.jmp(DONE_LABEL);
     // Discard the stored length difference
     masm.bind(POP_LABEL);
-    masm.addl(rsp, 4);
+    masm.addptr(rsp, 4);
     // That's it
     masm.bind(DONE_LABEL);
   %}
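The intrinsic above is a hand-scheduled form of String.compareTo: compare the first min(len1, len2) characters and fall back to the length difference. The same logic in plain C++ (hypothetical helper, for reference only):

    #include <cstdint>

    int compare_chars(const uint16_t* a, int la, const uint16_t* b, int lb) {
        int min = la < lb ? la : lb;
        for (int i = 0; i < min; i++) {
            int d = (int)a[i] - (int)b[i];
            if (d != 0) return d;      // first differing character decides
        }
        return la - lb;                // equal prefix: shorter string sorts first
    }
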
@@ -4315,7 +4305,8 @@ encode %{
   enc_class enc_membar_volatile %{
     MacroAssembler masm(&cbuf);
-    masm.membar();
+    masm.membar(Assembler::Membar_mask_bits(Assembler::StoreLoad |
+                                            Assembler::StoreStore));
   %}
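The widened membar spells out the barrier a Java volatile store needs: StoreLoad (plus StoreStore) between the store and any later load. In portable C++ a sequentially consistent atomic store requests the same ordering (an illustrative equivalence, not the VM's implementation):

    #include <atomic>

    std::atomic<int> ready{0};

    void publish() {
        // On x86, compilers emit the store plus a StoreLoad fence
        // (mfence or a locked RMW), mirroring the mask above.
        ready.store(1, std::memory_order_seq_cst);
    }
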
   // Atomically load the volatile long
@@ -11151,7 +11142,7 @@ instruct convXI2XD_reg(regXD dst, eRegI src)
   format %{ "MOVD   $dst,$src\n\t"
             "CVTDQ2PD $dst,$dst\t# i2d" %}
   ins_encode %{
-    __ movd($dst$$XMMRegister, $src$$Register);
+    __ movdl($dst$$XMMRegister, $src$$Register);
     __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe(pipe_slow); // XXX
@@ -11249,7 +11240,7 @@ instruct convI2X_reg(regX dst, eRegI src) %{
   format %{ "MOVD   $dst,$src\n\t"
             "CVTDQ2PS $dst,$dst\t# i2f" %}
   ins_encode %{
-    __ movd($dst$$XMMRegister, $src$$Register);
+    __ movdl($dst$$XMMRegister, $src$$Register);
     __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
   %}
   ins_pipe(pipe_slow); // XXX
@@ -12262,7 +12253,7 @@ instruct cmpL3_reg_reg(eSIRegI dst, eRegL src1, eRegL src2, eFlagsReg flags ) %{
             "done:" %}
   ins_encode %{
     Label p_one, m_one, done;
-    __ xorl($dst$$Register, $dst$$Register);
+    __ xorptr($dst$$Register, $dst$$Register);
     __ cmpl(HIGH_FROM_LOW($src1$$Register), HIGH_FROM_LOW($src2$$Register));
     __ jccb(Assembler::less, m_one);
     __ jccb(Assembler::greater, p_one);
@@ -12270,10 +12261,10 @@ instruct cmpL3_reg_reg(eSIRegI dst, eRegL src1, eRegL src2, eFlagsReg flags ) %{
     __ jccb(Assembler::below, m_one);
     __ jccb(Assembler::equal, done);
     __ bind(p_one);
-    __ increment($dst$$Register);
+    __ incrementl($dst$$Register);
     __ jmpb(done);
     __ bind(m_one);
-    __ decrement($dst$$Register);
+    __ decrementl($dst$$Register);
     __ bind(done);
   %}
   ins_pipe( pipe_slow );
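What the flag dance above computes is a three-way compare of two 64-bit values using only 32-bit registers: a signed compare of the high words decides first, and only on equality do the unsigned low words matter. The same logic in plain C++ (hypothetical helper):

    #include <cstdint>

    int cmp_long3(int64_t a, int64_t b) {
        int32_t  ahi = (int32_t)(a >> 32), bhi = (int32_t)(b >> 32);
        uint32_t alo = (uint32_t)a,        blo = (uint32_t)b;
        if (ahi != bhi) return (ahi < bhi) ? -1 : 1;   // signed high-word compare
        if (alo != blo) return (alo < blo) ? -1 : 1;   // unsigned low-word compare
        return 0;
    }
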

View File

@@ -478,7 +478,7 @@ reg_class int_no_rcx_reg(RAX,
 // Class for all int registers except RAX, RDX (and RSP)
 reg_class int_no_rax_rdx_reg(RBP,
-                             RDI
+                             RDI,
                              RSI,
                              RCX,
                              RBX,
@@ -552,7 +552,7 @@ reg_class double_reg(XMM0, XMM0_H,
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
 source %{
-#define RELOC_IMM64    Assembler::imm64_operand
+#define RELOC_IMM64    Assembler::imm_operand
 #define RELOC_DISP32   Assembler::disp32_operand
 #define __ _masm.
@@ -962,11 +962,11 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
   if (VerifyStackAtCalls) {
     Label L;
     MacroAssembler masm(&cbuf);
-    masm.pushq(rax);
-    masm.movq(rax, rsp);
-    masm.andq(rax, StackAlignmentInBytes-1);
-    masm.cmpq(rax, StackAlignmentInBytes-wordSize);
-    masm.popq(rax);
+    masm.push(rax);
+    masm.mov(rax, rsp);
+    masm.andptr(rax, StackAlignmentInBytes-1);
+    masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
+    masm.pop(rax);
     masm.jcc(Assembler::equal, L);
     masm.stop("Stack is not properly aligned!");
     masm.bind(L);
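The VerifyStackAtCalls sequence samples rsp just after one scratch push and requires it to sit exactly one word short of a StackAlignmentInBytes boundary. The predicate it checks, restated as a small C++ helper (hypothetical names):

    #include <cstdint>

    bool stack_ok_after_push(uintptr_t rsp_after_push, int alignment, int word_size) {
        // matches: (rsp & (alignment-1)) == alignment - wordSize
        return (rsp_after_push & (uintptr_t)(alignment - 1)) ==
               (uintptr_t)(alignment - word_size);
    }
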
@@ -1817,6 +1817,7 @@ void emit_java_to_interp(CodeBuffer& cbuf)
   __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
   // static stub relocation also tags the methodOop in the code-stream.
   __ movoop(rbx, (jobject) NULL);  // method is zapped till fixup time
+  // This is recognized as unresolved by relocs/nativeinst/ic code
   __ jump(RuntimeAddress(__ pc()));
   // Update current stubs pointer and restore code_end.
@@ -1863,9 +1864,9 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
 #endif
   if (UseCompressedOops) {
     masm.load_klass(rscratch1, j_rarg0);
-    masm.cmpq(rax, rscratch1);
+    masm.cmpptr(rax, rscratch1);
   } else {
-    masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+    masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
   }
   masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -1949,7 +1950,7 @@ int emit_deopt_handler(CodeBuffer& cbuf)
   __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
   __ bind(next);
   // adjust it so it matches "the_pc"
-  __ subq(Address(rsp, 0), __ offset() - offset);
+  __ subptr(Address(rsp, 0), __ offset() - offset);
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
   assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
   __ end_a_stub();
@@ -2577,23 +2578,23 @@ encode %{
     // Compare super with sub directly, since super is not in its own SSA.
     // The compiler used to emit this test, but we fold it in here,
     // to allow platform-specific tweaking on sparc.
-    __ cmpq(Rrax, Rrsi);
+    __ cmpptr(Rrax, Rrsi);
     __ jcc(Assembler::equal, hit);
 #ifndef PRODUCT
     __ lea(Rrcx, ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
     __ incrementl(Address(Rrcx, 0));
 #endif //PRODUCT
-    __ movq(Rrdi, Address(Rrsi,
-                          sizeof(oopDesc) +
-                          Klass::secondary_supers_offset_in_bytes()));
+    __ movptr(Rrdi, Address(Rrsi,
                           sizeof(oopDesc) +
                           Klass::secondary_supers_offset_in_bytes()));
     __ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes()));
-    __ addq(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+    __ addptr(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
     if (UseCompressedOops) {
       __ encode_heap_oop(Rrax);
       __ repne_scanl();
       __ jcc(Assembler::notEqual, cmiss);
       __ decode_heap_oop(Rrax);
-      __ movq(Address(Rrsi,
-                      sizeof(oopDesc) +
-                      Klass::secondary_super_cache_offset_in_bytes()),
-              Rrax);
+      __ movptr(Address(Rrsi,
                       sizeof(oopDesc) +
                       Klass::secondary_super_cache_offset_in_bytes()),
              Rrax);
@@ -2602,16 +2603,16 @@ encode %{
       __ decode_heap_oop(Rrax);
       __ jmp(miss);
     } else {
-      __ repne_scanq();
+      __ repne_scan();
       __ jcc(Assembler::notEqual, miss);
-      __ movq(Address(Rrsi,
+      __ movptr(Address(Rrsi,
                       sizeof(oopDesc) +
                       Klass::secondary_super_cache_offset_in_bytes()),
              Rrax);
     }
     __ bind(hit);
     if ($primary) {
-      __ xorq(Rrdi, Rrdi);
+      __ xorptr(Rrdi, Rrdi);
     }
     __ bind(miss);
   %}
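The slow path above is a linear scan of the class's secondary-supers array that caches a hit so the next check is O(1). Its shape in simplified C++ (types and field names reduced for illustration; the real layout lives behind the byte offsets in the code):

    struct Klass {
        Klass** secondary_supers;
        int     n_secondary_supers;
        Klass*  secondary_super_cache;
    };

    bool is_subtype_slow(Klass* sub, Klass* super) {
        if (sub == super) return true;                 // the folded direct compare
        for (int i = 0; i < sub->n_secondary_supers; i++) {   // repne scan
            if (sub->secondary_supers[i] == super) {
                sub->secondary_super_cache = super;    // remember for next time
                return true;
            }
        }
        return false;                                  // miss
    }
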
@@ -3527,8 +3528,9 @@ encode %{
       masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
     }
     if (EmitSync & 1) {
-      masm.movptr (Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ;
-      masm.cmpq   (rsp, 0) ;
+      // Without cast to int32_t a movptr will destroy r10 which is typically obj
+      masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
+      masm.cmpptr (rsp, (int32_t)NULL_WORD) ;
     } else
     if (EmitSync & 2) {
       Label DONE_LABEL;
@@ -3536,29 +3538,30 @@ encode %{
         // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
         masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
       }
-      masm.movl(tmpReg, 0x1);
-      masm.orq(tmpReg, Address(objReg, 0));
-      masm.movq(Address(boxReg, 0), tmpReg);
+      // QQQ was movl...
+      masm.movptr(tmpReg, 0x1);
+      masm.orptr(tmpReg, Address(objReg, 0));
+      masm.movptr(Address(boxReg, 0), tmpReg);
       if (os::is_MP()) {
         masm.lock();
       }
-      masm.cmpxchgq(boxReg, Address(objReg, 0)); // Updates tmpReg
+      masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
       masm.jcc(Assembler::equal, DONE_LABEL);
       // Recursive locking
-      masm.subq(tmpReg, rsp);
-      masm.andq(tmpReg, 7 - os::vm_page_size());
-      masm.movq(Address(boxReg, 0), tmpReg);
+      masm.subptr(tmpReg, rsp);
+      masm.andptr(tmpReg, 7 - os::vm_page_size());
+      masm.movptr(Address(boxReg, 0), tmpReg);
       masm.bind(DONE_LABEL);
       masm.nop(); // avoid branch to branch
     } else {
       Label DONE_LABEL, IsInflated, Egress;
-      masm.movq  (tmpReg, Address(objReg, 0)) ;
-      masm.testq (tmpReg, 0x02) ;        // inflated vs stack-locked|neutral|biased
+      masm.movptr(tmpReg, Address(objReg, 0)) ;
+      masm.testl (tmpReg, 0x02) ;        // inflated vs stack-locked|neutral|biased
       masm.jcc   (Assembler::notZero, IsInflated) ;
       // it's stack-locked, biased or neutral
       // TODO: optimize markword triage order to reduce the number of
       // conditional branches in the most common cases.
@@ -3568,13 +3571,14 @@ encode %{
       if (UseBiasedLocking) {
         masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
-        masm.movq  (tmpReg, Address(objReg, 0)) ;      // [FETCH]
+        masm.movptr(tmpReg, Address(objReg, 0)) ;      // [FETCH]
       }
-      masm.orq   (tmpReg, 1) ;
-      masm.movq  (Address(boxReg, 0), tmpReg) ;
+      // was q will it destroy high?
+      masm.orl   (tmpReg, 1) ;
+      masm.movptr(Address(boxReg, 0), tmpReg) ;
       if (os::is_MP()) { masm.lock(); }
-      masm.cmpxchgq(boxReg, Address(objReg, 0)); // Updates tmpReg
+      masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
       if (_counters != NULL) {
         masm.cond_inc32(Assembler::equal,
                         ExternalAddress((address) _counters->fast_path_entry_count_addr()));
@@ -3582,9 +3586,9 @@ encode %{
       masm.jcc   (Assembler::equal, DONE_LABEL);
       // Recursive locking
-      masm.subq  (tmpReg, rsp);
-      masm.andq  (tmpReg, 7 - os::vm_page_size());
-      masm.movq  (Address(boxReg, 0), tmpReg);
+      masm.subptr(tmpReg, rsp);
+      masm.andptr(tmpReg, 7 - os::vm_page_size());
+      masm.movptr(Address(boxReg, 0), tmpReg);
       if (_counters != NULL) {
         masm.cond_inc32(Assembler::equal,
                         ExternalAddress((address) _counters->fast_path_entry_count_addr()));
@@ -3599,16 +3603,17 @@ encode %{
       // We should also think about trying a CAS without having
       // fetched _owner.  If the CAS is successful we may
       // avoid an RTO->RTS upgrade on the $line.
-      masm.movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ;
-      masm.movq  (boxReg, tmpReg) ;
-      masm.movq  (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-      masm.testq (tmpReg, tmpReg) ;
+      // Without cast to int32_t a movptr will destroy r10 which is typically obj
+      masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
+      masm.mov    (boxReg, tmpReg) ;
+      masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+      masm.testptr(tmpReg, tmpReg) ;
       masm.jcc   (Assembler::notZero, DONE_LABEL) ;
       // It's inflated and appears unlocked
       if (os::is_MP()) { masm.lock(); }
-      masm.cmpxchgq(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+      masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
       // Intentional fall-through into DONE_LABEL ...
       masm.bind  (DONE_LABEL) ;
@@ -3627,8 +3632,8 @@ encode %{
     Register tmpReg = as_Register($tmp$$reg);
     MacroAssembler masm(&cbuf);
     if (EmitSync & 4) {
-      masm.cmpq  (rsp, 0) ;
+      masm.cmpptr(rsp, 0) ;
     } else
     if (EmitSync & 8) {
       Label DONE_LABEL;
@@ -3638,15 +3643,15 @@ encode %{
       // Check whether the displaced header is 0
       //(=> recursive unlock)
-      masm.movq(tmpReg, Address(boxReg, 0));
-      masm.testq(tmpReg, tmpReg);
+      masm.movptr(tmpReg, Address(boxReg, 0));
+      masm.testptr(tmpReg, tmpReg);
       masm.jcc(Assembler::zero, DONE_LABEL);
       // If not recursive lock, reset the header to displaced header
       if (os::is_MP()) {
         masm.lock();
       }
-      masm.cmpxchgq(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+      masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
       masm.bind(DONE_LABEL);
       masm.nop(); // avoid branch to branch
     } else {
@@ -3655,44 +3660,44 @@ encode %{
       if (UseBiasedLocking) {
         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
       }
-      masm.movq  (tmpReg, Address(objReg, 0)) ;
-      masm.cmpq  (Address(boxReg, 0), (int)NULL_WORD) ;
+      masm.movptr(tmpReg, Address(objReg, 0)) ;
+      masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
       masm.jcc   (Assembler::zero, DONE_LABEL) ;
-      masm.testq (tmpReg, 0x02) ;
+      masm.testl (tmpReg, 0x02) ;
       masm.jcc   (Assembler::zero, Stacked) ;
       // It's inflated
-      masm.movq  (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-      masm.xorq  (boxReg, r15_thread) ;
-      masm.orq   (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
+      masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+      masm.xorptr(boxReg, r15_thread) ;
+      masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
       masm.jcc   (Assembler::notZero, DONE_LABEL) ;
-      masm.movq  (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
-      masm.orq   (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
+      masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
+      masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
       masm.jcc   (Assembler::notZero, CheckSucc) ;
-      masm.mov64 (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int)NULL_WORD) ;
+      masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
       masm.jmp   (DONE_LABEL) ;
       if ((EmitSync & 65536) == 0) {
         Label LSuccess, LGoSlowPath ;
         masm.bind  (CheckSucc) ;
-        masm.cmpq  (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int)NULL_WORD) ;
+        masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
         masm.jcc   (Assembler::zero, LGoSlowPath) ;
         // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
         // explicit ST;MEMBAR combination, but masm doesn't currently support
         // "ANDQ M,IMM".  Don't use MFENCE here.  lock:add to TOS, xchg, etc
         // are all faster when the write buffer is populated.
-        masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int)NULL_WORD) ;
+        masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
         if (os::is_MP()) {
-          masm.lock () ; masm.addq (Address(rsp, 0), 0) ;
+          masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
         }
-        masm.cmpq  (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int)NULL_WORD) ;
+        masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
         masm.jcc   (Assembler::notZero, LSuccess) ;
-        masm.movptr (boxReg, (int)NULL_WORD) ;              // box is really EAX
+        masm.movptr (boxReg, (int32_t)NULL_WORD) ;          // box is really EAX
         if (os::is_MP()) { masm.lock(); }
-        masm.cmpxchgq (r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+        masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
         masm.jcc   (Assembler::notEqual, LSuccess) ;
         // Intentional fall-through into slow-path
@@ -3705,10 +3710,10 @@ encode %{
         masm.jmp   (DONE_LABEL) ;
       }
       masm.bind  (Stacked) ;
-      masm.movq  (tmpReg, Address (boxReg, 0)) ;     // re-fetch
+      masm.movptr(tmpReg, Address (boxReg, 0)) ;     // re-fetch
       if (os::is_MP()) { masm.lock(); }
-      masm.cmpxchgq(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+      masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
       if (EmitSync & 65536) {
         masm.bind (CheckSucc) ;
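The fence trade-off discussed a few hunks up (prefer lock:add to the top of stack over MFENCE when the write buffer is populated) is a choice between two spellings of the same full StoreLoad barrier. In portable C++ both forms below request that ordering; which instruction the compiler emits on x86 is its choice (illustrative only):

    #include <atomic>

    std::atomic<void*> owner;

    void release_with_fence() {
        owner.store(nullptr, std::memory_order_release);      // the ST of null to _owner
        std::atomic_thread_fence(std::memory_order_seq_cst);  // the MEMBAR: typically
                                                              // mfence or lock add [rsp],0
    }
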
@@ -3736,10 +3741,10 @@ encode %{
     masm.load_heap_oop(rax, Address(rsi, value_offset));
     masm.movl(rcx, Address(rsi, offset_offset));
-    masm.leaq(rax, Address(rax, rcx, Address::times_2, base_offset));
+    masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset));
     masm.load_heap_oop(rbx, Address(rdi, value_offset));
     masm.movl(rcx, Address(rdi, offset_offset));
-    masm.leaq(rbx, Address(rbx, rcx, Address::times_2, base_offset));
+    masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset));
     // Compute the minimum of the string lengths(rsi) and the
     // difference of the string lengths (stack)
@@ -3748,8 +3753,8 @@ encode %{
       masm.movl(rsi, Address(rsi, count_offset));
       masm.movl(rcx, rdi);
       masm.subl(rdi, rsi);
-      masm.pushq(rdi);
-      masm.cmovl(Assembler::lessEqual, rsi, rcx);
+      masm.push(rdi);
+      masm.cmov(Assembler::lessEqual, rsi, rcx);
     // Is the minimum length zero?
     masm.bind(RCX_GOOD_LABEL);
@@ -3770,7 +3775,7 @@ encode %{
       // Check after comparing first character to see if strings are equivalent
       Label LSkip2;
       // Check if the strings start at same location
-      masm.cmpq(rbx, rax);
+      masm.cmpptr(rbx, rax);
       masm.jcc(Assembler::notEqual, LSkip2);
       // Check if the length difference is zero (from stack)
@@ -3782,9 +3787,9 @@ encode %{
     }
     // Shift RAX and RBX to the end of the arrays, negate min
-    masm.leaq(rax, Address(rax, rsi, Address::times_2, 2));
-    masm.leaq(rbx, Address(rbx, rsi, Address::times_2, 2));
-    masm.negq(rsi);
+    masm.lea(rax, Address(rax, rsi, Address::times_2, 2));
+    masm.lea(rbx, Address(rbx, rsi, Address::times_2, 2));
+    masm.negptr(rsi);
     // Compare the rest of the characters
     masm.bind(WHILE_HEAD_LABEL);
@@ -3792,18 +3797,18 @@ encode %{
       masm.load_unsigned_word(rdi, Address(rax, rsi, Address::times_2, 0));
       masm.subl(rcx, rdi);
      masm.jcc(Assembler::notZero, POP_LABEL);
-      masm.incrementq(rsi);
+      masm.increment(rsi);
      masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);
     // Strings are equal up to min length.  Return the length difference.
     masm.bind(LENGTH_DIFF_LABEL);
-    masm.popq(rcx);
+    masm.pop(rcx);
     masm.jmp(DONE_LABEL);
     // Discard the stored length difference
     masm.bind(POP_LABEL);
-    masm.addq(rsp, 8);
+    masm.addptr(rsp, 8);
     // That's it
     masm.bind(DONE_LABEL);
   %}
@@ -3893,7 +3898,7 @@ encode %{
   enc_class absF_encoding(regF dst)
   %{
     int dstenc = $dst$$reg;
-    address signmask_address = (address) StubRoutines::amd64::float_sign_mask();
+    address signmask_address = (address) StubRoutines::x86::float_sign_mask();
     cbuf.set_inst_mark();
     if (dstenc >= 8) {
@@ -3910,7 +3915,7 @@ encode %{
   enc_class absD_encoding(regD dst)
   %{
     int dstenc = $dst$$reg;
-    address signmask_address = (address) StubRoutines::amd64::double_sign_mask();
+    address signmask_address = (address) StubRoutines::x86::double_sign_mask();
     cbuf.set_inst_mark();
     emit_opcode(cbuf, 0x66);
@@ -3928,7 +3933,7 @@ encode %{
   enc_class negF_encoding(regF dst)
   %{
     int dstenc = $dst$$reg;
-    address signflip_address = (address) StubRoutines::amd64::float_sign_flip();
+    address signflip_address = (address) StubRoutines::x86::float_sign_flip();
     cbuf.set_inst_mark();
     if (dstenc >= 8) {
@@ -3945,7 +3950,7 @@ encode %{
   enc_class negD_encoding(regD dst)
   %{
     int dstenc = $dst$$reg;
-    address signflip_address = (address) StubRoutines::amd64::double_sign_flip();
+    address signflip_address = (address) StubRoutines::x86::double_sign_flip();
     cbuf.set_inst_mark();
     emit_opcode(cbuf, 0x66);
@@ -4003,7 +4008,7 @@ encode %{
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::amd64::f2i_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
@@ -4020,7 +4025,7 @@ encode %{
   %{
     int dstenc = $dst$$reg;
     int srcenc = $src$$reg;
-    address const_address = (address) StubRoutines::amd64::double_sign_flip();
+    address const_address = (address) StubRoutines::x86::double_sign_flip();
     // cmpq $dst, [0x8000000000000000]
     cbuf.set_inst_mark();
@@ -4061,7 +4066,7 @@ encode %{
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::amd64::f2l_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::f2l_fixup() - cbuf.code_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
@@ -4117,7 +4122,7 @@ encode %{
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::amd64::d2i_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::d2i_fixup() - cbuf.code_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);
@@ -4134,7 +4139,7 @@ encode %{
   %{
     int dstenc = $dst$$reg;
    int srcenc = $src$$reg;
-    address const_address = (address) StubRoutines::amd64::double_sign_flip();
+    address const_address = (address) StubRoutines::x86::double_sign_flip();
     // cmpq $dst, [0x8000000000000000]
     cbuf.set_inst_mark();
@@ -4175,7 +4180,7 @@ encode %{
     emit_opcode(cbuf, 0xE8);
     emit_d32_reloc(cbuf,
                    (int)
-                   (StubRoutines::amd64::d2l_fixup() - cbuf.code_end() - 4),
+                   (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4),
                    runtime_call_Relocation::spec(),
                    RELOC_DISP32);

View File

@@ -23,8 +23,9 @@
  */
 #include "incls/_precompiled.incl"
-#include "incls/_assembler_linux_x86_32.cpp.incl"
+#include "incls/_assembler_linux_x86.cpp.incl"
+#ifndef _LP64
 void MacroAssembler::int3() {
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
 }
@@ -39,3 +40,45 @@ void MacroAssembler::get_thread(Register thread) {
   movptr(thread, tls);
 }
+#else
+void MacroAssembler::int3() {
+  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
+}
+void MacroAssembler::get_thread(Register thread) {
+  // call pthread_getspecific
+  // void * pthread_getspecific(pthread_key_t key);
+  if (thread != rax) {
+    push(rax);
+  }
+  push(rdi);
+  push(rsi);
+  push(rdx);
+  push(rcx);
+  push(r8);
+  push(r9);
+  push(r10);
+  // XXX
+  mov(r10, rsp);
+  andq(rsp, -16);
+  push(r10);
+  push(r11);
+  movl(rdi, ThreadLocalStorage::thread_index());
+  call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
+  pop(r11);
+  pop(rsp);
+  pop(r10);
+  pop(r9);
+  pop(r8);
+  pop(rcx);
+  pop(rdx);
+  pop(rsi);
+  pop(rdi);
+  if (thread != rax) {
+    mov(thread, rax);
+    pop(rax);
+  }
+}
+#endif
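Stripped of the register saving and the 16-byte stack realignment around the C call, the 64-bit path above is just a call into libpthread. A hedged sketch of the underlying API use (thread_key stands in for the key behind ThreadLocalStorage::thread_index()):

    #include <pthread.h>

    static pthread_key_t thread_key;   // created once at startup with pthread_key_create()

    void* current_thread() {
        return pthread_getspecific(thread_key);  // the Thread* the VM stored for this thread
    }
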

View File

@@ -23,59 +23,111 @@
  */
 #include "incls/_precompiled.incl"
-#include "incls/_assembler_solaris_x86_32.cpp.incl"
+#include "incls/_assembler_solaris_x86.cpp.incl"
 void MacroAssembler::int3() {
-  pushl(rax);
-  pushl(rdx);
-  pushl(rcx);
+  push(rax);
+  push(rdx);
+  push(rcx);
   call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  popl(rcx);
-  popl(rdx);
-  popl(rax);
+  pop(rcx);
+  pop(rdx);
+  pop(rax);
 }
-void MacroAssembler::get_thread(Register thread) {
-  // Try to emit a Solaris-specific fast TSD/TLS accessor.
-  ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode () ;
-  if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) {         // T1
-    // Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset]
-    emit_byte (Assembler::GS_segment) ;
-    // ExternalAddress doesn't work because it can't take NULL
-    AddressLiteral null(0, relocInfo::none);
-    movptr (thread, null);
-    movl (thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ;
-    return ;
-  } else
-  if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) {           // T2
-    // mov r, gs:[tlsOffset]
-    emit_byte (Assembler::GS_segment) ;
-    AddressLiteral tls((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none);
-    movptr (thread, tls);
-    return ;
-  }
+#define __ _masm->
+#ifndef _LP64
+static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
   // slow call to thr_getspecific
   // int thr_getspecific(thread_key_t key, void **value);
   // Consider using pthread_getspecific instead.
-  pushl(0);                                          // allocate space for return value
-  if (thread != rax) pushl(rax);                     // save rax, if caller still wants it
-  pushl(rcx);                                        // save caller save
-  pushl(rdx);                                        // save caller save
+  __ push(0);                                        // allocate space for return value
+  if (thread != rax) __ push(rax);                   // save rax, if caller still wants it
+  __ push(rcx);                                      // save caller save
+  __ push(rdx);                                      // save caller save
   if (thread != rax) {
-    leal(thread, Address(rsp, 3 * sizeof(int)));     // address of return value
+    __ lea(thread, Address(rsp, 3 * sizeof(int)));   // address of return value
   } else {
-    leal(thread, Address(rsp, 2 * sizeof(int)));     // address of return value
+    __ lea(thread, Address(rsp, 2 * sizeof(int)));   // address of return value
   }
-  pushl(thread);                                     // and pass the address
-  pushl(ThreadLocalStorage::thread_index());         // the key
-  call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
-  increment(rsp, 2 * wordSize);
-  popl(rdx);
-  popl(rcx);
-  if (thread != rax) popl(rax);
-  popl(thread);
+  __ push(thread);                                   // and pass the address
+  __ push(ThreadLocalStorage::thread_index());       // the key
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
+  __ increment(rsp, 2 * wordSize);
+  __ pop(rdx);
+  __ pop(rcx);
+  if (thread != rax) __ pop(rax);
+  __ pop(thread);
+}
+#else
+static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
+  // slow call to thr_getspecific
+  // int thr_getspecific(thread_key_t key, void **value);
+  // Consider using pthread_getspecific instead.
+  if (thread != rax) {
+    __ push(rax);
+  }
+  __ push(0); // space for return value
+  __ push(rdi);
+  __ push(rsi);
+  __ lea(rsi, Address(rsp, 16)); // pass return value address
+  __ push(rdx);
+  __ push(rcx);
+  __ push(r8);
+  __ push(r9);
+  __ push(r10);
+  // XXX
+  __ mov(r10, rsp);
+  __ andptr(rsp, -16);
+  __ push(r10);
+  __ push(r11);
+  __ movl(rdi, ThreadLocalStorage::thread_index());
+  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
+  __ pop(r11);
+  __ pop(rsp);
+  __ pop(r10);
+  __ pop(r9);
+  __ pop(r8);
+  __ pop(rcx);
+  __ pop(rdx);
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(thread); // load return value
+  if (thread != rax) {
+    __ pop(rax);
+  }
+}
+#endif //LP64
+void MacroAssembler::get_thread(Register thread) {
+  int segment = NOT_LP64(Assembler::GS_segment) LP64_ONLY(Assembler::FS_segment);
+  // Try to emit a Solaris-specific fast TSD/TLS accessor.
+  ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode ();
+  if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) {         // T1
+    // Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset]
+    emit_byte (segment);
+    // ExternalAddress doesn't work because it can't take NULL
+    AddressLiteral null(0, relocInfo::none);
+    movptr (thread, null);
+    movptr(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ;
+    return ;
+  } else
+  if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) {           // T2
+    // mov r, gs:[tlsOffset]
+    emit_byte (segment);
+    AddressLiteral tls_off((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none);
+    movptr (thread, tls_off);
+    return ;
+  }
+  slow_call_thr_specific(this, thread);
 }
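The three access strategies get_thread() now dispatches between can be modeled roughly like this (a hedged sketch with invented names; the two fast modes really read through a segment register such as %gs or %fs, which plain C++ cannot express directly):

    enum TlsAccessMode { kIndirect /* T1 */, kDirect /* T2 */, kSlowCall };

    void* read_thread_slot(TlsAccessMode mode, char* segment_base, long tls_offset) {
        switch (mode) {
        case kIndirect: {                       // mov r, gs:[0]; mov r, [r + tlsOffset]
            char* tsd = *(char**)segment_base;
            return *(void**)(tsd + tls_offset);
        }
        case kDirect:                           // mov r, gs:[tlsOffset]
            return *(void**)(segment_base + tls_offset);
        default:
            return nullptr;                     // stands in for slow_call_thr_specific()
        }
    }
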

View File

@ -1,87 +0,0 @@
/*
* Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_assembler_solaris_x86_64.cpp.incl"
void MacroAssembler::int3() {
call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
void MacroAssembler::get_thread(Register thread) {
// Try to emit a Solaris-specific fast TSD/TLS accessor.
ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode();
if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) { // T1
// Use thread as a temporary: mov r, fs:[0]; mov r, [r+tlsOffset]
emit_byte(Assembler::FS_segment);
movq(thread, Address(NULL, relocInfo::none));
movq(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset()));
return;
} else if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) { // T2
// mov r, fs:[tlsOffset]
emit_byte(Assembler::FS_segment);
ExternalAddress tls_off((address) ThreadLocalStorage::pd_getTlsOffset());
movptr(thread, tls_off);
return;
}
// slow call to of thr_getspecific
// int thr_getspecific(thread_key_t key, void **value);
// Consider using pthread_getspecific instead.
if (thread != rax) {
pushq(rax);
}
pushq(0); // space for return value
pushq(rdi);
pushq(rsi);
leaq(rsi, Address(rsp, 16)); // pass return value address
pushq(rdx);
pushq(rcx);
pushq(r8);
pushq(r9);
pushq(r10);
// XXX
movq(r10, rsp);
andq(rsp, -16);
pushq(r10);
pushq(r11);
movl(rdi, ThreadLocalStorage::thread_index());
call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
popq(r11);
popq(rsp);
popq(r10);
popq(r9);
popq(r8);
popq(rcx);
popq(rdx);
popq(rsi);
popq(rdi);
popq(thread); // load return value
if (thread != rax) {
popq(rax);
}
}

View File

@@ -62,13 +62,13 @@ encode %{
   enc_class solaris_breakpoint %{
     MacroAssembler* masm = new MacroAssembler(&cbuf);
     // Really need to fix this
-    masm->pushl(rax);
-    masm->pushl(rcx);
-    masm->pushl(rdx);
+    masm->push(rax);
+    masm->push(rcx);
+    masm->push(rdx);
     masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-    masm->popl(rdx);
-    masm->popl(rcx);
-    masm->popl(rax);
+    masm->pop(rdx);
+    masm->pop(rcx);
+    masm->pop(rax);
   %}
   enc_class call_epilog %{

View File

@@ -23,13 +23,14 @@
  */
 #include "incls/_precompiled.incl"
-#include "incls/_assembler_windows_x86_32.cpp.incl"
+#include "incls/_assembler_windows_x86.cpp.incl"
 void MacroAssembler::int3() {
   emit_byte(0xCC);
 }
+#ifndef _LP64
 // The current scheme to accelerate access to the thread
 // pointer is to store the current thread in the os_exception_wrapper
 // and reference the current thread from stubs and compiled code
@@ -58,3 +59,40 @@ void MacroAssembler::get_thread(Register thread) {
          "Thread Pointer Offset has not been initialized");
   movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset()));
 }
+#else
+// call (Thread*)TlsGetValue(thread_index());
+void MacroAssembler::get_thread(Register thread) {
+  if (thread != rax) {
+    push(rax);
+  }
+  push(rdi);
+  push(rsi);
+  push(rdx);
+  push(rcx);
+  push(r8);
+  push(r9);
+  push(r10);
+  // XXX
+  mov(r10, rsp);
+  andq(rsp, -16);
+  push(r10);
+  push(r11);
+  movl(c_rarg0, ThreadLocalStorage::thread_index());
+  call(RuntimeAddress((address)TlsGetValue));
+  pop(r11);
+  pop(rsp);
+  pop(r10);
+  pop(r9);
+  pop(r8);
+  pop(rcx);
+  pop(rdx);
+  pop(rsi);
+  pop(rdi);
+  if (thread != rax) {
+    mov(thread, rax);
+    pop(rax);
+  }
+}
+#endif

View File

@@ -369,7 +369,7 @@ frame os::current_frame() {
   // apparently _asm not supported on windows amd64
   typedef intptr_t* get_fp_func ();
   get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
-                                     StubRoutines::amd64::get_previous_fp_entry());
+                                     StubRoutines::x86::get_previous_fp_entry());
   if (func == NULL) return frame(NULL, NULL, NULL);
   intptr_t* fp = (*func)();
 #else

View File

@@ -1848,6 +1848,19 @@ void ArchDesc::declareClasses(FILE *fp) {
       fprintf(fp,"  const Type            *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveP\n",
         offset, offset+1, offset+1);
     }
+    else if( instr->_matrule && instr->_matrule->_rChild && !strcmp(instr->_matrule->_rChild->_opType,"CMoveN") ) {
+      int offset = 1;
+      // Special special hack to see if the Cmp? has been incorporated in the conditional move
+      MatchNode *rl = instr->_matrule->_rChild->_lChild;
+      if( rl && !strcmp(rl->_opType, "Binary") ) {
+        MatchNode *rlr = rl->_rChild;
+        if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
+          offset = 2;
+      }
+      // Special hack for ideal CMoveN; ideal type depends on inputs
+      fprintf(fp,"  const Type            *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
+        offset, offset+1, offset+1);
+    }
     else if( instr->needs_base_oop_edge(_globalNames) ) {
       // Special hack for ideal AddP.  Bottom type is an oop IFF it has a
       // legal base-pointer input.  Otherwise it is NOT an oop.
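For concreteness, when the Cmp has been folded into the CMoveN (offset == 2) the fprintf above drops a method of this shape into the generated node class (reflowed here for readability):

    const Type *bottom_type() const {
      const Type *t = in(oper_input_base()+2)->bottom_type();
      return (req() <= oper_input_base()+3) ? t
                                            : t->meet(in(oper_input_base()+3)->bottom_type());
    } // CMoveN
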

View File

@@ -278,7 +278,7 @@ ByteSize FrameMap::sp_offset_for_spill(const int index) const {
 ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
   int end_of_spills = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
     _num_spills * spill_slot_size_in_bytes;
-  int offset = round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
+  int offset = (int) round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
   return in_ByteSize(offset);
 }

View File

@@ -37,7 +37,7 @@ Register LIR_OprDesc::as_register_hi() const {
   return FrameMap::cpu_rnr2reg(cpu_regnrHi());
 }
-#ifdef IA32
+#if defined(X86)
 XMMRegister LIR_OprDesc::as_xmm_float_reg() const {
   return FrameMap::nr2xmmreg(xmm_regnr());
@@ -48,7 +48,7 @@ XMMRegister LIR_OprDesc::as_xmm_double_reg() const {
   return FrameMap::nr2xmmreg(xmm_regnrLo());
 }
-#endif
+#endif // X86
 #ifdef SPARC
@@ -81,7 +81,7 @@ LIR_Opr LIR_OprFact::value_type(ValueType* type) {
     case floatTag  : return LIR_OprFact::floatConst(type->as_FloatConstant()->value());
     case longTag   : return LIR_OprFact::longConst(type->as_LongConstant()->value());
     case doubleTag : return LIR_OprFact::doubleConst(type->as_DoubleConstant()->value());
-    default: ShouldNotReachHere();
+    default: ShouldNotReachHere(); return LIR_OprFact::intConst(-1);
   }
 }
@@ -94,7 +94,7 @@ LIR_Opr LIR_OprFact::dummy_value_type(ValueType* type) {
     case floatTag:  return LIR_OprFact::floatConst(0.0);
     case longTag:   return LIR_OprFact::longConst(0);
     case doubleTag: return LIR_OprFact::doubleConst(0.0);
-    default: ShouldNotReachHere();
+    default: ShouldNotReachHere(); return LIR_OprFact::intConst(-1);
   }
   return illegalOpr;
 }
@@ -162,6 +162,7 @@ char LIR_OprDesc::type_char(BasicType t) {
     default:
       ShouldNotReachHere();
+      return '?';
   }
 }
@@ -1374,7 +1375,7 @@ void LIR_OprDesc::print(outputStream* out) const {
   } else if (is_double_cpu()) {
     out->print(as_register_hi()->name());
     out->print(as_register_lo()->name());
-#ifdef IA32
+#if defined(X86)
   } else if (is_single_xmm()) {
     out->print(as_xmm_float_reg()->name());
   } else if (is_double_xmm()) {

View File

@@ -135,6 +135,13 @@ class LIR_Const: public LIR_OprPtr {
       return as_jint_hi();
     }
   }
+  jlong      as_jlong_bits() const    {
+    if (type() == T_DOUBLE) {
+      return jlong_cast(_value.get_jdouble());
+    } else {
+      return as_jlong();
+    }
+  }
   virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
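The new as_jlong_bits() returns the raw IEEE-754 bit pattern of a double as a jlong, which is what jlong_cast does. The portable way to spell that reinterpretation, for reference (illustrative helper):

    #include <cstdint>
    #include <cstring>

    int64_t double_bits(double d) {
        int64_t bits;
        std::memcpy(&bits, &d, sizeof bits);  // bit copy, no numeric conversion
        return bits;
    }
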
@@ -302,6 +309,7 @@ class LIR_OprDesc: public CompilationResourceObj {
       default:
         ShouldNotReachHere();
+        return single_size;
     }
   }
@@ -417,12 +425,12 @@ class LIR_OprDesc: public CompilationResourceObj {
     return as_register();
   }
-#ifdef IA32
+#ifdef X86
   XMMRegister as_xmm_float_reg() const;
   XMMRegister as_xmm_double_reg() const;
   // for compatibility with RInfo
   int fpu () const { return lo_reg_half(); }
-#endif
+#endif // X86
 #ifdef SPARC
   FloatRegister as_float_reg () const;
@@ -503,14 +511,14 @@ class LIR_Address: public LIR_OprPtr {
   , _type(type)
   , _disp(disp) { verify(); }
-#ifdef IA32
+#ifdef X86
   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
        _base(base)
      , _index(index)
      , _scale(scale)
      , _type(type)
      , _disp(disp) { verify(); }
-#endif
+#endif // X86
   LIR_Opr base()  const { return _base;  }
   LIR_Opr index() const { return _index; }
@@ -535,31 +543,93 @@ class LIR_OprFact: public AllStatic {
   static LIR_Opr illegalOpr;
-  static LIR_Opr single_cpu(int reg)     { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
-  static LIR_Opr single_cpu_oop(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
-  static LIR_Opr double_cpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::long_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
+  static LIR_Opr single_cpu(int reg)     { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
+  static LIR_Opr single_cpu_oop(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
+  static LIR_Opr double_cpu(int reg1, int reg2) {
+    LP64_ONLY(assert(reg1 == reg2, "must be identical"));
+    return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
+                               (reg2 << LIR_OprDesc::reg2_shift) |
+                               LIR_OprDesc::long_type |
+                               LIR_OprDesc::cpu_register |
+                               LIR_OprDesc::double_size);
+  }
-  static LIR_Opr single_fpu(int reg)     { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size); }
+  static LIR_Opr single_fpu(int reg)     { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+                                                                      LIR_OprDesc::float_type |
+                                                                      LIR_OprDesc::fpu_register |
+                                                                      LIR_OprDesc::single_size); }
 #ifdef SPARC
-  static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
-#endif
-#ifdef IA32
-  static LIR_Opr double_fpu(int reg)  { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | (reg << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
-  static LIR_Opr single_xmm(int reg)  { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type  | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::is_xmm_mask); }
-  static LIR_Opr double_xmm(int reg)  { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | (reg << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::is_xmm_mask); }
+  static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
+                                                                             (reg2 << LIR_OprDesc::reg2_shift) |
+                                                                             LIR_OprDesc::double_type |
+                                                                             LIR_OprDesc::fpu_register |
+                                                                             LIR_OprDesc::double_size); }
 #endif
+#ifdef X86
+  static LIR_Opr double_fpu(int reg)  { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+                                                                   (reg << LIR_OprDesc::reg2_shift) |
+                                                                   LIR_OprDesc::double_type |
+                                                                   LIR_OprDesc::fpu_register |
+                                                                   LIR_OprDesc::double_size); }
+  static LIR_Opr single_xmm(int reg)  { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+                                                                   LIR_OprDesc::float_type |
+                                                                   LIR_OprDesc::fpu_register |
+                                                                   LIR_OprDesc::single_size |
+                                                                   LIR_OprDesc::is_xmm_mask); }
+  static LIR_Opr double_xmm(int reg)  { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+                                                                   (reg << LIR_OprDesc::reg2_shift) |
+                                                                   LIR_OprDesc::double_type |
+                                                                   LIR_OprDesc::fpu_register |
+                                                                   LIR_OprDesc::double_size |
+                                                                   LIR_OprDesc::is_xmm_mask); }
+#endif // X86
static LIR_Opr virtual_register(int index, BasicType type) { static LIR_Opr virtual_register(int index, BasicType type) {
LIR_Opr res; LIR_Opr res;
switch (type) { switch (type) {
case T_OBJECT: // fall through case T_OBJECT: // fall through
case T_ARRAY: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break; case T_ARRAY:
case T_INT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break; res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
case T_LONG: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break; LIR_OprDesc::object_type |
case T_FLOAT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break; LIR_OprDesc::cpu_register |
case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break; LIR_OprDesc::single_size |
LIR_OprDesc::virtual_mask);
break;
case T_INT:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::int_type |
LIR_OprDesc::cpu_register |
LIR_OprDesc::single_size |
LIR_OprDesc::virtual_mask);
break;
case T_LONG:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::long_type |
LIR_OprDesc::cpu_register |
LIR_OprDesc::double_size |
LIR_OprDesc::virtual_mask);
break;
case T_FLOAT:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::float_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::single_size |
LIR_OprDesc::virtual_mask);
break;
case T_DOUBLE:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::fpu_register |
LIR_OprDesc::double_size |
LIR_OprDesc::virtual_mask);
break;
default: ShouldNotReachHere(); res = illegalOpr; default: ShouldNotReachHere(); res = illegalOpr;
} }
@ -572,8 +642,8 @@ class LIR_OprFact: public AllStatic {
// old-style calculation; check if old and new method are equal // old-style calculation; check if old and new method are equal
LIR_OprDesc::OprType t = as_OprType(type); LIR_OprDesc::OprType t = as_OprType(type);
LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | t | LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) | ((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask); LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
assert(res == old_res, "old and new method not equal"); assert(res == old_res, "old and new method not equal");
#endif #endif
@ -588,11 +658,39 @@ class LIR_OprFact: public AllStatic {
LIR_Opr res; LIR_Opr res;
switch (type) { switch (type) {
case T_OBJECT: // fall through case T_OBJECT: // fall through
case T_ARRAY: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break; case T_ARRAY:
case T_INT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break; res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
case T_LONG: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break; LIR_OprDesc::object_type |
case T_FLOAT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break; LIR_OprDesc::stack_value |
case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break; LIR_OprDesc::single_size);
break;
case T_INT:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::int_type |
LIR_OprDesc::stack_value |
LIR_OprDesc::single_size);
break;
case T_LONG:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::long_type |
LIR_OprDesc::stack_value |
LIR_OprDesc::double_size);
break;
case T_FLOAT:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::float_type |
LIR_OprDesc::stack_value |
LIR_OprDesc::single_size);
break;
case T_DOUBLE:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::double_type |
LIR_OprDesc::stack_value |
LIR_OprDesc::double_size);
break;
default: ShouldNotReachHere(); res = illegalOpr; default: ShouldNotReachHere(); res = illegalOpr;
} }
@ -601,7 +699,10 @@ class LIR_OprFact: public AllStatic {
assert(index >= 0, "index must be positive"); assert(index >= 0, "index must be positive");
assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big"); assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::stack_value | as_OprType(type) | LIR_OprDesc::size_for(type)); LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::stack_value |
as_OprType(type) |
LIR_OprDesc::size_for(type));
assert(res == old_res, "old and new method not equal"); assert(res == old_res, "old and new method not equal");
#endif #endif
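On the recurring (LIR_Opr)(intptr_t) change in the factory methods above: a minimal sketch of the encoding idiom, with a purely illustrative field layout. Widening the packed int through intptr_t first keeps the int-to-pointer conversion exact on LP64 targets, where casting a 32-bit int straight to a pointer is ill-sized and draws compiler warnings.

    #include <cstdint>

    struct OpaqueOpr;          // stand-in for LIR_OprDesc
    typedef OpaqueOpr* Opr;    // operands travel as an opaque pointer type

    enum { data_shift = 8, kind_mask = 0xFF };  // illustrative layout only

    static Opr make_opr(int index, int kind) {
      // Pack the fields into an int, widen to intptr_t, then cast to pointer.
      return (Opr)(intptr_t)((index << data_shift) | kind);
    }

    static int opr_index(Opr o) { return (int)((intptr_t)o >> data_shift); }
    static int opr_kind(Opr o)  { return (int)((intptr_t)o & kind_mask); }

    int main() {
      Opr o = make_opr(42, 3);
      return (opr_index(o) == 42 && opr_kind(o) == 3) ? 0 : 1;
    }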


@ -215,7 +215,7 @@ void LIR_Assembler::emit_block(BlockBegin* block) {
#endif /* PRODUCT */ #endif /* PRODUCT */
assert(block->lir() != NULL, "must have LIR"); assert(block->lir() != NULL, "must have LIR");
IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
#ifndef PRODUCT #ifndef PRODUCT
if (CommentedAssembly) { if (CommentedAssembly) {
@ -227,7 +227,7 @@ void LIR_Assembler::emit_block(BlockBegin* block) {
emit_lir_list(block->lir()); emit_lir_list(block->lir());
IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed")); X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
} }
@ -434,7 +434,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
break; break;
default: ShouldNotReachHere(); default: ShouldNotReachHere();
} }
#if defined(IA32) && defined(TIERED) #if defined(X86) && defined(TIERED)
// C2 leaves the fpu stack dirty; clean it // C2 leaves the fpu stack dirty; clean it
if (UseSSE < 2) { if (UseSSE < 2) {
int i; int i;
@ -445,7 +445,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
ffree(0); ffree(0);
} }
} }
#endif // IA32 && TIERED #endif // X86 && TIERED
} }


@ -75,9 +75,9 @@ class LIR_Assembler: public CompilationResourceObj {
void emit_stubs(CodeStubList* stub_list); void emit_stubs(CodeStubList* stub_list);
// addresses // addresses
static Address as_Address(LIR_Address* addr); Address as_Address(LIR_Address* addr);
static Address as_Address_lo(LIR_Address* addr); Address as_Address_lo(LIR_Address* addr);
static Address as_Address_hi(LIR_Address* addr); Address as_Address_hi(LIR_Address* addr);
// debug information // debug information
void add_call_info(int pc_offset, CodeEmitInfo* cinfo); void add_call_info(int pc_offset, CodeEmitInfo* cinfo);


@ -1717,7 +1717,7 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
assert(log2_scale == 0, "must not have a scale"); assert(log2_scale == 0, "must not have a scale");
addr = new LIR_Address(base_op, index_op->as_jint(), dst_type); addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
} else { } else {
#ifdef IA32 #ifdef X86
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type); addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#else #else
if (index_op->is_illegal() || log2_scale == 0) { if (index_op->is_illegal() || log2_scale == 0) {


@ -80,7 +80,7 @@ LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
, _scope_value_cache(0) // initialized later with correct length , _scope_value_cache(0) // initialized later with correct length
, _interval_in_loop(0, 0) // initialized later with correct length , _interval_in_loop(0, 0) // initialized later with correct length
, _cached_blocks(*ir->linear_scan_order()) , _cached_blocks(*ir->linear_scan_order())
#ifdef IA32 #ifdef X86
, _fpu_stack_allocator(NULL) , _fpu_stack_allocator(NULL)
#endif #endif
{ {
@ -116,7 +116,7 @@ int LinearScan::reg_num(LIR_Opr opr) {
return opr->cpu_regnr(); return opr->cpu_regnr();
} else if (opr->is_double_cpu()) { } else if (opr->is_double_cpu()) {
return opr->cpu_regnrLo(); return opr->cpu_regnrLo();
#ifdef IA32 #ifdef X86
} else if (opr->is_single_xmm()) { } else if (opr->is_single_xmm()) {
return opr->fpu_regnr() + pd_first_xmm_reg; return opr->fpu_regnr() + pd_first_xmm_reg;
} else if (opr->is_double_xmm()) { } else if (opr->is_double_xmm()) {
@ -128,6 +128,7 @@ int LinearScan::reg_num(LIR_Opr opr) {
return opr->fpu_regnrLo() + pd_first_fpu_reg; return opr->fpu_regnrLo() + pd_first_fpu_reg;
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
return -1;
} }
} }
@ -140,7 +141,7 @@ int LinearScan::reg_numHi(LIR_Opr opr) {
return -1; return -1;
} else if (opr->is_double_cpu()) { } else if (opr->is_double_cpu()) {
return opr->cpu_regnrHi(); return opr->cpu_regnrHi();
#ifdef IA32 #ifdef X86
} else if (opr->is_single_xmm()) { } else if (opr->is_single_xmm()) {
return -1; return -1;
} else if (opr->is_double_xmm()) { } else if (opr->is_double_xmm()) {
@ -152,6 +153,7 @@ int LinearScan::reg_numHi(LIR_Opr opr) {
return opr->fpu_regnrHi() + pd_first_fpu_reg; return opr->fpu_regnrHi() + pd_first_fpu_reg;
} else { } else {
ShouldNotReachHere(); ShouldNotReachHere();
return -1;
} }
} }
@ -1063,7 +1065,7 @@ IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
} }
#ifdef IA32 #ifdef X86
if (op->code() == lir_cmove) { if (op->code() == lir_cmove) {
// conditional moves can handle stack operands // conditional moves can handle stack operands
assert(op->result_opr()->is_register(), "result must always be in a register"); assert(op->result_opr()->is_register(), "result must always be in a register");
@ -1128,7 +1130,7 @@ IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
} }
} }
} }
#endif // IA32 #endif // X86
// all other operands require a register // all other operands require a register
return mustHaveRegister; return mustHaveRegister;
@ -1261,7 +1263,7 @@ void LinearScan::build_intervals() {
// virtual fpu operands. Otherwise no allocation for fpu registers is // virtual fpu operands. Otherwise no allocation for fpu registers is
// performed and so the temp ranges would be useless // performed and so the temp ranges would be useless
if (has_fpu_registers()) { if (has_fpu_registers()) {
#ifdef IA32 #ifdef X86
if (UseSSE < 2) { if (UseSSE < 2) {
#endif #endif
for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) { for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
@ -1270,7 +1272,7 @@ void LinearScan::build_intervals() {
assert(reg_numHi(opr) == -1, "missing addition of range for hi-register"); assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
caller_save_registers[num_caller_save_registers++] = reg_num(opr); caller_save_registers[num_caller_save_registers++] = reg_num(opr);
} }
#ifdef IA32 #ifdef X86
} }
if (UseSSE > 0) { if (UseSSE > 0) {
for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) { for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
@ -1299,8 +1301,8 @@ void LinearScan::build_intervals() {
// Update intervals for registers live at the end of this block; // Update intervals for registers live at the end of this block;
BitMap live = block->live_out(); BitMap live = block->live_out();
int size = live.size(); int size = (int)live.size();
for (int number = live.get_next_one_offset(0, size); number < size; number = live.get_next_one_offset(number + 1, size)) { for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
assert(live.at(number), "should not stop here otherwise"); assert(live.at(number), "should not stop here otherwise");
assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds"); assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2)); TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));
@ -1654,7 +1656,7 @@ void LinearScan::resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to
const BitMap live_at_edge = to_block->live_in(); const BitMap live_at_edge = to_block->live_in();
// visit all registers where the live_at_edge bit is set // visit all registers where the live_at_edge bit is set
for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) { for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
assert(r < num_regs, "live information set for non-existing interval"); assert(r < num_regs, "live information set for non-existing interval");
assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge"); assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
@ -1824,7 +1826,7 @@ void LinearScan::resolve_exception_entry(BlockBegin* block, MoveResolver &move_r
// visit all registers where the live_in bit is set // visit all registers where the live_in bit is set
int size = live_set_size(); int size = live_set_size();
for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) { for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
resolve_exception_entry(block, r, move_resolver); resolve_exception_entry(block, r, move_resolver);
} }
@ -1898,7 +1900,7 @@ void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, M
// visit all registers where the live_in bit is set // visit all registers where the live_in bit is set
BlockBegin* block = handler->entry_block(); BlockBegin* block = handler->entry_block();
int size = live_set_size(); int size = live_set_size();
for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) { for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver); resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
} }
@ -2032,19 +2034,19 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even"); assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
} }
#ifdef SPARC
#ifdef _LP64 #ifdef _LP64
return LIR_OprFact::double_cpu(assigned_reg, assigned_reg); return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
#else #else
#ifdef SPARC
return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg); return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
#endif
#else #else
return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi); return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
#endif #endif // SPARC
#endif // LP64
} }
case T_FLOAT: { case T_FLOAT: {
#ifdef IA32 #ifdef X86
if (UseSSE >= 1) { if (UseSSE >= 1) {
assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register"); assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register"); assert(interval->assigned_regHi() == any_reg, "must not have hi register");
@ -2058,7 +2060,7 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
} }
case T_DOUBLE: { case T_DOUBLE: {
#ifdef IA32 #ifdef X86
if (UseSSE >= 2) { if (UseSSE >= 2) {
assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register"); assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)"); assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
@ -2122,7 +2124,7 @@ LIR_Opr LinearScan::color_lir_opr(LIR_Opr opr, int op_id, LIR_OpVisitState::OprM
LIR_Opr res = operand_for_interval(interval); LIR_Opr res = operand_for_interval(interval);
#ifdef IA32 #ifdef X86
// new semantic for is_last_use: not only set on definite end of interval, // new semantic for is_last_use: not only set on definite end of interval,
// but also before hole // but also before hole
// This may still miss some cases (e.g. for dead values), but it is not necessary that the // This may still miss some cases (e.g. for dead values), but it is not necessary that the
@ -2475,6 +2477,7 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<Scope
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
return -1;
} }
} }
@ -2515,7 +2518,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
scope_values->append(sv); scope_values->append(sv);
return 1; return 1;
#ifdef IA32 #ifdef X86
} else if (opr->is_single_xmm()) { } else if (opr->is_single_xmm()) {
VMReg rname = opr->as_xmm_float_reg()->as_VMReg(); VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname)); LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
@ -2525,7 +2528,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
#endif #endif
} else if (opr->is_single_fpu()) { } else if (opr->is_single_fpu()) {
#ifdef IA32 #ifdef X86
// the exact location of fpu stack values is only known // the exact location of fpu stack values is only known
// during fpu stack allocation, so the stack allocator object // during fpu stack allocation, so the stack allocator object
// must be present // must be present
@ -2548,12 +2551,23 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
ScopeValue* second; ScopeValue* second;
if (opr->is_double_stack()) { if (opr->is_double_stack()) {
#ifdef _LP64
Location loc1;
Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
bailout("too large frame");
}
// Does this reverse on x86 vs. sparc?
first = new LocationValue(loc1);
second = &_int_0_scope_value;
#else
Location loc1, loc2; Location loc1, loc2;
if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) { if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
bailout("too large frame"); bailout("too large frame");
} }
first = new LocationValue(loc1); first = new LocationValue(loc1);
second = new LocationValue(loc2); second = new LocationValue(loc2);
#endif // _LP64
} else if (opr->is_double_cpu()) { } else if (opr->is_double_cpu()) {
#ifdef _LP64 #ifdef _LP64
@ -2573,9 +2587,10 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first)); first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second)); second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
#endif #endif //_LP64
#ifdef IA32
#ifdef X86
} else if (opr->is_double_xmm()) { } else if (opr->is_double_xmm()) {
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation"); assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg(); VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
@ -2589,13 +2604,13 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
} else if (opr->is_double_fpu()) { } else if (opr->is_double_fpu()) {
// On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
// the double as float registers in the native ordering. On IA32, // the double as float registers in the native ordering. On X86,
// fpu_regnrLo is a FPU stack slot whose VMReg represents // fpu_regnrLo is a FPU stack slot whose VMReg represents
// the low-order word of the double and fpu_regnrLo + 1 is the // the low-order word of the double and fpu_regnrLo + 1 is the
// name for the other half. *first and *second must represent the // name for the other half. *first and *second must represent the
// least and most significant words, respectively. // least and most significant words, respectively.
#ifdef IA32 #ifdef X86
// the exact location of fpu stack values is only known // the exact location of fpu stack values is only known
// during fpu stack allocation, so the stack allocator object // during fpu stack allocation, so the stack allocator object
// must be present // must be present
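On the word-ordering comment above: a minimal sketch of splitting a double into the least and most significant 32-bit words, which is the order *first and *second must represent. The helper name is hypothetical.

    #include <cstdint>
    #include <cstring>

    static void split_double(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      *lo = (uint32_t)(bits & 0xFFFFFFFFu);  // least significant word
      *hi = (uint32_t)(bits >> 32);          // most significant word
    }

    int main() {
      uint32_t lo, hi;
      split_double(1.0, &lo, &hi);
      // 1.0 is 0x3FF0000000000000: the high word carries the exponent bits.
      return (hi == 0x3FF00000u && lo == 0u) ? 0 : 1;
    }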
@ -2865,7 +2880,6 @@ void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
op->verify(); op->verify();
#endif #endif
#ifndef _LP64
// remove useless moves // remove useless moves
if (op->code() == lir_move) { if (op->code() == lir_move) {
assert(op->as_Op1() != NULL, "move must be LIR_Op1"); assert(op->as_Op1() != NULL, "move must be LIR_Op1");
@ -2879,7 +2893,6 @@ void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
has_dead = true; has_dead = true;
} }
} }
#endif
} }
if (has_dead) { if (has_dead) {
@ -3192,7 +3205,7 @@ void LinearScan::verify_constants() {
BitMap live_at_edge = block->live_in(); BitMap live_at_edge = block->live_in();
// visit all registers where the live_at_edge bit is set // visit all registers where the live_at_edge bit is set
for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) { for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id())); TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));
Value value = gen()->instruction_for_vreg(r); Value value = gen()->instruction_for_vreg(r);
@ -3438,7 +3451,7 @@ void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_sta
state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL); state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
} }
#ifdef IA32 #ifdef X86
for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) { for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL); state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
} }
@ -4357,7 +4370,7 @@ void Interval::print(outputStream* out) const {
opr = LIR_OprFact::single_cpu(assigned_reg()); opr = LIR_OprFact::single_cpu(assigned_reg());
} else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) { } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg); opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
#ifdef IA32 #ifdef X86
} else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) { } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg); opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
#endif #endif
@ -5435,7 +5448,7 @@ void LinearScanWalker::alloc_locked_reg(Interval* cur) {
} }
bool LinearScanWalker::no_allocation_possible(Interval* cur) { bool LinearScanWalker::no_allocation_possible(Interval* cur) {
#ifdef IA32 #ifdef X86
// fast calculation of intervals that can never get a register because the // fast calculation of intervals that can never get a register because the
// next instruction is a call that blocks all registers // next instruction is a call that blocks all registers
// Note: this does not work if callee-saved registers are available (e.g. on Sparc) // Note: this does not work if callee-saved registers are available (e.g. on Sparc)


@ -177,7 +177,7 @@ class LinearScan : public CompilationResourceObj {
bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); } bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }
// handling of fpu stack allocation (platform dependent, needed for debug information generation) // handling of fpu stack allocation (platform dependent, needed for debug information generation)
#ifdef IA32 #ifdef X86
FpuStackAllocator* _fpu_stack_allocator; FpuStackAllocator* _fpu_stack_allocator;
bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); } bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }
#else #else


@ -336,21 +336,6 @@ JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, klassOopDesc* klas
assert(oop(klass)->is_klass(), "not a class"); assert(oop(klass)->is_klass(), "not a class");
assert(rank >= 1, "rank must be nonzero"); assert(rank >= 1, "rank must be nonzero");
#ifdef _LP64
// In 64 bit mode, the sizes are stored in the top 32 bits
// of each 64 bit stack entry.
// dims is actually an intptr_t * because the arguments
// are pushed onto a 64 bit stack.
// We must create an array of jints to pass to multi_allocate.
// We reuse the current stack because it will be popped
// after this bytecode is completed.
if ( rank > 1 ) {
int index;
for ( index = 1; index < rank; index++ ) { // First size is ok
dims[index] = dims[index*2];
}
}
#endif
oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK); oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
thread->set_vm_result(obj); thread->set_vm_result(obj);
JRT_END JRT_END


@ -127,7 +127,7 @@ public:
// Used as a combined index for locals and temps // Used as a combined index for locals and temps
enum Cell { enum Cell {
Cell_0 Cell_0, Cell_max = INT_MAX
}; };
// A StateVector summarizes the type information at some // A StateVector summarizes the type information at some


@ -1200,11 +1200,13 @@ class section_word_Relocation : public internal_word_Relocation {
class poll_Relocation : public Relocation { class poll_Relocation : public Relocation {
bool is_data() { return true; } bool is_data() { return true; }
relocInfo::relocType type() { return relocInfo::poll_type; } relocInfo::relocType type() { return relocInfo::poll_type; }
void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
}; };
class poll_return_Relocation : public Relocation { class poll_return_Relocation : public Relocation {
bool is_data() { return true; } bool is_data() { return true; }
relocInfo::relocType type() { return relocInfo::poll_return_type; } relocInfo::relocType type() { return relocInfo::poll_return_type; }
void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
}; };


@ -229,7 +229,7 @@ void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
HeapWord* first_card_start = _bsa->address_for_index(first_card_index); HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
if (first_card_start < pre_top) { if (first_card_start < pre_top) {
HeapWord* second_card_start = HeapWord* second_card_start =
_bsa->address_for_index(first_card_index + 1); _bsa->inc_by_region_size(first_card_start);
// Ensure enough room to fill with the smallest block // Ensure enough room to fill with the smallest block
second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve); second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);


@ -258,6 +258,7 @@ c1_LIRGenerator_<arch>.cpp ciArray.hpp
c1_LIRGenerator_<arch>.cpp ciObjArrayKlass.hpp c1_LIRGenerator_<arch>.cpp ciObjArrayKlass.hpp
c1_LIRGenerator_<arch>.cpp ciTypeArrayKlass.hpp c1_LIRGenerator_<arch>.cpp ciTypeArrayKlass.hpp
c1_LIRGenerator_<arch>.cpp sharedRuntime.hpp c1_LIRGenerator_<arch>.cpp sharedRuntime.hpp
c1_LIRGenerator_<arch>.cpp vmreg_<arch>.inline.hpp
c1_LinearScan.cpp c1_CFGPrinter.hpp c1_LinearScan.cpp c1_CFGPrinter.hpp
c1_LinearScan.cpp c1_Compilation.hpp c1_LinearScan.cpp c1_Compilation.hpp
@ -281,7 +282,7 @@ c1_LinearScan_<arch>.cpp c1_LinearScan.hpp
c1_LinearScan_<arch>.hpp generate_platform_dependent_include c1_LinearScan_<arch>.hpp generate_platform_dependent_include
c1_MacroAssembler.hpp assembler.hpp c1_MacroAssembler.hpp assembler.hpp
c1_MacroAssembler.hpp assembler_<arch_model>.inline.hpp c1_MacroAssembler.hpp assembler_<arch>.inline.hpp
c1_MacroAssembler_<arch>.cpp arrayOop.hpp c1_MacroAssembler_<arch>.cpp arrayOop.hpp
c1_MacroAssembler_<arch>.cpp biasedLocking.hpp c1_MacroAssembler_<arch>.cpp biasedLocking.hpp


@ -26,7 +26,7 @@ ad_<arch_model>.cpp adGlobals_<arch_model>.hpp
ad_<arch_model>.cpp ad_<arch_model>.hpp ad_<arch_model>.cpp ad_<arch_model>.hpp
ad_<arch_model>.cpp allocation.inline.hpp ad_<arch_model>.cpp allocation.inline.hpp
ad_<arch_model>.cpp assembler.hpp ad_<arch_model>.cpp assembler.hpp
ad_<arch_model>.cpp assembler_<arch_model>.inline.hpp ad_<arch_model>.cpp assembler_<arch>.inline.hpp
ad_<arch_model>.cpp biasedLocking.hpp ad_<arch_model>.cpp biasedLocking.hpp
ad_<arch_model>.cpp cfgnode.hpp ad_<arch_model>.cpp cfgnode.hpp
ad_<arch_model>.cpp collectedHeap.inline.hpp ad_<arch_model>.cpp collectedHeap.inline.hpp
@ -957,7 +957,7 @@ runtime.hpp vframe.hpp
runtime_<arch_model>.cpp adGlobals_<arch_model>.hpp runtime_<arch_model>.cpp adGlobals_<arch_model>.hpp
runtime_<arch_model>.cpp ad_<arch_model>.hpp runtime_<arch_model>.cpp ad_<arch_model>.hpp
runtime_<arch_model>.cpp assembler.hpp runtime_<arch_model>.cpp assembler.hpp
runtime_<arch_model>.cpp assembler_<arch_model>.inline.hpp runtime_<arch_model>.cpp assembler_<arch>.inline.hpp
runtime_<arch_model>.cpp globalDefinitions.hpp runtime_<arch_model>.cpp globalDefinitions.hpp
runtime_<arch_model>.cpp interfaceSupport.hpp runtime_<arch_model>.cpp interfaceSupport.hpp
runtime_<arch_model>.cpp interpreter.hpp runtime_<arch_model>.cpp interpreter.hpp


@ -228,7 +228,7 @@ arrayOop.hpp universe.inline.hpp
assembler.cpp assembler.hpp assembler.cpp assembler.hpp
assembler.cpp assembler.inline.hpp assembler.cpp assembler.inline.hpp
assembler.cpp assembler_<arch_model>.inline.hpp assembler.cpp assembler_<arch>.inline.hpp
assembler.cpp codeBuffer.hpp assembler.cpp codeBuffer.hpp
assembler.cpp icache.hpp assembler.cpp icache.hpp
assembler.cpp os.hpp assembler.cpp os.hpp
@ -248,29 +248,29 @@ assembler.inline.hpp codeBuffer.hpp
assembler.inline.hpp disassembler.hpp assembler.inline.hpp disassembler.hpp
assembler.inline.hpp threadLocalStorage.hpp assembler.inline.hpp threadLocalStorage.hpp
assembler_<arch_model>.cpp assembler_<arch_model>.inline.hpp assembler_<arch>.cpp assembler_<arch>.inline.hpp
assembler_<arch_model>.cpp biasedLocking.hpp assembler_<arch>.cpp biasedLocking.hpp
assembler_<arch_model>.cpp cardTableModRefBS.hpp assembler_<arch>.cpp cardTableModRefBS.hpp
assembler_<arch_model>.cpp collectedHeap.inline.hpp assembler_<arch>.cpp collectedHeap.inline.hpp
assembler_<arch_model>.cpp interfaceSupport.hpp assembler_<arch>.cpp interfaceSupport.hpp
assembler_<arch_model>.cpp interpreter.hpp assembler_<arch>.cpp interpreter.hpp
assembler_<arch_model>.cpp objectMonitor.hpp assembler_<arch>.cpp objectMonitor.hpp
assembler_<arch_model>.cpp os.hpp assembler_<arch>.cpp os.hpp
assembler_<arch_model>.cpp resourceArea.hpp assembler_<arch>.cpp resourceArea.hpp
assembler_<arch_model>.cpp sharedRuntime.hpp assembler_<arch>.cpp sharedRuntime.hpp
assembler_<arch_model>.cpp stubRoutines.hpp assembler_<arch>.cpp stubRoutines.hpp
assembler_<arch_model>.hpp generate_platform_dependent_include assembler_<arch>.hpp generate_platform_dependent_include
assembler_<arch_model>.inline.hpp assembler.inline.hpp assembler_<arch>.inline.hpp assembler.inline.hpp
assembler_<arch_model>.inline.hpp codeBuffer.hpp assembler_<arch>.inline.hpp codeBuffer.hpp
assembler_<arch_model>.inline.hpp codeCache.hpp assembler_<arch>.inline.hpp codeCache.hpp
assembler_<arch_model>.inline.hpp handles.inline.hpp assembler_<arch>.inline.hpp handles.inline.hpp
assembler_<os_arch_model>.cpp assembler.hpp assembler_<os_arch>.cpp assembler.hpp
assembler_<os_arch_model>.cpp assembler_<arch_model>.inline.hpp assembler_<os_arch>.cpp assembler_<arch>.inline.hpp
assembler_<os_arch_model>.cpp os.hpp assembler_<os_arch>.cpp os.hpp
assembler_<os_arch_model>.cpp threadLocalStorage.hpp assembler_<os_arch>.cpp threadLocalStorage.hpp
atomic.cpp atomic.hpp atomic.cpp atomic.hpp
atomic.cpp atomic_<os_arch>.inline.hpp atomic.cpp atomic_<os_arch>.inline.hpp
@ -1926,7 +1926,7 @@ hpi_<os_family>.cpp os.hpp
hpi_imported.h jni.h hpi_imported.h jni.h
icBuffer.cpp assembler_<arch_model>.inline.hpp icBuffer.cpp assembler_<arch>.inline.hpp
icBuffer.cpp collectedHeap.inline.hpp icBuffer.cpp collectedHeap.inline.hpp
icBuffer.cpp compiledIC.hpp icBuffer.cpp compiledIC.hpp
icBuffer.cpp icBuffer.hpp icBuffer.cpp icBuffer.hpp
@ -1947,7 +1947,7 @@ icBuffer.hpp bytecodes.hpp
icBuffer.hpp stubs.hpp icBuffer.hpp stubs.hpp
icBuffer_<arch>.cpp assembler.hpp icBuffer_<arch>.cpp assembler.hpp
icBuffer_<arch>.cpp assembler_<arch_model>.inline.hpp icBuffer_<arch>.cpp assembler_<arch>.inline.hpp
icBuffer_<arch>.cpp bytecodes.hpp icBuffer_<arch>.cpp bytecodes.hpp
icBuffer_<arch>.cpp collectedHeap.inline.hpp icBuffer_<arch>.cpp collectedHeap.inline.hpp
icBuffer_<arch>.cpp icBuffer.hpp icBuffer_<arch>.cpp icBuffer.hpp
@ -1962,7 +1962,7 @@ icache.cpp resourceArea.hpp
icache.hpp allocation.hpp icache.hpp allocation.hpp
icache.hpp stubCodeGenerator.hpp icache.hpp stubCodeGenerator.hpp
icache_<arch>.cpp assembler_<arch_model>.inline.hpp icache_<arch>.cpp assembler_<arch>.inline.hpp
icache_<arch>.cpp icache.hpp icache_<arch>.cpp icache.hpp
icache_<arch>.hpp generate_platform_dependent_include icache_<arch>.hpp generate_platform_dependent_include
@ -2095,7 +2095,7 @@ interp_masm_<arch_model>.cpp sharedRuntime.hpp
interp_masm_<arch_model>.cpp synchronizer.hpp interp_masm_<arch_model>.cpp synchronizer.hpp
interp_masm_<arch_model>.cpp thread_<os_family>.inline.hpp interp_masm_<arch_model>.cpp thread_<os_family>.inline.hpp
interp_masm_<arch_model>.hpp assembler_<arch_model>.inline.hpp interp_masm_<arch_model>.hpp assembler_<arch>.inline.hpp
interp_masm_<arch_model>.hpp invocationCounter.hpp interp_masm_<arch_model>.hpp invocationCounter.hpp
interpreter.cpp allocation.inline.hpp interpreter.cpp allocation.inline.hpp
@ -2402,7 +2402,7 @@ jniFastGetField.cpp jniFastGetField.hpp
jniFastGetField.hpp allocation.hpp jniFastGetField.hpp allocation.hpp
jniFastGetField.hpp jvm_misc.hpp jniFastGetField.hpp jvm_misc.hpp
jniFastGetField_<arch_model>.cpp assembler_<arch_model>.inline.hpp jniFastGetField_<arch_model>.cpp assembler_<arch>.inline.hpp
jniFastGetField_<arch_model>.cpp jniFastGetField.hpp jniFastGetField_<arch_model>.cpp jniFastGetField.hpp
jniFastGetField_<arch_model>.cpp jvm_misc.hpp jniFastGetField_<arch_model>.cpp jvm_misc.hpp
jniFastGetField_<arch_model>.cpp resourceArea.hpp jniFastGetField_<arch_model>.cpp resourceArea.hpp
@ -2905,7 +2905,7 @@ mutex_<os_family>.inline.hpp interfaceSupport.hpp
mutex_<os_family>.inline.hpp os_<os_family>.inline.hpp mutex_<os_family>.inline.hpp os_<os_family>.inline.hpp
mutex_<os_family>.inline.hpp thread_<os_family>.inline.hpp mutex_<os_family>.inline.hpp thread_<os_family>.inline.hpp
nativeInst_<arch>.cpp assembler_<arch_model>.inline.hpp nativeInst_<arch>.cpp assembler_<arch>.inline.hpp
nativeInst_<arch>.cpp handles.hpp nativeInst_<arch>.cpp handles.hpp
nativeInst_<arch>.cpp nativeInst_<arch>.hpp nativeInst_<arch>.cpp nativeInst_<arch>.hpp
nativeInst_<arch>.cpp oop.hpp nativeInst_<arch>.cpp oop.hpp
@ -3174,7 +3174,7 @@ os.hpp top.hpp
os_<os_arch>.cpp allocation.inline.hpp os_<os_arch>.cpp allocation.inline.hpp
os_<os_arch>.cpp arguments.hpp os_<os_arch>.cpp arguments.hpp
os_<os_arch>.cpp assembler_<arch_model>.inline.hpp os_<os_arch>.cpp assembler_<arch>.inline.hpp
os_<os_arch>.cpp classLoader.hpp os_<os_arch>.cpp classLoader.hpp
os_<os_arch>.cpp events.hpp os_<os_arch>.cpp events.hpp
os_<os_arch>.cpp extendedPC.hpp os_<os_arch>.cpp extendedPC.hpp
@ -3208,7 +3208,7 @@ os_<os_arch>.hpp generate_platform_dependent_include
os_<os_family>.cpp allocation.inline.hpp os_<os_family>.cpp allocation.inline.hpp
os_<os_family>.cpp arguments.hpp os_<os_family>.cpp arguments.hpp
os_<os_family>.cpp assembler_<arch_model>.inline.hpp os_<os_family>.cpp assembler_<arch>.inline.hpp
os_<os_family>.cpp attachListener.hpp os_<os_family>.cpp attachListener.hpp
os_<os_family>.cpp classLoader.hpp os_<os_family>.cpp classLoader.hpp
os_<os_family>.cpp compileBroker.hpp os_<os_family>.cpp compileBroker.hpp
@ -3267,7 +3267,7 @@ osThread.hpp javaFrameAnchor.hpp
osThread.hpp objectMonitor.hpp osThread.hpp objectMonitor.hpp
osThread.hpp top.hpp osThread.hpp top.hpp
osThread_<os_family>.cpp assembler_<arch_model>.inline.hpp osThread_<os_family>.cpp assembler_<arch>.inline.hpp
osThread_<os_family>.cpp atomic.hpp osThread_<os_family>.cpp atomic.hpp
osThread_<os_family>.cpp handles.inline.hpp osThread_<os_family>.cpp handles.inline.hpp
osThread_<os_family>.cpp mutexLocker.hpp osThread_<os_family>.cpp mutexLocker.hpp
@ -3480,7 +3480,7 @@ register_definitions_<arch>.cpp interp_masm_<arch_model>.hpp
register_definitions_<arch>.cpp register.hpp register_definitions_<arch>.cpp register.hpp
register_definitions_<arch>.cpp register_<arch>.hpp register_definitions_<arch>.cpp register_<arch>.hpp
relocInfo.cpp assembler_<arch_model>.inline.hpp relocInfo.cpp assembler_<arch>.inline.hpp
relocInfo.cpp compiledIC.hpp relocInfo.cpp compiledIC.hpp
relocInfo.cpp copy.hpp relocInfo.cpp copy.hpp
relocInfo.cpp nativeInst_<arch>.hpp relocInfo.cpp nativeInst_<arch>.hpp
@ -3493,7 +3493,7 @@ relocInfo.hpp allocation.hpp
relocInfo.hpp top.hpp relocInfo.hpp top.hpp
relocInfo_<arch>.cpp assembler.inline.hpp relocInfo_<arch>.cpp assembler.inline.hpp
relocInfo_<arch>.cpp assembler_<arch_model>.inline.hpp relocInfo_<arch>.cpp assembler_<arch>.inline.hpp
relocInfo_<arch>.cpp nativeInst_<arch>.hpp relocInfo_<arch>.cpp nativeInst_<arch>.hpp
relocInfo_<arch>.cpp oop.inline.hpp relocInfo_<arch>.cpp oop.inline.hpp
relocInfo_<arch>.cpp relocInfo.hpp relocInfo_<arch>.cpp relocInfo.hpp
@ -3676,7 +3676,7 @@ sharedRuntime.hpp resourceArea.hpp
sharedRuntime.hpp threadLocalStorage.hpp sharedRuntime.hpp threadLocalStorage.hpp
sharedRuntime_<arch_model>.cpp assembler.hpp sharedRuntime_<arch_model>.cpp assembler.hpp
sharedRuntime_<arch_model>.cpp assembler_<arch_model>.inline.hpp sharedRuntime_<arch_model>.cpp assembler_<arch>.inline.hpp
sharedRuntime_<arch_model>.cpp compiledICHolderOop.hpp sharedRuntime_<arch_model>.cpp compiledICHolderOop.hpp
sharedRuntime_<arch_model>.cpp debugInfoRec.hpp sharedRuntime_<arch_model>.cpp debugInfoRec.hpp
sharedRuntime_<arch_model>.cpp icBuffer.hpp sharedRuntime_<arch_model>.cpp icBuffer.hpp
@ -3819,7 +3819,7 @@ statSampler.cpp vm_version_<arch_model>.hpp
statSampler.hpp perfData.hpp statSampler.hpp perfData.hpp
statSampler.hpp task.hpp statSampler.hpp task.hpp
stubCodeGenerator.cpp assembler_<arch_model>.inline.hpp stubCodeGenerator.cpp assembler_<arch>.inline.hpp
stubCodeGenerator.cpp disassembler.hpp stubCodeGenerator.cpp disassembler.hpp
stubCodeGenerator.cpp forte.hpp stubCodeGenerator.cpp forte.hpp
stubCodeGenerator.cpp oop.inline.hpp stubCodeGenerator.cpp oop.inline.hpp
@ -3830,7 +3830,7 @@ stubCodeGenerator.hpp allocation.hpp
stubCodeGenerator.hpp assembler.hpp stubCodeGenerator.hpp assembler.hpp
stubGenerator_<arch_model>.cpp assembler.hpp stubGenerator_<arch_model>.cpp assembler.hpp
stubGenerator_<arch_model>.cpp assembler_<arch_model>.inline.hpp stubGenerator_<arch_model>.cpp assembler_<arch>.inline.hpp
stubGenerator_<arch_model>.cpp frame.inline.hpp stubGenerator_<arch_model>.cpp frame.inline.hpp
stubGenerator_<arch_model>.cpp handles.inline.hpp stubGenerator_<arch_model>.cpp handles.inline.hpp
stubGenerator_<arch_model>.cpp instanceOop.hpp stubGenerator_<arch_model>.cpp instanceOop.hpp
@ -4562,7 +4562,7 @@ vm_version.cpp vm_version_<arch_model>.hpp
vm_version.hpp allocation.hpp vm_version.hpp allocation.hpp
vm_version.hpp ostream.hpp vm_version.hpp ostream.hpp
vm_version_<arch_model>.cpp assembler_<arch_model>.inline.hpp vm_version_<arch_model>.cpp assembler_<arch>.inline.hpp
vm_version_<arch_model>.cpp java.hpp vm_version_<arch_model>.cpp java.hpp
vm_version_<arch_model>.cpp os_<os_family>.inline.hpp vm_version_<arch_model>.cpp os_<os_family>.inline.hpp
vm_version_<arch_model>.cpp resourceArea.hpp vm_version_<arch_model>.cpp resourceArea.hpp
@ -4603,7 +4603,7 @@ vtableStubs.cpp vtune.hpp
vtableStubs.hpp allocation.hpp vtableStubs.hpp allocation.hpp
vtableStubs_<arch_model>.cpp assembler.hpp vtableStubs_<arch_model>.cpp assembler.hpp
vtableStubs_<arch_model>.cpp assembler_<arch_model>.inline.hpp vtableStubs_<arch_model>.cpp assembler_<arch>.inline.hpp
vtableStubs_<arch_model>.cpp instanceKlass.hpp vtableStubs_<arch_model>.cpp instanceKlass.hpp
vtableStubs_<arch_model>.cpp interp_masm_<arch_model>.hpp vtableStubs_<arch_model>.cpp interp_masm_<arch_model>.hpp
vtableStubs_<arch_model>.cpp klassVtable.hpp vtableStubs_<arch_model>.cpp klassVtable.hpp


@ -57,7 +57,7 @@ dump.cpp systemDictionary.hpp
dump.cpp vmThread.hpp dump.cpp vmThread.hpp
dump.cpp vm_operations.hpp dump.cpp vm_operations.hpp
dump_<arch_model>.cpp assembler_<arch_model>.inline.hpp dump_<arch_model>.cpp assembler_<arch>.inline.hpp
dump_<arch_model>.cpp compactingPermGenGen.hpp dump_<arch_model>.cpp compactingPermGenGen.hpp
forte.cpp collectedHeap.inline.hpp forte.cpp collectedHeap.inline.hpp


@ -199,6 +199,12 @@ public:
// "index" in "_offset_array". // "index" in "_offset_array".
HeapWord* address_for_index(size_t index) const; HeapWord* address_for_index(size_t index) const;
// Return the address "p" incremented by the size of
// a region. This method does not align the address
// returned to the start of a region. It is a simple
// primitive.
HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
// Shared space support // Shared space support
void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end); void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end);
}; };
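On inc_by_region_size(), used by the retire() change earlier in this diff: a minimal sketch of the primitive. N_words and the types are illustrative stand-ins, and, as the comment above says, no region alignment is implied.

    #include <cstddef>

    typedef unsigned long HeapWordValue;
    typedef HeapWordValue* HeapWord;

    static const size_t N_words = 64;  // words per region, for illustration

    // Plain pointer arithmetic: advance p by one region's worth of words.
    static HeapWord inc_by_region_size(HeapWord p) { return p + N_words; }

    int main() {
      HeapWordValue heap[256];
      HeapWord first_card_start  = &heap[0];
      HeapWord second_card_start = inc_by_region_size(first_card_start);
      return (size_t)(second_card_start - first_card_start) == N_words ? 0 : 1;
    }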


@ -573,8 +573,6 @@ const Type *AddPNode::bottom_type() const {
intptr_t txoffset = Type::OffsetBot; intptr_t txoffset = Type::OffsetBot;
if (tx->is_con()) { // Left input is an add of a constant? if (tx->is_con()) { // Left input is an add of a constant?
txoffset = tx->get_con(); txoffset = tx->get_con();
if (txoffset != (int)txoffset)
txoffset = Type::OffsetBot; // oops: add_offset will choke on it
} }
return tp->add_offset(txoffset); return tp->add_offset(txoffset);
} }
@ -595,8 +593,6 @@ const Type *AddPNode::Value( PhaseTransform *phase ) const {
intptr_t p2offset = Type::OffsetBot; intptr_t p2offset = Type::OffsetBot;
if (p2->is_con()) { // Left input is an add of a constant? if (p2->is_con()) { // Left input is an add of a constant?
p2offset = p2->get_con(); p2offset = p2->get_con();
if (p2offset != (int)p2offset)
p2offset = Type::OffsetBot; // oops: add_offset will choke on it
} }
return p1->add_offset(p2offset); return p1->add_offset(p2offset);
} }
@ -675,7 +671,7 @@ const Type *AddPNode::mach_bottom_type( const MachNode* n) {
// Check for any interesting operand info. // Check for any interesting operand info.
// In particular, check for both memory and non-memory operands. // In particular, check for both memory and non-memory operands.
// %%%%% Clean this up: use xadd_offset // %%%%% Clean this up: use xadd_offset
int con = opnd->constant(); intptr_t con = opnd->constant();
if ( con == TypePtr::OffsetBot ) goto bottom_out; if ( con == TypePtr::OffsetBot ) goto bottom_out;
offset += con; offset += con;
con = opnd->constant_disp(); con = opnd->constant_disp();
@ -695,6 +691,8 @@ const Type *AddPNode::mach_bottom_type( const MachNode* n) {
guarantee(tptr == NULL, "must be only one pointer operand"); guarantee(tptr == NULL, "must be only one pointer operand");
tptr = et->isa_oopptr(); tptr = et->isa_oopptr();
guarantee(tptr != NULL, "non-int operand must be pointer"); guarantee(tptr != NULL, "non-int operand must be pointer");
if (tptr->higher_equal(tp->add_offset(tptr->offset())))
tp = tptr; // Set more precise type for bailout
continue; continue;
} }
if ( eti->_hi != eti->_lo ) goto bottom_out; if ( eti->_hi != eti->_lo ) goto bottom_out;


@ -467,6 +467,10 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
// get successor block succ_no // get successor block succ_no
assert(succ_no < in->_num_succs, "illegal successor number"); assert(succ_no < in->_num_succs, "illegal successor number");
Block* out = in->_succs[succ_no]; Block* out = in->_succs[succ_no];
// Compute frequency of the new block. Do this before inserting
// new block in case succ_prob() needs to infer the probability from
// surrounding blocks.
float freq = in->_freq * in->succ_prob(succ_no);
// get ProjNode corresponding to the succ_no'th successor of the in block // get ProjNode corresponding to the succ_no'th successor of the in block
ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj(); ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
// create region for basic block // create region for basic block
@ -491,6 +495,8 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
} }
// remap predecessor's successor to new block // remap predecessor's successor to new block
in->_succs.map(succ_no, block); in->_succs.map(succ_no, block);
// Set the frequency of the new block
block->_freq = freq;
// add new basic block to basic block list // add new basic block to basic block list
_blocks.insert(block_no + 1, block); _blocks.insert(block_no + 1, block);
_num_blocks++; _num_blocks++;


@ -464,6 +464,12 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
} }
} }
if (kit.stopped()) {
// Instance does not exactly match the desired type.
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
}
// fall through if the instance exactly matches the desired type // fall through if the instance exactly matches the desired type
kit.replace_in_map(receiver, exact_receiver); kit.replace_in_map(receiver, exact_receiver);


@ -829,9 +829,7 @@ SafePointNode* SafePointNode::next_exception() const {
//------------------------------Ideal------------------------------------------ //------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions // Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (remove_dead_region(phase, can_reshape)) return this; return remove_dead_region(phase, can_reshape) ? this : NULL;
return NULL;
} }
//------------------------------Identity--------------------------------------- //------------------------------Identity---------------------------------------


@ -43,7 +43,7 @@ void LRG::dump( ) const {
if( _degree_valid ) tty->print( "%d ", _eff_degree ); if( _degree_valid ) tty->print( "%d ", _eff_degree );
else tty->print("? "); else tty->print("? ");
if( _def == NodeSentinel ) { if( is_multidef() ) {
tty->print("MultiDef "); tty->print("MultiDef ");
if (_defs != NULL) { if (_defs != NULL) {
tty->print("("); tty->print("(");
@ -765,7 +765,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// if the LRG is an unaligned pair, we will have to spill // if the LRG is an unaligned pair, we will have to spill
// so clear the LRG's register mask if it is not already spilled // so clear the LRG's register mask if it is not already spilled
if ( !n->is_SpillCopy() && if ( !n->is_SpillCopy() &&
(lrg._def == NULL || lrg._def == NodeSentinel || !lrg._def->is_SpillCopy()) && (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
lrgmask.is_misaligned_Pair()) { lrgmask.is_misaligned_Pair()) {
lrg.Clear(); lrg.Clear();
} }
@ -1282,7 +1282,7 @@ uint PhaseChaitin::Select( ) {
// Live range is live and no colors available // Live range is live and no colors available
else { else {
assert( lrg->alive(), "" ); assert( lrg->alive(), "" );
assert( !lrg->_fat_proj || lrg->_def == NodeSentinel || assert( !lrg->_fat_proj || lrg->is_multidef() ||
lrg->_def->outcnt() > 0, "fat_proj cannot spill"); lrg->_def->outcnt() > 0, "fat_proj cannot spill");
assert( !orig_mask.is_AllStack(), "All Stack does not spill" ); assert( !orig_mask.is_AllStack(), "All Stack does not spill" );


@ -156,6 +156,8 @@ public:
// Alive if non-zero, dead if zero // Alive if non-zero, dead if zero
bool alive() const { return _def != NULL; } bool alive() const { return _def != NULL; }
bool is_multidef() const { return _def == NodeSentinel; }
bool is_singledef() const { return _def != NodeSentinel; }
#ifndef PRODUCT #ifndef PRODUCT
void dump( ) const; void dump( ) const;
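On the is_multidef()/is_singledef() helpers added above: a minimal sketch of the sentinel idiom they name, in which one reserved pointer value distinguishes "defined in several places" from both "dead" (NULL) and a real defining node. The types here are hypothetical stand-ins for HotSpot's Node and LRG.

    struct Node {};
    static Node sentinel_storage;
    static Node* const NodeSentinel = &sentinel_storage;

    struct LRG {
      Node* _def;  // NULL = dead, NodeSentinel = several defs, else the one def
      bool alive()        const { return _def != 0; }
      bool is_multidef()  const { return _def == NodeSentinel; }
      bool is_singledef() const { return _def != NodeSentinel; }
    };

    int main() {
      Node n;
      LRG dead = { 0 }, multi = { NodeSentinel }, single = { &n };
      return (!dead.alive() && multi.is_multidef() && single.is_singledef())
                 ? 0 : 1;
    }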
@ -320,7 +322,8 @@ class PhaseChaitin : public PhaseRegAlloc {
uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ); uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ); uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ); int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg );
Node *split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ); Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits,
int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
// True if lidx is used before any real register is def'd in the block // True if lidx is used before any real register is def'd in the block
bool prompt_use( Block *b, uint lidx ); bool prompt_use( Block *b, uint lidx );
Node *get_spillcopy_wide( Node *def, Node *use, uint uidx ); Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
